author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 14:47:53 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 14:47:53 +0000
commit     c8bae7493d2f2910b57f13ded012e86bdcfb0532
tree       24e09d9f84dec336720cf393e156089ca2835791 /builtin
parent     Initial commit.
Adding upstream version 1:2.39.2. (upstream/1%2.39.2, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  builtin.h  251
-rw-r--r--  builtin/add.c  700
-rw-r--r--  builtin/am.c  2546
-rw-r--r--  builtin/annotate.c  22
-rw-r--r--  builtin/apply.c  33
-rw-r--r--  builtin/archive.c  110
-rw-r--r--  builtin/bisect--helper.c  1429
-rw-r--r--  builtin/blame.c  1223
-rw-r--r--  builtin/branch.c  915
-rw-r--r--  builtin/bugreport.c  188
-rw-r--r--  builtin/bundle.c  224
-rw-r--r--  builtin/cat-file.c  1033
-rw-r--r--  builtin/check-attr.c  189
-rw-r--r--  builtin/check-ignore.c  197
-rw-r--r--  builtin/check-mailmap.c  67
-rw-r--r--  builtin/check-ref-format.c  96
-rw-r--r--  builtin/checkout--worker.c  145
-rw-r--r--  builtin/checkout-index.c  334
-rw-r--r--  builtin/checkout.c  1958
-rw-r--r--  builtin/clean.c  1096
-rw-r--r--  builtin/clone.c  1400
-rw-r--r--  builtin/column.c  59
-rw-r--r--  builtin/commit-graph.c  328
-rw-r--r--  builtin/commit-tree.c  151
-rw-r--r--  builtin/commit.c  1882
-rw-r--r--  builtin/config.c  970
-rw-r--r--  builtin/count-objects.c  172
-rw-r--r--  builtin/credential-cache--daemon.c  319
-rw-r--r--  builtin/credential-cache.c  183
-rw-r--r--  builtin/credential-store.c  199
-rw-r--r--  builtin/credential.c  34
-rw-r--r--  builtin/describe.c  686
-rw-r--r--  builtin/diagnose.c  62
-rw-r--r--  builtin/diff-files.c  88
-rw-r--r--  builtin/diff-index.c  76
-rw-r--r--  builtin/diff-tree.c  228
-rw-r--r--  builtin/diff.c  618
-rw-r--r--  builtin/difftool.c  778
-rw-r--r--  builtin/env--helper.c  100
-rw-r--r--  builtin/fast-export.c  1282
-rw-r--r--  builtin/fast-import.c  3645
-rw-r--r--  builtin/fetch-pack.c  277
-rw-r--r--  builtin/fetch.c  2359
-rw-r--r--  builtin/fmt-merge-msg.c  69
-rw-r--r--  builtin/for-each-ref.c  101
-rw-r--r--  builtin/for-each-repo.c  62
-rw-r--r--  builtin/fsck.c  1018
-rw-r--r--  builtin/fsmonitor--daemon.c  1586
-rw-r--r--  builtin/gc.c  2651
-rw-r--r--  builtin/get-tar-commit-id.c  52
-rw-r--r--  builtin/grep.c  1252
-rw-r--r--  builtin/hash-object.c  166
-rw-r--r--  builtin/help.c  722
-rw-r--r--  builtin/hook.c  80
-rw-r--r--  builtin/index-pack.c  1959
-rw-r--r--  builtin/init-db.c  699
-rw-r--r--  builtin/interpret-trailers.c  142
-rw-r--r--  builtin/log.c  2502
-rw-r--r--  builtin/ls-files.c  891
-rw-r--r--  builtin/ls-remote.c  161
-rw-r--r--  builtin/ls-tree.c  437
-rw-r--r--  builtin/mailinfo.c  114
-rw-r--r--  builtin/mailsplit.c  370
-rw-r--r--  builtin/merge-base.c  193
-rw-r--r--  builtin/merge-file.c  124
-rw-r--r--  builtin/merge-index.c  118
-rw-r--r--  builtin/merge-ours.c  32
-rw-r--r--  builtin/merge-recursive.c  92
-rw-r--r--  builtin/merge-tree.c  587
-rw-r--r--  builtin/merge.c  1793
-rw-r--r--  builtin/mktag.c  108
-rw-r--r--  builtin/mktree.c  200
-rw-r--r--  builtin/multi-pack-index.c  287
-rw-r--r--  builtin/mv.c  565
-rw-r--r--  builtin/name-rev.c  686
-rw-r--r--  builtin/notes.c  1035
-rw-r--r--  builtin/pack-objects.c  4519
-rw-r--r--  builtin/pack-redundant.c  672
-rw-r--r--  builtin/pack-refs.c  24
-rw-r--r--  builtin/patch-id.c  240
-rw-r--r--  builtin/prune-packed.c  31
-rw-r--r--  builtin/prune.c  203
-rw-r--r--  builtin/pull.c  1159
-rw-r--r--  builtin/push.c  703
-rw-r--r--  builtin/range-diff.c  157
-rw-r--r--  builtin/read-tree.c  282
-rw-r--r--  builtin/rebase.c  1837
-rw-r--r--  builtin/receive-pack.c  2600
-rw-r--r--  builtin/reflog.c  430
-rw-r--r--  builtin/remote-ext.c  202
-rw-r--r--  builtin/remote-fd.c  82
-rw-r--r--  builtin/remote.c  1782
-rw-r--r--  builtin/repack.c  1181
-rw-r--r--  builtin/replace.c  626
-rw-r--r--  builtin/rerere.c  118
-rw-r--r--  builtin/reset.c  489
-rw-r--r--  builtin/rev-list.c  791
-rw-r--r--  builtin/rev-parse.c  1096
-rw-r--r--  builtin/revert.c  267
-rw-r--r--  builtin/rm.c  437
-rw-r--r--  builtin/send-pack.c  345
-rw-r--r--  builtin/shortlog.c  515
-rw-r--r--  builtin/show-branch.c  960
-rw-r--r--  builtin/show-index.c  108
-rw-r--r--  builtin/show-ref.c  228
-rw-r--r--  builtin/sparse-checkout.c  948
-rw-r--r--  builtin/stash.c  1863
-rw-r--r--  builtin/stripspace.c  65
-rw-r--r--  builtin/submodule--helper.c  3395
-rw-r--r--  builtin/symbolic-ref.c  87
-rw-r--r--  builtin/tag.c  647
-rw-r--r--  builtin/unpack-file.c  38
-rw-r--r--  builtin/unpack-objects.c  686
-rw-r--r--  builtin/update-index.c  1307
-rw-r--r--  builtin/update-ref.c  579
-rw-r--r--  builtin/update-server-info.c  26
-rw-r--r--  builtin/upload-archive.c  134
-rw-r--r--  builtin/upload-pack.c  75
-rw-r--r--  builtin/var.c  101
-rw-r--r--  builtin/verify-commit.c  90
-rw-r--r--  builtin/verify-pack.c  90
-rw-r--r--  builtin/verify-tag.c  77
-rw-r--r--  builtin/worktree.c  1195
-rw-r--r--  builtin/write-tree.c  57
124 files changed, 83050 insertions, 0 deletions
diff --git a/builtin.h b/builtin.h
new file mode 100644
index 0000000..8901a34
--- /dev/null
+++ b/builtin.h
@@ -0,0 +1,251 @@
+#ifndef BUILTIN_H
+#define BUILTIN_H
+
+#include "git-compat-util.h"
+#include "strbuf.h"
+#include "cache.h"
+#include "commit.h"
+
+/*
+ * builtin API
+ * ===========
+ *
+ * Adding a new built-in
+ * ---------------------
+ *
+ * There are 4 things to do to add a built-in command implementation to
+ * Git:
+ *
+ * . Define the implementation of the built-in command `foo` with
+ * signature:
+ *
+ * int cmd_foo(int argc, const char **argv, const char *prefix);
+ *
+ * . Add the external declaration for the function to `builtin.h`.
+ *
+ * . Add the command to the `commands[]` table defined in `git.c`.
+ * The entry should look like:
+ *
+ * { "foo", cmd_foo, <options> },
+ *
+ * where options is the bitwise-or of:
+ *
+ * `RUN_SETUP`:
+ * If there is not a Git directory to work on, abort. If there
+ * is a work tree, chdir to the top of it if the command was
+ * invoked in a subdirectory. If there is no work tree, no
+ * chdir() is done.
+ *
+ * `RUN_SETUP_GENTLY`:
+ * If there is a Git directory, chdir as per RUN_SETUP, otherwise,
+ * don't chdir anywhere.
+ *
+ * `USE_PAGER`:
+ *
+ * If the standard output is connected to a tty, spawn a pager and
+ * feed our output to it.
+ *
+ * `NEED_WORK_TREE`:
+ *
+ * Make sure there is a work tree, i.e. the command cannot act
+ * on bare repositories.
+ * This only makes sense when `RUN_SETUP` is also set.
+ *
+ * `SUPPORT_SUPER_PREFIX`:
+ *
+ * The built-in supports `--super-prefix`.
+ *
+ * `DELAY_PAGER_CONFIG`:
+ *
+ * If RUN_SETUP or RUN_SETUP_GENTLY is set, git.c normally handles
+ * the `pager.<cmd>`-configuration. If this flag is used, git.c
+ * will skip that step, instead allowing the built-in to make a
+ * more informed decision, e.g., by ignoring `pager.<cmd>` for
+ * certain subcommands.
+ *
+ * . Add `builtin/foo.o` to `BUILTIN_OBJS` in `Makefile`.
+ *
+ * Additionally, if `foo` is a new command, there are 4 more things to do:
+ *
+ * . Add tests to `t/` directory.
+ *
+ * . Write documentation in `Documentation/git-foo.txt`.
+ *
+ * . Add an entry for `git-foo` to `command-list.txt`.
+ *
+ * . Add an entry for `/git-foo` to `.gitignore`.
+ *
+ *
+ * How a built-in is called
+ * ------------------------
+ *
+ * The implementation `cmd_foo()` takes three parameters, `argc`, `argv`,
+ * and `prefix`. The first two are similar to what `main()` of a
+ * standalone command would be called with.
+ *
+ * When `RUN_SETUP` is specified in the `commands[]` table, and when you
+ * were started from a subdirectory of the work tree, `cmd_foo()` is called
+ * after chdir(2) to the top of the work tree, and `prefix` gets the path
+ * to the subdirectory the command started from. This allows you to
+ * convert a user-supplied pathname (typically relative to that directory)
+ * to a pathname relative to the top of the work tree.
+ *
+ * The return value from `cmd_foo()` becomes the exit status of the
+ * command.
+ */
+
+extern const char git_usage_string[];
+extern const char git_more_info_string[];
+
+/**
+ * If a built-in has DELAY_PAGER_CONFIG set, the built-in should call this early
+ * when it wishes to respect the `pager.foo`-config. The `cmd` is the name of
+ * the built-in, e.g., "foo". If a paging choice has already been set up, this
+ * does nothing. The default in `def` should be 0 for "pager off", 1 for "pager
+ * on" or -1 for "punt".
+ *
+ * You should most likely use a default of 0 or 1. "Punt" (-1) could be useful
+ * to be able to fall back to some historical compatibility name.
+ */
+void setup_auto_pager(const char *cmd, int def);
+
+int is_builtin(const char *s);
+
+int cmd_add(int argc, const char **argv, const char *prefix);
+int cmd_am(int argc, const char **argv, const char *prefix);
+int cmd_annotate(int argc, const char **argv, const char *prefix);
+int cmd_apply(int argc, const char **argv, const char *prefix);
+int cmd_archive(int argc, const char **argv, const char *prefix);
+int cmd_bisect__helper(int argc, const char **argv, const char *prefix);
+int cmd_blame(int argc, const char **argv, const char *prefix);
+int cmd_branch(int argc, const char **argv, const char *prefix);
+int cmd_bugreport(int argc, const char **argv, const char *prefix);
+int cmd_bundle(int argc, const char **argv, const char *prefix);
+int cmd_cat_file(int argc, const char **argv, const char *prefix);
+int cmd_checkout(int argc, const char **argv, const char *prefix);
+int cmd_checkout__worker(int argc, const char **argv, const char *prefix);
+int cmd_checkout_index(int argc, const char **argv, const char *prefix);
+int cmd_check_attr(int argc, const char **argv, const char *prefix);
+int cmd_check_ignore(int argc, const char **argv, const char *prefix);
+int cmd_check_mailmap(int argc, const char **argv, const char *prefix);
+int cmd_check_ref_format(int argc, const char **argv, const char *prefix);
+int cmd_cherry(int argc, const char **argv, const char *prefix);
+int cmd_cherry_pick(int argc, const char **argv, const char *prefix);
+int cmd_clone(int argc, const char **argv, const char *prefix);
+int cmd_clean(int argc, const char **argv, const char *prefix);
+int cmd_column(int argc, const char **argv, const char *prefix);
+int cmd_commit(int argc, const char **argv, const char *prefix);
+int cmd_commit_graph(int argc, const char **argv, const char *prefix);
+int cmd_commit_tree(int argc, const char **argv, const char *prefix);
+int cmd_config(int argc, const char **argv, const char *prefix);
+int cmd_count_objects(int argc, const char **argv, const char *prefix);
+int cmd_credential(int argc, const char **argv, const char *prefix);
+int cmd_credential_cache(int argc, const char **argv, const char *prefix);
+int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix);
+int cmd_credential_store(int argc, const char **argv, const char *prefix);
+int cmd_describe(int argc, const char **argv, const char *prefix);
+int cmd_diagnose(int argc, const char **argv, const char *prefix);
+int cmd_diff_files(int argc, const char **argv, const char *prefix);
+int cmd_diff_index(int argc, const char **argv, const char *prefix);
+int cmd_diff(int argc, const char **argv, const char *prefix);
+int cmd_diff_tree(int argc, const char **argv, const char *prefix);
+int cmd_difftool(int argc, const char **argv, const char *prefix);
+int cmd_env__helper(int argc, const char **argv, const char *prefix);
+int cmd_fast_export(int argc, const char **argv, const char *prefix);
+int cmd_fast_import(int argc, const char **argv, const char *prefix);
+int cmd_fetch(int argc, const char **argv, const char *prefix);
+int cmd_fetch_pack(int argc, const char **argv, const char *prefix);
+int cmd_fmt_merge_msg(int argc, const char **argv, const char *prefix);
+int cmd_for_each_ref(int argc, const char **argv, const char *prefix);
+int cmd_for_each_repo(int argc, const char **argv, const char *prefix);
+int cmd_format_patch(int argc, const char **argv, const char *prefix);
+int cmd_fsck(int argc, const char **argv, const char *prefix);
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix);
+int cmd_gc(int argc, const char **argv, const char *prefix);
+int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix);
+int cmd_grep(int argc, const char **argv, const char *prefix);
+int cmd_hash_object(int argc, const char **argv, const char *prefix);
+int cmd_help(int argc, const char **argv, const char *prefix);
+int cmd_hook(int argc, const char **argv, const char *prefix);
+int cmd_index_pack(int argc, const char **argv, const char *prefix);
+int cmd_init_db(int argc, const char **argv, const char *prefix);
+int cmd_interpret_trailers(int argc, const char **argv, const char *prefix);
+int cmd_log(int argc, const char **argv, const char *prefix);
+int cmd_log_reflog(int argc, const char **argv, const char *prefix);
+int cmd_ls_files(int argc, const char **argv, const char *prefix);
+int cmd_ls_tree(int argc, const char **argv, const char *prefix);
+int cmd_ls_remote(int argc, const char **argv, const char *prefix);
+int cmd_mailinfo(int argc, const char **argv, const char *prefix);
+int cmd_mailsplit(int argc, const char **argv, const char *prefix);
+int cmd_maintenance(int argc, const char **argv, const char *prefix);
+int cmd_merge(int argc, const char **argv, const char *prefix);
+int cmd_merge_base(int argc, const char **argv, const char *prefix);
+int cmd_merge_index(int argc, const char **argv, const char *prefix);
+int cmd_merge_ours(int argc, const char **argv, const char *prefix);
+int cmd_merge_file(int argc, const char **argv, const char *prefix);
+int cmd_merge_recursive(int argc, const char **argv, const char *prefix);
+int cmd_merge_tree(int argc, const char **argv, const char *prefix);
+int cmd_mktag(int argc, const char **argv, const char *prefix);
+int cmd_mktree(int argc, const char **argv, const char *prefix);
+int cmd_multi_pack_index(int argc, const char **argv, const char *prefix);
+int cmd_mv(int argc, const char **argv, const char *prefix);
+int cmd_name_rev(int argc, const char **argv, const char *prefix);
+int cmd_notes(int argc, const char **argv, const char *prefix);
+int cmd_pack_objects(int argc, const char **argv, const char *prefix);
+int cmd_pack_redundant(int argc, const char **argv, const char *prefix);
+int cmd_patch_id(int argc, const char **argv, const char *prefix);
+int cmd_prune(int argc, const char **argv, const char *prefix);
+int cmd_prune_packed(int argc, const char **argv, const char *prefix);
+int cmd_pull(int argc, const char **argv, const char *prefix);
+int cmd_push(int argc, const char **argv, const char *prefix);
+int cmd_range_diff(int argc, const char **argv, const char *prefix);
+int cmd_read_tree(int argc, const char **argv, const char *prefix);
+int cmd_rebase(int argc, const char **argv, const char *prefix);
+int cmd_rebase__interactive(int argc, const char **argv, const char *prefix);
+int cmd_receive_pack(int argc, const char **argv, const char *prefix);
+int cmd_reflog(int argc, const char **argv, const char *prefix);
+int cmd_remote(int argc, const char **argv, const char *prefix);
+int cmd_remote_ext(int argc, const char **argv, const char *prefix);
+int cmd_remote_fd(int argc, const char **argv, const char *prefix);
+int cmd_repack(int argc, const char **argv, const char *prefix);
+int cmd_rerere(int argc, const char **argv, const char *prefix);
+int cmd_reset(int argc, const char **argv, const char *prefix);
+int cmd_restore(int argc, const char **argv, const char *prefix);
+int cmd_rev_list(int argc, const char **argv, const char *prefix);
+int cmd_rev_parse(int argc, const char **argv, const char *prefix);
+int cmd_revert(int argc, const char **argv, const char *prefix);
+int cmd_rm(int argc, const char **argv, const char *prefix);
+int cmd_send_pack(int argc, const char **argv, const char *prefix);
+int cmd_shortlog(int argc, const char **argv, const char *prefix);
+int cmd_show(int argc, const char **argv, const char *prefix);
+int cmd_show_branch(int argc, const char **argv, const char *prefix);
+int cmd_show_index(int argc, const char **argv, const char *prefix);
+int cmd_sparse_checkout(int argc, const char **argv, const char *prefix);
+int cmd_status(int argc, const char **argv, const char *prefix);
+int cmd_stash(int argc, const char **argv, const char *prefix);
+int cmd_stripspace(int argc, const char **argv, const char *prefix);
+int cmd_submodule__helper(int argc, const char **argv, const char *prefix);
+int cmd_switch(int argc, const char **argv, const char *prefix);
+int cmd_symbolic_ref(int argc, const char **argv, const char *prefix);
+int cmd_tag(int argc, const char **argv, const char *prefix);
+int cmd_unpack_file(int argc, const char **argv, const char *prefix);
+int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
+int cmd_update_index(int argc, const char **argv, const char *prefix);
+int cmd_update_ref(int argc, const char **argv, const char *prefix);
+int cmd_update_server_info(int argc, const char **argv, const char *prefix);
+int cmd_upload_archive(int argc, const char **argv, const char *prefix);
+int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix);
+int cmd_upload_pack(int argc, const char **argv, const char *prefix);
+int cmd_var(int argc, const char **argv, const char *prefix);
+int cmd_verify_commit(int argc, const char **argv, const char *prefix);
+int cmd_verify_tag(int argc, const char **argv, const char *prefix);
+int cmd_version(int argc, const char **argv, const char *prefix);
+int cmd_whatchanged(int argc, const char **argv, const char *prefix);
+int cmd_worktree(int argc, const char **argv, const char *prefix);
+int cmd_write_tree(int argc, const char **argv, const char *prefix);
+int cmd_verify_pack(int argc, const char **argv, const char *prefix);
+int cmd_show_ref(int argc, const char **argv, const char *prefix);
+int cmd_pack_refs(int argc, const char **argv, const char *prefix);
+int cmd_replace(int argc, const char **argv, const char *prefix);
+
+#endif
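
As an illustration of the steps described in the "Adding a new built-in" comment above, a minimal built-in might look like the sketch below. The command name `foo`, its `--bar` option and its output are invented for this example; only the function signature, the parse_options() call and the `commands[]` entry follow the documented API.

	/* builtin/foo.c -- illustrative sketch, not part of this commit */
	#include "builtin.h"
	#include "parse-options.h"

	static const char * const foo_usage[] = {
		N_("git foo [--bar] [<pathspec>...]"),
		NULL
	};

	int cmd_foo(int argc, const char **argv, const char *prefix)
	{
		int bar = 0;
		struct option options[] = {
			OPT_BOOL(0, "bar", &bar, N_("enable bar")),
			OPT_END(),
		};

		argc = parse_options(argc, argv, prefix, options, foo_usage, 0);

		/*
		 * With RUN_SETUP, git.c has already chdir'd to the top of
		 * the work tree; `prefix` is the subdirectory the command
		 * was started from, so user-supplied relative paths can be
		 * adjusted with prefix_filename(prefix, arg).
		 */
		printf("bar is %d\n", bar);
		return 0;
	}

	/* corresponding entry in the commands[] table in git.c */
	{ "foo", cmd_foo, RUN_SETUP | NEED_WORK_TREE },

The matching declaration, int cmd_foo(int argc, const char **argv, const char *prefix);, would be added to the list above, and builtin/foo.o to BUILTIN_OBJS in the Makefile.
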
diff --git a/builtin/add.c b/builtin/add.c
new file mode 100644
index 0000000..76277df
--- /dev/null
+++ b/builtin/add.c
@@ -0,0 +1,700 @@
+/*
+ * "git add" builtin command
+ *
+ * Copyright (C) 2006 Linus Torvalds
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "lockfile.h"
+#include "dir.h"
+#include "pathspec.h"
+#include "exec-cmd.h"
+#include "cache-tree.h"
+#include "run-command.h"
+#include "parse-options.h"
+#include "diff.h"
+#include "diffcore.h"
+#include "revision.h"
+#include "bulk-checkin.h"
+#include "strvec.h"
+#include "submodule.h"
+#include "add-interactive.h"
+
+static const char * const builtin_add_usage[] = {
+ N_("git add [<options>] [--] <pathspec>..."),
+ NULL
+};
+static int patch_interactive, add_interactive, edit_interactive;
+static int take_worktree_changes;
+static int add_renormalize;
+static int pathspec_file_nul;
+static int include_sparse;
+static const char *pathspec_from_file;
+
+struct update_callback_data {
+ int flags;
+ int add_errors;
+};
+
+static int chmod_pathspec(struct pathspec *pathspec, char flip, int show_only)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < the_index.cache_nr; i++) {
+ struct cache_entry *ce = the_index.cache[i];
+ int err;
+
+ if (!include_sparse &&
+ (ce_skip_worktree(ce) ||
+ !path_in_sparse_checkout(ce->name, &the_index)))
+ continue;
+
+ if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
+ continue;
+
+ if (!show_only)
+ err = chmod_index_entry(&the_index, ce, flip);
+ else
+ err = S_ISREG(ce->ce_mode) ? 0 : -1;
+
+ if (err < 0)
+ ret = error(_("cannot chmod %cx '%s'"), flip, ce->name);
+ }
+
+ return ret;
+}
+
+static int fix_unmerged_status(struct diff_filepair *p,
+ struct update_callback_data *data)
+{
+ if (p->status != DIFF_STATUS_UNMERGED)
+ return p->status;
+ if (!(data->flags & ADD_CACHE_IGNORE_REMOVAL) && !p->two->mode)
+ /*
+ * This is not an explicit add request, and the
+ * path is missing from the working tree (deleted)
+ */
+ return DIFF_STATUS_DELETED;
+ else
+ /*
+ * Either an explicit add request, or path exists
+ * in the working tree. An attempt to explicitly
+ * add a path that does not exist in the working tree
+ * will be caught as an error by the caller immediately.
+ */
+ return DIFF_STATUS_MODIFIED;
+}
+
+static void update_callback(struct diff_queue_struct *q,
+ struct diff_options *opt, void *cbdata)
+{
+ int i;
+ struct update_callback_data *data = cbdata;
+
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ const char *path = p->one->path;
+
+ if (!include_sparse && !path_in_sparse_checkout(path, &the_index))
+ continue;
+
+ switch (fix_unmerged_status(p, data)) {
+ default:
+ die(_("unexpected diff status %c"), p->status);
+ case DIFF_STATUS_MODIFIED:
+ case DIFF_STATUS_TYPE_CHANGED:
+ if (add_file_to_index(&the_index, path, data->flags)) {
+ if (!(data->flags & ADD_CACHE_IGNORE_ERRORS))
+ die(_("updating files failed"));
+ data->add_errors++;
+ }
+ break;
+ case DIFF_STATUS_DELETED:
+ if (data->flags & ADD_CACHE_IGNORE_REMOVAL)
+ break;
+ if (!(data->flags & ADD_CACHE_PRETEND))
+ remove_file_from_index(&the_index, path);
+ if (data->flags & (ADD_CACHE_PRETEND|ADD_CACHE_VERBOSE))
+ printf(_("remove '%s'\n"), path);
+ break;
+ }
+ }
+}
+
+int add_files_to_cache(const char *prefix,
+ const struct pathspec *pathspec, int flags)
+{
+ struct update_callback_data data;
+ struct rev_info rev;
+
+ memset(&data, 0, sizeof(data));
+ data.flags = flags;
+
+ repo_init_revisions(the_repository, &rev, prefix);
+ setup_revisions(0, NULL, &rev, NULL);
+ if (pathspec)
+ copy_pathspec(&rev.prune_data, pathspec);
+ rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = update_callback;
+ rev.diffopt.format_callback_data = &data;
+ rev.diffopt.flags.override_submodule_config = 1;
+ rev.max_count = 0; /* do not compare unmerged paths with stage #2 */
+
+ /*
+ * Use an ODB transaction to optimize adding multiple objects.
+ * This function is invoked from commands other than 'add', which
+ * may not have their own transaction active.
+ */
+ begin_odb_transaction();
+ run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
+ end_odb_transaction();
+
+ release_revisions(&rev);
+ return !!data.add_errors;
+}
+
+static int renormalize_tracked_files(const struct pathspec *pathspec, int flags)
+{
+ int i, retval = 0;
+
+ for (i = 0; i < the_index.cache_nr; i++) {
+ struct cache_entry *ce = the_index.cache[i];
+
+ if (!include_sparse &&
+ (ce_skip_worktree(ce) ||
+ !path_in_sparse_checkout(ce->name, &the_index)))
+ continue;
+ if (ce_stage(ce))
+ continue; /* do not touch unmerged paths */
+ if (!S_ISREG(ce->ce_mode) && !S_ISLNK(ce->ce_mode))
+ continue; /* do not touch non blobs */
+ if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
+ continue;
+ retval |= add_file_to_index(&the_index, ce->name,
+ flags | ADD_CACHE_RENORMALIZE);
+ }
+
+ return retval;
+}
+
+static char *prune_directory(struct dir_struct *dir, struct pathspec *pathspec, int prefix)
+{
+ char *seen;
+ int i;
+ struct dir_entry **src, **dst;
+
+ seen = xcalloc(pathspec->nr, 1);
+
+ src = dst = dir->entries;
+ i = dir->nr;
+ while (--i >= 0) {
+ struct dir_entry *entry = *src++;
+ if (dir_path_match(&the_index, entry, pathspec, prefix, seen))
+ *dst++ = entry;
+ }
+ dir->nr = dst - dir->entries;
+ add_pathspec_matches_against_index(pathspec, &the_index, seen,
+ PS_IGNORE_SKIP_WORKTREE);
+ return seen;
+}
+
+static int refresh(int verbose, const struct pathspec *pathspec)
+{
+ char *seen;
+ int i, ret = 0;
+ char *skip_worktree_seen = NULL;
+ struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP;
+ int flags = REFRESH_IGNORE_SKIP_WORKTREE |
+ (verbose ? REFRESH_IN_PORCELAIN : REFRESH_QUIET);
+
+ seen = xcalloc(pathspec->nr, 1);
+ refresh_index(&the_index, flags, pathspec, seen,
+ _("Unstaged changes after refreshing the index:"));
+ for (i = 0; i < pathspec->nr; i++) {
+ if (!seen[i]) {
+ const char *path = pathspec->items[i].original;
+
+ if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
+ !path_in_sparse_checkout(path, &the_index)) {
+ string_list_append(&only_match_skip_worktree,
+ pathspec->items[i].original);
+ } else {
+ die(_("pathspec '%s' did not match any files"),
+ pathspec->items[i].original);
+ }
+ }
+ }
+
+ if (only_match_skip_worktree.nr) {
+ advise_on_updating_sparse_paths(&only_match_skip_worktree);
+ ret = 1;
+ }
+
+ free(seen);
+ free(skip_worktree_seen);
+ string_list_clear(&only_match_skip_worktree, 0);
+ return ret;
+}
+
+int run_add_interactive(const char *revision, const char *patch_mode,
+ const struct pathspec *pathspec)
+{
+ int i;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ int use_builtin_add_i =
+ git_env_bool("GIT_TEST_ADD_I_USE_BUILTIN", -1);
+
+ if (use_builtin_add_i < 0 &&
+ git_config_get_bool("add.interactive.usebuiltin",
+ &use_builtin_add_i))
+ use_builtin_add_i = 1;
+
+ if (use_builtin_add_i != 0) {
+ enum add_p_mode mode;
+
+ if (!patch_mode)
+ return !!run_add_i(the_repository, pathspec);
+
+ if (!strcmp(patch_mode, "--patch"))
+ mode = ADD_P_ADD;
+ else if (!strcmp(patch_mode, "--patch=stash"))
+ mode = ADD_P_STASH;
+ else if (!strcmp(patch_mode, "--patch=reset"))
+ mode = ADD_P_RESET;
+ else if (!strcmp(patch_mode, "--patch=checkout"))
+ mode = ADD_P_CHECKOUT;
+ else if (!strcmp(patch_mode, "--patch=worktree"))
+ mode = ADD_P_WORKTREE;
+ else
+ die("'%s' not supported", patch_mode);
+
+ return !!run_add_p(the_repository, mode, revision, pathspec);
+ }
+
+ strvec_push(&cmd.args, "add--interactive");
+ if (patch_mode)
+ strvec_push(&cmd.args, patch_mode);
+ if (revision)
+ strvec_push(&cmd.args, revision);
+ strvec_push(&cmd.args, "--");
+ for (i = 0; i < pathspec->nr; i++)
+ /* pass original pathspec, to be re-parsed */
+ strvec_push(&cmd.args, pathspec->items[i].original);
+
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
+}
+
+int interactive_add(const char **argv, const char *prefix, int patch)
+{
+ struct pathspec pathspec;
+
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_FULL |
+ PATHSPEC_SYMLINK_LEADING_PATH |
+ PATHSPEC_PREFIX_ORIGIN,
+ prefix, argv);
+
+ return run_add_interactive(NULL,
+ patch ? "--patch" : NULL,
+ &pathspec);
+}
+
+static int edit_patch(int argc, const char **argv, const char *prefix)
+{
+ char *file = git_pathdup("ADD_EDIT.patch");
+ struct child_process child = CHILD_PROCESS_INIT;
+ struct rev_info rev;
+ int out;
+ struct stat st;
+
+ git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
+
+ if (repo_read_index(the_repository) < 0)
+ die(_("Could not read the index"));
+
+ repo_init_revisions(the_repository, &rev, prefix);
+ rev.diffopt.context = 7;
+
+ argc = setup_revisions(argc, argv, &rev, NULL);
+ rev.diffopt.output_format = DIFF_FORMAT_PATCH;
+ rev.diffopt.use_color = 0;
+ rev.diffopt.flags.ignore_dirty_submodules = 1;
+ out = xopen(file, O_CREAT | O_WRONLY | O_TRUNC, 0666);
+ rev.diffopt.file = xfdopen(out, "w");
+ rev.diffopt.close_file = 1;
+ if (run_diff_files(&rev, 0))
+ die(_("Could not write patch"));
+
+ if (launch_editor(file, NULL, NULL))
+ die(_("editing patch failed"));
+
+ if (stat(file, &st))
+ die_errno(_("Could not stat '%s'"), file);
+ if (!st.st_size)
+ die(_("Empty patch. Aborted."));
+
+ child.git_cmd = 1;
+ strvec_pushl(&child.args, "apply", "--recount", "--cached", file,
+ NULL);
+ if (run_command(&child))
+ die(_("Could not apply '%s'"), file);
+
+ unlink(file);
+ free(file);
+ release_revisions(&rev);
+ return 0;
+}
+
+static const char ignore_error[] =
+N_("The following paths are ignored by one of your .gitignore files:\n");
+
+static int verbose, show_only, ignored_too, refresh_only;
+static int ignore_add_errors, intent_to_add, ignore_missing;
+static int warn_on_embedded_repo = 1;
+
+#define ADDREMOVE_DEFAULT 1
+static int addremove = ADDREMOVE_DEFAULT;
+static int addremove_explicit = -1; /* unspecified */
+
+static char *chmod_arg;
+
+static int ignore_removal_cb(const struct option *opt, const char *arg, int unset)
+{
+ /* if we are told to ignore, we are not adding removals */
+ *(int *)opt->value = !unset ? 0 : 1;
+ return 0;
+}
+
+static struct option builtin_add_options[] = {
+ OPT__DRY_RUN(&show_only, N_("dry run")),
+ OPT__VERBOSE(&verbose, N_("be verbose")),
+ OPT_GROUP(""),
+ OPT_BOOL('i', "interactive", &add_interactive, N_("interactive picking")),
+ OPT_BOOL('p', "patch", &patch_interactive, N_("select hunks interactively")),
+ OPT_BOOL('e', "edit", &edit_interactive, N_("edit current diff and apply")),
+ OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files"), 0),
+ OPT_BOOL('u', "update", &take_worktree_changes, N_("update tracked files")),
+ OPT_BOOL(0, "renormalize", &add_renormalize, N_("renormalize EOL of tracked files (implies -u)")),
+ OPT_BOOL('N', "intent-to-add", &intent_to_add, N_("record only the fact that the path will be added later")),
+ OPT_BOOL('A', "all", &addremove_explicit, N_("add changes from all tracked and untracked files")),
+ OPT_CALLBACK_F(0, "ignore-removal", &addremove_explicit,
+ NULL /* takes no arguments */,
+ N_("ignore paths removed in the working tree (same as --no-all)"),
+ PARSE_OPT_NOARG, ignore_removal_cb),
+ OPT_BOOL( 0 , "refresh", &refresh_only, N_("don't add, only refresh the index")),
+ OPT_BOOL( 0 , "ignore-errors", &ignore_add_errors, N_("just skip files which cannot be added because of errors")),
+ OPT_BOOL( 0 , "ignore-missing", &ignore_missing, N_("check if - even missing - files are ignored in dry run")),
+ OPT_BOOL(0, "sparse", &include_sparse, N_("allow updating entries outside of the sparse-checkout cone")),
+ OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x",
+ N_("override the executable bit of the listed files")),
+ OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo,
+ N_("warn when adding an embedded repository")),
+ OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
+ OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
+ OPT_END(),
+};
+
+static int add_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "add.ignoreerrors") ||
+ !strcmp(var, "add.ignore-errors")) {
+ ignore_add_errors = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+static const char embedded_advice[] = N_(
+"You've added another git repository inside your current repository.\n"
+"Clones of the outer repository will not contain the contents of\n"
+"the embedded repository and will not know how to obtain it.\n"
+"If you meant to add a submodule, use:\n"
+"\n"
+" git submodule add <url> %s\n"
+"\n"
+"If you added this path by mistake, you can remove it from the\n"
+"index with:\n"
+"\n"
+" git rm --cached %s\n"
+"\n"
+"See \"git help submodule\" for more information."
+);
+
+static void check_embedded_repo(const char *path)
+{
+ struct strbuf name = STRBUF_INIT;
+ static int adviced_on_embedded_repo = 0;
+
+ if (!warn_on_embedded_repo)
+ return;
+ if (!ends_with(path, "/"))
+ return;
+
+ /* Drop trailing slash for aesthetics */
+ strbuf_addstr(&name, path);
+ strbuf_strip_suffix(&name, "/");
+
+ warning(_("adding embedded git repository: %s"), name.buf);
+ if (!adviced_on_embedded_repo &&
+ advice_enabled(ADVICE_ADD_EMBEDDED_REPO)) {
+ advise(embedded_advice, name.buf, name.buf);
+ adviced_on_embedded_repo = 1;
+ }
+
+ strbuf_release(&name);
+}
+
+static int add_files(struct dir_struct *dir, int flags)
+{
+ int i, exit_status = 0;
+ struct string_list matched_sparse_paths = STRING_LIST_INIT_NODUP;
+
+ if (dir->ignored_nr) {
+ fprintf(stderr, _(ignore_error));
+ for (i = 0; i < dir->ignored_nr; i++)
+ fprintf(stderr, "%s\n", dir->ignored[i]->name);
+ if (advice_enabled(ADVICE_ADD_IGNORED_FILE))
+ advise(_("Use -f if you really want to add them.\n"
+ "Turn this message off by running\n"
+ "\"git config advice.addIgnoredFile false\""));
+ exit_status = 1;
+ }
+
+ for (i = 0; i < dir->nr; i++) {
+ if (!include_sparse &&
+ !path_in_sparse_checkout(dir->entries[i]->name, &the_index)) {
+ string_list_append(&matched_sparse_paths,
+ dir->entries[i]->name);
+ continue;
+ }
+ if (add_file_to_index(&the_index, dir->entries[i]->name, flags)) {
+ if (!ignore_add_errors)
+ die(_("adding files failed"));
+ exit_status = 1;
+ } else {
+ check_embedded_repo(dir->entries[i]->name);
+ }
+ }
+
+ if (matched_sparse_paths.nr) {
+ advise_on_updating_sparse_paths(&matched_sparse_paths);
+ exit_status = 1;
+ }
+
+ string_list_clear(&matched_sparse_paths, 0);
+
+ return exit_status;
+}
+
+int cmd_add(int argc, const char **argv, const char *prefix)
+{
+ int exit_status = 0;
+ struct pathspec pathspec;
+ struct dir_struct dir = DIR_INIT;
+ int flags;
+ int add_new_files;
+ int require_pathspec;
+ char *seen = NULL;
+ struct lock_file lock_file = LOCK_INIT;
+
+ git_config(add_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, builtin_add_options,
+ builtin_add_usage, PARSE_OPT_KEEP_ARGV0);
+ if (patch_interactive)
+ add_interactive = 1;
+ if (add_interactive) {
+ if (show_only)
+ die(_("options '%s' and '%s' cannot be used together"), "--dry-run", "--interactive/--patch");
+ if (pathspec_from_file)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--interactive/--patch");
+ exit(interactive_add(argv + 1, prefix, patch_interactive));
+ }
+
+ if (edit_interactive) {
+ if (pathspec_from_file)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--edit");
+ return(edit_patch(argc, argv, prefix));
+ }
+ argc--;
+ argv++;
+
+ if (0 <= addremove_explicit)
+ addremove = addremove_explicit;
+ else if (take_worktree_changes && ADDREMOVE_DEFAULT)
+ addremove = 0; /* "-u" was given but not "-A" */
+
+ if (addremove && take_worktree_changes)
+ die(_("options '%s' and '%s' cannot be used together"), "-A", "-u");
+
+ if (!show_only && ignore_missing)
+ die(_("the option '%s' requires '%s'"), "--ignore-missing", "--dry-run");
+
+ if (chmod_arg && ((chmod_arg[0] != '-' && chmod_arg[0] != '+') ||
+ chmod_arg[1] != 'x' || chmod_arg[2]))
+ die(_("--chmod param '%s' must be either -x or +x"), chmod_arg);
+
+ add_new_files = !take_worktree_changes && !refresh_only && !add_renormalize;
+ require_pathspec = !(take_worktree_changes || (0 < addremove_explicit));
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+
+ /*
+ * Check the "pathspec '%s' did not match any files" block
+ * below before enabling new magic.
+ */
+ parse_pathspec(&pathspec, PATHSPEC_ATTR,
+ PATHSPEC_PREFER_FULL |
+ PATHSPEC_SYMLINK_LEADING_PATH,
+ prefix, argv);
+
+ if (pathspec_from_file) {
+ if (pathspec.nr)
+ die(_("'%s' and pathspec arguments cannot be used together"), "--pathspec-from-file");
+
+ parse_pathspec_file(&pathspec, PATHSPEC_ATTR,
+ PATHSPEC_PREFER_FULL |
+ PATHSPEC_SYMLINK_LEADING_PATH,
+ prefix, pathspec_from_file, pathspec_file_nul);
+ } else if (pathspec_file_nul) {
+ die(_("the option '%s' requires '%s'"), "--pathspec-file-nul", "--pathspec-from-file");
+ }
+
+ if (require_pathspec && pathspec.nr == 0) {
+ fprintf(stderr, _("Nothing specified, nothing added.\n"));
+ if (advice_enabled(ADVICE_ADD_EMPTY_PATHSPEC))
+ advise( _("Maybe you wanted to say 'git add .'?\n"
+ "Turn this message off by running\n"
+ "\"git config advice.addEmptyPathspec false\""));
+ return 0;
+ }
+
+ if (!take_worktree_changes && addremove_explicit < 0 && pathspec.nr)
+ /* Turn "git add pathspec..." to "git add -A pathspec..." */
+ addremove = 1;
+
+ flags = ((verbose ? ADD_CACHE_VERBOSE : 0) |
+ (show_only ? ADD_CACHE_PRETEND : 0) |
+ (intent_to_add ? ADD_CACHE_INTENT : 0) |
+ (ignore_add_errors ? ADD_CACHE_IGNORE_ERRORS : 0) |
+ (!(addremove || take_worktree_changes)
+ ? ADD_CACHE_IGNORE_REMOVAL : 0));
+
+ if (repo_read_index_preload(the_repository, &pathspec, 0) < 0)
+ die(_("index file corrupt"));
+
+ die_in_unpopulated_submodule(&the_index, prefix);
+ die_path_inside_submodule(&the_index, &pathspec);
+
+ if (add_new_files) {
+ int baselen;
+
+ /* Set up the default git porcelain excludes */
+ if (!ignored_too) {
+ dir.flags |= DIR_COLLECT_IGNORED;
+ setup_standard_excludes(&dir);
+ }
+
+ /* This picks up the paths that are not tracked */
+ baselen = fill_directory(&dir, &the_index, &pathspec);
+ if (pathspec.nr)
+ seen = prune_directory(&dir, &pathspec, baselen);
+ }
+
+ if (refresh_only) {
+ exit_status |= refresh(verbose, &pathspec);
+ goto finish;
+ }
+
+ if (pathspec.nr) {
+ int i;
+ char *skip_worktree_seen = NULL;
+ struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP;
+
+ if (!seen)
+ seen = find_pathspecs_matching_against_index(&pathspec,
+ &the_index, PS_IGNORE_SKIP_WORKTREE);
+
+ /*
+ * file_exists() assumes exact match
+ */
+ GUARD_PATHSPEC(&pathspec,
+ PATHSPEC_FROMTOP |
+ PATHSPEC_LITERAL |
+ PATHSPEC_GLOB |
+ PATHSPEC_ICASE |
+ PATHSPEC_EXCLUDE);
+
+ for (i = 0; i < pathspec.nr; i++) {
+ const char *path = pathspec.items[i].match;
+
+ if (pathspec.items[i].magic & PATHSPEC_EXCLUDE)
+ continue;
+ if (seen[i])
+ continue;
+
+ if (!include_sparse &&
+ matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) {
+ string_list_append(&only_match_skip_worktree,
+ pathspec.items[i].original);
+ continue;
+ }
+
+ /* Don't complain at 'git add .' on empty repo */
+ if (!path[0])
+ continue;
+
+ if ((pathspec.items[i].magic & (PATHSPEC_GLOB | PATHSPEC_ICASE)) ||
+ !file_exists(path)) {
+ if (ignore_missing) {
+ int dtype = DT_UNKNOWN;
+ if (is_excluded(&dir, &the_index, path, &dtype))
+ dir_add_ignored(&dir, &the_index,
+ path, pathspec.items[i].len);
+ } else
+ die(_("pathspec '%s' did not match any files"),
+ pathspec.items[i].original);
+ }
+ }
+
+
+ if (only_match_skip_worktree.nr) {
+ advise_on_updating_sparse_paths(&only_match_skip_worktree);
+ exit_status = 1;
+ }
+
+ free(seen);
+ free(skip_worktree_seen);
+ string_list_clear(&only_match_skip_worktree, 0);
+ }
+
+ begin_odb_transaction();
+
+ if (add_renormalize)
+ exit_status |= renormalize_tracked_files(&pathspec, flags);
+ else
+ exit_status |= add_files_to_cache(prefix, &pathspec, flags);
+
+ if (add_new_files)
+ exit_status |= add_files(&dir, flags);
+
+ if (chmod_arg && pathspec.nr)
+ exit_status |= chmod_pathspec(&pathspec, chmod_arg[0], show_only);
+ end_odb_transaction();
+
+finish:
+ if (write_locked_index(&the_index, &lock_file,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("Unable to write new index file"));
+
+ dir_clear(&dir);
+ UNLEAK(pathspec);
+ return exit_status;
+}
diff --git a/builtin/am.c b/builtin/am.c
new file mode 100644
index 0000000..30c9b3a
--- /dev/null
+++ b/builtin/am.c
@@ -0,0 +1,2546 @@
+/*
+ * Builtin "git am"
+ *
+ * Based on git-am.sh by Junio C Hamano.
+ */
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "exec-cmd.h"
+#include "parse-options.h"
+#include "dir.h"
+#include "run-command.h"
+#include "hook.h"
+#include "quote.h"
+#include "tempfile.h"
+#include "lockfile.h"
+#include "cache-tree.h"
+#include "refs.h"
+#include "commit.h"
+#include "diff.h"
+#include "diffcore.h"
+#include "unpack-trees.h"
+#include "branch.h"
+#include "sequencer.h"
+#include "revision.h"
+#include "merge-recursive.h"
+#include "log-tree.h"
+#include "notes-utils.h"
+#include "rerere.h"
+#include "prompt.h"
+#include "mailinfo.h"
+#include "apply.h"
+#include "string-list.h"
+#include "packfile.h"
+#include "repository.h"
+#include "pretty.h"
+
+/**
+ * Returns the length of the first line of msg.
+ */
+static int linelen(const char *msg)
+{
+ return strchrnul(msg, '\n') - msg;
+}
+
+/**
+ * Returns true if `str` consists of only whitespace, false otherwise.
+ */
+static int str_isspace(const char *str)
+{
+ for (; *str; str++)
+ if (!isspace(*str))
+ return 0;
+
+ return 1;
+}
+
+enum patch_format {
+ PATCH_FORMAT_UNKNOWN = 0,
+ PATCH_FORMAT_MBOX,
+ PATCH_FORMAT_STGIT,
+ PATCH_FORMAT_STGIT_SERIES,
+ PATCH_FORMAT_HG,
+ PATCH_FORMAT_MBOXRD
+};
+
+enum keep_type {
+ KEEP_FALSE = 0,
+ KEEP_TRUE, /* pass -k flag to git-mailinfo */
+ KEEP_NON_PATCH /* pass -b flag to git-mailinfo */
+};
+
+enum scissors_type {
+ SCISSORS_UNSET = -1,
+ SCISSORS_FALSE = 0, /* pass --no-scissors to git-mailinfo */
+ SCISSORS_TRUE /* pass --scissors to git-mailinfo */
+};
+
+enum signoff_type {
+ SIGNOFF_FALSE = 0,
+ SIGNOFF_TRUE = 1,
+ SIGNOFF_EXPLICIT /* --signoff was set on the command-line */
+};
+
+enum show_patch_type {
+ SHOW_PATCH_RAW = 0,
+ SHOW_PATCH_DIFF = 1,
+};
+
+enum empty_action {
+ STOP_ON_EMPTY_COMMIT = 0, /* output errors and stop in the middle of an am session */
+ DROP_EMPTY_COMMIT, /* skip with a notice message, unless "--quiet" has been passed */
+ KEEP_EMPTY_COMMIT, /* keep recording as empty commits */
+};
+
+struct am_state {
+ /* state directory path */
+ char *dir;
+
+ /* current and last patch numbers, 1-indexed */
+ int cur;
+ int last;
+
+ /* commit metadata and message */
+ char *author_name;
+ char *author_email;
+ char *author_date;
+ char *msg;
+ size_t msg_len;
+
+ /* when --rebasing, records the original commit the patch came from */
+ struct object_id orig_commit;
+
+ /* number of digits in patch filename */
+ int prec;
+
+ /* various operating modes and command line options */
+ int interactive;
+ int threeway;
+ int quiet;
+ int signoff; /* enum signoff_type */
+ int utf8;
+ int keep; /* enum keep_type */
+ int message_id;
+ int scissors; /* enum scissors_type */
+ int quoted_cr; /* enum quoted_cr_action */
+ int empty_type; /* enum empty_action */
+ struct strvec git_apply_opts;
+ const char *resolvemsg;
+ int committer_date_is_author_date;
+ int ignore_date;
+ int allow_rerere_autoupdate;
+ const char *sign_commit;
+ int rebasing;
+};
+
+/**
+ * Initializes am_state with the default values.
+ */
+static void am_state_init(struct am_state *state)
+{
+ int gpgsign;
+
+ memset(state, 0, sizeof(*state));
+
+ state->dir = git_pathdup("rebase-apply");
+
+ state->prec = 4;
+
+ git_config_get_bool("am.threeway", &state->threeway);
+
+ state->utf8 = 1;
+
+ git_config_get_bool("am.messageid", &state->message_id);
+
+ state->scissors = SCISSORS_UNSET;
+ state->quoted_cr = quoted_cr_unset;
+
+ strvec_init(&state->git_apply_opts);
+
+ if (!git_config_get_bool("commit.gpgsign", &gpgsign))
+ state->sign_commit = gpgsign ? "" : NULL;
+}
+
+/**
+ * Releases memory allocated by an am_state.
+ */
+static void am_state_release(struct am_state *state)
+{
+ free(state->dir);
+ free(state->author_name);
+ free(state->author_email);
+ free(state->author_date);
+ free(state->msg);
+ strvec_clear(&state->git_apply_opts);
+}
+
+static int am_option_parse_quoted_cr(const struct option *opt,
+ const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ if (mailinfo_parse_quoted_cr_action(arg, opt->value) != 0)
+ return error(_("bad action '%s' for '%s'"), arg, "--quoted-cr");
+ return 0;
+}
+
+static int am_option_parse_empty(const struct option *opt,
+ const char *arg, int unset)
+{
+ int *opt_value = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (!strcmp(arg, "stop"))
+ *opt_value = STOP_ON_EMPTY_COMMIT;
+ else if (!strcmp(arg, "drop"))
+ *opt_value = DROP_EMPTY_COMMIT;
+ else if (!strcmp(arg, "keep"))
+ *opt_value = KEEP_EMPTY_COMMIT;
+ else
+ return error(_("invalid value for '%s': '%s'"), "--empty", arg);
+
+ return 0;
+}
+
+/**
+ * Returns the path of the given file within the am_state directory.
+ */
+static inline const char *am_path(const struct am_state *state, const char *path)
+{
+ return mkpath("%s/%s", state->dir, path);
+}
+
+/**
+ * Convenience wrapper around write_file().
+ */
+static void write_state_text(const struct am_state *state,
+ const char *name, const char *string)
+{
+ write_file(am_path(state, name), "%s", string);
+}
+
+static void write_state_count(const struct am_state *state,
+ const char *name, int value)
+{
+ write_file(am_path(state, name), "%d", value);
+}
+
+static void write_state_bool(const struct am_state *state,
+ const char *name, int value)
+{
+ write_state_text(state, name, value ? "t" : "f");
+}
+
+/**
+ * If state->quiet is false, calls fprintf(fp, fmt, ...), and appends a newline
+ * at the end.
+ */
+__attribute__((format (printf, 3, 4)))
+static void say(const struct am_state *state, FILE *fp, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (!state->quiet) {
+ vfprintf(fp, fmt, ap);
+ putc('\n', fp);
+ }
+ va_end(ap);
+}
+
+/**
+ * Returns 1 if there is an am session in progress, 0 otherwise.
+ */
+static int am_in_progress(const struct am_state *state)
+{
+ struct stat st;
+
+ if (lstat(state->dir, &st) < 0 || !S_ISDIR(st.st_mode))
+ return 0;
+ if (lstat(am_path(state, "last"), &st) || !S_ISREG(st.st_mode))
+ return 0;
+ if (lstat(am_path(state, "next"), &st) || !S_ISREG(st.st_mode))
+ return 0;
+ return 1;
+}
+
+/**
+ * Reads the contents of `file` in the `state` directory into `sb`. Returns the
+ * number of bytes read on success, -1 if the file does not exist. If `trim` is
+ * set, trailing whitespace will be removed.
+ */
+static int read_state_file(struct strbuf *sb, const struct am_state *state,
+ const char *file, int trim)
+{
+ strbuf_reset(sb);
+
+ if (strbuf_read_file(sb, am_path(state, file), 0) >= 0) {
+ if (trim)
+ strbuf_trim(sb);
+
+ return sb->len;
+ }
+
+ if (errno == ENOENT)
+ return -1;
+
+ die_errno(_("could not read '%s'"), am_path(state, file));
+}
+
+/**
+ * Reads and parses the state directory's "author-script" file, and sets
+ * state->author_name, state->author_email and state->author_date accordingly.
+ * Returns 0 on success, -1 if the file could not be parsed.
+ *
+ * The author script is of the format:
+ *
+ * GIT_AUTHOR_NAME='$author_name'
+ * GIT_AUTHOR_EMAIL='$author_email'
+ * GIT_AUTHOR_DATE='$author_date'
+ *
+ * where $author_name, $author_email and $author_date are quoted. We are strict
+ * with our parsing, as the file was meant to be eval'd in the old git-am.sh
+ * script, and thus if the file differs from what this function expects, it is
+ * better to bail out than to do something that the user does not expect.
+ */
+static int read_am_author_script(struct am_state *state)
+{
+ const char *filename = am_path(state, "author-script");
+
+ assert(!state->author_name);
+ assert(!state->author_email);
+ assert(!state->author_date);
+
+ return read_author_script(filename, &state->author_name,
+ &state->author_email, &state->author_date, 1);
+}
+
+/**
+ * Saves state->author_name, state->author_email and state->author_date in the
+ * state directory's "author-script" file.
+ */
+static void write_author_script(const struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_addstr(&sb, "GIT_AUTHOR_NAME=");
+ sq_quote_buf(&sb, state->author_name);
+ strbuf_addch(&sb, '\n');
+
+ strbuf_addstr(&sb, "GIT_AUTHOR_EMAIL=");
+ sq_quote_buf(&sb, state->author_email);
+ strbuf_addch(&sb, '\n');
+
+ strbuf_addstr(&sb, "GIT_AUTHOR_DATE=");
+ sq_quote_buf(&sb, state->author_date);
+ strbuf_addch(&sb, '\n');
+
+ write_state_text(state, "author-script", sb.buf);
+
+ strbuf_release(&sb);
+}
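
For a concrete (hypothetical) example, an author-script file written by write_author_script() above would look like this; note how sq_quote_buf() turns the single quote in the name into '\'' so the file remains safe to eval in the old git-am.sh:

	GIT_AUTHOR_NAME='Pat O'\''Brien'
	GIT_AUTHOR_EMAIL='pat@example.com'
	GIT_AUTHOR_DATE='Mon, 3 Apr 2023 10:00:00 +0200'
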
+
+/**
+ * Reads the commit message from the state directory's "final-commit" file,
+ * setting state->msg to its contents and state->msg_len to the length of its
+ * contents in bytes.
+ *
+ * Returns 0 on success, -1 if the file does not exist.
+ */
+static int read_commit_msg(struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ assert(!state->msg);
+
+ if (read_state_file(&sb, state, "final-commit", 0) < 0) {
+ strbuf_release(&sb);
+ return -1;
+ }
+
+ state->msg = strbuf_detach(&sb, &state->msg_len);
+ return 0;
+}
+
+/**
+ * Saves state->msg in the state directory's "final-commit" file.
+ */
+static void write_commit_msg(const struct am_state *state)
+{
+ const char *filename = am_path(state, "final-commit");
+ write_file_buf(filename, state->msg, state->msg_len);
+}
+
+/**
+ * Loads state from disk.
+ */
+static void am_load(struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ if (read_state_file(&sb, state, "next", 1) < 0)
+ BUG("state file 'next' does not exist");
+ state->cur = strtol(sb.buf, NULL, 10);
+
+ if (read_state_file(&sb, state, "last", 1) < 0)
+ BUG("state file 'last' does not exist");
+ state->last = strtol(sb.buf, NULL, 10);
+
+ if (read_am_author_script(state) < 0)
+ die(_("could not parse author script"));
+
+ read_commit_msg(state);
+
+ if (read_state_file(&sb, state, "original-commit", 1) < 0)
+ oidclr(&state->orig_commit);
+ else if (get_oid_hex(sb.buf, &state->orig_commit) < 0)
+ die(_("could not parse %s"), am_path(state, "original-commit"));
+
+ read_state_file(&sb, state, "threeway", 1);
+ state->threeway = !strcmp(sb.buf, "t");
+
+ read_state_file(&sb, state, "quiet", 1);
+ state->quiet = !strcmp(sb.buf, "t");
+
+ read_state_file(&sb, state, "sign", 1);
+ state->signoff = !strcmp(sb.buf, "t");
+
+ read_state_file(&sb, state, "utf8", 1);
+ state->utf8 = !strcmp(sb.buf, "t");
+
+ if (file_exists(am_path(state, "rerere-autoupdate"))) {
+ read_state_file(&sb, state, "rerere-autoupdate", 1);
+ state->allow_rerere_autoupdate = strcmp(sb.buf, "t") ?
+ RERERE_NOAUTOUPDATE : RERERE_AUTOUPDATE;
+ } else {
+ state->allow_rerere_autoupdate = 0;
+ }
+
+ read_state_file(&sb, state, "keep", 1);
+ if (!strcmp(sb.buf, "t"))
+ state->keep = KEEP_TRUE;
+ else if (!strcmp(sb.buf, "b"))
+ state->keep = KEEP_NON_PATCH;
+ else
+ state->keep = KEEP_FALSE;
+
+ read_state_file(&sb, state, "messageid", 1);
+ state->message_id = !strcmp(sb.buf, "t");
+
+ read_state_file(&sb, state, "scissors", 1);
+ if (!strcmp(sb.buf, "t"))
+ state->scissors = SCISSORS_TRUE;
+ else if (!strcmp(sb.buf, "f"))
+ state->scissors = SCISSORS_FALSE;
+ else
+ state->scissors = SCISSORS_UNSET;
+
+ read_state_file(&sb, state, "quoted-cr", 1);
+ if (!*sb.buf)
+ state->quoted_cr = quoted_cr_unset;
+ else if (mailinfo_parse_quoted_cr_action(sb.buf, &state->quoted_cr) != 0)
+ die(_("could not parse %s"), am_path(state, "quoted-cr"));
+
+ read_state_file(&sb, state, "apply-opt", 1);
+ strvec_clear(&state->git_apply_opts);
+ if (sq_dequote_to_strvec(sb.buf, &state->git_apply_opts) < 0)
+ die(_("could not parse %s"), am_path(state, "apply-opt"));
+
+ state->rebasing = !!file_exists(am_path(state, "rebasing"));
+
+ strbuf_release(&sb);
+}
+
+/**
+ * Removes the am_state directory, forcefully terminating the current am
+ * session.
+ */
+static void am_destroy(const struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_addstr(&sb, state->dir);
+ remove_dir_recursively(&sb, 0);
+ strbuf_release(&sb);
+}
+
+/**
+ * Runs applypatch-msg hook. Returns its exit code.
+ */
+static int run_applypatch_msg_hook(struct am_state *state)
+{
+ int ret;
+
+ assert(state->msg);
+ ret = run_hooks_l("applypatch-msg", am_path(state, "final-commit"), NULL);
+
+ if (!ret) {
+ FREE_AND_NULL(state->msg);
+ if (read_commit_msg(state) < 0)
+ die(_("'%s' was deleted by the applypatch-msg hook"),
+ am_path(state, "final-commit"));
+ }
+
+ return ret;
+}
+
+/**
+ * Runs the post-rewrite hook. Returns its exit code.
+ */
+static int run_post_rewrite_hook(const struct am_state *state)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ const char *hook = find_hook("post-rewrite");
+ int ret;
+
+ if (!hook)
+ return 0;
+
+ strvec_push(&cp.args, hook);
+ strvec_push(&cp.args, "rebase");
+
+ cp.in = xopen(am_path(state, "rewritten"), O_RDONLY);
+ cp.stdout_to_stderr = 1;
+ cp.trace2_hook_name = "post-rewrite";
+
+ ret = run_command(&cp);
+
+ close(cp.in);
+ return ret;
+}
+
+/**
+ * Reads the state directory's "rewritten" file, and copies notes from the old
+ * commits listed in the file to their rewritten commits.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int copy_notes_for_rebase(const struct am_state *state)
+{
+ struct notes_rewrite_cfg *c;
+ struct strbuf sb = STRBUF_INIT;
+ const char *invalid_line = _("Malformed input line: '%s'.");
+ const char *msg = "Notes added by 'git rebase'";
+ FILE *fp;
+ int ret = 0;
+
+ assert(state->rebasing);
+
+ c = init_copy_notes_for_rewrite("rebase");
+ if (!c)
+ return 0;
+
+ fp = xfopen(am_path(state, "rewritten"), "r");
+
+ while (!strbuf_getline_lf(&sb, fp)) {
+ struct object_id from_obj, to_obj;
+ const char *p;
+
+ if (sb.len != the_hash_algo->hexsz * 2 + 1) {
+ ret = error(invalid_line, sb.buf);
+ goto finish;
+ }
+
+ if (parse_oid_hex(sb.buf, &from_obj, &p)) {
+ ret = error(invalid_line, sb.buf);
+ goto finish;
+ }
+
+ if (*p != ' ') {
+ ret = error(invalid_line, sb.buf);
+ goto finish;
+ }
+
+ if (get_oid_hex(p + 1, &to_obj)) {
+ ret = error(invalid_line, sb.buf);
+ goto finish;
+ }
+
+ if (copy_note_for_rewrite(c, &from_obj, &to_obj))
+ ret = error(_("Failed to copy notes from '%s' to '%s'"),
+ oid_to_hex(&from_obj), oid_to_hex(&to_obj));
+ }
+
+finish:
+ finish_copy_notes_for_rewrite(the_repository, c, msg);
+ fclose(fp);
+ strbuf_release(&sb);
+ return ret;
+}
+
+/**
+ * Determines if the file looks like a piece of RFC2822 mail by grabbing all
+ * non-indented lines and checking if they look like they begin with valid
+ * header field names.
+ *
+ * Returns 1 if the file looks like a piece of mail, 0 otherwise.
+ */
+static int is_mail(FILE *fp)
+{
+ const char *header_regex = "^[!-9;-~]+:";
+ struct strbuf sb = STRBUF_INIT;
+ regex_t regex;
+ int ret = 1;
+
+ if (fseek(fp, 0L, SEEK_SET))
+ die_errno(_("fseek failed"));
+
+ if (regcomp(&regex, header_regex, REG_NOSUB | REG_EXTENDED))
+ die("invalid pattern: %s", header_regex);
+
+ while (!strbuf_getline(&sb, fp)) {
+ if (!sb.len)
+ break; /* End of header */
+
+ /* Ignore indented folded lines */
+ if (*sb.buf == '\t' || *sb.buf == ' ')
+ continue;
+
+ /* It's a header if it matches header_regex */
+ if (regexec(&regex, sb.buf, 0, NULL, 0)) {
+ ret = 0;
+ goto done;
+ }
+ }
+
+done:
+ regfree(&regex);
+ strbuf_release(&sb);
+ return ret;
+}
+
+/**
+ * Attempts to detect the patch_format of the patches contained in `paths`,
+ * returning the PATCH_FORMAT_* enum value. Returns PATCH_FORMAT_UNKNOWN if
+ * detection fails.
+ */
+static int detect_patch_format(const char **paths)
+{
+ enum patch_format ret = PATCH_FORMAT_UNKNOWN;
+ struct strbuf l1 = STRBUF_INIT;
+ struct strbuf l2 = STRBUF_INIT;
+ struct strbuf l3 = STRBUF_INIT;
+ FILE *fp;
+
+ /*
+	 * We default to mbox format if input comes from stdin or is a directory.
+ */
+ if (!*paths || !strcmp(*paths, "-") || is_directory(*paths))
+ return PATCH_FORMAT_MBOX;
+
+ /*
+ * Otherwise, check the first few lines of the first patch, starting
+ * from the first non-blank line, to try to detect its format.
+ */
+
+ fp = xfopen(*paths, "r");
+
+ while (!strbuf_getline(&l1, fp)) {
+ if (l1.len)
+ break;
+ }
+
+ if (starts_with(l1.buf, "From ") || starts_with(l1.buf, "From: ")) {
+ ret = PATCH_FORMAT_MBOX;
+ goto done;
+ }
+
+ if (starts_with(l1.buf, "# This series applies on GIT commit")) {
+ ret = PATCH_FORMAT_STGIT_SERIES;
+ goto done;
+ }
+
+ if (!strcmp(l1.buf, "# HG changeset patch")) {
+ ret = PATCH_FORMAT_HG;
+ goto done;
+ }
+
+ strbuf_getline(&l2, fp);
+ strbuf_getline(&l3, fp);
+
+ /*
+ * If the second line is empty and the third is a From, Author or Date
+ * entry, this is likely an StGit patch.
+ */
+ if (l1.len && !l2.len &&
+ (starts_with(l3.buf, "From:") ||
+ starts_with(l3.buf, "Author:") ||
+ starts_with(l3.buf, "Date:"))) {
+ ret = PATCH_FORMAT_STGIT;
+ goto done;
+ }
+
+ if (l1.len && is_mail(fp)) {
+ ret = PATCH_FORMAT_MBOX;
+ goto done;
+ }
+
+done:
+ fclose(fp);
+ strbuf_release(&l1);
+ strbuf_release(&l2);
+ strbuf_release(&l3);
+ return ret;
+}
+
+/**
+ * Splits out individual email patches from `paths`, where each path is either
+ * a mbox file or a Maildir. Returns 0 on success, -1 on failure.
+ */
+static int split_mail_mbox(struct am_state *state, const char **paths,
+ int keep_cr, int mboxrd)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf last = STRBUF_INIT;
+ int ret;
+
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "mailsplit");
+ strvec_pushf(&cp.args, "-d%d", state->prec);
+ strvec_pushf(&cp.args, "-o%s", state->dir);
+ strvec_push(&cp.args, "-b");
+ if (keep_cr)
+ strvec_push(&cp.args, "--keep-cr");
+ if (mboxrd)
+ strvec_push(&cp.args, "--mboxrd");
+ strvec_push(&cp.args, "--");
+ strvec_pushv(&cp.args, paths);
+
+ ret = capture_command(&cp, &last, 8);
+ if (ret)
+ goto exit;
+
+ state->cur = 1;
+ state->last = strtol(last.buf, NULL, 10);
+
+exit:
+ strbuf_release(&last);
+ return ret ? -1 : 0;
+}
+
+/**
+ * Callback signature for split_mail_conv(). The foreign patch should be
+ * read from `in`, and the converted patch (in RFC2822 mail format) should be
+ * written to `out`. Return 0 on success, or -1 on failure.
+ */
+typedef int (*mail_conv_fn)(FILE *out, FILE *in, int keep_cr);
+
+/**
+ * Calls `fn` for each file in `paths` to convert the foreign patch to the
+ * RFC2822 mail format suitable for parsing with git-mailinfo.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int split_mail_conv(mail_conv_fn fn, struct am_state *state,
+ const char **paths, int keep_cr)
+{
+ static const char *stdin_only[] = {"-", NULL};
+ int i;
+
+ if (!*paths)
+ paths = stdin_only;
+
+ for (i = 0; *paths; paths++, i++) {
+ FILE *in, *out;
+ const char *mail;
+ int ret;
+
+ if (!strcmp(*paths, "-"))
+ in = stdin;
+ else
+ in = fopen(*paths, "r");
+
+ if (!in)
+ return error_errno(_("could not open '%s' for reading"),
+ *paths);
+
+ mail = mkpath("%s/%0*d", state->dir, state->prec, i + 1);
+
+ out = fopen(mail, "w");
+ if (!out) {
+ if (in != stdin)
+ fclose(in);
+ return error_errno(_("could not open '%s' for writing"),
+ mail);
+ }
+
+ ret = fn(out, in, keep_cr);
+
+ fclose(out);
+ if (in != stdin)
+ fclose(in);
+
+ if (ret)
+ return error(_("could not parse patch '%s'"), *paths);
+ }
+
+ state->cur = 1;
+ state->last = i;
+ return 0;
+}
+
+/**
+ * A split_mail_conv() callback that converts an StGit patch to an RFC2822
+ * message suitable for parsing with git-mailinfo.
+ */
+static int stgit_patch_to_mail(FILE *out, FILE *in, int keep_cr)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int subject_printed = 0;
+
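+	/*
+	 * Map the StGit header to RFC2822: "Author:" becomes "From:", existing
+	 * "From"/"Date" lines are passed through, the first remaining non-blank
+	 * line becomes the Subject, and everything after it is the message body.
+	 */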
+ while (!strbuf_getline_lf(&sb, in)) {
+ const char *str;
+
+ if (str_isspace(sb.buf))
+ continue;
+ else if (skip_prefix(sb.buf, "Author:", &str))
+ fprintf(out, "From:%s\n", str);
+ else if (starts_with(sb.buf, "From") || starts_with(sb.buf, "Date"))
+ fprintf(out, "%s\n", sb.buf);
+ else if (!subject_printed) {
+ fprintf(out, "Subject: %s\n", sb.buf);
+ subject_printed = 1;
+ } else {
+ fprintf(out, "\n%s\n", sb.buf);
+ break;
+ }
+ }
+
+ strbuf_reset(&sb);
+ while (strbuf_fread(&sb, 8192, in) > 0) {
+ fwrite(sb.buf, 1, sb.len, out);
+ strbuf_reset(&sb);
+ }
+
+ strbuf_release(&sb);
+ return 0;
+}
+
+/**
+ * This function only supports a single StGit series file in `paths`.
+ *
+ * Given an StGit series file, converts the StGit patches in the series into
+ * RFC2822 messages suitable for parsing with git-mailinfo, and queues them in
+ * the state directory.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int split_mail_stgit_series(struct am_state *state, const char **paths,
+ int keep_cr)
+{
+ const char *series_dir;
+ char *series_dir_buf;
+ FILE *fp;
+ struct strvec patches = STRVEC_INIT;
+ struct strbuf sb = STRBUF_INIT;
+ int ret;
+
+ if (!paths[0] || paths[1])
+ return error(_("Only one StGIT patch series can be applied at once"));
+
+ series_dir_buf = xstrdup(*paths);
+ series_dir = dirname(series_dir_buf);
+
+ fp = fopen(*paths, "r");
+ if (!fp)
+ return error_errno(_("could not open '%s' for reading"), *paths);
+
+ while (!strbuf_getline_lf(&sb, fp)) {
+ if (*sb.buf == '#')
+ continue; /* skip comment lines */
+
+ strvec_push(&patches, mkpath("%s/%s", series_dir, sb.buf));
+ }
+
+ fclose(fp);
+ strbuf_release(&sb);
+ free(series_dir_buf);
+
+ ret = split_mail_conv(stgit_patch_to_mail, state, patches.v, keep_cr);
+
+ strvec_clear(&patches);
+ return ret;
+}
+
+/**
+ * A split_mail_conv() callback that converts a Mercurial patch to an RFC2822
+ * message suitable for parsing with git-mailinfo.
+ */
+static int hg_patch_to_mail(FILE *out, FILE *in, int keep_cr)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int rc = 0;
+
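+	/*
+	 * A Mercurial export typically begins with headers such as:
+	 *   # HG changeset patch
+	 *   # User A U Thor <author@example.com>
+	 *   # Date 1651234567 -3600
+	 */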
+ while (!strbuf_getline_lf(&sb, in)) {
+ const char *str;
+
+ if (skip_prefix(sb.buf, "# User ", &str))
+ fprintf(out, "From: %s\n", str);
+ else if (skip_prefix(sb.buf, "# Date ", &str)) {
+ timestamp_t timestamp;
+ long tz, tz2;
+ char *end;
+
+ errno = 0;
+ timestamp = parse_timestamp(str, &end, 10);
+ if (errno) {
+ rc = error(_("invalid timestamp"));
+ goto exit;
+ }
+
+ if (!skip_prefix(end, " ", &str)) {
+ rc = error(_("invalid Date line"));
+ goto exit;
+ }
+
+ errno = 0;
+ tz = strtol(str, &end, 10);
+ if (errno) {
+ rc = error(_("invalid timezone offset"));
+ goto exit;
+ }
+
+ if (*end) {
+ rc = error(_("invalid Date line"));
+ goto exit;
+ }
+
+ /*
+			 * Mercurial's timezone is in seconds west of UTC,
+			 * whereas Git's timezone is in hours and minutes east of
+			 * UTC. Convert it.
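+			 * For example, tz = 18000 (5 hours west of UTC) yields
+			 * tz2 = -(5 * 100 + 0) = -500, i.e. "-0500".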
+ */
+ tz2 = labs(tz) / 3600 * 100 + labs(tz) % 3600 / 60;
+ if (tz > 0)
+ tz2 = -tz2;
+
+ fprintf(out, "Date: %s\n", show_date(timestamp, tz2, DATE_MODE(RFC2822)));
+ } else if (starts_with(sb.buf, "# ")) {
+ continue;
+ } else {
+ fprintf(out, "\n%s\n", sb.buf);
+ break;
+ }
+ }
+
+ strbuf_reset(&sb);
+ while (strbuf_fread(&sb, 8192, in) > 0) {
+ fwrite(sb.buf, 1, sb.len, out);
+ strbuf_reset(&sb);
+ }
+exit:
+ strbuf_release(&sb);
+ return rc;
+}
+
+/**
+ * Splits a list of files/directories into individual email patches. Each path
+ * in `paths` must be a file/directory that is formatted according to
+ * `patch_format`.
+ *
+ * Once split out, the individual email patches will be stored in the state
+ * directory, with each patch's filename being its index, padded to state->prec
+ * digits.
+ *
+ * state->cur will be set to the index of the first mail, and state->last will
+ * be set to the index of the last mail.
+ *
+ * Set keep_cr to 0 to convert all lines ending with \r\n to end with \n, 1
+ * to disable this behavior, -1 to use the default configured setting.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int split_mail(struct am_state *state, enum patch_format patch_format,
+ const char **paths, int keep_cr)
+{
+ if (keep_cr < 0) {
+ keep_cr = 0;
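+		/* Fall back to the am.keepcr configuration; default to 0 if it is unset. */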
+ git_config_get_bool("am.keepcr", &keep_cr);
+ }
+
+ switch (patch_format) {
+ case PATCH_FORMAT_MBOX:
+ return split_mail_mbox(state, paths, keep_cr, 0);
+ case PATCH_FORMAT_STGIT:
+ return split_mail_conv(stgit_patch_to_mail, state, paths, keep_cr);
+ case PATCH_FORMAT_STGIT_SERIES:
+ return split_mail_stgit_series(state, paths, keep_cr);
+ case PATCH_FORMAT_HG:
+ return split_mail_conv(hg_patch_to_mail, state, paths, keep_cr);
+ case PATCH_FORMAT_MBOXRD:
+ return split_mail_mbox(state, paths, keep_cr, 1);
+ default:
+ BUG("invalid patch_format");
+ }
+ return -1;
+}
+
+/**
+ * Sets up a new am session for applying patches.
+ */
+static void am_setup(struct am_state *state, enum patch_format patch_format,
+ const char **paths, int keep_cr)
+{
+ struct object_id curr_head;
+ const char *str;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (!patch_format)
+ patch_format = detect_patch_format(paths);
+
+ if (!patch_format) {
+ fprintf_ln(stderr, _("Patch format detection failed."));
+ exit(128);
+ }
+
+ if (mkdir(state->dir, 0777) < 0 && errno != EEXIST)
+ die_errno(_("failed to create directory '%s'"), state->dir);
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
+
+ if (split_mail(state, patch_format, paths, keep_cr) < 0) {
+ am_destroy(state);
+ die(_("Failed to split patches."));
+ }
+
+ if (state->rebasing)
+ state->threeway = 1;
+
+ write_state_bool(state, "threeway", state->threeway);
+ write_state_bool(state, "quiet", state->quiet);
+ write_state_bool(state, "sign", state->signoff);
+ write_state_bool(state, "utf8", state->utf8);
+
+ if (state->allow_rerere_autoupdate)
+ write_state_bool(state, "rerere-autoupdate",
+ state->allow_rerere_autoupdate == RERERE_AUTOUPDATE);
+
+ switch (state->keep) {
+ case KEEP_FALSE:
+ str = "f";
+ break;
+ case KEEP_TRUE:
+ str = "t";
+ break;
+ case KEEP_NON_PATCH:
+ str = "b";
+ break;
+ default:
+ BUG("invalid value for state->keep");
+ }
+
+ write_state_text(state, "keep", str);
+ write_state_bool(state, "messageid", state->message_id);
+
+ switch (state->scissors) {
+ case SCISSORS_UNSET:
+ str = "";
+ break;
+ case SCISSORS_FALSE:
+ str = "f";
+ break;
+ case SCISSORS_TRUE:
+ str = "t";
+ break;
+ default:
+ BUG("invalid value for state->scissors");
+ }
+ write_state_text(state, "scissors", str);
+
+ switch (state->quoted_cr) {
+ case quoted_cr_unset:
+ str = "";
+ break;
+ case quoted_cr_nowarn:
+ str = "nowarn";
+ break;
+ case quoted_cr_warn:
+ str = "warn";
+ break;
+ case quoted_cr_strip:
+ str = "strip";
+ break;
+ default:
+ BUG("invalid value for state->quoted_cr");
+ }
+ write_state_text(state, "quoted-cr", str);
+
+ sq_quote_argv(&sb, state->git_apply_opts.v);
+ write_state_text(state, "apply-opt", sb.buf);
+
+ if (state->rebasing)
+ write_state_text(state, "rebasing", "");
+ else
+ write_state_text(state, "applying", "");
+
+ if (!get_oid("HEAD", &curr_head)) {
+ write_state_text(state, "abort-safety", oid_to_hex(&curr_head));
+ if (!state->rebasing)
+ update_ref("am", "ORIG_HEAD", &curr_head, NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
+ } else {
+ write_state_text(state, "abort-safety", "");
+ if (!state->rebasing)
+ delete_ref(NULL, "ORIG_HEAD", NULL, 0);
+ }
+
+ /*
+ * NOTE: Since the "next" and "last" files determine if an am_state
+ * session is in progress, they should be written last.
+ */
+
+ write_state_count(state, "next", state->cur);
+ write_state_count(state, "last", state->last);
+
+ strbuf_release(&sb);
+}
+
+/**
+ * Increments the patch pointer, and cleans am_state for the application of the
+ * next patch.
+ */
+static void am_next(struct am_state *state)
+{
+ struct object_id head;
+
+ FREE_AND_NULL(state->author_name);
+ FREE_AND_NULL(state->author_email);
+ FREE_AND_NULL(state->author_date);
+ FREE_AND_NULL(state->msg);
+ state->msg_len = 0;
+
+ unlink(am_path(state, "author-script"));
+ unlink(am_path(state, "final-commit"));
+
+ oidclr(&state->orig_commit);
+ unlink(am_path(state, "original-commit"));
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
+
+ if (!get_oid("HEAD", &head))
+ write_state_text(state, "abort-safety", oid_to_hex(&head));
+ else
+ write_state_text(state, "abort-safety", "");
+
+ state->cur++;
+ write_state_count(state, "next", state->cur);
+}
+
+/**
+ * Returns the filename of the current patch email.
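+ * For example, with the default precision of 4, the first patch is "0001".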
+ */
+static const char *msgnum(const struct am_state *state)
+{
+ static struct strbuf sb = STRBUF_INIT;
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%0*d", state->prec, state->cur);
+
+ return sb.buf;
+}
+
+/**
+ * Dies with a user-friendly message on how to proceed after resolving the
+ * problem. This message can be overridden with state->resolvemsg.
+ */
+static void NORETURN die_user_resolve(const struct am_state *state)
+{
+ if (state->resolvemsg) {
+ printf_ln("%s", state->resolvemsg);
+ } else {
+ const char *cmdline = state->interactive ? "git am -i" : "git am";
+
+ printf_ln(_("When you have resolved this problem, run \"%s --continue\"."), cmdline);
+ printf_ln(_("If you prefer to skip this patch, run \"%s --skip\" instead."), cmdline);
+
+ if (advice_enabled(ADVICE_AM_WORK_DIR) &&
+ is_empty_or_missing_file(am_path(state, "patch")) &&
+ !repo_index_has_changes(the_repository, NULL, NULL))
+ printf_ln(_("To record the empty patch as an empty commit, run \"%s --allow-empty\"."), cmdline);
+
+ printf_ln(_("To restore the original branch and stop patching, run \"%s --abort\"."), cmdline);
+ }
+
+ exit(128);
+}
+
+/**
+ * Appends signoff to the "msg" field of the am_state.
+ */
+static void am_append_signoff(struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_attach(&sb, state->msg, state->msg_len, state->msg_len);
+ append_signoff(&sb, 0, 0);
+ state->msg = strbuf_detach(&sb, &state->msg_len);
+}
+
+/**
+ * Parses `mail` using git-mailinfo, extracting its patch and authorship info.
+ * state->msg will be set to the patch message. state->author_name,
+ * state->author_email and state->author_date will be set to the patch author's
+ * name, email and date respectively. The patch body will be written to the
+ * state directory's "patch" file.
+ *
+ * Returns 1 if the patch should be skipped, 0 otherwise.
+ */
+static int parse_mail(struct am_state *state, const char *mail)
+{
+ FILE *fp;
+ struct strbuf sb = STRBUF_INIT;
+ struct strbuf msg = STRBUF_INIT;
+ struct strbuf author_name = STRBUF_INIT;
+ struct strbuf author_date = STRBUF_INIT;
+ struct strbuf author_email = STRBUF_INIT;
+ int ret = 0;
+ struct mailinfo mi;
+
+ setup_mailinfo(&mi);
+
+ if (state->utf8)
+ mi.metainfo_charset = get_commit_output_encoding();
+ else
+ mi.metainfo_charset = NULL;
+
+ switch (state->keep) {
+ case KEEP_FALSE:
+ break;
+ case KEEP_TRUE:
+ mi.keep_subject = 1;
+ break;
+ case KEEP_NON_PATCH:
+ mi.keep_non_patch_brackets_in_subject = 1;
+ break;
+ default:
+ BUG("invalid value for state->keep");
+ }
+
+ if (state->message_id)
+ mi.add_message_id = 1;
+
+ switch (state->scissors) {
+ case SCISSORS_UNSET:
+ break;
+ case SCISSORS_FALSE:
+ mi.use_scissors = 0;
+ break;
+ case SCISSORS_TRUE:
+ mi.use_scissors = 1;
+ break;
+ default:
+ BUG("invalid value for state->scissors");
+ }
+
+ switch (state->quoted_cr) {
+ case quoted_cr_unset:
+ break;
+ case quoted_cr_nowarn:
+ case quoted_cr_warn:
+ case quoted_cr_strip:
+ mi.quoted_cr = state->quoted_cr;
+ break;
+ default:
+ BUG("invalid value for state->quoted_cr");
+ }
+
+ mi.input = xfopen(mail, "r");
+ mi.output = xfopen(am_path(state, "info"), "w");
+ if (mailinfo(&mi, am_path(state, "msg"), am_path(state, "patch")))
+ die("could not parse patch");
+
+ fclose(mi.input);
+ fclose(mi.output);
+
+ if (mi.format_flowed)
+ warning(_("Patch sent with format=flowed; "
+ "space at the end of lines might be lost."));
+
+ /* Extract message and author information */
+ fp = xfopen(am_path(state, "info"), "r");
+ while (!strbuf_getline_lf(&sb, fp)) {
+ const char *x;
+
+ if (skip_prefix(sb.buf, "Subject: ", &x)) {
+ if (msg.len)
+ strbuf_addch(&msg, '\n');
+ strbuf_addstr(&msg, x);
+ } else if (skip_prefix(sb.buf, "Author: ", &x))
+ strbuf_addstr(&author_name, x);
+ else if (skip_prefix(sb.buf, "Email: ", &x))
+ strbuf_addstr(&author_email, x);
+ else if (skip_prefix(sb.buf, "Date: ", &x))
+ strbuf_addstr(&author_date, x);
+ }
+ fclose(fp);
+
+ /* Skip pine's internal folder data */
+ if (!strcmp(author_name.buf, "Mail System Internal Data")) {
+ ret = 1;
+ goto finish;
+ }
+
+ strbuf_addstr(&msg, "\n\n");
+ strbuf_addbuf(&msg, &mi.log_message);
+ strbuf_stripspace(&msg, 0);
+
+ assert(!state->author_name);
+ state->author_name = strbuf_detach(&author_name, NULL);
+
+ assert(!state->author_email);
+ state->author_email = strbuf_detach(&author_email, NULL);
+
+ assert(!state->author_date);
+ state->author_date = strbuf_detach(&author_date, NULL);
+
+ assert(!state->msg);
+ state->msg = strbuf_detach(&msg, &state->msg_len);
+
+finish:
+ strbuf_release(&msg);
+ strbuf_release(&author_date);
+ strbuf_release(&author_email);
+ strbuf_release(&author_name);
+ strbuf_release(&sb);
+ clear_mailinfo(&mi);
+ return ret;
+}
+
+/**
+ * Sets commit_id to the hash of the commit that the mail was generated from.
+ * Returns 0 on success, -1 on failure.
+ */
+static int get_mail_commit_oid(struct object_id *commit_id, const char *mail)
+{
+ struct strbuf sb = STRBUF_INIT;
+ FILE *fp = xfopen(mail, "r");
+ const char *x;
+ int ret = 0;
+
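+	/* The mail is expected to begin with a "From <commit hash> ..." line. */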
+ if (strbuf_getline_lf(&sb, fp) ||
+ !skip_prefix(sb.buf, "From ", &x) ||
+ get_oid_hex(x, commit_id) < 0)
+ ret = -1;
+
+ strbuf_release(&sb);
+ fclose(fp);
+ return ret;
+}
+
+/**
+ * Sets state->msg, state->author_name, state->author_email, state->author_date
+ * to the commit's respective info.
+ */
+static void get_commit_info(struct am_state *state, struct commit *commit)
+{
+ const char *buffer, *ident_line, *msg;
+ size_t ident_len;
+ struct ident_split id;
+
+ buffer = logmsg_reencode(commit, NULL, get_commit_output_encoding());
+
+ ident_line = find_commit_header(buffer, "author", &ident_len);
+ if (!ident_line)
+ die(_("missing author line in commit %s"),
+ oid_to_hex(&commit->object.oid));
+ if (split_ident_line(&id, ident_line, ident_len) < 0)
+ die(_("invalid ident line: %.*s"), (int)ident_len, ident_line);
+
+ assert(!state->author_name);
+ if (id.name_begin)
+ state->author_name =
+ xmemdupz(id.name_begin, id.name_end - id.name_begin);
+ else
+ state->author_name = xstrdup("");
+
+ assert(!state->author_email);
+ if (id.mail_begin)
+ state->author_email =
+ xmemdupz(id.mail_begin, id.mail_end - id.mail_begin);
+ else
+ state->author_email = xstrdup("");
+
+ assert(!state->author_date);
+ state->author_date = xstrdup(show_ident_date(&id, DATE_MODE(NORMAL)));
+
+ assert(!state->msg);
+ msg = strstr(buffer, "\n\n");
+ if (!msg)
+ die(_("unable to parse commit %s"), oid_to_hex(&commit->object.oid));
+ state->msg = xstrdup(msg + 2);
+ state->msg_len = strlen(state->msg);
+ unuse_commit_buffer(commit, buffer);
+}
+
+/**
+ * Writes `commit` as a patch to the state directory's "patch" file.
+ */
+static void write_commit_patch(const struct am_state *state, struct commit *commit)
+{
+ struct rev_info rev_info;
+ FILE *fp;
+
+ fp = xfopen(am_path(state, "patch"), "w");
+ repo_init_revisions(the_repository, &rev_info, NULL);
+ rev_info.diff = 1;
+ rev_info.abbrev = 0;
+ rev_info.disable_stdin = 1;
+ rev_info.show_root_diff = 1;
+ rev_info.diffopt.output_format = DIFF_FORMAT_PATCH;
+ rev_info.no_commit_id = 1;
+ rev_info.diffopt.flags.binary = 1;
+ rev_info.diffopt.flags.full_index = 1;
+ rev_info.diffopt.use_color = 0;
+ rev_info.diffopt.file = fp;
+ rev_info.diffopt.close_file = 1;
+ add_pending_object(&rev_info, &commit->object, "");
+ diff_setup_done(&rev_info.diffopt);
+ log_tree_commit(&rev_info, commit);
+ release_revisions(&rev_info);
+}
+
+/**
+ * Writes the diff of the index against HEAD as a patch to the state
+ * directory's "patch" file.
+ */
+static void write_index_patch(const struct am_state *state)
+{
+ struct tree *tree;
+ struct object_id head;
+ struct rev_info rev_info;
+ FILE *fp;
+
+ if (!get_oid("HEAD", &head)) {
+ struct commit *commit = lookup_commit_or_die(&head, "HEAD");
+ tree = get_commit_tree(commit);
+ } else
+ tree = lookup_tree(the_repository,
+ the_repository->hash_algo->empty_tree);
+
+ fp = xfopen(am_path(state, "patch"), "w");
+ repo_init_revisions(the_repository, &rev_info, NULL);
+ rev_info.diff = 1;
+ rev_info.disable_stdin = 1;
+ rev_info.no_commit_id = 1;
+ rev_info.diffopt.output_format = DIFF_FORMAT_PATCH;
+ rev_info.diffopt.use_color = 0;
+ rev_info.diffopt.file = fp;
+ rev_info.diffopt.close_file = 1;
+ add_pending_object(&rev_info, &tree->object, "");
+ diff_setup_done(&rev_info.diffopt);
+ run_diff_index(&rev_info, 1);
+ release_revisions(&rev_info);
+}
+
+/**
+ * Like parse_mail(), but parses the mail by looking up its commit ID
+ * directly. This is used in --rebasing mode to bypass git-mailinfo's munging
+ * of patches.
+ *
+ * state->orig_commit will be set to the original commit ID.
+ *
+ * Will always return 0 as the patch should never be skipped.
+ */
+static int parse_mail_rebase(struct am_state *state, const char *mail)
+{
+ struct commit *commit;
+ struct object_id commit_oid;
+
+ if (get_mail_commit_oid(&commit_oid, mail) < 0)
+ die(_("could not parse %s"), mail);
+
+ commit = lookup_commit_or_die(&commit_oid, mail);
+
+ get_commit_info(state, commit);
+
+ write_commit_patch(state, commit);
+
+ oidcpy(&state->orig_commit, &commit_oid);
+ write_state_text(state, "original-commit", oid_to_hex(&commit_oid));
+ update_ref("am", "REBASE_HEAD", &commit_oid,
+ NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
+
+ return 0;
+}
+
+/**
+ * Applies current patch with git-apply. Returns 0 on success, -1 otherwise. If
+ * `index_file` is not NULL, the patch will be applied to that index.
+ */
+static int run_apply(const struct am_state *state, const char *index_file)
+{
+ struct strvec apply_paths = STRVEC_INIT;
+ struct strvec apply_opts = STRVEC_INIT;
+ struct apply_state apply_state;
+ int res, opts_left;
+ int force_apply = 0;
+ int options = 0;
+
+ if (init_apply_state(&apply_state, the_repository, NULL))
+ BUG("init_apply_state() failed");
+
+ strvec_push(&apply_opts, "apply");
+ strvec_pushv(&apply_opts, state->git_apply_opts.v);
+
+ opts_left = apply_parse_options(apply_opts.nr, apply_opts.v,
+ &apply_state, &force_apply, &options,
+ NULL);
+
+ if (opts_left != 0)
+ die("unknown option passed through to git apply");
+
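+	/*
+	 * With an explicit index file (the 3-way fallback), apply to that index
+	 * only; otherwise behave like "git apply --index" and update both the
+	 * index and the working tree.
+	 */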
+ if (index_file) {
+ apply_state.index_file = index_file;
+ apply_state.cached = 1;
+ } else
+ apply_state.check_index = 1;
+
+ /*
+ * If we are allowed to fall back on 3-way merge, don't give false
+ * errors during the initial attempt.
+ */
+ if (state->threeway && !index_file)
+ apply_state.apply_verbosity = verbosity_silent;
+
+ if (check_apply_state(&apply_state, force_apply))
+ BUG("check_apply_state() failed");
+
+ strvec_push(&apply_paths, am_path(state, "patch"));
+
+ res = apply_all_patches(&apply_state, apply_paths.nr, apply_paths.v, options);
+
+ strvec_clear(&apply_paths);
+ strvec_clear(&apply_opts);
+ clear_apply_state(&apply_state);
+
+ if (res)
+ return res;
+
+ if (index_file) {
+ /* Reload index as apply_all_patches() will have modified it. */
+ discard_index(&the_index);
+ read_index_from(&the_index, index_file, get_git_dir());
+ }
+
+ return 0;
+}
+
+/**
+ * Builds an index that contains just the blobs needed for a 3-way merge.
+ */
+static int build_fake_ancestor(const struct am_state *state, const char *index_file)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "apply");
+ strvec_pushv(&cp.args, state->git_apply_opts.v);
+ strvec_pushf(&cp.args, "--build-fake-ancestor=%s", index_file);
+ strvec_push(&cp.args, am_path(state, "patch"));
+
+ if (run_command(&cp))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Attempts a 3-way merge, using index_path as the temporary index.
+ */
+static int fall_back_threeway(const struct am_state *state, const char *index_path)
+{
+ struct object_id orig_tree, their_tree, our_tree;
+ const struct object_id *bases[1] = { &orig_tree };
+ struct merge_options o;
+ struct commit *result;
+ char *their_tree_name;
+
+ if (get_oid("HEAD", &our_tree) < 0)
+ oidcpy(&our_tree, the_hash_algo->empty_tree);
+
+ if (build_fake_ancestor(state, index_path))
+ return error("could not build fake ancestor");
+
+ discard_index(&the_index);
+ read_index_from(&the_index, index_path, get_git_dir());
+
+ if (write_index_as_tree(&orig_tree, &the_index, index_path, 0, NULL))
+ return error(_("Repository lacks necessary blobs to fall back on 3-way merge."));
+
+ say(state, stdout, _("Using index info to reconstruct a base tree..."));
+
+ if (!state->quiet) {
+ /*
+ * List paths that needed 3-way fallback, so that the user can
+ * review them with extra care to spot mismerges.
+ */
+ struct rev_info rev_info;
+
+ repo_init_revisions(the_repository, &rev_info, NULL);
+ rev_info.diffopt.output_format = DIFF_FORMAT_NAME_STATUS;
+ rev_info.diffopt.filter |= diff_filter_bit('A');
+ rev_info.diffopt.filter |= diff_filter_bit('M');
+ add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
+ diff_setup_done(&rev_info.diffopt);
+ run_diff_index(&rev_info, 1);
+ release_revisions(&rev_info);
+ }
+
+ if (run_apply(state, index_path))
+ return error(_("Did you hand edit your patch?\n"
+ "It does not apply to blobs recorded in its index."));
+
+ if (write_index_as_tree(&their_tree, &the_index, index_path, 0, NULL))
+ return error("could not write tree");
+
+ say(state, stdout, _("Falling back to patching base and 3-way merge..."));
+
+ discard_index(&the_index);
+ repo_read_index(the_repository);
+
+ /*
+ * This is not so wrong. Depending on which base we picked, orig_tree
+ * may be wildly different from ours, but their_tree has the same set of
+	 * wildly different changes in parts the patch did not touch, so the
+	 * recursive merge ends up canceling them, saying that we reverted all
+	 * those changes.
+ */
+
+ init_merge_options(&o, the_repository);
+
+ o.branch1 = "HEAD";
+ their_tree_name = xstrfmt("%.*s", linelen(state->msg), state->msg);
+ o.branch2 = their_tree_name;
+ o.detect_directory_renames = MERGE_DIRECTORY_RENAMES_NONE;
+
+ if (state->quiet)
+ o.verbosity = 0;
+
+ if (merge_recursive_generic(&o, &our_tree, &their_tree, 1, bases, &result)) {
+ repo_rerere(the_repository, state->allow_rerere_autoupdate);
+ free(their_tree_name);
+ return error(_("Failed to merge in the changes."));
+ }
+
+ free(their_tree_name);
+ return 0;
+}
+
+/**
+ * Commits the current index with state->msg as the commit message and
+ * state->author_name, state->author_email and state->author_date as the author
+ * information.
+ */
+static void do_commit(const struct am_state *state)
+{
+ struct object_id tree, parent, commit;
+ const struct object_id *old_oid;
+ struct commit_list *parents = NULL;
+ const char *reflog_msg, *author, *committer = NULL;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (run_hooks("pre-applypatch"))
+ exit(1);
+
+ if (write_cache_as_tree(&tree, 0, NULL))
+ die(_("git write-tree failed to write a tree"));
+
+ if (!get_oid_commit("HEAD", &parent)) {
+ old_oid = &parent;
+ commit_list_insert(lookup_commit(the_repository, &parent),
+ &parents);
+ } else {
+ old_oid = NULL;
+ say(state, stderr, _("applying to an empty history"));
+ }
+
+ author = fmt_ident(state->author_name, state->author_email,
+ WANT_AUTHOR_IDENT,
+ state->ignore_date ? NULL : state->author_date,
+ IDENT_STRICT);
+
+ if (state->committer_date_is_author_date)
+ committer = fmt_ident(getenv("GIT_COMMITTER_NAME"),
+ getenv("GIT_COMMITTER_EMAIL"),
+ WANT_COMMITTER_IDENT,
+ state->ignore_date ? NULL
+ : state->author_date,
+ IDENT_STRICT);
+
+ if (commit_tree_extended(state->msg, state->msg_len, &tree, parents,
+ &commit, author, committer, state->sign_commit,
+ NULL))
+ die(_("failed to write commit object"));
+
+ reflog_msg = getenv("GIT_REFLOG_ACTION");
+ if (!reflog_msg)
+ reflog_msg = "am";
+
+ strbuf_addf(&sb, "%s: %.*s", reflog_msg, linelen(state->msg),
+ state->msg);
+
+ update_ref(sb.buf, "HEAD", &commit, old_oid, 0,
+ UPDATE_REFS_DIE_ON_ERR);
+
+ if (state->rebasing) {
+ FILE *fp = xfopen(am_path(state, "rewritten"), "a");
+
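+		/*
+		 * Record "<original commit> <new commit>" so that notes can be
+		 * copied and the post-rewrite hook can be run once all patches
+		 * have been applied.
+		 */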
+ assert(!is_null_oid(&state->orig_commit));
+ fprintf(fp, "%s ", oid_to_hex(&state->orig_commit));
+ fprintf(fp, "%s\n", oid_to_hex(&commit));
+ fclose(fp);
+ }
+
+ run_hooks("post-applypatch");
+
+ strbuf_release(&sb);
+}
+
+/**
+ * Validates the am_state for resuming -- the "msg" and authorship fields must
+ * be filled in.
+ */
+static void validate_resume_state(const struct am_state *state)
+{
+ if (!state->msg)
+ die(_("cannot resume: %s does not exist."),
+ am_path(state, "final-commit"));
+
+ if (!state->author_name || !state->author_email || !state->author_date)
+ die(_("cannot resume: %s does not exist."),
+ am_path(state, "author-script"));
+}
+
+/**
+ * Interactively prompts the user to decide whether the current patch should
+ * be applied.
+ *
+ * Returns 0 if the user chooses to apply the patch, 1 if the user chooses to
+ * skip it.
+ */
+static int do_interactive(struct am_state *state)
+{
+ assert(state->msg);
+
+ for (;;) {
+ char reply[64];
+
+ puts(_("Commit Body is:"));
+ puts("--------------------------");
+ printf("%s", state->msg);
+ puts("--------------------------");
+
+ /*
+ * TRANSLATORS: Make sure to include [y], [n], [e], [v] and [a]
+ * in your translation. The program will only accept English
+ * input at this point.
+ */
+ printf(_("Apply? [y]es/[n]o/[e]dit/[v]iew patch/[a]ccept all: "));
+ if (!fgets(reply, sizeof(reply), stdin))
+ die("unable to read from stdin; aborting");
+
+ if (*reply == 'y' || *reply == 'Y') {
+ return 0;
+ } else if (*reply == 'a' || *reply == 'A') {
+ state->interactive = 0;
+ return 0;
+ } else if (*reply == 'n' || *reply == 'N') {
+ return 1;
+ } else if (*reply == 'e' || *reply == 'E') {
+ struct strbuf msg = STRBUF_INIT;
+
+ if (!launch_editor(am_path(state, "final-commit"), &msg, NULL)) {
+ free(state->msg);
+ state->msg = strbuf_detach(&msg, &state->msg_len);
+ }
+ strbuf_release(&msg);
+ } else if (*reply == 'v' || *reply == 'V') {
+ const char *pager = git_pager(1);
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ if (!pager)
+ pager = "cat";
+ prepare_pager_args(&cp, pager);
+ strvec_push(&cp.args, am_path(state, "patch"));
+ run_command(&cp);
+ }
+ }
+}
+
+/**
+ * Applies all queued mail.
+ *
+ * If `resume` is true, we are "resuming". The "msg" and authorship fields, as
+ * well as the state directory's "patch" file, are used as-is for applying the
+ * patch and committing it.
+ */
+static void am_run(struct am_state *state, int resume)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ unlink(am_path(state, "dirtyindex"));
+
+ if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0,
+ NULL, NULL, NULL) < 0)
+ die(_("unable to write index file"));
+
+ if (repo_index_has_changes(the_repository, NULL, &sb)) {
+ write_state_bool(state, "dirtyindex", 1);
+ die(_("Dirty index: cannot apply patches (dirty: %s)"), sb.buf);
+ }
+
+ strbuf_release(&sb);
+
+ while (state->cur <= state->last) {
+ const char *mail = am_path(state, msgnum(state));
+ int apply_status;
+ int to_keep;
+
+ reset_ident_date();
+
+ if (!file_exists(mail))
+ goto next;
+
+ if (resume) {
+ validate_resume_state(state);
+ } else {
+ int skip;
+
+ if (state->rebasing)
+ skip = parse_mail_rebase(state, mail);
+ else
+ skip = parse_mail(state, mail);
+
+ if (skip)
+ goto next; /* mail should be skipped */
+
+ if (state->signoff)
+ am_append_signoff(state);
+
+ write_author_script(state);
+ write_commit_msg(state);
+ }
+
+ if (state->interactive && do_interactive(state))
+ goto next;
+
+ to_keep = 0;
+ if (is_empty_or_missing_file(am_path(state, "patch"))) {
+ switch (state->empty_type) {
+ case DROP_EMPTY_COMMIT:
+ say(state, stdout, _("Skipping: %.*s"), linelen(state->msg), state->msg);
+ goto next;
+ break;
+ case KEEP_EMPTY_COMMIT:
+ to_keep = 1;
+ say(state, stdout, _("Creating an empty commit: %.*s"),
+ linelen(state->msg), state->msg);
+ break;
+ case STOP_ON_EMPTY_COMMIT:
+ printf_ln(_("Patch is empty."));
+ die_user_resolve(state);
+ break;
+ }
+ }
+
+ if (run_applypatch_msg_hook(state))
+ exit(1);
+ if (to_keep)
+ goto commit;
+
+ say(state, stdout, _("Applying: %.*s"), linelen(state->msg), state->msg);
+
+ apply_status = run_apply(state, NULL);
+
+ if (apply_status && state->threeway) {
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_addstr(&sb, am_path(state, "patch-merge-index"));
+ apply_status = fall_back_threeway(state, sb.buf);
+ strbuf_release(&sb);
+
+ /*
+ * Applying the patch to an earlier tree and merging
+ * the result may have produced the same tree as ours.
+ */
+ if (!apply_status &&
+ !repo_index_has_changes(the_repository, NULL, NULL)) {
+ say(state, stdout, _("No changes -- Patch already applied."));
+ goto next;
+ }
+ }
+
+ if (apply_status) {
+ printf_ln(_("Patch failed at %s %.*s"), msgnum(state),
+ linelen(state->msg), state->msg);
+
+ if (advice_enabled(ADVICE_AM_WORK_DIR))
+ advise(_("Use 'git am --show-current-patch=diff' to see the failed patch"));
+
+ die_user_resolve(state);
+ }
+
+commit:
+ do_commit(state);
+
+next:
+ am_next(state);
+
+ if (resume)
+ am_load(state);
+ resume = 0;
+ }
+
+ if (!is_empty_or_missing_file(am_path(state, "rewritten"))) {
+ assert(state->rebasing);
+ copy_notes_for_rebase(state);
+ run_post_rewrite_hook(state);
+ }
+
+ /*
+ * In rebasing mode, it's up to the caller to take care of
+ * housekeeping.
+ */
+ if (!state->rebasing) {
+ am_destroy(state);
+ run_auto_maintenance(state->quiet);
+ }
+}
+
+/**
+ * Resume the current am session after patch application failure. The user did
+ * all the hard work, and we do not have to do any patch application. Just
+ * trust and commit what the user has in the index and working tree. If `allow_empty`
+ * is true, record an empty commit when the index has not changed and there is
+ * no patch.
+ */
+static void am_resolve(struct am_state *state, int allow_empty)
+{
+ validate_resume_state(state);
+
+ say(state, stdout, _("Applying: %.*s"), linelen(state->msg), state->msg);
+
+ if (!repo_index_has_changes(the_repository, NULL, NULL)) {
+ if (allow_empty && is_empty_or_missing_file(am_path(state, "patch"))) {
+ printf_ln(_("No changes - recorded it as an empty commit."));
+ } else {
+ printf_ln(_("No changes - did you forget to use 'git add'?\n"
+ "If there is nothing left to stage, chances are that something else\n"
+ "already introduced the same changes; you might want to skip this patch."));
+ die_user_resolve(state);
+ }
+ }
+
+ if (unmerged_index(&the_index)) {
+ printf_ln(_("You still have unmerged paths in your index.\n"
+ "You should 'git add' each file with resolved conflicts to mark them as such.\n"
+ "You might run `git rm` on a file to accept \"deleted by them\" for it."));
+ die_user_resolve(state);
+ }
+
+ if (state->interactive) {
+ write_index_patch(state);
+ if (do_interactive(state))
+ goto next;
+ }
+
+ repo_rerere(the_repository, 0);
+
+ do_commit(state);
+
+next:
+ am_next(state);
+ am_load(state);
+ am_run(state, 0);
+}
+
+/**
+ * Performs a checkout fast-forward from `head` to `remote`. If `reset` is
+ * true, any unmerged entries will be discarded. Returns 0 on success, -1 on
+ * failure.
+ */
+static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
+{
+ struct lock_file lock_file = LOCK_INIT;
+ struct unpack_trees_options opts;
+ struct tree_desc t[2];
+
+ if (parse_tree(head) || parse_tree(remote))
+ return -1;
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+
+ refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
+
+ memset(&opts, 0, sizeof(opts));
+ opts.head_idx = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ opts.update = 1;
+ opts.merge = 1;
+ opts.reset = reset ? UNPACK_RESET_PROTECT_UNTRACKED : 0;
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ opts.fn = twoway_merge;
+ init_tree_desc(&t[0], head->buffer, head->size);
+ init_tree_desc(&t[1], remote->buffer, remote->size);
+
+ if (unpack_trees(2, t, &opts)) {
+ rollback_lock_file(&lock_file);
+ return -1;
+ }
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die(_("unable to write new index file"));
+
+ return 0;
+}
+
+/**
+ * Merges a tree into the index. The index's stat info will take precedence
+ * over the merged tree's. Returns 0 on success, -1 on failure.
+ */
+static int merge_tree(struct tree *tree)
+{
+ struct lock_file lock_file = LOCK_INIT;
+ struct unpack_trees_options opts;
+ struct tree_desc t[1];
+
+ if (parse_tree(tree))
+ return -1;
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+
+ memset(&opts, 0, sizeof(opts));
+ opts.head_idx = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ opts.merge = 1;
+ opts.fn = oneway_merge;
+ init_tree_desc(&t[0], tree->buffer, tree->size);
+
+ if (unpack_trees(1, t, &opts)) {
+ rollback_lock_file(&lock_file);
+ return -1;
+ }
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die(_("unable to write new index file"));
+
+ return 0;
+}
+
+/**
+ * Clean the index without touching entries that are not modified between
+ * `head` and `remote`.
+ */
+static int clean_index(const struct object_id *head, const struct object_id *remote)
+{
+ struct tree *head_tree, *remote_tree, *index_tree;
+ struct object_id index;
+
+ head_tree = parse_tree_indirect(head);
+ if (!head_tree)
+ return error(_("Could not parse object '%s'."), oid_to_hex(head));
+
+ remote_tree = parse_tree_indirect(remote);
+ if (!remote_tree)
+ return error(_("Could not parse object '%s'."), oid_to_hex(remote));
+
+ repo_read_index_unmerged(the_repository);
+
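+	/*
+	 * Fast-forwarding from `head` to itself resets the index to `head`,
+	 * discarding any unmerged entries along the way.
+	 */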
+ if (fast_forward_to(head_tree, head_tree, 1))
+ return -1;
+
+ if (write_cache_as_tree(&index, 0, NULL))
+ return -1;
+
+ index_tree = parse_tree_indirect(&index);
+ if (!index_tree)
+ return error(_("Could not parse object '%s'."), oid_to_hex(&index));
+
+ if (fast_forward_to(index_tree, remote_tree, 0))
+ return -1;
+
+ if (merge_tree(remote_tree))
+ return -1;
+
+ remove_branch_state(the_repository, 0);
+
+ return 0;
+}
+
+/**
+ * Resets rerere's merge resolution metadata.
+ */
+static void am_rerere_clear(void)
+{
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+ rerere_clear(the_repository, &merge_rr);
+ string_list_clear(&merge_rr, 1);
+}
+
+/**
+ * Resume the current am session by skipping the current patch.
+ */
+static void am_skip(struct am_state *state)
+{
+ struct object_id head;
+
+ am_rerere_clear();
+
+ if (get_oid("HEAD", &head))
+ oidcpy(&head, the_hash_algo->empty_tree);
+
+ if (clean_index(&head, &head))
+ die(_("failed to clean index"));
+
+ if (state->rebasing) {
+ FILE *fp = xfopen(am_path(state, "rewritten"), "a");
+
+ assert(!is_null_oid(&state->orig_commit));
+ fprintf(fp, "%s ", oid_to_hex(&state->orig_commit));
+ fprintf(fp, "%s\n", oid_to_hex(&head));
+ fclose(fp);
+ }
+
+ am_next(state);
+ am_load(state);
+ am_run(state, 0);
+}
+
+/**
+ * Returns true if it is safe to reset HEAD to ORIG_HEAD, false otherwise.
+ *
+ * It is not safe to reset HEAD when:
+ * 1. git-am previously failed because the index was dirty.
+ * 2. HEAD has moved since git-am previously failed.
+ */
+static int safe_to_abort(const struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+ struct object_id abort_safety, head;
+
+ if (file_exists(am_path(state, "dirtyindex")))
+ return 0;
+
+ if (read_state_file(&sb, state, "abort-safety", 1) > 0) {
+ if (get_oid_hex(sb.buf, &abort_safety))
+ die(_("could not parse %s"), am_path(state, "abort-safety"));
+ } else
+ oidclr(&abort_safety);
+ strbuf_release(&sb);
+
+ if (get_oid("HEAD", &head))
+ oidclr(&head);
+
+ if (oideq(&head, &abort_safety))
+ return 1;
+
+ warning(_("You seem to have moved HEAD since the last 'am' failure.\n"
+ "Not rewinding to ORIG_HEAD"));
+
+ return 0;
+}
+
+/**
+ * Aborts the current am session if it is safe to do so.
+ */
+static void am_abort(struct am_state *state)
+{
+ struct object_id curr_head, orig_head;
+ int has_curr_head, has_orig_head;
+ char *curr_branch;
+
+ if (!safe_to_abort(state)) {
+ am_destroy(state);
+ return;
+ }
+
+ am_rerere_clear();
+
+ curr_branch = resolve_refdup("HEAD", 0, &curr_head, NULL);
+ has_curr_head = curr_branch && !is_null_oid(&curr_head);
+ if (!has_curr_head)
+ oidcpy(&curr_head, the_hash_algo->empty_tree);
+
+ has_orig_head = !get_oid("ORIG_HEAD", &orig_head);
+ if (!has_orig_head)
+ oidcpy(&orig_head, the_hash_algo->empty_tree);
+
+ if (clean_index(&curr_head, &orig_head))
+ die(_("failed to clean index"));
+
+ if (has_orig_head)
+ update_ref("am --abort", "HEAD", &orig_head,
+ has_curr_head ? &curr_head : NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
+ else if (curr_branch)
+ delete_ref(NULL, curr_branch, NULL, REF_NO_DEREF);
+
+ free(curr_branch);
+ am_destroy(state);
+}
+
+static int show_patch(struct am_state *state, enum show_patch_type sub_mode)
+{
+ struct strbuf sb = STRBUF_INIT;
+ const char *patch_path;
+ int len;
+
+ if (!is_null_oid(&state->orig_commit)) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&cmd.args, "show", oid_to_hex(&state->orig_commit),
+ "--", NULL);
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
+ }
+
+ switch (sub_mode) {
+ case SHOW_PATCH_RAW:
+ patch_path = am_path(state, msgnum(state));
+ break;
+ case SHOW_PATCH_DIFF:
+ patch_path = am_path(state, "patch");
+ break;
+ default:
+ BUG("invalid mode for --show-current-patch");
+ }
+
+ len = strbuf_read_file(&sb, patch_path, 0);
+ if (len < 0)
+ die_errno(_("failed to read '%s'"), patch_path);
+
+ setup_pager();
+ write_in_full(1, sb.buf, sb.len);
+ strbuf_release(&sb);
+ return 0;
+}
+
+/**
+ * parse_options() callback that validates and sets opt->value to the
+ * PATCH_FORMAT_* enum value corresponding to `arg`.
+ */
+static int parse_opt_patchformat(const struct option *opt, const char *arg, int unset)
+{
+ int *opt_value = opt->value;
+
+ if (unset)
+ *opt_value = PATCH_FORMAT_UNKNOWN;
+ else if (!strcmp(arg, "mbox"))
+ *opt_value = PATCH_FORMAT_MBOX;
+ else if (!strcmp(arg, "stgit"))
+ *opt_value = PATCH_FORMAT_STGIT;
+ else if (!strcmp(arg, "stgit-series"))
+ *opt_value = PATCH_FORMAT_STGIT_SERIES;
+ else if (!strcmp(arg, "hg"))
+ *opt_value = PATCH_FORMAT_HG;
+ else if (!strcmp(arg, "mboxrd"))
+ *opt_value = PATCH_FORMAT_MBOXRD;
+ /*
+ * Please update $__git_patchformat in git-completion.bash
+ * when you add new options
+ */
+ else
+ return error(_("invalid value for '%s': '%s'"),
+ "--patch-format", arg);
+ return 0;
+}
+
+enum resume_type {
+ RESUME_FALSE = 0,
+ RESUME_APPLY,
+ RESUME_RESOLVED,
+ RESUME_SKIP,
+ RESUME_ABORT,
+ RESUME_QUIT,
+ RESUME_SHOW_PATCH,
+ RESUME_ALLOW_EMPTY,
+};
+
+struct resume_mode {
+ enum resume_type mode;
+ enum show_patch_type sub_mode;
+};
+
+static int parse_opt_show_current_patch(const struct option *opt, const char *arg, int unset)
+{
+ int *opt_value = opt->value;
+ struct resume_mode *resume = container_of(opt_value, struct resume_mode, mode);
+
+ /*
+ * Please update $__git_showcurrentpatch in git-completion.bash
+ * when you add new options
+ */
+ const char *valid_modes[] = {
+ [SHOW_PATCH_DIFF] = "diff",
+ [SHOW_PATCH_RAW] = "raw"
+ };
+ int new_value = SHOW_PATCH_RAW;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (arg) {
+ for (new_value = 0; new_value < ARRAY_SIZE(valid_modes); new_value++) {
+ if (!strcmp(arg, valid_modes[new_value]))
+ break;
+ }
+ if (new_value >= ARRAY_SIZE(valid_modes))
+ return error(_("invalid value for '%s': '%s'"),
+ "--show-current-patch", arg);
+ }
+
+ if (resume->mode == RESUME_SHOW_PATCH && new_value != resume->sub_mode)
+ return error(_("options '%s=%s' and '%s=%s' "
+ "cannot be used together"),
+			     "--show-current-patch", arg, "--show-current-patch", valid_modes[resume->sub_mode]);
+
+ resume->mode = RESUME_SHOW_PATCH;
+ resume->sub_mode = new_value;
+ return 0;
+}
+
+static int git_am_config(const char *k, const char *v, void *cb UNUSED)
+{
+ int status;
+
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+
+ return git_default_config(k, v, NULL);
+}
+
+int cmd_am(int argc, const char **argv, const char *prefix)
+{
+ struct am_state state;
+ int binary = -1;
+ int keep_cr = -1;
+ int patch_format = PATCH_FORMAT_UNKNOWN;
+ struct resume_mode resume = { .mode = RESUME_FALSE };
+ int in_progress;
+ int ret = 0;
+
+ const char * const usage[] = {
+ N_("git am [<options>] [(<mbox> | <Maildir>)...]"),
+ N_("git am [<options>] (--continue | --skip | --abort)"),
+ NULL
+ };
+
+ struct option options[] = {
+ OPT_BOOL('i', "interactive", &state.interactive,
+ N_("run interactively")),
+ OPT_HIDDEN_BOOL('b', "binary", &binary,
+ N_("historical option -- no-op")),
+ OPT_BOOL('3', "3way", &state.threeway,
+ N_("allow fall back on 3way merging if needed")),
+ OPT__QUIET(&state.quiet, N_("be quiet")),
+ OPT_SET_INT('s', "signoff", &state.signoff,
+ N_("add a Signed-off-by trailer to the commit message"),
+ SIGNOFF_EXPLICIT),
+ OPT_BOOL('u', "utf8", &state.utf8,
+ N_("recode into utf8 (default)")),
+ OPT_SET_INT('k', "keep", &state.keep,
+ N_("pass -k flag to git-mailinfo"), KEEP_TRUE),
+ OPT_SET_INT(0, "keep-non-patch", &state.keep,
+ N_("pass -b flag to git-mailinfo"), KEEP_NON_PATCH),
+ OPT_BOOL('m', "message-id", &state.message_id,
+ N_("pass -m flag to git-mailinfo")),
+ OPT_SET_INT_F(0, "keep-cr", &keep_cr,
+ N_("pass --keep-cr flag to git-mailsplit for mbox format"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "no-keep-cr", &keep_cr,
+ N_("do not pass --keep-cr flag to git-mailsplit independent of am.keepcr"),
+ 0, PARSE_OPT_NONEG),
+ OPT_BOOL('c', "scissors", &state.scissors,
+ N_("strip everything before a scissors line")),
+ OPT_CALLBACK_F(0, "quoted-cr", &state.quoted_cr, N_("action"),
+ N_("pass it through git-mailinfo"),
+ PARSE_OPT_NONEG, am_option_parse_quoted_cr),
+ OPT_PASSTHRU_ARGV(0, "whitespace", &state.git_apply_opts, N_("action"),
+ N_("pass it through git-apply"),
+ 0),
+ OPT_PASSTHRU_ARGV(0, "ignore-space-change", &state.git_apply_opts, NULL,
+ N_("pass it through git-apply"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU_ARGV(0, "ignore-whitespace", &state.git_apply_opts, NULL,
+ N_("pass it through git-apply"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU_ARGV(0, "directory", &state.git_apply_opts, N_("root"),
+ N_("pass it through git-apply"),
+ 0),
+ OPT_PASSTHRU_ARGV(0, "exclude", &state.git_apply_opts, N_("path"),
+ N_("pass it through git-apply"),
+ 0),
+ OPT_PASSTHRU_ARGV(0, "include", &state.git_apply_opts, N_("path"),
+ N_("pass it through git-apply"),
+ 0),
+ OPT_PASSTHRU_ARGV('C', NULL, &state.git_apply_opts, N_("n"),
+ N_("pass it through git-apply"),
+ 0),
+ OPT_PASSTHRU_ARGV('p', NULL, &state.git_apply_opts, N_("num"),
+ N_("pass it through git-apply"),
+ 0),
+ OPT_CALLBACK(0, "patch-format", &patch_format, N_("format"),
+ N_("format the patch(es) are in"),
+ parse_opt_patchformat),
+ OPT_PASSTHRU_ARGV(0, "reject", &state.git_apply_opts, NULL,
+ N_("pass it through git-apply"),
+ PARSE_OPT_NOARG),
+ OPT_STRING(0, "resolvemsg", &state.resolvemsg, NULL,
+ N_("override error message when patch failure occurs")),
+ OPT_CMDMODE(0, "continue", &resume.mode,
+ N_("continue applying patches after resolving a conflict"),
+ RESUME_RESOLVED),
+ OPT_CMDMODE('r', "resolved", &resume.mode,
+ N_("synonyms for --continue"),
+ RESUME_RESOLVED),
+ OPT_CMDMODE(0, "skip", &resume.mode,
+ N_("skip the current patch"),
+ RESUME_SKIP),
+ OPT_CMDMODE(0, "abort", &resume.mode,
+ N_("restore the original branch and abort the patching operation"),
+ RESUME_ABORT),
+ OPT_CMDMODE(0, "quit", &resume.mode,
+ N_("abort the patching operation but keep HEAD where it is"),
+ RESUME_QUIT),
+ { OPTION_CALLBACK, 0, "show-current-patch", &resume.mode,
+ "(diff|raw)",
+ N_("show the patch being applied"),
+ PARSE_OPT_CMDMODE | PARSE_OPT_OPTARG | PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
+ parse_opt_show_current_patch, RESUME_SHOW_PATCH },
+ OPT_CMDMODE(0, "allow-empty", &resume.mode,
+ N_("record the empty patch as an empty commit"),
+ RESUME_ALLOW_EMPTY),
+ OPT_BOOL(0, "committer-date-is-author-date",
+ &state.committer_date_is_author_date,
+ N_("lie about committer date")),
+ OPT_BOOL(0, "ignore-date", &state.ignore_date,
+ N_("use current timestamp for author date")),
+ OPT_RERERE_AUTOUPDATE(&state.allow_rerere_autoupdate),
+ { OPTION_STRING, 'S', "gpg-sign", &state.sign_commit, N_("key-id"),
+ N_("GPG-sign commits"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_CALLBACK_F(STOP_ON_EMPTY_COMMIT, "empty", &state.empty_type, "{stop,drop,keep}",
+ N_("how to handle empty patches"),
+ PARSE_OPT_NONEG, am_option_parse_empty),
+ OPT_HIDDEN_BOOL(0, "rebasing", &state.rebasing,
+ N_("(internal use for git-rebase)")),
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(usage, options);
+
+ git_config(git_am_config, NULL);
+
+ am_state_init(&state);
+
+ in_progress = am_in_progress(&state);
+ if (in_progress)
+ am_load(&state);
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (binary >= 0)
+		fprintf_ln(stderr, _("The -b/--binary option has been a no-op for a long time, and\n"
+ "it will be removed. Please do not use it anymore."));
+
+ /* Ensure a valid committer ident can be constructed */
+ git_committer_info(IDENT_STRICT);
+
+ if (repo_read_index_preload(the_repository, NULL, 0) < 0)
+ die(_("failed to read the index"));
+
+ if (in_progress) {
+ /*
+ * Catch user error to feed us patches when there is a session
+ * in progress:
+ *
+ * 1. mbox path(s) are provided on the command-line.
+ * 2. stdin is not a tty: the user is trying to feed us a patch
+ * from standard input. This is somewhat unreliable -- stdin
+ * could be /dev/null for example and the caller did not
+ * intend to feed us a patch but wanted to continue
+ * unattended.
+ */
+ if (argc || (resume.mode == RESUME_FALSE && !isatty(0)))
+ die(_("previous rebase directory %s still exists but mbox given."),
+ state.dir);
+
+ if (resume.mode == RESUME_FALSE)
+ resume.mode = RESUME_APPLY;
+
+ if (state.signoff == SIGNOFF_EXPLICIT)
+ am_append_signoff(&state);
+ } else {
+ struct strvec paths = STRVEC_INIT;
+ int i;
+
+ /*
+ * Handle stray state directory in the independent-run case. In
+ * the --rebasing case, it is up to the caller to take care of
+ * stray directories.
+ */
+ if (file_exists(state.dir) && !state.rebasing) {
+ if (resume.mode == RESUME_ABORT || resume.mode == RESUME_QUIT) {
+ am_destroy(&state);
+ am_state_release(&state);
+ return 0;
+ }
+
+ die(_("Stray %s directory found.\n"
+ "Use \"git am --abort\" to remove it."),
+ state.dir);
+ }
+
+ if (resume.mode)
+ die(_("Resolve operation not in progress, we are not resuming."));
+
+ for (i = 0; i < argc; i++) {
+ if (is_absolute_path(argv[i]) || !prefix)
+ strvec_push(&paths, argv[i]);
+ else
+ strvec_push(&paths, mkpath("%s/%s", prefix, argv[i]));
+ }
+
+ if (state.interactive && !paths.nr)
+ die(_("interactive mode requires patches on the command line"));
+
+ am_setup(&state, patch_format, paths.v, keep_cr);
+
+ strvec_clear(&paths);
+ }
+
+ switch (resume.mode) {
+ case RESUME_FALSE:
+ am_run(&state, 0);
+ break;
+ case RESUME_APPLY:
+ am_run(&state, 1);
+ break;
+ case RESUME_RESOLVED:
+ case RESUME_ALLOW_EMPTY:
+ am_resolve(&state, resume.mode == RESUME_ALLOW_EMPTY ? 1 : 0);
+ break;
+ case RESUME_SKIP:
+ am_skip(&state);
+ break;
+ case RESUME_ABORT:
+ am_abort(&state);
+ break;
+ case RESUME_QUIT:
+ am_rerere_clear();
+ am_destroy(&state);
+ break;
+ case RESUME_SHOW_PATCH:
+ ret = show_patch(&state, resume.sub_mode);
+ break;
+ default:
+ BUG("invalid resume value");
+ }
+
+ am_state_release(&state);
+
+ return ret;
+}
diff --git a/builtin/annotate.c b/builtin/annotate.c
new file mode 100644
index 0000000..58ff977
--- /dev/null
+++ b/builtin/annotate.c
@@ -0,0 +1,22 @@
+/*
+ * "git annotate" builtin alias
+ *
+ * Copyright (C) 2006 Ryan Anderson
+ */
+#include "git-compat-util.h"
+#include "builtin.h"
+#include "strvec.h"
+
+int cmd_annotate(int argc, const char **argv, const char *prefix)
+{
+ struct strvec args = STRVEC_INIT;
+ int i;
+
+ strvec_pushl(&args, "annotate", "-c", NULL);
+
+ for (i = 1; i < argc; i++) {
+ strvec_push(&args, argv[i]);
+ }
+
+ return cmd_blame(args.nr, args.v, prefix);
+}
diff --git a/builtin/apply.c b/builtin/apply.c
new file mode 100644
index 0000000..555219d
--- /dev/null
+++ b/builtin/apply.c
@@ -0,0 +1,33 @@
+#include "cache.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "apply.h"
+
+static const char * const apply_usage[] = {
+ N_("git apply [<options>] [<patch>...]"),
+ NULL
+};
+
+int cmd_apply(int argc, const char **argv, const char *prefix)
+{
+ int force_apply = 0;
+ int options = 0;
+ int ret;
+ struct apply_state state;
+
+ if (init_apply_state(&state, the_repository, prefix))
+ exit(128);
+
+ argc = apply_parse_options(argc, argv,
+ &state, &force_apply, &options,
+ apply_usage);
+
+ if (check_apply_state(&state, force_apply))
+ exit(128);
+
+ ret = apply_all_patches(&state, argc, argv, options);
+
+ clear_apply_state(&state);
+
+ return ret;
+}
diff --git a/builtin/archive.c b/builtin/archive.c
new file mode 100644
index 0000000..f094390
--- /dev/null
+++ b/builtin/archive.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2006 Franck Bui-Huu
+ * Copyright (c) 2006 Rene Scharfe
+ */
+#include "cache.h"
+#include "builtin.h"
+#include "archive.h"
+#include "transport.h"
+#include "parse-options.h"
+#include "pkt-line.h"
+#include "sideband.h"
+
+static void create_output_file(const char *output_file)
+{
+ int output_fd = xopen(output_file, O_CREAT | O_WRONLY | O_TRUNC, 0666);
+ if (output_fd != 1) {
+ if (dup2(output_fd, 1) < 0)
+ die_errno(_("could not redirect output"));
+ else
+ close(output_fd);
+ }
+}
+
+static int run_remote_archiver(int argc, const char **argv,
+ const char *remote, const char *exec,
+ const char *name_hint)
+{
+ int fd[2], i, rv;
+ struct transport *transport;
+ struct remote *_remote;
+ struct packet_reader reader;
+
+ _remote = remote_get(remote);
+ if (!_remote->url[0])
+ die(_("git archive: Remote with no URL"));
+ transport = transport_get(_remote, _remote->url[0]);
+ transport_connect(transport, "git-upload-archive", exec, fd);
+
+ /*
+ * Inject a fake --format field at the beginning of the
+ * arguments, with the format inferred from our output
+ * filename. This way explicit --format options can override
+ * it.
+ */
+ if (name_hint) {
+ const char *format = archive_format_from_filename(name_hint);
+ if (format)
+ packet_write_fmt(fd[1], "argument --format=%s\n", format);
+ }
+ for (i = 1; i < argc; i++)
+ packet_write_fmt(fd[1], "argument %s\n", argv[i]);
+ packet_flush(fd[1]);
+
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_DIE_ON_ERR_PACKET);
+
+ if (packet_reader_read(&reader) != PACKET_READ_NORMAL)
+ die(_("git archive: expected ACK/NAK, got a flush packet"));
+ if (strcmp(reader.line, "ACK")) {
+ if (starts_with(reader.line, "NACK "))
+ die(_("git archive: NACK %s"), reader.line + 5);
+ die(_("git archive: protocol error"));
+ }
+
+ if (packet_reader_read(&reader) != PACKET_READ_FLUSH)
+ die(_("git archive: expected a flush"));
+
+ /* Now, start reading from fd[0] and spit it out to stdout */
+ rv = recv_sideband("archive", fd[0], 1);
+ rv |= transport_disconnect(transport);
+
+ return !!rv;
+}
+
+#define PARSE_OPT_KEEP_ALL ( PARSE_OPT_KEEP_DASHDASH | \
+ PARSE_OPT_KEEP_ARGV0 | \
+ PARSE_OPT_KEEP_UNKNOWN_OPT | \
+ PARSE_OPT_NO_INTERNAL_HELP )
+
+int cmd_archive(int argc, const char **argv, const char *prefix)
+{
+ const char *exec = "git-upload-archive";
+ const char *output = NULL;
+ const char *remote = NULL;
+ struct option local_opts[] = {
+ OPT_FILENAME('o', "output", &output,
+ N_("write the archive to this file")),
+ OPT_STRING(0, "remote", &remote, N_("repo"),
+ N_("retrieve the archive from remote repository <repo>")),
+ OPT_STRING(0, "exec", &exec, N_("command"),
+ N_("path to the remote git-upload-archive command")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, local_opts, NULL,
+ PARSE_OPT_KEEP_ALL);
+
+ init_archivers();
+
+ if (output)
+ create_output_file(output);
+
+ if (remote)
+ return run_remote_archiver(argc, argv, remote, exec, output);
+
+ setvbuf(stderr, NULL, _IOLBF, BUFSIZ);
+
+ return write_archive(argc, argv, prefix, the_repository, output, 0);
+}
diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c
new file mode 100644
index 0000000..6e41cbd
--- /dev/null
+++ b/builtin/bisect--helper.c
@@ -0,0 +1,1429 @@
+#include "builtin.h"
+#include "cache.h"
+#include "parse-options.h"
+#include "bisect.h"
+#include "refs.h"
+#include "dir.h"
+#include "strvec.h"
+#include "run-command.h"
+#include "prompt.h"
+#include "quote.h"
+#include "revision.h"
+
+static GIT_PATH_FUNC(git_path_bisect_terms, "BISECT_TERMS")
+static GIT_PATH_FUNC(git_path_bisect_expected_rev, "BISECT_EXPECTED_REV")
+static GIT_PATH_FUNC(git_path_bisect_ancestors_ok, "BISECT_ANCESTORS_OK")
+static GIT_PATH_FUNC(git_path_bisect_start, "BISECT_START")
+static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG")
+static GIT_PATH_FUNC(git_path_head_name, "head-name")
+static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES")
+static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT")
+static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
+
+static const char * const git_bisect_helper_usage[] = {
+ N_("git bisect--helper --bisect-reset [<commit>]"),
+ "git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]",
+ N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]"
+ " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"),
+ "git bisect--helper --bisect-next",
+ N_("git bisect--helper --bisect-state (bad|new) [<rev>]"),
+ N_("git bisect--helper --bisect-state (good|old) [<rev>...]"),
+ N_("git bisect--helper --bisect-replay <filename>"),
+ N_("git bisect--helper --bisect-skip [(<rev>|<range>)...]"),
+ "git bisect--helper --bisect-visualize",
+ N_("git bisect--helper --bisect-run <cmd>..."),
+ NULL
+};
+
+struct add_bisect_ref_data {
+ struct rev_info *revs;
+ unsigned int object_flags;
+};
+
+struct bisect_terms {
+ char *term_good;
+ char *term_bad;
+};
+
+static void free_terms(struct bisect_terms *terms)
+{
+ FREE_AND_NULL(terms->term_good);
+ FREE_AND_NULL(terms->term_bad);
+}
+
+static void set_terms(struct bisect_terms *terms, const char *bad,
+ const char *good)
+{
+ free((void *)terms->term_good);
+ terms->term_good = xstrdup(good);
+ free((void *)terms->term_bad);
+ terms->term_bad = xstrdup(bad);
+}
+
+static const char vocab_bad[] = "bad|new";
+static const char vocab_good[] = "good|old";
+
+static int bisect_autostart(struct bisect_terms *terms);
+
+/*
+ * Check whether the string `term` belongs to the set of strings
+ * included in the variable arguments.
+ */
+LAST_ARG_MUST_BE_NULL
+static int one_of(const char *term, ...)
+{
+ int res = 0;
+ va_list matches;
+ const char *match;
+
+ va_start(matches, term);
+ while (!res && (match = va_arg(matches, const char *)))
+ res = !strcmp(term, match);
+ va_end(matches);
+
+ return res;
+}
+
+/*
+ * BISECT_INTERNAL_SUCCESS_MERGE_BASE and
+ * BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND are return codes that
+ * indicate special success.
+ */
+
+static int is_bisect_success(enum bisect_error res)
+{
+ return !res ||
+ res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND ||
+ res == BISECT_INTERNAL_SUCCESS_MERGE_BASE;
+}
+
+static int write_in_file(const char *path, const char *mode, const char *format, va_list args)
+{
+ FILE *fp = NULL;
+ int res = 0;
+
+ if (strcmp(mode, "w") && strcmp(mode, "a"))
+ BUG("write-in-file does not support '%s' mode", mode);
+ fp = fopen(path, mode);
+ if (!fp)
+ return error_errno(_("cannot open file '%s' in mode '%s'"), path, mode);
+ res = vfprintf(fp, format, args);
+
+ if (res < 0) {
+ int saved_errno = errno;
+ fclose(fp);
+ errno = saved_errno;
+ return error_errno(_("could not write to file '%s'"), path);
+ }
+
+ return fclose(fp);
+}
+
+__attribute__((format (printf, 2, 3)))
+static int write_to_file(const char *path, const char *format, ...)
+{
+ int res;
+ va_list args;
+
+ va_start(args, format);
+ res = write_in_file(path, "w", format, args);
+ va_end(args);
+
+ return res;
+}
+
+__attribute__((format (printf, 2, 3)))
+static int append_to_file(const char *path, const char *format, ...)
+{
+ int res;
+ va_list args;
+
+ va_start(args, format);
+ res = write_in_file(path, "a", format, args);
+ va_end(args);
+
+ return res;
+}
+
+static int print_file_to_stdout(const char *path)
+{
+ int fd = open(path, O_RDONLY);
+ int ret = 0;
+
+ if (fd < 0)
+ return error_errno(_("cannot open file '%s' for reading"), path);
+ if (copy_fd(fd, 1) < 0)
+ ret = error_errno(_("failed to read '%s'"), path);
+ close(fd);
+ return ret;
+}
+
+static int check_term_format(const char *term, const char *orig_term)
+{
+ int res;
+ char *new_term = xstrfmt("refs/bisect/%s", term);
+
+ res = check_refname_format(new_term, 0);
+ free(new_term);
+
+ if (res)
+ return error(_("'%s' is not a valid term"), term);
+
+ if (one_of(term, "help", "start", "skip", "next", "reset",
+ "visualize", "view", "replay", "log", "run", "terms", NULL))
+ return error(_("can't use the builtin command '%s' as a term"), term);
+
+ /*
+ * In theory, nothing prevents swapping completely good and bad,
+ * but this situation could be confusing and hasn't been tested
+ * enough. Forbid it for now.
+ */
+
+ if ((strcmp(orig_term, "bad") && one_of(term, "bad", "new", NULL)) ||
+ (strcmp(orig_term, "good") && one_of(term, "good", "old", NULL)))
+ return error(_("can't change the meaning of the term '%s'"), term);
+
+ return 0;
+}
+
+static int write_terms(const char *bad, const char *good)
+{
+ int res;
+
+ if (!strcmp(bad, good))
+ return error(_("please use two different terms"));
+
+ if (check_term_format(bad, "bad") || check_term_format(good, "good"))
+ return -1;
+
+ res = write_to_file(git_path_bisect_terms(), "%s\n%s\n", bad, good);
+
+ return res;
+}
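+
+/*
+ * Editor's note, not part of the upstream file: with the default terms the
+ * BISECT_TERMS file written above (in the repository's git directory)
+ * simply contains two lines,
+ *
+ *     bad
+ *     good
+ *
+ * in that order; get_terms() below reads the bad term first and the good
+ * term second, matching this layout.
+ */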
+
+static int bisect_reset(const char *commit)
+{
+ struct strbuf branch = STRBUF_INIT;
+
+ if (!commit) {
+ if (strbuf_read_file(&branch, git_path_bisect_start(), 0) < 1) {
+ printf(_("We are not bisecting.\n"));
+ return 0;
+ }
+ strbuf_rtrim(&branch);
+ } else {
+ struct object_id oid;
+
+ if (get_oid_commit(commit, &oid))
+ return error(_("'%s' is not a valid commit"), commit);
+ strbuf_addstr(&branch, commit);
+ }
+
+ if (!ref_exists("BISECT_HEAD")) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "checkout", branch.buf, "--", NULL);
+ if (run_command(&cmd)) {
+ error(_("could not check out original"
+ " HEAD '%s'. Try 'git bisect"
+ " reset <commit>'."), branch.buf);
+ strbuf_release(&branch);
+ return -1;
+ }
+ }
+
+ strbuf_release(&branch);
+ return bisect_clean_state();
+}
+
+static void log_commit(FILE *fp, char *fmt, const char *state,
+ struct commit *commit)
+{
+ struct pretty_print_context pp = {0};
+ struct strbuf commit_msg = STRBUF_INIT;
+ char *label = xstrfmt(fmt, state);
+
+ format_commit_message(commit, "%s", &commit_msg, &pp);
+
+ fprintf(fp, "# %s: [%s] %s\n", label, oid_to_hex(&commit->object.oid),
+ commit_msg.buf);
+
+ strbuf_release(&commit_msg);
+ free(label);
+}
+
+static int bisect_write(const char *state, const char *rev,
+ const struct bisect_terms *terms, int nolog)
+{
+ struct strbuf tag = STRBUF_INIT;
+ struct object_id oid;
+ struct commit *commit;
+ FILE *fp = NULL;
+ int res = 0;
+
+ if (!strcmp(state, terms->term_bad)) {
+ strbuf_addf(&tag, "refs/bisect/%s", state);
+ } else if (one_of(state, terms->term_good, "skip", NULL)) {
+ strbuf_addf(&tag, "refs/bisect/%s-%s", state, rev);
+ } else {
+ res = error(_("Bad bisect_write argument: %s"), state);
+ goto finish;
+ }
+
+ if (get_oid(rev, &oid)) {
+ res = error(_("couldn't get the oid of the rev '%s'"), rev);
+ goto finish;
+ }
+
+ if (update_ref(NULL, tag.buf, &oid, NULL, 0,
+ UPDATE_REFS_MSG_ON_ERR)) {
+ res = -1;
+ goto finish;
+ }
+
+ fp = fopen(git_path_bisect_log(), "a");
+ if (!fp) {
+ res = error_errno(_("couldn't open the file '%s'"), git_path_bisect_log());
+ goto finish;
+ }
+
+ commit = lookup_commit_reference(the_repository, &oid);
+ log_commit(fp, "%s", state, commit);
+
+ if (!nolog)
+ fprintf(fp, "git bisect %s %s\n", state, rev);
+
+finish:
+ if (fp)
+ fclose(fp);
+ strbuf_release(&tag);
+ return res;
+}
+
+static int check_and_set_terms(struct bisect_terms *terms, const char *cmd)
+{
+ int has_term_file = !is_empty_or_missing_file(git_path_bisect_terms());
+
+ if (one_of(cmd, "skip", "start", "terms", NULL))
+ return 0;
+
+ if (has_term_file && strcmp(cmd, terms->term_bad) &&
+ strcmp(cmd, terms->term_good))
+ return error(_("Invalid command: you're currently in a "
+ "%s/%s bisect"), terms->term_bad,
+ terms->term_good);
+
+ if (!has_term_file) {
+ if (one_of(cmd, "bad", "good", NULL)) {
+ set_terms(terms, "bad", "good");
+ return write_terms(terms->term_bad, terms->term_good);
+ }
+ if (one_of(cmd, "new", "old", NULL)) {
+ set_terms(terms, "new", "old");
+ return write_terms(terms->term_bad, terms->term_good);
+ }
+ }
+
+ return 0;
+}
+
+static int inc_nr(const char *refname UNUSED,
+ const struct object_id *oid UNUSED,
+ int flag UNUSED, void *cb_data)
+{
+ unsigned int *nr = (unsigned int *)cb_data;
+ (*nr)++;
+ return 0;
+}
+
+static const char need_bad_and_good_revision_warning[] =
+ N_("You need to give me at least one %s and %s revision.\n"
+ "You can use \"git bisect %s\" and \"git bisect %s\" for that.");
+
+static const char need_bisect_start_warning[] =
+ N_("You need to start by \"git bisect start\".\n"
+ "You then need to give me at least one %s and %s revision.\n"
+ "You can use \"git bisect %s\" and \"git bisect %s\" for that.");
+
+static int decide_next(const struct bisect_terms *terms,
+ const char *current_term, int missing_good,
+ int missing_bad)
+{
+ if (!missing_good && !missing_bad)
+ return 0;
+ if (!current_term)
+ return -1;
+
+ if (missing_good && !missing_bad &&
+ !strcmp(current_term, terms->term_good)) {
+ char *yesno;
+		/*
+		 * We have a bad (or new) commit but no good (or old) one.
+		 * We could still bisect, although this is less optimal.
+		 */
+ warning(_("bisecting only with a %s commit"), terms->term_bad);
+ if (!isatty(0))
+ return 0;
+ /*
+ * TRANSLATORS: Make sure to include [Y] and [n] in your
+ * translation. The program will only accept English input
+ * at this point.
+ */
+ yesno = git_prompt(_("Are you sure [Y/n]? "), PROMPT_ECHO);
+ if (starts_with(yesno, "N") || starts_with(yesno, "n"))
+ return -1;
+ return 0;
+ }
+
+ if (!is_empty_or_missing_file(git_path_bisect_start()))
+ return error(_(need_bad_and_good_revision_warning),
+ vocab_bad, vocab_good, vocab_bad, vocab_good);
+ else
+ return error(_(need_bisect_start_warning),
+ vocab_good, vocab_bad, vocab_good, vocab_bad);
+}
+
+static void bisect_status(struct bisect_state *state,
+ const struct bisect_terms *terms)
+{
+ char *bad_ref = xstrfmt("refs/bisect/%s", terms->term_bad);
+ char *good_glob = xstrfmt("%s-*", terms->term_good);
+
+ if (ref_exists(bad_ref))
+ state->nr_bad = 1;
+
+ for_each_glob_ref_in(inc_nr, good_glob, "refs/bisect/",
+ (void *) &state->nr_good);
+
+ free(good_glob);
+ free(bad_ref);
+}
+
+__attribute__((format (printf, 1, 2)))
+static void bisect_log_printf(const char *fmt, ...)
+{
+ struct strbuf buf = STRBUF_INIT;
+ va_list ap;
+
+ va_start(ap, fmt);
+ strbuf_vaddf(&buf, fmt, ap);
+ va_end(ap);
+
+ printf("%s", buf.buf);
+ append_to_file(git_path_bisect_log(), "# %s", buf.buf);
+
+ strbuf_release(&buf);
+}
+
+static void bisect_print_status(const struct bisect_terms *terms)
+{
+ struct bisect_state state = { 0 };
+
+ bisect_status(&state, terms);
+
+ /* If we had both, we'd already be started, and shouldn't get here. */
+ if (state.nr_good && state.nr_bad)
+ return;
+
+ if (!state.nr_good && !state.nr_bad)
+ bisect_log_printf(_("status: waiting for both good and bad commits\n"));
+ else if (state.nr_good)
+ bisect_log_printf(Q_("status: waiting for bad commit, %d good commit known\n",
+ "status: waiting for bad commit, %d good commits known\n",
+ state.nr_good), state.nr_good);
+ else
+ bisect_log_printf(_("status: waiting for good commit(s), bad commit known\n"));
+}
+
+static int bisect_next_check(const struct bisect_terms *terms,
+ const char *current_term)
+{
+ struct bisect_state state = { 0 };
+ bisect_status(&state, terms);
+ return decide_next(terms, current_term, !state.nr_good, !state.nr_bad);
+}
+
+static int get_terms(struct bisect_terms *terms)
+{
+ struct strbuf str = STRBUF_INIT;
+ FILE *fp = NULL;
+ int res = 0;
+
+ fp = fopen(git_path_bisect_terms(), "r");
+ if (!fp) {
+ res = -1;
+ goto finish;
+ }
+
+ free_terms(terms);
+ strbuf_getline_lf(&str, fp);
+ terms->term_bad = strbuf_detach(&str, NULL);
+ strbuf_getline_lf(&str, fp);
+ terms->term_good = strbuf_detach(&str, NULL);
+
+finish:
+ if (fp)
+ fclose(fp);
+ strbuf_release(&str);
+ return res;
+}
+
+static int bisect_terms(struct bisect_terms *terms, const char *option)
+{
+ if (get_terms(terms))
+ return error(_("no terms defined"));
+
+ if (!option) {
+ printf(_("Your current terms are %s for the old state\n"
+ "and %s for the new state.\n"),
+ terms->term_good, terms->term_bad);
+ return 0;
+ }
+ if (one_of(option, "--term-good", "--term-old", NULL))
+ printf("%s\n", terms->term_good);
+ else if (one_of(option, "--term-bad", "--term-new", NULL))
+ printf("%s\n", terms->term_bad);
+ else
+ return error(_("invalid argument %s for 'git bisect terms'.\n"
+ "Supported options are: "
+ "--term-good|--term-old and "
+ "--term-bad|--term-new."), option);
+
+ return 0;
+}
+
+static int bisect_append_log_quoted(const char **argv)
+{
+ int res = 0;
+ FILE *fp = fopen(git_path_bisect_log(), "a");
+ struct strbuf orig_args = STRBUF_INIT;
+
+ if (!fp)
+ return -1;
+
+ if (fprintf(fp, "git bisect start") < 1) {
+ res = -1;
+ goto finish;
+ }
+
+ sq_quote_argv(&orig_args, argv);
+ if (fprintf(fp, "%s\n", orig_args.buf) < 1)
+ res = -1;
+
+finish:
+ fclose(fp);
+ strbuf_release(&orig_args);
+ return res;
+}
+
+static int add_bisect_ref(const char *refname, const struct object_id *oid,
+ int flags UNUSED, void *cb)
+{
+ struct add_bisect_ref_data *data = cb;
+
+ add_pending_oid(data->revs, refname, oid, data->object_flags);
+
+ return 0;
+}
+
+static int prepare_revs(struct bisect_terms *terms, struct rev_info *revs)
+{
+ int res = 0;
+ struct add_bisect_ref_data cb = { revs };
+ char *good = xstrfmt("%s-*", terms->term_good);
+
+ /*
+ * We cannot use terms->term_bad directly in
+ * for_each_glob_ref_in() and we have to append a '*' to it,
+ * otherwise for_each_glob_ref_in() will append '/' and '*'.
+ */
+ char *bad = xstrfmt("%s*", terms->term_bad);
+
+ /*
+ * It is important to reset the flags used by revision walks
+ * as the previous call to bisect_next_all() in turn
+ * sets up a revision walk.
+ */
+ reset_revision_walk();
+ init_revisions(revs, NULL);
+ setup_revisions(0, NULL, revs, NULL);
+ for_each_glob_ref_in(add_bisect_ref, bad, "refs/bisect/", &cb);
+ cb.object_flags = UNINTERESTING;
+ for_each_glob_ref_in(add_bisect_ref, good, "refs/bisect/", &cb);
+ if (prepare_revision_walk(revs))
+		res = error(_("revision walk setup failed"));
+
+ free(good);
+ free(bad);
+ return res;
+}
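+
+/*
+ * Editor's note, not part of the upstream file: an illustrative sketch of
+ * the refs the two globs above are meant to match, assuming the default
+ * "bad"/"good" terms. bisect_write() creates one ref per recorded rev:
+ *
+ *     refs/bisect/bad                  - the single bad rev (walked as interesting)
+ *     refs/bisect/good-<full hex oid>  - one per good rev (marked UNINTERESTING)
+ *     refs/bisect/skip-<full hex oid>  - one per skipped rev
+ *
+ * "bad*" therefore matches the lone bad ref without the "/" and "*" that
+ * for_each_glob_ref_in() would otherwise append, and "good-*" matches
+ * every good ref.
+ */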
+
+static int bisect_skipped_commits(struct bisect_terms *terms)
+{
+ int res;
+ FILE *fp = NULL;
+ struct rev_info revs;
+ struct commit *commit;
+ struct pretty_print_context pp = {0};
+ struct strbuf commit_name = STRBUF_INIT;
+
+ res = prepare_revs(terms, &revs);
+ if (res)
+ return res;
+
+ fp = fopen(git_path_bisect_log(), "a");
+ if (!fp)
+ return error_errno(_("could not open '%s' for appending"),
+ git_path_bisect_log());
+
+ if (fprintf(fp, "# only skipped commits left to test\n") < 0)
+ return error_errno(_("failed to write to '%s'"), git_path_bisect_log());
+
+ while ((commit = get_revision(&revs)) != NULL) {
+ strbuf_reset(&commit_name);
+ format_commit_message(commit, "%s",
+ &commit_name, &pp);
+ fprintf(fp, "# possible first %s commit: [%s] %s\n",
+ terms->term_bad, oid_to_hex(&commit->object.oid),
+ commit_name.buf);
+ }
+
+ /*
+ * Reset the flags used by revision walks in case
+ * there is another revision walk after this one.
+ */
+ reset_revision_walk();
+
+ strbuf_release(&commit_name);
+ release_revisions(&revs);
+ fclose(fp);
+ return 0;
+}
+
+static int bisect_successful(struct bisect_terms *terms)
+{
+ struct object_id oid;
+ struct commit *commit;
+ struct pretty_print_context pp = {0};
+ struct strbuf commit_name = STRBUF_INIT;
+	char *bad_ref = xstrfmt("refs/bisect/%s", terms->term_bad);
+ int res;
+
+ read_ref(bad_ref, &oid);
+ commit = lookup_commit_reference_by_name(bad_ref);
+ format_commit_message(commit, "%s", &commit_name, &pp);
+
+ res = append_to_file(git_path_bisect_log(), "# first %s commit: [%s] %s\n",
+ terms->term_bad, oid_to_hex(&commit->object.oid),
+ commit_name.buf);
+
+ strbuf_release(&commit_name);
+ free(bad_ref);
+ return res;
+}
+
+static enum bisect_error bisect_next(struct bisect_terms *terms, const char *prefix)
+{
+ enum bisect_error res;
+
+ if (bisect_autostart(terms))
+ return BISECT_FAILED;
+
+ if (bisect_next_check(terms, terms->term_good))
+ return BISECT_FAILED;
+
+ /* Perform all bisection computation */
+ res = bisect_next_all(the_repository, prefix);
+
+ if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) {
+ res = bisect_successful(terms);
+ return res ? res : BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND;
+ } else if (res == BISECT_ONLY_SKIPPED_LEFT) {
+ res = bisect_skipped_commits(terms);
+ return res ? res : BISECT_ONLY_SKIPPED_LEFT;
+ }
+ return res;
+}
+
+static enum bisect_error bisect_auto_next(struct bisect_terms *terms, const char *prefix)
+{
+ if (bisect_next_check(terms, NULL)) {
+ bisect_print_status(terms);
+ return BISECT_OK;
+ }
+
+ return bisect_next(terms, prefix);
+}
+
+static enum bisect_error bisect_start(struct bisect_terms *terms, const char **argv, int argc)
+{
+ int no_checkout = 0;
+ int first_parent_only = 0;
+ int i, has_double_dash = 0, must_write_terms = 0, bad_seen = 0;
+ int flags, pathspec_pos;
+ enum bisect_error res = BISECT_OK;
+ struct string_list revs = STRING_LIST_INIT_DUP;
+ struct string_list states = STRING_LIST_INIT_DUP;
+ struct strbuf start_head = STRBUF_INIT;
+ struct strbuf bisect_names = STRBUF_INIT;
+ struct object_id head_oid;
+ struct object_id oid;
+ const char *head;
+
+ if (is_bare_repository())
+ no_checkout = 1;
+
+ /*
+ * Check for one bad and then some good revisions
+ */
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], "--")) {
+ has_double_dash = 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(argv[i], "--")) {
+ break;
+ } else if (!strcmp(arg, "--no-checkout")) {
+ no_checkout = 1;
+ } else if (!strcmp(arg, "--first-parent")) {
+ first_parent_only = 1;
+ } else if (!strcmp(arg, "--term-good") ||
+ !strcmp(arg, "--term-old")) {
+ i++;
+ if (argc <= i)
+ return error(_("'' is not a valid term"));
+ must_write_terms = 1;
+ free((void *) terms->term_good);
+ terms->term_good = xstrdup(argv[i]);
+ } else if (skip_prefix(arg, "--term-good=", &arg) ||
+ skip_prefix(arg, "--term-old=", &arg)) {
+ must_write_terms = 1;
+ free((void *) terms->term_good);
+ terms->term_good = xstrdup(arg);
+ } else if (!strcmp(arg, "--term-bad") ||
+ !strcmp(arg, "--term-new")) {
+ i++;
+ if (argc <= i)
+ return error(_("'' is not a valid term"));
+ must_write_terms = 1;
+ free((void *) terms->term_bad);
+ terms->term_bad = xstrdup(argv[i]);
+ } else if (skip_prefix(arg, "--term-bad=", &arg) ||
+ skip_prefix(arg, "--term-new=", &arg)) {
+ must_write_terms = 1;
+ free((void *) terms->term_bad);
+ terms->term_bad = xstrdup(arg);
+ } else if (starts_with(arg, "--")) {
+ return error(_("unrecognized option: '%s'"), arg);
+ } else if (!get_oidf(&oid, "%s^{commit}", arg)) {
+ string_list_append(&revs, oid_to_hex(&oid));
+ } else if (has_double_dash) {
+ die(_("'%s' does not appear to be a valid "
+ "revision"), arg);
+ } else {
+ break;
+ }
+ }
+ pathspec_pos = i;
+
+ /*
+ * The user ran "git bisect start <sha1> <sha1>", hence did not
+ * explicitly specify the terms, but we are already starting to
+	 * set references named with the default terms, and won't be able
+	 * to change them afterwards.
+ */
+ if (revs.nr)
+ must_write_terms = 1;
+ for (i = 0; i < revs.nr; i++) {
+ if (bad_seen) {
+ string_list_append(&states, terms->term_good);
+ } else {
+ bad_seen = 1;
+ string_list_append(&states, terms->term_bad);
+ }
+ }
+
+ /*
+ * Verify HEAD
+ */
+ head = resolve_ref_unsafe("HEAD", 0, &head_oid, &flags);
+ if (!head)
+ if (get_oid("HEAD", &head_oid))
+ return error(_("bad HEAD - I need a HEAD"));
+
+ /*
+ * Check if we are bisecting
+ */
+ if (!is_empty_or_missing_file(git_path_bisect_start())) {
+ /* Reset to the rev from where we started */
+ strbuf_read_file(&start_head, git_path_bisect_start(), 0);
+ strbuf_trim(&start_head);
+ if (!no_checkout) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "checkout", start_head.buf,
+ "--", NULL);
+ if (run_command(&cmd)) {
+ res = error(_("checking out '%s' failed."
+ " Try 'git bisect start "
+ "<valid-branch>'."),
+ start_head.buf);
+ goto finish;
+ }
+ }
+ } else {
+ /* Get the rev from where we start. */
+ if (!get_oid(head, &head_oid) &&
+ !starts_with(head, "refs/heads/")) {
+ strbuf_reset(&start_head);
+ strbuf_addstr(&start_head, oid_to_hex(&head_oid));
+ } else if (!get_oid(head, &head_oid) &&
+ skip_prefix(head, "refs/heads/", &head)) {
+ /*
+ * This error message should only be triggered by
+ * cogito usage, and cogito users should understand
+ * it relates to cg-seek.
+ */
+ if (!is_empty_or_missing_file(git_path_head_name()))
+ return error(_("won't bisect on cg-seek'ed tree"));
+ strbuf_addstr(&start_head, head);
+ } else {
+ return error(_("bad HEAD - strange symbolic ref"));
+ }
+ }
+
+ /*
+ * Get rid of any old bisect state.
+ */
+ if (bisect_clean_state())
+ return BISECT_FAILED;
+
+ /*
+ * Write new start state
+ */
+ write_file(git_path_bisect_start(), "%s\n", start_head.buf);
+
+ if (first_parent_only)
+ write_file(git_path_bisect_first_parent(), "\n");
+
+ if (no_checkout) {
+ if (get_oid(start_head.buf, &oid) < 0) {
+ res = error(_("invalid ref: '%s'"), start_head.buf);
+ goto finish;
+ }
+ if (update_ref(NULL, "BISECT_HEAD", &oid, NULL, 0,
+ UPDATE_REFS_MSG_ON_ERR)) {
+ res = BISECT_FAILED;
+ goto finish;
+ }
+ }
+
+ if (pathspec_pos < argc - 1)
+ sq_quote_argv(&bisect_names, argv + pathspec_pos);
+ write_file(git_path_bisect_names(), "%s\n", bisect_names.buf);
+
+ for (i = 0; i < states.nr; i++)
+ if (bisect_write(states.items[i].string,
+ revs.items[i].string, terms, 1)) {
+ res = BISECT_FAILED;
+ goto finish;
+ }
+
+ if (must_write_terms && write_terms(terms->term_bad,
+ terms->term_good)) {
+ res = BISECT_FAILED;
+ goto finish;
+ }
+
+ res = bisect_append_log_quoted(argv);
+ if (res)
+ res = BISECT_FAILED;
+
+finish:
+ string_list_clear(&revs, 0);
+ string_list_clear(&states, 0);
+ strbuf_release(&start_head);
+ strbuf_release(&bisect_names);
+ if (res)
+ return res;
+
+ res = bisect_auto_next(terms, NULL);
+ if (!is_bisect_success(res))
+ bisect_clean_state();
+ return res;
+}
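+
+/*
+ * Editor's note, not upstream code: a hedged usage sketch of the option
+ * parsing above; all revision and path names are placeholders.
+ *
+ *     git bisect start v2.0 v1.0
+ *     git bisect start --term-new=broken --term-old=works v2.0 v1.0
+ *     git bisect start --first-parent --no-checkout HEAD v1.0 -- src/
+ *
+ * Plain arguments are resolved with "<arg>^{commit}"; the first one is
+ * recorded under the bad/new term and the rest under the good/old term,
+ * while the pathspec (if any) is quoted into BISECT_NAMES.
+ */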
+
+static inline int file_is_not_empty(const char *path)
+{
+ return !is_empty_or_missing_file(path);
+}
+
+static int bisect_autostart(struct bisect_terms *terms)
+{
+ int res;
+ const char *yesno;
+
+ if (file_is_not_empty(git_path_bisect_start()))
+ return 0;
+
+ fprintf_ln(stderr, _("You need to start by \"git bisect "
+ "start\"\n"));
+
+ if (!isatty(STDIN_FILENO))
+ return -1;
+
+ /*
+ * TRANSLATORS: Make sure to include [Y] and [n] in your
+ * translation. The program will only accept English input
+ * at this point.
+ */
+ yesno = git_prompt(_("Do you want me to do it for you "
+ "[Y/n]? "), PROMPT_ECHO);
+ res = tolower(*yesno) == 'n' ?
+ -1 : bisect_start(terms, empty_strvec, 0);
+
+ return res;
+}
+
+static enum bisect_error bisect_state(struct bisect_terms *terms, const char **argv,
+ int argc)
+{
+ const char *state;
+ int i, verify_expected = 1;
+ struct object_id oid, expected;
+ struct strbuf buf = STRBUF_INIT;
+ struct oid_array revs = OID_ARRAY_INIT;
+
+ if (!argc)
+ return error(_("Please call `--bisect-state` with at least one argument"));
+
+ if (bisect_autostart(terms))
+ return BISECT_FAILED;
+
+ state = argv[0];
+ if (check_and_set_terms(terms, state) ||
+ !one_of(state, terms->term_good, terms->term_bad, "skip", NULL))
+ return BISECT_FAILED;
+
+ argv++;
+ argc--;
+ if (argc > 1 && !strcmp(state, terms->term_bad))
+ return error(_("'git bisect %s' can take only one argument."), terms->term_bad);
+
+ if (argc == 0) {
+ const char *head = "BISECT_HEAD";
+ enum get_oid_result res_head = get_oid(head, &oid);
+
+ if (res_head == MISSING_OBJECT) {
+ head = "HEAD";
+ res_head = get_oid(head, &oid);
+ }
+
+ if (res_head)
+ error(_("Bad rev input: %s"), head);
+ oid_array_append(&revs, &oid);
+ }
+
+ /*
+ * All input revs must be checked before executing bisect_write()
+ * to discard junk revs.
+ */
+
+ for (; argc; argc--, argv++) {
+ struct commit *commit;
+
+ if (get_oid(*argv, &oid)){
+ error(_("Bad rev input: %s"), *argv);
+ oid_array_clear(&revs);
+ return BISECT_FAILED;
+ }
+
+ commit = lookup_commit_reference(the_repository, &oid);
+ if (!commit)
+ die(_("Bad rev input (not a commit): %s"), *argv);
+
+ oid_array_append(&revs, &commit->object.oid);
+ }
+
+ if (strbuf_read_file(&buf, git_path_bisect_expected_rev(), 0) < the_hash_algo->hexsz ||
+ get_oid_hex(buf.buf, &expected) < 0)
+ verify_expected = 0; /* Ignore invalid file contents */
+ strbuf_release(&buf);
+
+ for (i = 0; i < revs.nr; i++) {
+ if (bisect_write(state, oid_to_hex(&revs.oid[i]), terms, 0)) {
+ oid_array_clear(&revs);
+ return BISECT_FAILED;
+ }
+ if (verify_expected && !oideq(&revs.oid[i], &expected)) {
+ unlink_or_warn(git_path_bisect_ancestors_ok());
+ unlink_or_warn(git_path_bisect_expected_rev());
+ verify_expected = 0;
+ }
+ }
+
+ oid_array_clear(&revs);
+ return bisect_auto_next(terms, NULL);
+}
+
+static enum bisect_error bisect_log(void)
+{
+ int fd, status;
+ const char* filename = git_path_bisect_log();
+
+ if (is_empty_or_missing_file(filename))
+ return error(_("We are not bisecting."));
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return BISECT_FAILED;
+
+ status = copy_fd(fd, STDOUT_FILENO);
+ close(fd);
+ return status ? BISECT_FAILED : BISECT_OK;
+}
+
+static int process_replay_line(struct bisect_terms *terms, struct strbuf *line)
+{
+ const char *p = line->buf + strspn(line->buf, " \t");
+ char *word_end, *rev;
+
+ if ((!skip_prefix(p, "git bisect", &p) &&
+ !skip_prefix(p, "git-bisect", &p)) || !isspace(*p))
+ return 0;
+ p += strspn(p, " \t");
+
+ word_end = (char *)p + strcspn(p, " \t");
+ rev = word_end + strspn(word_end, " \t");
+ *word_end = '\0'; /* NUL-terminate the word */
+
+ get_terms(terms);
+ if (check_and_set_terms(terms, p))
+ return -1;
+
+ if (!strcmp(p, "start")) {
+ struct strvec argv = STRVEC_INIT;
+ int res;
+ sq_dequote_to_strvec(rev, &argv);
+ res = bisect_start(terms, argv.v, argv.nr);
+ strvec_clear(&argv);
+ return res;
+ }
+
+ if (one_of(p, terms->term_good,
+ terms->term_bad, "skip", NULL))
+ return bisect_write(p, rev, terms, 0);
+
+ if (!strcmp(p, "terms")) {
+ struct strvec argv = STRVEC_INIT;
+ int res;
+ sq_dequote_to_strvec(rev, &argv);
+ res = bisect_terms(terms, argv.nr == 1 ? argv.v[0] : NULL);
+ strvec_clear(&argv);
+ return res;
+ }
+ error(_("'%s'?? what are you talking about?"), p);
+
+ return -1;
+}
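+
+/*
+ * Editor's note, not upstream code: an illustrative replay file, which is
+ * simply a saved bisect log; object names below are placeholders.
+ *
+ *     git bisect start 'v2.0' 'v1.0'
+ *     # bad: [0123abcd...] placeholder subject
+ *     git bisect bad 0123abcd...
+ *     git bisect good 4567ef01...
+ *
+ * Lines that do not start with "git bisect" or "git-bisect" (such as the
+ * "#" comments written by log_commit() and bisect_log_printf()) are
+ * silently ignored by the parser above.
+ */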
+
+static enum bisect_error bisect_replay(struct bisect_terms *terms, const char *filename)
+{
+ FILE *fp = NULL;
+ enum bisect_error res = BISECT_OK;
+ struct strbuf line = STRBUF_INIT;
+
+ if (is_empty_or_missing_file(filename))
+ return error(_("cannot read file '%s' for replaying"), filename);
+
+ if (bisect_reset(NULL))
+ return BISECT_FAILED;
+
+ fp = fopen(filename, "r");
+ if (!fp)
+ return BISECT_FAILED;
+
+ while ((strbuf_getline(&line, fp) != EOF) && !res)
+ res = process_replay_line(terms, &line);
+
+ strbuf_release(&line);
+ fclose(fp);
+
+ if (res)
+ return BISECT_FAILED;
+
+ return bisect_auto_next(terms, NULL);
+}
+
+static enum bisect_error bisect_skip(struct bisect_terms *terms, const char **argv, int argc)
+{
+ int i;
+ enum bisect_error res;
+ struct strvec argv_state = STRVEC_INIT;
+
+ strvec_push(&argv_state, "skip");
+
+ for (i = 0; i < argc; i++) {
+ const char *dotdot = strstr(argv[i], "..");
+
+ if (dotdot) {
+ struct rev_info revs;
+ struct commit *commit;
+
+ init_revisions(&revs, NULL);
+ setup_revisions(2, argv + i - 1, &revs, NULL);
+
+ if (prepare_revision_walk(&revs))
+				die(_("revision walk setup failed"));
+ while ((commit = get_revision(&revs)) != NULL)
+ strvec_push(&argv_state,
+ oid_to_hex(&commit->object.oid));
+
+ reset_revision_walk();
+ release_revisions(&revs);
+ } else {
+ strvec_push(&argv_state, argv[i]);
+ }
+ }
+ res = bisect_state(terms, argv_state.v, argv_state.nr);
+
+ strvec_clear(&argv_state);
+ return res;
+}
+
+static int bisect_visualize(struct bisect_terms *terms, const char **argv, int argc)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (bisect_next_check(terms, NULL) != 0)
+ return BISECT_FAILED;
+
+ cmd.no_stdin = 1;
+ if (!argc) {
+ if ((getenv("DISPLAY") || getenv("SESSIONNAME") || getenv("MSYSTEM") ||
+ getenv("SECURITYSESSIONID")) && exists_in_PATH("gitk")) {
+ strvec_push(&cmd.args, "gitk");
+ } else {
+ strvec_push(&cmd.args, "log");
+ cmd.git_cmd = 1;
+ }
+ } else {
+ if (argv[0][0] == '-') {
+ strvec_push(&cmd.args, "log");
+ cmd.git_cmd = 1;
+ } else if (strcmp(argv[0], "tig") && !starts_with(argv[0], "git"))
+ cmd.git_cmd = 1;
+
+ strvec_pushv(&cmd.args, argv);
+ }
+
+ strvec_pushl(&cmd.args, "--bisect", "--", NULL);
+
+ strbuf_read_file(&sb, git_path_bisect_names(), 0);
+ sq_dequote_to_strvec(sb.buf, &cmd.args);
+ strbuf_release(&sb);
+
+ return run_command(&cmd);
+}
+
+static int get_first_good(const char *refname UNUSED,
+ const struct object_id *oid,
+ int flag UNUSED, void *cb_data)
+{
+ oidcpy(cb_data, oid);
+ return 1;
+}
+
+static int do_bisect_run(const char *command)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ printf(_("running %s\n"), command);
+ cmd.use_shell = 1;
+ strvec_push(&cmd.args, command);
+ return run_command(&cmd);
+}
+
+static int verify_good(const struct bisect_terms *terms, const char *command)
+{
+ int rc;
+ enum bisect_error res;
+ struct object_id good_rev;
+ struct object_id current_rev;
+ char *good_glob = xstrfmt("%s-*", terms->term_good);
+ int no_checkout = ref_exists("BISECT_HEAD");
+
+ for_each_glob_ref_in(get_first_good, good_glob, "refs/bisect/",
+ &good_rev);
+ free(good_glob);
+
+ if (read_ref(no_checkout ? "BISECT_HEAD" : "HEAD", &current_rev))
+ return -1;
+
+ res = bisect_checkout(&good_rev, no_checkout);
+ if (res != BISECT_OK)
+ return -1;
+
+ rc = do_bisect_run(command);
+
+ res = bisect_checkout(&current_rev, no_checkout);
+ if (res != BISECT_OK)
+ return -1;
+
+ return rc;
+}
+
+static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
+{
+ int res = BISECT_OK;
+ struct strbuf command = STRBUF_INIT;
+ const char *new_state;
+ int temporary_stdout_fd, saved_stdout;
+ int is_first_run = 1;
+
+ if (bisect_next_check(terms, NULL))
+ return BISECT_FAILED;
+
+ if (argc)
+ sq_quote_argv(&command, argv);
+ else {
+ error(_("bisect run failed: no command provided."));
+ return BISECT_FAILED;
+ }
+
+ while (1) {
+ res = do_bisect_run(command.buf);
+
+ /*
+ * Exit code 126 and 127 can either come from the shell
+ * if it was unable to execute or even find the script,
+ * or from the script itself. Check with a known-good
+ * revision to avoid trashing the bisect run due to a
+ * missing or non-executable script.
+ */
+ if (is_first_run && (res == 126 || res == 127)) {
+ int rc = verify_good(terms, command.buf);
+ is_first_run = 0;
+ if (rc < 0) {
+ error(_("unable to verify '%s' on good"
+ " revision"), command.buf);
+ res = BISECT_FAILED;
+ break;
+ }
+ if (rc == res) {
+ error(_("bogus exit code %d for good revision"),
+ rc);
+ res = BISECT_FAILED;
+ break;
+ }
+ }
+
+ if (res < 0 || 128 <= res) {
+ error(_("bisect run failed: exit code %d from"
+ " '%s' is < 0 or >= 128"), res, command.buf);
+ break;
+ }
+
+ if (res == 125)
+ new_state = "skip";
+ else if (!res)
+ new_state = terms->term_good;
+ else
+ new_state = terms->term_bad;
+
+ temporary_stdout_fd = open(git_path_bisect_run(), O_CREAT | O_WRONLY | O_TRUNC, 0666);
+
+ if (temporary_stdout_fd < 0) {
+ res = error_errno(_("cannot open file '%s' for writing"), git_path_bisect_run());
+ break;
+ }
+
+ fflush(stdout);
+ saved_stdout = dup(1);
+ dup2(temporary_stdout_fd, 1);
+
+ res = bisect_state(terms, &new_state, 1);
+
+ fflush(stdout);
+ dup2(saved_stdout, 1);
+ close(saved_stdout);
+ close(temporary_stdout_fd);
+
+ print_file_to_stdout(git_path_bisect_run());
+
+ if (res == BISECT_ONLY_SKIPPED_LEFT)
+ error(_("bisect run cannot continue any more"));
+ else if (res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) {
+			printf(_("bisect run success\n"));
+ res = BISECT_OK;
+ } else if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) {
+			printf(_("bisect found first bad commit\n"));
+ res = BISECT_OK;
+ } else if (res) {
+ error(_("bisect run failed: 'git bisect--helper --bisect-state"
+ " %s' exited with error code %d"), new_state, res);
+ } else {
+ continue;
+ }
+ break;
+ }
+
+ strbuf_release(&command);
+ return res;
+}
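+
+/*
+ * Editor's note, not upstream code: a usage sketch of the loop above.
+ * The script name is a placeholder.
+ *
+ *     git bisect start <bad> <good>
+ *     git bisect run ./test.sh
+ *
+ * Each iteration checks out a candidate and runs ./test.sh through the
+ * shell: exit 0 marks the revision with the good/old term, 125 skips it,
+ * any other value in 1..127 marks it with the bad/new term, and values
+ * outside 0..127 abort the run. Exit codes 126/127 from the very first
+ * run additionally trigger verify_good() above, to catch scripts that
+ * are missing or not executable.
+ */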
+
+static int cmd_bisect__reset(int argc, const char **argv, const char *prefix UNUSED)
+{
+ if (argc > 1)
+ return error(_("--bisect-reset requires either no argument or a commit"));
+ return bisect_reset(argc ? argv[0] : NULL);
+}
+
+static int cmd_bisect__terms(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc > 1)
+ return error(_("--bisect-terms requires 0 or 1 argument"));
+ res = bisect_terms(&terms, argc == 1 ? argv[0] : NULL);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__start(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ set_terms(&terms, "bad", "good");
+ res = bisect_start(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__next(int argc, const char **argv UNUSED, const char *prefix)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc)
+ return error(_("--bisect-next requires 0 arguments"));
+ get_terms(&terms);
+ res = bisect_next(&terms, prefix);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__state(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ set_terms(&terms, "bad", "good");
+ get_terms(&terms);
+ res = bisect_state(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__log(int argc, const char **argv UNUSED, const char *prefix UNUSED)
+{
+ if (argc)
+ return error(_("--bisect-log requires 0 arguments"));
+ return bisect_log();
+}
+
+static int cmd_bisect__replay(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc != 1)
+ return error(_("no logfile given"));
+ set_terms(&terms, "bad", "good");
+ res = bisect_replay(&terms, argv[0]);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__skip(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ set_terms(&terms, "bad", "good");
+ get_terms(&terms);
+ res = bisect_skip(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__visualize(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ get_terms(&terms);
+ res = bisect_visualize(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__run(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (!argc)
+ return error(_("bisect run failed: no command provided."));
+ get_terms(&terms);
+ res = bisect_run(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
+{
+ int res = 0;
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option options[] = {
+ OPT_SUBCOMMAND("reset", &fn, cmd_bisect__reset),
+ OPT_SUBCOMMAND("terms", &fn, cmd_bisect__terms),
+ OPT_SUBCOMMAND("start", &fn, cmd_bisect__start),
+ OPT_SUBCOMMAND("next", &fn, cmd_bisect__next),
+ OPT_SUBCOMMAND("state", &fn, cmd_bisect__state),
+ OPT_SUBCOMMAND("log", &fn, cmd_bisect__log),
+ OPT_SUBCOMMAND("replay", &fn, cmd_bisect__replay),
+ OPT_SUBCOMMAND("skip", &fn, cmd_bisect__skip),
+ OPT_SUBCOMMAND("visualize", &fn, cmd_bisect__visualize),
+ OPT_SUBCOMMAND("view", &fn, cmd_bisect__visualize),
+ OPT_SUBCOMMAND("run", &fn, cmd_bisect__run),
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options,
+ git_bisect_helper_usage, 0);
+
+ if (!fn)
+ usage_with_options(git_bisect_helper_usage, options);
+ argc--;
+ argv++;
+
+ res = fn(argc, argv, prefix);
+
+ /*
+ * Handle early success
+ * From check_merge_bases > check_good_are_ancestors_of_bad > bisect_next_all
+ */
+ if ((res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) || (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND))
+ res = BISECT_OK;
+
+ return -res;
+}
diff --git a/builtin/blame.c b/builtin/blame.c
new file mode 100644
index 0000000..71f925e
--- /dev/null
+++ b/builtin/blame.c
@@ -0,0 +1,1223 @@
+/*
+ * Blame
+ *
+ * Copyright (c) 2006, 2014 by its authors
+ * See COPYING for licensing conditions
+ */
+
+#include "cache.h"
+#include "config.h"
+#include "color.h"
+#include "builtin.h"
+#include "repository.h"
+#include "commit.h"
+#include "diff.h"
+#include "revision.h"
+#include "quote.h"
+#include "string-list.h"
+#include "mailmap.h"
+#include "parse-options.h"
+#include "prio-queue.h"
+#include "utf8.h"
+#include "userdiff.h"
+#include "line-range.h"
+#include "line-log.h"
+#include "dir.h"
+#include "progress.h"
+#include "object-store.h"
+#include "blame.h"
+#include "refs.h"
+#include "tag.h"
+
+static char blame_usage[] = N_("git blame [<options>] [<rev-opts>] [<rev>] [--] <file>");
+static char annotate_usage[] = N_("git annotate [<options>] [<rev-opts>] [<rev>] [--] <file>");
+
+static const char *blame_opt_usage[] = {
+ blame_usage,
+ "",
+ N_("<rev-opts> are documented in git-rev-list(1)"),
+ NULL
+};
+
+static const char *annotate_opt_usage[] = {
+ annotate_usage,
+ "",
+ N_("<rev-opts> are documented in git-rev-list(1)"),
+ NULL
+};
+
+static int longest_file;
+static int longest_author;
+static int max_orig_digits;
+static int max_digits;
+static int max_score_digits;
+static int show_root;
+static int reverse;
+static int blank_boundary;
+static int incremental;
+static int xdl_opts;
+static int abbrev = -1;
+static int no_whole_file_rename;
+static int show_progress;
+static char repeated_meta_color[COLOR_MAXLEN];
+static int coloring_mode;
+static struct string_list ignore_revs_file_list = STRING_LIST_INIT_NODUP;
+static int mark_unblamable_lines;
+static int mark_ignored_lines;
+
+static struct date_mode blame_date_mode = { DATE_ISO8601 };
+static size_t blame_date_width;
+
+static struct string_list mailmap = STRING_LIST_INIT_NODUP;
+
+#ifndef DEBUG_BLAME
+#define DEBUG_BLAME 0
+#endif
+
+static unsigned blame_move_score;
+static unsigned blame_copy_score;
+
+/* Remember to update object flag allocation in object.h */
+#define METAINFO_SHOWN (1u<<12)
+#define MORE_THAN_ONE_PATH (1u<<13)
+
+struct progress_info {
+ struct progress *progress;
+ int blamed_lines;
+};
+
+static const char *nth_line_cb(void *data, long lno)
+{
+ return blame_nth_line((struct blame_scoreboard *)data, lno);
+}
+
+/*
+ * Information on commits, used for output.
+ */
+struct commit_info {
+ struct strbuf author;
+ struct strbuf author_mail;
+ timestamp_t author_time;
+ struct strbuf author_tz;
+
+ /* filled only when asked for details */
+ struct strbuf committer;
+ struct strbuf committer_mail;
+ timestamp_t committer_time;
+ struct strbuf committer_tz;
+
+ struct strbuf summary;
+};
+
+#define COMMIT_INFO_INIT { \
+ .author = STRBUF_INIT, \
+ .author_mail = STRBUF_INIT, \
+ .author_tz = STRBUF_INIT, \
+ .committer = STRBUF_INIT, \
+ .committer_mail = STRBUF_INIT, \
+ .committer_tz = STRBUF_INIT, \
+ .summary = STRBUF_INIT, \
+}
+
+/*
+ * Parse author/committer line in the commit object buffer
+ */
+static void get_ac_line(const char *inbuf, const char *what,
+ struct strbuf *name, struct strbuf *mail,
+ timestamp_t *time, struct strbuf *tz)
+{
+ struct ident_split ident;
+ size_t len, maillen, namelen;
+ char *tmp, *endp;
+ const char *namebuf, *mailbuf;
+
+ tmp = strstr(inbuf, what);
+ if (!tmp)
+ goto error_out;
+ tmp += strlen(what);
+ endp = strchr(tmp, '\n');
+ if (!endp)
+ len = strlen(tmp);
+ else
+ len = endp - tmp;
+
+ if (split_ident_line(&ident, tmp, len)) {
+ error_out:
+ /* Ugh */
+ tmp = "(unknown)";
+ strbuf_addstr(name, tmp);
+ strbuf_addstr(mail, tmp);
+ strbuf_addstr(tz, tmp);
+ *time = 0;
+ return;
+ }
+
+ namelen = ident.name_end - ident.name_begin;
+ namebuf = ident.name_begin;
+
+ maillen = ident.mail_end - ident.mail_begin;
+ mailbuf = ident.mail_begin;
+
+ if (ident.date_begin && ident.date_end)
+ *time = strtoul(ident.date_begin, NULL, 10);
+ else
+ *time = 0;
+
+ if (ident.tz_begin && ident.tz_end)
+ strbuf_add(tz, ident.tz_begin, ident.tz_end - ident.tz_begin);
+ else
+ strbuf_addstr(tz, "(unknown)");
+
+ /*
+ * Now, convert both name and e-mail using mailmap
+ */
+ map_user(&mailmap, &mailbuf, &maillen,
+ &namebuf, &namelen);
+
+ strbuf_addf(mail, "<%.*s>", (int)maillen, mailbuf);
+ strbuf_add(name, namebuf, namelen);
+}
+
+static void commit_info_destroy(struct commit_info *ci)
+{
+
+ strbuf_release(&ci->author);
+ strbuf_release(&ci->author_mail);
+ strbuf_release(&ci->author_tz);
+ strbuf_release(&ci->committer);
+ strbuf_release(&ci->committer_mail);
+ strbuf_release(&ci->committer_tz);
+ strbuf_release(&ci->summary);
+}
+
+static void get_commit_info(struct commit *commit,
+ struct commit_info *ret,
+ int detailed)
+{
+ int len;
+ const char *subject, *encoding;
+ const char *message;
+
+ encoding = get_log_output_encoding();
+ message = logmsg_reencode(commit, NULL, encoding);
+ get_ac_line(message, "\nauthor ",
+ &ret->author, &ret->author_mail,
+ &ret->author_time, &ret->author_tz);
+
+ if (!detailed) {
+ unuse_commit_buffer(commit, message);
+ return;
+ }
+
+ get_ac_line(message, "\ncommitter ",
+ &ret->committer, &ret->committer_mail,
+ &ret->committer_time, &ret->committer_tz);
+
+ len = find_commit_subject(message, &subject);
+ if (len)
+ strbuf_add(&ret->summary, subject, len);
+ else
+ strbuf_addf(&ret->summary, "(%s)", oid_to_hex(&commit->object.oid));
+
+ unuse_commit_buffer(commit, message);
+}
+
+/*
+ * Write out any suspect information which depends on the path. This must be
+ * handled separately from emit_one_suspect_detail(), because a given commit
+ * may have changes in multiple paths. So this needs to appear each time
+ * we mention a new group.
+ *
+ * To allow LF and other nonportable characters in pathnames,
+ * they are c-style quoted as needed.
+ */
+static void write_filename_info(struct blame_origin *suspect)
+{
+ if (suspect->previous) {
+ struct blame_origin *prev = suspect->previous;
+ printf("previous %s ", oid_to_hex(&prev->commit->object.oid));
+ write_name_quoted(prev->path, stdout, '\n');
+ }
+ printf("filename ");
+ write_name_quoted(suspect->path, stdout, '\n');
+}
+
+/*
+ * Porcelain/Incremental format wants to show a lot of details per
+ * commit. Instead of repeating this every line, emit it only once,
+ * the first time each commit appears in the output (unless the
+ * user has specifically asked for us to repeat).
+ */
+static int emit_one_suspect_detail(struct blame_origin *suspect, int repeat)
+{
+ struct commit_info ci = COMMIT_INFO_INIT;
+
+ if (!repeat && (suspect->commit->object.flags & METAINFO_SHOWN))
+ return 0;
+
+ suspect->commit->object.flags |= METAINFO_SHOWN;
+ get_commit_info(suspect->commit, &ci, 1);
+ printf("author %s\n", ci.author.buf);
+ printf("author-mail %s\n", ci.author_mail.buf);
+ printf("author-time %"PRItime"\n", ci.author_time);
+ printf("author-tz %s\n", ci.author_tz.buf);
+ printf("committer %s\n", ci.committer.buf);
+ printf("committer-mail %s\n", ci.committer_mail.buf);
+ printf("committer-time %"PRItime"\n", ci.committer_time);
+ printf("committer-tz %s\n", ci.committer_tz.buf);
+ printf("summary %s\n", ci.summary.buf);
+ if (suspect->commit->object.flags & UNINTERESTING)
+ printf("boundary\n");
+
+ commit_info_destroy(&ci);
+
+ return 1;
+}
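+
+/*
+ * Editor's note, not upstream output: a sketch of one porcelain group as
+ * produced by emit_porcelain() below, with placeholder values.
+ *
+ *     <40-hex commit oid> 1 1 2
+ *     author A U Thor
+ *     author-mail <author@example.com>
+ *     author-time 1234567890
+ *     author-tz +0000
+ *     committer C O Mitter
+ *     committer-mail <committer@example.com>
+ *     committer-time 1234567890
+ *     committer-tz +0000
+ *     summary placeholder subject line
+ *     filename foo.c
+ *     <TAB><line contents>
+ *
+ * The detail fields are printed only the first time a commit is seen,
+ * unless --line-porcelain (the "repeat" flag above) asks for them on
+ * every line.
+ */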
+
+/*
+ * The blame_entry is found to be guilty for the range.
+ * Show it in incremental output.
+ */
+static void found_guilty_entry(struct blame_entry *ent, void *data)
+{
+ struct progress_info *pi = (struct progress_info *)data;
+
+ if (incremental) {
+ struct blame_origin *suspect = ent->suspect;
+
+ printf("%s %d %d %d\n",
+ oid_to_hex(&suspect->commit->object.oid),
+ ent->s_lno + 1, ent->lno + 1, ent->num_lines);
+ emit_one_suspect_detail(suspect, 0);
+ write_filename_info(suspect);
+ maybe_flush_or_die(stdout, "stdout");
+ }
+ pi->blamed_lines += ent->num_lines;
+ display_progress(pi->progress, pi->blamed_lines);
+}
+
+static const char *format_time(timestamp_t time, const char *tz_str,
+ int show_raw_time)
+{
+ static struct strbuf time_buf = STRBUF_INIT;
+
+ strbuf_reset(&time_buf);
+ if (show_raw_time) {
+ strbuf_addf(&time_buf, "%"PRItime" %s", time, tz_str);
+ }
+ else {
+ const char *time_str;
+ size_t time_width;
+ int tz;
+ tz = atoi(tz_str);
+ time_str = show_date(time, tz, &blame_date_mode);
+ strbuf_addstr(&time_buf, time_str);
+ /*
+		 * Add space padding to time_buf to display a fixed-width
+		 * string, and use time_width for display-width calibration.
+ */
+ for (time_width = utf8_strwidth(time_str);
+ time_width < blame_date_width;
+ time_width++)
+ strbuf_addch(&time_buf, ' ');
+ }
+ return time_buf.buf;
+}
+
+#define OUTPUT_ANNOTATE_COMPAT (1U<<0)
+#define OUTPUT_LONG_OBJECT_NAME (1U<<1)
+#define OUTPUT_RAW_TIMESTAMP (1U<<2)
+#define OUTPUT_PORCELAIN (1U<<3)
+#define OUTPUT_SHOW_NAME (1U<<4)
+#define OUTPUT_SHOW_NUMBER (1U<<5)
+#define OUTPUT_SHOW_SCORE (1U<<6)
+#define OUTPUT_NO_AUTHOR (1U<<7)
+#define OUTPUT_SHOW_EMAIL (1U<<8)
+#define OUTPUT_LINE_PORCELAIN (1U<<9)
+#define OUTPUT_COLOR_LINE (1U<<10)
+#define OUTPUT_SHOW_AGE_WITH_COLOR (1U<<11)
+
+static void emit_porcelain_details(struct blame_origin *suspect, int repeat)
+{
+ if (emit_one_suspect_detail(suspect, repeat) ||
+ (suspect->commit->object.flags & MORE_THAN_ONE_PATH))
+ write_filename_info(suspect);
+}
+
+static void emit_porcelain(struct blame_scoreboard *sb, struct blame_entry *ent,
+ int opt)
+{
+ int repeat = opt & OUTPUT_LINE_PORCELAIN;
+ int cnt;
+ const char *cp;
+ struct blame_origin *suspect = ent->suspect;
+ char hex[GIT_MAX_HEXSZ + 1];
+
+ oid_to_hex_r(hex, &suspect->commit->object.oid);
+ printf("%s %d %d %d\n",
+ hex,
+ ent->s_lno + 1,
+ ent->lno + 1,
+ ent->num_lines);
+ emit_porcelain_details(suspect, repeat);
+
+ cp = blame_nth_line(sb, ent->lno);
+ for (cnt = 0; cnt < ent->num_lines; cnt++) {
+ char ch;
+ if (cnt) {
+ printf("%s %d %d\n", hex,
+ ent->s_lno + 1 + cnt,
+ ent->lno + 1 + cnt);
+ if (repeat)
+ emit_porcelain_details(suspect, 1);
+ }
+ putchar('\t');
+ do {
+ ch = *cp++;
+ putchar(ch);
+ } while (ch != '\n' &&
+ cp < sb->final_buf + sb->final_buf_size);
+ }
+
+ if (sb->final_buf_size && cp[-1] != '\n')
+ putchar('\n');
+}
+
+static struct color_field {
+ timestamp_t hop;
+ char col[COLOR_MAXLEN];
+} *colorfield;
+static int colorfield_nr, colorfield_alloc;
+
+static void parse_color_fields(const char *s)
+{
+ struct string_list l = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+ enum { EXPECT_DATE, EXPECT_COLOR } next = EXPECT_COLOR;
+
+ colorfield_nr = 0;
+
+ /* Ideally this would be stripped and split at the same time? */
+ string_list_split(&l, s, ',', -1);
+ ALLOC_GROW(colorfield, colorfield_nr + 1, colorfield_alloc);
+
+ for_each_string_list_item(item, &l) {
+ switch (next) {
+ case EXPECT_DATE:
+ colorfield[colorfield_nr].hop = approxidate(item->string);
+ next = EXPECT_COLOR;
+ colorfield_nr++;
+ ALLOC_GROW(colorfield, colorfield_nr + 1, colorfield_alloc);
+ break;
+ case EXPECT_COLOR:
+ if (color_parse(item->string, colorfield[colorfield_nr].col))
+ die(_("expecting a color: %s"), item->string);
+ next = EXPECT_DATE;
+ break;
+ }
+ }
+
+ if (next == EXPECT_COLOR)
+ die(_("must end with a color"));
+
+ colorfield[colorfield_nr].hop = TIME_MAX;
+ string_list_clear(&l, 0);
+}
+
+static void setup_default_color_by_age(void)
+{
+ parse_color_fields("blue,12 month ago,white,1 month ago,red");
+}
+
+static void determine_line_heat(struct commit_info *ci, const char **dest_color)
+{
+ int i = 0;
+
+ while (i < colorfield_nr && ci->author_time > colorfield[i].hop)
+ i++;
+
+ *dest_color = colorfield[i].col;
+}
+
+static void emit_other(struct blame_scoreboard *sb, struct blame_entry *ent, int opt)
+{
+ int cnt;
+ const char *cp;
+ struct blame_origin *suspect = ent->suspect;
+ struct commit_info ci = COMMIT_INFO_INIT;
+ char hex[GIT_MAX_HEXSZ + 1];
+ int show_raw_time = !!(opt & OUTPUT_RAW_TIMESTAMP);
+ const char *default_color = NULL, *color = NULL, *reset = NULL;
+
+ get_commit_info(suspect->commit, &ci, 1);
+ oid_to_hex_r(hex, &suspect->commit->object.oid);
+
+ cp = blame_nth_line(sb, ent->lno);
+
+ if (opt & OUTPUT_SHOW_AGE_WITH_COLOR) {
+ determine_line_heat(&ci, &default_color);
+ color = default_color;
+ reset = GIT_COLOR_RESET;
+ }
+
+ for (cnt = 0; cnt < ent->num_lines; cnt++) {
+ char ch;
+ int length = (opt & OUTPUT_LONG_OBJECT_NAME) ? the_hash_algo->hexsz : abbrev;
+
+ if (opt & OUTPUT_COLOR_LINE) {
+ if (cnt > 0) {
+ color = repeated_meta_color;
+ reset = GIT_COLOR_RESET;
+ } else {
+ color = default_color ? default_color : NULL;
+ reset = default_color ? GIT_COLOR_RESET : NULL;
+ }
+ }
+ if (color)
+ fputs(color, stdout);
+
+ if (suspect->commit->object.flags & UNINTERESTING) {
+ if (blank_boundary)
+ memset(hex, ' ', length);
+ else if (!(opt & OUTPUT_ANNOTATE_COMPAT)) {
+ length--;
+ putchar('^');
+ }
+ }
+
+ if (mark_unblamable_lines && ent->unblamable) {
+ length--;
+ putchar('*');
+ }
+ if (mark_ignored_lines && ent->ignored) {
+ length--;
+ putchar('?');
+ }
+ printf("%.*s", length, hex);
+ if (opt & OUTPUT_ANNOTATE_COMPAT) {
+ const char *name;
+ if (opt & OUTPUT_SHOW_EMAIL)
+ name = ci.author_mail.buf;
+ else
+ name = ci.author.buf;
+ printf("\t(%10s\t%10s\t%d)", name,
+ format_time(ci.author_time, ci.author_tz.buf,
+ show_raw_time),
+ ent->lno + 1 + cnt);
+ } else {
+ if (opt & OUTPUT_SHOW_SCORE)
+ printf(" %*d %02d",
+ max_score_digits, ent->score,
+ ent->suspect->refcnt);
+ if (opt & OUTPUT_SHOW_NAME)
+ printf(" %-*.*s", longest_file, longest_file,
+ suspect->path);
+ if (opt & OUTPUT_SHOW_NUMBER)
+ printf(" %*d", max_orig_digits,
+ ent->s_lno + 1 + cnt);
+
+ if (!(opt & OUTPUT_NO_AUTHOR)) {
+ const char *name;
+ int pad;
+ if (opt & OUTPUT_SHOW_EMAIL)
+ name = ci.author_mail.buf;
+ else
+ name = ci.author.buf;
+ pad = longest_author - utf8_strwidth(name);
+ printf(" (%s%*s %10s",
+ name, pad, "",
+ format_time(ci.author_time,
+ ci.author_tz.buf,
+ show_raw_time));
+ }
+ printf(" %*d) ",
+ max_digits, ent->lno + 1 + cnt);
+ }
+ if (reset)
+ fputs(reset, stdout);
+ do {
+ ch = *cp++;
+ putchar(ch);
+ } while (ch != '\n' &&
+ cp < sb->final_buf + sb->final_buf_size);
+ }
+
+ if (sb->final_buf_size && cp[-1] != '\n')
+ putchar('\n');
+
+ commit_info_destroy(&ci);
+}
+
+static void output(struct blame_scoreboard *sb, int option)
+{
+ struct blame_entry *ent;
+
+ if (option & OUTPUT_PORCELAIN) {
+ for (ent = sb->ent; ent; ent = ent->next) {
+ int count = 0;
+ struct blame_origin *suspect;
+ struct commit *commit = ent->suspect->commit;
+ if (commit->object.flags & MORE_THAN_ONE_PATH)
+ continue;
+ for (suspect = get_blame_suspects(commit); suspect; suspect = suspect->next) {
+ if (suspect->guilty && count++) {
+ commit->object.flags |= MORE_THAN_ONE_PATH;
+ break;
+ }
+ }
+ }
+ }
+
+ for (ent = sb->ent; ent; ent = ent->next) {
+ if (option & OUTPUT_PORCELAIN)
+ emit_porcelain(sb, ent, option);
+ else {
+ emit_other(sb, ent, option);
+ }
+ }
+}
+
+/*
+ * Add phony grafts for use with -S; this is primarily to
+ * support git's cvsserver that wants to give a linear history
+ * to its clients.
+ */
+static int read_ancestry(const char *graft_file)
+{
+ FILE *fp = fopen_or_warn(graft_file, "r");
+ struct strbuf buf = STRBUF_INIT;
+ if (!fp)
+ return -1;
+ while (!strbuf_getwholeline(&buf, fp, '\n')) {
+ /* The format is just "Commit Parent1 Parent2 ...\n" */
+ struct commit_graft *graft = read_graft_line(&buf);
+ if (graft)
+ register_commit_graft(the_repository, graft, 0);
+ }
+ fclose(fp);
+ strbuf_release(&buf);
+ return 0;
+}
+
+static int update_auto_abbrev(int auto_abbrev, struct blame_origin *suspect)
+{
+ const char *uniq = find_unique_abbrev(&suspect->commit->object.oid,
+ auto_abbrev);
+ int len = strlen(uniq);
+ if (auto_abbrev < len)
+ return len;
+ return auto_abbrev;
+}
+
+/*
+ * How many columns do we need to show line numbers, authors,
+ * and filenames?
+ */
+static void find_alignment(struct blame_scoreboard *sb, int *option)
+{
+ int longest_src_lines = 0;
+ int longest_dst_lines = 0;
+ unsigned largest_score = 0;
+ struct blame_entry *e;
+ int compute_auto_abbrev = (abbrev < 0);
+ int auto_abbrev = DEFAULT_ABBREV;
+
+ for (e = sb->ent; e; e = e->next) {
+ struct blame_origin *suspect = e->suspect;
+ int num;
+
+ if (compute_auto_abbrev)
+ auto_abbrev = update_auto_abbrev(auto_abbrev, suspect);
+ if (strcmp(suspect->path, sb->path))
+ *option |= OUTPUT_SHOW_NAME;
+ num = strlen(suspect->path);
+ if (longest_file < num)
+ longest_file = num;
+ if (!(suspect->commit->object.flags & METAINFO_SHOWN)) {
+ struct commit_info ci = COMMIT_INFO_INIT;
+ suspect->commit->object.flags |= METAINFO_SHOWN;
+ get_commit_info(suspect->commit, &ci, 1);
+ if (*option & OUTPUT_SHOW_EMAIL)
+ num = utf8_strwidth(ci.author_mail.buf);
+ else
+ num = utf8_strwidth(ci.author.buf);
+ if (longest_author < num)
+ longest_author = num;
+ commit_info_destroy(&ci);
+ }
+ num = e->s_lno + e->num_lines;
+ if (longest_src_lines < num)
+ longest_src_lines = num;
+ num = e->lno + e->num_lines;
+ if (longest_dst_lines < num)
+ longest_dst_lines = num;
+ if (largest_score < blame_entry_score(sb, e))
+ largest_score = blame_entry_score(sb, e);
+ }
+ max_orig_digits = decimal_width(longest_src_lines);
+ max_digits = decimal_width(longest_dst_lines);
+ max_score_digits = decimal_width(largest_score);
+
+ if (compute_auto_abbrev)
+ /* one more abbrev length is needed for the boundary commit */
+ abbrev = auto_abbrev + 1;
+}
+
+static void sanity_check_on_fail(struct blame_scoreboard *sb, int baa)
+{
+ int opt = OUTPUT_SHOW_SCORE | OUTPUT_SHOW_NUMBER | OUTPUT_SHOW_NAME;
+ find_alignment(sb, &opt);
+ output(sb, opt);
+ die("Baa %d!", baa);
+}
+
+static unsigned parse_score(const char *arg)
+{
+ char *end;
+ unsigned long score = strtoul(arg, &end, 10);
+ if (*end)
+ return 0;
+ return score;
+}
+
+static const char *add_prefix(const char *prefix, const char *path)
+{
+ return prefix_path(prefix, prefix ? strlen(prefix) : 0, path);
+}
+
+static int git_blame_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "blame.showroot")) {
+ show_root = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "blame.blankboundary")) {
+ blank_boundary = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "blame.showemail")) {
+ int *output_option = cb;
+ if (git_config_bool(var, value))
+ *output_option |= OUTPUT_SHOW_EMAIL;
+ else
+ *output_option &= ~OUTPUT_SHOW_EMAIL;
+ return 0;
+ }
+ if (!strcmp(var, "blame.date")) {
+ if (!value)
+ return config_error_nonbool(var);
+ parse_date_format(value, &blame_date_mode);
+ return 0;
+ }
+ if (!strcmp(var, "blame.ignorerevsfile")) {
+ const char *str;
+ int ret;
+
+ ret = git_config_pathname(&str, var, value);
+ if (ret)
+ return ret;
+ string_list_insert(&ignore_revs_file_list, str);
+ return 0;
+ }
+ if (!strcmp(var, "blame.markunblamablelines")) {
+ mark_unblamable_lines = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "blame.markignoredlines")) {
+ mark_ignored_lines = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "color.blame.repeatedlines")) {
+ if (color_parse_mem(value, strlen(value), repeated_meta_color))
+ warning(_("invalid value for '%s': '%s'"),
+ "color.blame.repeatedLines", value);
+ return 0;
+ }
+ if (!strcmp(var, "color.blame.highlightrecent")) {
+ parse_color_fields(value);
+ return 0;
+ }
+
+ if (!strcmp(var, "blame.coloring")) {
+ if (!strcmp(value, "repeatedLines")) {
+ coloring_mode |= OUTPUT_COLOR_LINE;
+ } else if (!strcmp(value, "highlightRecent")) {
+ coloring_mode |= OUTPUT_SHOW_AGE_WITH_COLOR;
+ } else if (!strcmp(value, "none")) {
+ coloring_mode &= ~(OUTPUT_COLOR_LINE |
+ OUTPUT_SHOW_AGE_WITH_COLOR);
+ } else {
+ warning(_("invalid value for '%s': '%s'"),
+ "blame.coloring", value);
+ return 0;
+ }
+ }
+
+ if (git_diff_heuristic_config(var, value, cb) < 0)
+ return -1;
+ if (userdiff_config(var, value) < 0)
+ return -1;
+
+ return git_default_config(var, value, cb);
+}
+
+static int blame_copy_callback(const struct option *option, const char *arg, int unset)
+{
+ int *opt = option->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ /*
+ * -C enables copy from removed files;
+ * -C -C enables copy from existing files, but only
+ * when blaming a new file;
+ * -C -C -C enables copy from existing files for
+ * everybody
+ */
+ if (*opt & PICKAXE_BLAME_COPY_HARDER)
+ *opt |= PICKAXE_BLAME_COPY_HARDEST;
+ if (*opt & PICKAXE_BLAME_COPY)
+ *opt |= PICKAXE_BLAME_COPY_HARDER;
+ *opt |= PICKAXE_BLAME_COPY | PICKAXE_BLAME_MOVE;
+
+ if (arg)
+ blame_copy_score = parse_score(arg);
+ return 0;
+}
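+
+/*
+ * Editor's note, not upstream code: the escalation handled above
+ * corresponds to the following command lines (file name is a placeholder):
+ *
+ *     git blame -C foo.c           # copies from files removed in the same commit
+ *     git blame -C -C foo.c        # also from existing files, when blaming a new file
+ *     git blame -C -C -C foo.c     # from existing files everywhere
+ *
+ * An optional score threshold may be attached, e.g. "-C20", and is parsed
+ * by parse_score() above.
+ */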
+
+static int blame_move_callback(const struct option *option, const char *arg, int unset)
+{
+ int *opt = option->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ *opt |= PICKAXE_BLAME_MOVE;
+
+ if (arg)
+ blame_move_score = parse_score(arg);
+ return 0;
+}
+
+static int is_a_rev(const char *name)
+{
+ struct object_id oid;
+
+ if (get_oid(name, &oid))
+ return 0;
+ return OBJ_NONE < oid_object_info(the_repository, &oid, NULL);
+}
+
+static int peel_to_commit_oid(struct object_id *oid_ret, void *cbdata)
+{
+ struct repository *r = ((struct blame_scoreboard *)cbdata)->repo;
+ struct object_id oid;
+
+ oidcpy(&oid, oid_ret);
+ while (1) {
+ struct object *obj;
+ int kind = oid_object_info(r, &oid, NULL);
+ if (kind == OBJ_COMMIT) {
+ oidcpy(oid_ret, &oid);
+ return 0;
+ }
+ if (kind != OBJ_TAG)
+ return -1;
+ obj = deref_tag(r, parse_object(r, &oid), NULL, 0);
+ if (!obj)
+ return -1;
+ oidcpy(&oid, &obj->oid);
+ }
+}
+
+static void build_ignorelist(struct blame_scoreboard *sb,
+ struct string_list *ignore_revs_file_list,
+ struct string_list *ignore_rev_list)
+{
+ struct string_list_item *i;
+ struct object_id oid;
+
+ oidset_init(&sb->ignore_list, 0);
+ for_each_string_list_item(i, ignore_revs_file_list) {
+ if (!strcmp(i->string, ""))
+ oidset_clear(&sb->ignore_list);
+ else
+ oidset_parse_file_carefully(&sb->ignore_list, i->string,
+ peel_to_commit_oid, sb);
+ }
+ for_each_string_list_item(i, ignore_rev_list) {
+ if (get_oid_committish(i->string, &oid) ||
+ peel_to_commit_oid(&oid, sb))
+ die(_("cannot find revision %s to ignore"), i->string);
+ oidset_insert(&sb->ignore_list, &oid);
+ }
+}
+
+int cmd_blame(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info revs;
+ const char *path;
+ struct blame_scoreboard sb;
+ struct blame_origin *o;
+ struct blame_entry *ent = NULL;
+ long dashdash_pos, lno;
+ struct progress_info pi = { NULL, 0 };
+
+ struct string_list range_list = STRING_LIST_INIT_NODUP;
+ struct string_list ignore_rev_list = STRING_LIST_INIT_NODUP;
+ int output_option = 0, opt = 0;
+ int show_stats = 0;
+ const char *revs_file = NULL;
+ const char *contents_from = NULL;
+ const struct option options[] = {
+ OPT_BOOL(0, "incremental", &incremental, N_("show blame entries as we find them, incrementally")),
+ OPT_BOOL('b', NULL, &blank_boundary, N_("do not show object names of boundary commits (Default: off)")),
+ OPT_BOOL(0, "root", &show_root, N_("do not treat root commits as boundaries (Default: off)")),
+ OPT_BOOL(0, "show-stats", &show_stats, N_("show work cost statistics")),
+ OPT_BOOL(0, "progress", &show_progress, N_("force progress reporting")),
+ OPT_BIT(0, "score-debug", &output_option, N_("show output score for blame entries"), OUTPUT_SHOW_SCORE),
+ OPT_BIT('f', "show-name", &output_option, N_("show original filename (Default: auto)"), OUTPUT_SHOW_NAME),
+ OPT_BIT('n', "show-number", &output_option, N_("show original linenumber (Default: off)"), OUTPUT_SHOW_NUMBER),
+ OPT_BIT('p', "porcelain", &output_option, N_("show in a format designed for machine consumption"), OUTPUT_PORCELAIN),
+ OPT_BIT(0, "line-porcelain", &output_option, N_("show porcelain format with per-line commit information"), OUTPUT_PORCELAIN|OUTPUT_LINE_PORCELAIN),
+ OPT_BIT('c', NULL, &output_option, N_("use the same output mode as git-annotate (Default: off)"), OUTPUT_ANNOTATE_COMPAT),
+ OPT_BIT('t', NULL, &output_option, N_("show raw timestamp (Default: off)"), OUTPUT_RAW_TIMESTAMP),
+ OPT_BIT('l', NULL, &output_option, N_("show long commit SHA1 (Default: off)"), OUTPUT_LONG_OBJECT_NAME),
+ OPT_BIT('s', NULL, &output_option, N_("suppress author name and timestamp (Default: off)"), OUTPUT_NO_AUTHOR),
+ OPT_BIT('e', "show-email", &output_option, N_("show author email instead of name (Default: off)"), OUTPUT_SHOW_EMAIL),
+ OPT_BIT('w', NULL, &xdl_opts, N_("ignore whitespace differences"), XDF_IGNORE_WHITESPACE),
+ OPT_STRING_LIST(0, "ignore-rev", &ignore_rev_list, N_("rev"), N_("ignore <rev> when blaming")),
+ OPT_STRING_LIST(0, "ignore-revs-file", &ignore_revs_file_list, N_("file"), N_("ignore revisions from <file>")),
+ OPT_BIT(0, "color-lines", &output_option, N_("color redundant metadata from previous line differently"), OUTPUT_COLOR_LINE),
+ OPT_BIT(0, "color-by-age", &output_option, N_("color lines by age"), OUTPUT_SHOW_AGE_WITH_COLOR),
+ OPT_BIT(0, "minimal", &xdl_opts, N_("spend extra cycles to find better match"), XDF_NEED_MINIMAL),
+ OPT_STRING('S', NULL, &revs_file, N_("file"), N_("use revisions from <file> instead of calling git-rev-list")),
+ OPT_STRING(0, "contents", &contents_from, N_("file"), N_("use <file>'s contents as the final image")),
+ OPT_CALLBACK_F('C', NULL, &opt, N_("score"), N_("find line copies within and across files"), PARSE_OPT_OPTARG, blame_copy_callback),
+ OPT_CALLBACK_F('M', NULL, &opt, N_("score"), N_("find line movements within and across files"), PARSE_OPT_OPTARG, blame_move_callback),
+ OPT_STRING_LIST('L', NULL, &range_list, N_("range"),
+ N_("process only line range <start>,<end> or function :<funcname>")),
+ OPT__ABBREV(&abbrev),
+ OPT_END()
+ };
+
+ struct parse_opt_ctx_t ctx;
+ int cmd_is_annotate = !strcmp(argv[0], "annotate");
+ struct range_set ranges;
+ unsigned int range_i;
+ long anchor;
+ const int hexsz = the_hash_algo->hexsz;
+ long num_lines = 0;
+ const char *str_usage = cmd_is_annotate ? annotate_usage : blame_usage;
+ const char **opt_usage = cmd_is_annotate ? annotate_opt_usage : blame_opt_usage;
+
+ setup_default_color_by_age();
+ git_config(git_blame_config, &output_option);
+ repo_init_revisions(the_repository, &revs, NULL);
+ revs.date_mode = blame_date_mode;
+ revs.diffopt.flags.allow_textconv = 1;
+ revs.diffopt.flags.follow_renames = 1;
+
+ save_commit_buffer = 0;
+ dashdash_pos = 0;
+ show_progress = -1;
+
+ parse_options_start(&ctx, argc, argv, prefix, options,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0);
+ for (;;) {
+ switch (parse_options_step(&ctx, options, opt_usage)) {
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_UNKNOWN:
+ break;
+ case PARSE_OPT_HELP:
+ case PARSE_OPT_ERROR:
+ case PARSE_OPT_SUBCOMMAND:
+ exit(129);
+ case PARSE_OPT_COMPLETE:
+ exit(0);
+ case PARSE_OPT_DONE:
+ if (ctx.argv[0])
+ dashdash_pos = ctx.cpidx;
+ goto parse_done;
+ }
+
+ if (!strcmp(ctx.argv[0], "--reverse")) {
+ ctx.argv[0] = "--children";
+ reverse = 1;
+ }
+ parse_revision_opt(&revs, &ctx, options, opt_usage);
+ }
+parse_done:
+ revision_opts_finish(&revs);
+ no_whole_file_rename = !revs.diffopt.flags.follow_renames;
+ xdl_opts |= revs.diffopt.xdl_opts & XDF_INDENT_HEURISTIC;
+ revs.diffopt.flags.follow_renames = 0;
+ argc = parse_options_end(&ctx);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ if (incremental || (output_option & OUTPUT_PORCELAIN)) {
+ if (show_progress > 0)
+ die(_("--progress can't be used with --incremental or porcelain formats"));
+ show_progress = 0;
+ } else if (show_progress < 0)
+ show_progress = isatty(2);
+
+ if (0 < abbrev && abbrev < hexsz)
+ /* one more abbrev length is needed for the boundary commit */
+ abbrev++;
+ else if (!abbrev)
+ abbrev = hexsz;
+
+ if (revs_file && read_ancestry(revs_file))
+ die_errno("reading graft file '%s' failed", revs_file);
+
+ if (cmd_is_annotate) {
+ output_option |= OUTPUT_ANNOTATE_COMPAT;
+ blame_date_mode.type = DATE_ISO8601;
+ } else {
+ blame_date_mode = revs.date_mode;
+ }
+
+ /* The maximum width used to show the dates */
+ switch (blame_date_mode.type) {
+ case DATE_RFC2822:
+ blame_date_width = sizeof("Thu, 19 Oct 2006 16:00:04 -0700");
+ break;
+ case DATE_ISO8601_STRICT:
+ blame_date_width = sizeof("2006-10-19T16:00:04-07:00");
+ break;
+ case DATE_ISO8601:
+ blame_date_width = sizeof("2006-10-19 16:00:04 -0700");
+ break;
+ case DATE_RAW:
+ blame_date_width = sizeof("1161298804 -0700");
+ break;
+ case DATE_UNIX:
+ blame_date_width = sizeof("1161298804");
+ break;
+ case DATE_SHORT:
+ blame_date_width = sizeof("2006-10-19");
+ break;
+ case DATE_RELATIVE:
+ /*
+ * TRANSLATORS: This string is used to tell us the
+ * maximum display width for a relative timestamp in
+ * "git blame" output. For C locale, "4 years, 11
+ * months ago", which takes 22 places, is the longest
+ * among various forms of relative timestamps, but
+ * your language may need more or fewer display
+ * columns.
+ */
+ blame_date_width = utf8_strwidth(_("4 years, 11 months ago")) + 1; /* add the null */
+ break;
+ case DATE_HUMAN:
+ /* If the year is shown, no time is shown */
+ blame_date_width = sizeof("Thu Oct 19 16:00");
+ break;
+ case DATE_NORMAL:
+ blame_date_width = sizeof("Thu Oct 19 16:00:04 2006 -0700");
+ break;
+ case DATE_STRFTIME:
+ blame_date_width = strlen(show_date(0, 0, &blame_date_mode)) + 1; /* add the null */
+ break;
+ }
+ blame_date_width -= 1; /* strip the null */
+
+ if (revs.diffopt.flags.find_copies_harder)
+ opt |= (PICKAXE_BLAME_COPY | PICKAXE_BLAME_MOVE |
+ PICKAXE_BLAME_COPY_HARDER);
+
+ /*
+ * We have collected options unknown to us in argv[1..unk]
+ * which are to be passed to revision machinery if we are
+ * going to do the "bottom" processing.
+ *
+	 * The remaining arguments are:
+ *
+ * (1) if dashdash_pos != 0, it is either
+ * "blame [revisions] -- <path>" or
+ * "blame -- <path> <rev>"
+ *
+ * (2) otherwise, it is one of the two:
+ * "blame [revisions] <path>"
+ * "blame <path> <rev>"
+ *
+ * Note that we must strip out <path> from the arguments: we do not
+ * want the path pruning but we may want "bottom" processing.
+ */
+ if (dashdash_pos) {
+ switch (argc - dashdash_pos - 1) {
+ case 2: /* (1b) */
+ if (argc != 4)
+ usage_with_options(opt_usage, options);
+ /* reorder for the new way: <rev> -- <path> */
+ argv[1] = argv[3];
+ argv[3] = argv[2];
+ argv[2] = "--";
+ /* FALLTHROUGH */
+ case 1: /* (1a) */
+ path = add_prefix(prefix, argv[--argc]);
+ argv[argc] = NULL;
+ break;
+ default:
+ usage_with_options(opt_usage, options);
+ }
+ } else {
+ if (argc < 2)
+ usage_with_options(opt_usage, options);
+ if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */
+ path = add_prefix(prefix, argv[1]);
+ argv[1] = argv[2];
+ } else { /* (2a) */
+ if (argc == 2 && is_a_rev(argv[1]) && !get_git_work_tree())
+ die("missing <path> to blame");
+ path = add_prefix(prefix, argv[argc - 1]);
+ }
+ argv[argc - 1] = "--";
+ }
+
+ revs.disable_stdin = 1;
+ setup_revisions(argc, argv, &revs, NULL);
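+	/*
+	 * With no revision given in a bare repository there is no
+	 * working tree to blame, so start from HEAD explicitly.
+	 */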
+ if (!revs.pending.nr && is_bare_repository()) {
+ struct commit *head_commit;
+ struct object_id head_oid;
+
+ if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING,
+ &head_oid, NULL) ||
+ !(head_commit = lookup_commit_reference_gently(revs.repo,
+ &head_oid, 1)))
+ die("no such ref: HEAD");
+
+ add_pending_object(&revs, &head_commit->object, "HEAD");
+ }
+
+ init_scoreboard(&sb);
+ sb.revs = &revs;
+ sb.contents_from = contents_from;
+ sb.reverse = reverse;
+ sb.repo = the_repository;
+ sb.path = path;
+ build_ignorelist(&sb, &ignore_revs_file_list, &ignore_rev_list);
+ string_list_clear(&ignore_revs_file_list, 0);
+ string_list_clear(&ignore_rev_list, 0);
+ setup_scoreboard(&sb, &o);
+
+ /*
+ * Changed-path Bloom filters are disabled when looking
+ * for copies.
+ */
+ if (!(opt & PICKAXE_BLAME_COPY))
+ setup_blame_bloom_data(&sb);
+
+ lno = sb.num_lines;
+
+ if (lno && !range_list.nr)
+ string_list_append(&range_list, "1");
+
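+	/*
+	 * Parse each -L range relative to the end of the previous one
+	 * ("anchor"), clamp it to the file, and collect it; overlapping
+	 * ranges are merged once they have all been seen.
+	 */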
+ anchor = 1;
+ range_set_init(&ranges, range_list.nr);
+ for (range_i = 0; range_i < range_list.nr; ++range_i) {
+ long bottom, top;
+ if (parse_range_arg(range_list.items[range_i].string,
+ nth_line_cb, &sb, lno, anchor,
+ &bottom, &top, sb.path,
+ the_repository->index))
+ usage(str_usage);
+ if ((!lno && (top || bottom)) || lno < bottom)
+ die(Q_("file %s has only %lu line",
+ "file %s has only %lu lines",
+ lno), sb.path, lno);
+ if (bottom < 1)
+ bottom = 1;
+ if (top < 1 || lno < top)
+ top = lno;
+ bottom--;
+ range_set_append_unsafe(&ranges, bottom, top);
+ anchor = top + 1;
+ }
+ sort_and_merge_range_set(&ranges);
+
+ for (range_i = ranges.nr; range_i > 0; --range_i) {
+ const struct range *r = &ranges.ranges[range_i - 1];
+ ent = blame_entry_prepend(ent, r->start, r->end, o);
+ num_lines += (r->end - r->start);
+ }
+ if (!num_lines)
+ num_lines = sb.num_lines;
+
+ o->suspects = ent;
+ prio_queue_put(&sb.commits, o->commit);
+
+ blame_origin_decref(o);
+
+ range_set_release(&ranges);
+ string_list_clear(&range_list, 0);
+
+ sb.ent = NULL;
+
+ if (blame_move_score)
+ sb.move_score = blame_move_score;
+ if (blame_copy_score)
+ sb.copy_score = blame_copy_score;
+
+ sb.debug = DEBUG_BLAME;
+ sb.on_sanity_fail = &sanity_check_on_fail;
+
+ sb.show_root = show_root;
+ sb.xdl_opts = xdl_opts;
+ sb.no_whole_file_rename = no_whole_file_rename;
+
+ read_mailmap(&mailmap);
+
+ sb.found_guilty_entry = &found_guilty_entry;
+ sb.found_guilty_entry_data = &pi;
+ if (show_progress)
+ pi.progress = start_delayed_progress(_("Blaming lines"), num_lines);
+
+ assign_blame(&sb, opt);
+
+ stop_progress(&pi.progress);
+
+ if (!incremental)
+ setup_pager();
+ else
+ goto cleanup;
+
+ blame_sort_final(&sb);
+
+ blame_coalesce(&sb);
+
+ if (!(output_option & (OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR)))
+ output_option |= coloring_mode;
+
+ if (!(output_option & OUTPUT_PORCELAIN)) {
+ find_alignment(&sb, &output_option);
+ if (!*repeated_meta_color &&
+ (output_option & OUTPUT_COLOR_LINE))
+ xsnprintf(repeated_meta_color,
+ sizeof(repeated_meta_color),
+ "%s", GIT_COLOR_CYAN);
+ }
+ if (output_option & OUTPUT_ANNOTATE_COMPAT)
+ output_option &= ~(OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR);
+
+ output(&sb, output_option);
+ free((void *)sb.final_buf);
+ for (ent = sb.ent; ent; ) {
+ struct blame_entry *e = ent->next;
+ free(ent);
+ ent = e;
+ }
+
+ if (show_stats) {
+ printf("num read blob: %d\n", sb.num_read_blob);
+ printf("num get patch: %d\n", sb.num_get_patch);
+ printf("num commits: %d\n", sb.num_commits);
+ }
+
+cleanup:
+ cleanup_scoreboard(&sb);
+ release_revisions(&revs);
+ return 0;
+}
diff --git a/builtin/branch.c b/builtin/branch.c
new file mode 100644
index 0000000..9470c98
--- /dev/null
+++ b/builtin/branch.c
@@ -0,0 +1,915 @@
+/*
+ * Builtin "git branch"
+ *
+ * Copyright (c) 2006 Kristian Høgsberg <krh@redhat.com>
+ * Based on git-branch.sh by Junio C Hamano.
+ */
+
+#include "cache.h"
+#include "config.h"
+#include "color.h"
+#include "refs.h"
+#include "commit.h"
+#include "builtin.h"
+#include "remote.h"
+#include "parse-options.h"
+#include "branch.h"
+#include "diff.h"
+#include "revision.h"
+#include "string-list.h"
+#include "column.h"
+#include "utf8.h"
+#include "wt-status.h"
+#include "ref-filter.h"
+#include "worktree.h"
+#include "help.h"
+#include "commit-reach.h"
+
+static const char * const builtin_branch_usage[] = {
+ N_("git branch [<options>] [-r | -a] [--merged] [--no-merged]"),
+ N_("git branch [<options>] [-f] [--recurse-submodules] <branch-name> [<start-point>]"),
+ N_("git branch [<options>] [-l] [<pattern>...]"),
+ N_("git branch [<options>] [-r] (-d | -D) <branch-name>..."),
+ N_("git branch [<options>] (-m | -M) [<old-branch>] <new-branch>"),
+ N_("git branch [<options>] (-c | -C) [<old-branch>] <new-branch>"),
+ N_("git branch [<options>] [-r | -a] [--points-at]"),
+ N_("git branch [<options>] [-r | -a] [--format]"),
+ NULL
+};
+
+static const char *head;
+static struct object_id head_oid;
+static int recurse_submodules = 0;
+static int submodule_propagate_branches = 0;
+
+static int branch_use_color = -1;
+static char branch_colors[][COLOR_MAXLEN] = {
+ GIT_COLOR_RESET,
+ GIT_COLOR_NORMAL, /* PLAIN */
+ GIT_COLOR_RED, /* REMOTE */
+ GIT_COLOR_NORMAL, /* LOCAL */
+ GIT_COLOR_GREEN, /* CURRENT */
+ GIT_COLOR_BLUE, /* UPSTREAM */
+ GIT_COLOR_CYAN, /* WORKTREE */
+};
+enum color_branch {
+ BRANCH_COLOR_RESET = 0,
+ BRANCH_COLOR_PLAIN = 1,
+ BRANCH_COLOR_REMOTE = 2,
+ BRANCH_COLOR_LOCAL = 3,
+ BRANCH_COLOR_CURRENT = 4,
+ BRANCH_COLOR_UPSTREAM = 5,
+ BRANCH_COLOR_WORKTREE = 6
+};
+
+static const char *color_branch_slots[] = {
+ [BRANCH_COLOR_RESET] = "reset",
+ [BRANCH_COLOR_PLAIN] = "plain",
+ [BRANCH_COLOR_REMOTE] = "remote",
+ [BRANCH_COLOR_LOCAL] = "local",
+ [BRANCH_COLOR_CURRENT] = "current",
+ [BRANCH_COLOR_UPSTREAM] = "upstream",
+ [BRANCH_COLOR_WORKTREE] = "worktree",
+};
+
+static struct string_list output = STRING_LIST_INIT_DUP;
+static unsigned int colopts;
+
+define_list_config_array(color_branch_slots);
+
+static int git_branch_config(const char *var, const char *value, void *cb)
+{
+ const char *slot_name;
+
+ if (!strcmp(var, "branch.sort")) {
+ if (!value)
+ return config_error_nonbool(var);
+ string_list_append(cb, value);
+ return 0;
+ }
+
+ if (starts_with(var, "column."))
+ return git_column_config(var, value, "branch", &colopts);
+ if (!strcmp(var, "color.branch")) {
+ branch_use_color = git_config_colorbool(var, value);
+ return 0;
+ }
+ if (skip_prefix(var, "color.branch.", &slot_name)) {
+ int slot = LOOKUP_CONFIG(color_branch_slots, slot_name);
+ if (slot < 0)
+ return 0;
+ if (!value)
+ return config_error_nonbool(var);
+ return color_parse(value, branch_colors[slot]);
+ }
+ if (!strcmp(var, "submodule.recurse")) {
+ recurse_submodules = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcasecmp(var, "submodule.propagateBranches")) {
+ submodule_propagate_branches = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_color_default_config(var, value, cb);
+}
+
+static const char *branch_get_color(enum color_branch ix)
+{
+ if (want_color(branch_use_color))
+ return branch_colors[ix];
+ return "";
+}
+
+static int branch_merged(int kind, const char *name,
+ struct commit *rev, struct commit *head_rev)
+{
+ /*
+ * This checks whether the merge bases of branch and HEAD (or
+	 * the other branch this branch builds upon) contain the
+ * branch, which means that the branch has already been merged
+ * safely to HEAD (or the other branch).
+ */
+ struct commit *reference_rev = NULL;
+ const char *reference_name = NULL;
+ void *reference_name_to_free = NULL;
+ int merged;
+
+ if (kind == FILTER_REFS_BRANCHES) {
+ struct branch *branch = branch_get(name);
+ const char *upstream = branch_get_upstream(branch, NULL);
+ struct object_id oid;
+
+ if (upstream &&
+ (reference_name = reference_name_to_free =
+ resolve_refdup(upstream, RESOLVE_REF_READING,
+ &oid, NULL)) != NULL)
+ reference_rev = lookup_commit_reference(the_repository,
+ &oid);
+ }
+ if (!reference_rev)
+ reference_rev = head_rev;
+
+ merged = reference_rev ? in_merge_bases(rev, reference_rev) : 0;
+
+ /*
+ * After the safety valve is fully redefined to "check with
+ * upstream, if any, otherwise with HEAD", we should just
+ * return the result of the in_merge_bases() above without
+ * any of the following code, but during the transition period,
+ * a gentle reminder is in order.
+ */
+ if ((head_rev != reference_rev) &&
+ (head_rev ? in_merge_bases(rev, head_rev) : 0) != merged) {
+ if (merged)
+ warning(_("deleting branch '%s' that has been merged to\n"
+ " '%s', but not yet merged to HEAD."),
+ name, reference_name);
+ else
+ warning(_("not deleting branch '%s' that is not yet merged to\n"
+ " '%s', even though it is merged to HEAD."),
+ name, reference_name);
+ }
+ free(reference_name_to_free);
+ return merged;
+}
+
+static int check_branch_commit(const char *branchname, const char *refname,
+ const struct object_id *oid, struct commit *head_rev,
+ int kinds, int force)
+{
+ struct commit *rev = lookup_commit_reference(the_repository, oid);
+ if (!force && !rev) {
+ error(_("Couldn't look up commit object for '%s'"), refname);
+ return -1;
+ }
+ if (!force && !branch_merged(kinds, branchname, rev, head_rev)) {
+ error(_("The branch '%s' is not fully merged.\n"
+ "If you are sure you want to delete it, "
+ "run 'git branch -D %s'."), branchname, branchname);
+ return -1;
+ }
+ return 0;
+}
+
+static void delete_branch_config(const char *branchname)
+{
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_addf(&buf, "branch.%s", branchname);
+ if (git_config_rename_section(buf.buf, NULL) < 0)
+ warning(_("Update of config-file failed"));
+ strbuf_release(&buf);
+}
+
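+/*
+ * Delete the branches named on the command line.  Candidates are
+ * resolved and, unless forced, checked for being merged; the refs are
+ * then removed in a single delete_refs() call, and only those that
+ * are really gone afterwards get their "Deleted ..." message and
+ * branch.<name> configuration section removed.
+ */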
+static int delete_branches(int argc, const char **argv, int force, int kinds,
+ int quiet)
+{
+ struct commit *head_rev = NULL;
+ struct object_id oid;
+ char *name = NULL;
+ const char *fmt;
+ int i;
+ int ret = 0;
+ int remote_branch = 0;
+ struct strbuf bname = STRBUF_INIT;
+ unsigned allowed_interpret;
+ struct string_list refs_to_delete = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+ int branch_name_pos;
+
+ switch (kinds) {
+ case FILTER_REFS_REMOTES:
+ fmt = "refs/remotes/%s";
+ /* For subsequent UI messages */
+ remote_branch = 1;
+ allowed_interpret = INTERPRET_BRANCH_REMOTE;
+
+ force = 1;
+ break;
+ case FILTER_REFS_BRANCHES:
+ fmt = "refs/heads/%s";
+ allowed_interpret = INTERPRET_BRANCH_LOCAL;
+ break;
+ default:
+ die(_("cannot use -a with -d"));
+ }
+ branch_name_pos = strcspn(fmt, "%");
+
+ if (!force)
+ head_rev = lookup_commit_reference(the_repository, &head_oid);
+
+ for (i = 0; i < argc; i++, strbuf_reset(&bname)) {
+ char *target = NULL;
+ int flags = 0;
+
+ strbuf_branchname(&bname, argv[i], allowed_interpret);
+ free(name);
+ name = mkpathdup(fmt, bname.buf);
+
+ if (kinds == FILTER_REFS_BRANCHES) {
+ const char *path;
+ if ((path = branch_checked_out(name))) {
+ error(_("Cannot delete branch '%s' "
+ "checked out at '%s'"),
+ bname.buf, path);
+ ret = 1;
+ continue;
+ }
+ }
+
+ target = resolve_refdup(name,
+ RESOLVE_REF_READING
+ | RESOLVE_REF_NO_RECURSE
+ | RESOLVE_REF_ALLOW_BAD_NAME,
+ &oid, &flags);
+ if (!target) {
+ error(remote_branch
+ ? _("remote-tracking branch '%s' not found.")
+ : _("branch '%s' not found."), bname.buf);
+ ret = 1;
+ continue;
+ }
+
+ if (!(flags & (REF_ISSYMREF|REF_ISBROKEN)) &&
+ check_branch_commit(bname.buf, name, &oid, head_rev, kinds,
+ force)) {
+ ret = 1;
+ goto next;
+ }
+
+ item = string_list_append(&refs_to_delete, name);
+ item->util = xstrdup((flags & REF_ISBROKEN) ? "broken"
+ : (flags & REF_ISSYMREF) ? target
+ : find_unique_abbrev(&oid, DEFAULT_ABBREV));
+
+ next:
+ free(target);
+ }
+
+ if (delete_refs(NULL, &refs_to_delete, REF_NO_DEREF))
+ ret = 1;
+
+ for_each_string_list_item(item, &refs_to_delete) {
+ char *describe_ref = item->util;
+ char *name = item->string;
+ if (!ref_exists(name)) {
+ char *refname = name + branch_name_pos;
+ if (!quiet)
+ printf(remote_branch
+ ? _("Deleted remote-tracking branch %s (was %s).\n")
+ : _("Deleted branch %s (was %s).\n"),
+ name + branch_name_pos, describe_ref);
+
+ delete_branch_config(refname);
+ }
+ free(describe_ref);
+ }
+ string_list_clear(&refs_to_delete, 0);
+
+ free(name);
+ strbuf_release(&bname);
+
+ return ret;
+}
+
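+/*
+ * Compute the display width of the widest ref name in the array so
+ * that verbose listings line up; remote-tracking refs get
+ * "remote_bonus" extra columns for the "remotes/" prefix they are
+ * shown with.
+ */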
+static int calc_maxwidth(struct ref_array *refs, int remote_bonus)
+{
+ int i, max = 0;
+ for (i = 0; i < refs->nr; i++) {
+ struct ref_array_item *it = refs->items[i];
+ const char *desc = it->refname;
+ int w;
+
+ skip_prefix(it->refname, "refs/heads/", &desc);
+ skip_prefix(it->refname, "refs/remotes/", &desc);
+ if (it->kind == FILTER_REFS_DETACHED_HEAD) {
+ char *head_desc = get_head_description();
+ w = utf8_strwidth(head_desc);
+ free(head_desc);
+ } else
+ w = utf8_strwidth(desc);
+
+ if (it->kind == FILTER_REFS_REMOTES)
+ w += remote_bonus;
+ if (w > max)
+ max = w;
+ }
+ return max;
+}
+
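+/*
+ * Double every '%' in a literal string so that it can be embedded in
+ * a ref-filter format without being taken for a placeholder.  The
+ * result points at a static buffer that is reused on the next call.
+ */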
+static const char *quote_literal_for_format(const char *s)
+{
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ while (*s) {
+ const char *ep = strchrnul(s, '%');
+ if (s < ep)
+ strbuf_add(&buf, s, ep - s);
+ if (*ep == '%') {
+ strbuf_addstr(&buf, "%%");
+ s = ep + 1;
+ } else {
+ s = ep;
+ }
+ }
+ return buf.buf;
+}
+
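+/*
+ * Build the default ref-filter format for "git branch --list": local
+ * and remote-tracking refs get separate layouts (current-branch and
+ * worktree markers, aligned names, plus object name, upstream and
+ * subject in verbose mode), chosen at display time by an %(if) on the
+ * ref namespace.
+ */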
+static char *build_format(struct ref_filter *filter, int maxwidth, const char *remote_prefix)
+{
+ struct strbuf fmt = STRBUF_INIT;
+ struct strbuf local = STRBUF_INIT;
+ struct strbuf remote = STRBUF_INIT;
+
+ strbuf_addf(&local, "%%(if)%%(HEAD)%%(then)* %s%%(else)%%(if)%%(worktreepath)%%(then)+ %s%%(else) %s%%(end)%%(end)",
+ branch_get_color(BRANCH_COLOR_CURRENT),
+ branch_get_color(BRANCH_COLOR_WORKTREE),
+ branch_get_color(BRANCH_COLOR_LOCAL));
+ strbuf_addf(&remote, " %s",
+ branch_get_color(BRANCH_COLOR_REMOTE));
+
+ if (filter->verbose) {
+ struct strbuf obname = STRBUF_INIT;
+
+ if (filter->abbrev < 0)
+ strbuf_addf(&obname, "%%(objectname:short)");
+ else if (!filter->abbrev)
+ strbuf_addf(&obname, "%%(objectname)");
+ else
+ strbuf_addf(&obname, "%%(objectname:short=%d)", filter->abbrev);
+
+ strbuf_addf(&local, "%%(align:%d,left)%%(refname:lstrip=2)%%(end)", maxwidth);
+ strbuf_addstr(&local, branch_get_color(BRANCH_COLOR_RESET));
+ strbuf_addf(&local, " %s ", obname.buf);
+
+ if (filter->verbose > 1)
+ {
+ strbuf_addf(&local, "%%(if:notequals=*)%%(HEAD)%%(then)%%(if)%%(worktreepath)%%(then)(%s%%(worktreepath)%s) %%(end)%%(end)",
+ branch_get_color(BRANCH_COLOR_WORKTREE), branch_get_color(BRANCH_COLOR_RESET));
+ strbuf_addf(&local, "%%(if)%%(upstream)%%(then)[%s%%(upstream:short)%s%%(if)%%(upstream:track)"
+ "%%(then): %%(upstream:track,nobracket)%%(end)] %%(end)%%(contents:subject)",
+ branch_get_color(BRANCH_COLOR_UPSTREAM), branch_get_color(BRANCH_COLOR_RESET));
+ }
+ else
+ strbuf_addf(&local, "%%(if)%%(upstream:track)%%(then)%%(upstream:track) %%(end)%%(contents:subject)");
+
+ strbuf_addf(&remote, "%%(align:%d,left)%s%%(refname:lstrip=2)%%(end)%s"
+ "%%(if)%%(symref)%%(then) -> %%(symref:short)"
+ "%%(else) %s %%(contents:subject)%%(end)",
+ maxwidth, quote_literal_for_format(remote_prefix),
+ branch_get_color(BRANCH_COLOR_RESET), obname.buf);
+ strbuf_release(&obname);
+ } else {
+ strbuf_addf(&local, "%%(refname:lstrip=2)%s%%(if)%%(symref)%%(then) -> %%(symref:short)%%(end)",
+ branch_get_color(BRANCH_COLOR_RESET));
+ strbuf_addf(&remote, "%s%%(refname:lstrip=2)%s%%(if)%%(symref)%%(then) -> %%(symref:short)%%(end)",
+ quote_literal_for_format(remote_prefix),
+ branch_get_color(BRANCH_COLOR_RESET));
+ }
+
+ strbuf_addf(&fmt, "%%(if:notequals=refs/remotes)%%(refname:rstrip=-2)%%(then)%s%%(else)%s%%(end)", local.buf, remote.buf);
+
+ strbuf_release(&local);
+ strbuf_release(&remote);
+ return strbuf_detach(&fmt, NULL);
+}
+
+static void print_ref_list(struct ref_filter *filter, struct ref_sorting *sorting,
+ struct ref_format *format, struct string_list *output)
+{
+ int i;
+ struct ref_array array;
+ struct strbuf out = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+ int maxwidth = 0;
+ const char *remote_prefix = "";
+ char *to_free = NULL;
+
+ /*
+ * If we are listing more than just remote branches,
+ * then remote branches will have a "remotes/" prefix.
+ * We need to account for this in the width.
+ */
+ if (filter->kind != FILTER_REFS_REMOTES)
+ remote_prefix = "remotes/";
+
+ memset(&array, 0, sizeof(array));
+
+ filter_refs(&array, filter, filter->kind);
+
+ if (filter->verbose)
+ maxwidth = calc_maxwidth(&array, strlen(remote_prefix));
+
+ if (!format->format)
+ format->format = to_free = build_format(filter, maxwidth, remote_prefix);
+ format->use_color = branch_use_color;
+
+ if (verify_ref_format(format))
+ die(_("unable to parse format string"));
+
+ ref_array_sort(sorting, &array);
+
+ for (i = 0; i < array.nr; i++) {
+ strbuf_reset(&err);
+ strbuf_reset(&out);
+ if (format_ref_array_item(array.items[i], format, &out, &err))
+ die("%s", err.buf);
+ if (column_active(colopts)) {
+ assert(!filter->verbose && "--column and --verbose are incompatible");
+ /* format to a string_list to let print_columns() do its job */
+ string_list_append(output, out.buf);
+ } else {
+ fwrite(out.buf, 1, out.len, stdout);
+ putchar('\n');
+ }
+ }
+
+ strbuf_release(&err);
+ strbuf_release(&out);
+ ref_array_clear(&array);
+ free(to_free);
+}
+
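+/*
+ * Implement "git branch --show-current": print the short name of the
+ * branch HEAD points to, or nothing at all when HEAD is detached.
+ */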
+static void print_current_branch_name(void)
+{
+ int flags;
+ const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
+ const char *shortname;
+ if (!refname)
+ die(_("could not resolve HEAD"));
+ else if (!(flags & REF_ISSYMREF))
+ return;
+ else if (skip_prefix(refname, "refs/heads/", &shortname))
+ puts(shortname);
+ else
+ die(_("HEAD (%s) points outside of refs/heads/"), refname);
+}
+
+static void reject_rebase_or_bisect_branch(const char *target)
+{
+ struct worktree **worktrees = get_worktrees();
+ int i;
+
+ for (i = 0; worktrees[i]; i++) {
+ struct worktree *wt = worktrees[i];
+
+ if (!wt->is_detached)
+ continue;
+
+ if (is_worktree_being_rebased(wt, target))
+ die(_("Branch %s is being rebased at %s"),
+ target, wt->path);
+
+ if (is_worktree_being_bisected(wt, target))
+ die(_("Branch %s is being bisected at %s"),
+ target, wt->path);
+ }
+
+ free_worktrees(worktrees);
+}
+
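+/*
+ * Implement -m/-M and -c/-C: validate the old and new names, refuse
+ * to touch a branch that is being rebased or bisected in some
+ * worktree, rename or copy the ref (and its reflog) with a suitable
+ * log message, then carry the branch.<name> configuration section
+ * along.
+ */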
+static void copy_or_rename_branch(const char *oldname, const char *newname, int copy, int force)
+{
+ struct strbuf oldref = STRBUF_INIT, newref = STRBUF_INIT, logmsg = STRBUF_INIT;
+ struct strbuf oldsection = STRBUF_INIT, newsection = STRBUF_INIT;
+ const char *interpreted_oldname = NULL;
+ const char *interpreted_newname = NULL;
+ int recovery = 0;
+
+ if (strbuf_check_branch_ref(&oldref, oldname)) {
+ /*
+ * Bad name --- this could be an attempt to rename a
+ * ref that we used to allow to be created by accident.
+ */
+ if (ref_exists(oldref.buf))
+ recovery = 1;
+ else
+ die(_("Invalid branch name: '%s'"), oldname);
+ }
+
+ if ((copy || strcmp(head, oldname)) && !ref_exists(oldref.buf)) {
+ if (copy && !strcmp(head, oldname))
+ die(_("No commit on branch '%s' yet."), oldname);
+ else
+ die(_("No branch named '%s'."), oldname);
+ }
+
+ /*
+ * A command like "git branch -M currentbranch currentbranch" cannot
+ * cause the worktree to become inconsistent with HEAD, so allow it.
+ */
+ if (!strcmp(oldname, newname))
+ validate_branchname(newname, &newref);
+ else
+ validate_new_branchname(newname, &newref, force);
+
+ reject_rebase_or_bisect_branch(oldref.buf);
+
+ if (!skip_prefix(oldref.buf, "refs/heads/", &interpreted_oldname) ||
+ !skip_prefix(newref.buf, "refs/heads/", &interpreted_newname)) {
+ BUG("expected prefix missing for refs");
+ }
+
+ if (copy)
+ strbuf_addf(&logmsg, "Branch: copied %s to %s",
+ oldref.buf, newref.buf);
+ else
+ strbuf_addf(&logmsg, "Branch: renamed %s to %s",
+ oldref.buf, newref.buf);
+
+ if (!copy &&
+ (!head || strcmp(oldname, head) || !is_null_oid(&head_oid)) &&
+ rename_ref(oldref.buf, newref.buf, logmsg.buf))
+ die(_("Branch rename failed"));
+ if (copy && copy_existing_ref(oldref.buf, newref.buf, logmsg.buf))
+ die(_("Branch copy failed"));
+
+ if (recovery) {
+ if (copy)
+ warning(_("Created a copy of a misnamed branch '%s'"),
+ interpreted_oldname);
+ else
+ warning(_("Renamed a misnamed branch '%s' away"),
+ interpreted_oldname);
+ }
+
+ if (!copy &&
+ replace_each_worktree_head_symref(oldref.buf, newref.buf, logmsg.buf))
+ die(_("Branch renamed to %s, but HEAD is not updated!"), newname);
+
+ strbuf_release(&logmsg);
+
+ strbuf_addf(&oldsection, "branch.%s", interpreted_oldname);
+ strbuf_release(&oldref);
+ strbuf_addf(&newsection, "branch.%s", interpreted_newname);
+ strbuf_release(&newref);
+ if (!copy && git_config_rename_section(oldsection.buf, newsection.buf) < 0)
+ die(_("Branch is renamed, but update of config-file failed"));
+ if (copy && strcmp(oldname, newname) && git_config_copy_section(oldsection.buf, newsection.buf) < 0)
+ die(_("Branch is copied, but update of config-file failed"));
+ strbuf_release(&oldsection);
+ strbuf_release(&newsection);
+}
+
+static GIT_PATH_FUNC(edit_description, "EDIT_DESCRIPTION")
+
+static int edit_branch_description(const char *branch_name)
+{
+ int exists;
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf name = STRBUF_INIT;
+
+ exists = !read_branch_desc(&buf, branch_name);
+ if (!buf.len || buf.buf[buf.len-1] != '\n')
+ strbuf_addch(&buf, '\n');
+ strbuf_commented_addf(&buf,
+ _("Please edit the description for the branch\n"
+ " %s\n"
+ "Lines starting with '%c' will be stripped.\n"),
+ branch_name, comment_line_char);
+ write_file_buf(edit_description(), buf.buf, buf.len);
+ strbuf_reset(&buf);
+ if (launch_editor(edit_description(), &buf, NULL)) {
+ strbuf_release(&buf);
+ return -1;
+ }
+ strbuf_stripspace(&buf, 1);
+
+ strbuf_addf(&name, "branch.%s.description", branch_name);
+ if (buf.len || exists)
+ git_config_set(name.buf, buf.len ? buf.buf : NULL);
+ strbuf_release(&name);
+ strbuf_release(&buf);
+
+ return 0;
+}
+
+int cmd_branch(int argc, const char **argv, const char *prefix)
+{
+ /* possible actions */
+ int delete = 0, rename = 0, copy = 0, list = 0,
+ unset_upstream = 0, show_current = 0, edit_description = 0;
+ const char *new_upstream = NULL;
+ int noncreate_actions = 0;
+ /* possible options */
+ int reflog = 0, quiet = 0, icase = 0, force = 0,
+ recurse_submodules_explicit = 0;
+ enum branch_track track;
+ struct ref_filter filter;
+ static struct ref_sorting *sorting;
+ struct string_list sorting_options = STRING_LIST_INIT_DUP;
+ struct ref_format format = REF_FORMAT_INIT;
+
+ struct option options[] = {
+ OPT_GROUP(N_("Generic options")),
+ OPT__VERBOSE(&filter.verbose,
+ N_("show hash and subject, give twice for upstream branch")),
+ OPT__QUIET(&quiet, N_("suppress informational messages")),
+ OPT_CALLBACK_F('t', "track", &track, "(direct|inherit)",
+ N_("set branch tracking configuration"),
+ PARSE_OPT_OPTARG,
+ parse_opt_tracking_mode),
+ OPT_SET_INT_F(0, "set-upstream", &track, N_("do not use"),
+ BRANCH_TRACK_OVERRIDE, PARSE_OPT_HIDDEN),
+ OPT_STRING('u', "set-upstream-to", &new_upstream, N_("upstream"), N_("change the upstream info")),
+ OPT_BOOL(0, "unset-upstream", &unset_upstream, N_("unset the upstream info")),
+ OPT__COLOR(&branch_use_color, N_("use colored output")),
+ OPT_SET_INT('r', "remotes", &filter.kind, N_("act on remote-tracking branches"),
+ FILTER_REFS_REMOTES),
+ OPT_CONTAINS(&filter.with_commit, N_("print only branches that contain the commit")),
+ OPT_NO_CONTAINS(&filter.no_commit, N_("print only branches that don't contain the commit")),
+ OPT_WITH(&filter.with_commit, N_("print only branches that contain the commit")),
+ OPT_WITHOUT(&filter.no_commit, N_("print only branches that don't contain the commit")),
+ OPT__ABBREV(&filter.abbrev),
+
+ OPT_GROUP(N_("Specific git-branch actions:")),
+ OPT_SET_INT('a', "all", &filter.kind, N_("list both remote-tracking and local branches"),
+ FILTER_REFS_REMOTES | FILTER_REFS_BRANCHES),
+ OPT_BIT('d', "delete", &delete, N_("delete fully merged branch"), 1),
+ OPT_BIT('D', NULL, &delete, N_("delete branch (even if not merged)"), 2),
+ OPT_BIT('m', "move", &rename, N_("move/rename a branch and its reflog"), 1),
+ OPT_BIT('M', NULL, &rename, N_("move/rename a branch, even if target exists"), 2),
+ OPT_BIT('c', "copy", &copy, N_("copy a branch and its reflog"), 1),
+ OPT_BIT('C', NULL, &copy, N_("copy a branch, even if target exists"), 2),
+ OPT_BOOL('l', "list", &list, N_("list branch names")),
+ OPT_BOOL(0, "show-current", &show_current, N_("show current branch name")),
+ OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")),
+ OPT_BOOL(0, "edit-description", &edit_description,
+ N_("edit the description for the branch")),
+ OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE),
+ OPT_MERGED(&filter, N_("print only branches that are merged")),
+ OPT_NO_MERGED(&filter, N_("print only branches that are not merged")),
+ OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")),
+ OPT_REF_SORT(&sorting_options),
+ OPT_CALLBACK(0, "points-at", &filter.points_at, N_("object"),
+ N_("print only branches of the object"), parse_opt_object_name),
+ OPT_BOOL('i', "ignore-case", &icase, N_("sorting and filtering are case insensitive")),
+ OPT_BOOL(0, "recurse-submodules", &recurse_submodules_explicit, N_("recurse through submodules")),
+ OPT_STRING( 0 , "format", &format.format, N_("format"), N_("format to use for the output")),
+ OPT_END(),
+ };
+
+ setup_ref_filter_porcelain_msg();
+
+ memset(&filter, 0, sizeof(filter));
+ filter.kind = FILTER_REFS_BRANCHES;
+ filter.abbrev = -1;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_branch_usage, options);
+
+ git_config(git_branch_config, &sorting_options);
+
+ track = git_branch_track;
+
+ head = resolve_refdup("HEAD", 0, &head_oid, NULL);
+ if (!head)
+ die(_("Failed to resolve HEAD as a valid ref."));
+ if (!strcmp(head, "HEAD"))
+ filter.detached = 1;
+ else if (!skip_prefix(head, "refs/heads/", &head))
+ die(_("HEAD not found below refs/heads!"));
+
+ argc = parse_options(argc, argv, prefix, options, builtin_branch_usage,
+ 0);
+
+ if (!delete && !rename && !copy && !edit_description && !new_upstream &&
+ !show_current && !unset_upstream && argc == 0)
+ list = 1;
+
+ if (filter.with_commit || filter.no_commit ||
+ filter.reachable_from || filter.unreachable_from || filter.points_at.nr)
+ list = 1;
+
+ noncreate_actions = !!delete + !!rename + !!copy + !!new_upstream +
+ !!show_current + !!list + !!edit_description +
+ !!unset_upstream;
+ if (noncreate_actions > 1)
+ usage_with_options(builtin_branch_usage, options);
+
+ if (recurse_submodules_explicit) {
+ if (!submodule_propagate_branches)
+ die(_("branch with --recurse-submodules can only be used if submodule.propagateBranches is enabled"));
+ if (noncreate_actions)
+ die(_("--recurse-submodules can only be used to create branches"));
+ }
+
+ recurse_submodules =
+ (recurse_submodules || recurse_submodules_explicit) &&
+ submodule_propagate_branches;
+
+ if (filter.abbrev == -1)
+ filter.abbrev = DEFAULT_ABBREV;
+ filter.ignore_case = icase;
+
+ finalize_colopts(&colopts, -1);
+ if (filter.verbose) {
+ if (explicitly_enable_column(colopts))
+ die(_("options '%s' and '%s' cannot be used together"), "--column", "--verbose");
+ colopts = 0;
+ }
+
+ if (force) {
+ delete *= 2;
+ rename *= 2;
+ copy *= 2;
+ }
+
+ if (list)
+ setup_auto_pager("branch", 1);
+
+ if (delete) {
+ if (!argc)
+ die(_("branch name required"));
+ return delete_branches(argc, argv, delete > 1, filter.kind, quiet);
+ } else if (show_current) {
+ print_current_branch_name();
+ return 0;
+ } else if (list) {
+ /* git branch --list also shows HEAD when it is detached */
+ if ((filter.kind & FILTER_REFS_BRANCHES) && filter.detached)
+ filter.kind |= FILTER_REFS_DETACHED_HEAD;
+ filter.name_patterns = argv;
+ /*
+ * If no sorting parameter is given then we default to sorting
+ * by 'refname'. This would give us an alphabetically sorted
+ * array with the 'HEAD' ref at the beginning followed by
+ * local branches 'refs/heads/...' and finally remote-tracking
+ * branches 'refs/remotes/...'.
+ */
+ sorting = ref_sorting_options(&sorting_options);
+ ref_sorting_set_sort_flags_all(sorting, REF_SORTING_ICASE, icase);
+ ref_sorting_set_sort_flags_all(
+ sorting, REF_SORTING_DETACHED_HEAD_FIRST, 1);
+ print_ref_list(&filter, sorting, &format, &output);
+ print_columns(&output, colopts, NULL);
+ string_list_clear(&output, 0);
+ ref_sorting_release(sorting);
+ return 0;
+ } else if (edit_description) {
+ const char *branch_name;
+ struct strbuf branch_ref = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ int ret = 1; /* assume failure */
+
+ if (!argc) {
+ if (filter.detached)
+ die(_("Cannot give description to detached HEAD"));
+ branch_name = head;
+ } else if (argc == 1) {
+ strbuf_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL);
+ branch_name = buf.buf;
+ } else {
+ die(_("cannot edit description of more than one branch"));
+ }
+
+ strbuf_addf(&branch_ref, "refs/heads/%s", branch_name);
+ if (!ref_exists(branch_ref.buf))
+ error((!argc || !strcmp(head, branch_name))
+ ? _("No commit on branch '%s' yet.")
+ : _("No branch named '%s'."),
+ branch_name);
+ else if (!edit_branch_description(branch_name))
+ ret = 0; /* happy */
+
+ strbuf_release(&branch_ref);
+ strbuf_release(&buf);
+
+ return ret;
+ } else if (copy || rename) {
+ if (!argc)
+ die(_("branch name required"));
+ else if ((argc == 1) && filter.detached)
+ die(copy? _("cannot copy the current branch while not on any.")
+ : _("cannot rename the current branch while not on any."));
+ else if (argc == 1)
+ copy_or_rename_branch(head, argv[0], copy, copy + rename > 1);
+ else if (argc == 2)
+ copy_or_rename_branch(argv[0], argv[1], copy, copy + rename > 1);
+ else
+ die(copy? _("too many branches for a copy operation")
+ : _("too many arguments for a rename operation"));
+ } else if (new_upstream) {
+ struct branch *branch;
+ struct strbuf buf = STRBUF_INIT;
+
+ if (!argc)
+ branch = branch_get(NULL);
+ else if (argc == 1) {
+ strbuf_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL);
+ branch = branch_get(buf.buf);
+ } else
+ die(_("too many arguments to set new upstream"));
+
+ if (!branch) {
+ if (!argc || !strcmp(argv[0], "HEAD"))
+ die(_("could not set upstream of HEAD to %s when "
+ "it does not point to any branch."),
+ new_upstream);
+ die(_("no such branch '%s'"), argv[0]);
+ }
+
+ if (!ref_exists(branch->refname)) {
+ if (!argc || !strcmp(head, branch->name))
+ die(_("No commit on branch '%s' yet."), branch->name);
+ die(_("branch '%s' does not exist"), branch->name);
+ }
+
+ dwim_and_setup_tracking(the_repository, branch->name,
+ new_upstream, BRANCH_TRACK_OVERRIDE,
+ quiet);
+ strbuf_release(&buf);
+ } else if (unset_upstream) {
+ struct branch *branch;
+ struct strbuf buf = STRBUF_INIT;
+
+ if (!argc)
+ branch = branch_get(NULL);
+ else if (argc == 1) {
+ strbuf_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL);
+ branch = branch_get(buf.buf);
+ } else
+ die(_("too many arguments to unset upstream"));
+
+ if (!branch) {
+ if (!argc || !strcmp(argv[0], "HEAD"))
+ die(_("could not unset upstream of HEAD when "
+ "it does not point to any branch."));
+ die(_("no such branch '%s'"), argv[0]);
+ }
+
+ if (!branch_has_merge_config(branch))
+ die(_("Branch '%s' has no upstream information"), branch->name);
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "branch.%s.remote", branch->name);
+ git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "branch.%s.merge", branch->name);
+ git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE);
+ strbuf_release(&buf);
+ } else if (!noncreate_actions && argc > 0 && argc <= 2) {
+ const char *branch_name = argv[0];
+ const char *start_name = argc == 2 ? argv[1] : head;
+
+ if (filter.kind != FILTER_REFS_BRANCHES)
+ die(_("The -a, and -r, options to 'git branch' do not take a branch name.\n"
+ "Did you mean to use: -a|-r --list <pattern>?"));
+
+ if (track == BRANCH_TRACK_OVERRIDE)
+ die(_("the '--set-upstream' option is no longer supported. Please use '--track' or '--set-upstream-to' instead."));
+
+ if (recurse_submodules) {
+ create_branches_recursively(the_repository, branch_name,
+ start_name, NULL, force,
+ reflog, quiet, track, 0);
+ return 0;
+ }
+ create_branch(the_repository, branch_name, start_name, force, 0,
+ reflog, quiet, track, 0);
+ } else
+ usage_with_options(builtin_branch_usage, options);
+
+ return 0;
+}
diff --git a/builtin/bugreport.c b/builtin/bugreport.c
new file mode 100644
index 0000000..9605254
--- /dev/null
+++ b/builtin/bugreport.c
@@ -0,0 +1,188 @@
+#include "builtin.h"
+#include "parse-options.h"
+#include "strbuf.h"
+#include "help.h"
+#include "compat/compiler.h"
+#include "hook.h"
+#include "hook-list.h"
+#include "diagnose.h"
+
+
+static void get_system_info(struct strbuf *sys_info)
+{
+ struct utsname uname_info;
+ char *shell = NULL;
+
+ /* get git version from native cmd */
+ strbuf_addstr(sys_info, _("git version:\n"));
+ get_version_info(sys_info, 1);
+
+ /* system call for other version info */
+ strbuf_addstr(sys_info, "uname: ");
+ if (uname(&uname_info))
+ strbuf_addf(sys_info, _("uname() failed with error '%s' (%d)\n"),
+ strerror(errno),
+ errno);
+ else
+ strbuf_addf(sys_info, "%s %s %s %s\n",
+ uname_info.sysname,
+ uname_info.release,
+ uname_info.version,
+ uname_info.machine);
+
+ strbuf_addstr(sys_info, _("compiler info: "));
+ get_compiler_info(sys_info);
+
+ strbuf_addstr(sys_info, _("libc info: "));
+ get_libc_info(sys_info);
+
+ shell = getenv("SHELL");
+ strbuf_addf(sys_info, "$SHELL (typically, interactive shell): %s\n",
+ shell ? shell : "<unset>");
+}
+
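+/*
+ * List the hooks that are actually installed in this repository, one
+ * name per line; outside of a repository just note that there is
+ * nothing to show.
+ */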
+static void get_populated_hooks(struct strbuf *hook_info, int nongit)
+{
+ const char **p;
+
+ if (nongit) {
+ strbuf_addstr(hook_info,
+ _("not run from a git repository - no hooks to show\n"));
+ return;
+ }
+
+ for (p = hook_name_list; *p; p++) {
+ const char *hook = *p;
+
+ if (hook_exists(hook))
+ strbuf_addf(hook_info, "%s\n", hook);
+ }
+}
+
+static const char * const bugreport_usage[] = {
+ N_("git bugreport [(-o | --output-directory) <path>] [(-s | --suffix) <format>]\n"
+ " [--diagnose[=<mode>]]"),
+ NULL
+};
+
+static int get_bug_template(struct strbuf *template)
+{
+ const char template_text[] = N_(
+"Thank you for filling out a Git bug report!\n"
+"Please answer the following questions to help us understand your issue.\n"
+"\n"
+"What did you do before the bug happened? (Steps to reproduce your issue)\n"
+"\n"
+"What did you expect to happen? (Expected behavior)\n"
+"\n"
+"What happened instead? (Actual behavior)\n"
+"\n"
+"What's different between what you expected and what actually happened?\n"
+"\n"
+"Anything else you want to add:\n"
+"\n"
+"Please review the rest of the bug report below.\n"
+"You can delete any lines you don't wish to share.\n");
+
+ strbuf_addstr(template, _(template_text));
+ return 0;
+}
+
+static void get_header(struct strbuf *buf, const char *title)
+{
+ strbuf_addf(buf, "\n\n[%s]\n", title);
+}
+
+int cmd_bugreport(int argc, const char **argv, const char *prefix)
+{
+ struct strbuf buffer = STRBUF_INIT;
+ struct strbuf report_path = STRBUF_INIT;
+ int report = -1;
+ time_t now = time(NULL);
+ struct tm tm;
+ enum diagnose_mode diagnose = DIAGNOSE_NONE;
+ char *option_output = NULL;
+ char *option_suffix = "%Y-%m-%d-%H%M";
+ const char *user_relative_path = NULL;
+ char *prefixed_filename;
+ size_t output_path_len;
+
+ const struct option bugreport_options[] = {
+ OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"),
+ N_("create an additional zip archive of detailed diagnostics (default 'stats')"),
+ PARSE_OPT_OPTARG, option_parse_diagnose),
+ OPT_STRING('o', "output-directory", &option_output, N_("path"),
+ N_("specify a destination for the bugreport file(s)")),
+ OPT_STRING('s', "suffix", &option_suffix, N_("format"),
+ N_("specify a strftime format suffix for the filename(s)")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, bugreport_options,
+ bugreport_usage, 0);
+
+ /* Prepare the path to put the result */
+ prefixed_filename = prefix_filename(prefix,
+ option_output ? option_output : "");
+ strbuf_addstr(&report_path, prefixed_filename);
+ strbuf_complete(&report_path, '/');
+ output_path_len = report_path.len;
+
+ strbuf_addstr(&report_path, "git-bugreport-");
+ strbuf_addftime(&report_path, option_suffix, localtime_r(&now, &tm), 0, 0);
+ strbuf_addstr(&report_path, ".txt");
+
+ switch (safe_create_leading_directories(report_path.buf)) {
+ case SCLD_OK:
+ case SCLD_EXISTS:
+ break;
+ default:
+ die(_("could not create leading directories for '%s'"),
+ report_path.buf);
+ }
+
+ /* Prepare diagnostics, if requested */
+ if (diagnose != DIAGNOSE_NONE) {
+ struct strbuf zip_path = STRBUF_INIT;
+ strbuf_add(&zip_path, report_path.buf, output_path_len);
+ strbuf_addstr(&zip_path, "git-diagnostics-");
+ strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0);
+ strbuf_addstr(&zip_path, ".zip");
+
+ if (create_diagnostics_archive(&zip_path, diagnose))
+ die_errno(_("unable to create diagnostics archive %s"), zip_path.buf);
+
+ strbuf_release(&zip_path);
+ }
+
+ /* Prepare the report contents */
+ get_bug_template(&buffer);
+
+ get_header(&buffer, _("System Info"));
+ get_system_info(&buffer);
+
+ get_header(&buffer, _("Enabled Hooks"));
+ get_populated_hooks(&buffer, !startup_info->have_repository);
+
+ /* fopen doesn't offer us an O_EXCL alternative, except with glibc. */
+ report = xopen(report_path.buf, O_CREAT | O_EXCL | O_WRONLY, 0666);
+
+ if (write_in_full(report, buffer.buf, buffer.len) < 0)
+ die_errno(_("unable to write to %s"), report_path.buf);
+
+ close(report);
+
+ /*
+ * We want to print the path relative to the user, but we still need the
+ * path relative to us to give to the editor.
+ */
+ if (!(prefix && skip_prefix(report_path.buf, prefix, &user_relative_path)))
+ user_relative_path = report_path.buf;
+ fprintf(stderr, _("Created new report at '%s'.\n"),
+ user_relative_path);
+
+ free(prefixed_filename);
+ UNLEAK(buffer);
+ UNLEAK(report_path);
+ return !!launch_editor(report_path.buf, NULL, NULL);
+}
diff --git a/builtin/bundle.c b/builtin/bundle.c
new file mode 100644
index 0000000..c12c09f
--- /dev/null
+++ b/builtin/bundle.c
@@ -0,0 +1,224 @@
+#include "builtin.h"
+#include "strvec.h"
+#include "parse-options.h"
+#include "cache.h"
+#include "bundle.h"
+
+/*
+ * Basic handler for bundle files to connect repositories via sneakernet.
+ * Invocation must include an action.
+ * This command can create a bundle or provide information on an existing
+ * bundle that supports "fetch", "pull", and "ls-remote".
+ */
+
+#define BUILTIN_BUNDLE_CREATE_USAGE \
+ N_("git bundle create [-q | --quiet | --progress | --all-progress] [--all-progress-implied]\n" \
+ " [--version=<version>] <file> <git-rev-list-args>")
+#define BUILTIN_BUNDLE_VERIFY_USAGE \
+ N_("git bundle verify [-q | --quiet] <file>")
+#define BUILTIN_BUNDLE_LIST_HEADS_USAGE \
+ N_("git bundle list-heads <file> [<refname>...]")
+#define BUILTIN_BUNDLE_UNBUNDLE_USAGE \
+ N_("git bundle unbundle [--progress] <file> [<refname>...]")
+
+static char const * const builtin_bundle_usage[] = {
+ BUILTIN_BUNDLE_CREATE_USAGE,
+ BUILTIN_BUNDLE_VERIFY_USAGE,
+ BUILTIN_BUNDLE_LIST_HEADS_USAGE,
+ BUILTIN_BUNDLE_UNBUNDLE_USAGE,
+ NULL,
+};
+
+static const char * const builtin_bundle_create_usage[] = {
+ BUILTIN_BUNDLE_CREATE_USAGE,
+ NULL
+};
+
+static const char * const builtin_bundle_verify_usage[] = {
+ BUILTIN_BUNDLE_VERIFY_USAGE,
+ NULL
+};
+
+static const char * const builtin_bundle_list_heads_usage[] = {
+ BUILTIN_BUNDLE_LIST_HEADS_USAGE,
+ NULL
+};
+
+static const char * const builtin_bundle_unbundle_usage[] = {
+ BUILTIN_BUNDLE_UNBUNDLE_USAGE,
+ NULL
+};
+
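+/*
+ * Option parsing shared by all bundle subcommands: stop at the first
+ * non-option so that "<file>" and anything following it are left for
+ * the caller, and hand back the bundle path adjusted for the prefix.
+ */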
+static int parse_options_cmd_bundle(int argc,
+ const char **argv,
+ const char* prefix,
+ const char * const usagestr[],
+ const struct option options[],
+ char **bundle_file) {
+ int newargc;
+ newargc = parse_options(argc, argv, NULL, options, usagestr,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (argc < 1)
+ usage_with_options(usagestr, options);
+ *bundle_file = prefix_filename(prefix, argv[0]);
+ return newargc;
+}
+
+static int cmd_bundle_create(int argc, const char **argv, const char *prefix) {
+ int all_progress_implied = 0;
+ int progress = isatty(STDERR_FILENO);
+ struct strvec pack_opts;
+ int version = -1;
+ int ret;
+ struct option options[] = {
+ OPT_SET_INT('q', "quiet", &progress,
+ N_("do not show progress meter"), 0),
+ OPT_SET_INT(0, "progress", &progress,
+ N_("show progress meter"), 1),
+ OPT_SET_INT(0, "all-progress", &progress,
+ N_("show progress meter during object writing phase"), 2),
+ OPT_BOOL(0, "all-progress-implied",
+ &all_progress_implied,
+ N_("similar to --all-progress when progress meter is shown")),
+ OPT_INTEGER(0, "version", &version,
+ N_("specify bundle format version")),
+ OPT_END()
+ };
+ char *bundle_file;
+
+ argc = parse_options_cmd_bundle(argc, argv, prefix,
+ builtin_bundle_create_usage, options, &bundle_file);
+ /* bundle internals use argv[1] as further parameters */
+
+ strvec_init(&pack_opts);
+ if (progress == 0)
+ strvec_push(&pack_opts, "--quiet");
+ else if (progress == 1)
+ strvec_push(&pack_opts, "--progress");
+ else if (progress == 2)
+ strvec_push(&pack_opts, "--all-progress");
+ if (progress && all_progress_implied)
+ strvec_push(&pack_opts, "--all-progress-implied");
+
+ if (!startup_info->have_repository)
+ die(_("Need a repository to create a bundle."));
+ ret = !!create_bundle(the_repository, bundle_file, argc, argv, &pack_opts, version);
+ strvec_clear(&pack_opts);
+ free(bundle_file);
+ return ret;
+}
+
+static int cmd_bundle_verify(int argc, const char **argv, const char *prefix) {
+ struct bundle_header header = BUNDLE_HEADER_INIT;
+ int bundle_fd = -1;
+ int quiet = 0;
+ int ret;
+ struct option options[] = {
+ OPT_BOOL('q', "quiet", &quiet,
+ N_("do not show bundle details")),
+ OPT_END()
+ };
+ char *bundle_file;
+
+ argc = parse_options_cmd_bundle(argc, argv, prefix,
+ builtin_bundle_verify_usage, options, &bundle_file);
+ /* bundle internals use argv[1] as further parameters */
+
+ if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0) {
+ ret = 1;
+ goto cleanup;
+ }
+ close(bundle_fd);
+ if (verify_bundle(the_repository, &header,
+ quiet ? VERIFY_BUNDLE_QUIET : VERIFY_BUNDLE_VERBOSE)) {
+ ret = 1;
+ goto cleanup;
+ }
+
+ fprintf(stderr, _("%s is okay\n"), bundle_file);
+ ret = 0;
+cleanup:
+ free(bundle_file);
+ bundle_header_release(&header);
+ return ret;
+}
+
+static int cmd_bundle_list_heads(int argc, const char **argv, const char *prefix) {
+ struct bundle_header header = BUNDLE_HEADER_INIT;
+ int bundle_fd = -1;
+ int ret;
+ struct option options[] = {
+ OPT_END()
+ };
+ char *bundle_file;
+
+ argc = parse_options_cmd_bundle(argc, argv, prefix,
+ builtin_bundle_list_heads_usage, options, &bundle_file);
+ /* bundle internals use argv[1] as further parameters */
+
+ if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0) {
+ ret = 1;
+ goto cleanup;
+ }
+ close(bundle_fd);
+ ret = !!list_bundle_refs(&header, argc, argv);
+cleanup:
+ free(bundle_file);
+ bundle_header_release(&header);
+ return ret;
+}
+
+static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix) {
+ struct bundle_header header = BUNDLE_HEADER_INIT;
+ int bundle_fd = -1;
+ int ret;
+ int progress = isatty(2);
+
+ struct option options[] = {
+ OPT_BOOL(0, "progress", &progress,
+ N_("show progress meter")),
+ OPT_END()
+ };
+ char *bundle_file;
+ struct strvec extra_index_pack_args = STRVEC_INIT;
+
+ argc = parse_options_cmd_bundle(argc, argv, prefix,
+ builtin_bundle_unbundle_usage, options, &bundle_file);
+ /* bundle internals use argv[1] as further parameters */
+
+ if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0) {
+ ret = 1;
+ goto cleanup;
+ }
+ if (!startup_info->have_repository)
+ die(_("Need a repository to unbundle."));
+ if (progress)
+ strvec_pushl(&extra_index_pack_args, "-v", "--progress-title",
+ _("Unbundling objects"), NULL);
+ ret = !!unbundle(the_repository, &header, bundle_fd,
+ &extra_index_pack_args, 0) ||
+ list_bundle_refs(&header, argc, argv);
+ bundle_header_release(&header);
+cleanup:
+ free(bundle_file);
+ return ret;
+}
+
+int cmd_bundle(int argc, const char **argv, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option options[] = {
+ OPT_SUBCOMMAND("create", &fn, cmd_bundle_create),
+ OPT_SUBCOMMAND("verify", &fn, cmd_bundle_verify),
+ OPT_SUBCOMMAND("list-heads", &fn, cmd_bundle_list_heads),
+ OPT_SUBCOMMAND("unbundle", &fn, cmd_bundle_unbundle),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, builtin_bundle_usage,
+ 0);
+
+ packet_trace_identity("bundle");
+
+ return !!fn(argc, argv, prefix);
+}
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
new file mode 100644
index 0000000..b3be58b
--- /dev/null
+++ b/builtin/cat-file.c
@@ -0,0 +1,1033 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "diff.h"
+#include "parse-options.h"
+#include "userdiff.h"
+#include "streaming.h"
+#include "tree-walk.h"
+#include "oid-array.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "promisor-remote.h"
+#include "mailmap.h"
+
+enum batch_mode {
+ BATCH_MODE_CONTENTS,
+ BATCH_MODE_INFO,
+ BATCH_MODE_QUEUE_AND_DISPATCH,
+};
+
+struct batch_options {
+ int enabled;
+ int follow_symlinks;
+ enum batch_mode batch_mode;
+ int buffer_output;
+ int all_objects;
+ int unordered;
+ int transform_mode; /* may be 'w' or 'c' for --filters or --textconv */
+ int nul_terminated;
+ const char *format;
+};
+
+static const char *force_path;
+
+static struct string_list mailmap = STRING_LIST_INIT_NODUP;
+static int use_mailmap;
+
+static char *replace_idents_using_mailmap(char *, size_t *);
+
+static char *replace_idents_using_mailmap(char *object_buf, size_t *size)
+{
+ struct strbuf sb = STRBUF_INIT;
+ const char *headers[] = { "author ", "committer ", "tagger ", NULL };
+
+ strbuf_attach(&sb, object_buf, *size, *size + 1);
+ apply_mailmap_to_header(&sb, headers, &mailmap);
+ *size = sb.len;
+ return strbuf_detach(&sb, NULL);
+}
+
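+/*
+ * Read the object and, if it is a blob reached through a regular-file
+ * path, run it through the same convert-to-working-tree filters a
+ * checkout would apply; this backs the --filters mode.
+ */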
+static int filter_object(const char *path, unsigned mode,
+ const struct object_id *oid,
+ char **buf, unsigned long *size)
+{
+ enum object_type type;
+
+ *buf = read_object_file(oid, &type, size);
+ if (!*buf)
+ return error(_("cannot read object %s '%s'"),
+ oid_to_hex(oid), path);
+ if ((type == OBJ_BLOB) && S_ISREG(mode)) {
+ struct strbuf strbuf = STRBUF_INIT;
+ struct checkout_metadata meta;
+
+ init_checkout_metadata(&meta, NULL, NULL, oid);
+ if (convert_to_working_tree(&the_index, path, *buf, *size, &strbuf, &meta)) {
+ free(*buf);
+ *size = strbuf.len;
+ *buf = strbuf_detach(&strbuf, NULL);
+ }
+ }
+
+ return 0;
+}
+
+static int stream_blob(const struct object_id *oid)
+{
+ if (stream_blob_to_fd(1, oid, NULL, 0))
+ die("unable to stream %s to stdout", oid_to_hex(oid));
+ return 0;
+}
+
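+/*
+ * Handle the single-object modes of cat-file (-t, -s, -e, -p,
+ * --textconv, --filters, or an explicit <type>): resolve the given
+ * name, fetch just the information asked for, and write it to stdout.
+ */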
+static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
+ int unknown_type)
+{
+ int ret;
+ struct object_id oid;
+ enum object_type type;
+ char *buf;
+ unsigned long size;
+ struct object_context obj_context;
+ struct object_info oi = OBJECT_INFO_INIT;
+ struct strbuf sb = STRBUF_INIT;
+ unsigned flags = OBJECT_INFO_LOOKUP_REPLACE;
+ unsigned get_oid_flags = GET_OID_RECORD_PATH | GET_OID_ONLY_TO_DIE;
+ const char *path = force_path;
+ const int opt_cw = (opt == 'c' || opt == 'w');
+ if (!path && opt_cw)
+ get_oid_flags |= GET_OID_REQUIRE_PATH;
+
+ if (unknown_type)
+ flags |= OBJECT_INFO_ALLOW_UNKNOWN_TYPE;
+
+ if (get_oid_with_context(the_repository, obj_name, get_oid_flags, &oid,
+ &obj_context))
+ die("Not a valid object name %s", obj_name);
+
+ if (!path)
+ path = obj_context.path;
+ if (obj_context.mode == S_IFINVALID)
+ obj_context.mode = 0100644;
+
+ buf = NULL;
+ switch (opt) {
+ case 't':
+ oi.type_name = &sb;
+ if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
+ die("git cat-file: could not get object info");
+ if (sb.len) {
+ printf("%s\n", sb.buf);
+ strbuf_release(&sb);
+ ret = 0;
+ goto cleanup;
+ }
+ break;
+
+ case 's':
+ oi.sizep = &size;
+ if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
+ die("git cat-file: could not get object info");
+ printf("%"PRIuMAX"\n", (uintmax_t)size);
+ ret = 0;
+ goto cleanup;
+
+ case 'e':
+ return !has_object_file(&oid);
+
+ case 'w':
+
+ if (filter_object(path, obj_context.mode,
+ &oid, &buf, &size)) {
+ ret = -1;
+ goto cleanup;
+ }
+ break;
+
+ case 'c':
+ if (textconv_object(the_repository, path, obj_context.mode,
+ &oid, 1, &buf, &size))
+ break;
+ /* else fallthrough */
+
+ case 'p':
+ type = oid_object_info(the_repository, &oid, NULL);
+ if (type < 0)
+ die("Not a valid object name %s", obj_name);
+
+ /* custom pretty-print here */
+ if (type == OBJ_TREE) {
+ const char *ls_args[3] = { NULL };
+ ls_args[0] = "ls-tree";
+ ls_args[1] = obj_name;
+ ret = cmd_ls_tree(2, ls_args, NULL);
+ goto cleanup;
+ }
+
+ if (type == OBJ_BLOB) {
+ ret = stream_blob(&oid);
+ goto cleanup;
+ }
+ buf = read_object_file(&oid, &type, &size);
+ if (!buf)
+ die("Cannot read object %s", obj_name);
+
+ if (use_mailmap) {
+ size_t s = size;
+ buf = replace_idents_using_mailmap(buf, &s);
+ size = cast_size_t_to_ulong(s);
+ }
+
+ /* otherwise just spit out the data */
+ break;
+
+ case 0:
+ {
+ enum object_type exp_type_id = type_from_string(exp_type);
+
+ if (exp_type_id == OBJ_BLOB) {
+ struct object_id blob_oid;
+ if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) {
+ char *buffer = read_object_file(&oid, &type,
+ &size);
+ const char *target;
+ if (!skip_prefix(buffer, "object ", &target) ||
+ get_oid_hex(target, &blob_oid))
+ die("%s not a valid tag", oid_to_hex(&oid));
+ free(buffer);
+ } else
+ oidcpy(&blob_oid, &oid);
+
+ if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) {
+ ret = stream_blob(&blob_oid);
+ goto cleanup;
+ }
+ /*
+ * we attempted to dereference a tag to a blob
+ * and failed; there may be new dereference
+ * mechanisms this code is not aware of.
+ * fall-back to the usual case.
+ */
+ }
+ buf = read_object_with_reference(the_repository, &oid,
+ exp_type_id, &size, NULL);
+
+ if (use_mailmap) {
+ size_t s = size;
+ buf = replace_idents_using_mailmap(buf, &s);
+ size = cast_size_t_to_ulong(s);
+ }
+ break;
+ }
+ default:
+ die("git cat-file: unknown option: %s", exp_type);
+ }
+
+ if (!buf)
+ die("git cat-file %s: bad file", obj_name);
+
+ write_or_die(1, buf, size);
+ ret = 0;
+cleanup:
+ free(buf);
+ free(obj_context.path);
+ return ret;
+}
+
+struct expand_data {
+ struct object_id oid;
+ enum object_type type;
+ unsigned long size;
+ off_t disk_size;
+ const char *rest;
+ struct object_id delta_base_oid;
+
+ /*
+ * If mark_query is true, we do not expand anything, but rather
+ * just mark the object_info with items we wish to query.
+ */
+ int mark_query;
+
+ /*
+ * Whether to split the input on whitespace before feeding it to
+ * get_sha1; this is decided during the mark_query phase based on
+ * whether we have a %(rest) token in our format.
+ */
+ int split_on_whitespace;
+
+ /*
+ * After a mark_query run, this object_info is set up to be
+ * passed to oid_object_info_extended. It will point to the data
+ * elements above, so you can retrieve the response from there.
+ */
+ struct object_info info;
+
+ /*
+ * This flag will be true if the requested batch format and options
+ * don't require us to call oid_object_info, which can then be
+ * optimized out.
+ */
+ unsigned skip_object_info : 1;
+};
+
+static int is_atom(const char *atom, const char *s, int slen)
+{
+ int alen = strlen(atom);
+ return alen == slen && !memcmp(atom, s, alen);
+}
+
+static void expand_atom(struct strbuf *sb, const char *atom, int len,
+ void *vdata)
+{
+ struct expand_data *data = vdata;
+
+ if (is_atom("objectname", atom, len)) {
+ if (!data->mark_query)
+ strbuf_addstr(sb, oid_to_hex(&data->oid));
+ } else if (is_atom("objecttype", atom, len)) {
+ if (data->mark_query)
+ data->info.typep = &data->type;
+ else
+ strbuf_addstr(sb, type_name(data->type));
+ } else if (is_atom("objectsize", atom, len)) {
+ if (data->mark_query)
+ data->info.sizep = &data->size;
+ else
+ strbuf_addf(sb, "%"PRIuMAX , (uintmax_t)data->size);
+ } else if (is_atom("objectsize:disk", atom, len)) {
+ if (data->mark_query)
+ data->info.disk_sizep = &data->disk_size;
+ else
+ strbuf_addf(sb, "%"PRIuMAX, (uintmax_t)data->disk_size);
+ } else if (is_atom("rest", atom, len)) {
+ if (data->mark_query)
+ data->split_on_whitespace = 1;
+ else if (data->rest)
+ strbuf_addstr(sb, data->rest);
+ } else if (is_atom("deltabase", atom, len)) {
+ if (data->mark_query)
+ data->info.delta_base_oid = &data->delta_base_oid;
+ else
+ strbuf_addstr(sb,
+ oid_to_hex(&data->delta_base_oid));
+ } else
+ die("unknown format element: %.*s", len, atom);
+}
+
+static size_t expand_format(struct strbuf *sb, const char *start, void *data)
+{
+ const char *end;
+
+ if (*start != '(')
+ return 0;
+ end = strchr(start + 1, ')');
+ if (!end)
+ die("format element '%s' does not end in ')'", start);
+
+ expand_atom(sb, start + 1, end - start - 1, data);
+
+ return end - start + 1;
+}
+
+static void batch_write(struct batch_options *opt, const void *data, int len)
+{
+ if (opt->buffer_output) {
+ if (fwrite(data, 1, len, stdout) != len)
+ die_errno("unable to write to stdout");
+ } else
+ write_or_die(1, data, len);
+}
+
+static void print_object_or_die(struct batch_options *opt, struct expand_data *data)
+{
+ const struct object_id *oid = &data->oid;
+
+ assert(data->info.typep);
+
+ if (data->type == OBJ_BLOB) {
+ if (opt->buffer_output)
+ fflush(stdout);
+ if (opt->transform_mode) {
+ char *contents;
+ unsigned long size;
+
+ if (!data->rest)
+ die("missing path for '%s'", oid_to_hex(oid));
+
+ if (opt->transform_mode == 'w') {
+ if (filter_object(data->rest, 0100644, oid,
+ &contents, &size))
+ die("could not convert '%s' %s",
+ oid_to_hex(oid), data->rest);
+ } else if (opt->transform_mode == 'c') {
+ enum object_type type;
+ if (!textconv_object(the_repository,
+ data->rest, 0100644, oid,
+ 1, &contents, &size))
+ contents = read_object_file(oid,
+ &type,
+ &size);
+ if (!contents)
+ die("could not convert '%s' %s",
+ oid_to_hex(oid), data->rest);
+ } else
+ BUG("invalid transform_mode: %c", opt->transform_mode);
+ batch_write(opt, contents, size);
+ free(contents);
+ } else {
+ stream_blob(oid);
+ }
+ }
+ else {
+ enum object_type type;
+ unsigned long size;
+ void *contents;
+
+ contents = read_object_file(oid, &type, &size);
+
+ if (use_mailmap) {
+ size_t s = size;
+ contents = replace_idents_using_mailmap(contents, &s);
+ size = cast_size_t_to_ulong(s);
+ }
+
+ if (!contents)
+ die("object %s disappeared", oid_to_hex(oid));
+ if (type != data->type)
+ die("object %s changed type!?", oid_to_hex(oid));
+ if (data->info.sizep && size != data->size && !use_mailmap)
+ die("object %s changed size!?", oid_to_hex(oid));
+
+ batch_write(opt, contents, size);
+ free(contents);
+ }
+}
+
+static void print_default_format(struct strbuf *scratch, struct expand_data *data)
+{
+ strbuf_addf(scratch, "%s %s %"PRIuMAX"\n", oid_to_hex(&data->oid),
+ type_name(data->type),
+ (uintmax_t)data->size);
+}
+
+/*
+ * If "pack" is non-NULL, then "offset" is the byte offset within the pack from
+ * which the object may be accessed (though note that we may also rely on
+ * data->oid). If "pack" is NULL, then offset is ignored.
+ */
+static void batch_object_write(const char *obj_name,
+ struct strbuf *scratch,
+ struct batch_options *opt,
+ struct expand_data *data,
+ struct packed_git *pack,
+ off_t offset)
+{
+ if (!data->skip_object_info) {
+ int ret;
+
+ if (pack)
+ ret = packed_object_info(the_repository, pack, offset,
+ &data->info);
+ else
+ ret = oid_object_info_extended(the_repository,
+ &data->oid, &data->info,
+ OBJECT_INFO_LOOKUP_REPLACE);
+ if (ret < 0) {
+ printf("%s missing\n",
+ obj_name ? obj_name : oid_to_hex(&data->oid));
+ fflush(stdout);
+ return;
+ }
+ }
+
+ strbuf_reset(scratch);
+
+ if (!opt->format) {
+ print_default_format(scratch, data);
+ } else {
+ strbuf_expand(scratch, opt->format, expand_format, data);
+ strbuf_addch(scratch, '\n');
+ }
+
+ batch_write(opt, scratch->buf, scratch->len);
+
+ if (opt->batch_mode == BATCH_MODE_CONTENTS) {
+ print_object_or_die(opt, data);
+ batch_write(opt, "\n", 1);
+ }
+}
+
+static void batch_one_object(const char *obj_name,
+ struct strbuf *scratch,
+ struct batch_options *opt,
+ struct expand_data *data)
+{
+ struct object_context ctx;
+ int flags = opt->follow_symlinks ? GET_OID_FOLLOW_SYMLINKS : 0;
+ enum get_oid_result result;
+
+ result = get_oid_with_context(the_repository, obj_name,
+ flags, &data->oid, &ctx);
+ if (result != FOUND) {
+ switch (result) {
+ case MISSING_OBJECT:
+ printf("%s missing\n", obj_name);
+ break;
+ case SHORT_NAME_AMBIGUOUS:
+ printf("%s ambiguous\n", obj_name);
+ break;
+ case DANGLING_SYMLINK:
+ printf("dangling %"PRIuMAX"\n%s\n",
+ (uintmax_t)strlen(obj_name), obj_name);
+ break;
+ case SYMLINK_LOOP:
+ printf("loop %"PRIuMAX"\n%s\n",
+ (uintmax_t)strlen(obj_name), obj_name);
+ break;
+ case NOT_DIR:
+ printf("notdir %"PRIuMAX"\n%s\n",
+ (uintmax_t)strlen(obj_name), obj_name);
+ break;
+ default:
+ BUG("unknown get_sha1_with_context result %d\n",
+ result);
+ break;
+ }
+ fflush(stdout);
+ return;
+ }
+
+ if (ctx.mode == 0) {
+ printf("symlink %"PRIuMAX"\n%s\n",
+ (uintmax_t)ctx.symlink_path.len,
+ ctx.symlink_path.buf);
+ fflush(stdout);
+ return;
+ }
+
+ batch_object_write(obj_name, scratch, opt, data, NULL, 0);
+}
+
+struct object_cb_data {
+ struct batch_options *opt;
+ struct expand_data *expand;
+ struct oidset *seen;
+ struct strbuf *scratch;
+};
+
+static int batch_object_cb(const struct object_id *oid, void *vdata)
+{
+ struct object_cb_data *data = vdata;
+ oidcpy(&data->expand->oid, oid);
+ batch_object_write(NULL, data->scratch, data->opt, data->expand,
+ NULL, 0);
+ return 0;
+}
+
+static int collect_loose_object(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ oid_array_append(data, oid);
+ return 0;
+}
+
+static int collect_packed_object(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ oid_array_append(data, oid);
+ return 0;
+}
+
+static int batch_unordered_object(const struct object_id *oid,
+ struct packed_git *pack, off_t offset,
+ void *vdata)
+{
+ struct object_cb_data *data = vdata;
+
+ if (oidset_insert(data->seen, oid))
+ return 0;
+
+ oidcpy(&data->expand->oid, oid);
+ batch_object_write(NULL, data->scratch, data->opt, data->expand,
+ pack, offset);
+ return 0;
+}
+
+static int batch_unordered_loose(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ return batch_unordered_object(oid, NULL, 0, data);
+}
+
+static int batch_unordered_packed(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ return batch_unordered_object(oid, pack,
+ nth_packed_object_offset(pack, pos),
+ data);
+}
+
+typedef void (*parse_cmd_fn_t)(struct batch_options *, const char *,
+ struct strbuf *, struct expand_data *);
+
+struct queued_cmd {
+ parse_cmd_fn_t fn;
+ char *line;
+};
+
+static void parse_cmd_contents(struct batch_options *opt,
+ const char *line,
+ struct strbuf *output,
+ struct expand_data *data)
+{
+ opt->batch_mode = BATCH_MODE_CONTENTS;
+ batch_one_object(line, output, opt, data);
+}
+
+static void parse_cmd_info(struct batch_options *opt,
+ const char *line,
+ struct strbuf *output,
+ struct expand_data *data)
+{
+ opt->batch_mode = BATCH_MODE_INFO;
+ batch_one_object(line, output, opt, data);
+}
+
+static void dispatch_calls(struct batch_options *opt,
+ struct strbuf *output,
+ struct expand_data *data,
+ struct queued_cmd *cmd,
+ int nr)
+{
+ int i;
+
+ if (!opt->buffer_output)
+ die(_("flush is only for --buffer mode"));
+
+ for (i = 0; i < nr; i++)
+ cmd[i].fn(opt, cmd[i].line, output, data);
+
+ fflush(stdout);
+}
+
+static void free_cmds(struct queued_cmd *cmd, size_t *nr)
+{
+ size_t i;
+
+ for (i = 0; i < *nr; i++)
+ FREE_AND_NULL(cmd[i].line);
+
+ *nr = 0;
+}
+
+
+static const struct parse_cmd {
+ const char *name;
+ parse_cmd_fn_t fn;
+ unsigned takes_args;
+} commands[] = {
+ { "contents", parse_cmd_contents, 1},
+ { "info", parse_cmd_info, 1},
+ { "flush", NULL, 0},
+};
+
+static void batch_objects_command(struct batch_options *opt,
+ struct strbuf *output,
+ struct expand_data *data)
+{
+ struct strbuf input = STRBUF_INIT;
+ struct queued_cmd *queued_cmd = NULL;
+ size_t alloc = 0, nr = 0;
+
+ while (1) {
+ int i, ret;
+ const struct parse_cmd *cmd = NULL;
+ const char *p = NULL, *cmd_end;
+ struct queued_cmd call = {0};
+
+ if (opt->nul_terminated)
+ ret = strbuf_getline_nul(&input, stdin);
+ else
+ ret = strbuf_getline(&input, stdin);
+
+ if (ret)
+ break;
+
+ if (!input.len)
+ die(_("empty command in input"));
+ if (isspace(*input.buf))
+ die(_("whitespace before command: '%s'"), input.buf);
+
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ if (!skip_prefix(input.buf, commands[i].name, &cmd_end))
+ continue;
+
+ cmd = &commands[i];
+ if (cmd->takes_args) {
+ if (*cmd_end != ' ')
+ die(_("%s requires arguments"),
+ commands[i].name);
+
+ p = cmd_end + 1;
+ } else if (*cmd_end) {
+ die(_("%s takes no arguments"),
+ commands[i].name);
+ }
+
+ break;
+ }
+
+ if (!cmd)
+ die(_("unknown command: '%s'"), input.buf);
+
+ if (!strcmp(cmd->name, "flush")) {
+ dispatch_calls(opt, output, data, queued_cmd, nr);
+ free_cmds(queued_cmd, &nr);
+ } else if (!opt->buffer_output) {
+ cmd->fn(opt, p, output, data);
+ } else {
+ ALLOC_GROW(queued_cmd, nr + 1, alloc);
+ call.fn = cmd->fn;
+ call.line = xstrdup_or_null(p);
+ queued_cmd[nr++] = call;
+ }
+ }
+
+ if (opt->buffer_output &&
+ nr &&
+ !git_env_bool("GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT", 0)) {
+ dispatch_calls(opt, output, data, queued_cmd, nr);
+ free_cmds(queued_cmd, &nr);
+ }
+
+ free_cmds(queued_cmd, &nr);
+ free(queued_cmd);
+ strbuf_release(&input);
+}
+
+#define DEFAULT_FORMAT "%(objectname) %(objecttype) %(objectsize)"
+
+static int batch_objects(struct batch_options *opt)
+{
+ struct strbuf input = STRBUF_INIT;
+ struct strbuf output = STRBUF_INIT;
+ struct expand_data data;
+ int save_warning;
+ int retval = 0;
+
+ /*
+ * Expand once with our special mark_query flag, which will prime the
+ * object_info to be handed to oid_object_info_extended for each
+ * object.
+ */
+ memset(&data, 0, sizeof(data));
+ data.mark_query = 1;
+ strbuf_expand(&output,
+ opt->format ? opt->format : DEFAULT_FORMAT,
+ expand_format,
+ &data);
+ data.mark_query = 0;
+ strbuf_release(&output);
+ if (opt->transform_mode)
+ data.split_on_whitespace = 1;
+
+ if (opt->format && !strcmp(opt->format, DEFAULT_FORMAT))
+ opt->format = NULL;
+ /*
+ * If we are printing out the object, then always fill in the type,
+ * since we will want to decide whether or not to stream.
+ */
+ if (opt->batch_mode == BATCH_MODE_CONTENTS)
+ data.info.typep = &data.type;
+
+ if (opt->all_objects) {
+ struct object_cb_data cb;
+ struct object_info empty = OBJECT_INFO_INIT;
+
+ if (!memcmp(&data.info, &empty, sizeof(empty)))
+ data.skip_object_info = 1;
+
+ if (has_promisor_remote())
+ warning("This repository uses promisor remotes. Some objects may not be loaded.");
+
+ read_replace_refs = 0;
+
+ cb.opt = opt;
+ cb.expand = &data;
+ cb.scratch = &output;
+
+ if (opt->unordered) {
+ struct oidset seen = OIDSET_INIT;
+
+ cb.seen = &seen;
+
+ for_each_loose_object(batch_unordered_loose, &cb, 0);
+ for_each_packed_object(batch_unordered_packed, &cb,
+ FOR_EACH_OBJECT_PACK_ORDER);
+
+ oidset_clear(&seen);
+ } else {
+ struct oid_array sa = OID_ARRAY_INIT;
+
+ for_each_loose_object(collect_loose_object, &sa, 0);
+ for_each_packed_object(collect_packed_object, &sa, 0);
+
+ oid_array_for_each_unique(&sa, batch_object_cb, &cb);
+
+ oid_array_clear(&sa);
+ }
+
+ strbuf_release(&output);
+ return 0;
+ }
+
+ /*
+ * We are going to call get_sha1 on a potentially very large number of
+ * objects. In most large cases, these will be actual object sha1s. The
+ * cost to double-check that each one is not also a ref (just so we can
+ * warn) ends up dwarfing the actual cost of the object lookups
+ * themselves. We can work around it by just turning off the warning.
+ */
+ save_warning = warn_on_object_refname_ambiguity;
+ warn_on_object_refname_ambiguity = 0;
+
+ if (opt->batch_mode == BATCH_MODE_QUEUE_AND_DISPATCH) {
+ batch_objects_command(opt, &output, &data);
+ goto cleanup;
+ }
+
+ while (1) {
+ int ret;
+ if (opt->nul_terminated)
+ ret = strbuf_getline_nul(&input, stdin);
+ else
+ ret = strbuf_getline(&input, stdin);
+
+ if (ret == EOF)
+ break;
+
+ if (data.split_on_whitespace) {
+ /*
+ * Split at first whitespace, tying off the beginning
+ * of the string and saving the remainder (or NULL) in
+ * data.rest.
+ */
+ char *p = strpbrk(input.buf, " \t");
+ if (p) {
+ while (*p && strchr(" \t", *p))
+ *p++ = '\0';
+ }
+ data.rest = p;
+ }
+
+ batch_one_object(input.buf, &output, opt, &data);
+ }
+
+ cleanup:
+ strbuf_release(&input);
+ strbuf_release(&output);
+ warn_on_object_refname_ambiguity = save_warning;
+ return retval;
+}
+
+static int git_cat_file_config(const char *var, const char *value, void *cb)
+{
+ if (userdiff_config(var, value) < 0)
+ return -1;
+
+ return git_default_config(var, value, cb);
+}
+
+static int batch_option_callback(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ struct batch_options *bo = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (bo->enabled) {
+ return error(_("only one batch option may be specified"));
+ }
+
+ bo->enabled = 1;
+
+ if (!strcmp(opt->long_name, "batch"))
+ bo->batch_mode = BATCH_MODE_CONTENTS;
+ else if (!strcmp(opt->long_name, "batch-check"))
+ bo->batch_mode = BATCH_MODE_INFO;
+ else if (!strcmp(opt->long_name, "batch-command"))
+ bo->batch_mode = BATCH_MODE_QUEUE_AND_DISPATCH;
+ else
+ BUG("%s given to batch-option-callback", opt->long_name);
+
+ bo->format = arg;
+
+ return 0;
+}
+
+int cmd_cat_file(int argc, const char **argv, const char *prefix)
+{
+ int opt = 0;
+ int opt_cw = 0;
+ int opt_epts = 0;
+ const char *exp_type = NULL, *obj_name = NULL;
+ struct batch_options batch = {0};
+ int unknown_type = 0;
+
+ const char * const usage[] = {
+ N_("git cat-file <type> <object>"),
+ N_("git cat-file (-e | -p) <object>"),
+ N_("git cat-file (-t | -s) [--allow-unknown-type] <object>"),
+ N_("git cat-file (--batch | --batch-check | --batch-command) [--batch-all-objects]\n"
+ " [--buffer] [--follow-symlinks] [--unordered]\n"
+ " [--textconv | --filters] [-z]"),
+ N_("git cat-file (--textconv | --filters)\n"
+ " [<rev>:<path|tree-ish> | --path=<path|tree-ish> <rev>]"),
+ NULL
+ };
+ const struct option options[] = {
+ /* Simple queries */
+ OPT_GROUP(N_("Check object existence or emit object contents")),
+ OPT_CMDMODE('e', NULL, &opt,
+ N_("check if <object> exists"), 'e'),
+ OPT_CMDMODE('p', NULL, &opt, N_("pretty-print <object> content"), 'p'),
+
+ OPT_GROUP(N_("Emit [broken] object attributes")),
+ OPT_CMDMODE('t', NULL, &opt, N_("show object type (one of 'blob', 'tree', 'commit', 'tag', ...)"), 't'),
+ OPT_CMDMODE('s', NULL, &opt, N_("show object size"), 's'),
+ OPT_BOOL(0, "allow-unknown-type", &unknown_type,
+ N_("allow -s and -t to work with broken/corrupt objects")),
+ OPT_BOOL(0, "use-mailmap", &use_mailmap, N_("use mail map file")),
+ OPT_ALIAS(0, "mailmap", "use-mailmap"),
+ /* Batch mode */
+ OPT_GROUP(N_("Batch objects requested on stdin (or --batch-all-objects)")),
+ OPT_CALLBACK_F(0, "batch", &batch, N_("format"),
+ N_("show full <object> or <rev> contents"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ batch_option_callback),
+ OPT_CALLBACK_F(0, "batch-check", &batch, N_("format"),
+ N_("like --batch, but don't emit <contents>"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ batch_option_callback),
+ OPT_BOOL('z', NULL, &batch.nul_terminated, N_("stdin is NUL-terminated")),
+ OPT_CALLBACK_F(0, "batch-command", &batch, N_("format"),
+ N_("read commands from stdin"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ batch_option_callback),
+ OPT_CMDMODE(0, "batch-all-objects", &opt,
+ N_("with --batch[-check]: ignores stdin, batches all known objects"), 'b'),
+ /* Batch-specific options */
+ OPT_GROUP(N_("Change or optimize batch output")),
+ OPT_BOOL(0, "buffer", &batch.buffer_output, N_("buffer --batch output")),
+ OPT_BOOL(0, "follow-symlinks", &batch.follow_symlinks,
+ N_("follow in-tree symlinks")),
+ OPT_BOOL(0, "unordered", &batch.unordered,
+ N_("do not order objects before emitting them")),
+ /* Textconv options, stand-alone */
+ OPT_GROUP(N_("Emit object (blob or tree) with conversion or filter (stand-alone, or with batch)")),
+ OPT_CMDMODE(0, "textconv", &opt,
+ N_("run textconv on object's content"), 'c'),
+ OPT_CMDMODE(0, "filters", &opt,
+ N_("run filters on object's content"), 'w'),
+ OPT_STRING(0, "path", &force_path, N_("blob|tree"),
+ N_("use a <path> for (--textconv | --filters); Not with 'batch'")),
+ OPT_END()
+ };
+
+ git_config(git_cat_file_config, NULL);
+
+ batch.buffer_output = -1;
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ opt_cw = (opt == 'c' || opt == 'w');
+ opt_epts = (opt == 'e' || opt == 'p' || opt == 't' || opt == 's');
+
+ if (use_mailmap)
+ read_mailmap(&mailmap);
+
+ /* --batch-all-objects? */
+ if (opt == 'b')
+ batch.all_objects = 1;
+
+ /* Option compatibility */
+ if (force_path && !opt_cw)
+ usage_msg_optf(_("'%s=<%s>' needs '%s' or '%s'"),
+ usage, options,
+ "--path", _("path|tree-ish"), "--filters",
+ "--textconv");
+
+ /* Option compatibility with batch mode */
+ if (batch.enabled)
+ ;
+ else if (batch.follow_symlinks)
+ usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+ "--follow-symlinks");
+ else if (batch.buffer_output >= 0)
+ usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+ "--buffer");
+ else if (batch.all_objects)
+ usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+ "--batch-all-objects");
+ else if (batch.nul_terminated)
+ usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+ "-z");
+
+ /* Batch defaults */
+ if (batch.buffer_output < 0)
+ batch.buffer_output = batch.all_objects;
+
+ /* Return early if we're in batch mode? */
+ if (batch.enabled) {
+ if (opt_cw)
+ batch.transform_mode = opt;
+ else if (opt && opt != 'b')
+ usage_msg_optf(_("'-%c' is incompatible with batch mode"),
+ usage, options, opt);
+ else if (argc)
+ usage_msg_opt(_("batch modes take no arguments"), usage,
+ options);
+
+ return batch_objects(&batch);
+ }
+
+ if (opt) {
+ if (!argc && opt == 'c')
+ usage_msg_optf(_("<rev> required with '%s'"),
+ usage, options, "--textconv");
+ else if (!argc && opt == 'w')
+ usage_msg_optf(_("<rev> required with '%s'"),
+ usage, options, "--filters");
+ else if (!argc && opt_epts)
+ usage_msg_optf(_("<object> required with '-%c'"),
+ usage, options, opt);
+ else if (argc == 1)
+ obj_name = argv[0];
+ else
+ usage_msg_opt(_("too many arguments"), usage, options);
+ } else if (!argc) {
+ usage_with_options(usage, options);
+ } else if (argc != 2) {
+ usage_msg_optf(_("only two arguments allowed in <type> <object> mode, not %d"),
+ usage, options, argc);
+ } else if (argc) {
+ exp_type = argv[0];
+ obj_name = argv[1];
+ }
+
+ if (unknown_type && opt != 't' && opt != 's')
+ die("git cat-file --allow-unknown-type: use with -s or -t");
+ return cat_one_file(opt, exp_type, obj_name, unknown_type);
+}
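
The --batch/--batch-check format handling above splits the work between expand_format(), which scans for "%(...)" placeholders, and expand_atom(), which either records which fields to query (the mark_query pass) or prints the value already fetched. Below is a minimal, self-contained sketch of that scanning technique in plain ISO C, with no strbuf helpers and with made-up names (struct obj, emit_atom, expand) standing in for git's internals:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct obj {
    	const char *name;	/* hex object id */
    	const char *type;	/* "blob", "tree", "commit", "tag" */
    	unsigned long size;
    };

    /* Print one %(atom); unknown atoms are fatal, as in expand_atom(). */
    static void emit_atom(const char *atom, size_t len, const struct obj *o)
    {
    	if (len == strlen("objectname") && !memcmp(atom, "objectname", len))
    		fputs(o->name, stdout);
    	else if (len == strlen("objecttype") && !memcmp(atom, "objecttype", len))
    		fputs(o->type, stdout);
    	else if (len == strlen("objectsize") && !memcmp(atom, "objectsize", len))
    		printf("%lu", o->size);
    	else {
    		fprintf(stderr, "unknown format element: %.*s\n", (int)len, atom);
    		exit(1);
    	}
    }

    /* Walk the format; text outside %(...) is copied through untouched. */
    static void expand(const char *fmt, const struct obj *o)
    {
    	const char *p = fmt, *begin, *end;

    	while ((begin = strstr(p, "%(")) != NULL) {
    		fwrite(p, 1, begin - p, stdout);
    		end = strchr(begin + 2, ')');
    		if (!end) {
    			fprintf(stderr, "format element '%s' does not end in ')'\n", begin);
    			exit(1);
    		}
    		emit_atom(begin + 2, end - (begin + 2), o);
    		p = end + 1;
    	}
    	fputs(p, stdout);
    	putchar('\n');
    }

    int main(void)
    {
    	/* The empty blob, so the numbers are easy to check by hand. */
    	struct obj o = { "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", "blob", 0 };

    	expand("%(objectname) %(objecttype) %(objectsize)", &o);
    	return 0;
    }

Compiled on its own, the driver prints the same one-line header that print_default_format() emits for the empty blob: the object name, "blob", and a size of 0.
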
diff --git a/builtin/check-attr.c b/builtin/check-attr.c
new file mode 100644
index 0000000..0fef10e
--- /dev/null
+++ b/builtin/check-attr.c
@@ -0,0 +1,189 @@
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "attr.h"
+#include "quote.h"
+#include "parse-options.h"
+
+static int all_attrs;
+static int cached_attrs;
+static int stdin_paths;
+static const char * const check_attr_usage[] = {
+N_("git check-attr [-a | --all | <attr>...] [--] <pathname>..."),
+N_("git check-attr --stdin [-z] [-a | --all | <attr>...]"),
+NULL
+};
+
+static int nul_term_line;
+
+static const struct option check_attr_options[] = {
+ OPT_BOOL('a', "all", &all_attrs, N_("report all attributes set on file")),
+ OPT_BOOL(0, "cached", &cached_attrs, N_("use .gitattributes only from the index")),
+ OPT_BOOL(0 , "stdin", &stdin_paths, N_("read file names from stdin")),
+ OPT_BOOL('z', NULL, &nul_term_line,
+ N_("terminate input and output records by a NUL character")),
+ OPT_END()
+};
+
+static void output_attr(struct attr_check *check, const char *file)
+{
+ int j;
+ int cnt = check->nr;
+
+ for (j = 0; j < cnt; j++) {
+ const char *value = check->items[j].value;
+
+ if (ATTR_TRUE(value))
+ value = "set";
+ else if (ATTR_FALSE(value))
+ value = "unset";
+ else if (ATTR_UNSET(value))
+ value = "unspecified";
+
+ if (nul_term_line) {
+ printf("%s%c" /* path */
+ "%s%c" /* attrname */
+ "%s%c" /* attrvalue */,
+ file, 0,
+ git_attr_name(check->items[j].attr), 0, value, 0);
+ } else {
+ quote_c_style(file, NULL, stdout, 0);
+ printf(": %s: %s\n",
+ git_attr_name(check->items[j].attr), value);
+ }
+ }
+}
+
+static void check_attr(const char *prefix,
+ struct attr_check *check,
+ int collect_all,
+ const char *file)
+{
+ char *full_path =
+ prefix_path(prefix, prefix ? strlen(prefix) : 0, file);
+
+ if (collect_all) {
+ git_all_attrs(&the_index, full_path, check);
+ } else {
+ git_check_attr(&the_index, full_path, check);
+ }
+ output_attr(check, file);
+
+ free(full_path);
+}
+
+static void check_attr_stdin_paths(const char *prefix,
+ struct attr_check *check,
+ int collect_all)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
+ strbuf_getline_fn getline_fn;
+
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
+ die("line is badly quoted");
+ strbuf_swap(&buf, &unquoted);
+ }
+ check_attr(prefix, check, collect_all, buf.buf);
+ maybe_flush_or_die(stdout, "attribute to stdout");
+ }
+ strbuf_release(&buf);
+ strbuf_release(&unquoted);
+}
+
+static NORETURN void error_with_usage(const char *msg)
+{
+ error("%s", msg);
+ usage_with_options(check_attr_usage, check_attr_options);
+}
+
+int cmd_check_attr(int argc, const char **argv, const char *prefix)
+{
+ struct attr_check *check;
+ int cnt, i, doubledash, filei;
+
+ if (!is_bare_repository())
+ setup_work_tree();
+
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, check_attr_options,
+ check_attr_usage, PARSE_OPT_KEEP_DASHDASH);
+
+ if (repo_read_index(the_repository) < 0) {
+ die("invalid cache");
+ }
+
+ if (cached_attrs)
+ git_attr_set_direction(GIT_ATTR_INDEX);
+
+ doubledash = -1;
+ for (i = 0; doubledash < 0 && i < argc; i++) {
+ if (!strcmp(argv[i], "--"))
+ doubledash = i;
+ }
+
+ /* Process --all and/or attribute arguments: */
+ if (all_attrs) {
+ if (doubledash >= 1)
+ error_with_usage("Attributes and --all both specified");
+
+ cnt = 0;
+ filei = doubledash + 1;
+ } else if (doubledash == 0) {
+ error_with_usage("No attribute specified");
+ } else if (doubledash < 0) {
+ if (!argc)
+ error_with_usage("No attribute specified");
+
+ if (stdin_paths) {
+ /* Treat all arguments as attribute names. */
+ cnt = argc;
+ filei = argc;
+ } else {
+ /* Treat exactly one argument as an attribute name. */
+ cnt = 1;
+ filei = 1;
+ }
+ } else {
+ cnt = doubledash;
+ filei = doubledash + 1;
+ }
+
+ /* Check file argument(s): */
+ if (stdin_paths) {
+ if (filei < argc)
+ error_with_usage("Can't specify files with --stdin");
+ } else {
+ if (filei >= argc)
+ error_with_usage("No file specified");
+ }
+
+ check = attr_check_alloc();
+ if (!all_attrs) {
+ for (i = 0; i < cnt; i++) {
+ const struct git_attr *a = git_attr(argv[i]);
+
+ if (!a)
+ return error("%s: not a valid attribute name",
+ argv[i]);
+ attr_check_append(check, a);
+ }
+ }
+
+ if (stdin_paths)
+ check_attr_stdin_paths(prefix, check, all_attrs);
+ else {
+ for (i = filei; i < argc; i++)
+ check_attr(prefix, check, all_attrs, argv[i]);
+ maybe_flush_or_die(stdout, "attribute to stdout");
+ }
+
+ attr_check_free(check);
+ return 0;
+}
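
check-attr (and check-ignore further down) switch between newline- and NUL-terminated input records by picking getline_fn once, up front. The same pattern can be sketched outside of git with POSIX getdelim(); the -z flag name is borrowed from the builtins, everything else here is illustrative:

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    int main(int argc, char **argv)
    {
    	int nul_term_line = (argc > 1 && !strcmp(argv[1], "-z"));
    	int term = nul_term_line ? '\0' : '\n';
    	char *line = NULL;
    	size_t alloc = 0;
    	ssize_t len;

    	/* One record per terminator; strip the terminator the same way
    	 * strbuf_getline_nul()/strbuf_getline_lf() do before use. */
    	while ((len = getdelim(&line, &alloc, term, stdin)) != -1) {
    		if (len > 0 && line[len - 1] == term)
    			line[len - 1] = '\0';
    		printf("record: %s\n", line);
    	}
    	free(line);
    	return 0;
    }
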
diff --git a/builtin/check-ignore.c b/builtin/check-ignore.c
new file mode 100644
index 0000000..ab77606
--- /dev/null
+++ b/builtin/check-ignore.c
@@ -0,0 +1,197 @@
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "quote.h"
+#include "pathspec.h"
+#include "parse-options.h"
+#include "submodule.h"
+
+static int quiet, verbose, stdin_paths, show_non_matching, no_index;
+static const char * const check_ignore_usage[] = {
+"git check-ignore [<options>] <pathname>...",
+"git check-ignore [<options>] --stdin",
+NULL
+};
+
+static int nul_term_line;
+
+static const struct option check_ignore_options[] = {
+ OPT__QUIET(&quiet, N_("suppress progress reporting")),
+ OPT__VERBOSE(&verbose, N_("be verbose")),
+ OPT_GROUP(""),
+ OPT_BOOL(0, "stdin", &stdin_paths,
+ N_("read file names from stdin")),
+ OPT_BOOL('z', NULL, &nul_term_line,
+ N_("terminate input and output records by a NUL character")),
+ OPT_BOOL('n', "non-matching", &show_non_matching,
+ N_("show non-matching input paths")),
+ OPT_BOOL(0, "no-index", &no_index,
+ N_("ignore index when checking")),
+ OPT_END()
+};
+
+static void output_pattern(const char *path, struct path_pattern *pattern)
+{
+ char *bang = (pattern && pattern->flags & PATTERN_FLAG_NEGATIVE) ? "!" : "";
+ char *slash = (pattern && pattern->flags & PATTERN_FLAG_MUSTBEDIR) ? "/" : "";
+ if (!nul_term_line) {
+ if (!verbose) {
+ write_name_quoted(path, stdout, '\n');
+ } else {
+ if (pattern) {
+ quote_c_style(pattern->pl->src, NULL, stdout, 0);
+ printf(":%d:%s%s%s\t",
+ pattern->srcpos,
+ bang, pattern->pattern, slash);
+ }
+ else {
+ printf("::\t");
+ }
+ quote_c_style(path, NULL, stdout, 0);
+ fputc('\n', stdout);
+ }
+ } else {
+ if (!verbose) {
+ printf("%s%c", path, '\0');
+ } else {
+ if (pattern)
+ printf("%s%c%d%c%s%s%s%c%s%c",
+ pattern->pl->src, '\0',
+ pattern->srcpos, '\0',
+ bang, pattern->pattern, slash, '\0',
+ path, '\0');
+ else
+ printf("%c%c%c%s%c", '\0', '\0', '\0', path, '\0');
+ }
+ }
+}
+
+static int check_ignore(struct dir_struct *dir,
+ const char *prefix, int argc, const char **argv)
+{
+ const char *full_path;
+ char *seen;
+ int num_ignored = 0, i;
+ struct path_pattern *pattern;
+ struct pathspec pathspec;
+
+ if (!argc) {
+ if (!quiet)
+ fprintf(stderr, "no pathspec given.\n");
+ return 0;
+ }
+
+ /*
+ * check-ignore just needs paths. Magic beyond :/ is really
+ * irrelevant.
+ */
+ parse_pathspec(&pathspec,
+ PATHSPEC_ALL_MAGIC & ~PATHSPEC_FROMTOP,
+ PATHSPEC_SYMLINK_LEADING_PATH |
+ PATHSPEC_KEEP_ORDER,
+ prefix, argv);
+
+ die_path_inside_submodule(&the_index, &pathspec);
+
+ /*
+ * look for pathspecs matching entries in the index, since these
+ * should not be ignored, in order to be consistent with
+ * 'git status', 'git add' etc.
+ */
+ seen = find_pathspecs_matching_against_index(&pathspec, &the_index,
+ PS_HEED_SKIP_WORKTREE);
+ for (i = 0; i < pathspec.nr; i++) {
+ full_path = pathspec.items[i].match;
+ pattern = NULL;
+ if (!seen[i]) {
+ int dtype = DT_UNKNOWN;
+ pattern = last_matching_pattern(dir, &the_index,
+ full_path, &dtype);
+ if (!verbose && pattern &&
+ pattern->flags & PATTERN_FLAG_NEGATIVE)
+ pattern = NULL;
+ }
+ if (!quiet && (pattern || show_non_matching))
+ output_pattern(pathspec.items[i].original, pattern);
+ if (pattern)
+ num_ignored++;
+ }
+ free(seen);
+ clear_pathspec(&pathspec);
+
+ return num_ignored;
+}
+
+static int check_ignore_stdin_paths(struct dir_struct *dir, const char *prefix)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
+ char *pathspec[2] = { NULL, NULL };
+ strbuf_getline_fn getline_fn;
+ int num_ignored = 0;
+
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
+ die("line is badly quoted");
+ strbuf_swap(&buf, &unquoted);
+ }
+ pathspec[0] = buf.buf;
+ num_ignored += check_ignore(dir, prefix,
+ 1, (const char **)pathspec);
+ maybe_flush_or_die(stdout, "check-ignore to stdout");
+ }
+ strbuf_release(&buf);
+ strbuf_release(&unquoted);
+ return num_ignored;
+}
+
+int cmd_check_ignore(int argc, const char **argv, const char *prefix)
+{
+ int num_ignored;
+ struct dir_struct dir = DIR_INIT;
+
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, check_ignore_options,
+ check_ignore_usage, 0);
+
+ if (stdin_paths) {
+ if (argc > 0)
+ die(_("cannot specify pathnames with --stdin"));
+ } else {
+ if (nul_term_line)
+ die(_("-z only makes sense with --stdin"));
+ if (argc == 0)
+ die(_("no path specified"));
+ }
+ if (quiet) {
+ if (argc > 1)
+ die(_("--quiet is only valid with a single pathname"));
+ if (verbose)
+ die(_("cannot have both --quiet and --verbose"));
+ }
+ if (show_non_matching && !verbose)
+ die(_("--non-matching is only valid with --verbose"));
+
+ /* read_cache() is only necessary so we can watch out for submodules. */
+ if (!no_index && repo_read_index(the_repository) < 0)
+ die(_("index file corrupt"));
+
+ setup_standard_excludes(&dir);
+
+ if (stdin_paths) {
+ num_ignored = check_ignore_stdin_paths(&dir, prefix);
+ } else {
+ num_ignored = check_ignore(&dir, prefix, argc, argv);
+ maybe_flush_or_die(stdout, "ignore to stdout");
+ }
+
+ dir_clear(&dir);
+
+ return !num_ignored;
+}
diff --git a/builtin/check-mailmap.c b/builtin/check-mailmap.c
new file mode 100644
index 0000000..7dc47e4
--- /dev/null
+++ b/builtin/check-mailmap.c
@@ -0,0 +1,67 @@
+#include "builtin.h"
+#include "config.h"
+#include "mailmap.h"
+#include "parse-options.h"
+#include "string-list.h"
+
+static int use_stdin;
+static const char * const check_mailmap_usage[] = {
+N_("git check-mailmap [<options>] <contact>..."),
+NULL
+};
+
+static const struct option check_mailmap_options[] = {
+ OPT_BOOL(0, "stdin", &use_stdin, N_("also read contacts from stdin")),
+ OPT_END()
+};
+
+static void check_mailmap(struct string_list *mailmap, const char *contact)
+{
+ const char *name, *mail;
+ size_t namelen, maillen;
+ struct ident_split ident;
+
+ if (split_ident_line(&ident, contact, strlen(contact)))
+ die(_("unable to parse contact: %s"), contact);
+
+ name = ident.name_begin;
+ namelen = ident.name_end - ident.name_begin;
+ mail = ident.mail_begin;
+ maillen = ident.mail_end - ident.mail_begin;
+
+ map_user(mailmap, &mail, &maillen, &name, &namelen);
+
+ if (namelen)
+ printf("%.*s ", (int)namelen, name);
+ printf("<%.*s>\n", (int)maillen, mail);
+}
+
+int cmd_check_mailmap(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct string_list mailmap = STRING_LIST_INIT_NODUP;
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, check_mailmap_options,
+ check_mailmap_usage, 0);
+ if (argc == 0 && !use_stdin)
+ die(_("no contacts specified"));
+
+ read_mailmap(&mailmap);
+
+ for (i = 0; i < argc; ++i)
+ check_mailmap(&mailmap, argv[i]);
+ maybe_flush_or_die(stdout, "stdout");
+
+ if (use_stdin) {
+ struct strbuf buf = STRBUF_INIT;
+ while (strbuf_getline_lf(&buf, stdin) != EOF) {
+ check_mailmap(&mailmap, buf.buf);
+ maybe_flush_or_die(stdout, "stdout");
+ }
+ strbuf_release(&buf);
+ }
+
+ clear_mailmap(&mailmap);
+ return 0;
+}
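
check_mailmap() relies on split_ident_line() to pull the name and address out of a "Name <email>" contact before mapping it. The printing convention (name if present, then the address in angle brackets) can be sketched without git's ident machinery; the parser below is deliberately loose, handles only the happy path, and the sample address is invented:

    #include <stdio.h>
    #include <string.h>

    /* Loose "Name <email>" splitter with the same output convention as
     * check_mailmap(): the name (if any), then the address in angle
     * brackets. split_ident_line() in git proper is much stricter. */
    static int print_contact(const char *contact)
    {
    	const char *lt = strchr(contact, '<');
    	const char *gt = lt ? strchr(lt, '>') : NULL;
    	int namelen;

    	if (!lt || !gt) {
    		fprintf(stderr, "unable to parse contact: %s\n", contact);
    		return -1;
    	}

    	namelen = (int)(lt - contact);
    	while (namelen && contact[namelen - 1] == ' ')
    		namelen--;		/* trim blanks before '<' */
    	if (namelen)
    		printf("%.*s ", namelen, contact);
    	printf("<%.*s>\n", (int)(gt - lt - 1), lt + 1);
    	return 0;
    }

    int main(void)
    {
    	return print_contact("Jane Doe <jane@example.com>") ? 1 : 0;
    }
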
diff --git a/builtin/check-ref-format.c b/builtin/check-ref-format.c
new file mode 100644
index 0000000..fd0e5f8
--- /dev/null
+++ b/builtin/check-ref-format.c
@@ -0,0 +1,96 @@
+/*
+ * GIT - The information manager from hell
+ */
+
+#include "cache.h"
+#include "refs.h"
+#include "builtin.h"
+#include "strbuf.h"
+
+static const char builtin_check_ref_format_usage[] =
+"git check-ref-format [--normalize] [<options>] <refname>\n"
+" or: git check-ref-format --branch <branchname-shorthand>";
+
+/*
+ * Return a copy of refname but with leading slashes removed and runs
+ * of adjacent slashes replaced with single slashes.
+ *
+ * This function is similar to normalize_path_copy(), but stripped down
+ * to meet check_ref_format's simpler needs.
+ */
+static char *collapse_slashes(const char *refname)
+{
+ char *ret = xmallocz(strlen(refname));
+ char ch;
+ char prev = '/';
+ char *cp = ret;
+
+ while ((ch = *refname++) != '\0') {
+ if (prev == '/' && ch == prev)
+ continue;
+
+ *cp++ = ch;
+ prev = ch;
+ }
+ *cp = '\0';
+ return ret;
+}
+
+static int check_ref_format_branch(const char *arg)
+{
+ struct strbuf sb = STRBUF_INIT;
+ const char *name;
+ int nongit;
+
+ setup_git_directory_gently(&nongit);
+ if (strbuf_check_branch_ref(&sb, arg) ||
+ !skip_prefix(sb.buf, "refs/heads/", &name))
+ die("'%s' is not a valid branch name", arg);
+ printf("%s\n", name);
+ strbuf_release(&sb);
+ return 0;
+}
+
+int cmd_check_ref_format(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ int normalize = 0;
+ int flags = 0;
+ const char *refname;
+ char *to_free = NULL;
+ int ret = 1;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(builtin_check_ref_format_usage);
+
+ if (argc == 3 && !strcmp(argv[1], "--branch"))
+ return check_ref_format_branch(argv[2]);
+
+ for (i = 1; i < argc && argv[i][0] == '-'; i++) {
+ if (!strcmp(argv[i], "--normalize") || !strcmp(argv[i], "--print"))
+ normalize = 1;
+ else if (!strcmp(argv[i], "--allow-onelevel"))
+ flags |= REFNAME_ALLOW_ONELEVEL;
+ else if (!strcmp(argv[i], "--no-allow-onelevel"))
+ flags &= ~REFNAME_ALLOW_ONELEVEL;
+ else if (!strcmp(argv[i], "--refspec-pattern"))
+ flags |= REFNAME_REFSPEC_PATTERN;
+ else
+ usage(builtin_check_ref_format_usage);
+ }
+ if (i != argc - 1)
+ usage(builtin_check_ref_format_usage);
+
+ refname = argv[i];
+ if (normalize)
+ refname = to_free = collapse_slashes(refname);
+ if (check_refname_format(refname, flags))
+ goto cleanup;
+ if (normalize)
+ printf("%s\n", refname);
+
+ ret = 0;
+cleanup:
+ free(to_free);
+ return ret;
+}
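
The effect of --normalize is easiest to see with a small driver around the same rule collapse_slashes() applies: leading slashes are dropped and runs of '/' are squeezed to one. This is a stand-alone restatement for illustration, not a call into the builtin:

    #include <stdio.h>

    /* Same rule as collapse_slashes(): prev starts as '/', so leading
     * slashes and repeated slashes are skipped. */
    static void normalize(const char *in, char *out)
    {
    	char prev = '/';

    	for (; *in; in++) {
    		if (prev == '/' && *in == '/')
    			continue;
    		*out++ = *in;
    		prev = *in;
    	}
    	*out = '\0';
    }

    int main(void)
    {
    	char buf[64];

    	normalize("//heads///main", buf);
    	puts(buf);	/* prints "heads/main" */
    	return 0;
    }
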
diff --git a/builtin/checkout--worker.c b/builtin/checkout--worker.c
new file mode 100644
index 0000000..ede7dc3
--- /dev/null
+++ b/builtin/checkout--worker.c
@@ -0,0 +1,145 @@
+#include "builtin.h"
+#include "config.h"
+#include "entry.h"
+#include "parallel-checkout.h"
+#include "parse-options.h"
+#include "pkt-line.h"
+
+static void packet_to_pc_item(const char *buffer, int len,
+ struct parallel_checkout_item *pc_item)
+{
+ const struct pc_item_fixed_portion *fixed_portion;
+ const char *variant;
+ char *encoding;
+
+ if (len < sizeof(struct pc_item_fixed_portion))
+ BUG("checkout worker received too short item (got %dB, exp %dB)",
+ len, (int)sizeof(struct pc_item_fixed_portion));
+
+ fixed_portion = (struct pc_item_fixed_portion *)buffer;
+
+ if (len - sizeof(struct pc_item_fixed_portion) !=
+ fixed_portion->name_len + fixed_portion->working_tree_encoding_len)
+ BUG("checkout worker received corrupted item");
+
+ variant = buffer + sizeof(struct pc_item_fixed_portion);
+
+ /*
+ * Note: the main process uses zero length to communicate that the
+ * encoding is NULL. There is no use case that requires sending an
+ * actual empty string, since convert_attrs() never sets
+ * ca.working_tree_encoding to "".
+ */
+ if (fixed_portion->working_tree_encoding_len) {
+ encoding = xmemdupz(variant,
+ fixed_portion->working_tree_encoding_len);
+ variant += fixed_portion->working_tree_encoding_len;
+ } else {
+ encoding = NULL;
+ }
+
+ memset(pc_item, 0, sizeof(*pc_item));
+ pc_item->ce = make_empty_transient_cache_entry(fixed_portion->name_len, NULL);
+ pc_item->ce->ce_namelen = fixed_portion->name_len;
+ pc_item->ce->ce_mode = fixed_portion->ce_mode;
+ memcpy(pc_item->ce->name, variant, pc_item->ce->ce_namelen);
+ oidcpy(&pc_item->ce->oid, &fixed_portion->oid);
+
+ pc_item->id = fixed_portion->id;
+ pc_item->ca.crlf_action = fixed_portion->crlf_action;
+ pc_item->ca.ident = fixed_portion->ident;
+ pc_item->ca.working_tree_encoding = encoding;
+}
+
+static void report_result(struct parallel_checkout_item *pc_item)
+{
+ struct pc_item_result res = { 0 };
+ size_t size;
+
+ res.id = pc_item->id;
+ res.status = pc_item->status;
+
+ if (pc_item->status == PC_ITEM_WRITTEN) {
+ res.st = pc_item->st;
+ size = sizeof(res);
+ } else {
+ size = PC_ITEM_RESULT_BASE_SIZE;
+ }
+
+ packet_write(1, (const char *)&res, size);
+}
+
+/* Free the worker-side malloced data, but not pc_item itself. */
+static void release_pc_item_data(struct parallel_checkout_item *pc_item)
+{
+ free((char *)pc_item->ca.working_tree_encoding);
+ discard_cache_entry(pc_item->ce);
+}
+
+static void worker_loop(struct checkout *state)
+{
+ struct parallel_checkout_item *items = NULL;
+ size_t i, nr = 0, alloc = 0;
+
+ while (1) {
+ int len = packet_read(0, packet_buffer, sizeof(packet_buffer),
+ 0);
+
+ if (len < 0)
+ BUG("packet_read() returned negative value");
+ else if (!len)
+ break;
+
+ ALLOC_GROW(items, nr + 1, alloc);
+ packet_to_pc_item(packet_buffer, len, &items[nr++]);
+ }
+
+ for (i = 0; i < nr; i++) {
+ struct parallel_checkout_item *pc_item = &items[i];
+ write_pc_item(pc_item, state);
+ report_result(pc_item);
+ release_pc_item_data(pc_item);
+ }
+
+ packet_flush(1);
+
+ free(items);
+}
+
+static const char * const checkout_worker_usage[] = {
+ N_("git checkout--worker [<options>]"),
+ NULL
+};
+
+int cmd_checkout__worker(int argc, const char **argv, const char *prefix)
+{
+ struct checkout state = CHECKOUT_INIT;
+ struct option checkout_worker_options[] = {
+ OPT_STRING(0, "prefix", &state.base_dir, N_("string"),
+ N_("when creating files, prepend <string>")),
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(checkout_worker_usage,
+ checkout_worker_options);
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, checkout_worker_options,
+ checkout_worker_usage, 0);
+ if (argc > 0)
+ usage_with_options(checkout_worker_usage, checkout_worker_options);
+
+ if (state.base_dir)
+ state.base_dir_len = strlen(state.base_dir);
+
+ /*
+ * Setting this on a worker won't actually update the index. We just
+ * need to tell the checkout machinery to lstat() the written entries,
+ * so that we can send this data back to the main process.
+ */
+ state.refresh_cache = 1;
+
+ worker_loop(&state);
+ return 0;
+}
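
The worker talks to the main process through packet_read(), packet_write() and packet_flush(), i.e. git's pkt-line framing: a four-character hex length that counts the four header bytes plus the payload, with "0000" serving as the flush packet that ends a batch. A rough sketch of the writing side with plain write() calls follows; file descriptor 1 stands in for the pipe and error handling is kept minimal:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Frame one payload as a pkt-line: 4 hex digits covering header plus
     * payload, then the payload bytes. (Real pkt-lines also enforce a
     * maximum size, which this sketch skips.) */
    static int write_pkt(int fd, const void *buf, size_t len)
    {
    	char hdr[5];

    	snprintf(hdr, sizeof(hdr), "%04zx", len + 4);
    	if (write(fd, hdr, 4) != 4 || write(fd, buf, len) != (ssize_t)len)
    		return -1;
    	return 0;
    }

    /* "0000" is the flush packet that tells the peer the stream is done. */
    static int write_flush(int fd)
    {
    	return write(fd, "0000", 4) == 4 ? 0 : -1;
    }

    int main(void)
    {
    	const char item[] = "fake checkout item";

    	if (write_pkt(1, item, sizeof(item)) || write_flush(1))
    		return 1;
    	return 0;
    }
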
diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c
new file mode 100644
index 0000000..cf6fba9
--- /dev/null
+++ b/builtin/checkout-index.c
@@ -0,0 +1,334 @@
+/*
+ * Check-out files from the "current cache directory"
+ *
+ * Copyright (C) 2005 Linus Torvalds
+ *
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "config.h"
+#include "dir.h"
+#include "lockfile.h"
+#include "quote.h"
+#include "cache-tree.h"
+#include "parse-options.h"
+#include "entry.h"
+#include "parallel-checkout.h"
+
+#define CHECKOUT_ALL 4
+static int nul_term_line;
+static int checkout_stage; /* default to checkout stage0 */
+static int ignore_skip_worktree; /* default to 0 */
+static int to_tempfile;
+static char topath[4][TEMPORARY_FILENAME_LENGTH + 1];
+
+static struct checkout state = CHECKOUT_INIT;
+
+static void write_tempfile_record(const char *name, const char *prefix)
+{
+ int i;
+ int have_tempname = 0;
+
+ if (CHECKOUT_ALL == checkout_stage) {
+ for (i = 1; i < 4; i++)
+ if (topath[i][0]) {
+ have_tempname = 1;
+ break;
+ }
+
+ if (have_tempname) {
+ for (i = 1; i < 4; i++) {
+ if (i > 1)
+ putchar(' ');
+ if (topath[i][0])
+ fputs(topath[i], stdout);
+ else
+ putchar('.');
+ }
+ }
+ } else if (topath[checkout_stage][0]) {
+ have_tempname = 1;
+ fputs(topath[checkout_stage], stdout);
+ }
+
+ if (have_tempname) {
+ putchar('\t');
+ write_name_quoted_relative(name, prefix, stdout,
+ nul_term_line ? '\0' : '\n');
+ }
+
+ for (i = 0; i < 4; i++) {
+ topath[i][0] = 0;
+ }
+}
+
+static int checkout_file(const char *name, const char *prefix)
+{
+ int namelen = strlen(name);
+ int pos = index_name_pos(&the_index, name, namelen);
+ int has_same_name = 0;
+ int is_file = 0;
+ int is_skipped = 1;
+ int did_checkout = 0;
+ int errs = 0;
+
+ if (pos < 0)
+ pos = -pos - 1;
+
+ while (pos < the_index.cache_nr) {
+ struct cache_entry *ce = the_index.cache[pos];
+ if (ce_namelen(ce) != namelen ||
+ memcmp(ce->name, name, namelen))
+ break;
+ has_same_name = 1;
+ pos++;
+ if (S_ISSPARSEDIR(ce->ce_mode))
+ break;
+ is_file = 1;
+ if (!ignore_skip_worktree && ce_skip_worktree(ce))
+ break;
+ is_skipped = 0;
+ if (ce_stage(ce) != checkout_stage
+ && (CHECKOUT_ALL != checkout_stage || !ce_stage(ce)))
+ continue;
+ did_checkout = 1;
+ if (checkout_entry(ce, &state,
+ to_tempfile ? topath[ce_stage(ce)] : NULL,
+ NULL) < 0)
+ errs++;
+ }
+
+ if (did_checkout) {
+ if (to_tempfile)
+ write_tempfile_record(name, prefix);
+ return errs > 0 ? -1 : 0;
+ }
+
+ /*
+ * At this point we know we didn't try to check anything out. If it was
+ * because we did find an entry but it was stage 0, that's not an
+ * error.
+ */
+ if (has_same_name && checkout_stage == CHECKOUT_ALL)
+ return 0;
+
+ if (!state.quiet) {
+ fprintf(stderr, "git checkout-index: %s ", name);
+ if (!has_same_name)
+ fprintf(stderr, "is not in the cache");
+ else if (!is_file)
+ fprintf(stderr, "is a sparse directory");
+ else if (is_skipped)
+ fprintf(stderr, "has skip-worktree enabled; "
+ "use '--ignore-skip-worktree-bits' to checkout");
+ else if (checkout_stage)
+ fprintf(stderr, "does not exist at stage %d",
+ checkout_stage);
+ else
+ fprintf(stderr, "is unmerged");
+ fputc('\n', stderr);
+ }
+ return -1;
+}
+
+static int checkout_all(const char *prefix, int prefix_length)
+{
+ int i, errs = 0;
+ struct cache_entry *last_ce = NULL;
+
+ for (i = 0; i < the_index.cache_nr ; i++) {
+ struct cache_entry *ce = the_index.cache[i];
+
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ if (!ce_skip_worktree(ce))
+ BUG("sparse directory '%s' does not have skip-worktree set", ce->name);
+
+ /*
+ * If the current entry is a sparse directory and skip-worktree
+ * entries are being checked out, expand the index and continue
+ * the loop on the current index position (now pointing to the
+ * first entry inside the expanded sparse directory).
+ */
+ if (ignore_skip_worktree) {
+ ensure_full_index(&the_index);
+ ce = the_index.cache[i];
+ }
+ }
+
+ if (!ignore_skip_worktree && ce_skip_worktree(ce))
+ continue;
+ if (ce_stage(ce) != checkout_stage
+ && (CHECKOUT_ALL != checkout_stage || !ce_stage(ce)))
+ continue;
+ if (prefix && *prefix &&
+ (ce_namelen(ce) <= prefix_length ||
+ memcmp(prefix, ce->name, prefix_length)))
+ continue;
+ if (last_ce && to_tempfile) {
+ if (ce_namelen(last_ce) != ce_namelen(ce)
+ || memcmp(last_ce->name, ce->name, ce_namelen(ce)))
+ write_tempfile_record(last_ce->name, prefix);
+ }
+ if (checkout_entry(ce, &state,
+ to_tempfile ? topath[ce_stage(ce)] : NULL,
+ NULL) < 0)
+ errs++;
+ last_ce = ce;
+ }
+ if (last_ce && to_tempfile)
+ write_tempfile_record(last_ce->name, prefix);
+ return !!errs;
+}
+
+static const char * const builtin_checkout_index_usage[] = {
+ N_("git checkout-index [<options>] [--] [<file>...]"),
+ NULL
+};
+
+static int option_parse_stage(const struct option *opt,
+ const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ if (!strcmp(arg, "all")) {
+ to_tempfile = 1;
+ checkout_stage = CHECKOUT_ALL;
+ } else {
+ int ch = arg[0];
+ if ('1' <= ch && ch <= '3')
+ checkout_stage = arg[0] - '0';
+ else
+ die(_("stage should be between 1 and 3 or all"));
+ }
+ return 0;
+}
+
+int cmd_checkout_index(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct lock_file lock_file = LOCK_INIT;
+ int all = 0;
+ int read_from_stdin = 0;
+ int prefix_length;
+ int force = 0, quiet = 0, not_new = 0;
+ int index_opt = 0;
+ int err = 0;
+ int pc_workers, pc_threshold;
+ struct option builtin_checkout_index_options[] = {
+ OPT_BOOL('a', "all", &all,
+ N_("check out all files in the index")),
+ OPT_BOOL(0, "ignore-skip-worktree-bits", &ignore_skip_worktree,
+ N_("do not skip files with skip-worktree set")),
+ OPT__FORCE(&force, N_("force overwrite of existing files"), 0),
+ OPT__QUIET(&quiet,
+ N_("no warning for existing files and files not in index")),
+ OPT_BOOL('n', "no-create", &not_new,
+ N_("don't checkout new files")),
+ OPT_BOOL('u', "index", &index_opt,
+ N_("update stat information in the index file")),
+ OPT_BOOL('z', NULL, &nul_term_line,
+ N_("paths are separated with NUL character")),
+ OPT_BOOL(0, "stdin", &read_from_stdin,
+ N_("read list of paths from the standard input")),
+ OPT_BOOL(0, "temp", &to_tempfile,
+ N_("write the content to temporary files")),
+ OPT_STRING(0, "prefix", &state.base_dir, N_("string"),
+ N_("when creating files, prepend <string>")),
+ OPT_CALLBACK_F(0, "stage", NULL, "(1|2|3|all)",
+ N_("copy out the files from named stage"),
+ PARSE_OPT_NONEG, option_parse_stage),
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_checkout_index_usage,
+ builtin_checkout_index_options);
+ git_config(git_default_config, NULL);
+ prefix_length = prefix ? strlen(prefix) : 0;
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ if (repo_read_index(the_repository) < 0) {
+ die("invalid cache");
+ }
+
+ argc = parse_options(argc, argv, prefix, builtin_checkout_index_options,
+ builtin_checkout_index_usage, 0);
+ state.istate = &the_index;
+ state.force = force;
+ state.quiet = quiet;
+ state.not_new = not_new;
+
+ if (!state.base_dir)
+ state.base_dir = "";
+ state.base_dir_len = strlen(state.base_dir);
+
+ /*
+ * when --prefix is specified we do not want to update cache.
+ */
+ if (index_opt && !state.base_dir_len && !to_tempfile) {
+ state.refresh_cache = 1;
+ state.istate = &the_index;
+ repo_hold_locked_index(the_repository, &lock_file,
+ LOCK_DIE_ON_ERROR);
+ }
+
+ get_parallel_checkout_configs(&pc_workers, &pc_threshold);
+ if (pc_workers > 1)
+ init_parallel_checkout();
+
+ /* Check out named files first */
+ for (i = 0; i < argc; i++) {
+ const char *arg = argv[i];
+ char *p;
+
+ if (all)
+ die("git checkout-index: don't mix '--all' and explicit filenames");
+ if (read_from_stdin)
+ die("git checkout-index: don't mix '--stdin' and explicit filenames");
+ p = prefix_path(prefix, prefix_length, arg);
+ err |= checkout_file(p, prefix);
+ free(p);
+ }
+
+ if (read_from_stdin) {
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
+ strbuf_getline_fn getline_fn;
+
+ if (all)
+ die("git checkout-index: don't mix '--all' and '--stdin'");
+
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
+ char *p;
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
+ die("line is badly quoted");
+ strbuf_swap(&buf, &unquoted);
+ }
+ p = prefix_path(prefix, prefix_length, buf.buf);
+ err |= checkout_file(p, prefix);
+ free(p);
+ }
+ strbuf_release(&unquoted);
+ strbuf_release(&buf);
+ }
+
+ if (all)
+ err |= checkout_all(prefix, prefix_length);
+
+ if (pc_workers > 1)
+ err |= run_parallel_checkout(&state, pc_workers, pc_threshold,
+ NULL, NULL);
+
+ if (err)
+ return 1;
+
+ if (is_lock_file_locked(&lock_file) &&
+ write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die("Unable to write new index file");
+ return 0;
+}
diff --git a/builtin/checkout.c b/builtin/checkout.c
new file mode 100644
index 0000000..3fa29a0
--- /dev/null
+++ b/builtin/checkout.c
@@ -0,0 +1,1958 @@
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "advice.h"
+#include "blob.h"
+#include "branch.h"
+#include "cache-tree.h"
+#include "checkout.h"
+#include "commit.h"
+#include "config.h"
+#include "diff.h"
+#include "dir.h"
+#include "hook.h"
+#include "ll-merge.h"
+#include "lockfile.h"
+#include "merge-recursive.h"
+#include "object-store.h"
+#include "parse-options.h"
+#include "refs.h"
+#include "remote.h"
+#include "resolve-undo.h"
+#include "revision.h"
+#include "run-command.h"
+#include "submodule.h"
+#include "submodule-config.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "unpack-trees.h"
+#include "wt-status.h"
+#include "xdiff-interface.h"
+#include "entry.h"
+#include "parallel-checkout.h"
+
+static const char * const checkout_usage[] = {
+ N_("git checkout [<options>] <branch>"),
+ N_("git checkout [<options>] [<branch>] -- <file>..."),
+ NULL,
+};
+
+static const char * const switch_branch_usage[] = {
+ N_("git switch [<options>] [<branch>]"),
+ NULL,
+};
+
+static const char * const restore_usage[] = {
+ N_("git restore [<options>] [--source=<branch>] <file>..."),
+ NULL,
+};
+
+struct checkout_opts {
+ int patch_mode;
+ int quiet;
+ int merge;
+ int force;
+ int force_detach;
+ int implicit_detach;
+ int writeout_stage;
+ int overwrite_ignore;
+ int ignore_skipworktree;
+ int ignore_other_worktrees;
+ int show_progress;
+ int count_checkout_paths;
+ int overlay_mode;
+ int dwim_new_local_branch;
+ int discard_changes;
+ int accept_ref;
+ int accept_pathspec;
+ int switch_branch_doing_nothing_is_ok;
+ int only_merge_on_switching_branches;
+ int can_switch_when_in_progress;
+ int orphan_from_empty_tree;
+ int empty_pathspec_ok;
+ int checkout_index;
+ int checkout_worktree;
+ const char *ignore_unmerged_opt;
+ int ignore_unmerged;
+ int pathspec_file_nul;
+ const char *pathspec_from_file;
+
+ const char *new_branch;
+ const char *new_branch_force;
+ const char *new_orphan_branch;
+ int new_branch_log;
+ enum branch_track track;
+ struct diff_options diff_options;
+ char *conflict_style;
+
+ int branch_exists;
+ const char *prefix;
+ struct pathspec pathspec;
+ const char *from_treeish;
+ struct tree *source_tree;
+};
+
+struct branch_info {
+ char *name; /* The short name used */
+ char *path; /* The full name of a real branch */
+ struct commit *commit; /* The named commit */
+ char *refname; /* The full name of the ref being checked out. */
+ struct object_id oid; /* The object ID of the commit being checked out. */
+ /*
+ * if not null the branch is detached because it's already
+ * checked out in this checkout
+ */
+ char *checkout;
+};
+
+static void branch_info_release(struct branch_info *info)
+{
+ free(info->name);
+ free(info->path);
+ free(info->refname);
+ free(info->checkout);
+}
+
+static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit,
+ int changed)
+{
+ return run_hooks_l("post-checkout",
+ oid_to_hex(old_commit ? &old_commit->object.oid : null_oid()),
+ oid_to_hex(new_commit ? &new_commit->object.oid : null_oid()),
+ changed ? "1" : "0", NULL);
+ /* "new_commit" can be NULL when checking out from the index before
+ a commit exists. */
+
+}
+
+static int update_some(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context UNUSED)
+{
+ int len;
+ struct cache_entry *ce;
+ int pos;
+
+ if (S_ISDIR(mode))
+ return READ_TREE_RECURSIVE;
+
+ len = base->len + strlen(pathname);
+ ce = make_empty_cache_entry(&the_index, len);
+ oidcpy(&ce->oid, oid);
+ memcpy(ce->name, base->buf, base->len);
+ memcpy(ce->name + base->len, pathname, len - base->len);
+ ce->ce_flags = create_ce_flags(0) | CE_UPDATE;
+ ce->ce_namelen = len;
+ ce->ce_mode = create_ce_mode(mode);
+
+ /*
+ * If the entry is the same as the current index, we can leave the old
+ * entry in place. Whether it is UPTODATE or not, checkout_entry will
+ * do the right thing.
+ */
+ pos = index_name_pos(&the_index, ce->name, ce->ce_namelen);
+ if (pos >= 0) {
+ struct cache_entry *old = the_index.cache[pos];
+ if (ce->ce_mode == old->ce_mode &&
+ !ce_intent_to_add(old) &&
+ oideq(&ce->oid, &old->oid)) {
+ old->ce_flags |= CE_UPDATE;
+ discard_cache_entry(ce);
+ return 0;
+ }
+ }
+
+ add_index_entry(&the_index, ce,
+ ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
+ return 0;
+}
+
+static int read_tree_some(struct tree *tree, const struct pathspec *pathspec)
+{
+ read_tree(the_repository, tree,
+ pathspec, update_some, NULL);
+
+ /* update the index with the given tree's info
+ * for all args, expanding wildcards, and exit
+ * with any non-zero return code.
+ */
+ return 0;
+}
+
+static int skip_same_name(const struct cache_entry *ce, int pos)
+{
+ while (++pos < the_index.cache_nr &&
+ !strcmp(the_index.cache[pos]->name, ce->name))
+ ; /* skip */
+ return pos;
+}
+
+static int check_stage(int stage, const struct cache_entry *ce, int pos,
+ int overlay_mode)
+{
+ while (pos < the_index.cache_nr &&
+ !strcmp(the_index.cache[pos]->name, ce->name)) {
+ if (ce_stage(the_index.cache[pos]) == stage)
+ return 0;
+ pos++;
+ }
+ if (!overlay_mode)
+ return 0;
+ if (stage == 2)
+ return error(_("path '%s' does not have our version"), ce->name);
+ else
+ return error(_("path '%s' does not have their version"), ce->name);
+}
+
+static int check_stages(unsigned stages, const struct cache_entry *ce, int pos)
+{
+ unsigned seen = 0;
+ const char *name = ce->name;
+
+ while (pos < the_index.cache_nr) {
+ ce = the_index.cache[pos];
+ if (strcmp(name, ce->name))
+ break;
+ seen |= (1 << ce_stage(ce));
+ pos++;
+ }
+ if ((stages & seen) != stages)
+ return error(_("path '%s' does not have all necessary versions"),
+ name);
+ return 0;
+}
+
+static int checkout_stage(int stage, const struct cache_entry *ce, int pos,
+ const struct checkout *state, int *nr_checkouts,
+ int overlay_mode)
+{
+ while (pos < the_index.cache_nr &&
+ !strcmp(the_index.cache[pos]->name, ce->name)) {
+ if (ce_stage(the_index.cache[pos]) == stage)
+ return checkout_entry(the_index.cache[pos], state,
+ NULL, nr_checkouts);
+ pos++;
+ }
+ if (!overlay_mode) {
+ unlink_entry(ce);
+ return 0;
+ }
+ if (stage == 2)
+ return error(_("path '%s' does not have our version"), ce->name);
+ else
+ return error(_("path '%s' does not have their version"), ce->name);
+}
+
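+/*
+ * Recreate the merged state of an unmerged path from its stage #1-#3
+ * entries: run a three-way ll_merge(), write the result as a blob,
+ * and check it out to the working tree through a transient cache
+ * entry.
+ */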
+static int checkout_merged(int pos, const struct checkout *state,
+ int *nr_checkouts, struct mem_pool *ce_mem_pool)
+{
+ struct cache_entry *ce = the_index.cache[pos];
+ const char *path = ce->name;
+ mmfile_t ancestor, ours, theirs;
+ enum ll_merge_result merge_status;
+ int status;
+ struct object_id oid;
+ mmbuffer_t result_buf;
+ struct object_id threeway[3];
+ unsigned mode = 0;
+ struct ll_merge_options ll_opts;
+ int renormalize = 0;
+
+ memset(threeway, 0, sizeof(threeway));
+ while (pos < the_index.cache_nr) {
+ int stage;
+ stage = ce_stage(ce);
+ if (!stage || strcmp(path, ce->name))
+ break;
+ oidcpy(&threeway[stage - 1], &ce->oid);
+ if (stage == 2)
+ mode = create_ce_mode(ce->ce_mode);
+ pos++;
+ ce = the_index.cache[pos];
+ }
+ if (is_null_oid(&threeway[1]) || is_null_oid(&threeway[2]))
+ return error(_("path '%s' does not have necessary versions"), path);
+
+ read_mmblob(&ancestor, &threeway[0]);
+ read_mmblob(&ours, &threeway[1]);
+ read_mmblob(&theirs, &threeway[2]);
+
+ memset(&ll_opts, 0, sizeof(ll_opts));
+ git_config_get_bool("merge.renormalize", &renormalize);
+ ll_opts.renormalize = renormalize;
+ merge_status = ll_merge(&result_buf, path, &ancestor, "base",
+ &ours, "ours", &theirs, "theirs",
+ state->istate, &ll_opts);
+ free(ancestor.ptr);
+ free(ours.ptr);
+ free(theirs.ptr);
+ if (merge_status == LL_MERGE_BINARY_CONFLICT)
+ warning("Cannot merge binary files: %s (%s vs. %s)",
+ path, "ours", "theirs");
+ if (merge_status < 0 || !result_buf.ptr) {
+ free(result_buf.ptr);
+ return error(_("path '%s': cannot merge"), path);
+ }
+
+ /*
+ * NEEDSWORK:
+ * There is absolutely no reason to write this as a blob object
+ * and create a phony cache entry. This hack is primarily to get
+ * to the write_entry() machinery that massages the contents to
+ * work-tree format and writes them out, which it only does for a
+ * cache entry. The code in write_entry() needs to be refactored
+ * to allow us to feed a <buffer, size, mode> instead of a cache
+ * entry. Such a refactoring would help merge_recursive as well
+ * (it also writes the merge result to the object database even
+ * when it may contain conflicts).
+ */
+ if (write_object_file(result_buf.ptr, result_buf.size, OBJ_BLOB, &oid))
+ die(_("Unable to add merge result for '%s'"), path);
+ free(result_buf.ptr);
+ ce = make_transient_cache_entry(mode, &oid, path, 2, ce_mem_pool);
+ if (!ce)
+ die(_("make_cache_entry failed for path '%s'"), path);
+ status = checkout_entry(ce, state, NULL, nr_checkouts);
+ return status;
+}
+
+static void mark_ce_for_checkout_overlay(struct cache_entry *ce,
+ char *ps_matched,
+ const struct checkout_opts *opts)
+{
+ ce->ce_flags &= ~CE_MATCHED;
+ if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
+ return;
+ if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
+ /*
+ * "git checkout tree-ish -- path", but this entry
+ * is in the original index but is not in tree-ish
+ * or does not match the pathspec; it will not be
+ * checked out to the working tree. We will not do
+ * anything to this entry at all.
+ */
+ return;
+ /*
+ * Either this entry came from the tree-ish we are
+ * checking the paths out of, or we are checking out
+ * of the index.
+ *
+ * If it comes from the tree-ish, we already know it
+ * matches the pathspec and could just stamp
+ * CE_MATCHED on it from update_some(). But we still
+ * need ps_matched, and read_tree() (and
+ * eventually tree_entry_interesting()) cannot fill
+ * ps_matched yet. Once it can, we can avoid calling
+ * match_pathspec() for _all_ entries when
+ * opts->source_tree != NULL.
+ */
+ if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
+ ce->ce_flags |= CE_MATCHED;
+}
+
+static void mark_ce_for_checkout_no_overlay(struct cache_entry *ce,
+ char *ps_matched,
+ const struct checkout_opts *opts)
+{
+ ce->ce_flags &= ~CE_MATCHED;
+ if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
+ return;
+ if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched)) {
+ ce->ce_flags |= CE_MATCHED;
+ if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
+ /*
+ * In no-overlay mode, the path is not in
+ * tree-ish, which means we should remove it
+ * from the index and the working tree.
+ */
+ ce->ce_flags |= CE_REMOVE | CE_WT_REMOVE;
+ }
+}
+
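+/*
+ * Write every CE_MATCHED index entry to the working tree, using
+ * parallel checkout workers when configured. Unmerged entries are
+ * written from the stage selected by --ours/--theirs, or recreated
+ * with checkout_merged() when -m was given.
+ */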
+static int checkout_worktree(const struct checkout_opts *opts,
+ const struct branch_info *info)
+{
+ struct checkout state = CHECKOUT_INIT;
+ int nr_checkouts = 0, nr_unmerged = 0;
+ int errs = 0;
+ int pos;
+ int pc_workers, pc_threshold;
+ struct mem_pool ce_mem_pool;
+
+ state.force = 1;
+ state.refresh_cache = 1;
+ state.istate = &the_index;
+
+ mem_pool_init(&ce_mem_pool, 0);
+ get_parallel_checkout_configs(&pc_workers, &pc_threshold);
+ init_checkout_metadata(&state.meta, info->refname,
+ info->commit ? &info->commit->object.oid : &info->oid,
+ NULL);
+
+ enable_delayed_checkout(&state);
+
+ if (pc_workers > 1)
+ init_parallel_checkout();
+
+ for (pos = 0; pos < the_index.cache_nr; pos++) {
+ struct cache_entry *ce = the_index.cache[pos];
+ if (ce->ce_flags & CE_MATCHED) {
+ if (!ce_stage(ce)) {
+ errs |= checkout_entry(ce, &state,
+ NULL, &nr_checkouts);
+ continue;
+ }
+ if (opts->writeout_stage)
+ errs |= checkout_stage(opts->writeout_stage,
+ ce, pos,
+ &state,
+ &nr_checkouts, opts->overlay_mode);
+ else if (opts->merge)
+ errs |= checkout_merged(pos, &state,
+ &nr_unmerged,
+ &ce_mem_pool);
+ pos = skip_same_name(ce, pos) - 1;
+ }
+ }
+ if (pc_workers > 1)
+ errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
+ NULL, NULL);
+ mem_pool_discard(&ce_mem_pool, should_validate_cache_entries());
+ remove_marked_cache_entries(&the_index, 1);
+ remove_scheduled_dirs();
+ errs |= finish_delayed_checkout(&state, opts->show_progress);
+
+ if (opts->count_checkout_paths) {
+ if (nr_unmerged)
+ fprintf_ln(stderr, Q_("Recreated %d merge conflict",
+ "Recreated %d merge conflicts",
+ nr_unmerged),
+ nr_unmerged);
+ if (opts->source_tree)
+ fprintf_ln(stderr, Q_("Updated %d path from %s",
+ "Updated %d paths from %s",
+ nr_checkouts),
+ nr_checkouts,
+ find_unique_abbrev(&opts->source_tree->object.oid,
+ DEFAULT_ABBREV));
+ else if (!nr_unmerged || nr_checkouts)
+ fprintf_ln(stderr, Q_("Updated %d path from the index",
+ "Updated %d paths from the index",
+ nr_checkouts),
+ nr_checkouts);
+ }
+
+ return errs;
+}
+
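+/*
+ * Path-checkout mode ("git checkout [<tree-ish>] -- <paths>" and
+ * "git restore"): check out the named paths from the index or the
+ * given tree-ish without switching branches, or hand off to the
+ * interactive hunk selector under --patch.
+ */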
+static int checkout_paths(const struct checkout_opts *opts,
+ const struct branch_info *new_branch_info)
+{
+ int pos;
+ static char *ps_matched;
+ struct object_id rev;
+ struct commit *head;
+ int errs = 0;
+ struct lock_file lock_file = LOCK_INIT;
+ int checkout_index;
+
+ trace2_cmd_mode(opts->patch_mode ? "patch" : "path");
+
+ if (opts->track != BRANCH_TRACK_UNSPECIFIED)
+ die(_("'%s' cannot be used with updating paths"), "--track");
+
+ if (opts->new_branch_log)
+ die(_("'%s' cannot be used with updating paths"), "-l");
+
+ if (opts->ignore_unmerged && opts->patch_mode)
+ die(_("'%s' cannot be used with updating paths"),
+ opts->ignore_unmerged_opt);
+
+ if (opts->force_detach)
+ die(_("'%s' cannot be used with updating paths"), "--detach");
+
+ if (opts->merge && opts->patch_mode)
+ die(_("options '%s' and '%s' cannot be used together"), "--merge", "--patch");
+
+ if (opts->ignore_unmerged && opts->merge)
+ die(_("options '%s' and '%s' cannot be used together"),
+ opts->ignore_unmerged_opt, "-m");
+
+ if (opts->new_branch)
+ die(_("Cannot update paths and switch to branch '%s' at the same time."),
+ opts->new_branch);
+
+ if (!opts->checkout_worktree && !opts->checkout_index)
+ die(_("neither '%s' or '%s' is specified"),
+ "--staged", "--worktree");
+
+ if (!opts->checkout_worktree && !opts->from_treeish)
+ die(_("'%s' must be used when '%s' is not specified"),
+ "--worktree", "--source");
+
+ if (opts->checkout_index && !opts->checkout_worktree &&
+ opts->writeout_stage)
+ die(_("'%s' or '%s' cannot be used with %s"),
+ "--ours", "--theirs", "--staged");
+
+ if (opts->checkout_index && !opts->checkout_worktree &&
+ opts->merge)
+ die(_("'%s' or '%s' cannot be used with %s"),
+ "--merge", "--conflict", "--staged");
+
+ if (opts->patch_mode) {
+ const char *patch_mode;
+ const char *rev = new_branch_info->name;
+ char rev_oid[GIT_MAX_HEXSZ + 1];
+
+ /*
+ * Since rev can be in the form of `<a>...<b>` (which is not
+ * recognized by diff-index), we will always replace the name
+ * with the hex of the commit (whether it's in `...` form or
+ * not) for the run_add_interactive() machinery to work
+ * properly. However, there is special logic for the HEAD case
+ * so we mustn't replace that. Also, when we were given a
+ * tree-object, new_branch_info->commit would be NULL, but we
+ * do not have to do any replacement, either.
+ */
+ if (rev && new_branch_info->commit && strcmp(rev, "HEAD"))
+ rev = oid_to_hex_r(rev_oid, &new_branch_info->commit->object.oid);
+
+ if (opts->checkout_index && opts->checkout_worktree)
+ patch_mode = "--patch=checkout";
+ else if (opts->checkout_index && !opts->checkout_worktree)
+ patch_mode = "--patch=reset";
+ else if (!opts->checkout_index && opts->checkout_worktree)
+ patch_mode = "--patch=worktree";
+ else
+ BUG("either flag must have been set, worktree=%d, index=%d",
+ opts->checkout_worktree, opts->checkout_index);
+ return run_add_interactive(rev, patch_mode, &opts->pathspec);
+ }
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+ if (repo_read_index_preload(the_repository, &opts->pathspec, 0) < 0)
+ return error(_("index file corrupt"));
+
+ if (opts->source_tree)
+ read_tree_some(opts->source_tree, &opts->pathspec);
+
+ ps_matched = xcalloc(opts->pathspec.nr, 1);
+
+ /*
+ * Make sure all pathspecs participated in locating the paths
+ * to be checked out.
+ */
+ for (pos = 0; pos < the_index.cache_nr; pos++)
+ if (opts->overlay_mode)
+ mark_ce_for_checkout_overlay(the_index.cache[pos],
+ ps_matched,
+ opts);
+ else
+ mark_ce_for_checkout_no_overlay(the_index.cache[pos],
+ ps_matched,
+ opts);
+
+ if (report_path_error(ps_matched, &opts->pathspec)) {
+ free(ps_matched);
+ return 1;
+ }
+ free(ps_matched);
+
+ /* "checkout -m path" to recreate conflicted state */
+ if (opts->merge)
+ unmerge_marked_index(&the_index);
+
+ /* Any unmerged paths? */
+ for (pos = 0; pos < the_index.cache_nr; pos++) {
+ const struct cache_entry *ce = the_index.cache[pos];
+ if (ce->ce_flags & CE_MATCHED) {
+ if (!ce_stage(ce))
+ continue;
+ if (opts->ignore_unmerged) {
+ if (!opts->quiet)
+ warning(_("path '%s' is unmerged"), ce->name);
+ } else if (opts->writeout_stage) {
+ errs |= check_stage(opts->writeout_stage, ce, pos, opts->overlay_mode);
+ } else if (opts->merge) {
+ errs |= check_stages((1<<2) | (1<<3), ce, pos);
+ } else {
+ errs = 1;
+ error(_("path '%s' is unmerged"), ce->name);
+ }
+ pos = skip_same_name(ce, pos) - 1;
+ }
+ }
+ if (errs)
+ return 1;
+
+ /* Now we are committed to check them out */
+ if (opts->checkout_worktree)
+ errs |= checkout_worktree(opts, new_branch_info);
+ else
+ remove_marked_cache_entries(&the_index, 1);
+
+ /*
+ * Allow updating the index when checking out from the index.
+ * This is to save new stat info.
+ */
+ if (opts->checkout_worktree && !opts->checkout_index && !opts->source_tree)
+ checkout_index = 1;
+ else
+ checkout_index = opts->checkout_index;
+
+ if (checkout_index) {
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die(_("unable to write new index file"));
+ } else {
+ /*
+ * NEEDSWORK: if --worktree is not specified, we
+ * should save stat info of checked out files in the
+ * index to avoid the next (potentially costly)
+ * refresh. But it's a bit trickier to do...
+ */
+ rollback_lock_file(&lock_file);
+ }
+
+ read_ref_full("HEAD", 0, &rev, NULL);
+ head = lookup_commit_reference_gently(the_repository, &rev, 1);
+
+ errs |= post_checkout_hook(head, head, 0);
+ return errs;
+}
+
+static void show_local_changes(struct object *head,
+ const struct diff_options *opts)
+{
+ struct rev_info rev;
+ /* I think we want full paths, even if we're in a subdirectory. */
+ repo_init_revisions(the_repository, &rev, NULL);
+ rev.diffopt.flags = opts->flags;
+ rev.diffopt.output_format |= DIFF_FORMAT_NAME_STATUS;
+ rev.diffopt.flags.recursive = 1;
+ diff_setup_done(&rev.diffopt);
+ add_pending_object(&rev, head, NULL);
+ run_diff_index(&rev, 0);
+ release_revisions(&rev);
+}
+
+static void describe_detached_head(const char *msg, struct commit *commit)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ if (!parse_commit(commit))
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, &sb);
+ if (print_sha1_ellipsis()) {
+ fprintf(stderr, "%s %s... %s\n", msg,
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
+ } else {
+ fprintf(stderr, "%s %s %s\n", msg,
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
+ }
+ strbuf_release(&sb);
+}
+
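+/*
+ * Replace the index (and, when "worktree" is set, the working tree)
+ * with the contents of the given tree via a one-way unpack_trees()
+ * merge.
+ */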
+static int reset_tree(struct tree *tree, const struct checkout_opts *o,
+ int worktree, int *writeout_error,
+ struct branch_info *info)
+{
+ struct unpack_trees_options opts;
+ struct tree_desc tree_desc;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.head_idx = -1;
+ opts.update = worktree;
+ opts.skip_unmerged = !worktree;
+ opts.reset = o->force ? UNPACK_RESET_OVERWRITE_UNTRACKED :
+ UNPACK_RESET_PROTECT_UNTRACKED;
+ opts.preserve_ignored = (!o->force && !o->overwrite_ignore);
+ opts.merge = 1;
+ opts.fn = oneway_merge;
+ opts.verbose_update = o->show_progress;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ init_checkout_metadata(&opts.meta, info->refname,
+ info->commit ? &info->commit->object.oid : null_oid(),
+ NULL);
+ parse_tree(tree);
+ init_tree_desc(&tree_desc, tree->buffer, tree->size);
+ switch (unpack_trees(1, &tree_desc, &opts)) {
+ case -2:
+ *writeout_error = 1;
+ /*
+ * We return 0 nevertheless, as the index is all right
+ * and more importantly we have made best efforts to
+ * update paths in the work tree, and we cannot revert
+ * them.
+ */
+ /* fallthrough */
+ case 0:
+ return 0;
+ default:
+ return 128;
+ }
+}
+
+static void setup_branch_path(struct branch_info *branch)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ /*
+ * If this is a ref, resolve it; otherwise, look up the OID for our
+ * expression. Failure here is okay.
+ */
+ if (!dwim_ref(branch->name, strlen(branch->name), &branch->oid, &branch->refname, 0))
+ repo_get_oid_committish(the_repository, branch->name, &branch->oid);
+
+ strbuf_branchname(&buf, branch->name, INTERPRET_BRANCH_LOCAL);
+ if (strcmp(buf.buf, branch->name)) {
+ free(branch->name);
+ branch->name = xstrdup(buf.buf);
+ }
+ strbuf_splice(&buf, 0, 0, "refs/heads/", 11);
+ free(branch->path);
+ branch->path = strbuf_detach(&buf, NULL);
+}
+
+static void init_topts(struct unpack_trees_options *topts, int merge,
+ int show_progress, int overwrite_ignore,
+ struct commit *old_commit)
+{
+ memset(topts, 0, sizeof(*topts));
+ topts->head_idx = -1;
+ topts->src_index = &the_index;
+ topts->dst_index = &the_index;
+
+ setup_unpack_trees_porcelain(topts, "checkout");
+
+ topts->initial_checkout = is_index_unborn(&the_index);
+ topts->update = 1;
+ topts->merge = 1;
+ topts->quiet = merge && old_commit;
+ topts->verbose_update = show_progress;
+ topts->fn = twoway_merge;
+ topts->preserve_ignored = !overwrite_ignore;
+}
+
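+/*
+ * Bring the index and working tree to the new branch: with
+ * --discard-changes do a one-way reset, otherwise try a two-way
+ * unpack_trees() merge and, if that fails and -m was given, fall
+ * back to a real three-way merge using the old branch as the base.
+ */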
+static int merge_working_tree(const struct checkout_opts *opts,
+ struct branch_info *old_branch_info,
+ struct branch_info *new_branch_info,
+ int *writeout_error)
+{
+ int ret;
+ struct lock_file lock_file = LOCK_INIT;
+ struct tree *new_tree;
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+ if (repo_read_index_preload(the_repository, NULL, 0) < 0)
+ return error(_("index file corrupt"));
+
+ resolve_undo_clear_index(&the_index);
+ if (opts->new_orphan_branch && opts->orphan_from_empty_tree) {
+ if (new_branch_info->commit)
+ BUG("'switch --orphan' should never accept a commit as starting point");
+ new_tree = parse_tree_indirect(the_hash_algo->empty_tree);
+ } else
+ new_tree = get_commit_tree(new_branch_info->commit);
+ if (opts->discard_changes) {
+ ret = reset_tree(new_tree, opts, 1, writeout_error, new_branch_info);
+ if (ret)
+ return ret;
+ } else {
+ struct tree_desc trees[2];
+ struct tree *tree;
+ struct unpack_trees_options topts;
+ const struct object_id *old_commit_oid;
+
+ refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
+
+ if (unmerged_index(&the_index)) {
+ error(_("you need to resolve your current index first"));
+ return 1;
+ }
+
+ /* 2-way merge to the new branch */
+ init_topts(&topts, opts->merge, opts->show_progress,
+ opts->overwrite_ignore, old_branch_info->commit);
+ init_checkout_metadata(&topts.meta, new_branch_info->refname,
+ new_branch_info->commit ?
+ &new_branch_info->commit->object.oid :
+ &new_branch_info->oid, NULL);
+
+ old_commit_oid = old_branch_info->commit ?
+ &old_branch_info->commit->object.oid :
+ the_hash_algo->empty_tree;
+ tree = parse_tree_indirect(old_commit_oid);
+ if (!tree)
+ die(_("unable to parse commit %s"),
+ oid_to_hex(old_commit_oid));
+
+ init_tree_desc(&trees[0], tree->buffer, tree->size);
+ parse_tree(new_tree);
+ tree = new_tree;
+ init_tree_desc(&trees[1], tree->buffer, tree->size);
+
+ ret = unpack_trees(2, trees, &topts);
+ clear_unpack_trees_porcelain(&topts);
+ if (ret == -1) {
+ /*
+ * Unpack couldn't do a trivial merge; either
+ * give up or do a real merge, depending on
+ * whether the merge flag was used.
+ */
+ struct tree *work;
+ struct tree *old_tree;
+ struct merge_options o;
+ struct strbuf sb = STRBUF_INIT;
+ struct strbuf old_commit_shortname = STRBUF_INIT;
+
+ if (!opts->merge)
+ return 1;
+
+ /*
+ * Without old_branch_info->commit, the below is the same as
+ * the two-tree unpack we already tried and failed.
+ */
+ if (!old_branch_info->commit)
+ return 1;
+ old_tree = get_commit_tree(old_branch_info->commit);
+
+ if (repo_index_has_changes(the_repository, old_tree, &sb))
+ die(_("cannot continue with staged changes in "
+ "the following files:\n%s"), sb.buf);
+ strbuf_release(&sb);
+
+ /* Do more real merge */
+
+ /*
+ * We update the index fully, then write the
+ * tree from the index, then merge the new
+ * branch with the current tree, with the old
+ * branch as the base. Then we reset the index
+ * (but not the working tree) to the new
+ * branch, leaving the working tree as the
+ * merged version, but skipping unmerged
+ * entries in the index.
+ */
+
+ add_files_to_cache(NULL, NULL, 0);
+ init_merge_options(&o, the_repository);
+ o.verbosity = 0;
+ work = write_in_core_index_as_tree(the_repository);
+
+ ret = reset_tree(new_tree,
+ opts, 1,
+ writeout_error, new_branch_info);
+ if (ret)
+ return ret;
+ o.ancestor = old_branch_info->name;
+ if (!old_branch_info->name) {
+ strbuf_add_unique_abbrev(&old_commit_shortname,
+ &old_branch_info->commit->object.oid,
+ DEFAULT_ABBREV);
+ o.ancestor = old_commit_shortname.buf;
+ }
+ o.branch1 = new_branch_info->name;
+ o.branch2 = "local";
+ ret = merge_trees(&o,
+ new_tree,
+ work,
+ old_tree);
+ if (ret < 0)
+ exit(128);
+ ret = reset_tree(new_tree,
+ opts, 0,
+ writeout_error, new_branch_info);
+ strbuf_release(&o.obuf);
+ strbuf_release(&old_commit_shortname);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (!cache_tree_fully_valid(the_index.cache_tree))
+ cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die(_("unable to write new index file"));
+
+ if (!opts->discard_changes && !opts->quiet && new_branch_info->commit)
+ show_local_changes(&new_branch_info->commit->object, &opts->diff_options);
+
+ return 0;
+}
+
+static void report_tracking(struct branch_info *new_branch_info)
+{
+ struct strbuf sb = STRBUF_INIT;
+ struct branch *branch = branch_get(new_branch_info->name);
+
+ if (!format_tracking_info(branch, &sb, AHEAD_BEHIND_FULL))
+ return;
+ fputs(sb.buf, stdout);
+ strbuf_release(&sb);
+}
+
+static void update_refs_for_switch(const struct checkout_opts *opts,
+ struct branch_info *old_branch_info,
+ struct branch_info *new_branch_info)
+{
+ struct strbuf msg = STRBUF_INIT;
+ const char *old_desc, *reflog_msg;
+ if (opts->new_branch) {
+ if (opts->new_orphan_branch) {
+ char *refname;
+
+ refname = mkpathdup("refs/heads/%s", opts->new_orphan_branch);
+ if (opts->new_branch_log &&
+ !should_autocreate_reflog(refname)) {
+ int ret;
+ struct strbuf err = STRBUF_INIT;
+
+ ret = safe_create_reflog(refname, &err);
+ if (ret) {
+ fprintf(stderr, _("Can not do reflog for '%s': %s\n"),
+ opts->new_orphan_branch, err.buf);
+ strbuf_release(&err);
+ free(refname);
+ return;
+ }
+ strbuf_release(&err);
+ }
+ free(refname);
+ }
+ else
+ create_branch(the_repository,
+ opts->new_branch, new_branch_info->name,
+ opts->new_branch_force ? 1 : 0,
+ opts->new_branch_force ? 1 : 0,
+ opts->new_branch_log,
+ opts->quiet,
+ opts->track,
+ 0);
+ free(new_branch_info->name);
+ free(new_branch_info->refname);
+ new_branch_info->name = xstrdup(opts->new_branch);
+ setup_branch_path(new_branch_info);
+ }
+
+ old_desc = old_branch_info->name;
+ if (!old_desc && old_branch_info->commit)
+ old_desc = oid_to_hex(&old_branch_info->commit->object.oid);
+
+ reflog_msg = getenv("GIT_REFLOG_ACTION");
+ if (!reflog_msg)
+ strbuf_addf(&msg, "checkout: moving from %s to %s",
+ old_desc ? old_desc : "(invalid)", new_branch_info->name);
+ else
+ strbuf_insertstr(&msg, 0, reflog_msg);
+
+ if (!strcmp(new_branch_info->name, "HEAD") && !new_branch_info->path && !opts->force_detach) {
+ /* Nothing to do. */
+ } else if (opts->force_detach || !new_branch_info->path) { /* No longer on any branch. */
+ update_ref(msg.buf, "HEAD", &new_branch_info->commit->object.oid, NULL,
+ REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
+ if (!opts->quiet) {
+ if (old_branch_info->path &&
+ advice_enabled(ADVICE_DETACHED_HEAD) && !opts->force_detach)
+ detach_advice(new_branch_info->name);
+ describe_detached_head(_("HEAD is now at"), new_branch_info->commit);
+ }
+ } else if (new_branch_info->path) { /* Switch branches. */
+ if (create_symref("HEAD", new_branch_info->path, msg.buf) < 0)
+ die(_("unable to update HEAD"));
+ if (!opts->quiet) {
+ if (old_branch_info->path && !strcmp(new_branch_info->path, old_branch_info->path)) {
+ if (opts->new_branch_force)
+ fprintf(stderr, _("Reset branch '%s'\n"),
+ new_branch_info->name);
+ else
+ fprintf(stderr, _("Already on '%s'\n"),
+ new_branch_info->name);
+ } else if (opts->new_branch) {
+ if (opts->branch_exists)
+ fprintf(stderr, _("Switched to and reset branch '%s'\n"), new_branch_info->name);
+ else
+ fprintf(stderr, _("Switched to a new branch '%s'\n"), new_branch_info->name);
+ } else {
+ fprintf(stderr, _("Switched to branch '%s'\n"),
+ new_branch_info->name);
+ }
+ }
+ if (old_branch_info->path && old_branch_info->name) {
+ if (!ref_exists(old_branch_info->path) && reflog_exists(old_branch_info->path))
+ delete_reflog(old_branch_info->path);
+ }
+ }
+ remove_branch_state(the_repository, !opts->quiet);
+ strbuf_release(&msg);
+ if (!opts->quiet &&
+ (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+ report_tracking(new_branch_info);
+}
+
+static int add_pending_uninteresting_ref(const char *refname,
+ const struct object_id *oid,
+ int flags UNUSED, void *cb_data)
+{
+ add_pending_oid(cb_data, refname, oid, UNINTERESTING);
+ return 0;
+}
+
+static void describe_one_orphan(struct strbuf *sb, struct commit *commit)
+{
+ strbuf_addstr(sb, " ");
+ strbuf_add_unique_abbrev(sb, &commit->object.oid, DEFAULT_ABBREV);
+ strbuf_addch(sb, ' ');
+ if (!parse_commit(commit))
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, sb);
+ strbuf_addch(sb, '\n');
+}
+
+#define ORPHAN_CUTOFF 4
+static void suggest_reattach(struct commit *commit, struct rev_info *revs)
+{
+ struct commit *c, *last = NULL;
+ struct strbuf sb = STRBUF_INIT;
+ int lost = 0;
+ while ((c = get_revision(revs)) != NULL) {
+ if (lost < ORPHAN_CUTOFF)
+ describe_one_orphan(&sb, c);
+ last = c;
+ lost++;
+ }
+ if (ORPHAN_CUTOFF < lost) {
+ int more = lost - ORPHAN_CUTOFF;
+ if (more == 1)
+ describe_one_orphan(&sb, last);
+ else
+ strbuf_addf(&sb, _(" ... and %d more.\n"), more);
+ }
+
+ fprintf(stderr,
+ Q_(
+ /* The singular version */
+ "Warning: you are leaving %d commit behind, "
+ "not connected to\n"
+ "any of your branches:\n\n"
+ "%s\n",
+ /* The plural version */
+ "Warning: you are leaving %d commits behind, "
+ "not connected to\n"
+ "any of your branches:\n\n"
+ "%s\n",
+ /* Give ngettext() the count */
+ lost),
+ lost,
+ sb.buf);
+ strbuf_release(&sb);
+
+ if (advice_enabled(ADVICE_DETACHED_HEAD))
+ fprintf(stderr,
+ Q_(
+ /* The singular version */
+ "If you want to keep it by creating a new branch, "
+ "this may be a good time\nto do so with:\n\n"
+ " git branch <new-branch-name> %s\n\n",
+ /* The plural version */
+ "If you want to keep them by creating a new branch, "
+ "this may be a good time\nto do so with:\n\n"
+ " git branch <new-branch-name> %s\n\n",
+ /* Give ngettext() the count */
+ lost),
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+}
+
+/*
+ * We are about to leave the commit that was at the tip of a detached
+ * HEAD. If it is not reachable from any ref, this is the user's last
+ * chance to create a new branch for it without resorting to the reflog.
+ */
+static void orphaned_commit_warning(struct commit *old_commit, struct commit *new_commit)
+{
+ struct rev_info revs;
+ struct object *object = &old_commit->object;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ setup_revisions(0, NULL, &revs, NULL);
+
+ object->flags &= ~UNINTERESTING;
+ add_pending_object(&revs, object, oid_to_hex(&object->oid));
+
+ for_each_ref(add_pending_uninteresting_ref, &revs);
+ if (new_commit)
+ add_pending_oid(&revs, "HEAD",
+ &new_commit->object.oid,
+ UNINTERESTING);
+
+ if (prepare_revision_walk(&revs))
+ die(_("internal error in revision walk"));
+ if (!(old_commit->object.flags & UNINTERESTING))
+ suggest_reattach(old_commit, &revs);
+ else
+ describe_detached_head(_("Previous HEAD position was"), old_commit);
+
+ /* Clean up objects used, as they will be reused. */
+ repo_clear_commit_marks(the_repository, ALL_REV_FLAGS);
+ release_revisions(&revs);
+}
+
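+/*
+ * Switch HEAD to the requested branch or commit: resolve the current
+ * HEAD, merge the working tree toward the new commit when needed,
+ * warn about commits that would become unreachable, then update the
+ * refs and run the post-checkout hook.
+ */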
+static int switch_branches(const struct checkout_opts *opts,
+ struct branch_info *new_branch_info)
+{
+ int ret = 0;
+ struct branch_info old_branch_info = { 0 };
+ struct object_id rev;
+ int flag, writeout_error = 0;
+ int do_merge = 1;
+
+ trace2_cmd_mode("branch");
+
+ memset(&old_branch_info, 0, sizeof(old_branch_info));
+ old_branch_info.path = resolve_refdup("HEAD", 0, &rev, &flag);
+ if (old_branch_info.path)
+ old_branch_info.commit = lookup_commit_reference_gently(the_repository, &rev, 1);
+ if (!(flag & REF_ISSYMREF))
+ FREE_AND_NULL(old_branch_info.path);
+
+ if (old_branch_info.path) {
+ const char *const prefix = "refs/heads/";
+ const char *p;
+ if (skip_prefix(old_branch_info.path, prefix, &p))
+ old_branch_info.name = xstrdup(p);
+ }
+
+ if (opts->new_orphan_branch && opts->orphan_from_empty_tree) {
+ if (new_branch_info->name)
+ BUG("'switch --orphan' should never accept a commit as starting point");
+ new_branch_info->commit = NULL;
+ new_branch_info->name = xstrdup("(empty)");
+ do_merge = 1;
+ }
+
+ if (!new_branch_info->name) {
+ new_branch_info->name = xstrdup("HEAD");
+ new_branch_info->commit = old_branch_info.commit;
+ if (!new_branch_info->commit)
+ die(_("You are on a branch yet to be born"));
+ parse_commit_or_die(new_branch_info->commit);
+
+ if (opts->only_merge_on_switching_branches)
+ do_merge = 0;
+ }
+
+ if (do_merge) {
+ ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
+ if (ret) {
+ branch_info_release(&old_branch_info);
+ return ret;
+ }
+ }
+
+ if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit)
+ orphaned_commit_warning(old_branch_info.commit, new_branch_info->commit);
+
+ update_refs_for_switch(opts, &old_branch_info, new_branch_info);
+
+ ret = post_checkout_hook(old_branch_info.commit, new_branch_info->commit, 1);
+ branch_info_release(&old_branch_info);
+
+ return ret || writeout_error;
+}
+
+static int git_checkout_config(const char *var, const char *value, void *cb)
+{
+ struct checkout_opts *opts = cb;
+
+ if (!strcmp(var, "diff.ignoresubmodules")) {
+ handle_ignore_submodules_arg(&opts->diff_options, value);
+ return 0;
+ }
+ if (!strcmp(var, "checkout.guess")) {
+ opts->dwim_new_local_branch = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (starts_with(var, "submodule."))
+ return git_default_submodule_config(var, value, NULL);
+
+ return git_xmerge_config(var, value, NULL);
+}
+
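+/*
+ * Fill in new_branch_info from "arg": resolve it to an existing
+ * branch ref when possible, look up the commit it points at, and
+ * record the tree to check paths out of (the commit's tree, or the
+ * object itself when a tree was named directly).
+ */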
+static void setup_new_branch_info_and_source_tree(
+ struct branch_info *new_branch_info,
+ struct checkout_opts *opts,
+ struct object_id *rev,
+ const char *arg)
+{
+ struct tree **source_tree = &opts->source_tree;
+ struct object_id branch_rev;
+
+ new_branch_info->name = xstrdup(arg);
+ setup_branch_path(new_branch_info);
+
+ if (!check_refname_format(new_branch_info->path, 0) &&
+ !read_ref(new_branch_info->path, &branch_rev))
+ oidcpy(rev, &branch_rev);
+ else
+ /* not an existing branch */
+ FREE_AND_NULL(new_branch_info->path);
+
+ new_branch_info->commit = lookup_commit_reference_gently(the_repository, rev, 1);
+ if (!new_branch_info->commit) {
+ /* not a commit */
+ *source_tree = parse_tree_indirect(rev);
+ } else {
+ parse_commit_or_die(new_branch_info->commit);
+ *source_tree = get_commit_tree(new_branch_info->commit);
+ }
+}
+
+static const char *parse_remote_branch(const char *arg,
+ struct object_id *rev,
+ int could_be_checkout_paths)
+{
+ int num_matches = 0;
+ const char *remote = unique_tracking_name(arg, rev, &num_matches);
+
+ if (remote && could_be_checkout_paths) {
+ die(_("'%s' could be both a local file and a tracking branch.\n"
+ "Please use -- (and optionally --no-guess) to disambiguate"),
+ arg);
+ }
+
+ if (!remote && num_matches > 1) {
+ if (advice_enabled(ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME)) {
+ advise(_("If you meant to check out a remote tracking branch on, e.g. 'origin',\n"
+ "you can do so by fully qualifying the name with the --track option:\n"
+ "\n"
+ " git checkout --track origin/<name>\n"
+ "\n"
+ "If you'd like to always have checkouts of an ambiguous <name> prefer\n"
+ "one remote, e.g. the 'origin' remote, consider setting\n"
+ "checkout.defaultRemote=origin in your config."));
+ }
+
+ die(_("'%s' matched multiple (%d) remote tracking branches"),
+ arg, num_matches);
+ }
+
+ return remote;
+}
+
+static int parse_branchname_arg(int argc, const char **argv,
+ int dwim_new_local_branch_ok,
+ struct branch_info *new_branch_info,
+ struct checkout_opts *opts,
+ struct object_id *rev)
+{
+ const char **new_branch = &opts->new_branch;
+ int argcount = 0;
+ const char *arg;
+ int dash_dash_pos;
+ int has_dash_dash = 0;
+ int i;
+
+ /*
+ * case 1: git checkout <ref> -- [<paths>]
+ *
+ * <ref> must be a valid tree, everything after the '--' must be
+ * a path.
+ *
+ * case 2: git checkout -- [<paths>]
+ *
+ * everything after the '--' must be paths.
+ *
+ * case 3: git checkout <something> [--]
+ *
+ * (a) If <something> is a commit, that is to
+ * switch to the branch or detach HEAD at it. As a special case,
+ * if <something> is A...B (missing A or B means HEAD but you can
+ * omit at most one side), and if there is a unique merge base
+ * between A and B, A...B names that merge base.
+ *
+ * (b) If <something> is _not_ a commit, either "--" is present
+ * or <something> is not a path, no -t or -b was given,
+ * and there is a tracking branch whose name is <something>
+ * in one and only one remote (or if the branch exists on the
+ * remote named in checkout.defaultRemote), then this is a
+ * short-hand to fork local <something> from that
+ * remote-tracking branch.
+ *
+ * (c) Otherwise, if "--" is present, treat it like case (1).
+ *
+ * (d) Otherwise :
+ * - if it's a reference, treat it like case (1)
+ * - else if it's a path, treat it like case (2)
+ * - else: fail.
+ *
+ * case 4: git checkout <something> <paths>
+ *
+ * The first argument must not be ambiguous.
+ * - If it's *only* a reference, treat it like case (1).
+ * - If it's only a path, treat it like case (2).
+ * - else: fail.
+ *
+ */
+ if (!argc)
+ return 0;
+
+ if (!opts->accept_pathspec) {
+ if (argc > 1)
+ die(_("only one reference expected"));
+ has_dash_dash = 1; /* helps disambiguate */
+ }
+
+ arg = argv[0];
+ dash_dash_pos = -1;
+ for (i = 0; i < argc; i++) {
+ if (opts->accept_pathspec && !strcmp(argv[i], "--")) {
+ dash_dash_pos = i;
+ break;
+ }
+ }
+ if (dash_dash_pos == 0)
+ return 1; /* case (2) */
+ else if (dash_dash_pos == 1)
+ has_dash_dash = 1; /* case (3) or (1) */
+ else if (dash_dash_pos >= 2)
+ die(_("only one reference expected, %d given."), dash_dash_pos);
+ opts->count_checkout_paths = !opts->quiet && !has_dash_dash;
+
+ if (!strcmp(arg, "-"))
+ arg = "@{-1}";
+
+ if (get_oid_mb(arg, rev)) {
+ /*
+ * Either case (3) or (4), with <something> not being
+ * a commit, or an attempt to use case (1) with an
+ * invalid ref.
+ *
+ * It's likely an error, but we need to find out if
+ * we should auto-create the branch, case (3).(b).
+ */
+ int recover_with_dwim = dwim_new_local_branch_ok;
+
+ int could_be_checkout_paths = !has_dash_dash &&
+ check_filename(opts->prefix, arg);
+
+ if (!has_dash_dash && !no_wildcard(arg))
+ recover_with_dwim = 0;
+
+ /*
+ * Accept "git checkout foo", "git checkout foo --"
+ * and "git switch foo" as candidates for dwim.
+ */
+ if (!(argc == 1 && !has_dash_dash) &&
+ !(argc == 2 && has_dash_dash) &&
+ opts->accept_pathspec)
+ recover_with_dwim = 0;
+
+ if (recover_with_dwim) {
+ const char *remote = parse_remote_branch(arg, rev,
+ could_be_checkout_paths);
+ if (remote) {
+ *new_branch = arg;
+ arg = remote;
+ /* DWIMmed to create local branch, case (3).(b) */
+ } else {
+ recover_with_dwim = 0;
+ }
+ }
+
+ if (!recover_with_dwim) {
+ if (has_dash_dash)
+ die(_("invalid reference: %s"), arg);
+ return argcount;
+ }
+ }
+
+ /* we can't end up being in (2) anymore, eat the argument */
+ argcount++;
+ argv++;
+ argc--;
+
+ setup_new_branch_info_and_source_tree(new_branch_info, opts, rev, arg);
+
+ if (!opts->source_tree) /* case (1): want a tree */
+ die(_("reference is not a tree: %s"), arg);
+
+ if (!has_dash_dash) { /* case (3).(d) -> (1) */
+ /*
+ * Do not complain about the most common case,
+ * git checkout branch
+ * even if there happens to be a file called 'branch';
+ * it would be extremely annoying.
+ */
+ if (argc)
+ verify_non_filename(opts->prefix, arg);
+ } else if (opts->accept_pathspec) {
+ argcount++;
+ argv++;
+ argc--;
+ }
+
+ return argcount;
+}
+
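+/*
+ * Creating a new branch while HEAD is unborn: simply repoint HEAD at
+ * the new branch ref; there is nothing to check out.
+ */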
+static int switch_unborn_to_new_branch(const struct checkout_opts *opts)
+{
+ int status;
+ struct strbuf branch_ref = STRBUF_INIT;
+
+ trace2_cmd_mode("unborn");
+
+ if (!opts->new_branch)
+ die(_("You are on a branch yet to be born"));
+ strbuf_addf(&branch_ref, "refs/heads/%s", opts->new_branch);
+ status = create_symref("HEAD", branch_ref.buf, "checkout -b");
+ strbuf_release(&branch_ref);
+ if (!opts->quiet)
+ fprintf(stderr, _("Switched to a new branch '%s'\n"),
+ opts->new_branch);
+ return status;
+}
+
+static void die_expecting_a_branch(const struct branch_info *branch_info)
+{
+ struct object_id oid;
+ char *to_free;
+ int code;
+
+ if (dwim_ref(branch_info->name, strlen(branch_info->name), &oid, &to_free, 0) == 1) {
+ const char *ref = to_free;
+
+ if (skip_prefix(ref, "refs/tags/", &ref))
+ code = die_message(_("a branch is expected, got tag '%s'"), ref);
+ else if (skip_prefix(ref, "refs/remotes/", &ref))
+ code = die_message(_("a branch is expected, got remote branch '%s'"), ref);
+ else
+ code = die_message(_("a branch is expected, got '%s'"), ref);
+ }
+ else if (branch_info->commit)
+ code = die_message(_("a branch is expected, got commit '%s'"), branch_info->name);
+ else
+ /*
+ * This case should never happen because we already die() on
+ * non-commit, but just in case.
+ */
+ code = die_message(_("a branch is expected, got '%s'"), branch_info->name);
+
+ if (advice_enabled(ADVICE_SUGGEST_DETACHING_HEAD))
+ advise(_("If you want to detach HEAD at the commit, try again with the --detach option."));
+
+ exit(code);
+}
+
+static void die_if_some_operation_in_progress(void)
+{
+ struct wt_status_state state;
+
+ memset(&state, 0, sizeof(state));
+ wt_status_get_state(the_repository, &state, 0);
+
+ if (state.merge_in_progress)
+ die(_("cannot switch branch while merging\n"
+ "Consider \"git merge --quit\" "
+ "or \"git worktree add\"."));
+ if (state.am_in_progress)
+ die(_("cannot switch branch in the middle of an am session\n"
+ "Consider \"git am --quit\" "
+ "or \"git worktree add\"."));
+ if (state.rebase_interactive_in_progress || state.rebase_in_progress)
+ die(_("cannot switch branch while rebasing\n"
+ "Consider \"git rebase --quit\" "
+ "or \"git worktree add\"."));
+ if (state.cherry_pick_in_progress)
+ die(_("cannot switch branch while cherry-picking\n"
+ "Consider \"git cherry-pick --quit\" "
+ "or \"git worktree add\"."));
+ if (state.revert_in_progress)
+ die(_("cannot switch branch while reverting\n"
+ "Consider \"git revert --quit\" "
+ "or \"git worktree add\"."));
+ if (state.bisect_in_progress)
+ warning(_("you are switching branch while bisecting"));
+}
+
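+/*
+ * Branch-switching mode: validate the combination of options, refuse
+ * to switch in the middle of another operation unless allowed, and
+ * hand off to switch_branches() (or switch_unborn_to_new_branch()
+ * when HEAD is unborn and a new branch was requested).
+ */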
+static int checkout_branch(struct checkout_opts *opts,
+ struct branch_info *new_branch_info)
+{
+ if (opts->pathspec.nr)
+ die(_("paths cannot be used with switching branches"));
+
+ if (opts->patch_mode)
+ die(_("'%s' cannot be used with switching branches"),
+ "--patch");
+
+ if (opts->overlay_mode != -1)
+ die(_("'%s' cannot be used with switching branches"),
+ "--[no]-overlay");
+
+ if (opts->writeout_stage)
+ die(_("'%s' cannot be used with switching branches"),
+ "--ours/--theirs");
+
+ if (opts->force && opts->merge)
+ die(_("'%s' cannot be used with '%s'"), "-f", "-m");
+
+ if (opts->discard_changes && opts->merge)
+ die(_("'%s' cannot be used with '%s'"), "--discard-changes", "--merge");
+
+ if (opts->force_detach && opts->new_branch)
+ die(_("'%s' cannot be used with '%s'"),
+ "--detach", "-b/-B/--orphan");
+
+ if (opts->new_orphan_branch) {
+ if (opts->track != BRANCH_TRACK_UNSPECIFIED)
+ die(_("'%s' cannot be used with '%s'"), "--orphan", "-t");
+ if (opts->orphan_from_empty_tree && new_branch_info->name)
+ die(_("'%s' cannot take <start-point>"), "--orphan");
+ } else if (opts->force_detach) {
+ if (opts->track != BRANCH_TRACK_UNSPECIFIED)
+ die(_("'%s' cannot be used with '%s'"), "--detach", "-t");
+ } else if (opts->track == BRANCH_TRACK_UNSPECIFIED)
+ opts->track = git_branch_track;
+
+ if (new_branch_info->name && !new_branch_info->commit)
+ die(_("Cannot switch branch to a non-commit '%s'"),
+ new_branch_info->name);
+
+ if (!opts->switch_branch_doing_nothing_is_ok &&
+ !new_branch_info->name &&
+ !opts->new_branch &&
+ !opts->force_detach)
+ die(_("missing branch or commit argument"));
+
+ if (!opts->implicit_detach &&
+ !opts->force_detach &&
+ !opts->new_branch &&
+ !opts->new_branch_force &&
+ new_branch_info->name &&
+ !new_branch_info->path)
+ die_expecting_a_branch(new_branch_info);
+
+ if (!opts->can_switch_when_in_progress)
+ die_if_some_operation_in_progress();
+
+ if (new_branch_info->path && !opts->force_detach && !opts->new_branch &&
+ !opts->ignore_other_worktrees) {
+ int flag;
+ char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
+ if (head_ref &&
+ (!(flag & REF_ISSYMREF) || strcmp(head_ref, new_branch_info->path)))
+ die_if_checked_out(new_branch_info->path, 1);
+ free(head_ref);
+ }
+
+ if (!new_branch_info->commit && opts->new_branch) {
+ struct object_id rev;
+ int flag;
+
+ if (!read_ref_full("HEAD", 0, &rev, &flag) &&
+ (flag & REF_ISSYMREF) && is_null_oid(&rev))
+ return switch_unborn_to_new_branch(opts);
+ }
+ return switch_branches(opts, new_branch_info);
+}
+
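+/*
+ * Each add_*_options() helper below appends its options to the array
+ * built so far, frees the previous array, and returns the combined
+ * one.
+ */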
+static struct option *add_common_options(struct checkout_opts *opts,
+ struct option *prevopts)
+{
+ struct option options[] = {
+ OPT__QUIET(&opts->quiet, N_("suppress progress reporting")),
+ OPT_CALLBACK_F(0, "recurse-submodules", NULL,
+ "checkout", "control recursive updating of submodules",
+ PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater),
+ OPT_BOOL(0, "progress", &opts->show_progress, N_("force progress reporting")),
+ OPT_BOOL('m', "merge", &opts->merge, N_("perform a 3-way merge with the new branch")),
+ OPT_STRING(0, "conflict", &opts->conflict_style, N_("style"),
+ N_("conflict style (merge, diff3, or zdiff3)")),
+ OPT_END()
+ };
+ struct option *newopts = parse_options_concat(prevopts, options);
+ free(prevopts);
+ return newopts;
+}
+
+static struct option *add_common_switch_branch_options(
+ struct checkout_opts *opts, struct option *prevopts)
+{
+ struct option options[] = {
+ OPT_BOOL('d', "detach", &opts->force_detach, N_("detach HEAD at named commit")),
+ OPT_CALLBACK_F('t', "track", &opts->track, "(direct|inherit)",
+ N_("set branch tracking configuration"),
+ PARSE_OPT_OPTARG,
+ parse_opt_tracking_mode),
+ OPT__FORCE(&opts->force, N_("force checkout (throw away local modifications)"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_STRING(0, "orphan", &opts->new_orphan_branch, N_("new-branch"), N_("new unparented branch")),
+ OPT_BOOL_F(0, "overwrite-ignore", &opts->overwrite_ignore,
+ N_("update ignored files (default)"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL(0, "ignore-other-worktrees", &opts->ignore_other_worktrees,
+ N_("do not check if another worktree is holding the given ref")),
+ OPT_END()
+ };
+ struct option *newopts = parse_options_concat(prevopts, options);
+ free(prevopts);
+ return newopts;
+}
+
+static struct option *add_checkout_path_options(struct checkout_opts *opts,
+ struct option *prevopts)
+{
+ struct option options[] = {
+ OPT_SET_INT_F('2', "ours", &opts->writeout_stage,
+ N_("checkout our version for unmerged files"),
+ 2, PARSE_OPT_NONEG),
+ OPT_SET_INT_F('3', "theirs", &opts->writeout_stage,
+ N_("checkout their version for unmerged files"),
+ 3, PARSE_OPT_NONEG),
+ OPT_BOOL('p', "patch", &opts->patch_mode, N_("select hunks interactively")),
+ OPT_BOOL(0, "ignore-skip-worktree-bits", &opts->ignore_skipworktree,
+ N_("do not limit pathspecs to sparse entries only")),
+ OPT_PATHSPEC_FROM_FILE(&opts->pathspec_from_file),
+ OPT_PATHSPEC_FILE_NUL(&opts->pathspec_file_nul),
+ OPT_END()
+ };
+ struct option *newopts = parse_options_concat(prevopts, options);
+ free(prevopts);
+ return newopts;
+}
+
+/* create-branch option (either b or c) */
+static char cb_option = 'b';
+
+static int checkout_main(int argc, const char **argv, const char *prefix,
+ struct checkout_opts *opts, struct option *options,
+ const char * const usagestr[],
+ struct branch_info *new_branch_info)
+{
+ int parseopt_flags = 0;
+
+ opts->overwrite_ignore = 1;
+ opts->prefix = prefix;
+ opts->show_progress = -1;
+
+ git_config(git_checkout_config, opts);
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
+ opts->track = BRANCH_TRACK_UNSPECIFIED;
+
+ if (!opts->accept_pathspec && !opts->accept_ref)
+ BUG("make up your mind, you need to take _something_");
+ if (opts->accept_pathspec && opts->accept_ref)
+ parseopt_flags = PARSE_OPT_KEEP_DASHDASH;
+
+ argc = parse_options(argc, argv, prefix, options,
+ usagestr, parseopt_flags);
+
+ if (opts->show_progress < 0) {
+ if (opts->quiet)
+ opts->show_progress = 0;
+ else
+ opts->show_progress = isatty(2);
+ }
+
+ if (opts->conflict_style) {
+ opts->merge = 1; /* implied */
+ git_xmerge_config("merge.conflictstyle", opts->conflict_style, NULL);
+ }
+ if (opts->force) {
+ opts->discard_changes = 1;
+ opts->ignore_unmerged_opt = "--force";
+ opts->ignore_unmerged = 1;
+ }
+
+ if ((!!opts->new_branch + !!opts->new_branch_force + !!opts->new_orphan_branch) > 1)
+ die(_("options '-%c', '-%c', and '%s' cannot be used together"),
+ cb_option, toupper(cb_option), "--orphan");
+
+ if (opts->overlay_mode == 1 && opts->patch_mode)
+ die(_("options '%s' and '%s' cannot be used together"), "-p", "--overlay");
+
+ if (opts->checkout_index >= 0 || opts->checkout_worktree >= 0) {
+ if (opts->checkout_index < 0)
+ opts->checkout_index = 0;
+ if (opts->checkout_worktree < 0)
+ opts->checkout_worktree = 0;
+ } else {
+ if (opts->checkout_index < 0)
+ opts->checkout_index = -opts->checkout_index - 1;
+ if (opts->checkout_worktree < 0)
+ opts->checkout_worktree = -opts->checkout_worktree - 1;
+ }
+ if (opts->checkout_index < 0 || opts->checkout_worktree < 0)
+ BUG("these flags should be non-negative by now");
+ /*
+ * convenient shortcut: "git restore --staged [--worktree]" equals
+ * "git restore --staged [--worktree] --source HEAD"
+ */
+ if (!opts->from_treeish && opts->checkout_index)
+ opts->from_treeish = "HEAD";
+
+ /*
+ * From here on, new_branch will contain the branch to be checked out,
+ * and new_branch_force and new_orphan_branch will tell us which one of
+ * -b/-B/-c/-C/--orphan is being used.
+ */
+ if (opts->new_branch_force)
+ opts->new_branch = opts->new_branch_force;
+
+ if (opts->new_orphan_branch)
+ opts->new_branch = opts->new_orphan_branch;
+
+ /* --track without -c/-C/-b/-B/--orphan should DWIM */
+ if (opts->track != BRANCH_TRACK_UNSPECIFIED && !opts->new_branch) {
+ const char *argv0 = argv[0];
+ if (!argc || !strcmp(argv0, "--"))
+ die(_("--track needs a branch name"));
+ skip_prefix(argv0, "refs/", &argv0);
+ skip_prefix(argv0, "remotes/", &argv0);
+ argv0 = strchr(argv0, '/');
+ if (!argv0 || !argv0[1])
+ die(_("missing branch name; try -%c"), cb_option);
+ opts->new_branch = argv0 + 1;
+ }
+
+ /*
+ * Extract branch name from command line arguments, so
+ * all that is left is pathspecs.
+ *
+ * Handle
+ *
+ * 1) git checkout <tree> -- [<paths>]
+ * 2) git checkout -- [<paths>]
+ * 3) git checkout <something> [<paths>]
+ *
+ * including "last branch" syntax and DWIM-ery for names of
+ * remote branches, erroring out for invalid or ambiguous cases.
+ */
+ if (argc && opts->accept_ref) {
+ struct object_id rev;
+ int dwim_ok =
+ !opts->patch_mode &&
+ opts->dwim_new_local_branch &&
+ opts->track == BRANCH_TRACK_UNSPECIFIED &&
+ !opts->new_branch;
+ int n = parse_branchname_arg(argc, argv, dwim_ok,
+ new_branch_info, opts, &rev);
+ argv += n;
+ argc -= n;
+ } else if (!opts->accept_ref && opts->from_treeish) {
+ struct object_id rev;
+
+ if (get_oid_mb(opts->from_treeish, &rev))
+ die(_("could not resolve %s"), opts->from_treeish);
+
+ setup_new_branch_info_and_source_tree(new_branch_info,
+ opts, &rev,
+ opts->from_treeish);
+
+ if (!opts->source_tree)
+ die(_("reference is not a tree: %s"), opts->from_treeish);
+ }
+
+ if (argc) {
+ parse_pathspec(&opts->pathspec, 0,
+ opts->patch_mode ? PATHSPEC_PREFIX_ORIGIN : 0,
+ prefix, argv);
+
+ if (!opts->pathspec.nr)
+ die(_("invalid path specification"));
+
+ /*
+ * Try to give more helpful suggestion.
+ * new_branch && argc > 1 will be caught later.
+ */
+ if (opts->new_branch && argc == 1 && !new_branch_info->commit)
+ die(_("'%s' is not a commit and a branch '%s' cannot be created from it"),
+ argv[0], opts->new_branch);
+
+ if (opts->force_detach)
+ die(_("git checkout: --detach does not take a path argument '%s'"),
+ argv[0]);
+ }
+
+ if (opts->pathspec_from_file) {
+ if (opts->pathspec.nr)
+ die(_("'%s' and pathspec arguments cannot be used together"), "--pathspec-from-file");
+
+ if (opts->force_detach)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--detach");
+
+ if (opts->patch_mode)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--patch");
+
+ parse_pathspec_file(&opts->pathspec, 0,
+ 0,
+ prefix, opts->pathspec_from_file, opts->pathspec_file_nul);
+ } else if (opts->pathspec_file_nul) {
+ die(_("the option '%s' requires '%s'"), "--pathspec-file-nul", "--pathspec-from-file");
+ }
+
+ opts->pathspec.recursive = 1;
+
+ if (opts->pathspec.nr) {
+ if (1 < !!opts->writeout_stage + !!opts->force + !!opts->merge)
+ die(_("git checkout: --ours/--theirs, --force and --merge are incompatible when\n"
+ "checking out of the index."));
+ } else {
+ if (opts->accept_pathspec && !opts->empty_pathspec_ok &&
+ !opts->patch_mode) /* patch mode is special */
+ die(_("you must specify path(s) to restore"));
+ }
+
+ if (opts->new_branch) {
+ struct strbuf buf = STRBUF_INIT;
+
+ if (opts->new_branch_force)
+ opts->branch_exists = validate_branchname(opts->new_branch, &buf);
+ else
+ opts->branch_exists =
+ validate_new_branchname(opts->new_branch, &buf, 0);
+ strbuf_release(&buf);
+ }
+
+ if (opts->patch_mode || opts->pathspec.nr)
+ return checkout_paths(opts, new_branch_info);
+ else
+ return checkout_branch(opts, new_branch_info);
+}
+
+int cmd_checkout(int argc, const char **argv, const char *prefix)
+{
+ struct checkout_opts opts;
+ struct option *options;
+ struct option checkout_options[] = {
+ OPT_STRING('b', NULL, &opts.new_branch, N_("branch"),
+ N_("create and checkout a new branch")),
+ OPT_STRING('B', NULL, &opts.new_branch_force, N_("branch"),
+ N_("create/reset and checkout a branch")),
+ OPT_BOOL('l', NULL, &opts.new_branch_log, N_("create reflog for new branch")),
+ OPT_BOOL(0, "guess", &opts.dwim_new_local_branch,
+ N_("second guess 'git checkout <no-such-branch>' (default)")),
+ OPT_BOOL(0, "overlay", &opts.overlay_mode, N_("use overlay mode (default)")),
+ OPT_END()
+ };
+ int ret;
+ struct branch_info new_branch_info = { 0 };
+
+ memset(&opts, 0, sizeof(opts));
+ opts.dwim_new_local_branch = 1;
+ opts.switch_branch_doing_nothing_is_ok = 1;
+ opts.only_merge_on_switching_branches = 0;
+ opts.accept_ref = 1;
+ opts.accept_pathspec = 1;
+ opts.implicit_detach = 1;
+ opts.can_switch_when_in_progress = 1;
+ opts.orphan_from_empty_tree = 0;
+ opts.empty_pathspec_ok = 1;
+ opts.overlay_mode = -1;
+ opts.checkout_index = -2; /* default on */
+ opts.checkout_worktree = -2; /* default on */
+
+ if (argc == 3 && !strcmp(argv[1], "-b")) {
+ /*
+ * User ran 'git checkout -b <branch>' and expects
+ * the same behavior as 'git switch -c <branch>'.
+ */
+ opts.switch_branch_doing_nothing_is_ok = 0;
+ opts.only_merge_on_switching_branches = 1;
+ }
+
+ options = parse_options_dup(checkout_options);
+ options = add_common_options(&opts, options);
+ options = add_common_switch_branch_options(&opts, options);
+ options = add_checkout_path_options(&opts, options);
+
+ ret = checkout_main(argc, argv, prefix, &opts,
+ options, checkout_usage, &new_branch_info);
+ branch_info_release(&new_branch_info);
+ clear_pathspec(&opts.pathspec);
+ FREE_AND_NULL(options);
+ return ret;
+}
+
+int cmd_switch(int argc, const char **argv, const char *prefix)
+{
+ struct checkout_opts opts;
+ struct option *options = NULL;
+ struct option switch_options[] = {
+ OPT_STRING('c', "create", &opts.new_branch, N_("branch"),
+ N_("create and switch to a new branch")),
+ OPT_STRING('C', "force-create", &opts.new_branch_force, N_("branch"),
+ N_("create/reset and switch to a branch")),
+ OPT_BOOL(0, "guess", &opts.dwim_new_local_branch,
+ N_("second guess 'git switch <no-such-branch>'")),
+ OPT_BOOL(0, "discard-changes", &opts.discard_changes,
+ N_("throw away local modifications")),
+ OPT_END()
+ };
+ int ret;
+ struct branch_info new_branch_info = { 0 };
+
+ memset(&opts, 0, sizeof(opts));
+ opts.dwim_new_local_branch = 1;
+ opts.accept_ref = 1;
+ opts.accept_pathspec = 0;
+ opts.switch_branch_doing_nothing_is_ok = 0;
+ opts.only_merge_on_switching_branches = 1;
+ opts.implicit_detach = 0;
+ opts.can_switch_when_in_progress = 0;
+ opts.orphan_from_empty_tree = 1;
+ opts.overlay_mode = -1;
+
+ options = parse_options_dup(switch_options);
+ options = add_common_options(&opts, options);
+ options = add_common_switch_branch_options(&opts, options);
+
+ cb_option = 'c';
+
+ ret = checkout_main(argc, argv, prefix, &opts,
+ options, switch_branch_usage, &new_branch_info);
+ branch_info_release(&new_branch_info);
+ FREE_AND_NULL(options);
+ return ret;
+}
+
+int cmd_restore(int argc, const char **argv, const char *prefix)
+{
+ struct checkout_opts opts;
+ struct option *options;
+ struct option restore_options[] = {
+ OPT_STRING('s', "source", &opts.from_treeish, "<tree-ish>",
+ N_("which tree-ish to checkout from")),
+ OPT_BOOL('S', "staged", &opts.checkout_index,
+ N_("restore the index")),
+ OPT_BOOL('W', "worktree", &opts.checkout_worktree,
+ N_("restore the working tree (default)")),
+ OPT_BOOL(0, "ignore-unmerged", &opts.ignore_unmerged,
+ N_("ignore unmerged entries")),
+ OPT_BOOL(0, "overlay", &opts.overlay_mode, N_("use overlay mode")),
+ OPT_END()
+ };
+ int ret;
+ struct branch_info new_branch_info = { 0 };
+
+ memset(&opts, 0, sizeof(opts));
+ opts.accept_ref = 0;
+ opts.accept_pathspec = 1;
+ opts.empty_pathspec_ok = 0;
+ opts.overlay_mode = 0;
+ opts.checkout_index = -1; /* default off */
+ opts.checkout_worktree = -2; /* default on */
+ opts.ignore_unmerged_opt = "--ignore-unmerged";
+
+ options = parse_options_dup(restore_options);
+ options = add_common_options(&opts, options);
+ options = add_checkout_path_options(&opts, options);
+
+ ret = checkout_main(argc, argv, prefix, &opts,
+ options, restore_usage, &new_branch_info);
+ branch_info_release(&new_branch_info);
+ FREE_AND_NULL(options);
+ return ret;
+}
diff --git a/builtin/clean.c b/builtin/clean.c
new file mode 100644
index 0000000..b2701a2
--- /dev/null
+++ b/builtin/clean.c
@@ -0,0 +1,1096 @@
+/*
+ * "git clean" builtin command
+ *
+ * Copyright (C) 2007 Shawn Bohrer
+ *
+ * Based on git-clean.sh by Pavel Roskin
+ */
+
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "quote.h"
+#include "column.h"
+#include "color.h"
+#include "pathspec.h"
+#include "help.h"
+#include "prompt.h"
+
+static int force = -1; /* unset */
+static int interactive;
+static struct string_list del_list = STRING_LIST_INIT_DUP;
+static unsigned int colopts;
+
+static const char *const builtin_clean_usage[] = {
+ N_("git clean [-d] [-f] [-i] [-n] [-q] [-e <pattern>] [-x | -X] [--] [<pathspec>...]"),
+ NULL
+};
+
+static const char *msg_remove = N_("Removing %s\n");
+static const char *msg_would_remove = N_("Would remove %s\n");
+static const char *msg_skip_git_dir = N_("Skipping repository %s\n");
+static const char *msg_would_skip_git_dir = N_("Would skip repository %s\n");
+static const char *msg_warn_remove_failed = N_("failed to remove %s");
+static const char *msg_warn_lstat_failed = N_("could not lstat %s\n");
+static const char *msg_skip_cwd = N_("Refusing to remove current working directory\n");
+static const char *msg_would_skip_cwd = N_("Would refuse to remove current working directory\n");
+
+enum color_clean {
+ CLEAN_COLOR_RESET = 0,
+ CLEAN_COLOR_PLAIN = 1,
+ CLEAN_COLOR_PROMPT = 2,
+ CLEAN_COLOR_HEADER = 3,
+ CLEAN_COLOR_HELP = 4,
+ CLEAN_COLOR_ERROR = 5
+};
+
+static const char *color_interactive_slots[] = {
+ [CLEAN_COLOR_ERROR] = "error",
+ [CLEAN_COLOR_HEADER] = "header",
+ [CLEAN_COLOR_HELP] = "help",
+ [CLEAN_COLOR_PLAIN] = "plain",
+ [CLEAN_COLOR_PROMPT] = "prompt",
+ [CLEAN_COLOR_RESET] = "reset",
+};
+
+static int clean_use_color = -1;
+static char clean_colors[][COLOR_MAXLEN] = {
+ [CLEAN_COLOR_ERROR] = GIT_COLOR_BOLD_RED,
+ [CLEAN_COLOR_HEADER] = GIT_COLOR_BOLD,
+ [CLEAN_COLOR_HELP] = GIT_COLOR_BOLD_RED,
+ [CLEAN_COLOR_PLAIN] = GIT_COLOR_NORMAL,
+ [CLEAN_COLOR_PROMPT] = GIT_COLOR_BOLD_BLUE,
+ [CLEAN_COLOR_RESET] = GIT_COLOR_RESET,
+};
+
+#define MENU_OPTS_SINGLETON 01
+#define MENU_OPTS_IMMEDIATE 02
+#define MENU_OPTS_LIST_ONLY 04
+
+struct menu_opts {
+ const char *header;
+ const char *prompt;
+ int flags;
+};
+
+#define MENU_RETURN_NO_LOOP 10
+
+struct menu_item {
+ char hotkey;
+ const char *title;
+ int selected;
+ int (*fn)(void);
+};
+
+enum menu_stuff_type {
+ MENU_STUFF_TYPE_STRING_LIST = 1,
+ MENU_STUFF_TYPE_MENU_ITEM
+};
+
+struct menu_stuff {
+ enum menu_stuff_type type;
+ int nr;
+ void *stuff;
+};
+
+define_list_config_array(color_interactive_slots);
+
+static int git_clean_config(const char *var, const char *value, void *cb)
+{
+ const char *slot_name;
+
+ if (starts_with(var, "column."))
+ return git_column_config(var, value, "clean", &colopts);
+
+ /* honors the color.interactive* config variables, which are also
+ used by git-add--interactive and git-stash */
+ if (!strcmp(var, "color.interactive")) {
+ clean_use_color = git_config_colorbool(var, value);
+ return 0;
+ }
+ if (skip_prefix(var, "color.interactive.", &slot_name)) {
+ int slot = LOOKUP_CONFIG(color_interactive_slots, slot_name);
+ if (slot < 0)
+ return 0;
+ if (!value)
+ return config_error_nonbool(var);
+ return color_parse(value, clean_colors[slot]);
+ }
+
+ if (!strcmp(var, "clean.requireforce")) {
+ force = !git_config_bool(var, value);
+ return 0;
+ }
+
+ /* inspect the color.ui config variable and others */
+ return git_color_default_config(var, value, cb);
+}
+
+static const char *clean_get_color(enum color_clean ix)
+{
+ if (want_color(clean_use_color))
+ return clean_colors[ix];
+ return "";
+}
+
+static void clean_print_color(enum color_clean ix)
+{
+ printf("%s", clean_get_color(ix));
+}
+
+static int exclude_cb(const struct option *opt, const char *arg, int unset)
+{
+ struct string_list *exclude_list = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ string_list_append(exclude_list, arg);
+ return 0;
+}
+
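+/*
+ * Recursively remove the directory at "path".  With
+ * REMOVE_DIR_KEEP_NESTED_GIT in force_flag, a nested non-bare
+ * repository is skipped, and the original working directory is never
+ * removed.  "*dir_gone" tells the caller whether the directory itself
+ * is gone; the return value is non-zero if any removal failed.
+ */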
+static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
+ int dry_run, int quiet, int *dir_gone)
+{
+ DIR *dir;
+ struct strbuf quoted = STRBUF_INIT;
+ struct strbuf realpath = STRBUF_INIT;
+ struct strbuf real_ocwd = STRBUF_INIT;
+ struct dirent *e;
+ int res = 0, ret = 0, gone = 1, original_len = path->len, len;
+ struct string_list dels = STRING_LIST_INIT_DUP;
+
+ *dir_gone = 1;
+
+ if ((force_flag & REMOVE_DIR_KEEP_NESTED_GIT) &&
+ is_nonbare_repository_dir(path)) {
+ if (!quiet) {
+ quote_path(path->buf, prefix, &quoted, 0);
+ printf(dry_run ? _(msg_would_skip_git_dir) : _(msg_skip_git_dir),
+ quoted.buf);
+ }
+
+ *dir_gone = 0;
+ goto out;
+ }
+
+ dir = opendir(path->buf);
+ if (!dir) {
+ /* an empty dir could be removed even if it is unreadable */
+ res = dry_run ? 0 : rmdir(path->buf);
+ if (res) {
+ int saved_errno = errno;
+ quote_path(path->buf, prefix, &quoted, 0);
+ errno = saved_errno;
+ warning_errno(_(msg_warn_remove_failed), quoted.buf);
+ *dir_gone = 0;
+ }
+ ret = res;
+ goto out;
+ }
+
+ strbuf_complete(path, '/');
+
+ len = path->len;
+ while ((e = readdir_skip_dot_and_dotdot(dir)) != NULL) {
+ struct stat st;
+
+ strbuf_setlen(path, len);
+ strbuf_addstr(path, e->d_name);
+ if (lstat(path->buf, &st))
+ warning_errno(_(msg_warn_lstat_failed), path->buf);
+ else if (S_ISDIR(st.st_mode)) {
+ if (remove_dirs(path, prefix, force_flag, dry_run, quiet, &gone))
+ ret = 1;
+ if (gone) {
+ quote_path(path->buf, prefix, &quoted, 0);
+ string_list_append(&dels, quoted.buf);
+ } else
+ *dir_gone = 0;
+ continue;
+ } else {
+ res = dry_run ? 0 : unlink(path->buf);
+ if (!res) {
+ quote_path(path->buf, prefix, &quoted, 0);
+ string_list_append(&dels, quoted.buf);
+ } else {
+ int saved_errno = errno;
+ quote_path(path->buf, prefix, &quoted, 0);
+ errno = saved_errno;
+ warning_errno(_(msg_warn_remove_failed), quoted.buf);
+ *dir_gone = 0;
+ ret = 1;
+ }
+ continue;
+ }
+
+ /* path too long, stat fails, or non-directory still exists */
+ *dir_gone = 0;
+ ret = 1;
+ break;
+ }
+ closedir(dir);
+
+ strbuf_setlen(path, original_len);
+
+ if (*dir_gone) {
+ /*
+ * Normalize path components in path->buf, e.g. change '\' to
+ * '/' on Windows.
+ */
+ strbuf_realpath(&realpath, path->buf, 1);
+
+ /*
+ * path and realpath are absolute; for comparison, we would
+ * like to transform startup_info->original_cwd to an absolute
+ * path too.
+ */
+ if (startup_info->original_cwd)
+ strbuf_realpath(&real_ocwd,
+ startup_info->original_cwd, 1);
+
+ if (!strbuf_cmp(&realpath, &real_ocwd)) {
+ printf("%s", dry_run ? _(msg_would_skip_cwd) : _(msg_skip_cwd));
+ *dir_gone = 0;
+ } else {
+ res = dry_run ? 0 : rmdir(path->buf);
+ if (!res)
+ *dir_gone = 1;
+ else {
+ int saved_errno = errno;
+ quote_path(path->buf, prefix, &quoted, 0);
+ errno = saved_errno;
+ warning_errno(_(msg_warn_remove_failed), quoted.buf);
+ *dir_gone = 0;
+ ret = 1;
+ }
+ }
+ }
+
+ if (!*dir_gone && !quiet) {
+ int i;
+ for (i = 0; i < dels.nr; i++)
+ printf(dry_run ? _(msg_would_remove) : _(msg_remove), dels.items[i].string);
+ }
+out:
+ strbuf_release(&realpath);
+ strbuf_release(&real_ocwd);
+ strbuf_release(&quoted);
+ string_list_clear(&dels, 0);
+ return ret;
+}
+
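+/*
+ * Show the current deletion candidates (del_list) in columns, with
+ * each path quoted for display.
+ */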
+static void pretty_print_dels(void)
+{
+ struct string_list list = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+ struct strbuf buf = STRBUF_INIT;
+ const char *qname;
+ struct column_options copts;
+
+ for_each_string_list_item(item, &del_list) {
+ qname = quote_path(item->string, NULL, &buf, 0);
+ string_list_append(&list, qname);
+ }
+
+ /*
+ * always enable column display; we only consult column.*
+ * for the layout strategy
+ */
+ colopts = (colopts & ~COL_ENABLE_MASK) | COL_ENABLED;
+ memset(&copts, 0, sizeof(copts));
+ copts.indent = " ";
+ copts.padding = 2;
+ print_columns(&list, colopts, &copts);
+ strbuf_release(&buf);
+ string_list_clear(&list, 0);
+}
+
+static void pretty_print_menus(struct string_list *menu_list)
+{
+ unsigned int local_colopts = 0;
+ struct column_options copts;
+
+ local_colopts = COL_ENABLED | COL_ROW;
+ memset(&copts, 0, sizeof(copts));
+ copts.indent = " ";
+ copts.padding = 2;
+ print_columns(menu_list, local_colopts, &copts);
+}
+
+static void prompt_help_cmd(int singleton)
+{
+ clean_print_color(CLEAN_COLOR_HELP);
+ printf(singleton ?
+ _("Prompt help:\n"
+ "1 - select a numbered item\n"
+ "foo - select item based on unique prefix\n"
+ " - (empty) select nothing\n") :
+ _("Prompt help:\n"
+ "1 - select a single item\n"
+ "3-5 - select a range of items\n"
+ "2-3,6-9 - select multiple ranges\n"
+ "foo - select item based on unique prefix\n"
+ "-... - unselect specified items\n"
+ "* - choose all items\n"
+ " - (empty) finish selecting\n"));
+ clean_print_color(CLEAN_COLOR_RESET);
+}
+
+/*
+ * display menu stuff with number prefix and hotkey highlight
+ */
+static void print_highlight_menu_stuff(struct menu_stuff *stuff, int **chosen)
+{
+ struct string_list menu_list = STRING_LIST_INIT_DUP;
+ struct strbuf menu = STRBUF_INIT;
+ struct menu_item *menu_item;
+ struct string_list_item *string_list_item;
+ int i;
+
+ switch (stuff->type) {
+ default:
+ die("Bad type of menu_stuff when print menu");
+ case MENU_STUFF_TYPE_MENU_ITEM:
+ menu_item = (struct menu_item *)stuff->stuff;
+ for (i = 0; i < stuff->nr; i++, menu_item++) {
+ const char *p;
+ int highlighted = 0;
+
+ p = menu_item->title;
+ if ((*chosen)[i] < 0)
+ (*chosen)[i] = menu_item->selected ? 1 : 0;
+ strbuf_addf(&menu, "%s%2d: ", (*chosen)[i] ? "*" : " ", i+1);
+ for (; *p; p++) {
+ if (!highlighted && *p == menu_item->hotkey) {
+ strbuf_addstr(&menu, clean_get_color(CLEAN_COLOR_PROMPT));
+ strbuf_addch(&menu, *p);
+ strbuf_addstr(&menu, clean_get_color(CLEAN_COLOR_RESET));
+ highlighted = 1;
+ } else {
+ strbuf_addch(&menu, *p);
+ }
+ }
+ string_list_append(&menu_list, menu.buf);
+ strbuf_reset(&menu);
+ }
+ break;
+ case MENU_STUFF_TYPE_STRING_LIST:
+ i = 0;
+ for_each_string_list_item(string_list_item, (struct string_list *)stuff->stuff) {
+ if ((*chosen)[i] < 0)
+ (*chosen)[i] = 0;
+ strbuf_addf(&menu, "%s%2d: %s",
+ (*chosen)[i] ? "*" : " ", i+1, string_list_item->string);
+ string_list_append(&menu_list, menu.buf);
+ strbuf_reset(&menu);
+ i++;
+ }
+ break;
+ }
+
+ pretty_print_menus(&menu_list);
+
+ strbuf_release(&menu);
+ string_list_clear(&menu_list, 0);
+}
+
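+/*
+ * Find the menu entry that matches "choice", either by its
+ * single-character hotkey or by a case-insensitive prefix of its
+ * title.  Returns the 1-based index of the match, or a non-positive
+ * value when the choice is ambiguous or matches nothing.
+ */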
+static int find_unique(const char *choice, struct menu_stuff *menu_stuff)
+{
+ struct menu_item *menu_item;
+ struct string_list_item *string_list_item;
+ int i, len, found = 0;
+
+ len = strlen(choice);
+ switch (menu_stuff->type) {
+ default:
+ die("Bad type of menu_stuff when parse choice");
+ case MENU_STUFF_TYPE_MENU_ITEM:
+
+ menu_item = (struct menu_item *)menu_stuff->stuff;
+ for (i = 0; i < menu_stuff->nr; i++, menu_item++) {
+ if (len == 1 && *choice == menu_item->hotkey) {
+ found = i + 1;
+ break;
+ }
+ if (!strncasecmp(choice, menu_item->title, len)) {
+ if (found) {
+ if (len == 1) {
+ /* continue for hotkey matching */
+ found = -1;
+ } else {
+ found = 0;
+ break;
+ }
+ } else {
+ found = i + 1;
+ }
+ }
+ }
+ break;
+ case MENU_STUFF_TYPE_STRING_LIST:
+ string_list_item = ((struct string_list *)menu_stuff->stuff)->items;
+ for (i = 0; i < menu_stuff->nr; i++, string_list_item++) {
+ if (!strncasecmp(choice, string_list_item->string, len)) {
+ if (found) {
+ found = 0;
+ break;
+ }
+ found = i + 1;
+ }
+ }
+ break;
+ }
+ return found;
+}
+
+/*
+ * Parse user input, and return choice(s) for menu (menu_stuff).
+ *
+ * Input
+ * (for single choice)
+ * 1 - select a numbered item
+ * foo - select item based on menu title
+ * - (empty) select nothing
+ *
+ * (for multiple choice)
+ * 1 - select a single item
+ * 3-5 - select a range of items
+ * 2-3,6-9 - select multiple ranges
+ * foo - select item based on menu title
+ * -... - unselect specified items
+ * * - choose all items
+ * - (empty) finish selecting
+ *
+ * The parse result will be saved in array **chosen, and
+ * return number of total selections.
+ */
+static int parse_choice(struct menu_stuff *menu_stuff,
+ int is_single,
+ struct strbuf input,
+ int **chosen)
+{
+ struct strbuf **choice_list, **ptr;
+ int nr = 0;
+ int i;
+
+ if (is_single) {
+ choice_list = strbuf_split_max(&input, '\n', 0);
+ } else {
+ char *p = input.buf;
+ do {
+ if (*p == ',')
+ *p = ' ';
+ } while (*p++);
+ choice_list = strbuf_split_max(&input, ' ', 0);
+ }
+
+ for (ptr = choice_list; *ptr; ptr++) {
+ char *p;
+ int choose = 1;
+ int bottom = 0, top = 0;
+ int is_range, is_number;
+
+ strbuf_trim(*ptr);
+ if (!(*ptr)->len)
+ continue;
+
+ /* Input that begins with '-'; unchoose */
+ if (*(*ptr)->buf == '-') {
+ choose = 0;
+ strbuf_remove((*ptr), 0, 1);
+ }
+
+ is_range = 0;
+ is_number = 1;
+ for (p = (*ptr)->buf; *p; p++) {
+ if ('-' == *p) {
+ if (!is_range) {
+ is_range = 1;
+ is_number = 0;
+ } else {
+ is_number = 0;
+ is_range = 0;
+ break;
+ }
+ } else if (!isdigit(*p)) {
+ is_number = 0;
+ is_range = 0;
+ break;
+ }
+ }
+
+ if (is_number) {
+ bottom = atoi((*ptr)->buf);
+ top = bottom;
+ } else if (is_range) {
+ bottom = atoi((*ptr)->buf);
+ /* a range can be specified like 5-7 or 5- */
+ if (!*(strchr((*ptr)->buf, '-') + 1))
+ top = menu_stuff->nr;
+ else
+ top = atoi(strchr((*ptr)->buf, '-') + 1);
+ } else if (!strcmp((*ptr)->buf, "*")) {
+ bottom = 1;
+ top = menu_stuff->nr;
+ } else {
+ bottom = find_unique((*ptr)->buf, menu_stuff);
+ top = bottom;
+ }
+
+ if (top <= 0 || bottom <= 0 || top > menu_stuff->nr || bottom > top ||
+ (is_single && bottom != top)) {
+ clean_print_color(CLEAN_COLOR_ERROR);
+ printf(_("Huh (%s)?\n"), (*ptr)->buf);
+ clean_print_color(CLEAN_COLOR_RESET);
+ continue;
+ }
+
+ for (i = bottom; i <= top; i++)
+ (*chosen)[i-1] = choose;
+ }
+
+ strbuf_list_free(choice_list);
+
+ for (i = 0; i < menu_stuff->nr; i++)
+ nr += (*chosen)[i];
+ return nr;
+}
+
+/*
+ * Implement a git-add-interactive compatible UI, which is borrowed
+ * from git-add--interactive.perl.
+ *
+ * Return value:
+ *
+ * - Returns an array of integers; it is up to the caller to free
+ *   the allocated memory.
+ * - The array ends with EOF.
+ * - If the user pressed CTRL-D (i.e. EOF), no selection is returned.
+ */
+static int *list_and_choose(struct menu_opts *opts, struct menu_stuff *stuff)
+{
+ struct strbuf choice = STRBUF_INIT;
+ int *chosen, *result;
+ int nr = 0;
+ int eof = 0;
+ int i;
+
+ ALLOC_ARRAY(chosen, stuff->nr);
+ /* set chosen as uninitialized */
+ for (i = 0; i < stuff->nr; i++)
+ chosen[i] = -1;
+
+ for (;;) {
+ if (opts->header) {
+ printf_ln("%s%s%s",
+ clean_get_color(CLEAN_COLOR_HEADER),
+ _(opts->header),
+ clean_get_color(CLEAN_COLOR_RESET));
+ }
+
+ /* chosen will be initialized by print_highlight_menu_stuff */
+ print_highlight_menu_stuff(stuff, &chosen);
+
+ if (opts->flags & MENU_OPTS_LIST_ONLY)
+ break;
+
+ if (opts->prompt) {
+ printf("%s%s%s%s",
+ clean_get_color(CLEAN_COLOR_PROMPT),
+ _(opts->prompt),
+ opts->flags & MENU_OPTS_SINGLETON ? "> " : ">> ",
+ clean_get_color(CLEAN_COLOR_RESET));
+ }
+
+ if (git_read_line_interactively(&choice) == EOF) {
+ eof = 1;
+ break;
+ }
+
+ /* help for prompt */
+ if (!strcmp(choice.buf, "?")) {
+ prompt_help_cmd(opts->flags & MENU_OPTS_SINGLETON);
+ continue;
+ }
+
+ /* for a multiple-choice menu, pressing ENTER (empty input) finishes the selection */
+ if (!(opts->flags & MENU_OPTS_SINGLETON) && !choice.len)
+ break;
+
+ nr = parse_choice(stuff,
+ opts->flags & MENU_OPTS_SINGLETON,
+ choice,
+ &chosen);
+
+ if (opts->flags & MENU_OPTS_SINGLETON) {
+ if (nr)
+ break;
+ } else if (opts->flags & MENU_OPTS_IMMEDIATE) {
+ break;
+ }
+ }
+
+ if (eof) {
+ result = xmalloc(sizeof(int));
+ *result = EOF;
+ } else {
+ int j = 0;
+
+ /*
+ * recalculate nr if we returned from the menu directly with
+ * the default selections.
+ */
+ if (!nr) {
+ for (i = 0; i < stuff->nr; i++)
+ nr += chosen[i];
+ }
+
+ CALLOC_ARRAY(result, st_add(nr, 1));
+ for (i = 0; i < stuff->nr && j < nr; i++) {
+ if (chosen[i])
+ result[j++] = i;
+ }
+ result[j] = EOF;
+ }
+
+ free(chosen);
+ strbuf_release(&choice);
+ return result;
+}
+
+static int clean_cmd(void)
+{
+ return MENU_RETURN_NO_LOOP;
+}
+
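+/*
+ * Interactive command: read exclude patterns from the user and drop
+ * matching entries from del_list; an empty input line leaves this
+ * mode.
+ */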
+static int filter_by_patterns_cmd(void)
+{
+ struct dir_struct dir = DIR_INIT;
+ struct strbuf confirm = STRBUF_INIT;
+ struct strbuf **ignore_list;
+ struct string_list_item *item;
+ struct pattern_list *pl;
+ int changed = -1, i;
+
+ for (;;) {
+ if (!del_list.nr)
+ break;
+
+ if (changed)
+ pretty_print_dels();
+
+ clean_print_color(CLEAN_COLOR_PROMPT);
+ printf(_("Input ignore patterns>> "));
+ clean_print_color(CLEAN_COLOR_RESET);
+ if (git_read_line_interactively(&confirm) == EOF)
+ putchar('\n');
+
+ /* quit filter_by_pattern mode on ENTER or Ctrl-D */
+ if (!confirm.len)
+ break;
+
+ pl = add_pattern_list(&dir, EXC_CMDL, "manual exclude");
+ ignore_list = strbuf_split_max(&confirm, ' ', 0);
+
+ for (i = 0; ignore_list[i]; i++) {
+ strbuf_trim(ignore_list[i]);
+ if (!ignore_list[i]->len)
+ continue;
+
+ add_pattern(ignore_list[i]->buf, "", 0, pl, -(i+1));
+ }
+
+ changed = 0;
+ for_each_string_list_item(item, &del_list) {
+ int dtype = DT_UNKNOWN;
+
+ if (is_excluded(&dir, &the_index, item->string, &dtype)) {
+ *item->string = '\0';
+ changed++;
+ }
+ }
+
+ if (changed) {
+ string_list_remove_empty_items(&del_list, 0);
+ } else {
+ clean_print_color(CLEAN_COLOR_ERROR);
+ printf_ln(_("WARNING: Cannot find items matched by: %s"), confirm.buf);
+ clean_print_color(CLEAN_COLOR_RESET);
+ }
+
+ strbuf_list_free(ignore_list);
+ dir_clear(&dir);
+ }
+
+ strbuf_release(&confirm);
+ return 0;
+}
+
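+/*
+ * Interactive command: let the user pick by number which entries of
+ * del_list to delete; everything that was not selected is dropped
+ * from the list.
+ */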
+static int select_by_numbers_cmd(void)
+{
+ struct menu_opts menu_opts;
+ struct menu_stuff menu_stuff;
+ struct string_list_item *items;
+ int *chosen;
+ int i, j;
+
+ menu_opts.header = NULL;
+ menu_opts.prompt = N_("Select items to delete");
+ menu_opts.flags = 0;
+
+ menu_stuff.type = MENU_STUFF_TYPE_STRING_LIST;
+ menu_stuff.stuff = &del_list;
+ menu_stuff.nr = del_list.nr;
+
+ chosen = list_and_choose(&menu_opts, &menu_stuff);
+ items = del_list.items;
+ for (i = 0, j = 0; i < del_list.nr; i++) {
+ if (i < chosen[j]) {
+ *(items[i].string) = '\0';
+ } else if (i == chosen[j]) {
+ /* delete selected item */
+ j++;
+ continue;
+ } else {
+ /* end of chosen (chosen[j] == EOF), won't delete */
+ *(items[i].string) = '\0';
+ }
+ }
+
+ string_list_remove_empty_items(&del_list, 0);
+
+ free(chosen);
+ return 0;
+}
+
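+/*
+ * Interactive command: confirm each entry of del_list individually
+ * ("rm -i" style); anything not answered with a prefix of "yes" is
+ * dropped from the list.
+ */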
+static int ask_each_cmd(void)
+{
+ struct strbuf confirm = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list_item *item;
+ const char *qname;
+ int changed = 0, eof = 0;
+
+ for_each_string_list_item(item, &del_list) {
+ /* Ctrl-D should stop removing files */
+ if (!eof) {
+ qname = quote_path(item->string, NULL, &buf, 0);
+ /* TRANSLATORS: Make sure to keep [y/N] as is */
+ printf(_("Remove %s [y/N]? "), qname);
+ if (git_read_line_interactively(&confirm) == EOF) {
+ putchar('\n');
+ eof = 1;
+ }
+ }
+ if (!confirm.len || strncasecmp(confirm.buf, "yes", confirm.len)) {
+ *item->string = '\0';
+ changed++;
+ }
+ }
+
+ if (changed)
+ string_list_remove_empty_items(&del_list, 0);
+
+ strbuf_release(&buf);
+ strbuf_release(&confirm);
+ return MENU_RETURN_NO_LOOP;
+}
+
+static int quit_cmd(void)
+{
+ string_list_clear(&del_list, 0);
+ printf(_("Bye.\n"));
+ return MENU_RETURN_NO_LOOP;
+}
+
+static int help_cmd(void)
+{
+ clean_print_color(CLEAN_COLOR_HELP);
+ printf_ln(_(
+ "clean - start cleaning\n"
+ "filter by pattern - exclude items from deletion\n"
+ "select by numbers - select items to be deleted by numbers\n"
+ "ask each - confirm each deletion (like \"rm -i\")\n"
+ "quit - stop cleaning\n"
+ "help - this screen\n"
+ "? - help for prompt selection"
+ ));
+ clean_print_color(CLEAN_COLOR_RESET);
+ return 0;
+}
+
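+/*
+ * Main loop of "git clean -i": show what would be removed, present
+ * the command menu, and dispatch to the chosen command until nothing
+ * is left to clean or the user quits.
+ */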
+static void interactive_main_loop(void)
+{
+ while (del_list.nr) {
+ struct menu_opts menu_opts;
+ struct menu_stuff menu_stuff;
+ struct menu_item menus[] = {
+ {'c', "clean", 0, clean_cmd},
+ {'f', "filter by pattern", 0, filter_by_patterns_cmd},
+ {'s', "select by numbers", 0, select_by_numbers_cmd},
+ {'a', "ask each", 0, ask_each_cmd},
+ {'q', "quit", 0, quit_cmd},
+ {'h', "help", 0, help_cmd},
+ };
+ int *chosen;
+
+ menu_opts.header = N_("*** Commands ***");
+ menu_opts.prompt = N_("What now");
+ menu_opts.flags = MENU_OPTS_SINGLETON;
+
+ menu_stuff.type = MENU_STUFF_TYPE_MENU_ITEM;
+ menu_stuff.stuff = menus;
+ menu_stuff.nr = sizeof(menus) / sizeof(struct menu_item);
+
+ clean_print_color(CLEAN_COLOR_HEADER);
+ printf_ln(Q_("Would remove the following item:",
+ "Would remove the following items:",
+ del_list.nr));
+ clean_print_color(CLEAN_COLOR_RESET);
+
+ pretty_print_dels();
+
+ chosen = list_and_choose(&menu_opts, &menu_stuff);
+
+ if (*chosen != EOF) {
+ int ret;
+ ret = menus[*chosen].fn();
+ if (ret != MENU_RETURN_NO_LOOP) {
+ FREE_AND_NULL(chosen);
+ if (!del_list.nr) {
+ clean_print_color(CLEAN_COLOR_ERROR);
+ printf_ln(_("No more files to clean, exiting."));
+ clean_print_color(CLEAN_COLOR_RESET);
+ break;
+ }
+ continue;
+ }
+ } else {
+ quit_cmd();
+ }
+
+ FREE_AND_NULL(chosen);
+ break;
+ }
+}
+
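+/*
+ * Reduce dir->entries[] to a minimal, non-overlapping set: drop
+ * entries that contain an ignored path inside them, and drop entries
+ * that are already contained within another entry we keep.
+ */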
+static void correct_untracked_entries(struct dir_struct *dir)
+{
+ int src, dst, ign;
+
+ for (src = dst = ign = 0; src < dir->nr; src++) {
+ /* skip paths in ignored[] that cannot be inside entries[src] */
+ while (ign < dir->ignored_nr &&
+ 0 <= cmp_dir_entry(&dir->entries[src], &dir->ignored[ign]))
+ ign++;
+
+ if (ign < dir->ignored_nr &&
+ check_dir_entry_contains(dir->entries[src], dir->ignored[ign])) {
+ /* entries[src] contains an ignored path, so we drop it */
+ free(dir->entries[src]);
+ } else {
+ struct dir_entry *ent = dir->entries[src++];
+
+ /* entries[src] does not contain an ignored path, so we keep it */
+ dir->entries[dst++] = ent;
+
+ /* then discard paths in entries[] contained inside entries[src] */
+ while (src < dir->nr &&
+ check_dir_entry_contains(ent, dir->entries[src]))
+ free(dir->entries[src++]);
+
+ /* compensate for the outer loop's loop control */
+ src--;
+ }
+ }
+ dir->nr = dst;
+}
+
+int cmd_clean(int argc, const char **argv, const char *prefix)
+{
+ int i, res;
+ int dry_run = 0, remove_directories = 0, quiet = 0, ignored = 0;
+ int ignored_only = 0, config_set = 0, errors = 0, gone = 1;
+ int rm_flags = REMOVE_DIR_KEEP_NESTED_GIT;
+ struct strbuf abs_path = STRBUF_INIT;
+ struct dir_struct dir = DIR_INIT;
+ struct pathspec pathspec;
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list exclude_list = STRING_LIST_INIT_NODUP;
+ struct pattern_list *pl;
+ struct string_list_item *item;
+ const char *qname;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("do not print names of files removed")),
+ OPT__DRY_RUN(&dry_run, N_("dry run")),
+ OPT__FORCE(&force, N_("force"), PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL('i', "interactive", &interactive, N_("interactive cleaning")),
+ OPT_BOOL('d', NULL, &remove_directories,
+ N_("remove whole directories")),
+ OPT_CALLBACK_F('e', "exclude", &exclude_list, N_("pattern"),
+ N_("add <pattern> to ignore rules"), PARSE_OPT_NONEG, exclude_cb),
+ OPT_BOOL('x', NULL, &ignored, N_("remove ignored files, too")),
+ OPT_BOOL('X', NULL, &ignored_only,
+ N_("remove only ignored files")),
+ OPT_END()
+ };
+
+ git_config(git_clean_config, NULL);
+ if (force < 0)
+ force = 0;
+ else
+ config_set = 1;
+
+ argc = parse_options(argc, argv, prefix, options, builtin_clean_usage,
+ 0);
+
+ if (!interactive && !dry_run && !force) {
+ if (config_set)
+ die(_("clean.requireForce set to true and neither -i, -n, nor -f given; "
+ "refusing to clean"));
+ else
+ die(_("clean.requireForce defaults to true and neither -i, -n, nor -f given;"
+ " refusing to clean"));
+ }
+
+ if (force > 1)
+ rm_flags = 0;
+ else
+ dir.flags |= DIR_SKIP_NESTED_GIT;
+
+ dir.flags |= DIR_SHOW_OTHER_DIRECTORIES;
+
+ if (ignored && ignored_only)
+ die(_("-x and -X cannot be used together"));
+ if (!ignored)
+ setup_standard_excludes(&dir);
+ if (ignored_only)
+ dir.flags |= DIR_SHOW_IGNORED;
+
+ if (argc) {
+ /*
+ * Remaining args imply that pathspecs were specified, and we should
+ * recurse within those.
+ */
+ remove_directories = 1;
+ }
+
+ if (remove_directories && !ignored_only) {
+ /*
+ * We need to know about ignored files too:
+ *
+ * If (ignored), then we will delete ignored files as well.
+ *
+ * If (!ignored), then even though we are not doing
+ * anything with ignored files, we need to know about them
+ * so that we can avoid deleting a directory of untracked
+ * files that also contains an ignored file within it.
+ *
+ * For the (!ignored) case, since we only need to avoid
+ * deleting ignored files, we can set
+ * DIR_SHOW_IGNORED_TOO_MODE_MATCHING in order to avoid
+ * recursing into a directory which is itself ignored.
+ */
+ dir.flags |= DIR_SHOW_IGNORED_TOO;
+ if (!ignored)
+ dir.flags |= DIR_SHOW_IGNORED_TOO_MODE_MATCHING;
+
+ /*
+ * Let the fill_directory() machinery know that we aren't
+ * just recursing to collect the ignored files; we want all
+ * the untracked ones so that we can delete them. (Note:
+ * we could also set DIR_KEEP_UNTRACKED_CONTENTS when
+ * ignored_only is true, since DIR_KEEP_UNTRACKED_CONTENTS
+ * only has effect in combination with DIR_SHOW_IGNORED_TOO. It makes
+ * the code clearer to exclude it, though.)
+ */
+ dir.flags |= DIR_KEEP_UNTRACKED_CONTENTS;
+ }
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ if (repo_read_index(the_repository) < 0)
+ die(_("index file corrupt"));
+
+ pl = add_pattern_list(&dir, EXC_CMDL, "--exclude option");
+ for (i = 0; i < exclude_list.nr; i++)
+ add_pattern(exclude_list.items[i].string, "", 0, pl, -(i+1));
+
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_CWD,
+ prefix, argv);
+
+ fill_directory(&dir, &the_index, &pathspec);
+ correct_untracked_entries(&dir);
+
+ for (i = 0; i < dir.nr; i++) {
+ struct dir_entry *ent = dir.entries[i];
+ struct stat st;
+ const char *rel;
+
+ if (!index_name_is_other(&the_index, ent->name, ent->len))
+ continue;
+
+ if (lstat(ent->name, &st))
+ die_errno("Cannot lstat '%s'", ent->name);
+
+ if (S_ISDIR(st.st_mode) && !remove_directories)
+ continue;
+
+ rel = relative_path(ent->name, prefix, &buf);
+ string_list_append(&del_list, rel);
+ }
+
+ dir_clear(&dir);
+
+ if (interactive && del_list.nr > 0)
+ interactive_main_loop();
+
+ for_each_string_list_item(item, &del_list) {
+ struct stat st;
+
+ strbuf_reset(&abs_path);
+ if (prefix)
+ strbuf_addstr(&abs_path, prefix);
+
+ strbuf_addstr(&abs_path, item->string);
+
+ /*
+ * we might have removed this as part of earlier
+ * recursive directory removal, so lstat() here could
+ * fail with ENOENT.
+ */
+ if (lstat(abs_path.buf, &st))
+ continue;
+
+ if (S_ISDIR(st.st_mode)) {
+ if (remove_dirs(&abs_path, prefix, rm_flags, dry_run, quiet, &gone))
+ errors++;
+ if (gone && !quiet) {
+ qname = quote_path(item->string, NULL, &buf, 0);
+ printf(dry_run ? _(msg_would_remove) : _(msg_remove), qname);
+ }
+ } else {
+ res = dry_run ? 0 : unlink(abs_path.buf);
+ if (res) {
+ int saved_errno = errno;
+ qname = quote_path(item->string, NULL, &buf, 0);
+ errno = saved_errno;
+ warning_errno(_(msg_warn_remove_failed), qname);
+ errors++;
+ } else if (!quiet) {
+ qname = quote_path(item->string, NULL, &buf, 0);
+ printf(dry_run ? _(msg_would_remove) : _(msg_remove), qname);
+ }
+ }
+ }
+
+ strbuf_release(&abs_path);
+ strbuf_release(&buf);
+ string_list_clear(&del_list, 0);
+ string_list_clear(&exclude_list, 0);
+ return (errors != 0);
+}
diff --git a/builtin/clone.c b/builtin/clone.c
new file mode 100644
index 0000000..3c2ae31
--- /dev/null
+++ b/builtin/clone.c
@@ -0,0 +1,1400 @@
+/*
+ * Builtin "git clone"
+ *
+ * Copyright (c) 2007 Kristian Høgsberg <krh@redhat.com>,
+ * 2008 Daniel Barkalow <barkalow@iabervon.org>
+ * Based on git-commit.sh by Junio C Hamano and Linus Torvalds
+ *
+ * Clone a repository into a different directory that does not yet exist.
+ */
+
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "config.h"
+#include "lockfile.h"
+#include "parse-options.h"
+#include "fetch-pack.h"
+#include "refs.h"
+#include "refspec.h"
+#include "object-store.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "unpack-trees.h"
+#include "transport.h"
+#include "strbuf.h"
+#include "dir.h"
+#include "dir-iterator.h"
+#include "iterator.h"
+#include "sigchain.h"
+#include "branch.h"
+#include "remote.h"
+#include "run-command.h"
+#include "connected.h"
+#include "packfile.h"
+#include "list-objects-filter-options.h"
+#include "hook.h"
+#include "bundle.h"
+#include "bundle-uri.h"
+
+/*
+ * Overall FIXMEs:
+ * - respect DB_ENVIRONMENT for .git/objects.
+ *
+ * Implementation notes:
+ * - dropping use-separate-remote and no-separate-remote compatibility
+ *
+ */
+static const char * const builtin_clone_usage[] = {
+ N_("git clone [<options>] [--] <repo> [<dir>]"),
+ NULL
+};
+
+static int option_no_checkout, option_bare, option_mirror, option_single_branch = -1;
+static int option_local = -1, option_no_hardlinks, option_shared;
+static int option_no_tags;
+static int option_shallow_submodules;
+static int option_reject_shallow = -1; /* unspecified */
+static int config_reject_shallow = -1; /* unspecified */
+static int deepen;
+static char *option_template, *option_depth, *option_since;
+static char *option_origin = NULL;
+static char *remote_name = NULL;
+static char *option_branch = NULL;
+static struct string_list option_not = STRING_LIST_INIT_NODUP;
+static const char *real_git_dir;
+static char *option_upload_pack = "git-upload-pack";
+static int option_verbosity;
+static int option_progress = -1;
+static int option_sparse_checkout;
+static enum transport_family family;
+static struct string_list option_config = STRING_LIST_INIT_NODUP;
+static struct string_list option_required_reference = STRING_LIST_INIT_NODUP;
+static struct string_list option_optional_reference = STRING_LIST_INIT_NODUP;
+static int option_dissociate;
+static int max_jobs = -1;
+static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP;
+static struct list_objects_filter_options filter_options = LIST_OBJECTS_FILTER_INIT;
+static int option_filter_submodules = -1; /* unspecified */
+static int config_filter_submodules = -1; /* unspecified */
+static struct string_list server_options = STRING_LIST_INIT_NODUP;
+static int option_remote_submodules;
+static const char *bundle_uri;
+
+static int recurse_submodules_cb(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset)
+ string_list_clear((struct string_list *)opt->value, 0);
+ else if (arg)
+ string_list_append((struct string_list *)opt->value, arg);
+ else
+ string_list_append((struct string_list *)opt->value,
+ (const char *)opt->defval);
+
+ return 0;
+}
+
+static struct option builtin_clone_options[] = {
+ OPT__VERBOSITY(&option_verbosity),
+ OPT_BOOL(0, "progress", &option_progress,
+ N_("force progress reporting")),
+ OPT_BOOL(0, "reject-shallow", &option_reject_shallow,
+ N_("don't clone shallow repository")),
+ OPT_BOOL('n', "no-checkout", &option_no_checkout,
+ N_("don't create a checkout")),
+ OPT_BOOL(0, "bare", &option_bare, N_("create a bare repository")),
+ OPT_HIDDEN_BOOL(0, "naked", &option_bare,
+ N_("create a bare repository")),
+ OPT_BOOL(0, "mirror", &option_mirror,
+ N_("create a mirror repository (implies bare)")),
+ OPT_BOOL('l', "local", &option_local,
+ N_("to clone from a local repository")),
+ OPT_BOOL(0, "no-hardlinks", &option_no_hardlinks,
+ N_("don't use local hardlinks, always copy")),
+ OPT_BOOL('s', "shared", &option_shared,
+ N_("setup as shared repository")),
+ { OPTION_CALLBACK, 0, "recurse-submodules", &option_recurse_submodules,
+ N_("pathspec"), N_("initialize submodules in the clone"),
+ PARSE_OPT_OPTARG, recurse_submodules_cb, (intptr_t)"." },
+ OPT_ALIAS(0, "recursive", "recurse-submodules"),
+ OPT_INTEGER('j', "jobs", &max_jobs,
+ N_("number of submodules cloned in parallel")),
+ OPT_STRING(0, "template", &option_template, N_("template-directory"),
+ N_("directory from which templates will be used")),
+ OPT_STRING_LIST(0, "reference", &option_required_reference, N_("repo"),
+ N_("reference repository")),
+ OPT_STRING_LIST(0, "reference-if-able", &option_optional_reference,
+ N_("repo"), N_("reference repository")),
+ OPT_BOOL(0, "dissociate", &option_dissociate,
+ N_("use --reference only while cloning")),
+ OPT_STRING('o', "origin", &option_origin, N_("name"),
+ N_("use <name> instead of 'origin' to track upstream")),
+ OPT_STRING('b', "branch", &option_branch, N_("branch"),
+ N_("checkout <branch> instead of the remote's HEAD")),
+ OPT_STRING('u', "upload-pack", &option_upload_pack, N_("path"),
+ N_("path to git-upload-pack on the remote")),
+ OPT_STRING(0, "depth", &option_depth, N_("depth"),
+ N_("create a shallow clone of that depth")),
+ OPT_STRING(0, "shallow-since", &option_since, N_("time"),
+ N_("create a shallow clone since a specific time")),
+ OPT_STRING_LIST(0, "shallow-exclude", &option_not, N_("revision"),
+ N_("deepen history of shallow clone, excluding rev")),
+ OPT_BOOL(0, "single-branch", &option_single_branch,
+ N_("clone only one branch, HEAD or --branch")),
+ OPT_BOOL(0, "no-tags", &option_no_tags,
+ N_("don't clone any tags, and make later fetches not to follow them")),
+ OPT_BOOL(0, "shallow-submodules", &option_shallow_submodules,
+ N_("any cloned submodules will be shallow")),
+ OPT_STRING(0, "separate-git-dir", &real_git_dir, N_("gitdir"),
+ N_("separate git dir from working tree")),
+ OPT_STRING_LIST('c', "config", &option_config, N_("key=value"),
+ N_("set config inside the new repository")),
+ OPT_STRING_LIST(0, "server-option", &server_options,
+ N_("server-specific"), N_("option to transmit")),
+ OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
+ TRANSPORT_FAMILY_IPV4),
+ OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
+ TRANSPORT_FAMILY_IPV6),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_BOOL(0, "also-filter-submodules", &option_filter_submodules,
+ N_("apply partial clone filters to submodules")),
+ OPT_BOOL(0, "remote-submodules", &option_remote_submodules,
+ N_("any cloned submodules will use their remote-tracking branch")),
+ OPT_BOOL(0, "sparse", &option_sparse_checkout,
+ N_("initialize sparse-checkout file to include only files at root")),
+ OPT_STRING(0, "bundle-uri", &bundle_uri,
+ N_("uri"), N_("a URI for downloading bundles before fetching from origin remote")),
+ OPT_END()
+};
+
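+/*
+ * Probe "path" with the known repository and bundle suffixes to find
+ * what we were asked to clone: a git directory, a "gitdir:" file
+ * pointing at one, or a bundle file.  Sets *is_bundle and returns the
+ * path found, or NULL when nothing matches.
+ */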
+static const char *get_repo_path_1(struct strbuf *path, int *is_bundle)
+{
+ static char *suffix[] = { "/.git", "", ".git/.git", ".git" };
+ static char *bundle_suffix[] = { ".bundle", "" };
+ size_t baselen = path->len;
+ struct stat st;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(suffix); i++) {
+ strbuf_setlen(path, baselen);
+ strbuf_addstr(path, suffix[i]);
+ if (stat(path->buf, &st))
+ continue;
+ if (S_ISDIR(st.st_mode) && is_git_directory(path->buf)) {
+ *is_bundle = 0;
+ return path->buf;
+ } else if (S_ISREG(st.st_mode) && st.st_size > 8) {
+ /* Is it a "gitfile"? */
+ char signature[8];
+ const char *dst;
+ int len, fd = open(path->buf, O_RDONLY);
+ if (fd < 0)
+ continue;
+ len = read_in_full(fd, signature, 8);
+ close(fd);
+ if (len != 8 || strncmp(signature, "gitdir: ", 8))
+ continue;
+ dst = read_gitfile(path->buf);
+ if (dst) {
+ *is_bundle = 0;
+ return dst;
+ }
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bundle_suffix); i++) {
+ strbuf_setlen(path, baselen);
+ strbuf_addstr(path, bundle_suffix[i]);
+ if (!stat(path->buf, &st) && S_ISREG(st.st_mode)) {
+ *is_bundle = 1;
+ return path->buf;
+ }
+ }
+
+ return NULL;
+}
+
+static char *get_repo_path(const char *repo, int *is_bundle)
+{
+ struct strbuf path = STRBUF_INIT;
+ const char *raw;
+ char *canon;
+
+ strbuf_addstr(&path, repo);
+ raw = get_repo_path_1(&path, is_bundle);
+ canon = raw ? absolute_pathdup(raw) : NULL;
+ strbuf_release(&path);
+ return canon;
+}
+
+static int add_one_reference(struct string_list_item *item, void *cb_data)
+{
+ struct strbuf err = STRBUF_INIT;
+ int *required = cb_data;
+ char *ref_git = compute_alternate_path(item->string, &err);
+
+ if (!ref_git) {
+ if (*required)
+ die("%s", err.buf);
+ else
+ fprintf(stderr,
+ _("info: Could not add alternate for '%s': %s\n"),
+ item->string, err.buf);
+ } else {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addf(&sb, "%s/objects", ref_git);
+ add_to_alternates_file(sb.buf);
+ strbuf_release(&sb);
+ }
+
+ strbuf_release(&err);
+ free(ref_git);
+ return 0;
+}
+
+static void setup_reference(void)
+{
+ int required = 1;
+ for_each_string_list(&option_required_reference,
+ add_one_reference, &required);
+ required = 0;
+ for_each_string_list(&option_optional_reference,
+ add_one_reference, &required);
+}
+
+static void copy_alternates(struct strbuf *src, const char *src_repo)
+{
+ /*
+ * Read from the source objects/info/alternates file
+ * and copy the entries to corresponding file in the
+ * destination repository with add_to_alternates_file().
+ * Both src and dst have "$path/objects/info/alternates".
+ *
+ * Instead of copying bit-for-bit from the original,
+ * we need to append to the existing one so that the entry
+ * already created via "clone -s" is not lost, and also
+ * to turn entries with paths relative to the original into
+ * absolute ones, so that they can be used in the new repository.
+ */
+ FILE *in = xfopen(src->buf, "r");
+ struct strbuf line = STRBUF_INIT;
+
+ while (strbuf_getline(&line, in) != EOF) {
+ char *abs_path;
+ if (!line.len || line.buf[0] == '#')
+ continue;
+ if (is_absolute_path(line.buf)) {
+ add_to_alternates_file(line.buf);
+ continue;
+ }
+ abs_path = mkpathdup("%s/objects/%s", src_repo, line.buf);
+ if (!normalize_path_copy(abs_path, abs_path))
+ add_to_alternates_file(abs_path);
+ else
+ warning("skipping invalid relative alternate: %s/%s",
+ src_repo, line.buf);
+ free(abs_path);
+ }
+ strbuf_release(&line);
+ fclose(in);
+}
+
+static void mkdir_if_missing(const char *pathname, mode_t mode)
+{
+ struct stat st;
+
+ if (!mkdir(pathname, mode))
+ return;
+
+ if (errno != EEXIST)
+ die_errno(_("failed to create directory '%s'"), pathname);
+ else if (stat(pathname, &st))
+ die_errno(_("failed to stat '%s'"), pathname);
+ else if (!S_ISDIR(st.st_mode))
+ die(_("%s exists and is not a directory"), pathname);
+}
+
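+/*
+ * Mirror the contents of "src" into "dest" for a local clone:
+ * hardlink files where possible (falling back to copying), refuse
+ * symlinks, and give objects/info/alternates special treatment.
+ */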
+static void copy_or_link_directory(struct strbuf *src, struct strbuf *dest,
+ const char *src_repo)
+{
+ int src_len, dest_len;
+ struct dir_iterator *iter;
+ int iter_status;
+ struct strbuf realpath = STRBUF_INIT;
+
+ mkdir_if_missing(dest->buf, 0777);
+
+ iter = dir_iterator_begin(src->buf, DIR_ITERATOR_PEDANTIC);
+
+ if (!iter)
+ die_errno(_("failed to start iterator over '%s'"), src->buf);
+
+ strbuf_addch(src, '/');
+ src_len = src->len;
+ strbuf_addch(dest, '/');
+ dest_len = dest->len;
+
+ while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) {
+ strbuf_setlen(src, src_len);
+ strbuf_addstr(src, iter->relative_path);
+ strbuf_setlen(dest, dest_len);
+ strbuf_addstr(dest, iter->relative_path);
+
+ if (S_ISLNK(iter->st.st_mode))
+ die(_("symlink '%s' exists, refusing to clone with --local"),
+ iter->relative_path);
+
+ if (S_ISDIR(iter->st.st_mode)) {
+ mkdir_if_missing(dest->buf, 0777);
+ continue;
+ }
+
+ /* Files that cannot be copied bit-for-bit... */
+ if (!fspathcmp(iter->relative_path, "info/alternates")) {
+ copy_alternates(src, src_repo);
+ continue;
+ }
+
+ if (unlink(dest->buf) && errno != ENOENT)
+ die_errno(_("failed to unlink '%s'"), dest->buf);
+ if (!option_no_hardlinks) {
+ strbuf_realpath(&realpath, src->buf, 1);
+ if (!link(realpath.buf, dest->buf))
+ continue;
+ if (option_local > 0)
+ die_errno(_("failed to create link '%s'"), dest->buf);
+ option_no_hardlinks = 1;
+ }
+ if (copy_file_with_time(dest->buf, src->buf, 0666))
+ die_errno(_("failed to copy file to '%s'"), dest->buf);
+ }
+
+ if (iter_status != ITER_DONE) {
+ strbuf_setlen(src, src_len);
+ die(_("failed to iterate over '%s'"), src->buf);
+ }
+
+ strbuf_release(&realpath);
+}
+
+static void clone_local(const char *src_repo, const char *dest_repo)
+{
+ if (option_shared) {
+ struct strbuf alt = STRBUF_INIT;
+ get_common_dir(&alt, src_repo);
+ strbuf_addstr(&alt, "/objects");
+ add_to_alternates_file(alt.buf);
+ strbuf_release(&alt);
+ } else {
+ struct strbuf src = STRBUF_INIT;
+ struct strbuf dest = STRBUF_INIT;
+ get_common_dir(&src, src_repo);
+ get_common_dir(&dest, dest_repo);
+ strbuf_addstr(&src, "/objects");
+ strbuf_addstr(&dest, "/objects");
+ copy_or_link_directory(&src, &dest, src_repo);
+ strbuf_release(&src);
+ strbuf_release(&dest);
+ }
+
+ if (0 <= option_verbosity)
+ fprintf(stderr, _("done.\n"));
+}
+
+static const char *junk_work_tree;
+static int junk_work_tree_flags;
+static const char *junk_git_dir;
+static int junk_git_dir_flags;
+static enum {
+ JUNK_LEAVE_NONE,
+ JUNK_LEAVE_REPO,
+ JUNK_LEAVE_ALL
+} junk_mode = JUNK_LEAVE_NONE;
+
+static const char junk_leave_repo_msg[] =
+N_("Clone succeeded, but checkout failed.\n"
+ "You can inspect what was checked out with 'git status'\n"
+ "and retry with 'git restore --source=HEAD :/'\n");
+
+static void remove_junk(void)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ switch (junk_mode) {
+ case JUNK_LEAVE_REPO:
+ warning("%s", _(junk_leave_repo_msg));
+ /* fall-through */
+ case JUNK_LEAVE_ALL:
+ return;
+ default:
+ /* proceed to removal */
+ break;
+ }
+
+ if (junk_git_dir) {
+ strbuf_addstr(&sb, junk_git_dir);
+ remove_dir_recursively(&sb, junk_git_dir_flags);
+ strbuf_reset(&sb);
+ }
+ if (junk_work_tree) {
+ strbuf_addstr(&sb, junk_work_tree);
+ remove_dir_recursively(&sb, junk_work_tree_flags);
+ }
+ strbuf_release(&sb);
+}
+
+static void remove_junk_on_signal(int signo)
+{
+ remove_junk();
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+static struct ref *find_remote_branch(const struct ref *refs, const char *branch)
+{
+ struct ref *ref;
+ struct strbuf head = STRBUF_INIT;
+ strbuf_addstr(&head, "refs/heads/");
+ strbuf_addstr(&head, branch);
+ ref = find_ref_by_name(refs, head.buf);
+ strbuf_release(&head);
+
+ if (ref)
+ return ref;
+
+ strbuf_addstr(&head, "refs/tags/");
+ strbuf_addstr(&head, branch);
+ ref = find_ref_by_name(refs, head.buf);
+ strbuf_release(&head);
+
+ return ref;
+}
+
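+/*
+ * Compute the refs we actually want to fetch from the advertised
+ * refs, applying --single-branch/--branch and the tag-following
+ * rules.
+ */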
+static struct ref *wanted_peer_refs(const struct ref *refs,
+ struct refspec *refspec)
+{
+ struct ref *head = copy_ref(find_ref_by_name(refs, "HEAD"));
+ struct ref *local_refs = head;
+ struct ref **tail = head ? &head->next : &local_refs;
+
+ if (option_single_branch) {
+ struct ref *remote_head = NULL;
+
+ if (!option_branch)
+ remote_head = guess_remote_head(head, refs, 0);
+ else {
+ local_refs = NULL;
+ tail = &local_refs;
+ remote_head = copy_ref(find_remote_branch(refs, option_branch));
+ }
+
+ if (!remote_head && option_branch)
+ warning(_("Could not find remote branch %s to clone."),
+ option_branch);
+ else {
+ int i;
+ for (i = 0; i < refspec->nr; i++)
+ get_fetch_map(remote_head, &refspec->items[i],
+ &tail, 0);
+
+ /* if --branch=tag, pull the requested tag explicitly */
+ get_fetch_map(remote_head, tag_refspec, &tail, 0);
+ }
+ free_refs(remote_head);
+ } else {
+ int i;
+ for (i = 0; i < refspec->nr; i++)
+ get_fetch_map(refs, &refspec->items[i], &tail, 0);
+ }
+
+ if (!option_mirror && !option_single_branch && !option_no_tags)
+ get_fetch_map(refs, tag_refspec, &tail, 0);
+
+ return local_refs;
+}
+
+static void write_remote_refs(const struct ref *local_refs)
+{
+ const struct ref *r;
+
+ struct ref_transaction *t;
+ struct strbuf err = STRBUF_INIT;
+
+ t = ref_transaction_begin(&err);
+ if (!t)
+ die("%s", err.buf);
+
+ for (r = local_refs; r; r = r->next) {
+ if (!r->peer_ref)
+ continue;
+ if (ref_transaction_create(t, r->peer_ref->name, &r->old_oid,
+ 0, NULL, &err))
+ die("%s", err.buf);
+ }
+
+ if (initial_ref_transaction_commit(t, &err))
+ die("%s", err.buf);
+
+ strbuf_release(&err);
+ ref_transaction_free(t);
+}
+
+static void write_followtags(const struct ref *refs, const char *msg)
+{
+ const struct ref *ref;
+ for (ref = refs; ref; ref = ref->next) {
+ if (!starts_with(ref->name, "refs/tags/"))
+ continue;
+ if (ends_with(ref->name, "^{}"))
+ continue;
+ if (!has_object_file_with_flags(&ref->old_oid,
+ OBJECT_INFO_QUICK |
+ OBJECT_INFO_SKIP_FETCH_OBJECT))
+ continue;
+ update_ref(msg, ref->name, &ref->old_oid, NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
+ }
+}
+
+static const struct object_id *iterate_ref_map(void *cb_data)
+{
+ struct ref **rm = cb_data;
+ struct ref *ref = *rm;
+
+ /*
+ * Skip anything missing a peer_ref, which we are not
+ * actually going to write a ref for.
+ */
+ while (ref && !ref->peer_ref)
+ ref = ref->next;
+ if (!ref)
+ return NULL;
+
+ *rm = ref->next;
+ return &ref->old_oid;
+}
+
+static void update_remote_refs(const struct ref *refs,
+ const struct ref *mapped_refs,
+ const struct ref *remote_head_points_at,
+ const char *branch_top,
+ const char *msg,
+ struct transport *transport,
+ int check_connectivity)
+{
+ const struct ref *rm = mapped_refs;
+
+ if (check_connectivity) {
+ struct check_connected_options opt = CHECK_CONNECTED_INIT;
+
+ opt.transport = transport;
+ opt.progress = transport->progress;
+
+ if (check_connected(iterate_ref_map, &rm, &opt))
+ die(_("remote did not send all necessary objects"));
+ }
+
+ if (refs) {
+ write_remote_refs(mapped_refs);
+ if (option_single_branch && !option_no_tags)
+ write_followtags(refs, msg);
+ }
+
+ if (remote_head_points_at && !option_bare) {
+ struct strbuf head_ref = STRBUF_INIT;
+ strbuf_addstr(&head_ref, branch_top);
+ strbuf_addstr(&head_ref, "HEAD");
+ if (create_symref(head_ref.buf,
+ remote_head_points_at->peer_ref->name,
+ msg) < 0)
+ die(_("unable to update %s"), head_ref.buf);
+ strbuf_release(&head_ref);
+ }
+}
+
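+/*
+ * Point HEAD at what we are checking out: a symref for a local
+ * branch or an unborn remote HEAD, otherwise a detached HEAD at the
+ * fetched commit.
+ */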
+static void update_head(const struct ref *our, const struct ref *remote,
+ const char *unborn, const char *msg)
+{
+ const char *head;
+ if (our && skip_prefix(our->name, "refs/heads/", &head)) {
+ /* Local default branch link */
+ if (create_symref("HEAD", our->name, NULL) < 0)
+ die(_("unable to update HEAD"));
+ if (!option_bare) {
+ update_ref(msg, "HEAD", &our->old_oid, NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
+ install_branch_config(0, head, remote_name, our->name);
+ }
+ } else if (our) {
+ struct commit *c = lookup_commit_reference(the_repository,
+ &our->old_oid);
+ /* --branch specifies a non-branch (i.e. tags), detach HEAD */
+ update_ref(msg, "HEAD", &c->object.oid, NULL, REF_NO_DEREF,
+ UPDATE_REFS_DIE_ON_ERR);
+ } else if (remote) {
+ /*
+ * We know remote HEAD points to a non-branch, or
+ * HEAD points to a branch but we don't know which one.
+ * Detach HEAD in all these cases.
+ */
+ update_ref(msg, "HEAD", &remote->old_oid, NULL, REF_NO_DEREF,
+ UPDATE_REFS_DIE_ON_ERR);
+ } else if (unborn && skip_prefix(unborn, "refs/heads/", &head)) {
+ /*
+ * Unborn head from remote; same as "our" case above except
+ * that we have no ref to update.
+ */
+ if (create_symref("HEAD", unborn, NULL) < 0)
+ die(_("unable to update HEAD"));
+ if (!option_bare)
+ install_branch_config(0, head, remote_name, unborn);
+ }
+}
+
+static int git_sparse_checkout_init(const char *repo)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ int result = 0;
+ strvec_pushl(&cmd.args, "-C", repo, "sparse-checkout", "set", NULL);
+
+ /*
+ * We must apply the setting in the current process
+ * for the later checkout to use the sparse-checkout file.
+ */
+ core_apply_sparse_checkout = 1;
+
+ cmd.git_cmd = 1;
+ if (run_command(&cmd)) {
+ error(_("failed to initialize sparse-checkout"));
+ result = 1;
+ }
+
+ return result;
+}
+
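+/*
+ * Populate the new working tree from HEAD with a one-way merge, run
+ * the post-checkout hook and, if requested, update submodules
+ * recursively.
+ */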
+static int checkout(int submodule_progress, int filter_submodules)
+{
+ struct object_id oid;
+ char *head;
+ struct lock_file lock_file = LOCK_INIT;
+ struct unpack_trees_options opts;
+ struct tree *tree;
+ struct tree_desc t;
+ int err = 0;
+
+ if (option_no_checkout)
+ return 0;
+
+ head = resolve_refdup("HEAD", RESOLVE_REF_READING, &oid, NULL);
+ if (!head) {
+ warning(_("remote HEAD refers to nonexistent ref, "
+ "unable to checkout"));
+ return 0;
+ }
+ if (!strcmp(head, "HEAD")) {
+ if (advice_enabled(ADVICE_DETACHED_HEAD))
+ detach_advice(oid_to_hex(&oid));
+ FREE_AND_NULL(head);
+ } else {
+ if (!starts_with(head, "refs/heads/"))
+ die(_("HEAD not found below refs/heads!"));
+ }
+
+ /* We need to be in the new work tree for the checkout */
+ setup_work_tree();
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+
+ memset(&opts, 0, sizeof opts);
+ opts.update = 1;
+ opts.merge = 1;
+ opts.clone = 1;
+ opts.preserve_ignored = 0;
+ opts.fn = oneway_merge;
+ opts.verbose_update = (option_verbosity >= 0);
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ init_checkout_metadata(&opts.meta, head, &oid, NULL);
+
+ tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die(_("unable to parse commit %s"), oid_to_hex(&oid));
+ parse_tree(tree);
+ init_tree_desc(&t, tree->buffer, tree->size);
+ if (unpack_trees(1, &t, &opts) < 0)
+ die(_("unable to checkout working tree"));
+
+ free(head);
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die(_("unable to write new index file"));
+
+ err |= run_hooks_l("post-checkout", oid_to_hex(null_oid()),
+ oid_to_hex(&oid), "1", NULL);
+
+ if (!err && (option_recurse_submodules.nr > 0)) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ strvec_pushl(&cmd.args, "submodule", "update", "--require-init",
+ "--recursive", NULL);
+
+ if (option_shallow_submodules == 1)
+ strvec_push(&cmd.args, "--depth=1");
+
+ if (max_jobs != -1)
+ strvec_pushf(&cmd.args, "--jobs=%d", max_jobs);
+
+ if (submodule_progress)
+ strvec_push(&cmd.args, "--progress");
+
+ if (option_verbosity < 0)
+ strvec_push(&cmd.args, "--quiet");
+
+ if (option_remote_submodules) {
+ strvec_push(&cmd.args, "--remote");
+ strvec_push(&cmd.args, "--no-fetch");
+ }
+
+ if (filter_submodules && filter_options.choice)
+ strvec_pushf(&cmd.args, "--filter=%s",
+ expand_list_objects_filter_spec(&filter_options));
+
+ if (option_single_branch >= 0)
+ strvec_push(&cmd.args, option_single_branch ?
+ "--single-branch" :
+ "--no-single-branch");
+
+ cmd.git_cmd = 1;
+ err = run_command(&cmd);
+ }
+
+ return err;
+}
+
+static int git_clone_config(const char *k, const char *v, void *cb)
+{
+ if (!strcmp(k, "clone.defaultremotename")) {
+ free(remote_name);
+ remote_name = xstrdup(v);
+ }
+ if (!strcmp(k, "clone.rejectshallow"))
+ config_reject_shallow = git_config_bool(k, v);
+ if (!strcmp(k, "clone.filtersubmodules"))
+ config_filter_submodules = git_config_bool(k, v);
+
+ return git_default_config(k, v, cb);
+}
+
+static int write_one_config(const char *key, const char *value, void *data)
+{
+ /*
+ * give git_clone_config a chance to write config values back to the
+ * environment, since git_config_set_multivar_gently only deals with
+ * config-file writes
+ */
+ int apply_failed = git_clone_config(key, value, data);
+ if (apply_failed)
+ return apply_failed;
+
+ return git_config_set_multivar_gently(key,
+ value ? value : "true",
+ CONFIG_REGEX_NONE, 0);
+}
+
+static void write_config(struct string_list *config)
+{
+ int i;
+
+ for (i = 0; i < config->nr; i++) {
+ if (git_config_parse_parameter(config->items[i].string,
+ write_one_config, NULL) < 0)
+ die(_("unable to write parameters to config file"));
+ }
+}
+
+static void write_refspec_config(const char *src_ref_prefix,
+ const struct ref *our_head_points_at,
+ const struct ref *remote_head_points_at,
+ struct strbuf *branch_top)
+{
+ struct strbuf key = STRBUF_INIT;
+ struct strbuf value = STRBUF_INIT;
+
+ if (option_mirror || !option_bare) {
+ if (option_single_branch && !option_mirror) {
+ if (option_branch) {
+ if (starts_with(our_head_points_at->name, "refs/tags/"))
+ strbuf_addf(&value, "+%s:%s", our_head_points_at->name,
+ our_head_points_at->name);
+ else
+ strbuf_addf(&value, "+%s:%s%s", our_head_points_at->name,
+ branch_top->buf, option_branch);
+ } else if (remote_head_points_at) {
+ const char *head = remote_head_points_at->name;
+ if (!skip_prefix(head, "refs/heads/", &head))
+ BUG("remote HEAD points at non-head?");
+
+ strbuf_addf(&value, "+%s:%s%s", remote_head_points_at->name,
+ branch_top->buf, head);
+ }
+ /*
+ * otherwise, the next "git fetch" will
+ * simply fetch from HEAD without updating
+ * any remote-tracking branch, which is what
+ * we want.
+ */
+ } else {
+ strbuf_addf(&value, "+%s*:%s*", src_ref_prefix, branch_top->buf);
+ }
+ /* Configure the remote */
+ if (value.len) {
+ strbuf_addf(&key, "remote.%s.fetch", remote_name);
+ git_config_set_multivar(key.buf, value.buf, "^$", 0);
+ strbuf_reset(&key);
+
+ if (option_mirror) {
+ strbuf_addf(&key, "remote.%s.mirror", remote_name);
+ git_config_set(key.buf, "true");
+ strbuf_reset(&key);
+ }
+ }
+ }
+
+ strbuf_release(&key);
+ strbuf_release(&value);
+}
+
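+/*
+ * After borrowing objects via --reference, repack locally and remove
+ * the alternates file so the clone no longer depends on the
+ * reference repositories.
+ */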
+static void dissociate_from_references(void)
+{
+ char *alternates = git_pathdup("objects/info/alternates");
+
+ if (!access(alternates, F_OK)) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ cmd.no_stdin = 1;
+ strvec_pushl(&cmd.args, "repack", "-a", "-d", NULL);
+ if (run_command(&cmd))
+ die(_("cannot repack to clean up"));
+ if (unlink(alternates) && errno != ENOENT)
+ die_errno(_("cannot unlink temporary alternates file"));
+ }
+ free(alternates);
+}
+
+static int path_exists(const char *path)
+{
+ struct stat sb;
+ return !stat(path, &sb);
+}
+
+int cmd_clone(int argc, const char **argv, const char *prefix)
+{
+ int is_bundle = 0, is_local;
+ int reject_shallow = 0;
+ const char *repo_name, *repo, *work_tree, *git_dir;
+ char *path = NULL, *dir, *display_repo = NULL;
+ int dest_exists, real_dest_exists = 0;
+ const struct ref *refs, *remote_head;
+ struct ref *remote_head_points_at = NULL;
+ const struct ref *our_head_points_at;
+ char *unborn_head = NULL;
+ struct ref *mapped_refs = NULL;
+ const struct ref *ref;
+ struct strbuf key = STRBUF_INIT;
+ struct strbuf branch_top = STRBUF_INIT, reflog_msg = STRBUF_INIT;
+ struct transport *transport = NULL;
+ const char *src_ref_prefix = "refs/heads/";
+ struct remote *remote;
+ int err = 0, complete_refs_before_fetch = 1;
+ int submodule_progress;
+ int filter_submodules = 0;
+
+ struct transport_ls_refs_options transport_ls_refs_options =
+ TRANSPORT_LS_REFS_OPTIONS_INIT;
+
+ packet_trace_identity("clone");
+
+ git_config(git_clone_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, builtin_clone_options,
+ builtin_clone_usage, 0);
+
+ if (argc > 2)
+ usage_msg_opt(_("Too many arguments."),
+ builtin_clone_usage, builtin_clone_options);
+
+ if (argc == 0)
+ usage_msg_opt(_("You must specify a repository to clone."),
+ builtin_clone_usage, builtin_clone_options);
+
+ if (option_depth || option_since || option_not.nr)
+ deepen = 1;
+ if (option_single_branch == -1)
+ option_single_branch = deepen ? 1 : 0;
+
+ if (option_mirror)
+ option_bare = 1;
+
+ if (option_bare) {
+ if (real_git_dir)
+ die(_("options '%s' and '%s' cannot be used together"), "--bare", "--separate-git-dir");
+ option_no_checkout = 1;
+ }
+
+ if (bundle_uri && deepen)
+ die(_("--bundle-uri is incompatible with --depth, --shallow-since, and --shallow-exclude"));
+
+ repo_name = argv[0];
+
+ path = get_repo_path(repo_name, &is_bundle);
+ if (path) {
+ FREE_AND_NULL(path);
+ repo = absolute_pathdup(repo_name);
+ } else if (strchr(repo_name, ':')) {
+ repo = repo_name;
+ display_repo = transport_anonymize_url(repo);
+ } else
+ die(_("repository '%s' does not exist"), repo_name);
+
+ /* no need to be strict, transport_set_option() will validate it again */
+ if (option_depth && atoi(option_depth) < 1)
+ die(_("depth %s is not a positive number"), option_depth);
+
+ if (argc == 2)
+ dir = xstrdup(argv[1]);
+ else
+ dir = git_url_basename(repo_name, is_bundle, option_bare);
+ strip_dir_trailing_slashes(dir);
+
+ dest_exists = path_exists(dir);
+ if (dest_exists && !is_empty_dir(dir))
+ die(_("destination path '%s' already exists and is not "
+ "an empty directory."), dir);
+
+ if (real_git_dir) {
+ real_dest_exists = path_exists(real_git_dir);
+ if (real_dest_exists && !is_empty_dir(real_git_dir))
+ die(_("repository path '%s' already exists and is not "
+ "an empty directory."), real_git_dir);
+ }
+
+
+ strbuf_addf(&reflog_msg, "clone: from %s",
+ display_repo ? display_repo : repo);
+ free(display_repo);
+
+ if (option_bare)
+ work_tree = NULL;
+ else {
+ work_tree = getenv("GIT_WORK_TREE");
+ if (work_tree && path_exists(work_tree))
+ die(_("working tree '%s' already exists."), work_tree);
+ }
+
+ if (option_bare || work_tree)
+ git_dir = xstrdup(dir);
+ else {
+ work_tree = dir;
+ git_dir = mkpathdup("%s/.git", dir);
+ }
+
+ atexit(remove_junk);
+ sigchain_push_common(remove_junk_on_signal);
+
+ if (!option_bare) {
+ if (safe_create_leading_directories_const(work_tree) < 0)
+ die_errno(_("could not create leading directories of '%s'"),
+ work_tree);
+ if (dest_exists)
+ junk_work_tree_flags |= REMOVE_DIR_KEEP_TOPLEVEL;
+ else if (mkdir(work_tree, 0777))
+ die_errno(_("could not create work tree dir '%s'"),
+ work_tree);
+ junk_work_tree = work_tree;
+ set_git_work_tree(work_tree);
+ }
+
+ if (real_git_dir) {
+ if (real_dest_exists)
+ junk_git_dir_flags |= REMOVE_DIR_KEEP_TOPLEVEL;
+ junk_git_dir = real_git_dir;
+ } else {
+ if (dest_exists)
+ junk_git_dir_flags |= REMOVE_DIR_KEEP_TOPLEVEL;
+ junk_git_dir = git_dir;
+ }
+ if (safe_create_leading_directories_const(git_dir) < 0)
+ die(_("could not create leading directories of '%s'"), git_dir);
+
+ if (0 <= option_verbosity) {
+ if (option_bare)
+ fprintf(stderr, _("Cloning into bare repository '%s'...\n"), dir);
+ else
+ fprintf(stderr, _("Cloning into '%s'...\n"), dir);
+ }
+
+ if (option_recurse_submodules.nr > 0) {
+ struct string_list_item *item;
+ struct strbuf sb = STRBUF_INIT;
+ int val;
+
+ /* remove duplicates */
+ string_list_sort(&option_recurse_submodules);
+ string_list_remove_duplicates(&option_recurse_submodules, 0);
+
+ /*
+ * NEEDSWORK: In a multi-working-tree world, this needs to be
+ * set in the per-worktree config.
+ */
+ for_each_string_list_item(item, &option_recurse_submodules) {
+ strbuf_addf(&sb, "submodule.active=%s",
+ item->string);
+ string_list_append(&option_config,
+ strbuf_detach(&sb, NULL));
+ }
+
+ if (!git_config_get_bool("submodule.stickyRecursiveClone", &val) &&
+ val)
+ string_list_append(&option_config, "submodule.recurse=true");
+
+ if (option_required_reference.nr &&
+ option_optional_reference.nr)
+ die(_("clone --recursive is not compatible with "
+ "both --reference and --reference-if-able"));
+ else if (option_required_reference.nr) {
+ string_list_append(&option_config,
+ "submodule.alternateLocation=superproject");
+ string_list_append(&option_config,
+ "submodule.alternateErrorStrategy=die");
+ } else if (option_optional_reference.nr) {
+ string_list_append(&option_config,
+ "submodule.alternateLocation=superproject");
+ string_list_append(&option_config,
+ "submodule.alternateErrorStrategy=info");
+ }
+ }
+
+ init_db(git_dir, real_git_dir, option_template, GIT_HASH_UNKNOWN, NULL,
+ INIT_DB_QUIET);
+
+ if (real_git_dir) {
+ free((char *)git_dir);
+ git_dir = real_git_dir;
+ }
+
+ /*
+ * additional config can be injected with -c; make sure it's included
+ * after init_db, which clears the entire config environment.
+ */
+ write_config(&option_config);
+
+ /*
+ * re-read config after init_db and write_config to pick up any config
+ * injected by --template and --config, respectively.
+ */
+ git_config(git_clone_config, NULL);
+
+ /*
+ * If option_reject_shallow is specified from CLI option,
+ * ignore config_reject_shallow from git_clone_config.
+ */
+ if (config_reject_shallow != -1)
+ reject_shallow = config_reject_shallow;
+ if (option_reject_shallow != -1)
+ reject_shallow = option_reject_shallow;
+
+ /*
+ * If option_filter_submodules is specified from CLI option,
+ * ignore config_filter_submodules from git_clone_config.
+ */
+ if (config_filter_submodules != -1)
+ filter_submodules = config_filter_submodules;
+ if (option_filter_submodules != -1)
+ filter_submodules = option_filter_submodules;
+
+ /*
+ * Exit if the user seems to be doing something silly with submodule
+ * filter flags (but not with filter configs, as those should be
+ * set-and-forget).
+ */
+ if (option_filter_submodules > 0 && !filter_options.choice)
+ die(_("the option '%s' requires '%s'"),
+ "--also-filter-submodules", "--filter");
+ if (option_filter_submodules > 0 && !option_recurse_submodules.nr)
+ die(_("the option '%s' requires '%s'"),
+ "--also-filter-submodules", "--recurse-submodules");
+
+ /*
+ * apply the remote name provided by --origin only after this second
+ * call to git_config, to ensure it overrides all config-based values.
+ */
+ if (option_origin) {
+ free(remote_name);
+ remote_name = xstrdup(option_origin);
+ }
+
+ if (!remote_name)
+ remote_name = xstrdup("origin");
+
+ if (!valid_remote_name(remote_name))
+ die(_("'%s' is not a valid remote name"), remote_name);
+
+ if (option_bare) {
+ if (option_mirror)
+ src_ref_prefix = "refs/";
+ strbuf_addstr(&branch_top, src_ref_prefix);
+
+ git_config_set("core.bare", "true");
+ } else {
+ strbuf_addf(&branch_top, "refs/remotes/%s/", remote_name);
+ }
+
+ strbuf_addf(&key, "remote.%s.url", remote_name);
+ git_config_set(key.buf, repo);
+ strbuf_reset(&key);
+
+ if (option_no_tags) {
+ strbuf_addf(&key, "remote.%s.tagOpt", remote_name);
+ git_config_set(key.buf, "--no-tags");
+ strbuf_reset(&key);
+ }
+
+ if (option_required_reference.nr || option_optional_reference.nr)
+ setup_reference();
+
+ if (option_sparse_checkout && git_sparse_checkout_init(dir))
+ return 1;
+
+ remote = remote_get(remote_name);
+
+ refspec_appendf(&remote->fetch, "+%s*:%s*", src_ref_prefix,
+ branch_top.buf);
+
+ path = get_repo_path(remote->url[0], &is_bundle);
+ is_local = option_local != 0 && path && !is_bundle;
+ if (is_local) {
+ if (option_depth)
+ warning(_("--depth is ignored in local clones; use file:// instead."));
+ if (option_since)
+ warning(_("--shallow-since is ignored in local clones; use file:// instead."));
+ if (option_not.nr)
+ warning(_("--shallow-exclude is ignored in local clones; use file:// instead."));
+ if (filter_options.choice)
+ warning(_("--filter is ignored in local clones; use file:// instead."));
+ if (!access(mkpath("%s/shallow", path), F_OK)) {
+ if (reject_shallow)
+ die(_("source repository is shallow, reject to clone."));
+ if (option_local > 0)
+ warning(_("source repository is shallow, ignoring --local"));
+ is_local = 0;
+ }
+ }
+ if (option_local > 0 && !is_local)
+ warning(_("--local is ignored"));
+
+ transport = transport_get(remote, path ? path : remote->url[0]);
+ transport_set_verbosity(transport, option_verbosity, option_progress);
+ transport->family = family;
+ transport->cloning = 1;
+
+ if (is_bundle) {
+ struct bundle_header header = BUNDLE_HEADER_INIT;
+ int fd = read_bundle_header(path, &header);
+ int has_filter = header.filter.choice != LOFC_DISABLED;
+
+ if (fd > 0)
+ close(fd);
+ bundle_header_release(&header);
+ if (has_filter)
+ die(_("cannot clone from filtered bundle"));
+ }
+
+ transport_set_option(transport, TRANS_OPT_KEEP, "yes");
+
+ if (reject_shallow)
+ transport_set_option(transport, TRANS_OPT_REJECT_SHALLOW, "1");
+ if (option_depth)
+ transport_set_option(transport, TRANS_OPT_DEPTH,
+ option_depth);
+ if (option_since)
+ transport_set_option(transport, TRANS_OPT_DEEPEN_SINCE,
+ option_since);
+ if (option_not.nr)
+ transport_set_option(transport, TRANS_OPT_DEEPEN_NOT,
+ (const char *)&option_not);
+ if (option_single_branch)
+ transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
+
+ if (option_upload_pack)
+ transport_set_option(transport, TRANS_OPT_UPLOADPACK,
+ option_upload_pack);
+
+ if (server_options.nr)
+ transport->server_options = &server_options;
+
+ if (filter_options.choice) {
+ const char *spec =
+ expand_list_objects_filter_spec(&filter_options);
+ transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ spec);
+ transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
+
+ if (transport->smart_options && !deepen && !filter_options.choice)
+ transport->smart_options->check_self_contained_and_connected = 1;
+
+ /*
+ * Before fetching from the remote, download and install bundle
+ * data from the --bundle-uri option.
+ */
+ if (bundle_uri) {
+ /* At this point, we need the_repository to match the cloned repo. */
+ if (repo_init(the_repository, git_dir, work_tree))
+ warning(_("failed to initialize the repo, skipping bundle URI"));
+ else if (fetch_bundle_uri(the_repository, bundle_uri))
+ warning(_("failed to fetch objects from bundle URI '%s'"),
+ bundle_uri);
+ }
+
+ strvec_push(&transport_ls_refs_options.ref_prefixes, "HEAD");
+ refspec_ref_prefixes(&remote->fetch,
+ &transport_ls_refs_options.ref_prefixes);
+ if (option_branch)
+ expand_ref_prefix(&transport_ls_refs_options.ref_prefixes,
+ option_branch);
+ if (!option_no_tags)
+ strvec_push(&transport_ls_refs_options.ref_prefixes,
+ "refs/tags/");
+
+ refs = transport_get_remote_refs(transport, &transport_ls_refs_options);
+
+ if (refs)
+ mapped_refs = wanted_peer_refs(refs, &remote->fetch);
+
+ if (mapped_refs) {
+ int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
+
+ /*
+ * Now that we know what algorithm the remote side is using,
+ * let's set ours to the same thing.
+ */
+ initialize_repository_version(hash_algo, 1);
+ repo_set_hash_algo(the_repository, hash_algo);
+ /*
+ * transport_get_remote_refs() may return refs with null sha-1
+ * in mapped_refs (see struct transport->get_refs_list
+		 * comment). In that case we need to fetch it early because
+		 * remote_head code below relies on it.
+		 *
+		 * For normal clones, transport_get_remote_refs() should
+		 * return a reliable ref set, so we can delay cloning until
+		 * after the remote HEAD check.
+ */
+ for (ref = refs; ref; ref = ref->next)
+ if (is_null_oid(&ref->old_oid)) {
+ complete_refs_before_fetch = 0;
+ break;
+ }
+
+ if (!is_local && !complete_refs_before_fetch) {
+ if (transport_fetch_refs(transport, mapped_refs))
+ die(_("remote transport reported error"));
+ }
+ }
+
+ remote_head = find_ref_by_name(refs, "HEAD");
+ remote_head_points_at = guess_remote_head(remote_head, mapped_refs, 0);
+
+ if (option_branch) {
+ our_head_points_at = find_remote_branch(mapped_refs, option_branch);
+ if (!our_head_points_at)
+ die(_("Remote branch %s not found in upstream %s"),
+ option_branch, remote_name);
+ } else if (remote_head_points_at) {
+ our_head_points_at = remote_head_points_at;
+ } else if (remote_head) {
+ our_head_points_at = NULL;
+ } else {
+ const char *branch;
+
+ if (!mapped_refs) {
+ warning(_("You appear to have cloned an empty repository."));
+ option_no_checkout = 1;
+ }
+
+ if (transport_ls_refs_options.unborn_head_target &&
+ skip_prefix(transport_ls_refs_options.unborn_head_target,
+ "refs/heads/", &branch)) {
+ unborn_head = xstrdup(transport_ls_refs_options.unborn_head_target);
+ } else {
+ branch = git_default_branch_name(0);
+ unborn_head = xstrfmt("refs/heads/%s", branch);
+ }
+
+ /*
+ * We may have selected a local default branch name "foo",
+ * and even though the remote's HEAD does not point there,
+ * it may still have a "foo" branch. If so, set it up so
+ * that we can follow the usual checkout code later.
+ *
+ * Note that for an empty repo we'll already have set
+ * option_no_checkout above, which would work against us here.
+ * But for an empty repo, find_remote_branch() can never find
+ * a match.
+ */
+ our_head_points_at = find_remote_branch(mapped_refs, branch);
+ }
+
+ write_refspec_config(src_ref_prefix, our_head_points_at,
+ remote_head_points_at, &branch_top);
+
+ if (filter_options.choice)
+ partial_clone_register(remote_name, &filter_options);
+
+ if (is_local)
+ clone_local(path, git_dir);
+ else if (mapped_refs && complete_refs_before_fetch) {
+ if (transport_fetch_refs(transport, mapped_refs))
+ die(_("remote transport reported error"));
+ }
+
+ update_remote_refs(refs, mapped_refs, remote_head_points_at,
+ branch_top.buf, reflog_msg.buf, transport,
+ !is_local);
+
+ update_head(our_head_points_at, remote_head, unborn_head, reflog_msg.buf);
+
+ /*
+ * We want to show progress for recursive submodule clones iff
+ * we did so for the main clone. But only the transport knows
+ * the final decision for this flag, so we need to rescue the value
+ * before we free the transport.
+ */
+ submodule_progress = transport->progress;
+
+ transport_unlock_pack(transport, 0);
+ transport_disconnect(transport);
+
+ if (option_dissociate) {
+ close_object_store(the_repository->objects);
+ dissociate_from_references();
+ }
+
+ junk_mode = JUNK_LEAVE_REPO;
+ err = checkout(submodule_progress, filter_submodules);
+
+ free(remote_name);
+ strbuf_release(&reflog_msg);
+ strbuf_release(&branch_top);
+ strbuf_release(&key);
+ free_refs(mapped_refs);
+ free_refs(remote_head_points_at);
+ free(unborn_head);
+ free(dir);
+ free(path);
+ UNLEAK(repo);
+ junk_mode = JUNK_LEAVE_ALL;
+
+ transport_ls_refs_options_release(&transport_ls_refs_options);
+ return err;
+}
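
As an aside (not part of the upstream patch): the reject_shallow and filter_submodules
handling in cmd_clone() above follows a tri-state convention where -1 means "unset",
the configuration value is applied first, and an explicit command-line flag wins. A
minimal standalone sketch of that precedence, in plain C with made-up names rather
than git's internal APIs:

    #include <stdio.h>

    /*
     * -1 means "the user did not say"; config is consulted first and an
     * explicit command-line value overrides it, mirroring the clone code.
     */
    static int resolve_tristate(int config_value, int cli_value, int fallback)
    {
    	int result = fallback;

    	if (config_value != -1)		/* e.g. clone.rejectShallow */
    		result = config_value;
    	if (cli_value != -1)		/* e.g. --[no-]reject-shallow */
    		result = cli_value;
    	return result;
    }

    int main(void)
    {
    	/* config says "true", CLI says "--no-..." -> the CLI wins */
    	printf("%d\n", resolve_tristate(1, 0, 0));	/* prints 0 */
    	/* nothing set anywhere -> fall back to the built-in default */
    	printf("%d\n", resolve_tristate(-1, -1, 0));	/* prints 0 */
    	return 0;
    }
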
diff --git a/builtin/column.c b/builtin/column.c
new file mode 100644
index 0000000..158fdf5
--- /dev/null
+++ b/builtin/column.c
@@ -0,0 +1,59 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "strbuf.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "column.h"
+
+static const char * const builtin_column_usage[] = {
+ N_("git column [<options>]"),
+ NULL
+};
+static unsigned int colopts;
+
+static int column_config(const char *var, const char *value, void *cb)
+{
+ return git_column_config(var, value, cb, &colopts);
+}
+
+int cmd_column(int argc, const char **argv, const char *prefix)
+{
+ struct string_list list = STRING_LIST_INIT_DUP;
+ struct strbuf sb = STRBUF_INIT;
+ struct column_options copts;
+ const char *command = NULL, *real_command = NULL;
+ struct option options[] = {
+ OPT_STRING(0, "command", &real_command, N_("name"), N_("lookup config vars")),
+ OPT_COLUMN(0, "mode", &colopts, N_("layout to use")),
+ OPT_INTEGER(0, "raw-mode", &colopts, N_("layout to use")),
+ OPT_INTEGER(0, "width", &copts.width, N_("maximum width")),
+ OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("padding space on left border")),
+ OPT_STRING(0, "nl", &copts.nl, N_("string"), N_("padding space on right border")),
+ OPT_INTEGER(0, "padding", &copts.padding, N_("padding space between columns")),
+ OPT_END()
+ };
+
+ /* This one is special and must be the first one */
+ if (argc > 1 && starts_with(argv[1], "--command=")) {
+ command = argv[1] + 10;
+ git_config(column_config, (void *)command);
+ } else
+ git_config(column_config, NULL);
+
+ memset(&copts, 0, sizeof(copts));
+ copts.padding = 1;
+ argc = parse_options(argc, argv, prefix, options, builtin_column_usage, 0);
+ if (argc)
+ usage_with_options(builtin_column_usage, options);
+ if (real_command || command) {
+ if (!real_command || !command || strcmp(real_command, command))
+ die(_("--command must be the first argument"));
+ }
+ finalize_colopts(&colopts, -1);
+ while (!strbuf_getline(&sb, stdin))
+ string_list_append(&list, sb.buf);
+
+ print_columns(&list, colopts, &copts);
+ return 0;
+}
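
As an aside (not part of the upstream patch): print_columns() is a git internal, but
the basic layout `git column` performs can be sketched in standard C alone: read lines
from stdin, find the widest entry, and print as many fixed-width cells per row as fit
a terminal width. The buffer sizes and the 80-column width below are arbitrary
assumptions for the example:

    #include <stdio.h>
    #include <string.h>

    #define MAX_ITEMS 1024
    #define TERM_WIDTH 80

    int main(void)
    {
    	static char items[MAX_ITEMS][256];
    	size_t n = 0, widest = 0, i;
    	size_t cell, cols;

    	/* slurp stdin, one item per line, tracking the widest entry */
    	while (n < MAX_ITEMS && fgets(items[n], sizeof(items[n]), stdin)) {
    		items[n][strcspn(items[n], "\n")] = '\0';
    		if (strlen(items[n]) > widest)
    			widest = strlen(items[n]);
    		n++;
    	}

    	cell = widest + 1;		/* one space of padding per column */
    	cols = TERM_WIDTH / cell;
    	if (!cols)
    		cols = 1;

    	/* lay the items out row by row, left to right */
    	for (i = 0; i < n; i++) {
    		printf("%-*s", (int)cell, items[i]);
    		if ((i + 1) % cols == 0 || i + 1 == n)
    			putchar('\n');
    	}
    	return 0;
    }
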
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
new file mode 100644
index 0000000..e8f77f5
--- /dev/null
+++ b/builtin/commit-graph.c
@@ -0,0 +1,328 @@
+#include "builtin.h"
+#include "config.h"
+#include "dir.h"
+#include "lockfile.h"
+#include "parse-options.h"
+#include "repository.h"
+#include "commit-graph.h"
+#include "object-store.h"
+#include "progress.h"
+#include "tag.h"
+
+#define BUILTIN_COMMIT_GRAPH_VERIFY_USAGE \
+ N_("git commit-graph verify [--object-dir <dir>] [--shallow] [--[no-]progress]")
+
+#define BUILTIN_COMMIT_GRAPH_WRITE_USAGE \
+ N_("git commit-graph write [--object-dir <dir>] [--append]\n" \
+ " [--split[=<strategy>]] [--reachable | --stdin-packs | --stdin-commits]\n" \
+ " [--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress]\n" \
+ " <split options>")
+
+static const char * builtin_commit_graph_verify_usage[] = {
+ BUILTIN_COMMIT_GRAPH_VERIFY_USAGE,
+ NULL
+};
+
+static const char * builtin_commit_graph_write_usage[] = {
+ BUILTIN_COMMIT_GRAPH_WRITE_USAGE,
+ NULL
+};
+
+static char const * const builtin_commit_graph_usage[] = {
+ BUILTIN_COMMIT_GRAPH_VERIFY_USAGE,
+ BUILTIN_COMMIT_GRAPH_WRITE_USAGE,
+ NULL,
+};
+
+static struct opts_commit_graph {
+ const char *obj_dir;
+ int reachable;
+ int stdin_packs;
+ int stdin_commits;
+ int append;
+ int split;
+ int shallow;
+ int progress;
+ int enable_changed_paths;
+} opts;
+
+static struct option common_opts[] = {
+ OPT_STRING(0, "object-dir", &opts.obj_dir,
+ N_("dir"),
+ N_("the object directory to store the graph")),
+ OPT_END()
+};
+
+static struct option *add_common_options(struct option *to)
+{
+ return parse_options_concat(common_opts, to);
+}
+
+static int graph_verify(int argc, const char **argv, const char *prefix)
+{
+ struct commit_graph *graph = NULL;
+ struct object_directory *odb = NULL;
+ char *graph_name;
+ int open_ok;
+ int fd;
+ struct stat st;
+ int flags = 0;
+
+ static struct option builtin_commit_graph_verify_options[] = {
+ OPT_BOOL(0, "shallow", &opts.shallow,
+ N_("if the commit-graph is split, only verify the tip file")),
+ OPT_BOOL(0, "progress", &opts.progress,
+ N_("force progress reporting")),
+ OPT_END(),
+ };
+ struct option *options = add_common_options(builtin_commit_graph_verify_options);
+
+ trace2_cmd_mode("verify");
+
+ opts.progress = isatty(2);
+ argc = parse_options(argc, argv, prefix,
+ options,
+ builtin_commit_graph_verify_usage, 0);
+ if (argc)
+ usage_with_options(builtin_commit_graph_verify_usage, options);
+
+ if (!opts.obj_dir)
+ opts.obj_dir = get_object_directory();
+ if (opts.shallow)
+ flags |= COMMIT_GRAPH_VERIFY_SHALLOW;
+ if (opts.progress)
+ flags |= COMMIT_GRAPH_WRITE_PROGRESS;
+
+ odb = find_odb(the_repository, opts.obj_dir);
+ graph_name = get_commit_graph_filename(odb);
+ open_ok = open_commit_graph(graph_name, &fd, &st);
+ if (!open_ok && errno != ENOENT)
+ die_errno(_("Could not open commit-graph '%s'"), graph_name);
+
+ FREE_AND_NULL(graph_name);
+ FREE_AND_NULL(options);
+
+ if (open_ok)
+ graph = load_commit_graph_one_fd_st(the_repository, fd, &st, odb);
+ else
+ graph = read_commit_graph_one(the_repository, odb);
+
+ /* Return failure if open_ok predicted success */
+ if (!graph)
+ return !!open_ok;
+
+ UNLEAK(graph);
+ return verify_commit_graph(the_repository, graph, flags);
+}
+
+extern int read_replace_refs;
+static struct commit_graph_opts write_opts;
+
+static int write_option_parse_split(const struct option *opt, const char *arg,
+ int unset)
+{
+ enum commit_graph_split_flags *flags = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ opts.split = 1;
+ if (!arg)
+ return 0;
+
+ if (!strcmp(arg, "no-merge"))
+ *flags = COMMIT_GRAPH_SPLIT_MERGE_PROHIBITED;
+ else if (!strcmp(arg, "replace"))
+ *flags = COMMIT_GRAPH_SPLIT_REPLACE;
+ else
+ die(_("unrecognized --split argument, %s"), arg);
+
+ return 0;
+}
+
+static int read_one_commit(struct oidset *commits, struct progress *progress,
+ const char *hash)
+{
+ struct object *result;
+ struct object_id oid;
+ const char *end;
+
+ if (parse_oid_hex(hash, &oid, &end))
+ return error(_("unexpected non-hex object ID: %s"), hash);
+
+ result = deref_tag(the_repository, parse_object(the_repository, &oid),
+ NULL, 0);
+ if (!result)
+ return error(_("invalid object: %s"), hash);
+ else if (object_as_type(result, OBJ_COMMIT, 1))
+ oidset_insert(commits, &result->oid);
+
+ display_progress(progress, oidset_size(commits));
+
+ return 0;
+}
+
+static int write_option_max_new_filters(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ int *to = opt->value;
+ if (unset)
+ *to = -1;
+ else {
+ const char *s;
+ *to = strtol(arg, (char **)&s, 10);
+ if (*s)
+ return error(_("option `%s' expects a numerical value"),
+ "max-new-filters");
+ }
+ return 0;
+}
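
As an aside (not part of the upstream patch): write_option_max_new_filters() above
rejects any argument that is not entirely numeric by checking where strtol() stopped.
A standalone sketch of the same end-pointer check, using only the standard library:

    #include <stdio.h>
    #include <stdlib.h>

    /* Returns 0 and stores the value only if the whole string is a number. */
    static int parse_numeric_arg(const char *arg, long *out)
    {
    	char *end;

    	*out = strtol(arg, &end, 10);
    	if (end == arg || *end)
    		return -1;	/* empty string or trailing non-digits */
    	return 0;
    }

    int main(void)
    {
    	long v;

    	printf("%d\n", parse_numeric_arg("42", &v));	/* 0, v == 42 */
    	printf("%d\n", parse_numeric_arg("42x", &v));	/* -1, rejected */
    	return 0;
    }
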
+
+static int git_commit_graph_write_config(const char *var, const char *value,
+ void *cb UNUSED)
+{
+ if (!strcmp(var, "commitgraph.maxnewfilters"))
+ write_opts.max_new_filters = git_config_int(var, value);
+ /*
+	 * No need to fall back to 'git_default_config', since this was already
+ * called in 'cmd_commit_graph()'.
+ */
+ return 0;
+}
+
+static int graph_write(int argc, const char **argv, const char *prefix)
+{
+ struct string_list pack_indexes = STRING_LIST_INIT_DUP;
+ struct strbuf buf = STRBUF_INIT;
+ struct oidset commits = OIDSET_INIT;
+ struct object_directory *odb = NULL;
+ int result = 0;
+ enum commit_graph_write_flags flags = 0;
+ struct progress *progress = NULL;
+
+ static struct option builtin_commit_graph_write_options[] = {
+ OPT_BOOL(0, "reachable", &opts.reachable,
+ N_("start walk at all refs")),
+ OPT_BOOL(0, "stdin-packs", &opts.stdin_packs,
+ N_("scan pack-indexes listed by stdin for commits")),
+ OPT_BOOL(0, "stdin-commits", &opts.stdin_commits,
+ N_("start walk at commits listed by stdin")),
+ OPT_BOOL(0, "append", &opts.append,
+ N_("include all commits already in the commit-graph file")),
+ OPT_BOOL(0, "changed-paths", &opts.enable_changed_paths,
+ N_("enable computation for changed paths")),
+ OPT_CALLBACK_F(0, "split", &write_opts.split_flags, NULL,
+ N_("allow writing an incremental commit-graph file"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ write_option_parse_split),
+ OPT_INTEGER(0, "max-commits", &write_opts.max_commits,
+ N_("maximum number of commits in a non-base split commit-graph")),
+ OPT_INTEGER(0, "size-multiple", &write_opts.size_multiple,
+ N_("maximum ratio between two levels of a split commit-graph")),
+ OPT_EXPIRY_DATE(0, "expire-time", &write_opts.expire_time,
+ N_("only expire files older than a given date-time")),
+ OPT_CALLBACK_F(0, "max-new-filters", &write_opts.max_new_filters,
+ NULL, N_("maximum number of changed-path Bloom filters to compute"),
+ 0, write_option_max_new_filters),
+ OPT_BOOL(0, "progress", &opts.progress,
+ N_("force progress reporting")),
+ OPT_END(),
+ };
+ struct option *options = add_common_options(builtin_commit_graph_write_options);
+
+ opts.progress = isatty(2);
+ opts.enable_changed_paths = -1;
+ write_opts.size_multiple = 2;
+ write_opts.max_commits = 0;
+ write_opts.expire_time = 0;
+ write_opts.max_new_filters = -1;
+
+ trace2_cmd_mode("write");
+
+ git_config(git_commit_graph_write_config, &opts);
+
+ argc = parse_options(argc, argv, prefix,
+ options,
+ builtin_commit_graph_write_usage, 0);
+ if (argc)
+ usage_with_options(builtin_commit_graph_write_usage, options);
+
+ if (opts.reachable + opts.stdin_packs + opts.stdin_commits > 1)
+ die(_("use at most one of --reachable, --stdin-commits, or --stdin-packs"));
+ if (!opts.obj_dir)
+ opts.obj_dir = get_object_directory();
+ if (opts.append)
+ flags |= COMMIT_GRAPH_WRITE_APPEND;
+ if (opts.split)
+ flags |= COMMIT_GRAPH_WRITE_SPLIT;
+ if (opts.progress)
+ flags |= COMMIT_GRAPH_WRITE_PROGRESS;
+ if (!opts.enable_changed_paths)
+ flags |= COMMIT_GRAPH_NO_WRITE_BLOOM_FILTERS;
+ if (opts.enable_changed_paths == 1 ||
+ git_env_bool(GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS, 0))
+ flags |= COMMIT_GRAPH_WRITE_BLOOM_FILTERS;
+
+ odb = find_odb(the_repository, opts.obj_dir);
+
+ if (opts.reachable) {
+ if (write_commit_graph_reachable(odb, flags, &write_opts))
+ return 1;
+ return 0;
+ }
+
+ if (opts.stdin_packs) {
+ while (strbuf_getline(&buf, stdin) != EOF)
+ string_list_append_nodup(&pack_indexes,
+ strbuf_detach(&buf, NULL));
+ } else if (opts.stdin_commits) {
+ oidset_init(&commits, 0);
+ if (opts.progress)
+ progress = start_delayed_progress(
+ _("Collecting commits from input"), 0);
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (read_one_commit(&commits, progress, buf.buf)) {
+ result = 1;
+ goto cleanup;
+ }
+ }
+
+ stop_progress(&progress);
+ }
+
+ if (write_commit_graph(odb,
+ opts.stdin_packs ? &pack_indexes : NULL,
+ opts.stdin_commits ? &commits : NULL,
+ flags,
+ &write_opts))
+ result = 1;
+
+cleanup:
+ FREE_AND_NULL(options);
+ string_list_clear(&pack_indexes, 0);
+ strbuf_release(&buf);
+ return result;
+}
+
+int cmd_commit_graph(int argc, const char **argv, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option builtin_commit_graph_options[] = {
+ OPT_SUBCOMMAND("verify", &fn, graph_verify),
+ OPT_SUBCOMMAND("write", &fn, graph_write),
+ OPT_END(),
+ };
+ struct option *options = parse_options_concat(builtin_commit_graph_options, common_opts);
+
+ git_config(git_default_config, NULL);
+
+ read_replace_refs = 0;
+ save_commit_buffer = 0;
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_commit_graph_usage, 0);
+ FREE_AND_NULL(options);
+
+ return fn(argc, argv, prefix);
+}
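
As an aside (not part of the upstream patch): cmd_commit_graph() above relies on
OPT_SUBCOMMAND() to map "verify" and "write" onto handler functions. The dispatch it
sets up boils down to a name-to-handler lookup table, which can be sketched in plain C
with made-up names:

    #include <stdio.h>
    #include <string.h>

    typedef int (*subcommand_fn)(int argc, char **argv);

    static int do_verify(int argc, char **argv)
    {
    	(void)argc; (void)argv;
    	puts("verify");
    	return 0;
    }

    static int do_write(int argc, char **argv)
    {
    	(void)argc; (void)argv;
    	puts("write");
    	return 0;
    }

    static const struct {
    	const char *name;
    	subcommand_fn fn;
    } subcommands[] = {
    	{ "verify", do_verify },
    	{ "write", do_write },
    };

    int main(int argc, char **argv)
    {
    	size_t i;

    	if (argc < 2)
    		return 1;	/* a real builtin prints its usage here */
    	for (i = 0; i < sizeof(subcommands) / sizeof(subcommands[0]); i++)
    		if (!strcmp(argv[1], subcommands[i].name))
    			return subcommands[i].fn(argc - 1, argv + 1);
    	fprintf(stderr, "unknown subcommand: %s\n", argv[1]);
    	return 1;
    }
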
diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c
new file mode 100644
index 0000000..cc8d584
--- /dev/null
+++ b/builtin/commit-tree.c
@@ -0,0 +1,151 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "cache.h"
+#include "config.h"
+#include "object-store.h"
+#include "repository.h"
+#include "commit.h"
+#include "tree.h"
+#include "builtin.h"
+#include "utf8.h"
+#include "gpg-interface.h"
+#include "parse-options.h"
+
+static const char * const commit_tree_usage[] = {
+ N_("git commit-tree <tree> [(-p <parent>)...]"),
+ N_("git commit-tree [(-p <parent>)...] [-S[<keyid>]] [(-m <message>)...]\n"
+ " [(-F <file>)...] <tree>"),
+ NULL
+};
+
+static const char *sign_commit;
+
+static void new_parent(struct commit *parent, struct commit_list **parents_p)
+{
+ struct object_id *oid = &parent->object.oid;
+ struct commit_list *parents;
+ for (parents = *parents_p; parents; parents = parents->next) {
+ if (parents->item == parent) {
+ error(_("duplicate parent %s ignored"), oid_to_hex(oid));
+ return;
+ }
+ parents_p = &parents->next;
+ }
+ commit_list_insert(parent, parents_p);
+}
+
+static int commit_tree_config(const char *var, const char *value, void *cb)
+{
+ int status = git_gpg_config(var, value, NULL);
+ if (status)
+ return status;
+ return git_default_config(var, value, cb);
+}
+
+static int parse_parent_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct object_id oid;
+ struct commit_list **parents = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (get_oid_commit(arg, &oid))
+ die(_("not a valid object name %s"), arg);
+
+ assert_oid_type(&oid, OBJ_COMMIT);
+ new_parent(lookup_commit(the_repository, &oid), parents);
+ return 0;
+}
+
+static int parse_message_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct strbuf *buf = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ strbuf_addstr(buf, arg);
+ strbuf_complete_line(buf);
+
+ return 0;
+}
+
+static int parse_file_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ int fd;
+ struct strbuf *buf = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ if (!strcmp(arg, "-"))
+ fd = 0;
+ else {
+ fd = xopen(arg, O_RDONLY);
+ }
+ if (strbuf_read(buf, fd, 0) < 0)
+ die_errno(_("git commit-tree: failed to read '%s'"), arg);
+ if (fd && close(fd))
+ die_errno(_("git commit-tree: failed to close '%s'"), arg);
+
+ return 0;
+}
+
+int cmd_commit_tree(int argc, const char **argv, const char *prefix)
+{
+ static struct strbuf buffer = STRBUF_INIT;
+ struct commit_list *parents = NULL;
+ struct object_id tree_oid;
+ struct object_id commit_oid;
+
+ struct option options[] = {
+ OPT_CALLBACK_F('p', NULL, &parents, N_("parent"),
+ N_("id of a parent commit object"), PARSE_OPT_NONEG,
+ parse_parent_arg_callback),
+ OPT_CALLBACK_F('m', NULL, &buffer, N_("message"),
+ N_("commit message"), PARSE_OPT_NONEG,
+ parse_message_arg_callback),
+ OPT_CALLBACK_F('F', NULL, &buffer, N_("file"),
+ N_("read commit log message from file"), PARSE_OPT_NONEG,
+ parse_file_arg_callback),
+ { OPTION_STRING, 'S', "gpg-sign", &sign_commit, N_("key-id"),
+ N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_END()
+ };
+
+ git_config(commit_tree_config, NULL);
+
+ if (argc < 2 || !strcmp(argv[1], "-h"))
+ usage_with_options(commit_tree_usage, options);
+
+ argc = parse_options(argc, argv, prefix, options, commit_tree_usage, 0);
+
+ if (argc != 1)
+ die(_("must give exactly one tree"));
+
+ if (get_oid_tree(argv[0], &tree_oid))
+ die(_("not a valid object name %s"), argv[0]);
+
+ if (!buffer.len) {
+ if (strbuf_read(&buffer, 0, 0) < 0)
+ die_errno(_("git commit-tree: failed to read"));
+ }
+
+ if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
+ NULL, sign_commit)) {
+ strbuf_release(&buffer);
+ return 1;
+ }
+
+ printf("%s\n", oid_to_hex(&commit_oid));
+ strbuf_release(&buffer);
+ return 0;
+}
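
As an aside (not part of the upstream patch): the -m and -F callbacks in commit-tree.c
above accumulate message parts into one buffer, separating parts with a blank line and
making sure the result ends in a newline (roughly what strbuf_addch() plus
strbuf_complete_line() achieve). A standalone sketch with a hand-rolled buffer instead
of strbuf:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct msgbuf {
    	char *buf;
    	size_t len;
    };

    static void msgbuf_add_part(struct msgbuf *m, const char *part)
    {
    	size_t plen = strlen(part);
    	int need_nl = !plen || part[plen - 1] != '\n';
    	size_t extra = (m->len ? 1 : 0) + plen + (need_nl ? 1 : 0);

    	m->buf = realloc(m->buf, m->len + extra + 1);
    	if (!m->buf)
    		exit(1);
    	if (m->len)
    		m->buf[m->len++] = '\n';	/* blank line between parts */
    	memcpy(m->buf + m->len, part, plen);
    	m->len += plen;
    	if (need_nl)
    		m->buf[m->len++] = '\n';	/* complete the final line */
    	m->buf[m->len] = '\0';
    }

    int main(void)
    {
    	struct msgbuf m = { NULL, 0 };

    	msgbuf_add_part(&m, "subject line");
    	msgbuf_add_part(&m, "body paragraph");
    	fputs(m.buf, stdout);	/* "subject line\n\nbody paragraph\n" */
    	free(m.buf);
    	return 0;
    }
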
diff --git a/builtin/commit.c b/builtin/commit.c
new file mode 100644
index 0000000..06b1330
--- /dev/null
+++ b/builtin/commit.c
@@ -0,0 +1,1882 @@
+/*
+ * Builtin "git commit"
+ *
+ * Copyright (c) 2007 Kristian Høgsberg <krh@redhat.com>
+ * Based on git-commit.sh by Junio C Hamano and Linus Torvalds
+ */
+
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "cache.h"
+#include "config.h"
+#include "lockfile.h"
+#include "cache-tree.h"
+#include "color.h"
+#include "dir.h"
+#include "builtin.h"
+#include "diff.h"
+#include "diffcore.h"
+#include "commit.h"
+#include "revision.h"
+#include "wt-status.h"
+#include "run-command.h"
+#include "hook.h"
+#include "refs.h"
+#include "log-tree.h"
+#include "strbuf.h"
+#include "utf8.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "rerere.h"
+#include "unpack-trees.h"
+#include "quote.h"
+#include "submodule.h"
+#include "gpg-interface.h"
+#include "column.h"
+#include "sequencer.h"
+#include "mailmap.h"
+#include "help.h"
+#include "commit-reach.h"
+#include "commit-graph.h"
+#include "pretty.h"
+
+static const char * const builtin_commit_usage[] = {
+ N_("git commit [-a | --interactive | --patch] [-s] [-v] [-u<mode>] [--amend]\n"
+ " [--dry-run] [(-c | -C | --squash) <commit> | --fixup [(amend|reword):]<commit>)]\n"
+ " [-F <file> | -m <msg>] [--reset-author] [--allow-empty]\n"
+ " [--allow-empty-message] [--no-verify] [-e] [--author=<author>]\n"
+ " [--date=<date>] [--cleanup=<mode>] [--[no-]status]\n"
+ " [-i | -o] [--pathspec-from-file=<file> [--pathspec-file-nul]]\n"
+ " [(--trailer <token>[(=|:)<value>])...] [-S[<keyid>]]\n"
+ " [--] [<pathspec>...]"),
+ NULL
+};
+
+static const char * const builtin_status_usage[] = {
+ N_("git status [<options>] [--] [<pathspec>...]"),
+ NULL
+};
+
+static const char empty_amend_advice[] =
+N_("You asked to amend the most recent commit, but doing so would make\n"
+"it empty. You can repeat your command with --allow-empty, or you can\n"
+"remove the commit entirely with \"git reset HEAD^\".\n");
+
+static const char empty_cherry_pick_advice[] =
+N_("The previous cherry-pick is now empty, possibly due to conflict resolution.\n"
+"If you wish to commit it anyway, use:\n"
+"\n"
+" git commit --allow-empty\n"
+"\n");
+
+static const char empty_rebase_pick_advice[] =
+N_("Otherwise, please use 'git rebase --skip'\n");
+
+static const char empty_cherry_pick_advice_single[] =
+N_("Otherwise, please use 'git cherry-pick --skip'\n");
+
+static const char empty_cherry_pick_advice_multi[] =
+N_("and then use:\n"
+"\n"
+" git cherry-pick --continue\n"
+"\n"
+"to resume cherry-picking the remaining commits.\n"
+"If you wish to skip this commit, use:\n"
+"\n"
+" git cherry-pick --skip\n"
+"\n");
+
+static const char *color_status_slots[] = {
+ [WT_STATUS_HEADER] = "header",
+ [WT_STATUS_UPDATED] = "updated",
+ [WT_STATUS_CHANGED] = "changed",
+ [WT_STATUS_UNTRACKED] = "untracked",
+ [WT_STATUS_NOBRANCH] = "noBranch",
+ [WT_STATUS_UNMERGED] = "unmerged",
+ [WT_STATUS_LOCAL_BRANCH] = "localBranch",
+ [WT_STATUS_REMOTE_BRANCH] = "remoteBranch",
+ [WT_STATUS_ONBRANCH] = "branch",
+};
+
+static const char *use_message_buffer;
+static struct lock_file index_lock; /* real index */
+static struct lock_file false_lock; /* used only for partial commits */
+static enum {
+ COMMIT_AS_IS = 1,
+ COMMIT_NORMAL,
+ COMMIT_PARTIAL
+} commit_style;
+
+static const char *logfile, *force_author;
+static const char *template_file;
+/*
+ * The _message variables are commit names from which to take
+ * the commit message and/or authorship.
+ */
+static const char *author_message, *author_message_buffer;
+static char *edit_message, *use_message;
+static char *fixup_message, *fixup_commit, *squash_message;
+static const char *fixup_prefix;
+static int all, also, interactive, patch_interactive, only, amend, signoff;
+static int edit_flag = -1; /* unspecified */
+static int quiet, verbose, no_verify, allow_empty, dry_run, renew_authorship;
+static int config_commit_verbose = -1; /* unspecified */
+static int no_post_rewrite, allow_empty_message, pathspec_file_nul;
+static char *untracked_files_arg, *force_date, *ignore_submodule_arg, *ignored_arg;
+static char *sign_commit, *pathspec_from_file;
+static struct strvec trailer_args = STRVEC_INIT;
+
+/*
+ * The default commit message cleanup mode removes lines beginning
+ * with # (shell comments) and leading and trailing whitespace
+ * (empty lines or lines containing only whitespace) when an editor
+ * is used, and only the whitespace when the message is given
+ * explicitly.
+ */
+static enum commit_msg_cleanup_mode cleanup_mode;
+static const char *cleanup_arg;
+
+static enum commit_whence whence;
+static int use_editor = 1, include_status = 1;
+static int have_option_m;
+static struct strbuf message = STRBUF_INIT;
+
+static enum wt_status_format status_format = STATUS_FORMAT_UNSPECIFIED;
+
+static int opt_pass_trailer(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ strvec_pushl(opt->value, "--trailer", arg, NULL);
+ return 0;
+}
+
+static int opt_parse_porcelain(const struct option *opt, const char *arg, int unset)
+{
+ enum wt_status_format *value = (enum wt_status_format *)opt->value;
+ if (unset)
+ *value = STATUS_FORMAT_NONE;
+ else if (!arg)
+ *value = STATUS_FORMAT_PORCELAIN;
+ else if (!strcmp(arg, "v1") || !strcmp(arg, "1"))
+ *value = STATUS_FORMAT_PORCELAIN;
+ else if (!strcmp(arg, "v2") || !strcmp(arg, "2"))
+ *value = STATUS_FORMAT_PORCELAIN_V2;
+ else
+ die("unsupported porcelain version '%s'", arg);
+
+ return 0;
+}
+
+static int opt_parse_m(const struct option *opt, const char *arg, int unset)
+{
+ struct strbuf *buf = opt->value;
+ if (unset) {
+ have_option_m = 0;
+ strbuf_setlen(buf, 0);
+ } else {
+ have_option_m = 1;
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ strbuf_addstr(buf, arg);
+ strbuf_complete_line(buf);
+ }
+ return 0;
+}
+
+static int opt_parse_rename_score(const struct option *opt, const char *arg, int unset)
+{
+ const char **value = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (arg != NULL && *arg == '=')
+ arg = arg + 1;
+
+ *value = arg;
+ return 0;
+}
+
+static void determine_whence(struct wt_status *s)
+{
+ if (file_exists(git_path_merge_head(the_repository)))
+ whence = FROM_MERGE;
+ else if (!sequencer_determine_whence(the_repository, &whence))
+ whence = FROM_COMMIT;
+ if (s)
+ s->whence = whence;
+}
+
+static void status_init_config(struct wt_status *s, config_fn_t fn)
+{
+ wt_status_prepare(the_repository, s);
+ init_diff_ui_defaults();
+ git_config(fn, s);
+ determine_whence(s);
+ s->hints = advice_enabled(ADVICE_STATUS_HINTS); /* must come after git_config() */
+}
+
+static void rollback_index_files(void)
+{
+ switch (commit_style) {
+ case COMMIT_AS_IS:
+ break; /* nothing to do */
+ case COMMIT_NORMAL:
+ rollback_lock_file(&index_lock);
+ break;
+ case COMMIT_PARTIAL:
+ rollback_lock_file(&index_lock);
+ rollback_lock_file(&false_lock);
+ break;
+ }
+}
+
+static int commit_index_files(void)
+{
+ int err = 0;
+
+ switch (commit_style) {
+ case COMMIT_AS_IS:
+ break; /* nothing to do */
+ case COMMIT_NORMAL:
+ err = commit_lock_file(&index_lock);
+ break;
+ case COMMIT_PARTIAL:
+ err = commit_lock_file(&index_lock);
+ rollback_lock_file(&false_lock);
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Take a union of paths in the index and the named tree (typically, "HEAD"),
+ * and return the paths that match the given pattern in list.
+ */
+static int list_paths(struct string_list *list, const char *with_tree,
+ const struct pathspec *pattern)
+{
+ int i, ret;
+ char *m;
+
+ if (!pattern->nr)
+ return 0;
+
+ m = xcalloc(1, pattern->nr);
+
+ if (with_tree) {
+ char *max_prefix = common_prefix(pattern);
+ overlay_tree_on_index(&the_index, with_tree, max_prefix);
+ free(max_prefix);
+ }
+
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+ for (i = 0; i < the_index.cache_nr; i++) {
+ const struct cache_entry *ce = the_index.cache[i];
+ struct string_list_item *item;
+
+ if (ce->ce_flags & CE_UPDATE)
+ continue;
+ if (!ce_path_match(&the_index, ce, pattern, m))
+ continue;
+ item = string_list_insert(list, ce->name);
+ if (ce_skip_worktree(ce))
+ item->util = item; /* better a valid pointer than a fake one */
+ }
+
+ ret = report_path_error(m, pattern);
+ free(m);
+ return ret;
+}
+
+static void add_remove_files(struct string_list *list)
+{
+ int i;
+ for (i = 0; i < list->nr; i++) {
+ struct stat st;
+ struct string_list_item *p = &(list->items[i]);
+
+ /* p->util is skip-worktree */
+ if (p->util)
+ continue;
+
+ if (!lstat(p->string, &st)) {
+ if (add_to_index(&the_index, p->string, &st, 0))
+ die(_("updating files failed"));
+ } else
+ remove_file_from_index(&the_index, p->string);
+ }
+}
+
+static void create_base_index(const struct commit *current_head)
+{
+ struct tree *tree;
+ struct unpack_trees_options opts;
+ struct tree_desc t;
+
+ if (!current_head) {
+ discard_index(&the_index);
+ return;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.head_idx = 1;
+ opts.index_only = 1;
+ opts.merge = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+
+ opts.fn = oneway_merge;
+ tree = parse_tree_indirect(&current_head->object.oid);
+ if (!tree)
+ die(_("failed to unpack HEAD tree object"));
+ parse_tree(tree);
+ init_tree_desc(&t, tree->buffer, tree->size);
+ if (unpack_trees(1, &t, &opts))
+ exit(128); /* We've already reported the error, finish dying */
+}
+
+static void refresh_cache_or_die(int refresh_flags)
+{
+ /*
+ * refresh_flags contains REFRESH_QUIET, so the only errors
+ * are for unmerged entries.
+ */
+ if (refresh_index(&the_index, refresh_flags | REFRESH_IN_PORCELAIN, NULL, NULL, NULL))
+ die_resolve_conflict("commit");
+}
+
+static const char *prepare_index(const char **argv, const char *prefix,
+ const struct commit *current_head, int is_status)
+{
+ struct string_list partial = STRING_LIST_INIT_DUP;
+ struct pathspec pathspec;
+ int refresh_flags = REFRESH_QUIET;
+ const char *ret;
+
+ if (is_status)
+ refresh_flags |= REFRESH_UNMERGED;
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_FULL,
+ prefix, argv);
+
+ if (pathspec_from_file) {
+ if (interactive)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--interactive/--patch");
+
+ if (all)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "-a");
+
+ if (pathspec.nr)
+ die(_("'%s' and pathspec arguments cannot be used together"), "--pathspec-from-file");
+
+ parse_pathspec_file(&pathspec, 0,
+ PATHSPEC_PREFER_FULL,
+ prefix, pathspec_from_file, pathspec_file_nul);
+ } else if (pathspec_file_nul) {
+ die(_("the option '%s' requires '%s'"), "--pathspec-file-nul", "--pathspec-from-file");
+ }
+
+ if (!pathspec.nr && (also || (only && !allow_empty &&
+ (!amend || (fixup_message && strcmp(fixup_prefix, "amend"))))))
+ die(_("No paths with --include/--only does not make sense."));
+
+ if (repo_read_index_preload(the_repository, &pathspec, 0) < 0)
+ die(_("index file corrupt"));
+
+ if (interactive) {
+ char *old_index_env = NULL, *old_repo_index_file;
+ repo_hold_locked_index(the_repository, &index_lock,
+ LOCK_DIE_ON_ERROR);
+
+ refresh_cache_or_die(refresh_flags);
+
+ if (write_locked_index(&the_index, &index_lock, 0))
+ die(_("unable to create temporary index"));
+
+ old_repo_index_file = the_repository->index_file;
+ the_repository->index_file =
+ (char *)get_lock_file_path(&index_lock);
+ old_index_env = xstrdup_or_null(getenv(INDEX_ENVIRONMENT));
+ setenv(INDEX_ENVIRONMENT, the_repository->index_file, 1);
+
+ if (interactive_add(argv, prefix, patch_interactive) != 0)
+ die(_("interactive add failed"));
+
+ the_repository->index_file = old_repo_index_file;
+ if (old_index_env && *old_index_env)
+ setenv(INDEX_ENVIRONMENT, old_index_env, 1);
+ else
+ unsetenv(INDEX_ENVIRONMENT);
+ FREE_AND_NULL(old_index_env);
+
+ discard_index(&the_index);
+ read_index_from(&the_index, get_lock_file_path(&index_lock),
+ get_git_dir());
+ if (update_main_cache_tree(WRITE_TREE_SILENT) == 0) {
+ if (reopen_lock_file(&index_lock) < 0)
+ die(_("unable to write index file"));
+ if (write_locked_index(&the_index, &index_lock, 0))
+ die(_("unable to update temporary index"));
+ } else
+ warning(_("Failed to update main cache tree"));
+
+ commit_style = COMMIT_NORMAL;
+ ret = get_lock_file_path(&index_lock);
+ goto out;
+ }
+
+ /*
+ * Non partial, non as-is commit.
+ *
+ * (1) get the real index;
+ * (2) update the_index as necessary;
+ * (3) write the_index out to the real index (still locked);
+ * (4) return the name of the locked index file.
+ *
+ * The caller should run hooks on the locked real index, and
+ * (A) if all goes well, commit the real index;
+ * (B) on failure, rollback the real index.
+ */
+ if (all || (also && pathspec.nr)) {
+ repo_hold_locked_index(the_repository, &index_lock,
+ LOCK_DIE_ON_ERROR);
+ add_files_to_cache(also ? prefix : NULL, &pathspec, 0);
+ refresh_cache_or_die(refresh_flags);
+ update_main_cache_tree(WRITE_TREE_SILENT);
+ if (write_locked_index(&the_index, &index_lock, 0))
+ die(_("unable to write new_index file"));
+ commit_style = COMMIT_NORMAL;
+ ret = get_lock_file_path(&index_lock);
+ goto out;
+ }
+
+ /*
+ * As-is commit.
+ *
+ * (1) return the name of the real index file.
+ *
+ * The caller should run hooks on the real index,
+ * and create commit from the_index.
+ * We still need to refresh the index here.
+ */
+ if (!only && !pathspec.nr) {
+ repo_hold_locked_index(the_repository, &index_lock,
+ LOCK_DIE_ON_ERROR);
+ refresh_cache_or_die(refresh_flags);
+ if (the_index.cache_changed
+ || !cache_tree_fully_valid(the_index.cache_tree))
+ update_main_cache_tree(WRITE_TREE_SILENT);
+ if (write_locked_index(&the_index, &index_lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("unable to write new_index file"));
+ commit_style = COMMIT_AS_IS;
+ ret = get_index_file();
+ goto out;
+ }
+
+ /*
+ * A partial commit.
+ *
+ * (0) find the set of affected paths;
+ * (1) get lock on the real index file;
+ * (2) update the_index with the given paths;
+ * (3) write the_index out to the real index (still locked);
+ * (4) get lock on the false index file;
+ * (5) reset the_index from HEAD;
+ * (6) update the_index the same way as (2);
+ * (7) write the_index out to the false index file;
+ * (8) return the name of the false index file (still locked);
+ *
+ * The caller should run hooks on the locked false index, and
+ * create commit from it. Then
+ * (A) if all goes well, commit the real index;
+ * (B) on failure, rollback the real index;
+ * In either case, rollback the false index.
+ */
+ commit_style = COMMIT_PARTIAL;
+
+ if (whence != FROM_COMMIT) {
+ if (whence == FROM_MERGE)
+ die(_("cannot do a partial commit during a merge."));
+ else if (is_from_cherry_pick(whence))
+ die(_("cannot do a partial commit during a cherry-pick."));
+ else if (is_from_rebase(whence))
+ die(_("cannot do a partial commit during a rebase."));
+ }
+
+ if (list_paths(&partial, !current_head ? NULL : "HEAD", &pathspec))
+ exit(1);
+
+ discard_index(&the_index);
+ if (repo_read_index(the_repository) < 0)
+ die(_("cannot read the index"));
+
+ repo_hold_locked_index(the_repository, &index_lock, LOCK_DIE_ON_ERROR);
+ add_remove_files(&partial);
+ refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
+ update_main_cache_tree(WRITE_TREE_SILENT);
+ if (write_locked_index(&the_index, &index_lock, 0))
+ die(_("unable to write new_index file"));
+
+ hold_lock_file_for_update(&false_lock,
+ git_path("next-index-%"PRIuMAX,
+ (uintmax_t) getpid()),
+ LOCK_DIE_ON_ERROR);
+
+ create_base_index(current_head);
+ add_remove_files(&partial);
+ refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
+
+ if (write_locked_index(&the_index, &false_lock, 0))
+ die(_("unable to write temporary index file"));
+
+ discard_index(&the_index);
+ ret = get_lock_file_path(&false_lock);
+ read_index_from(&the_index, ret, get_git_dir());
+out:
+ string_list_clear(&partial, 0);
+ clear_pathspec(&pathspec);
+ return ret;
+}
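
As an aside (not part of the upstream patch): prepare_index() above leans on git's
lockfile API, whose core idea is to write the new index to "<path>.lock" and either
commit it by renaming over the real file or roll back by deleting the lock. A rough
standalone sketch of that pattern, with made-up file names and without the O_EXCL
locking and cleanup handlers the real API provides:

    #include <stdio.h>

    /* Write content to "<path>.lock", then atomically commit or roll back. */
    static int write_locked(const char *path, const char *content)
    {
    	char lock[4096];
    	FILE *fp;

    	snprintf(lock, sizeof(lock), "%s.lock", path);
    	fp = fopen(lock, "w");		/* real code opens with O_EXCL */
    	if (!fp)
    		return -1;
    	fputs(content, fp);
    	if (fclose(fp))
    		return -1;
    	if (rename(lock, path)) {	/* "commit": atomic replace */
    		remove(lock);		/* "rollback": drop the lockfile */
    		return -1;
    	}
    	return 0;
    }

    int main(void)
    {
    	if (write_locked("index-example", "fake index contents\n"))
    		perror("write_locked");
    	return 0;
    }
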
+
+static int run_status(FILE *fp, const char *index_file, const char *prefix, int nowarn,
+ struct wt_status *s)
+{
+ struct object_id oid;
+
+ if (s->relative_paths)
+ s->prefix = prefix;
+
+ if (amend) {
+ s->amend = 1;
+ s->reference = "HEAD^1";
+ }
+ s->verbose = verbose;
+ s->index_file = index_file;
+ s->fp = fp;
+ s->nowarn = nowarn;
+ s->is_initial = get_oid(s->reference, &oid) ? 1 : 0;
+ if (!s->is_initial)
+ oidcpy(&s->oid_commit, &oid);
+ s->status_format = status_format;
+ s->ignore_submodule_arg = ignore_submodule_arg;
+
+ wt_status_collect(s);
+ wt_status_print(s);
+ wt_status_collect_free_buffers(s);
+
+ return s->committable;
+}
+
+static int is_a_merge(const struct commit *current_head)
+{
+ return !!(current_head->parents && current_head->parents->next);
+}
+
+static void assert_split_ident(struct ident_split *id, const struct strbuf *buf)
+{
+ if (split_ident_line(id, buf->buf, buf->len) || !id->date_begin)
+ BUG("unable to parse our own ident: %s", buf->buf);
+}
+
+static void export_one(const char *var, const char *s, const char *e, int hack)
+{
+ struct strbuf buf = STRBUF_INIT;
+ if (hack)
+ strbuf_addch(&buf, hack);
+ strbuf_add(&buf, s, e - s);
+ setenv(var, buf.buf, 1);
+ strbuf_release(&buf);
+}
+
+static int parse_force_date(const char *in, struct strbuf *out)
+{
+ strbuf_addch(out, '@');
+
+ if (parse_date(in, out) < 0) {
+ int errors = 0;
+ unsigned long t = approxidate_careful(in, &errors);
+ if (errors)
+ return -1;
+ strbuf_addf(out, "%lu", t);
+ }
+
+ return 0;
+}
+
+static void set_ident_var(char **buf, char *val)
+{
+ free(*buf);
+ *buf = val;
+}
+
+static void determine_author_info(struct strbuf *author_ident)
+{
+ char *name, *email, *date;
+ struct ident_split author;
+
+ name = xstrdup_or_null(getenv("GIT_AUTHOR_NAME"));
+ email = xstrdup_or_null(getenv("GIT_AUTHOR_EMAIL"));
+ date = xstrdup_or_null(getenv("GIT_AUTHOR_DATE"));
+
+ if (author_message) {
+ struct ident_split ident;
+ size_t len;
+ const char *a;
+
+ a = find_commit_header(author_message_buffer, "author", &len);
+ if (!a)
+ die(_("commit '%s' lacks author header"), author_message);
+ if (split_ident_line(&ident, a, len) < 0)
+ die(_("commit '%s' has malformed author line"), author_message);
+
+ set_ident_var(&name, xmemdupz(ident.name_begin, ident.name_end - ident.name_begin));
+ set_ident_var(&email, xmemdupz(ident.mail_begin, ident.mail_end - ident.mail_begin));
+
+ if (ident.date_begin) {
+ struct strbuf date_buf = STRBUF_INIT;
+ strbuf_addch(&date_buf, '@');
+ strbuf_add(&date_buf, ident.date_begin, ident.date_end - ident.date_begin);
+ strbuf_addch(&date_buf, ' ');
+ strbuf_add(&date_buf, ident.tz_begin, ident.tz_end - ident.tz_begin);
+ set_ident_var(&date, strbuf_detach(&date_buf, NULL));
+ }
+ }
+
+ if (force_author) {
+ struct ident_split ident;
+
+ if (split_ident_line(&ident, force_author, strlen(force_author)) < 0)
+ die(_("malformed --author parameter"));
+ set_ident_var(&name, xmemdupz(ident.name_begin, ident.name_end - ident.name_begin));
+ set_ident_var(&email, xmemdupz(ident.mail_begin, ident.mail_end - ident.mail_begin));
+ }
+
+ if (force_date) {
+ struct strbuf date_buf = STRBUF_INIT;
+ if (parse_force_date(force_date, &date_buf))
+ die(_("invalid date format: %s"), force_date);
+ set_ident_var(&date, strbuf_detach(&date_buf, NULL));
+ }
+
+ strbuf_addstr(author_ident, fmt_ident(name, email, WANT_AUTHOR_IDENT, date,
+ IDENT_STRICT));
+ assert_split_ident(&author, author_ident);
+ export_one("GIT_AUTHOR_NAME", author.name_begin, author.name_end, 0);
+ export_one("GIT_AUTHOR_EMAIL", author.mail_begin, author.mail_end, 0);
+ export_one("GIT_AUTHOR_DATE", author.date_begin, author.tz_end, '@');
+ free(name);
+ free(email);
+ free(date);
+}
+
+static int author_date_is_interesting(void)
+{
+ return author_message || force_date;
+}
+
+static void adjust_comment_line_char(const struct strbuf *sb)
+{
+ char candidates[] = "#;@!$%^&|:";
+ char *candidate;
+ const char *p;
+
+ comment_line_char = candidates[0];
+ if (!memchr(sb->buf, comment_line_char, sb->len))
+ return;
+
+ p = sb->buf;
+ candidate = strchr(candidates, *p);
+ if (candidate)
+ *candidate = ' ';
+ for (p = sb->buf; *p; p++) {
+ if ((p[0] == '\n' || p[0] == '\r') && p[1]) {
+ candidate = strchr(candidates, p[1]);
+ if (candidate)
+ *candidate = ' ';
+ }
+ }
+
+ for (p = candidates; *p == ' '; p++)
+ ;
+ if (!*p)
+ die(_("unable to select a comment character that is not used\n"
+ "in the current commit message"));
+ comment_line_char = *p;
+}
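
As an aside (not part of the upstream patch): adjust_comment_line_char() above scans a
list of candidate characters and keeps the first one that never starts a line of the
message, so the comment lines written to COMMIT_EDITMSG cannot collide with the
message body. A standalone sketch of that selection in plain C:

    #include <stdio.h>
    #include <string.h>

    static char pick_comment_char(const char *msg)
    {
    	const char *candidates = "#;@!$%^&|:";
    	const char *c;

    	for (c = candidates; *c; c++) {
    		const char *line = msg;
    		int clash = 0;

    		/* does this candidate appear at the start of any line? */
    		while (line && *line) {
    			if (*line == *c) {
    				clash = 1;
    				break;
    			}
    			line = strchr(line, '\n');
    			if (line)
    				line++;		/* start of the next line */
    		}
    		if (!clash)
    			return *c;
    	}
    	return 0;	/* no usable character; the builtin die()s here */
    }

    int main(void)
    {
    	/* '#' starts the first line, so ';' is chosen instead */
    	printf("%c\n", pick_comment_char("# looks like a comment\nbody\n"));
    	return 0;
    }
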
+
+static void prepare_amend_commit(struct commit *commit, struct strbuf *sb,
+ struct pretty_print_context *ctx)
+{
+ const char *buffer, *subject, *fmt;
+
+ buffer = get_commit_buffer(commit, NULL);
+ find_commit_subject(buffer, &subject);
+ /*
+ * If we amend the 'amend!' commit then we don't want to
+ * duplicate the subject line.
+ */
+ fmt = starts_with(subject, "amend!") ? "%b" : "%B";
+ format_commit_message(commit, fmt, sb, ctx);
+ unuse_commit_buffer(commit, buffer);
+}
+
+static int prepare_to_commit(const char *index_file, const char *prefix,
+ struct commit *current_head,
+ struct wt_status *s,
+ struct strbuf *author_ident)
+{
+ struct stat statbuf;
+ struct strbuf committer_ident = STRBUF_INIT;
+ int committable;
+ struct strbuf sb = STRBUF_INIT;
+ const char *hook_arg1 = NULL;
+ const char *hook_arg2 = NULL;
+ int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE);
+ int old_display_comment_prefix;
+ int merge_contains_scissors = 0;
+ int invoked_hook;
+
+ /* This checks and barfs if author is badly specified */
+ determine_author_info(author_ident);
+
+ if (!no_verify && run_commit_hook(use_editor, index_file, &invoked_hook,
+ "pre-commit", NULL))
+ return 0;
+
+ if (squash_message) {
+ /*
+ * Insert the proper subject line before other commit
+ * message options add their content.
+ */
+ if (use_message && !strcmp(use_message, squash_message))
+ strbuf_addstr(&sb, "squash! ");
+ else {
+ struct pretty_print_context ctx = {0};
+ struct commit *c;
+ c = lookup_commit_reference_by_name(squash_message);
+ if (!c)
+ die(_("could not lookup commit %s"), squash_message);
+ ctx.output_encoding = get_commit_output_encoding();
+ format_commit_message(c, "squash! %s\n\n", &sb,
+ &ctx);
+ }
+ }
+
+ if (have_option_m && !fixup_message) {
+ strbuf_addbuf(&sb, &message);
+ hook_arg1 = "message";
+ } else if (logfile && !strcmp(logfile, "-")) {
+ if (isatty(0))
+ fprintf(stderr, _("(reading log message from standard input)\n"));
+ if (strbuf_read(&sb, 0, 0) < 0)
+ die_errno(_("could not read log from standard input"));
+ hook_arg1 = "message";
+ } else if (logfile) {
+ if (strbuf_read_file(&sb, logfile, 0) < 0)
+ die_errno(_("could not read log file '%s'"),
+ logfile);
+ hook_arg1 = "message";
+ } else if (use_message) {
+ char *buffer;
+ buffer = strstr(use_message_buffer, "\n\n");
+ if (buffer)
+ strbuf_addstr(&sb, skip_blank_lines(buffer + 2));
+ hook_arg1 = "commit";
+ hook_arg2 = use_message;
+ } else if (fixup_message) {
+ struct pretty_print_context ctx = {0};
+ struct commit *commit;
+ char *fmt;
+ commit = lookup_commit_reference_by_name(fixup_commit);
+ if (!commit)
+ die(_("could not lookup commit %s"), fixup_commit);
+ ctx.output_encoding = get_commit_output_encoding();
+ fmt = xstrfmt("%s! %%s\n\n", fixup_prefix);
+ format_commit_message(commit, fmt, &sb, &ctx);
+ free(fmt);
+ hook_arg1 = "message";
+
+ /*
+ * Only `-m` commit message option is checked here, as
+ * it supports `--fixup` to append the commit message.
+ *
+ * The other commit message options `-c`/`-C`/`-F` are
+ * incompatible with all the forms of `--fixup` and
+ * have already errored out while parsing the `git commit`
+ * options.
+ */
+ if (have_option_m && !strcmp(fixup_prefix, "fixup"))
+ strbuf_addbuf(&sb, &message);
+
+ if (!strcmp(fixup_prefix, "amend")) {
+ if (have_option_m)
+ die(_("options '%s' and '%s:%s' cannot be used together"), "-m", "--fixup", fixup_message);
+ prepare_amend_commit(commit, &sb, &ctx);
+ }
+ } else if (!stat(git_path_merge_msg(the_repository), &statbuf)) {
+ size_t merge_msg_start;
+
+ /*
+ * prepend SQUASH_MSG here if it exists and a
+ * "merge --squash" was originally performed
+ */
+ if (!stat(git_path_squash_msg(the_repository), &statbuf)) {
+ if (strbuf_read_file(&sb, git_path_squash_msg(the_repository), 0) < 0)
+ die_errno(_("could not read SQUASH_MSG"));
+ hook_arg1 = "squash";
+ } else
+ hook_arg1 = "merge";
+
+ merge_msg_start = sb.len;
+ if (strbuf_read_file(&sb, git_path_merge_msg(the_repository), 0) < 0)
+ die_errno(_("could not read MERGE_MSG"));
+
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+ wt_status_locate_end(sb.buf + merge_msg_start,
+ sb.len - merge_msg_start) <
+ sb.len - merge_msg_start)
+ merge_contains_scissors = 1;
+ } else if (!stat(git_path_squash_msg(the_repository), &statbuf)) {
+ if (strbuf_read_file(&sb, git_path_squash_msg(the_repository), 0) < 0)
+ die_errno(_("could not read SQUASH_MSG"));
+ hook_arg1 = "squash";
+ } else if (template_file) {
+ if (strbuf_read_file(&sb, template_file, 0) < 0)
+ die_errno(_("could not read '%s'"), template_file);
+ hook_arg1 = "template";
+ clean_message_contents = 0;
+ }
+
+ /*
+ * The remaining cases don't modify the template message, but
+ * just set the argument(s) to the prepare-commit-msg hook.
+ */
+ else if (whence == FROM_MERGE)
+ hook_arg1 = "merge";
+ else if (is_from_cherry_pick(whence) || whence == FROM_REBASE_PICK) {
+ hook_arg1 = "commit";
+ hook_arg2 = "CHERRY_PICK_HEAD";
+ }
+
+ if (squash_message) {
+ /*
+ * If squash_commit was used for the commit subject,
+ * then we're possibly hijacking other commit log options.
+ * Reset the hook args to tell the real story.
+ */
+ hook_arg1 = "message";
+ hook_arg2 = "";
+ }
+
+ s->fp = fopen_for_writing(git_path_commit_editmsg());
+ if (!s->fp)
+ die_errno(_("could not open '%s'"), git_path_commit_editmsg());
+
+ /* Ignore status.displayCommentPrefix: we do need comments in COMMIT_EDITMSG. */
+ old_display_comment_prefix = s->display_comment_prefix;
+ s->display_comment_prefix = 1;
+
+ /*
+ * Most hints are counter-productive when the commit has
+ * already started.
+ */
+ s->hints = 0;
+
+ if (clean_message_contents)
+ strbuf_stripspace(&sb, 0);
+
+ if (signoff)
+ append_signoff(&sb, ignore_non_trailer(sb.buf, sb.len), 0);
+
+ if (fwrite(sb.buf, 1, sb.len, s->fp) < sb.len)
+ die_errno(_("could not write commit template"));
+
+ if (auto_comment_line_char)
+ adjust_comment_line_char(&sb);
+ strbuf_release(&sb);
+
+ /* This checks if committer ident is explicitly given */
+ strbuf_addstr(&committer_ident, git_committer_info(IDENT_STRICT));
+ if (use_editor && include_status) {
+ int ident_shown = 0;
+ int saved_color_setting;
+ struct ident_split ci, ai;
+ const char *hint_cleanup_all = allow_empty_message ?
+ _("Please enter the commit message for your changes."
+ " Lines starting\nwith '%c' will be ignored.\n") :
+ _("Please enter the commit message for your changes."
+ " Lines starting\nwith '%c' will be ignored, and an empty"
+ " message aborts the commit.\n");
+ const char *hint_cleanup_space = allow_empty_message ?
+ _("Please enter the commit message for your changes."
+ " Lines starting\n"
+ "with '%c' will be kept; you may remove them"
+ " yourself if you want to.\n") :
+ _("Please enter the commit message for your changes."
+ " Lines starting\n"
+ "with '%c' will be kept; you may remove them"
+ " yourself if you want to.\n"
+ "An empty message aborts the commit.\n");
+ if (whence != FROM_COMMIT) {
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+ !merge_contains_scissors)
+ wt_status_add_cut_line(s->fp);
+ status_printf_ln(
+ s, GIT_COLOR_NORMAL,
+ whence == FROM_MERGE ?
+ _("\n"
+ "It looks like you may be committing a merge.\n"
+ "If this is not correct, please run\n"
+ " git update-ref -d MERGE_HEAD\n"
+ "and try again.\n") :
+ _("\n"
+ "It looks like you may be committing a cherry-pick.\n"
+ "If this is not correct, please run\n"
+ " git update-ref -d CHERRY_PICK_HEAD\n"
+ "and try again.\n"));
+ }
+
+ fprintf(s->fp, "\n");
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL)
+ status_printf(s, GIT_COLOR_NORMAL, hint_cleanup_all, comment_line_char);
+ else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) {
+ if (whence == FROM_COMMIT && !merge_contains_scissors)
+ wt_status_add_cut_line(s->fp);
+ } else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
+ status_printf(s, GIT_COLOR_NORMAL, hint_cleanup_space, comment_line_char);
+
+ /*
+ * These should never fail because they come from our own
+ * fmt_ident. They may fail the sane_ident test, but we know
+ * that the name and mail pointers will at least be valid,
+ * which is enough for our tests and printing here.
+ */
+ assert_split_ident(&ai, author_ident);
+ assert_split_ident(&ci, &committer_ident);
+
+ if (ident_cmp(&ai, &ci))
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("%s"
+ "Author: %.*s <%.*s>"),
+ ident_shown++ ? "" : "\n",
+ (int)(ai.name_end - ai.name_begin), ai.name_begin,
+ (int)(ai.mail_end - ai.mail_begin), ai.mail_begin);
+
+ if (author_date_is_interesting())
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("%s"
+ "Date: %s"),
+ ident_shown++ ? "" : "\n",
+ show_ident_date(&ai, DATE_MODE(NORMAL)));
+
+ if (!committer_ident_sufficiently_given())
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("%s"
+ "Committer: %.*s <%.*s>"),
+ ident_shown++ ? "" : "\n",
+ (int)(ci.name_end - ci.name_begin), ci.name_begin,
+ (int)(ci.mail_end - ci.mail_begin), ci.mail_begin);
+
+ status_printf_ln(s, GIT_COLOR_NORMAL, "%s", ""); /* Add new line for clarity */
+
+ saved_color_setting = s->use_color;
+ s->use_color = 0;
+ committable = run_status(s->fp, index_file, prefix, 1, s);
+ s->use_color = saved_color_setting;
+ string_list_clear(&s->change, 1);
+ } else {
+ struct object_id oid;
+ const char *parent = "HEAD";
+
+ if (!active_nr && read_cache() < 0)
+ die(_("Cannot read index"));
+
+ if (amend)
+ parent = "HEAD^1";
+
+ if (get_oid(parent, &oid)) {
+ int i, ita_nr = 0;
+
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+ for (i = 0; i < the_index.cache_nr; i++)
+ if (ce_intent_to_add(the_index.cache[i]))
+ ita_nr++;
+ committable = the_index.cache_nr - ita_nr > 0;
+ } else {
+ /*
+ * Unless the user did explicitly request a submodule
+ * ignore mode by passing a command line option we do
+ * not ignore any changed submodule SHA-1s when
+ * comparing index and parent, no matter what is
+ * configured. Otherwise we won't commit any
+ * submodules which were manually staged, which would
+ * be really confusing.
+ */
+ struct diff_flags flags = DIFF_FLAGS_INIT;
+ flags.override_submodule_config = 1;
+ if (ignore_submodule_arg &&
+ !strcmp(ignore_submodule_arg, "all"))
+ flags.ignore_submodules = 1;
+ committable = index_differs_from(the_repository,
+ parent, &flags, 1);
+ }
+ }
+ strbuf_release(&committer_ident);
+
+ fclose(s->fp);
+
+ if (trailer_args.nr) {
+ struct child_process run_trailer = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&run_trailer.args, "interpret-trailers",
+ "--in-place", git_path_commit_editmsg(), NULL);
+ strvec_pushv(&run_trailer.args, trailer_args.v);
+ run_trailer.git_cmd = 1;
+ if (run_command(&run_trailer))
+ die(_("unable to pass trailers to --trailers"));
+ strvec_clear(&trailer_args);
+ }
+
+ /*
+ * Reject an attempt to record a non-merge empty commit without
+ * explicit --allow-empty. In the cherry-pick case, it may be
+ * empty due to conflict resolution, which the user should okay.
+ */
+ if (!committable && whence != FROM_MERGE && !allow_empty &&
+ !(amend && is_a_merge(current_head))) {
+ s->hints = advice_enabled(ADVICE_STATUS_HINTS);
+ s->display_comment_prefix = old_display_comment_prefix;
+ run_status(stdout, index_file, prefix, 0, s);
+ if (amend)
+ fputs(_(empty_amend_advice), stderr);
+ else if (is_from_cherry_pick(whence) ||
+ whence == FROM_REBASE_PICK) {
+ fputs(_(empty_cherry_pick_advice), stderr);
+ if (whence == FROM_CHERRY_PICK_SINGLE)
+ fputs(_(empty_cherry_pick_advice_single), stderr);
+ else if (whence == FROM_CHERRY_PICK_MULTI)
+ fputs(_(empty_cherry_pick_advice_multi), stderr);
+ else
+ fputs(_(empty_rebase_pick_advice), stderr);
+ }
+ return 0;
+ }
+
+ if (!no_verify && invoked_hook) {
+ /*
+		 * Re-read the index as the pre-commit hook was invoked
+ * and could have updated it. We must do this before we invoke
+ * the editor and after we invoke run_status above.
+ */
+ discard_index(&the_index);
+ }
+ read_index_from(&the_index, index_file, get_git_dir());
+
+ if (update_main_cache_tree(0)) {
+ error(_("Error building trees"));
+ return 0;
+ }
+
+ if (run_commit_hook(use_editor, index_file, NULL, "prepare-commit-msg",
+ git_path_commit_editmsg(), hook_arg1, hook_arg2, NULL))
+ return 0;
+
+ if (use_editor) {
+ struct strvec env = STRVEC_INIT;
+
+ strvec_pushf(&env, "GIT_INDEX_FILE=%s", index_file);
+ if (launch_editor(git_path_commit_editmsg(), NULL, env.v)) {
+ fprintf(stderr,
+ _("Please supply the message using either -m or -F option.\n"));
+ exit(1);
+ }
+ strvec_clear(&env);
+ }
+
+ if (!no_verify &&
+ run_commit_hook(use_editor, index_file, NULL, "commit-msg",
+ git_path_commit_editmsg(), NULL)) {
+ return 0;
+ }
+
+ return 1;
+}
+
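+/*
+ * Expand a partial --author argument (anything without a '>') into a
+ * full "Name <email>" ident by searching all refs, case-insensitively,
+ * for a commit whose author matches, honoring the mailmap.
+ */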
+static const char *find_author_by_nickname(const char *name)
+{
+ struct rev_info revs;
+ struct commit *commit;
+ struct strbuf buf = STRBUF_INIT;
+ const char *av[20];
+ int ac = 0;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ strbuf_addf(&buf, "--author=%s", name);
+ av[++ac] = "--all";
+ av[++ac] = "-i";
+ av[++ac] = buf.buf;
+ av[++ac] = NULL;
+ setup_revisions(ac, av, &revs, NULL);
+ revs.mailmap = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(revs.mailmap);
+ read_mailmap(revs.mailmap);
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ commit = get_revision(&revs);
+ if (commit) {
+ struct pretty_print_context ctx = {0};
+ ctx.date_mode.type = DATE_NORMAL;
+ strbuf_release(&buf);
+ format_commit_message(commit, "%aN <%aE>", &buf, &ctx);
+ release_revisions(&revs);
+ return strbuf_detach(&buf, NULL);
+ }
+ die(_("--author '%s' is not 'Name <email>' and matches no existing author"), name);
+}
+
+static void handle_ignored_arg(struct wt_status *s)
+{
+ if (!ignored_arg)
+ ; /* default already initialized */
+ else if (!strcmp(ignored_arg, "traditional"))
+ s->show_ignored_mode = SHOW_TRADITIONAL_IGNORED;
+ else if (!strcmp(ignored_arg, "no"))
+ s->show_ignored_mode = SHOW_NO_IGNORED;
+ else if (!strcmp(ignored_arg, "matching"))
+ s->show_ignored_mode = SHOW_MATCHING_IGNORED;
+ else
+ die(_("Invalid ignored mode '%s'"), ignored_arg);
+}
+
+static void handle_untracked_files_arg(struct wt_status *s)
+{
+ if (!untracked_files_arg)
+ ; /* default already initialized */
+ else if (!strcmp(untracked_files_arg, "no"))
+ s->show_untracked_files = SHOW_NO_UNTRACKED_FILES;
+ else if (!strcmp(untracked_files_arg, "normal"))
+ s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES;
+ else if (!strcmp(untracked_files_arg, "all"))
+ s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES;
+ /*
+ * Please update $__git_untracked_file_modes in
+ * git-completion.bash when you add new options
+ */
+ else
+ die(_("Invalid untracked files mode '%s'"), untracked_files_arg);
+}
+
+static const char *read_commit_message(const char *name)
+{
+ const char *out_enc;
+ struct commit *commit;
+
+ commit = lookup_commit_reference_by_name(name);
+ if (!commit)
+ die(_("could not lookup commit %s"), name);
+ out_enc = get_commit_output_encoding();
+ return logmsg_reencode(commit, NULL, out_enc);
+}
+
+/*
+ * Enumerate what needs to be propagated when --porcelain
+ * is not in effect here.
+ */
+static struct status_deferred_config {
+ enum wt_status_format status_format;
+ int show_branch;
+ enum ahead_behind_flags ahead_behind;
+} status_deferred_config = {
+ STATUS_FORMAT_UNSPECIFIED,
+ -1, /* unspecified */
+ AHEAD_BEHIND_UNSPECIFIED,
+};
+
+static void finalize_deferred_config(struct wt_status *s)
+{
+ int use_deferred_config = (status_format != STATUS_FORMAT_PORCELAIN &&
+ status_format != STATUS_FORMAT_PORCELAIN_V2 &&
+ !s->null_termination);
+
+ if (s->null_termination) {
+ if (status_format == STATUS_FORMAT_NONE ||
+ status_format == STATUS_FORMAT_UNSPECIFIED)
+ status_format = STATUS_FORMAT_PORCELAIN;
+ else if (status_format == STATUS_FORMAT_LONG)
+ die(_("options '%s' and '%s' cannot be used together"), "--long", "-z");
+ }
+
+ if (use_deferred_config && status_format == STATUS_FORMAT_UNSPECIFIED)
+ status_format = status_deferred_config.status_format;
+ if (status_format == STATUS_FORMAT_UNSPECIFIED)
+ status_format = STATUS_FORMAT_NONE;
+
+ if (use_deferred_config && s->show_branch < 0)
+ s->show_branch = status_deferred_config.show_branch;
+ if (s->show_branch < 0)
+ s->show_branch = 0;
+
+ /*
+ * If the user did not give a "--[no]-ahead-behind" command
+ * line argument *AND* we will print in a human-readable format
+ * (short, long etc.) then we inherit from the status.aheadbehind
+ * config setting. In all other cases (and porcelain V[12] formats
+ * in particular), we inherit _FULL for backwards compatibility.
+ */
+ if (use_deferred_config &&
+ s->ahead_behind_flags == AHEAD_BEHIND_UNSPECIFIED)
+ s->ahead_behind_flags = status_deferred_config.ahead_behind;
+
+ if (s->ahead_behind_flags == AHEAD_BEHIND_UNSPECIFIED)
+ s->ahead_behind_flags = AHEAD_BEHIND_FULL;
+}
+
+static void check_fixup_reword_options(int argc, const char *argv[]) {
+ if (whence != FROM_COMMIT) {
+ if (whence == FROM_MERGE)
+ die(_("You are in the middle of a merge -- cannot reword."));
+ else if (is_from_cherry_pick(whence))
+ die(_("You are in the middle of a cherry-pick -- cannot reword."));
+ }
+ if (argc)
+ die(_("reword option of '%s' and path '%s' cannot be used together"), "--fixup", *argv);
+ if (patch_interactive || interactive || all || also || only)
+ die(_("reword option of '%s' and '%s' cannot be used together"),
+ "--fixup", "--patch/--interactive/--all/--include/--only");
+}
+
+static int parse_and_validate_options(int argc, const char *argv[],
+ const struct option *options,
+ const char * const usage[],
+ const char *prefix,
+ struct commit *current_head,
+ struct wt_status *s)
+{
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ finalize_deferred_config(s);
+
+ if (force_author && !strchr(force_author, '>'))
+ force_author = find_author_by_nickname(force_author);
+
+ if (force_author && renew_authorship)
+ die(_("options '%s' and '%s' cannot be used together"), "--reset-author", "--author");
+
+ if (logfile || have_option_m || use_message)
+ use_editor = 0;
+
+ /* Sanity check options */
+ if (amend && !current_head)
+ die(_("You have nothing to amend."));
+ if (amend && whence != FROM_COMMIT) {
+ if (whence == FROM_MERGE)
+ die(_("You are in the middle of a merge -- cannot amend."));
+ else if (is_from_cherry_pick(whence))
+ die(_("You are in the middle of a cherry-pick -- cannot amend."));
+ else if (whence == FROM_REBASE_PICK)
+ die(_("You are in the middle of a rebase -- cannot amend."));
+ }
+ if (fixup_message && squash_message)
+ die(_("options '%s' and '%s' cannot be used together"), "--squash", "--fixup");
+ die_for_incompatible_opt4(!!use_message, "-C",
+ !!edit_message, "-c",
+ !!logfile, "-F",
+ !!fixup_message, "--fixup");
+ die_for_incompatible_opt4(have_option_m, "-m",
+ !!edit_message, "-c",
+ !!use_message, "-C",
+ !!logfile, "-F");
+ if (use_message || edit_message || logfile || fixup_message || have_option_m)
+ template_file = NULL;
+ if (edit_message)
+ use_message = edit_message;
+ if (amend && !use_message && !fixup_message)
+ use_message = "HEAD";
+ if (!use_message && !is_from_cherry_pick(whence) &&
+ !is_from_rebase(whence) && renew_authorship)
+ die(_("--reset-author can be used only with -C, -c or --amend."));
+ if (use_message) {
+ use_message_buffer = read_commit_message(use_message);
+ if (!renew_authorship) {
+ author_message = use_message;
+ author_message_buffer = use_message_buffer;
+ }
+ }
+ if ((is_from_cherry_pick(whence) || whence == FROM_REBASE_PICK) &&
+ !renew_authorship) {
+ author_message = "CHERRY_PICK_HEAD";
+ author_message_buffer = read_commit_message(author_message);
+ }
+
+ if (patch_interactive)
+ interactive = 1;
+
+ die_for_incompatible_opt4(also, "-i/--include",
+ only, "-o/--only",
+ all, "-a/--all",
+ interactive, "--interactive/-p/--patch");
+ if (fixup_message) {
+ /*
+ * We limit --fixup's suboptions to alphabetic characters only.
+ * If the first character after a run of alphabetic characters is a colon,
+ * then the part before the colon may be a known suboption
+ * name like `amend` or `reword`, or a misspelt suboption
+ * name. In either case, we treat it as
+ * --fixup=<suboption>:<arg>.
+ *
+ * Otherwise, we are dealing with --fixup=<commit>.
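+ *
+ * For example, "--fixup=amend:HEAD~2" and "--fixup=reword:HEAD~2"
+ * take the first form, while "--fixup=HEAD~2" takes the second.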
+ */
+ char *p = fixup_message;
+ while (isalpha(*p))
+ p++;
+ if (p > fixup_message && *p == ':') {
+ *p = '\0';
+ fixup_commit = p + 1;
+ if (!strcmp("amend", fixup_message) ||
+ !strcmp("reword", fixup_message)) {
+ fixup_prefix = "amend";
+ allow_empty = 1;
+ if (*fixup_message == 'r') {
+ check_fixup_reword_options(argc, argv);
+ only = 1;
+ }
+ } else {
+ die(_("unknown option: --fixup=%s:%s"), fixup_message, fixup_commit);
+ }
+ } else {
+ fixup_commit = fixup_message;
+ fixup_prefix = "fixup";
+ use_editor = 0;
+ }
+ }
+
+ if (0 <= edit_flag)
+ use_editor = edit_flag;
+
+ cleanup_mode = get_cleanup_mode(cleanup_arg, use_editor);
+
+ handle_untracked_files_arg(s);
+
+ if (all && argc > 0)
+ die(_("paths '%s ...' with -a does not make sense"),
+ argv[0]);
+
+ if (status_format != STATUS_FORMAT_NONE)
+ dry_run = 1;
+
+ return argc;
+}
+
+static int dry_run_commit(const char **argv, const char *prefix,
+ const struct commit *current_head, struct wt_status *s)
+{
+ int committable;
+ const char *index_file;
+
+ index_file = prepare_index(argv, prefix, current_head, 1);
+ committable = run_status(stdout, index_file, prefix, 0, s);
+ rollback_index_files();
+
+ return committable ? 0 : 1;
+}
+
+define_list_config_array_extra(color_status_slots, {"added"});
+
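+/*
+ * "added" is kept as a historical synonym for the "updated" slot;
+ * every other slot name goes through the generated lookup table.
+ */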
+static int parse_status_slot(const char *slot)
+{
+ if (!strcasecmp(slot, "added"))
+ return WT_STATUS_UPDATED;
+
+ return LOOKUP_CONFIG(color_status_slots, slot);
+}
+
+static int git_status_config(const char *k, const char *v, void *cb)
+{
+ struct wt_status *s = cb;
+ const char *slot_name;
+
+ if (starts_with(k, "column."))
+ return git_column_config(k, v, "status", &s->colopts);
+ if (!strcmp(k, "status.submodulesummary")) {
+ int is_bool;
+ s->submodule_summary = git_config_bool_or_int(k, v, &is_bool);
+ if (is_bool && s->submodule_summary)
+ s->submodule_summary = -1;
+ return 0;
+ }
+ if (!strcmp(k, "status.short")) {
+ if (git_config_bool(k, v))
+ status_deferred_config.status_format = STATUS_FORMAT_SHORT;
+ else
+ status_deferred_config.status_format = STATUS_FORMAT_NONE;
+ return 0;
+ }
+ if (!strcmp(k, "status.branch")) {
+ status_deferred_config.show_branch = git_config_bool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.aheadbehind")) {
+ status_deferred_config.ahead_behind = git_config_bool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.showstash")) {
+ s->show_stash = git_config_bool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.color") || !strcmp(k, "color.status")) {
+ s->use_color = git_config_colorbool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.displaycommentprefix")) {
+ s->display_comment_prefix = git_config_bool(k, v);
+ return 0;
+ }
+ if (skip_prefix(k, "status.color.", &slot_name) ||
+ skip_prefix(k, "color.status.", &slot_name)) {
+ int slot = parse_status_slot(slot_name);
+ if (slot < 0)
+ return 0;
+ if (!v)
+ return config_error_nonbool(k);
+ return color_parse(v, s->color_palette[slot]);
+ }
+ if (!strcmp(k, "status.relativepaths")) {
+ s->relative_paths = git_config_bool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.showuntrackedfiles")) {
+ if (!v)
+ return config_error_nonbool(k);
+ else if (!strcmp(v, "no"))
+ s->show_untracked_files = SHOW_NO_UNTRACKED_FILES;
+ else if (!strcmp(v, "normal"))
+ s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES;
+ else if (!strcmp(v, "all"))
+ s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES;
+ else
+ return error(_("Invalid untracked files mode '%s'"), v);
+ return 0;
+ }
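+ /*
+ * status.renamelimit takes precedence over diff.renamelimit:
+ * the diff.* value is applied only while the limit is still unset.
+ */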
+ if (!strcmp(k, "diff.renamelimit")) {
+ if (s->rename_limit == -1)
+ s->rename_limit = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.renamelimit")) {
+ s->rename_limit = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "diff.renames")) {
+ if (s->detect_rename == -1)
+ s->detect_rename = git_config_rename(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.renames")) {
+ s->detect_rename = git_config_rename(k, v);
+ return 0;
+ }
+ return git_diff_ui_config(k, v, NULL);
+}
+
+int cmd_status(int argc, const char **argv, const char *prefix)
+{
+ static int no_renames = -1;
+ static const char *rename_score_arg = (const char *)-1;
+ static struct wt_status s;
+ unsigned int progress_flag = 0;
+ int fd;
+ struct object_id oid;
+ static struct option builtin_status_options[] = {
+ OPT__VERBOSE(&verbose, N_("be verbose")),
+ OPT_SET_INT('s', "short", &status_format,
+ N_("show status concisely"), STATUS_FORMAT_SHORT),
+ OPT_BOOL('b', "branch", &s.show_branch,
+ N_("show branch information")),
+ OPT_BOOL(0, "show-stash", &s.show_stash,
+ N_("show stash information")),
+ OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+ N_("compute full ahead/behind values")),
+ OPT_CALLBACK_F(0, "porcelain", &status_format,
+ N_("version"), N_("machine-readable output"),
+ PARSE_OPT_OPTARG, opt_parse_porcelain),
+ OPT_SET_INT(0, "long", &status_format,
+ N_("show status in long format (default)"),
+ STATUS_FORMAT_LONG),
+ OPT_BOOL('z', "null", &s.null_termination,
+ N_("terminate entries with NUL")),
+ { OPTION_STRING, 'u', "untracked-files", &untracked_files_arg,
+ N_("mode"),
+ N_("show untracked files, optional modes: all, normal, no. (Default: all)"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t)"all" },
+ { OPTION_STRING, 0, "ignored", &ignored_arg,
+ N_("mode"),
+ N_("show ignored files, optional modes: traditional, matching, no. (Default: traditional)"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t)"traditional" },
+ { OPTION_STRING, 0, "ignore-submodules", &ignore_submodule_arg, N_("when"),
+ N_("ignore changes to submodules, optional when: all, dirty, untracked. (Default: all)"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t)"all" },
+ OPT_COLUMN(0, "column", &s.colopts, N_("list untracked files in columns")),
+ OPT_BOOL(0, "no-renames", &no_renames, N_("do not detect renames")),
+ OPT_CALLBACK_F('M', "find-renames", &rename_score_arg,
+ N_("n"), N_("detect renames, optionally set similarity index"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_rename_score),
+ OPT_END(),
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_status_usage, builtin_status_options);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ status_init_config(&s, git_status_config);
+ argc = parse_options(argc, argv, prefix,
+ builtin_status_options,
+ builtin_status_usage, 0);
+ finalize_colopts(&s.colopts, -1);
+ finalize_deferred_config(&s);
+
+ handle_untracked_files_arg(&s);
+ handle_ignored_arg(&s);
+
+ if (s.show_ignored_mode == SHOW_MATCHING_IGNORED &&
+ s.show_untracked_files == SHOW_NO_UNTRACKED_FILES)
+ die(_("Unsupported combination of ignored and untracked-files arguments"));
+
+ parse_pathspec(&s.pathspec, 0,
+ PATHSPEC_PREFER_FULL,
+ prefix, argv);
+
+ if (status_format != STATUS_FORMAT_PORCELAIN &&
+ status_format != STATUS_FORMAT_PORCELAIN_V2)
+ progress_flag = REFRESH_PROGRESS;
+ repo_read_index(the_repository);
+ refresh_index(&the_index,
+ REFRESH_QUIET|REFRESH_UNMERGED|progress_flag,
+ &s.pathspec, NULL, NULL);
+
+ if (use_optional_locks())
+ fd = repo_hold_locked_index(the_repository, &index_lock, 0);
+ else
+ fd = -1;
+
+ s.is_initial = get_oid(s.reference, &oid) ? 1 : 0;
+ if (!s.is_initial)
+ oidcpy(&s.oid_commit, &oid);
+
+ s.ignore_submodule_arg = ignore_submodule_arg;
+ s.status_format = status_format;
+ s.verbose = verbose;
+ if (no_renames != -1)
+ s.detect_rename = !no_renames;
+ if ((intptr_t)rename_score_arg != -1) {
+ if (s.detect_rename < DIFF_DETECT_RENAME)
+ s.detect_rename = DIFF_DETECT_RENAME;
+ if (rename_score_arg)
+ s.rename_score = parse_rename_score(&rename_score_arg);
+ }
+
+ wt_status_collect(&s);
+
+ if (0 <= fd)
+ repo_update_index_if_able(the_repository, &index_lock);
+
+ if (s.relative_paths)
+ s.prefix = prefix;
+
+ wt_status_print(&s);
+ wt_status_collect_free_buffers(&s);
+
+ return 0;
+}
+
+static int git_commit_config(const char *k, const char *v, void *cb)
+{
+ struct wt_status *s = cb;
+ int status;
+
+ if (!strcmp(k, "commit.template"))
+ return git_config_pathname(&template_file, k, v);
+ if (!strcmp(k, "commit.status")) {
+ include_status = git_config_bool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "commit.cleanup"))
+ return git_config_string(&cleanup_arg, k, v);
+ if (!strcmp(k, "commit.gpgsign")) {
+ sign_commit = git_config_bool(k, v) ? "" : NULL;
+ return 0;
+ }
+ if (!strcmp(k, "commit.verbose")) {
+ int is_bool;
+ config_commit_verbose = git_config_bool_or_int(k, v, &is_bool);
+ return 0;
+ }
+
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+ return git_status_config(k, v, s);
+}
+
+int cmd_commit(int argc, const char **argv, const char *prefix)
+{
+ static struct wt_status s;
+ static struct option builtin_commit_options[] = {
+ OPT__QUIET(&quiet, N_("suppress summary after successful commit")),
+ OPT__VERBOSE(&verbose, N_("show diff in commit message template")),
+
+ OPT_GROUP(N_("Commit message options")),
+ OPT_FILENAME('F', "file", &logfile, N_("read message from file")),
+ OPT_STRING(0, "author", &force_author, N_("author"), N_("override author for commit")),
+ OPT_STRING(0, "date", &force_date, N_("date"), N_("override date for commit")),
+ OPT_CALLBACK('m', "message", &message, N_("message"), N_("commit message"), opt_parse_m),
+ OPT_STRING('c', "reedit-message", &edit_message, N_("commit"), N_("reuse and edit message from specified commit")),
+ OPT_STRING('C', "reuse-message", &use_message, N_("commit"), N_("reuse message from specified commit")),
+ /*
+ * TRANSLATORS: Leave "[(amend|reword):]" as-is,
+ * and only translate <commit>.
+ */
+ OPT_STRING(0, "fixup", &fixup_message, N_("[(amend|reword):]commit"), N_("use autosquash formatted message to fixup or amend/reword specified commit")),
+ OPT_STRING(0, "squash", &squash_message, N_("commit"), N_("use autosquash formatted message to squash specified commit")),
+ OPT_BOOL(0, "reset-author", &renew_authorship, N_("the commit is authored by me now (used with -C/-c/--amend)")),
+ OPT_CALLBACK_F(0, "trailer", &trailer_args, N_("trailer"), N_("add custom trailer(s)"), PARSE_OPT_NONEG, opt_pass_trailer),
+ OPT_BOOL('s', "signoff", &signoff, N_("add a Signed-off-by trailer")),
+ OPT_FILENAME('t', "template", &template_file, N_("use specified template file")),
+ OPT_BOOL('e', "edit", &edit_flag, N_("force edit of commit")),
+ OPT_CLEANUP(&cleanup_arg),
+ OPT_BOOL(0, "status", &include_status, N_("include status in commit message template")),
+ { OPTION_STRING, 'S', "gpg-sign", &sign_commit, N_("key-id"),
+ N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ /* end commit message options */
+
+ OPT_GROUP(N_("Commit contents options")),
+ OPT_BOOL('a', "all", &all, N_("commit all changed files")),
+ OPT_BOOL('i', "include", &also, N_("add specified files to index for commit")),
+ OPT_BOOL(0, "interactive", &interactive, N_("interactively add files")),
+ OPT_BOOL('p', "patch", &patch_interactive, N_("interactively add changes")),
+ OPT_BOOL('o', "only", &only, N_("commit only specified files")),
+ OPT_BOOL('n', "no-verify", &no_verify, N_("bypass pre-commit and commit-msg hooks")),
+ OPT_BOOL(0, "dry-run", &dry_run, N_("show what would be committed")),
+ OPT_SET_INT(0, "short", &status_format, N_("show status concisely"),
+ STATUS_FORMAT_SHORT),
+ OPT_BOOL(0, "branch", &s.show_branch, N_("show branch information")),
+ OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+ N_("compute full ahead/behind values")),
+ OPT_SET_INT(0, "porcelain", &status_format,
+ N_("machine-readable output"), STATUS_FORMAT_PORCELAIN),
+ OPT_SET_INT(0, "long", &status_format,
+ N_("show status in long format (default)"),
+ STATUS_FORMAT_LONG),
+ OPT_BOOL('z', "null", &s.null_termination,
+ N_("terminate entries with NUL")),
+ OPT_BOOL(0, "amend", &amend, N_("amend previous commit")),
+ OPT_BOOL(0, "no-post-rewrite", &no_post_rewrite, N_("bypass post-rewrite hook")),
+ { OPTION_STRING, 'u', "untracked-files", &untracked_files_arg, N_("mode"), N_("show untracked files, optional modes: all, normal, no. (Default: all)"), PARSE_OPT_OPTARG, NULL, (intptr_t)"all" },
+ OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
+ OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
+ /* end commit contents options */
+
+ OPT_HIDDEN_BOOL(0, "allow-empty", &allow_empty,
+ N_("ok to record an empty change")),
+ OPT_HIDDEN_BOOL(0, "allow-empty-message", &allow_empty_message,
+ N_("ok to record a change with an empty message")),
+
+ OPT_END()
+ };
+
+ struct strbuf sb = STRBUF_INIT;
+ struct strbuf author_ident = STRBUF_INIT;
+ const char *index_file, *reflog_msg;
+ struct object_id oid;
+ struct commit_list *parents = NULL;
+ struct stat statbuf;
+ struct commit *current_head = NULL;
+ struct commit_extra_header *extra = NULL;
+ struct strbuf err = STRBUF_INIT;
+ int ret = 0;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_commit_usage, builtin_commit_options);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ status_init_config(&s, git_commit_config);
+ s.commit_template = 1;
+ status_format = STATUS_FORMAT_NONE; /* Ignore status.short */
+ s.colopts = 0;
+
+ if (get_oid("HEAD", &oid))
+ current_head = NULL;
+ else {
+ current_head = lookup_commit_or_die(&oid, "HEAD");
+ if (parse_commit(current_head))
+ die(_("could not parse HEAD commit"));
+ }
+ verbose = -1; /* unspecified */
+ argc = parse_and_validate_options(argc, argv, builtin_commit_options,
+ builtin_commit_usage,
+ prefix, current_head, &s);
+ if (verbose == -1)
+ verbose = (config_commit_verbose < 0) ? 0 : config_commit_verbose;
+
+ if (dry_run)
+ return dry_run_commit(argv, prefix, current_head, &s);
+ index_file = prepare_index(argv, prefix, current_head, 0);
+
+ /*
+ * Set up everything for writing the commit object. This includes
+ * running hooks, writing the trees, and interacting with the user.
+ */
+ if (!prepare_to_commit(index_file, prefix,
+ current_head, &s, &author_ident)) {
+ ret = 1;
+ rollback_index_files();
+ goto cleanup;
+ }
+
+ /* Determine parents */
+ reflog_msg = getenv("GIT_REFLOG_ACTION");
+ if (!current_head) {
+ if (!reflog_msg)
+ reflog_msg = "commit (initial)";
+ } else if (amend) {
+ if (!reflog_msg)
+ reflog_msg = "commit (amend)";
+ parents = copy_commit_list(current_head->parents);
+ } else if (whence == FROM_MERGE) {
+ struct strbuf m = STRBUF_INIT;
+ FILE *fp;
+ int allow_fast_forward = 1;
+ struct commit_list **pptr = &parents;
+
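+ /*
+ * A merge is in progress: record HEAD plus every commit listed
+ * in MERGE_HEAD as parents; unless MERGE_MODE says "no-ff",
+ * redundant parents are dropped via reduce_heads_replace().
+ */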
+ if (!reflog_msg)
+ reflog_msg = "commit (merge)";
+ pptr = commit_list_append(current_head, pptr);
+ fp = xfopen(git_path_merge_head(the_repository), "r");
+ while (strbuf_getline_lf(&m, fp) != EOF) {
+ struct commit *parent;
+
+ parent = get_merge_parent(m.buf);
+ if (!parent)
+ die(_("Corrupt MERGE_HEAD file (%s)"), m.buf);
+ pptr = commit_list_append(parent, pptr);
+ }
+ fclose(fp);
+ strbuf_release(&m);
+ if (!stat(git_path_merge_mode(the_repository), &statbuf)) {
+ if (strbuf_read_file(&sb, git_path_merge_mode(the_repository), 0) < 0)
+ die_errno(_("could not read MERGE_MODE"));
+ if (!strcmp(sb.buf, "no-ff"))
+ allow_fast_forward = 0;
+ }
+ if (allow_fast_forward)
+ reduce_heads_replace(&parents);
+ } else {
+ if (!reflog_msg)
+ reflog_msg = is_from_cherry_pick(whence)
+ ? "commit (cherry-pick)"
+ : is_from_rebase(whence)
+ ? "commit (rebase)"
+ : "commit";
+ commit_list_insert(current_head, &parents);
+ }
+
+ /* Finally, get the commit message */
+ strbuf_reset(&sb);
+ if (strbuf_read_file(&sb, git_path_commit_editmsg(), 0) < 0) {
+ int saved_errno = errno;
+ rollback_index_files();
+ die(_("could not read commit message: %s"), strerror(saved_errno));
+ }
+
+ cleanup_message(&sb, cleanup_mode, verbose);
+
+ if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) {
+ rollback_index_files();
+ fprintf(stderr, _("Aborting commit due to empty commit message.\n"));
+ exit(1);
+ }
+ if (template_untouched(&sb, template_file, cleanup_mode) && !allow_empty_message) {
+ rollback_index_files();
+ fprintf(stderr, _("Aborting commit; you did not edit the message.\n"));
+ exit(1);
+ }
+
+ if (fixup_message && starts_with(sb.buf, "amend! ") &&
+ !allow_empty_message) {
+ struct strbuf body = STRBUF_INIT;
+ size_t len = commit_subject_length(sb.buf);
+ strbuf_addstr(&body, sb.buf + len);
+ if (message_is_empty(&body, cleanup_mode)) {
+ rollback_index_files();
+ fprintf(stderr, _("Aborting commit due to empty commit message body.\n"));
+ exit(1);
+ }
+ strbuf_release(&body);
+ }
+
+ if (amend) {
+ const char *exclude_gpgsig[3] = { "gpgsig", "gpgsig-sha256", NULL };
+ extra = read_commit_extra_headers(current_head, exclude_gpgsig);
+ } else {
+ struct commit_extra_header **tail = &extra;
+ append_merge_tag_headers(parents, &tail);
+ }
+
+ if (commit_tree_extended(sb.buf, sb.len, &the_index.cache_tree->oid,
+ parents, &oid, author_ident.buf, NULL,
+ sign_commit, extra)) {
+ rollback_index_files();
+ die(_("failed to write commit object"));
+ }
+ free_commit_extra_headers(extra);
+
+ if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
+ &err)) {
+ rollback_index_files();
+ die("%s", err.buf);
+ }
+
+ sequencer_post_commit_cleanup(the_repository, 0);
+ unlink(git_path_merge_head(the_repository));
+ unlink(git_path_merge_msg(the_repository));
+ unlink(git_path_merge_mode(the_repository));
+ unlink(git_path_squash_msg(the_repository));
+
+ if (commit_index_files())
+ die(_("repository has been updated, but unable to write\n"
+ "new_index file. Check that disk is not full and quota is\n"
+ "not exceeded, and then \"git restore --staged :/\" to recover."));
+
+ git_test_write_commit_graph_or_die();
+
+ repo_rerere(the_repository, 0);
+ run_auto_maintenance(quiet);
+ run_commit_hook(use_editor, get_index_file(), NULL, "post-commit",
+ NULL);
+ if (amend && !no_post_rewrite) {
+ commit_post_rewrite(the_repository, current_head, &oid);
+ }
+ if (!quiet) {
+ unsigned int flags = 0;
+
+ if (!current_head)
+ flags |= SUMMARY_INITIAL_COMMIT;
+ if (author_date_is_interesting())
+ flags |= SUMMARY_SHOW_AUTHOR_DATE;
+ print_commit_summary(the_repository, prefix,
+ &oid, flags);
+ }
+
+ apply_autostash(git_path_merge_autostash(the_repository));
+
+cleanup:
+ UNLEAK(author_ident);
+ UNLEAK(err);
+ UNLEAK(sb);
+ return ret;
+}
diff --git a/builtin/config.c b/builtin/config.c
new file mode 100644
index 0000000..753e5fa
--- /dev/null
+++ b/builtin/config.c
@@ -0,0 +1,970 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "color.h"
+#include "parse-options.h"
+#include "urlmatch.h"
+#include "quote.h"
+#include "worktree.h"
+
+static const char *const builtin_config_usage[] = {
+ N_("git config [<options>]"),
+ NULL
+};
+
+static char *key;
+static regex_t *key_regexp;
+static const char *value_pattern;
+static regex_t *regexp;
+static int show_keys;
+static int omit_values;
+static int use_key_regexp;
+static int do_all;
+static int do_not_match;
+static char delim = '=';
+static char key_delim = ' ';
+static char term = '\n';
+
+static int use_global_config, use_system_config, use_local_config;
+static int use_worktree_config;
+static struct git_config_source given_config_source;
+static int actions, type;
+static char *default_value;
+static int end_nul;
+static int respect_includes_opt = -1;
+static struct config_options config_options;
+static int show_origin;
+static int show_scope;
+static int fixed_value;
+
+#define ACTION_GET (1<<0)
+#define ACTION_GET_ALL (1<<1)
+#define ACTION_GET_REGEXP (1<<2)
+#define ACTION_REPLACE_ALL (1<<3)
+#define ACTION_ADD (1<<4)
+#define ACTION_UNSET (1<<5)
+#define ACTION_UNSET_ALL (1<<6)
+#define ACTION_RENAME_SECTION (1<<7)
+#define ACTION_REMOVE_SECTION (1<<8)
+#define ACTION_LIST (1<<9)
+#define ACTION_EDIT (1<<10)
+#define ACTION_SET (1<<11)
+#define ACTION_SET_ALL (1<<12)
+#define ACTION_GET_COLOR (1<<13)
+#define ACTION_GET_COLORBOOL (1<<14)
+#define ACTION_GET_URLMATCH (1<<15)
+
+/*
+ * The actions "ACTION_LIST | ACTION_GET_*" which may produce more than
+ * one line of output and which should therefore be paged.
+ */
+#define PAGING_ACTIONS (ACTION_LIST | ACTION_GET_ALL | \
+ ACTION_GET_REGEXP | ACTION_GET_URLMATCH)
+
+#define TYPE_BOOL 1
+#define TYPE_INT 2
+#define TYPE_BOOL_OR_INT 3
+#define TYPE_PATH 4
+#define TYPE_EXPIRY_DATE 5
+#define TYPE_COLOR 6
+#define TYPE_BOOL_OR_STR 7
+
+#define OPT_CALLBACK_VALUE(s, l, v, h, i) \
+ { OPTION_CALLBACK, (s), (l), (v), NULL, (h), PARSE_OPT_NOARG | \
+ PARSE_OPT_NONEG, option_parse_type, (i) }
+
+static NORETURN void usage_builtin_config(void);
+
+static int option_parse_type(const struct option *opt, const char *arg,
+ int unset)
+{
+ int new_type, *to_type;
+
+ if (unset) {
+ *((int *) opt->value) = 0;
+ return 0;
+ }
+
+ /*
+ * To support '--<type>' style flags, begin with new_type equal to
+ * opt->defval.
+ */
+ new_type = opt->defval;
+ if (!new_type) {
+ if (!strcmp(arg, "bool"))
+ new_type = TYPE_BOOL;
+ else if (!strcmp(arg, "int"))
+ new_type = TYPE_INT;
+ else if (!strcmp(arg, "bool-or-int"))
+ new_type = TYPE_BOOL_OR_INT;
+ else if (!strcmp(arg, "bool-or-str"))
+ new_type = TYPE_BOOL_OR_STR;
+ else if (!strcmp(arg, "path"))
+ new_type = TYPE_PATH;
+ else if (!strcmp(arg, "expiry-date"))
+ new_type = TYPE_EXPIRY_DATE;
+ else if (!strcmp(arg, "color"))
+ new_type = TYPE_COLOR;
+ else
+ die(_("unrecognized --type argument, %s"), arg);
+ }
+
+ to_type = opt->value;
+ if (*to_type && *to_type != new_type) {
+ /*
+ * Complain when there is a new type not equal to the old type.
+ * This allows for combinations like '--int --type=int' and
+ * '--type=int --type=int', but disallows ones like '--type=bool
+ * --int' and '--type=bool --type=int'.
+ */
+ error(_("only one type at a time"));
+ usage_builtin_config();
+ }
+ *to_type = new_type;
+
+ return 0;
+}
+
+static struct option builtin_config_options[] = {
+ OPT_GROUP(N_("Config file location")),
+ OPT_BOOL(0, "global", &use_global_config, N_("use global config file")),
+ OPT_BOOL(0, "system", &use_system_config, N_("use system config file")),
+ OPT_BOOL(0, "local", &use_local_config, N_("use repository config file")),
+ OPT_BOOL(0, "worktree", &use_worktree_config, N_("use per-worktree config file")),
+ OPT_STRING('f', "file", &given_config_source.file, N_("file"), N_("use given config file")),
+ OPT_STRING(0, "blob", &given_config_source.blob, N_("blob-id"), N_("read config from given blob object")),
+ OPT_GROUP(N_("Action")),
+ OPT_BIT(0, "get", &actions, N_("get value: name [value-pattern]"), ACTION_GET),
+ OPT_BIT(0, "get-all", &actions, N_("get all values: key [value-pattern]"), ACTION_GET_ALL),
+ OPT_BIT(0, "get-regexp", &actions, N_("get values for regexp: name-regex [value-pattern]"), ACTION_GET_REGEXP),
+ OPT_BIT(0, "get-urlmatch", &actions, N_("get value specific for the URL: section[.var] URL"), ACTION_GET_URLMATCH),
+ OPT_BIT(0, "replace-all", &actions, N_("replace all matching variables: name value [value-pattern]"), ACTION_REPLACE_ALL),
+ OPT_BIT(0, "add", &actions, N_("add a new variable: name value"), ACTION_ADD),
+ OPT_BIT(0, "unset", &actions, N_("remove a variable: name [value-pattern]"), ACTION_UNSET),
+ OPT_BIT(0, "unset-all", &actions, N_("remove all matches: name [value-pattern]"), ACTION_UNSET_ALL),
+ OPT_BIT(0, "rename-section", &actions, N_("rename section: old-name new-name"), ACTION_RENAME_SECTION),
+ OPT_BIT(0, "remove-section", &actions, N_("remove a section: name"), ACTION_REMOVE_SECTION),
+ OPT_BIT('l', "list", &actions, N_("list all"), ACTION_LIST),
+ OPT_BOOL(0, "fixed-value", &fixed_value, N_("use string equality when comparing values to 'value-pattern'")),
+ OPT_BIT('e', "edit", &actions, N_("open an editor"), ACTION_EDIT),
+ OPT_BIT(0, "get-color", &actions, N_("find the color configured: slot [default]"), ACTION_GET_COLOR),
+ OPT_BIT(0, "get-colorbool", &actions, N_("find the color setting: slot [stdout-is-tty]"), ACTION_GET_COLORBOOL),
+ OPT_GROUP(N_("Type")),
+ OPT_CALLBACK('t', "type", &type, N_("type"), N_("value is given this type"), option_parse_type),
+ OPT_CALLBACK_VALUE(0, "bool", &type, N_("value is \"true\" or \"false\""), TYPE_BOOL),
+ OPT_CALLBACK_VALUE(0, "int", &type, N_("value is decimal number"), TYPE_INT),
+ OPT_CALLBACK_VALUE(0, "bool-or-int", &type, N_("value is --bool or --int"), TYPE_BOOL_OR_INT),
+ OPT_CALLBACK_VALUE(0, "bool-or-str", &type, N_("value is --bool or string"), TYPE_BOOL_OR_STR),
+ OPT_CALLBACK_VALUE(0, "path", &type, N_("value is a path (file or directory name)"), TYPE_PATH),
+ OPT_CALLBACK_VALUE(0, "expiry-date", &type, N_("value is an expiry date"), TYPE_EXPIRY_DATE),
+ OPT_GROUP(N_("Other")),
+ OPT_BOOL('z', "null", &end_nul, N_("terminate values with NUL byte")),
+ OPT_BOOL(0, "name-only", &omit_values, N_("show variable names only")),
+ OPT_BOOL(0, "includes", &respect_includes_opt, N_("respect include directives on lookup")),
+ OPT_BOOL(0, "show-origin", &show_origin, N_("show origin of config (file, standard input, blob, command line)")),
+ OPT_BOOL(0, "show-scope", &show_scope, N_("show scope of config (worktree, local, global, system, command)")),
+ OPT_STRING(0, "default", &default_value, N_("value"), N_("with --get, use default value when missing entry")),
+ OPT_END(),
+};
+
+static NORETURN void usage_builtin_config(void)
+{
+ usage_with_options(builtin_config_usage, builtin_config_options);
+}
+
+static void check_argc(int argc, int min, int max)
+{
+ if (argc >= min && argc <= max)
+ return;
+ if (min == max)
+ error(_("wrong number of arguments, should be %d"), min);
+ else
+ error(_("wrong number of arguments, should be from %d to %d"),
+ min, max);
+ usage_builtin_config();
+}
+
+static void show_config_origin(struct strbuf *buf)
+{
+ const char term = end_nul ? '\0' : '\t';
+
+ strbuf_addstr(buf, current_config_origin_type());
+ strbuf_addch(buf, ':');
+ if (end_nul)
+ strbuf_addstr(buf, current_config_name());
+ else
+ quote_c_style(current_config_name(), buf, NULL, 0);
+ strbuf_addch(buf, term);
+}
+
+static void show_config_scope(struct strbuf *buf)
+{
+ const char term = end_nul ? '\0' : '\t';
+ const char *scope = config_scope_name(current_config_scope());
+
+ strbuf_addstr(buf, N_(scope));
+ strbuf_addch(buf, term);
+}
+
+static int show_all_config(const char *key_, const char *value_,
+ void *cb UNUSED)
+{
+ if (show_origin || show_scope) {
+ struct strbuf buf = STRBUF_INIT;
+ if (show_scope)
+ show_config_scope(&buf);
+ if (show_origin)
+ show_config_origin(&buf);
+ /* Use fwrite as "buf" can contain \0's if "end_null" is set. */
+ fwrite(buf.buf, 1, buf.len, stdout);
+ strbuf_release(&buf);
+ }
+ if (!omit_values && value_)
+ printf("%s%c%s%c", key_, delim, value_, term);
+ else
+ printf("%s%c", key_, term);
+ return 0;
+}
+
+struct strbuf_list {
+ struct strbuf *items;
+ int nr;
+ int alloc;
+};
+
+static int format_config(struct strbuf *buf, const char *key_, const char *value_)
+{
+ if (show_scope)
+ show_config_scope(buf);
+ if (show_origin)
+ show_config_origin(buf);
+ if (show_keys)
+ strbuf_addstr(buf, key_);
+ if (!omit_values) {
+ if (show_keys)
+ strbuf_addch(buf, key_delim);
+
+ if (type == TYPE_INT)
+ strbuf_addf(buf, "%"PRId64,
+ git_config_int64(key_, value_ ? value_ : ""));
+ else if (type == TYPE_BOOL)
+ strbuf_addstr(buf, git_config_bool(key_, value_) ?
+ "true" : "false");
+ else if (type == TYPE_BOOL_OR_INT) {
+ int is_bool, v;
+ v = git_config_bool_or_int(key_, value_, &is_bool);
+ if (is_bool)
+ strbuf_addstr(buf, v ? "true" : "false");
+ else
+ strbuf_addf(buf, "%d", v);
+ } else if (type == TYPE_BOOL_OR_STR) {
+ int v = git_parse_maybe_bool(value_);
+ if (v < 0)
+ strbuf_addstr(buf, value_);
+ else
+ strbuf_addstr(buf, v ? "true" : "false");
+ } else if (type == TYPE_PATH) {
+ const char *v;
+ if (git_config_pathname(&v, key_, value_) < 0)
+ return -1;
+ strbuf_addstr(buf, v);
+ free((char *)v);
+ } else if (type == TYPE_EXPIRY_DATE) {
+ timestamp_t t;
+ if (git_config_expiry_date(&t, key_, value_) < 0)
+ return -1;
+ strbuf_addf(buf, "%"PRItime, t);
+ } else if (type == TYPE_COLOR) {
+ char v[COLOR_MAXLEN];
+ if (git_config_color(v, key_, value_) < 0)
+ return -1;
+ strbuf_addstr(buf, v);
+ } else if (value_) {
+ strbuf_addstr(buf, value_);
+ } else {
+ /* Just show the key name; back out delimiter */
+ if (show_keys)
+ strbuf_setlen(buf, buf->len - 1);
+ }
+ }
+ strbuf_addch(buf, term);
+ return 0;
+}
+
+static int collect_config(const char *key_, const char *value_, void *cb)
+{
+ struct strbuf_list *values = cb;
+
+ if (!use_key_regexp && strcmp(key_, key))
+ return 0;
+ if (use_key_regexp && regexec(key_regexp, key_, 0, NULL, 0))
+ return 0;
+ if (fixed_value && strcmp(value_pattern, (value_?value_:"")))
+ return 0;
+ if (regexp != NULL &&
+ (do_not_match ^ !!regexec(regexp, (value_?value_:""), 0, NULL, 0)))
+ return 0;
+
+ ALLOC_GROW(values->items, values->nr + 1, values->alloc);
+ strbuf_init(&values->items[values->nr], 0);
+
+ return format_config(&values->items[values->nr++], key_, value_);
+}
+
+static int get_value(const char *key_, const char *regex_, unsigned flags)
+{
+ int ret = CONFIG_GENERIC_ERROR;
+ struct strbuf_list values = {NULL};
+ int i;
+
+ if (use_key_regexp) {
+ char *tl;
+
+ /*
+ * NEEDSWORK: this naive pattern lowercasing obviously does not
+ * work for more complex patterns like "^[^.]*Foo.*bar".
+ * Perhaps we should deprecate this altogether someday.
+ */
+
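+ /*
+ * Only the section (before the first '.') and the variable name
+ * (after the last '.') are lowercased; any subsection in between
+ * remains case-sensitive.
+ */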
+ key = xstrdup(key_);
+ for (tl = key + strlen(key) - 1;
+ tl >= key && *tl != '.';
+ tl--)
+ *tl = tolower(*tl);
+ for (tl = key; *tl && *tl != '.'; tl++)
+ *tl = tolower(*tl);
+
+ key_regexp = (regex_t*)xmalloc(sizeof(regex_t));
+ if (regcomp(key_regexp, key, REG_EXTENDED)) {
+ error(_("invalid key pattern: %s"), key_);
+ FREE_AND_NULL(key_regexp);
+ ret = CONFIG_INVALID_PATTERN;
+ goto free_strings;
+ }
+ } else {
+ if (git_config_parse_key(key_, &key, NULL)) {
+ ret = CONFIG_INVALID_KEY;
+ goto free_strings;
+ }
+ }
+
+ if (regex_ && (flags & CONFIG_FLAGS_FIXED_VALUE))
+ value_pattern = regex_;
+ else if (regex_) {
+ if (regex_[0] == '!') {
+ do_not_match = 1;
+ regex_++;
+ }
+
+ regexp = (regex_t*)xmalloc(sizeof(regex_t));
+ if (regcomp(regexp, regex_, REG_EXTENDED)) {
+ error(_("invalid pattern: %s"), regex_);
+ FREE_AND_NULL(regexp);
+ ret = CONFIG_INVALID_PATTERN;
+ goto free_strings;
+ }
+ }
+
+ config_with_options(collect_config, &values,
+ &given_config_source, &config_options);
+
+ if (!values.nr && default_value) {
+ struct strbuf *item;
+ ALLOC_GROW(values.items, values.nr + 1, values.alloc);
+ item = &values.items[values.nr++];
+ strbuf_init(item, 0);
+ if (format_config(item, key_, default_value) < 0)
+ die(_("failed to format default config value: %s"),
+ default_value);
+ }
+
+ ret = !values.nr;
+
+ for (i = 0; i < values.nr; i++) {
+ struct strbuf *buf = values.items + i;
+ if (do_all || i == values.nr - 1)
+ fwrite(buf->buf, 1, buf->len, stdout);
+ strbuf_release(buf);
+ }
+ free(values.items);
+
+free_strings:
+ free(key);
+ if (key_regexp) {
+ regfree(key_regexp);
+ free(key_regexp);
+ }
+ if (regexp) {
+ regfree(regexp);
+ free(regexp);
+ }
+
+ return ret;
+}
+
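+/*
+ * Canonicalize a value before it is written, according to the
+ * requested --type: e.g. with --type=bool an input of "yes" is stored
+ * as "true", and with --type=int a value like "1k" is stored as "1024".
+ */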
+static char *normalize_value(const char *key, const char *value)
+{
+ if (!value)
+ return NULL;
+
+ if (type == 0 || type == TYPE_PATH || type == TYPE_EXPIRY_DATE)
+ /*
+ * We don't do normalization for TYPE_PATH here: If
+ * the path is like ~/foobar/, we prefer to store
+ * "~/foobar/" in the config file, and to expand the ~
+ * when retrieving the value.
+ * Also don't do normalization for expiry dates.
+ */
+ return xstrdup(value);
+ if (type == TYPE_INT)
+ return xstrfmt("%"PRId64, git_config_int64(key, value));
+ if (type == TYPE_BOOL)
+ return xstrdup(git_config_bool(key, value) ? "true" : "false");
+ if (type == TYPE_BOOL_OR_INT) {
+ int is_bool, v;
+ v = git_config_bool_or_int(key, value, &is_bool);
+ if (!is_bool)
+ return xstrfmt("%d", v);
+ else
+ return xstrdup(v ? "true" : "false");
+ }
+ if (type == TYPE_BOOL_OR_STR) {
+ int v = git_parse_maybe_bool(value);
+ if (v < 0)
+ return xstrdup(value);
+ else
+ return xstrdup(v ? "true" : "false");
+ }
+ if (type == TYPE_COLOR) {
+ char v[COLOR_MAXLEN];
+ if (git_config_color(v, key, value))
+ die(_("cannot parse color '%s'"), value);
+
+ /*
+ * The contents of `v` now contain an ANSI escape
+ * sequence, not suitable for including within a
+ * configuration file. Treat the above as a
+ * "sanity-check", and return the given value, which we
+ * know is representable as valid color code.
+ */
+ return xstrdup(value);
+ }
+
+ BUG("cannot normalize type %d", type);
+}
+
+static int get_color_found;
+static const char *get_color_slot;
+static const char *get_colorbool_slot;
+static char parsed_color[COLOR_MAXLEN];
+
+static int git_get_color_config(const char *var, const char *value,
+ void *cb UNUSED)
+{
+ if (!strcmp(var, get_color_slot)) {
+ if (!value)
+ config_error_nonbool(var);
+ if (color_parse(value, parsed_color) < 0)
+ return -1;
+ get_color_found = 1;
+ }
+ return 0;
+}
+
+static void get_color(const char *var, const char *def_color)
+{
+ get_color_slot = var;
+ get_color_found = 0;
+ parsed_color[0] = '\0';
+ config_with_options(git_get_color_config, NULL,
+ &given_config_source, &config_options);
+
+ if (!get_color_found && def_color) {
+ if (color_parse(def_color, parsed_color) < 0)
+ die(_("unable to parse default color value"));
+ }
+
+ fputs(parsed_color, stdout);
+}
+
+static int get_colorbool_found;
+static int get_diff_color_found;
+static int get_color_ui_found;
+static int git_get_colorbool_config(const char *var, const char *value,
+ void *data UNUSED)
+{
+ if (!strcmp(var, get_colorbool_slot))
+ get_colorbool_found = git_config_colorbool(var, value);
+ else if (!strcmp(var, "diff.color"))
+ get_diff_color_found = git_config_colorbool(var, value);
+ else if (!strcmp(var, "color.ui"))
+ get_color_ui_found = git_config_colorbool(var, value);
+ return 0;
+}
+
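+/*
+ * Resolve "--get-colorbool <slot>": when the requested slot is unset,
+ * fall back to the legacy "diff.color" (for color.diff only), then to
+ * "color.ui", and finally default to "auto".
+ */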
+static int get_colorbool(const char *var, int print)
+{
+ get_colorbool_slot = var;
+ get_colorbool_found = -1;
+ get_diff_color_found = -1;
+ get_color_ui_found = -1;
+ config_with_options(git_get_colorbool_config, NULL,
+ &given_config_source, &config_options);
+
+ if (get_colorbool_found < 0) {
+ if (!strcmp(get_colorbool_slot, "color.diff"))
+ get_colorbool_found = get_diff_color_found;
+ if (get_colorbool_found < 0)
+ get_colorbool_found = get_color_ui_found;
+ }
+
+ if (get_colorbool_found < 0)
+ /* default value if none found in config */
+ get_colorbool_found = GIT_COLOR_AUTO;
+
+ get_colorbool_found = want_color(get_colorbool_found);
+
+ if (print) {
+ printf("%s\n", get_colorbool_found ? "true" : "false");
+ return 0;
+ } else
+ return get_colorbool_found ? 0 : 1;
+}
+
+static void check_write(void)
+{
+ if (!given_config_source.file && !startup_info->have_repository)
+ die(_("not in a git directory"));
+
+ if (given_config_source.use_stdin)
+ die(_("writing to stdin is not supported"));
+
+ if (given_config_source.blob)
+ die(_("writing config blobs is not supported"));
+}
+
+struct urlmatch_current_candidate_value {
+ char value_is_null;
+ struct strbuf value;
+};
+
+static int urlmatch_collect_fn(const char *var, const char *value, void *cb)
+{
+ struct string_list *values = cb;
+ struct string_list_item *item = string_list_insert(values, var);
+ struct urlmatch_current_candidate_value *matched = item->util;
+
+ if (!matched) {
+ matched = xmalloc(sizeof(*matched));
+ strbuf_init(&matched->value, 0);
+ item->util = matched;
+ } else {
+ strbuf_reset(&matched->value);
+ }
+
+ if (value) {
+ strbuf_addstr(&matched->value, value);
+ matched->value_is_null = 0;
+ } else {
+ matched->value_is_null = 1;
+ }
+ return 0;
+}
+
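+/*
+ * Handle "git config --get-urlmatch <section>[.<key>] <url>": for
+ * example, "git config --get-urlmatch http https://example.com"
+ * prints every http.* variable whose URL pattern best matches the
+ * given URL.
+ */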
+static int get_urlmatch(const char *var, const char *url)
+{
+ int ret;
+ char *section_tail;
+ struct string_list_item *item;
+ struct urlmatch_config config = URLMATCH_CONFIG_INIT;
+ struct string_list values = STRING_LIST_INIT_DUP;
+
+ config.collect_fn = urlmatch_collect_fn;
+ config.cascade_fn = NULL;
+ config.cb = &values;
+
+ if (!url_normalize(url, &config.url))
+ die("%s", config.url.err);
+
+ config.section = xstrdup_tolower(var);
+ section_tail = strchr(config.section, '.');
+ if (section_tail) {
+ *section_tail = '\0';
+ config.key = section_tail + 1;
+ show_keys = 0;
+ } else {
+ config.key = NULL;
+ show_keys = 1;
+ }
+
+ config_with_options(urlmatch_config_entry, &config,
+ &given_config_source, &config_options);
+
+ ret = !values.nr;
+
+ for_each_string_list_item(item, &values) {
+ struct urlmatch_current_candidate_value *matched = item->util;
+ struct strbuf buf = STRBUF_INIT;
+
+ format_config(&buf, item->string,
+ matched->value_is_null ? NULL : matched->value.buf);
+ fwrite(buf.buf, 1, buf.len, stdout);
+ strbuf_release(&buf);
+
+ strbuf_release(&matched->value);
+ }
+ urlmatch_config_release(&config);
+ string_list_clear(&values, 1);
+ free(config.url.url);
+
+ free((void *)config.section);
+ return ret;
+}
+
+static char *default_user_config(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_addf(&buf,
+ _("# This is Git's per-user configuration file.\n"
+ "[user]\n"
+ "# Please adapt and uncomment the following lines:\n"
+ "# name = %s\n"
+ "# email = %s\n"),
+ ident_default_name(),
+ ident_default_email());
+ return strbuf_detach(&buf, NULL);
+}
+
+int cmd_config(int argc, const char **argv, const char *prefix)
+{
+ int nongit = !startup_info->have_repository;
+ char *value;
+ int flags = 0;
+
+ given_config_source.file = xstrdup_or_null(getenv(CONFIG_ENVIRONMENT));
+
+ argc = parse_options(argc, argv, prefix, builtin_config_options,
+ builtin_config_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (use_global_config + use_system_config + use_local_config +
+ use_worktree_config +
+ !!given_config_source.file + !!given_config_source.blob > 1) {
+ error(_("only one config file at a time"));
+ usage_builtin_config();
+ }
+
+ if (nongit) {
+ if (use_local_config)
+ die(_("--local can only be used inside a git repository"));
+ if (given_config_source.blob)
+ die(_("--blob can only be used inside a git repository"));
+ if (use_worktree_config)
+ die(_("--worktree can only be used inside a git repository"));
+ }
+
+ if (given_config_source.file &&
+ !strcmp(given_config_source.file, "-")) {
+ given_config_source.file = NULL;
+ given_config_source.use_stdin = 1;
+ given_config_source.scope = CONFIG_SCOPE_COMMAND;
+ }
+
+ if (use_global_config) {
+ char *user_config, *xdg_config;
+
+ git_global_config(&user_config, &xdg_config);
+ if (!user_config)
+ /*
+ * It is unknown if HOME/.gitconfig exists, so
+ * we do not know if we should write to XDG
+ * location; error out even if XDG_CONFIG_HOME
+ * is set and points at a sane location.
+ */
+ die(_("$HOME not set"));
+
+ given_config_source.scope = CONFIG_SCOPE_GLOBAL;
+
+ if (access_or_warn(user_config, R_OK, 0) &&
+ xdg_config && !access_or_warn(xdg_config, R_OK, 0)) {
+ given_config_source.file = xdg_config;
+ free(user_config);
+ } else {
+ given_config_source.file = user_config;
+ free(xdg_config);
+ }
+ }
+ else if (use_system_config) {
+ given_config_source.file = git_system_config();
+ given_config_source.scope = CONFIG_SCOPE_SYSTEM;
+ } else if (use_local_config) {
+ given_config_source.file = git_pathdup("config");
+ given_config_source.scope = CONFIG_SCOPE_LOCAL;
+ } else if (use_worktree_config) {
+ struct worktree **worktrees = get_worktrees();
+ if (repository_format_worktree_config)
+ given_config_source.file = git_pathdup("config.worktree");
+ else if (worktrees[0] && worktrees[1])
+ die(_("--worktree cannot be used with multiple "
+ "working trees unless the config\n"
+ "extension worktreeConfig is enabled. "
+ "Please read \"CONFIGURATION FILE\"\n"
+ "section in \"git help worktree\" for details"));
+ else
+ given_config_source.file = git_pathdup("config");
+ given_config_source.scope = CONFIG_SCOPE_LOCAL;
+ free_worktrees(worktrees);
+ } else if (given_config_source.file) {
+ if (!is_absolute_path(given_config_source.file) && prefix)
+ given_config_source.file =
+ prefix_filename(prefix, given_config_source.file);
+ given_config_source.scope = CONFIG_SCOPE_COMMAND;
+ } else if (given_config_source.blob) {
+ given_config_source.scope = CONFIG_SCOPE_COMMAND;
+ }
+
+ if (respect_includes_opt == -1)
+ config_options.respect_includes = !given_config_source.file;
+ else
+ config_options.respect_includes = respect_includes_opt;
+ if (!nongit) {
+ config_options.commondir = get_git_common_dir();
+ config_options.git_dir = get_git_dir();
+ }
+
+ if (end_nul) {
+ term = '\0';
+ delim = '\n';
+ key_delim = '\n';
+ }
+
+ if ((actions & (ACTION_GET_COLOR|ACTION_GET_COLORBOOL)) && type) {
+ error(_("--get-color and variable type are incoherent"));
+ usage_builtin_config();
+ }
+
+ if (HAS_MULTI_BITS(actions)) {
+ error(_("only one action at a time"));
+ usage_builtin_config();
+ }
+ if (actions == 0)
+ switch (argc) {
+ case 1: actions = ACTION_GET; break;
+ case 2: actions = ACTION_SET; break;
+ case 3: actions = ACTION_SET_ALL; break;
+ default:
+ usage_builtin_config();
+ }
+ if (omit_values &&
+ !(actions == ACTION_LIST || actions == ACTION_GET_REGEXP)) {
+ error(_("--name-only is only applicable to --list or --get-regexp"));
+ usage_builtin_config();
+ }
+
+ if (show_origin && !(actions &
+ (ACTION_GET|ACTION_GET_ALL|ACTION_GET_REGEXP|ACTION_LIST))) {
+ error(_("--show-origin is only applicable to --get, --get-all, "
+ "--get-regexp, and --list"));
+ usage_builtin_config();
+ }
+
+ if (default_value && !(actions & ACTION_GET)) {
+ error(_("--default is only applicable to --get"));
+ usage_builtin_config();
+ }
+
+ /* check usage of --fixed-value */
+ if (fixed_value) {
+ int allowed_usage = 0;
+
+ switch (actions) {
+ /* git config --get <name> <value-pattern> */
+ case ACTION_GET:
+ /* git config --get-all <name> <value-pattern> */
+ case ACTION_GET_ALL:
+ /* git config --get-regexp <name-pattern> <value-pattern> */
+ case ACTION_GET_REGEXP:
+ /* git config --unset <name> <value-pattern> */
+ case ACTION_UNSET:
+ /* git config --unset-all <name> <value-pattern> */
+ case ACTION_UNSET_ALL:
+ allowed_usage = argc > 1 && !!argv[1];
+ break;
+
+ /* git config <name> <value> <value-pattern> */
+ case ACTION_SET_ALL:
+ /* git config --replace-all <name> <value> <value-pattern> */
+ case ACTION_REPLACE_ALL:
+ allowed_usage = argc > 2 && !!argv[2];
+ break;
+
+ /* other options don't allow --fixed-value */
+ }
+
+ if (!allowed_usage) {
+ error(_("--fixed-value only applies with 'value-pattern'"));
+ usage_builtin_config();
+ }
+
+ flags |= CONFIG_FLAGS_FIXED_VALUE;
+ }
+
+ if (actions & PAGING_ACTIONS)
+ setup_auto_pager("config", 1);
+
+ if (actions == ACTION_LIST) {
+ check_argc(argc, 0, 0);
+ if (config_with_options(show_all_config, NULL,
+ &given_config_source,
+ &config_options) < 0) {
+ if (given_config_source.file)
+ die_errno(_("unable to read config file '%s'"),
+ given_config_source.file);
+ else
+ die(_("error processing config file(s)"));
+ }
+ }
+ else if (actions == ACTION_EDIT) {
+ char *config_file;
+
+ check_argc(argc, 0, 0);
+ if (!given_config_source.file && nongit)
+ die(_("not in a git directory"));
+ if (given_config_source.use_stdin)
+ die(_("editing stdin is not supported"));
+ if (given_config_source.blob)
+ die(_("editing blobs is not supported"));
+ git_config(git_default_config, NULL);
+ config_file = given_config_source.file ?
+ xstrdup(given_config_source.file) :
+ git_pathdup("config");
+ if (use_global_config) {
+ int fd = open(config_file, O_CREAT | O_EXCL | O_WRONLY, 0666);
+ if (fd >= 0) {
+ char *content = default_user_config();
+ write_str_in_full(fd, content);
+ free(content);
+ close(fd);
+ }
+ else if (errno != EEXIST)
+ die_errno(_("cannot create configuration file %s"), config_file);
+ }
+ launch_editor(config_file, NULL, NULL);
+ free(config_file);
+ }
+ else if (actions == ACTION_SET) {
+ int ret;
+ check_write();
+ check_argc(argc, 2, 2);
+ value = normalize_value(argv[0], argv[1]);
+ UNLEAK(value);
+ ret = git_config_set_in_file_gently(given_config_source.file, argv[0], value);
+ if (ret == CONFIG_NOTHING_SET)
+ error(_("cannot overwrite multiple values with a single value\n"
+ " Use a regexp, --add or --replace-all to change %s."), argv[0]);
+ return ret;
+ }
+ else if (actions == ACTION_SET_ALL) {
+ check_write();
+ check_argc(argc, 2, 3);
+ value = normalize_value(argv[0], argv[1]);
+ UNLEAK(value);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2],
+ flags);
+ }
+ else if (actions == ACTION_ADD) {
+ check_write();
+ check_argc(argc, 2, 2);
+ value = normalize_value(argv[0], argv[1]);
+ UNLEAK(value);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value,
+ CONFIG_REGEX_NONE,
+ flags);
+ }
+ else if (actions == ACTION_REPLACE_ALL) {
+ check_write();
+ check_argc(argc, 2, 3);
+ value = normalize_value(argv[0], argv[1]);
+ UNLEAK(value);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2],
+ flags | CONFIG_FLAGS_MULTI_REPLACE);
+ }
+ else if (actions == ACTION_GET) {
+ check_argc(argc, 1, 2);
+ return get_value(argv[0], argv[1], flags);
+ }
+ else if (actions == ACTION_GET_ALL) {
+ do_all = 1;
+ check_argc(argc, 1, 2);
+ return get_value(argv[0], argv[1], flags);
+ }
+ else if (actions == ACTION_GET_REGEXP) {
+ show_keys = 1;
+ use_key_regexp = 1;
+ do_all = 1;
+ check_argc(argc, 1, 2);
+ return get_value(argv[0], argv[1], flags);
+ }
+ else if (actions == ACTION_GET_URLMATCH) {
+ check_argc(argc, 2, 2);
+ return get_urlmatch(argv[0], argv[1]);
+ }
+ else if (actions == ACTION_UNSET) {
+ check_write();
+ check_argc(argc, 1, 2);
+ if (argc == 2)
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], NULL, argv[1],
+ flags);
+ else
+ return git_config_set_in_file_gently(given_config_source.file,
+ argv[0], NULL);
+ }
+ else if (actions == ACTION_UNSET_ALL) {
+ check_write();
+ check_argc(argc, 1, 2);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], NULL, argv[1],
+ flags | CONFIG_FLAGS_MULTI_REPLACE);
+ }
+ else if (actions == ACTION_RENAME_SECTION) {
+ int ret;
+ check_write();
+ check_argc(argc, 2, 2);
+ ret = git_config_rename_section_in_file(given_config_source.file,
+ argv[0], argv[1]);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ die(_("no such section: %s"), argv[0]);
+ }
+ else if (actions == ACTION_REMOVE_SECTION) {
+ int ret;
+ check_write();
+ check_argc(argc, 1, 1);
+ ret = git_config_rename_section_in_file(given_config_source.file,
+ argv[0], NULL);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ die(_("no such section: %s"), argv[0]);
+ }
+ else if (actions == ACTION_GET_COLOR) {
+ check_argc(argc, 1, 2);
+ get_color(argv[0], argv[1]);
+ }
+ else if (actions == ACTION_GET_COLORBOOL) {
+ check_argc(argc, 1, 2);
+ if (argc == 2)
+ color_stdout_is_tty = git_config_bool("command line", argv[1]);
+ return get_colorbool(argv[0], argc == 2);
+ }
+
+ return 0;
+}
diff --git a/builtin/count-objects.c b/builtin/count-objects.c
new file mode 100644
index 0000000..07b9419
--- /dev/null
+++ b/builtin/count-objects.c
@@ -0,0 +1,172 @@
+/*
+ * Builtin "git count-objects".
+ *
+ * Copyright (c) 2006 Junio C Hamano
+ */
+
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "repository.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "quote.h"
+#include "packfile.h"
+#include "object-store.h"
+
+static unsigned long garbage;
+static off_t size_garbage;
+static int verbose;
+static unsigned long loose, packed, packed_loose;
+static off_t loose_size;
+
+static const char *bits_to_msg(unsigned seen_bits)
+{
+ switch (seen_bits) {
+ case 0:
+ return "no corresponding .idx or .pack";
+ case PACKDIR_FILE_GARBAGE:
+ return "garbage found";
+ case PACKDIR_FILE_PACK:
+ return "no corresponding .idx";
+ case PACKDIR_FILE_IDX:
+ return "no corresponding .pack";
+ case PACKDIR_FILE_PACK|PACKDIR_FILE_IDX:
+ default:
+ return NULL;
+ }
+}
+
+static void real_report_garbage(unsigned seen_bits, const char *path)
+{
+ struct stat st;
+ const char *desc = bits_to_msg(seen_bits);
+
+ if (!desc)
+ return;
+
+ if (!stat(path, &st))
+ size_garbage += st.st_size;
+ warning("%s: %s", desc, path);
+ garbage++;
+}
+
+static void loose_garbage(const char *path)
+{
+ if (verbose)
+ report_garbage(PACKDIR_FILE_GARBAGE, path);
+}
+
+static int count_loose(const struct object_id *oid, const char *path, void *data)
+{
+ struct stat st;
+
+ if (lstat(path, &st) || !S_ISREG(st.st_mode))
+ loose_garbage(path);
+ else {
+ loose_size += on_disk_bytes(st);
+ loose++;
+ if (verbose && has_object_pack(oid))
+ packed_loose++;
+ }
+ return 0;
+}
+
+static int count_cruft(const char *basename, const char *path, void *data)
+{
+ loose_garbage(path);
+ return 0;
+}
+
+static int print_alternate(struct object_directory *odb, void *data)
+{
+ printf("alternate: ");
+ quote_c_style(odb->path, NULL, stdout, 0);
+ putchar('\n');
+ return 0;
+}
+
+static char const * const count_objects_usage[] = {
+ "git count-objects [-v] [-H | --human-readable]",
+ NULL
+};
+
+int cmd_count_objects(int argc, const char **argv, const char *prefix)
+{
+ int human_readable = 0;
+ struct option opts[] = {
+ OPT__VERBOSE(&verbose, N_("be verbose")),
+ OPT_BOOL('H', "human-readable", &human_readable,
+ N_("print sizes in human readable format")),
+ OPT_END(),
+ };
+
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, opts, count_objects_usage, 0);
+ /* we do not take arguments other than flags for now */
+ if (argc)
+ usage_with_options(count_objects_usage, opts);
+ if (verbose) {
+ report_garbage = real_report_garbage;
+ report_linked_checkout_garbage();
+ }
+
+ for_each_loose_file_in_objdir(get_object_directory(),
+ count_loose, count_cruft, NULL, NULL);
+
+ if (verbose) {
+ struct packed_git *p;
+ unsigned long num_pack = 0;
+ off_t size_pack = 0;
+ struct strbuf loose_buf = STRBUF_INIT;
+ struct strbuf pack_buf = STRBUF_INIT;
+ struct strbuf garbage_buf = STRBUF_INIT;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!p->pack_local)
+ continue;
+ if (open_pack_index(p))
+ continue;
+ packed += p->num_objects;
+ size_pack += p->pack_size + p->index_size;
+ num_pack++;
+ }
+
+ if (human_readable) {
+ strbuf_humanise_bytes(&loose_buf, loose_size);
+ strbuf_humanise_bytes(&pack_buf, size_pack);
+ strbuf_humanise_bytes(&garbage_buf, size_garbage);
+ } else {
+ strbuf_addf(&loose_buf, "%lu",
+ (unsigned long)(loose_size / 1024));
+ strbuf_addf(&pack_buf, "%lu",
+ (unsigned long)(size_pack / 1024));
+ strbuf_addf(&garbage_buf, "%lu",
+ (unsigned long)(size_garbage / 1024));
+ }
+
+ printf("count: %lu\n", loose);
+ printf("size: %s\n", loose_buf.buf);
+ printf("in-pack: %lu\n", packed);
+ printf("packs: %lu\n", num_pack);
+ printf("size-pack: %s\n", pack_buf.buf);
+ printf("prune-packable: %lu\n", packed_loose);
+ printf("garbage: %lu\n", garbage);
+ printf("size-garbage: %s\n", garbage_buf.buf);
+ foreach_alt_odb(print_alternate, NULL);
+ strbuf_release(&loose_buf);
+ strbuf_release(&pack_buf);
+ strbuf_release(&garbage_buf);
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+ if (human_readable)
+ strbuf_humanise_bytes(&buf, loose_size);
+ else
+ strbuf_addf(&buf, "%lu kilobytes",
+ (unsigned long)(loose_size / 1024));
+ printf("%lu objects, %s\n", loose, buf.buf);
+ strbuf_release(&buf);
+ }
+ return 0;
+}
diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c
new file mode 100644
index 0000000..f3c8983
--- /dev/null
+++ b/builtin/credential-cache--daemon.c
@@ -0,0 +1,319 @@
+#include "builtin.h"
+#include "parse-options.h"
+
+#ifndef NO_UNIX_SOCKETS
+
+#include "config.h"
+#include "tempfile.h"
+#include "credential.h"
+#include "unix-socket.h"
+
+struct credential_cache_entry {
+ struct credential item;
+ timestamp_t expiration;
+};
+static struct credential_cache_entry *entries;
+static int entries_nr;
+static int entries_alloc;
+
+static void cache_credential(struct credential *c, int timeout)
+{
+ struct credential_cache_entry *e;
+
+ ALLOC_GROW(entries, entries_nr + 1, entries_alloc);
+ e = &entries[entries_nr++];
+
+ /* take ownership of pointers */
+ memcpy(&e->item, c, sizeof(*c));
+ memset(c, 0, sizeof(*c));
+ e->expiration = time(NULL) + timeout;
+}
+
+static struct credential_cache_entry *lookup_credential(const struct credential *c)
+{
+ int i;
+ for (i = 0; i < entries_nr; i++) {
+ struct credential *e = &entries[i].item;
+ if (credential_match(c, e))
+ return &entries[i];
+ }
+ return NULL;
+}
+
+static void remove_credential(const struct credential *c)
+{
+ struct credential_cache_entry *e;
+
+ e = lookup_credential(c);
+ if (e)
+ e->expiration = 0;
+}
+
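+/*
+ * Drop expired entries and return the number of seconds until the next
+ * expiration (or until the grace period for an empty cache runs out);
+ * a return value of 0 means the daemon should exit.
+ */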
+static timestamp_t check_expirations(void)
+{
+ static timestamp_t wait_for_entry_until;
+ int i = 0;
+ timestamp_t now = time(NULL);
+ timestamp_t next = TIME_MAX;
+
+ /*
+ * Initially give the client 30 seconds to actually contact us
+ * and store a credential before we decide there's no point in
+ * keeping the daemon around.
+ */
+ if (!wait_for_entry_until)
+ wait_for_entry_until = now + 30;
+
+ while (i < entries_nr) {
+ if (entries[i].expiration <= now) {
+ entries_nr--;
+ credential_clear(&entries[i].item);
+ if (i != entries_nr)
+ memcpy(&entries[i], &entries[entries_nr], sizeof(*entries));
+ /*
+ * Stick around 30 seconds in case a new credential
+ * shows up (e.g., because we just removed a failed
+ * one, and we will soon get the correct one).
+ */
+ wait_for_entry_until = now + 30;
+ }
+ else {
+ if (entries[i].expiration < next)
+ next = entries[i].expiration;
+ i++;
+ }
+ }
+
+ if (!entries_nr) {
+ if (wait_for_entry_until <= now)
+ return 0;
+ next = wait_for_entry_until;
+ }
+
+ return next - now;
+}
+
+static int read_request(FILE *fh, struct credential *c,
+ struct strbuf *action, int *timeout)
+{
+ static struct strbuf item = STRBUF_INIT;
+ const char *p;
+
+ strbuf_getline_lf(&item, fh);
+ if (!skip_prefix(item.buf, "action=", &p))
+ return error("client sent bogus action line: %s", item.buf);
+ strbuf_addstr(action, p);
+
+ strbuf_getline_lf(&item, fh);
+ if (!skip_prefix(item.buf, "timeout=", &p))
+ return error("client sent bogus timeout line: %s", item.buf);
+ *timeout = atoi(p);
+
+ if (credential_read(c, fh) < 0)
+ return -1;
+ return 0;
+}
+
+static void serve_one_client(FILE *in, FILE *out)
+{
+ struct credential c = CREDENTIAL_INIT;
+ struct strbuf action = STRBUF_INIT;
+ int timeout = -1;
+
+ if (read_request(in, &c, &action, &timeout) < 0)
+ /* ignore error */ ;
+ else if (!strcmp(action.buf, "get")) {
+ struct credential_cache_entry *e = lookup_credential(&c);
+ if (e) {
+ fprintf(out, "username=%s\n", e->item.username);
+ fprintf(out, "password=%s\n", e->item.password);
+ }
+ }
+ else if (!strcmp(action.buf, "exit")) {
+ /*
+ * It's important that we clean up our socket first, and then
+ * signal the client only once we have finished the cleanup.
+ * Calling exit() directly does this, because we clean up in
+ * our atexit() handler, and then signal the client when our
+ * process actually ends, which closes the socket and gives
+ * them EOF.
+ */
+ exit(0);
+ }
+ else if (!strcmp(action.buf, "erase"))
+ remove_credential(&c);
+ else if (!strcmp(action.buf, "store")) {
+ if (timeout < 0)
+ warning("cache client didn't specify a timeout");
+ else if (!c.username || !c.password)
+ warning("cache client gave us a partial credential");
+ else {
+ remove_credential(&c);
+ cache_credential(&c, timeout);
+ }
+ }
+ else
+ warning("cache client sent unknown action: %s", action.buf);
+
+ credential_clear(&c);
+ strbuf_release(&action);
+}
+
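+/*
+ * Wait for and serve at most one client; returns 0 when the daemon
+ * should shut down, 1 to keep looping.
+ */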
+static int serve_cache_loop(int fd)
+{
+ struct pollfd pfd;
+ timestamp_t wakeup;
+
+ wakeup = check_expirations();
+ if (!wakeup)
+ return 0;
+
+ pfd.fd = fd;
+ pfd.events = POLLIN;
+ if (poll(&pfd, 1, 1000 * wakeup) < 0) {
+ if (errno != EINTR)
+ die_errno("poll failed");
+ return 1;
+ }
+
+ if (pfd.revents & POLLIN) {
+ int client, client2;
+ FILE *in, *out;
+
+ client = accept(fd, NULL, NULL);
+ if (client < 0) {
+ warning_errno("accept failed");
+ return 1;
+ }
+ client2 = dup(client);
+ if (client2 < 0) {
+ warning_errno("dup failed");
+ close(client);
+ return 1;
+ }
+
+ in = xfdopen(client, "r");
+ out = xfdopen(client2, "w");
+ serve_one_client(in, out);
+ fclose(in);
+ fclose(out);
+ }
+ return 1;
+}
+
+static void serve_cache(const char *socket_path, int debug)
+{
+ struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;
+ int fd;
+
+ fd = unix_stream_listen(socket_path, &opts);
+ if (fd < 0)
+ die_errno("unable to bind to '%s'", socket_path);
+
+ printf("ok\n");
+ fclose(stdout);
+ if (!debug) {
+ if (!freopen("/dev/null", "w", stderr))
+ die_errno("unable to point stderr to /dev/null");
+ }
+
+ while (serve_cache_loop(fd))
+ ; /* nothing */
+
+ close(fd);
+}
+
+static const char permissions_advice[] = N_(
+"The permissions on your socket directory are too loose; other\n"
+"users may be able to read your cached credentials. Consider running:\n"
+"\n"
+" chmod 0700 %s");
+static void init_socket_directory(const char *path)
+{
+ struct stat st;
+ char *path_copy = xstrdup(path);
+ char *dir = dirname(path_copy);
+
+ if (!stat(dir, &st)) {
+ if (st.st_mode & 077)
+ die(_(permissions_advice), dir);
+ } else {
+ /*
+ * We must be sure to create the directory with the correct mode,
+ * not just chmod it after the fact; otherwise, there is a race
+ * condition in which somebody can chdir to it, sleep, then try to open
+ * our protected socket.
+ */
+ if (safe_create_leading_directories_const(dir) < 0)
+ die_errno("unable to create directories for '%s'", dir);
+ if (mkdir(dir, 0700) < 0)
+ die_errno("unable to mkdir '%s'", dir);
+ }
+
+ if (chdir(dir))
+ /*
+ * We don't actually care what our cwd is; we chdir here just to
+ * be a friendly daemon and avoid tying up our original cwd.
+ * If this fails, it's OK to just continue without that benefit.
+ */
+ ;
+
+ free(path_copy);
+}
+
+int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
+{
+ struct tempfile *socket_file;
+ const char *socket_path;
+ int ignore_sighup = 0;
+ static const char *usage[] = {
+ "git credential-cache--daemon [--debug] <socket-path>",
+ NULL
+ };
+ int debug = 0;
+ const struct option options[] = {
+ OPT_BOOL(0, "debug", &debug,
+ N_("print debugging messages to stderr")),
+ OPT_END()
+ };
+
+ git_config_get_bool("credentialcache.ignoresighup", &ignore_sighup);
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ socket_path = argv[0];
+
+ if (!socket_path)
+ usage_with_options(usage, options);
+
+ if (!is_absolute_path(socket_path))
+ die("socket directory must be an absolute path");
+
+ init_socket_directory(socket_path);
+ socket_file = register_tempfile(socket_path);
+
+ if (ignore_sighup)
+ signal(SIGHUP, SIG_IGN);
+
+ serve_cache(socket_path, debug);
+ delete_tempfile(&socket_file);
+
+ return 0;
+}
+
+#else
+
+int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
+{
+ const char * const usage[] = {
+ "git credential-cache--daemon [--debug] <socket-path>",
+ "",
+ "credential-cache--daemon is disabled in this build of Git",
+ NULL
+ };
+ struct option options[] = { OPT_END() };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ die(_("credential-cache--daemon unavailable; no unix socket support"));
+}
+
+#endif /* NO_UNIX_SOCKETS */
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
new file mode 100644
index 0000000..78c02ad
--- /dev/null
+++ b/builtin/credential-cache.c
@@ -0,0 +1,183 @@
+#include "builtin.h"
+#include "parse-options.h"
+
+#ifndef NO_UNIX_SOCKETS
+
+#include "credential.h"
+#include "string-list.h"
+#include "unix-socket.h"
+#include "run-command.h"
+
+#define FLAG_SPAWN 0x1
+#define FLAG_RELAY 0x2
+
+#ifdef GIT_WINDOWS_NATIVE
+
+static int connection_closed(int error)
+{
+ return (error == EINVAL);
+}
+
+static int connection_fatally_broken(int error)
+{
+ return (error != ENOENT) && (error != ENETDOWN);
+}
+
+#else
+
+static int connection_closed(int error)
+{
+ return (error == ECONNRESET);
+}
+
+static int connection_fatally_broken(int error)
+{
+ return (error != ENOENT) && (error != ECONNREFUSED);
+}
+
+#endif
+
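+/*
+ * Send the request to the daemon and relay its reply to stdout.
+ * Returns -1 if the socket could not be connected, otherwise whether
+ * any reply data was received.
+ */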
+static int send_request(const char *socket, const struct strbuf *out)
+{
+ int got_data = 0;
+ int fd = unix_stream_connect(socket, 0);
+
+ if (fd < 0)
+ return -1;
+
+ if (write_in_full(fd, out->buf, out->len) < 0)
+ die_errno("unable to write to cache daemon");
+ shutdown(fd, SHUT_WR);
+
+ while (1) {
+ char in[1024];
+ int r;
+
+ r = read_in_full(fd, in, sizeof(in));
+ if (r == 0 || (r < 0 && connection_closed(errno)))
+ break;
+ if (r < 0)
+ die_errno("read error from cache daemon");
+ write_or_die(1, in, r);
+ got_data = 1;
+ }
+ close(fd);
+ return got_data;
+}
+
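+/* Start credential-cache--daemon on the socket and wait for its "ok". */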
+static void spawn_daemon(const char *socket)
+{
+ struct child_process daemon = CHILD_PROCESS_INIT;
+ char buf[128];
+ int r;
+
+ strvec_pushl(&daemon.args,
+ "credential-cache--daemon", socket,
+ NULL);
+ daemon.git_cmd = 1;
+ daemon.no_stdin = 1;
+ daemon.out = -1;
+
+ if (start_command(&daemon))
+ die_errno("unable to start cache daemon");
+ r = read_in_full(daemon.out, buf, sizeof(buf));
+ if (r < 0)
+ die_errno("unable to read result code from cache daemon");
+ if (r != 3 || memcmp(buf, "ok\n", 3))
+ die("cache daemon did not start: %.*s", r, buf);
+ close(daemon.out);
+}
+
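+/*
+ * Send one request to the daemon, optionally relaying a credential from
+ * stdin (FLAG_RELAY) and spawning the daemon on demand (FLAG_SPAWN).
+ */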
+static void do_cache(const char *socket, const char *action, int timeout,
+ int flags)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "action=%s\n", action);
+ strbuf_addf(&buf, "timeout=%d\n", timeout);
+ if (flags & FLAG_RELAY) {
+ if (strbuf_read(&buf, 0, 0) < 0)
+ die_errno("unable to relay credential");
+ }
+
+ if (send_request(socket, &buf) < 0) {
+ if (connection_fatally_broken(errno))
+ die_errno("unable to connect to cache daemon");
+ if (flags & FLAG_SPAWN) {
+ spawn_daemon(socket);
+ if (send_request(socket, &buf) < 0)
+ die_errno("unable to connect to cache daemon");
+ }
+ }
+ strbuf_release(&buf);
+}
+
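+/*
+ * Prefer the legacy ~/.git-credential-cache directory if it already
+ * exists; otherwise fall back to the XDG cache location.
+ */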
+static char *get_socket_path(void)
+{
+ struct stat sb;
+ char *old_dir, *socket;
+ old_dir = interpolate_path("~/.git-credential-cache", 0);
+ if (old_dir && !stat(old_dir, &sb) && S_ISDIR(sb.st_mode))
+ socket = xstrfmt("%s/socket", old_dir);
+ else
+ socket = xdg_cache_home("credential/socket");
+ free(old_dir);
+ return socket;
+}
+
+int cmd_credential_cache(int argc, const char **argv, const char *prefix)
+{
+ char *socket_path = NULL;
+ int timeout = 900;
+ const char *op;
+ const char * const usage[] = {
+ "git credential-cache [<options>] <action>",
+ NULL
+ };
+ struct option options[] = {
+ OPT_INTEGER(0, "timeout", &timeout,
+ "number of seconds to cache credentials"),
+ OPT_STRING(0, "socket", &socket_path, "path",
+ "path of cache-daemon socket"),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ if (!argc)
+ usage_with_options(usage, options);
+ op = argv[0];
+
+ if (!socket_path)
+ socket_path = get_socket_path();
+ if (!socket_path)
+ die("unable to find a suitable socket path; use --socket");
+
+ if (!strcmp(op, "exit"))
+ do_cache(socket_path, op, timeout, 0);
+ else if (!strcmp(op, "get") || !strcmp(op, "erase"))
+ do_cache(socket_path, op, timeout, FLAG_RELAY);
+ else if (!strcmp(op, "store"))
+ do_cache(socket_path, op, timeout, FLAG_RELAY|FLAG_SPAWN);
+ else
+ ; /* ignore unknown operation */
+
+ return 0;
+}
+
+#else
+
+int cmd_credential_cache(int argc, const char **argv, const char *prefix)
+{
+ const char * const usage[] = {
+ "git credential-cache [options] <action>",
+ "",
+ "credential-cache is disabled in this build of Git",
+ NULL
+ };
+ struct option options[] = { OPT_END() };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ die(_("credential-cache unavailable; no unix socket support"));
+}
+
+#endif /* NO_UNIX_SOCKETS */
diff --git a/builtin/credential-store.c b/builtin/credential-store.c
new file mode 100644
index 0000000..62a4f3c
--- /dev/null
+++ b/builtin/credential-store.c
@@ -0,0 +1,199 @@
+#include "builtin.h"
+#include "config.h"
+#include "lockfile.h"
+#include "credential.h"
+#include "string-list.h"
+#include "parse-options.h"
+
+static struct lock_file credential_lock;
+
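+/*
+ * Scan the store file: call match_cb for the first credential matching
+ * 'c' and other_cb for every other line; returns whether a match was
+ * found.
+ */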
+static int parse_credential_file(const char *fn,
+ struct credential *c,
+ void (*match_cb)(struct credential *),
+ void (*other_cb)(struct strbuf *))
+{
+ FILE *fh;
+ struct strbuf line = STRBUF_INIT;
+ struct credential entry = CREDENTIAL_INIT;
+ int found_credential = 0;
+
+ fh = fopen(fn, "r");
+ if (!fh) {
+ if (errno != ENOENT && errno != EACCES)
+ die_errno("unable to open %s", fn);
+ return found_credential;
+ }
+
+ while (strbuf_getline_lf(&line, fh) != EOF) {
+ if (!credential_from_url_gently(&entry, line.buf, 1) &&
+ entry.username && entry.password &&
+ credential_match(c, &entry)) {
+ found_credential = 1;
+ if (match_cb) {
+ match_cb(&entry);
+ break;
+ }
+ }
+ else if (other_cb)
+ other_cb(&line);
+ }
+
+ credential_clear(&entry);
+ strbuf_release(&line);
+ fclose(fh);
+ return found_credential;
+}
+
+static void print_entry(struct credential *c)
+{
+ printf("username=%s\n", c->username);
+ printf("password=%s\n", c->password);
+}
+
+static void print_line(struct strbuf *buf)
+{
+ strbuf_addch(buf, '\n');
+ write_or_die(get_lock_file_fd(&credential_lock), buf->buf, buf->len);
+}
+
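+/*
+ * Rewrite the store file under a lock: optionally prepend 'extra', then
+ * copy over every line that does not match 'c'.
+ */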
+static void rewrite_credential_file(const char *fn, struct credential *c,
+ struct strbuf *extra)
+{
+ int timeout_ms = 1000;
+
+ git_config_get_int("credentialstore.locktimeoutms", &timeout_ms);
+ if (hold_lock_file_for_update_timeout(&credential_lock, fn, 0, timeout_ms) < 0)
+ die_errno(_("unable to get credential storage lock in %d ms"), timeout_ms);
+ if (extra)
+ print_line(extra);
+ parse_credential_file(fn, c, NULL, print_line);
+ if (commit_lock_file(&credential_lock) < 0)
+ die_errno("unable to write credential store");
+}
+
+static void store_credential_file(const char *fn, struct credential *c)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "%s://", c->protocol);
+ strbuf_addstr_urlencode(&buf, c->username, is_rfc3986_unreserved);
+ strbuf_addch(&buf, ':');
+ strbuf_addstr_urlencode(&buf, c->password, is_rfc3986_unreserved);
+ strbuf_addch(&buf, '@');
+ if (c->host)
+ strbuf_addstr_urlencode(&buf, c->host, is_rfc3986_unreserved);
+ if (c->path) {
+ strbuf_addch(&buf, '/');
+ strbuf_addstr_urlencode(&buf, c->path,
+ is_rfc3986_reserved_or_unreserved);
+ }
+
+ rewrite_credential_file(fn, c, &buf);
+ strbuf_release(&buf);
+}
+
+static void store_credential(const struct string_list *fns, struct credential *c)
+{
+ struct string_list_item *fn;
+
+ /*
+ * Sanity check that what we are storing is actually sensible.
+ * In particular, we can't make a URL without a protocol field.
+ * Without either a host or pathname (depending on the scheme),
+ * we have no primary key. And without a username and password,
+ * we are not actually storing a credential.
+ */
+ if (!c->protocol || !(c->host || c->path) || !c->username || !c->password)
+ return;
+
+ for_each_string_list_item(fn, fns)
+ if (!access(fn->string, F_OK)) {
+ store_credential_file(fn->string, c);
+ return;
+ }
+ /*
+ * Write credential to the filename specified by fns->items[0], thus
+ * creating it
+ */
+ if (fns->nr)
+ store_credential_file(fns->items[0].string, c);
+}
+
+static void remove_credential(const struct string_list *fns, struct credential *c)
+{
+ struct string_list_item *fn;
+
+ /*
+ * Sanity check that we actually have something to match
+ * against. The input we get is a restrictive pattern,
+ * so technically a blank credential means "erase everything".
+ * But it is too easy to accidentally send this, since it is equivalent
+ * to empty input. So explicitly disallow it, and require that the
+ * pattern have some actual content to match.
+ */
+ if (!c->protocol && !c->host && !c->path && !c->username)
+ return;
+ for_each_string_list_item(fn, fns)
+ if (!access(fn->string, F_OK))
+ rewrite_credential_file(fn->string, c, NULL);
+}
+
+static void lookup_credential(const struct string_list *fns, struct credential *c)
+{
+ struct string_list_item *fn;
+
+ for_each_string_list_item(fn, fns)
+ if (parse_credential_file(fn->string, c, print_entry, NULL))
+ return; /* Found credential */
+}
+
+int cmd_credential_store(int argc, const char **argv, const char *prefix)
+{
+ const char * const usage[] = {
+ "git credential-store [<options>] <action>",
+ NULL
+ };
+ const char *op;
+ struct credential c = CREDENTIAL_INIT;
+ struct string_list fns = STRING_LIST_INIT_DUP;
+ char *file = NULL;
+ struct option options[] = {
+ OPT_STRING(0, "file", &file, "path",
+ "fetch and store credentials in <path>"),
+ OPT_END()
+ };
+
+ umask(077);
+
+ argc = parse_options(argc, (const char **)argv, prefix, options, usage, 0);
+ if (argc != 1)
+ usage_with_options(usage, options);
+ op = argv[0];
+
+ if (file) {
+ string_list_append(&fns, file);
+ } else {
+ if ((file = interpolate_path("~/.git-credentials", 0)))
+ string_list_append_nodup(&fns, file);
+ file = xdg_config_home("credentials");
+ if (file)
+ string_list_append_nodup(&fns, file);
+ }
+ if (!fns.nr)
+ die("unable to set up default path; use --file");
+
+ if (credential_read(&c, stdin) < 0)
+ die("unable to read credential");
+
+ if (!strcmp(op, "get"))
+ lookup_credential(&fns, &c);
+ else if (!strcmp(op, "erase"))
+ remove_credential(&fns, &c);
+ else if (!strcmp(op, "store"))
+ store_credential(&fns, &c);
+ else
+ ; /* Ignore unknown operation. */
+
+ string_list_clear(&fns, 0);
+ return 0;
+}
diff --git a/builtin/credential.c b/builtin/credential.c
new file mode 100644
index 0000000..d7b304f
--- /dev/null
+++ b/builtin/credential.c
@@ -0,0 +1,34 @@
+#include "git-compat-util.h"
+#include "credential.h"
+#include "builtin.h"
+#include "config.h"
+
+static const char usage_msg[] =
+ "git credential (fill|approve|reject)";
+
+int cmd_credential(int argc, const char **argv, const char *prefix)
+{
+ const char *op;
+ struct credential c = CREDENTIAL_INIT;
+
+ git_config(git_default_config, NULL);
+
+ if (argc != 2 || !strcmp(argv[1], "-h"))
+ usage(usage_msg);
+ op = argv[1];
+
+ if (credential_read(&c, stdin) < 0)
+ die("unable to read credential from stdin");
+
+ if (!strcmp(op, "fill")) {
+ credential_fill(&c);
+ credential_write(&c, stdout);
+ } else if (!strcmp(op, "approve")) {
+ credential_approve(&c);
+ } else if (!strcmp(op, "reject")) {
+ credential_reject(&c);
+ } else {
+ usage(usage_msg);
+ }
+ return 0;
+}
diff --git a/builtin/describe.c b/builtin/describe.c
new file mode 100644
index 0000000..eea1e33
--- /dev/null
+++ b/builtin/describe.c
@@ -0,0 +1,686 @@
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "lockfile.h"
+#include "commit.h"
+#include "tag.h"
+#include "blob.h"
+#include "refs.h"
+#include "builtin.h"
+#include "exec-cmd.h"
+#include "parse-options.h"
+#include "revision.h"
+#include "diff.h"
+#include "hashmap.h"
+#include "strvec.h"
+#include "run-command.h"
+#include "object-store.h"
+#include "list-objects.h"
+#include "commit-slab.h"
+
+#define MAX_TAGS (FLAG_BITS - 1)
+
+define_commit_slab(commit_names, struct commit_name *);
+
+static const char * const describe_usage[] = {
+ N_("git describe [--all] [--tags] [--contains] [--abbrev=<n>] [<commit-ish>...]"),
+ N_("git describe [--all] [--tags] [--contains] [--abbrev=<n>] --dirty[=<mark>]"),
+ N_("git describe <blob>"),
+ NULL
+};
+
+static int debug; /* Display lots of verbose info */
+static int all; /* Any valid ref can be used */
+static int tags; /* Allow lightweight tags */
+static int longformat;
+static int first_parent;
+static int abbrev = -1; /* unspecified */
+static int max_candidates = 10;
+static struct hashmap names;
+static int have_util;
+static struct string_list patterns = STRING_LIST_INIT_NODUP;
+static struct string_list exclude_patterns = STRING_LIST_INIT_NODUP;
+static int always;
+static const char *suffix, *dirty, *broken;
+static struct commit_names commit_names;
+
+/* diff-index command arguments to check if working tree is dirty. */
+static const char *diff_index_args[] = {
+ "diff-index", "--quiet", "HEAD", "--", NULL
+};
+
+struct commit_name {
+ struct hashmap_entry entry;
+ struct object_id peeled;
+ struct tag *tag;
+ unsigned prio:2; /* annotated tag = 2, tag = 1, head = 0 */
+ unsigned name_checked:1;
+ unsigned misnamed:1;
+ struct object_id oid;
+ char *path;
+};
+
+static const char *prio_names[] = {
+ N_("head"), N_("lightweight"), N_("annotated"),
+};
+
+static int commit_name_neq(const void *cmp_data UNUSED,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *peeled)
+{
+ const struct commit_name *cn1, *cn2;
+
+ cn1 = container_of(eptr, const struct commit_name, entry);
+ cn2 = container_of(entry_or_key, const struct commit_name, entry);
+
+ return !oideq(&cn1->peeled, peeled ? peeled : &cn2->peeled);
+}
+
+static inline struct commit_name *find_commit_name(const struct object_id *peeled)
+{
+ return hashmap_get_entry_from_hash(&names, oidhash(peeled), peeled,
+ struct commit_name, entry);
+}
+
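+/*
+ * Return 1 if the candidate ref (prio, oid) should replace the name we
+ * already remember for this commit, e.g. because it has a higher
+ * priority or, among annotated tags, a newer tagger date.
+ */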
+static int replace_name(struct commit_name *e,
+ int prio,
+ const struct object_id *oid,
+ struct tag **tag)
+{
+ if (!e || e->prio < prio)
+ return 1;
+
+ if (e->prio == 2 && prio == 2) {
+ /* Multiple annotated tags point to the same commit.
+ * Select one to keep based upon their tagger date.
+ */
+ struct tag *t;
+
+ if (!e->tag) {
+ t = lookup_tag(the_repository, &e->oid);
+ if (!t || parse_tag(t))
+ return 1;
+ e->tag = t;
+ }
+
+ t = lookup_tag(the_repository, oid);
+ if (!t || parse_tag(t))
+ return 0;
+ *tag = t;
+
+ if (e->tag->date < t->date)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void add_to_known_names(const char *path,
+ const struct object_id *peeled,
+ int prio,
+ const struct object_id *oid)
+{
+ struct commit_name *e = find_commit_name(peeled);
+ struct tag *tag = NULL;
+ if (replace_name(e, prio, oid, &tag)) {
+ if (!e) {
+ e = xmalloc(sizeof(struct commit_name));
+ oidcpy(&e->peeled, peeled);
+ hashmap_entry_init(&e->entry, oidhash(peeled));
+ hashmap_add(&names, &e->entry);
+ e->path = NULL;
+ }
+ e->tag = tag;
+ e->prio = prio;
+ e->name_checked = 0;
+ e->misnamed = 0;
+ oidcpy(&e->oid, oid);
+ free(e->path);
+ e->path = xstrdup(path);
+ }
+}
+
+static int get_name(const char *path, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ int is_tag = 0;
+ struct object_id peeled;
+ int is_annotated, prio;
+ const char *path_to_match = NULL;
+
+ if (skip_prefix(path, "refs/tags/", &path_to_match)) {
+ is_tag = 1;
+ } else if (all) {
+ if ((exclude_patterns.nr || patterns.nr) &&
+ !skip_prefix(path, "refs/heads/", &path_to_match) &&
+ !skip_prefix(path, "refs/remotes/", &path_to_match)) {
+			/* Only accept references of a known type if there are match/exclude patterns */
+ return 0;
+ }
+ } else {
+ /* Reject anything outside refs/tags/ unless --all */
+ return 0;
+ }
+
+ /*
+	 * If we're given exclude patterns, first exclude any tag which matches
+	 * any of the exclude patterns.
+ */
+ if (exclude_patterns.nr) {
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &exclude_patterns) {
+ if (!wildmatch(item->string, path_to_match, 0))
+ return 0;
+ }
+ }
+
+ /*
+ * If we're given patterns, accept only tags which match at least one
+ * pattern.
+ */
+ if (patterns.nr) {
+ int found = 0;
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &patterns) {
+ if (!wildmatch(item->string, path_to_match, 0)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+ }
+
+ /* Is it annotated? */
+ if (!peel_iterated_oid(oid, &peeled)) {
+ is_annotated = !oideq(oid, &peeled);
+ } else {
+ oidcpy(&peeled, oid);
+ is_annotated = 0;
+ }
+
+ /*
+ * By default, we only use annotated tags, but with --tags
+ * we fall back to lightweight ones (even without --tags,
+ * we still remember lightweight ones, only to give hints
+ * in an error message). --all allows any refs to be used.
+ */
+ if (is_annotated)
+ prio = 2;
+ else if (is_tag)
+ prio = 1;
+ else
+ prio = 0;
+
+ add_to_known_names(all ? path + 5 : path + 10, &peeled, prio, oid);
+ return 0;
+}
+
+struct possible_tag {
+ struct commit_name *name;
+ int depth;
+ int found_order;
+ unsigned flag_within;
+};
+
+static int compare_pt(const void *a_, const void *b_)
+{
+ struct possible_tag *a = (struct possible_tag *)a_;
+ struct possible_tag *b = (struct possible_tag *)b_;
+ if (a->depth != b->depth)
+ return a->depth - b->depth;
+ if (a->found_order != b->found_order)
+ return a->found_order - b->found_order;
+ return 0;
+}
+
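+/*
+ * Finish counting the depth for the best candidate: keep walking until
+ * every commit still queued is already reachable from it; returns the
+ * number of commits traversed.
+ */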
+static unsigned long finish_depth_computation(
+ struct commit_list **list,
+ struct possible_tag *best)
+{
+ unsigned long seen_commits = 0;
+ while (*list) {
+ struct commit *c = pop_commit(list);
+ struct commit_list *parents = c->parents;
+ seen_commits++;
+ if (c->object.flags & best->flag_within) {
+ struct commit_list *a = *list;
+ while (a) {
+ struct commit *i = a->item;
+ if (!(i->object.flags & best->flag_within))
+ break;
+ a = a->next;
+ }
+ if (!a)
+ break;
+ } else
+ best->depth++;
+ while (parents) {
+ struct commit *p = parents->item;
+ parse_commit(p);
+ if (!(p->object.flags & SEEN))
+ commit_list_insert_by_date(p, list);
+ p->object.flags |= c->object.flags;
+ parents = parents->next;
+ }
+ }
+ return seen_commits;
+}
+
+static void append_name(struct commit_name *n, struct strbuf *dst)
+{
+ if (n->prio == 2 && !n->tag) {
+ n->tag = lookup_tag(the_repository, &n->oid);
+ if (!n->tag || parse_tag(n->tag))
+ die(_("annotated tag %s not available"), n->path);
+ }
+ if (n->tag && !n->name_checked) {
+ if (strcmp(n->tag->tag, all ? n->path + 5 : n->path)) {
+ warning(_("tag '%s' is externally known as '%s'"),
+ n->path, n->tag->tag);
+ n->misnamed = 1;
+ }
+ n->name_checked = 1;
+ }
+
+ if (n->tag) {
+ if (all)
+ strbuf_addstr(dst, "tags/");
+ strbuf_addstr(dst, n->tag->tag);
+ } else {
+ strbuf_addstr(dst, n->path);
+ }
+}
+
+static void append_suffix(int depth, const struct object_id *oid, struct strbuf *dst)
+{
+ strbuf_addf(dst, "-%d-g%s", depth, find_unique_abbrev(oid, abbrev));
+}
+
+static void describe_commit(struct object_id *oid, struct strbuf *dst)
+{
+ struct commit *cmit, *gave_up_on = NULL;
+ struct commit_list *list;
+ struct commit_name *n;
+ struct possible_tag all_matches[MAX_TAGS];
+ unsigned int match_cnt = 0, annotated_cnt = 0, cur_match;
+ unsigned long seen_commits = 0;
+ unsigned int unannotated_cnt = 0;
+
+ cmit = lookup_commit_reference(the_repository, oid);
+
+ n = find_commit_name(&cmit->object.oid);
+ if (n && (tags || all || n->prio == 2)) {
+ /*
+ * Exact match to an existing ref.
+ */
+ append_name(n, dst);
+ if (n->misnamed || longformat)
+ append_suffix(0, n->tag ? get_tagged_oid(n->tag) : oid, dst);
+ if (suffix)
+ strbuf_addstr(dst, suffix);
+ return;
+ }
+
+ if (!max_candidates)
+ die(_("no tag exactly matches '%s'"), oid_to_hex(&cmit->object.oid));
+ if (debug)
+ fprintf(stderr, _("No exact match on refs or tags, searching to describe\n"));
+
+ if (!have_util) {
+ struct hashmap_iter iter;
+ struct commit *c;
+ struct commit_name *n;
+
+ init_commit_names(&commit_names);
+ hashmap_for_each_entry(&names, &iter, n,
+ entry /* member name */) {
+ c = lookup_commit_reference_gently(the_repository,
+ &n->peeled, 1);
+ if (c)
+ *commit_names_at(&commit_names, c) = n;
+ }
+ have_util = 1;
+ }
+
+ list = NULL;
+ cmit->object.flags = SEEN;
+ commit_list_insert(cmit, &list);
+ while (list) {
+ struct commit *c = pop_commit(&list);
+ struct commit_list *parents = c->parents;
+ struct commit_name **slot;
+
+ seen_commits++;
+ slot = commit_names_peek(&commit_names, c);
+ n = slot ? *slot : NULL;
+ if (n) {
+ if (!tags && !all && n->prio < 2) {
+ unannotated_cnt++;
+ } else if (match_cnt < max_candidates) {
+ struct possible_tag *t = &all_matches[match_cnt++];
+ t->name = n;
+ t->depth = seen_commits - 1;
+ t->flag_within = 1u << match_cnt;
+ t->found_order = match_cnt;
+ c->object.flags |= t->flag_within;
+ if (n->prio == 2)
+ annotated_cnt++;
+ }
+ else {
+ gave_up_on = c;
+ break;
+ }
+ }
+ for (cur_match = 0; cur_match < match_cnt; cur_match++) {
+ struct possible_tag *t = &all_matches[cur_match];
+ if (!(c->object.flags & t->flag_within))
+ t->depth++;
+ }
+ /* Stop if last remaining path already covered by best candidate(s) */
+ if (annotated_cnt && !list) {
+ int best_depth = INT_MAX;
+ unsigned best_within = 0;
+ for (cur_match = 0; cur_match < match_cnt; cur_match++) {
+ struct possible_tag *t = &all_matches[cur_match];
+ if (t->depth < best_depth) {
+ best_depth = t->depth;
+ best_within = t->flag_within;
+ } else if (t->depth == best_depth) {
+ best_within |= t->flag_within;
+ }
+ }
+ if ((c->object.flags & best_within) == best_within) {
+ if (debug)
+ fprintf(stderr, _("finished search at %s\n"),
+ oid_to_hex(&c->object.oid));
+ break;
+ }
+ }
+ while (parents) {
+ struct commit *p = parents->item;
+ parse_commit(p);
+ if (!(p->object.flags & SEEN))
+ commit_list_insert_by_date(p, &list);
+ p->object.flags |= c->object.flags;
+ parents = parents->next;
+
+ if (first_parent)
+ break;
+ }
+ }
+
+ if (!match_cnt) {
+ struct object_id *cmit_oid = &cmit->object.oid;
+ if (always) {
+ strbuf_add_unique_abbrev(dst, cmit_oid, abbrev);
+ if (suffix)
+ strbuf_addstr(dst, suffix);
+ return;
+ }
+ if (unannotated_cnt)
+ die(_("No annotated tags can describe '%s'.\n"
+ "However, there were unannotated tags: try --tags."),
+ oid_to_hex(cmit_oid));
+ else
+ die(_("No tags can describe '%s'.\n"
+ "Try --always, or create some tags."),
+ oid_to_hex(cmit_oid));
+ }
+
+ QSORT(all_matches, match_cnt, compare_pt);
+
+ if (gave_up_on) {
+ commit_list_insert_by_date(gave_up_on, &list);
+ seen_commits--;
+ }
+ seen_commits += finish_depth_computation(&list, &all_matches[0]);
+ free_commit_list(list);
+
+ if (debug) {
+ static int label_width = -1;
+ if (label_width < 0) {
+ int i, w;
+ for (i = 0; i < ARRAY_SIZE(prio_names); i++) {
+ w = strlen(_(prio_names[i]));
+ if (label_width < w)
+ label_width = w;
+ }
+ }
+ for (cur_match = 0; cur_match < match_cnt; cur_match++) {
+ struct possible_tag *t = &all_matches[cur_match];
+ fprintf(stderr, " %-*s %8d %s\n",
+ label_width, _(prio_names[t->name->prio]),
+ t->depth, t->name->path);
+ }
+ fprintf(stderr, _("traversed %lu commits\n"), seen_commits);
+ if (gave_up_on) {
+ fprintf(stderr,
+ _("more than %i tags found; listed %i most recent\n"
+ "gave up search at %s\n"),
+ max_candidates, max_candidates,
+ oid_to_hex(&gave_up_on->object.oid));
+ }
+ }
+
+ append_name(all_matches[0].name, dst);
+ if (all_matches[0].name->misnamed || abbrev)
+ append_suffix(all_matches[0].depth, &cmit->object.oid, dst);
+ if (suffix)
+ strbuf_addstr(dst, suffix);
+}
+
+struct process_commit_data {
+ struct object_id current_commit;
+ struct object_id looking_for;
+ struct strbuf *dst;
+ struct rev_info *revs;
+};
+
+static void process_commit(struct commit *commit, void *data)
+{
+ struct process_commit_data *pcd = data;
+ pcd->current_commit = commit->object.oid;
+}
+
+static void process_object(struct object *obj, const char *path, void *data)
+{
+ struct process_commit_data *pcd = data;
+
+ if (oideq(&pcd->looking_for, &obj->oid) && !pcd->dst->len) {
+ reset_revision_walk();
+ describe_commit(&pcd->current_commit, pcd->dst);
+ strbuf_addf(pcd->dst, ":%s", path);
+ free_commit_list(pcd->revs->commits);
+ pcd->revs->commits = NULL;
+ }
+}
+
+static void describe_blob(struct object_id oid, struct strbuf *dst)
+{
+ struct rev_info revs;
+ struct strvec args = STRVEC_INIT;
+ struct process_commit_data pcd = { *null_oid(), oid, dst, &revs};
+
+ strvec_pushl(&args, "internal: The first arg is not parsed",
+ "--objects", "--in-commit-order", "--reverse", "HEAD",
+ NULL);
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ if (setup_revisions(args.nr, args.v, &revs, NULL) > 1)
+ BUG("setup_revisions could not handle all args?");
+
+ if (prepare_revision_walk(&revs))
+ die("revision walk setup failed");
+
+ traverse_commit_list(&revs, process_commit, process_object, &pcd);
+ reset_revision_walk();
+ release_revisions(&revs);
+}
+
+static void describe(const char *arg, int last_one)
+{
+ struct object_id oid;
+ struct commit *cmit;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (debug)
+ fprintf(stderr, _("describe %s\n"), arg);
+
+ if (get_oid(arg, &oid))
+ die(_("Not a valid object name %s"), arg);
+ cmit = lookup_commit_reference_gently(the_repository, &oid, 1);
+
+ if (cmit)
+ describe_commit(&oid, &sb);
+ else if (oid_object_info(the_repository, &oid, NULL) == OBJ_BLOB)
+ describe_blob(oid, &sb);
+ else
+ die(_("%s is neither a commit nor blob"), arg);
+
+ puts(sb.buf);
+
+ if (!last_one)
+ clear_commit_marks(cmit, -1);
+
+ strbuf_release(&sb);
+}
+
+int cmd_describe(int argc, const char **argv, const char *prefix)
+{
+ int contains = 0;
+ struct option options[] = {
+ OPT_BOOL(0, "contains", &contains, N_("find the tag that comes after the commit")),
+ OPT_BOOL(0, "debug", &debug, N_("debug search strategy on stderr")),
+ OPT_BOOL(0, "all", &all, N_("use any ref")),
+ OPT_BOOL(0, "tags", &tags, N_("use any tag, even unannotated")),
+ OPT_BOOL(0, "long", &longformat, N_("always use long format")),
+ OPT_BOOL(0, "first-parent", &first_parent, N_("only follow first parent")),
+ OPT__ABBREV(&abbrev),
+ OPT_SET_INT(0, "exact-match", &max_candidates,
+ N_("only output exact matches"), 0),
+ OPT_INTEGER(0, "candidates", &max_candidates,
+ N_("consider <n> most recent tags (default: 10)")),
+ OPT_STRING_LIST(0, "match", &patterns, N_("pattern"),
+ N_("only consider tags matching <pattern>")),
+ OPT_STRING_LIST(0, "exclude", &exclude_patterns, N_("pattern"),
+ N_("do not consider tags matching <pattern>")),
+ OPT_BOOL(0, "always", &always,
+ N_("show abbreviated commit object as fallback")),
+ {OPTION_STRING, 0, "dirty", &dirty, N_("mark"),
+ N_("append <mark> on dirty working tree (default: \"-dirty\")"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t) "-dirty"},
+ {OPTION_STRING, 0, "broken", &broken, N_("mark"),
+ N_("append <mark> on broken working tree (default: \"-broken\")"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t) "-broken"},
+ OPT_END(),
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, options, describe_usage, 0);
+ if (abbrev < 0)
+ abbrev = DEFAULT_ABBREV;
+
+ if (max_candidates < 0)
+ max_candidates = 0;
+ else if (max_candidates > MAX_TAGS)
+ max_candidates = MAX_TAGS;
+
+ save_commit_buffer = 0;
+
+ if (longformat && abbrev == 0)
+ die(_("options '%s' and '%s' cannot be used together"), "--long", "--abbrev=0");
+
+ if (contains) {
+ struct string_list_item *item;
+ struct strvec args;
+
+ strvec_init(&args);
+ strvec_pushl(&args, "name-rev",
+ "--peel-tag", "--name-only", "--no-undefined",
+ NULL);
+ if (always)
+ strvec_push(&args, "--always");
+ if (!all) {
+ strvec_push(&args, "--tags");
+ for_each_string_list_item(item, &patterns)
+ strvec_pushf(&args, "--refs=refs/tags/%s", item->string);
+ for_each_string_list_item(item, &exclude_patterns)
+ strvec_pushf(&args, "--exclude=refs/tags/%s", item->string);
+ }
+ if (argc)
+ strvec_pushv(&args, argv);
+ else
+ strvec_push(&args, "HEAD");
+ return cmd_name_rev(args.nr, args.v, prefix);
+ }
+
+ hashmap_init(&names, commit_name_neq, NULL, 0);
+ for_each_rawref(get_name, NULL);
+ if (!hashmap_get_size(&names) && !always)
+ die(_("No names found, cannot describe anything."));
+
+ if (argc == 0) {
+ if (broken) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ strvec_pushv(&cp.args, diff_index_args);
+ cp.git_cmd = 1;
+ cp.no_stdin = 1;
+ cp.no_stdout = 1;
+
+ if (!dirty)
+ dirty = "-dirty";
+
+ switch (run_command(&cp)) {
+ case 0:
+ suffix = NULL;
+ break;
+ case 1:
+ suffix = dirty;
+ break;
+ default:
+ /* diff-index aborted abnormally */
+ suffix = broken;
+ }
+ } else if (dirty) {
+ struct lock_file index_lock = LOCK_INIT;
+ struct rev_info revs;
+ struct strvec args = STRVEC_INIT;
+ int fd, result;
+
+ setup_work_tree();
+ repo_read_index(the_repository);
+ refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED,
+ NULL, NULL, NULL);
+ fd = repo_hold_locked_index(the_repository,
+ &index_lock, 0);
+ if (0 <= fd)
+ repo_update_index_if_able(the_repository, &index_lock);
+
+ repo_init_revisions(the_repository, &revs, prefix);
+ strvec_pushv(&args, diff_index_args);
+ if (setup_revisions(args.nr, args.v, &revs, NULL) != 1)
+ BUG("malformed internal diff-index command line");
+ result = run_diff_index(&revs, 0);
+
+ if (!diff_result_code(&revs.diffopt, result))
+ suffix = NULL;
+ else
+ suffix = dirty;
+ release_revisions(&revs);
+ }
+ describe("HEAD", 1);
+ } else if (dirty) {
+ die(_("option '%s' and commit-ishes cannot be used together"), "--dirty");
+ } else if (broken) {
+ die(_("option '%s' and commit-ishes cannot be used together"), "--broken");
+ } else {
+ while (argc-- > 0)
+ describe(*argv++, argc == 0);
+ }
+ return 0;
+}
diff --git a/builtin/diagnose.c b/builtin/diagnose.c
new file mode 100644
index 0000000..d52015c
--- /dev/null
+++ b/builtin/diagnose.c
@@ -0,0 +1,62 @@
+#include "builtin.h"
+#include "parse-options.h"
+#include "diagnose.h"
+
+static const char * const diagnose_usage[] = {
+ N_("git diagnose [(-o | --output-directory) <path>] [(-s | --suffix) <format>]\n"
+ " [--mode=<mode>]"),
+ NULL
+};
+
+int cmd_diagnose(int argc, const char **argv, const char *prefix)
+{
+ struct strbuf zip_path = STRBUF_INIT;
+ time_t now = time(NULL);
+ struct tm tm;
+ enum diagnose_mode mode = DIAGNOSE_STATS;
+ char *option_output = NULL;
+ char *option_suffix = "%Y-%m-%d-%H%M";
+ char *prefixed_filename;
+
+ const struct option diagnose_options[] = {
+ OPT_STRING('o', "output-directory", &option_output, N_("path"),
+ N_("specify a destination for the diagnostics archive")),
+ OPT_STRING('s', "suffix", &option_suffix, N_("format"),
+ N_("specify a strftime format suffix for the filename")),
+ OPT_CALLBACK_F(0, "mode", &mode, "(stats|all)",
+ N_("specify the content of the diagnostic archive"),
+ PARSE_OPT_NONEG, option_parse_diagnose),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, diagnose_options,
+ diagnose_usage, 0);
+
+ /* Prepare the path to put the result */
+ prefixed_filename = prefix_filename(prefix,
+ option_output ? option_output : "");
+ strbuf_addstr(&zip_path, prefixed_filename);
+ strbuf_complete(&zip_path, '/');
+
+ strbuf_addstr(&zip_path, "git-diagnostics-");
+ strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0);
+ strbuf_addstr(&zip_path, ".zip");
+
+ switch (safe_create_leading_directories(zip_path.buf)) {
+ case SCLD_OK:
+ case SCLD_EXISTS:
+ break;
+ default:
+ die_errno(_("could not create leading directories for '%s'"),
+ zip_path.buf);
+ }
+
+ /* Prepare diagnostics */
+ if (create_diagnostics_archive(&zip_path, mode))
+ die_errno(_("unable to create diagnostics archive %s"),
+ zip_path.buf);
+
+ free(prefixed_filename);
+ strbuf_release(&zip_path);
+ return 0;
+}
diff --git a/builtin/diff-files.c b/builtin/diff-files.c
new file mode 100644
index 0000000..dc991f7
--- /dev/null
+++ b/builtin/diff-files.c
@@ -0,0 +1,88 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "cache.h"
+#include "config.h"
+#include "diff.h"
+#include "diff-merges.h"
+#include "commit.h"
+#include "revision.h"
+#include "builtin.h"
+#include "submodule.h"
+
+static const char diff_files_usage[] =
+"git diff-files [-q] [-0 | -1 | -2 | -3 | -c | --cc] [<common-diff-options>] [<path>...]"
+"\n"
+COMMON_DIFF_OPTIONS_HELP;
+
+int cmd_diff_files(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info rev;
+ int result;
+ unsigned options = 0;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(diff_files_usage);
+
+ git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
+ repo_init_revisions(the_repository, &rev, prefix);
+ rev.abbrev = 0;
+
+ /*
+ * Consider "intent-to-add" files as new by default, unless
+ * explicitly specified in the command line or anywhere else.
+ */
+ rev.diffopt.ita_invisible_in_index = 1;
+
+ prefix = precompose_argv_prefix(argc, argv, prefix);
+
+ argc = setup_revisions(argc, argv, &rev, NULL);
+ while (1 < argc && argv[1][0] == '-') {
+ if (!strcmp(argv[1], "--base"))
+ rev.max_count = 1;
+ else if (!strcmp(argv[1], "--ours"))
+ rev.max_count = 2;
+ else if (!strcmp(argv[1], "--theirs"))
+ rev.max_count = 3;
+ else if (!strcmp(argv[1], "-q"))
+ options |= DIFF_SILENT_ON_REMOVED;
+ else
+ usage(diff_files_usage);
+ argv++; argc--;
+ }
+ if (!rev.diffopt.output_format)
+ rev.diffopt.output_format = DIFF_FORMAT_RAW;
+ rev.diffopt.rotate_to_strict = 1;
+
+ /*
+	 * Make sure there are NO revision (i.e. pending object) parameters,
+	 * rev.max_count is reasonable (0 <= n <= 3), and
+	 * there are no other revision filtering parameters.
+ */
+ if (rev.pending.nr ||
+ rev.min_age != -1 || rev.max_age != -1 ||
+ 3 < rev.max_count)
+ usage(diff_files_usage);
+
+ /*
+ * "diff-files --base -p" should not combine merges because it
+ * was not asked to. "diff-files -c -p" should not densify
+ * (the user should ask with "diff-files --cc" explicitly).
+ */
+ if (rev.max_count == -1 &&
+ (rev.diffopt.output_format & DIFF_FORMAT_PATCH))
+ diff_merges_set_dense_combined_if_unset(&rev);
+
+ if (repo_read_index_preload(the_repository, &rev.diffopt.pathspec, 0) < 0) {
+ perror("repo_read_index_preload");
+ result = -1;
+ goto cleanup;
+ }
+ result = run_diff_files(&rev, options);
+ result = diff_result_code(&rev.diffopt, result);
+cleanup:
+ release_revisions(&rev);
+ return result;
+}
diff --git a/builtin/diff-index.c b/builtin/diff-index.c
new file mode 100644
index 0000000..35dc9b2
--- /dev/null
+++ b/builtin/diff-index.c
@@ -0,0 +1,76 @@
+#include "cache.h"
+#include "config.h"
+#include "diff.h"
+#include "diff-merges.h"
+#include "commit.h"
+#include "revision.h"
+#include "builtin.h"
+#include "submodule.h"
+
+static const char diff_cache_usage[] =
+"git diff-index [-m] [--cached] [--merge-base] "
+"[<common-diff-options>] <tree-ish> [<path>...]"
+"\n"
+COMMON_DIFF_OPTIONS_HELP;
+
+int cmd_diff_index(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info rev;
+ unsigned int option = 0;
+ int i;
+ int result;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(diff_cache_usage);
+
+ git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
+ repo_init_revisions(the_repository, &rev, prefix);
+ rev.abbrev = 0;
+ prefix = precompose_argv_prefix(argc, argv, prefix);
+
+ /*
+ * We need (some of) diff for merges options (e.g., --cc), and we need
+ * to avoid conflict with our own meaning of "-m".
+ */
+ diff_merges_suppress_m_parsing();
+
+ argc = setup_revisions(argc, argv, &rev, NULL);
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+
+ if (!strcmp(arg, "--cached"))
+ option |= DIFF_INDEX_CACHED;
+ else if (!strcmp(arg, "--merge-base"))
+ option |= DIFF_INDEX_MERGE_BASE;
+ else if (!strcmp(arg, "-m"))
+ rev.match_missing = 1;
+ else
+ usage(diff_cache_usage);
+ }
+ if (!rev.diffopt.output_format)
+ rev.diffopt.output_format = DIFF_FORMAT_RAW;
+
+ rev.diffopt.rotate_to_strict = 1;
+
+ /*
+ * Make sure there is one revision (i.e. pending object),
+	 * and there are no revision filtering parameters.
+ */
+ if (rev.pending.nr != 1 ||
+ rev.max_count != -1 || rev.min_age != -1 || rev.max_age != -1)
+ usage(diff_cache_usage);
+ if (!(option & DIFF_INDEX_CACHED)) {
+ setup_work_tree();
+ if (repo_read_index_preload(the_repository, &rev.diffopt.pathspec, 0) < 0) {
+ perror("repo_read_index_preload");
+ return -1;
+ }
+ } else if (repo_read_index(the_repository) < 0) {
+ perror("repo_read_index");
+ return -1;
+ }
+ result = run_diff_index(&rev, option);
+ result = diff_result_code(&rev.diffopt, result);
+ release_revisions(&rev);
+ return result;
+}
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
new file mode 100644
index 0000000..25b853b
--- /dev/null
+++ b/builtin/diff-tree.c
@@ -0,0 +1,228 @@
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "diff.h"
+#include "commit.h"
+#include "log-tree.h"
+#include "builtin.h"
+#include "submodule.h"
+#include "repository.h"
+
+static struct rev_info log_tree_opt;
+
+static int diff_tree_commit_oid(const struct object_id *oid)
+{
+ struct commit *commit = lookup_commit_reference(the_repository, oid);
+ if (!commit)
+ return -1;
+ return log_tree_commit(&log_tree_opt, commit);
+}
+
+/* Diff one or more commits. */
+static int stdin_diff_commit(struct commit *commit, const char *p)
+{
+ struct object_id oid;
+ struct commit_list **pptr = NULL;
+
+ /* Graft the fake parents locally to the commit */
+ while (isspace(*p++) && !parse_oid_hex(p, &oid, &p)) {
+ struct commit *parent = lookup_commit(the_repository, &oid);
+ if (!pptr) {
+ /* Free the real parent list */
+ free_commit_list(commit->parents);
+ commit->parents = NULL;
+ pptr = &(commit->parents);
+ }
+ if (parent) {
+ pptr = &commit_list_insert(parent, pptr)->next;
+ }
+ }
+ return log_tree_commit(&log_tree_opt, commit);
+}
+
+/* Diff two trees. */
+static int stdin_diff_trees(struct tree *tree1, const char *p)
+{
+ struct object_id oid;
+ struct tree *tree2;
+ if (!isspace(*p++) || parse_oid_hex(p, &oid, &p) || *p)
+ return error("Need exactly two trees, separated by a space");
+ tree2 = lookup_tree(the_repository, &oid);
+ if (!tree2 || parse_tree(tree2))
+ return -1;
+ printf("%s %s\n", oid_to_hex(&tree1->object.oid),
+ oid_to_hex(&tree2->object.oid));
+ diff_tree_oid(&tree1->object.oid, &tree2->object.oid,
+ "", &log_tree_opt.diffopt);
+ log_tree_diff_flush(&log_tree_opt);
+ return 0;
+}
+
+static int diff_tree_stdin(char *line)
+{
+ int len = strlen(line);
+ struct object_id oid;
+ struct object *obj;
+ const char *p;
+
+ if (!len || line[len-1] != '\n')
+ return -1;
+ line[len-1] = 0;
+ if (parse_oid_hex(line, &oid, &p))
+ return -1;
+ obj = parse_object(the_repository, &oid);
+ if (!obj)
+ return -1;
+ if (obj->type == OBJ_COMMIT)
+ return stdin_diff_commit((struct commit *)obj, p);
+ if (obj->type == OBJ_TREE)
+ return stdin_diff_trees((struct tree *)obj, p);
+ error("Object %s is a %s, not a commit or tree",
+ oid_to_hex(&oid), type_name(obj->type));
+ return -1;
+}
+
+static const char diff_tree_usage[] =
+"git diff-tree [--stdin] [-m] [-s] [-v] [--no-commit-id] [--pretty]\n"
+" [-t] [-r] [-c | --cc] [--combined-all-paths] [--root] [--merge-base]\n"
+" [<common-diff-options>] <tree-ish> [<tree-ish>] [<path>...]\n"
+"\n"
+" -r diff recursively\n"
+" -c show combined diff for merge commits\n"
+" --cc show combined diff for merge commits removing uninteresting hunks\n"
+" --combined-all-paths\n"
+" show name of file in all parents for combined diffs\n"
+" --root include the initial commit as diff against /dev/null\n"
+COMMON_DIFF_OPTIONS_HELP;
+
+static void diff_tree_tweak_rev(struct rev_info *rev, struct setup_revision_opt *opt)
+{
+ if (!rev->diffopt.output_format) {
+ if (rev->dense_combined_merges)
+ rev->diffopt.output_format = DIFF_FORMAT_PATCH;
+ else
+ rev->diffopt.output_format = DIFF_FORMAT_RAW;
+ }
+}
+
+int cmd_diff_tree(int argc, const char **argv, const char *prefix)
+{
+ char line[1000];
+ struct object *tree1, *tree2;
+ static struct rev_info *opt = &log_tree_opt;
+ struct setup_revision_opt s_r_opt;
+ struct userformat_want w;
+ int read_stdin = 0;
+ int merge_base = 0;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(diff_tree_usage);
+
+ git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
+ repo_init_revisions(the_repository, opt, prefix);
+ if (repo_read_index(the_repository) < 0)
+ die(_("index file corrupt"));
+ opt->abbrev = 0;
+ opt->diff = 1;
+ opt->disable_stdin = 1;
+ memset(&s_r_opt, 0, sizeof(s_r_opt));
+ s_r_opt.tweak = diff_tree_tweak_rev;
+
+ prefix = precompose_argv_prefix(argc, argv, prefix);
+ argc = setup_revisions(argc, argv, opt, &s_r_opt);
+
+ memset(&w, 0, sizeof(w));
+ userformat_find_requirements(NULL, &w);
+
+ if (!opt->show_notes_given && w.notes)
+ opt->show_notes = 1;
+ if (opt->show_notes)
+ load_display_notes(&opt->notes_opt);
+
+ while (--argc > 0) {
+ const char *arg = *++argv;
+
+ if (!strcmp(arg, "--stdin")) {
+ read_stdin = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--merge-base")) {
+ merge_base = 1;
+ continue;
+ }
+ usage(diff_tree_usage);
+ }
+
+ if (read_stdin && merge_base)
+ die(_("options '%s' and '%s' cannot be used together"), "--stdin", "--merge-base");
+ if (merge_base && opt->pending.nr != 2)
+ die(_("--merge-base only works with two commits"));
+
+ opt->diffopt.rotate_to_strict = 1;
+
+ /*
+ * NOTE! We expect "a..b" to expand to "^a b" but it is
+ * perfectly valid for revision range parser to yield "b ^a",
+ * which means the same thing. If we get the latter, i.e. the
+ * second one is marked UNINTERESTING, we recover the original
+ * order the user gave, i.e. "a..b", by swapping the trees.
+ */
+ switch (opt->pending.nr) {
+ case 0:
+ if (!read_stdin)
+ usage(diff_tree_usage);
+ break;
+ case 1:
+ tree1 = opt->pending.objects[0].item;
+ diff_tree_commit_oid(&tree1->oid);
+ break;
+ case 2:
+ tree1 = opt->pending.objects[0].item;
+ tree2 = opt->pending.objects[1].item;
+ if (merge_base) {
+ struct object_id oid;
+
+ diff_get_merge_base(opt, &oid);
+ tree1 = lookup_object(the_repository, &oid);
+ } else if (tree2->flags & UNINTERESTING) {
+ SWAP(tree2, tree1);
+ }
+ diff_tree_oid(&tree1->oid, &tree2->oid, "", &opt->diffopt);
+ log_tree_diff_flush(opt);
+ break;
+ }
+
+ if (read_stdin) {
+ int saved_nrl = 0;
+ int saved_dcctc = 0;
+
+ opt->diffopt.rotate_to_strict = 0;
+ opt->diffopt.no_free = 1;
+ if (opt->diffopt.detect_rename) {
+ if (!the_index.cache)
+ repo_read_index(the_repository);
+ opt->diffopt.setup |= DIFF_SETUP_USE_SIZE_CACHE;
+ }
+ while (fgets(line, sizeof(line), stdin)) {
+ struct object_id oid;
+
+ if (get_oid_hex(line, &oid)) {
+ fputs(line, stdout);
+ fflush(stdout);
+ }
+ else {
+ diff_tree_stdin(line);
+ if (saved_nrl < opt->diffopt.needed_rename_limit)
+ saved_nrl = opt->diffopt.needed_rename_limit;
+ if (opt->diffopt.degraded_cc_to_c)
+ saved_dcctc = 1;
+ }
+ }
+ opt->diffopt.degraded_cc_to_c = saved_dcctc;
+ opt->diffopt.needed_rename_limit = saved_nrl;
+ opt->diffopt.no_free = 0;
+ diff_free(&opt->diffopt);
+ }
+
+ return diff_result_code(&opt->diffopt, 0);
+}
diff --git a/builtin/diff.c b/builtin/diff.c
new file mode 100644
index 0000000..163f2c6
--- /dev/null
+++ b/builtin/diff.c
@@ -0,0 +1,618 @@
+/*
+ * Builtin "git diff"
+ *
+ * Copyright (c) 2006 Junio C Hamano
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "ewah/ewok.h"
+#include "lockfile.h"
+#include "color.h"
+#include "commit.h"
+#include "blob.h"
+#include "tag.h"
+#include "diff.h"
+#include "diff-merges.h"
+#include "diffcore.h"
+#include "revision.h"
+#include "log-tree.h"
+#include "builtin.h"
+#include "submodule.h"
+#include "oid-array.h"
+
+#define DIFF_NO_INDEX_EXPLICIT 1
+#define DIFF_NO_INDEX_IMPLICIT 2
+
+static const char builtin_diff_usage[] =
+"git diff [<options>] [<commit>] [--] [<path>...]\n"
+" or: git diff [<options>] --cached [--merge-base] [<commit>] [--] [<path>...]\n"
+" or: git diff [<options>] [--merge-base] <commit> [<commit>...] <commit> [--] [<path>...]\n"
+" or: git diff [<options>] <commit>...<commit> [--] [<path>...]\n"
+" or: git diff [<options>] <blob> <blob>\n"
+" or: git diff [<options>] --no-index [--] <path> <path>"
+"\n"
+COMMON_DIFF_OPTIONS_HELP;
+
+static const char *blob_path(struct object_array_entry *entry)
+{
+ return entry->path ? entry->path : entry->name;
+}
+
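+/*
+ * Queue a single filepair for the diff machinery, skipping identical
+ * blobs and honoring reversed diffs and the pathspec prefix.
+ */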
+static void stuff_change(struct diff_options *opt,
+ unsigned old_mode, unsigned new_mode,
+ const struct object_id *old_oid,
+ const struct object_id *new_oid,
+ int old_oid_valid,
+ int new_oid_valid,
+ const char *old_path,
+ const char *new_path)
+{
+ struct diff_filespec *one, *two;
+
+ if (!is_null_oid(old_oid) && !is_null_oid(new_oid) &&
+ oideq(old_oid, new_oid) && (old_mode == new_mode))
+ return;
+
+ if (opt->flags.reverse_diff) {
+ SWAP(old_mode, new_mode);
+ SWAP(old_oid, new_oid);
+ SWAP(old_path, new_path);
+ }
+
+ if (opt->prefix &&
+ (strncmp(old_path, opt->prefix, opt->prefix_length) ||
+ strncmp(new_path, opt->prefix, opt->prefix_length)))
+ return;
+
+ one = alloc_filespec(old_path);
+ two = alloc_filespec(new_path);
+ fill_filespec(one, old_oid, old_oid_valid, old_mode);
+ fill_filespec(two, new_oid, new_oid_valid, new_mode);
+
+ diff_queue(&diff_queued_diff, one, two);
+}
+
+static int builtin_diff_b_f(struct rev_info *revs,
+ int argc, const char **argv,
+ struct object_array_entry **blob)
+{
+	/* Blob vs file in the working tree */
+ struct stat st;
+ const char *path;
+
+ if (argc > 1)
+ usage(builtin_diff_usage);
+
+ GUARD_PATHSPEC(&revs->prune_data, PATHSPEC_FROMTOP | PATHSPEC_LITERAL);
+ path = revs->prune_data.items[0].match;
+
+ if (lstat(path, &st))
+ die_errno(_("failed to stat '%s'"), path);
+ if (!(S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)))
+ die(_("'%s': not a regular file or symlink"), path);
+
+ diff_set_mnemonic_prefix(&revs->diffopt, "o/", "w/");
+
+ if (blob[0]->mode == S_IFINVALID)
+ blob[0]->mode = canon_mode(st.st_mode);
+
+ stuff_change(&revs->diffopt,
+ blob[0]->mode, canon_mode(st.st_mode),
+ &blob[0]->item->oid, null_oid(),
+ 1, 0,
+ blob[0]->path ? blob[0]->path : path,
+ path);
+ diffcore_std(&revs->diffopt);
+ diff_flush(&revs->diffopt);
+ return 0;
+}
+
+static int builtin_diff_blobs(struct rev_info *revs,
+ int argc, const char **argv,
+ struct object_array_entry **blob)
+{
+ const unsigned mode = canon_mode(S_IFREG | 0644);
+
+ if (argc > 1)
+ usage(builtin_diff_usage);
+
+ if (blob[0]->mode == S_IFINVALID)
+ blob[0]->mode = mode;
+
+ if (blob[1]->mode == S_IFINVALID)
+ blob[1]->mode = mode;
+
+ stuff_change(&revs->diffopt,
+ blob[0]->mode, blob[1]->mode,
+ &blob[0]->item->oid, &blob[1]->item->oid,
+ 1, 1,
+ blob_path(blob[0]), blob_path(blob[1]));
+ diffcore_std(&revs->diffopt);
+ diff_flush(&revs->diffopt);
+ return 0;
+}
+
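+/*
+ * "git diff [--cached|--staged] [--merge-base] <tree-ish>": with --cached
+ * the named tree-ish is compared against the index, otherwise against the
+ * working tree, e.g. "git diff --cached HEAD" (the revision here is only
+ * an example).
+ */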
+static int builtin_diff_index(struct rev_info *revs,
+ int argc, const char **argv)
+{
+ unsigned int option = 0;
+ while (1 < argc) {
+ const char *arg = argv[1];
+ if (!strcmp(arg, "--cached") || !strcmp(arg, "--staged"))
+ option |= DIFF_INDEX_CACHED;
+ else if (!strcmp(arg, "--merge-base"))
+ option |= DIFF_INDEX_MERGE_BASE;
+ else
+ usage(builtin_diff_usage);
+ argv++; argc--;
+ }
+ /*
+ * Make sure there is one revision (i.e. pending object),
+ * and there are no revision filtering parameters.
+ */
+ if (revs->pending.nr != 1 ||
+ revs->max_count != -1 || revs->min_age != -1 ||
+ revs->max_age != -1)
+ usage(builtin_diff_usage);
+ if (!(option & DIFF_INDEX_CACHED)) {
+ setup_work_tree();
+ if (repo_read_index_preload(the_repository,
+ &revs->diffopt.pathspec, 0) < 0) {
+ perror("repo_read_index_preload");
+ return -1;
+ }
+ } else if (repo_read_index(the_repository) < 0) {
+ perror("repo_read_cache");
+ return -1;
+ }
+ return run_diff_index(revs, option);
+}
+
+static int builtin_diff_tree(struct rev_info *revs,
+ int argc, const char **argv,
+ struct object_array_entry *ent0,
+ struct object_array_entry *ent1)
+{
+ const struct object_id *(oid[2]);
+ struct object_id mb_oid;
+ int merge_base = 0;
+
+ while (1 < argc) {
+ const char *arg = argv[1];
+ if (!strcmp(arg, "--merge-base"))
+ merge_base = 1;
+ else
+ usage(builtin_diff_usage);
+ argv++; argc--;
+ }
+
+ if (merge_base) {
+ diff_get_merge_base(revs, &mb_oid);
+ oid[0] = &mb_oid;
+ oid[1] = &revs->pending.objects[1].item->oid;
+ } else {
+ int swap = 0;
+
+ /*
+ * We saw two trees, ent0 and ent1. If ent1 is uninteresting,
+ * swap them.
+ */
+ if (ent1->item->flags & UNINTERESTING)
+ swap = 1;
+ oid[swap] = &ent0->item->oid;
+ oid[1 - swap] = &ent1->item->oid;
+ }
+ diff_tree_oid(oid[0], oid[1], "", &revs->diffopt);
+ log_tree_diff_flush(revs);
+ return 0;
+}
+
+static int builtin_diff_combined(struct rev_info *revs,
+ int argc, const char **argv,
+ struct object_array_entry *ent,
+ int ents, int first_non_parent)
+{
+ struct oid_array parents = OID_ARRAY_INIT;
+ int i;
+
+ if (argc > 1)
+ usage(builtin_diff_usage);
+
+ if (first_non_parent < 0)
+ die(_("no merge given, only parents."));
+ if (first_non_parent >= ents)
+ BUG("first_non_parent out of range: %d", first_non_parent);
+
+ diff_merges_set_dense_combined_if_unset(revs);
+
+ for (i = 0; i < ents; i++) {
+ if (i != first_non_parent)
+ oid_array_append(&parents, &ent[i].item->oid);
+ }
+ diff_tree_combined(&ent[first_non_parent].item->oid, &parents, revs);
+ oid_array_clear(&parents);
+ return 0;
+}
+
+static void refresh_index_quietly(void)
+{
+ struct lock_file lock_file = LOCK_INIT;
+ int fd;
+
+ fd = repo_hold_locked_index(the_repository, &lock_file, 0);
+ if (fd < 0)
+ return;
+ discard_index(&the_index);
+ repo_read_index(the_repository);
+ refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL,
+ NULL);
+ repo_update_index_if_able(the_repository, &lock_file);
+}
+
+static int builtin_diff_files(struct rev_info *revs, int argc, const char **argv)
+{
+ unsigned int options = 0;
+
+ while (1 < argc && argv[1][0] == '-') {
+ if (!strcmp(argv[1], "--base"))
+ revs->max_count = 1;
+ else if (!strcmp(argv[1], "--ours"))
+ revs->max_count = 2;
+ else if (!strcmp(argv[1], "--theirs"))
+ revs->max_count = 3;
+ else if (!strcmp(argv[1], "-q"))
+ options |= DIFF_SILENT_ON_REMOVED;
+ else if (!strcmp(argv[1], "-h"))
+ usage(builtin_diff_usage);
+ else
+ return error(_("invalid option: %s"), argv[1]);
+ argv++; argc--;
+ }
+
+ /*
+ * "diff --base" should not combine merges because it was not
+ * asked to. "diff -c" should not densify (if the user wants
+ * a dense one, --cc can be explicitly asked for, or just rely
+ * on the default).
+ */
+ if (revs->max_count == -1 &&
+ (revs->diffopt.output_format & DIFF_FORMAT_PATCH))
+ diff_merges_set_dense_combined_if_unset(revs);
+
+ setup_work_tree();
+ if (repo_read_index_preload(the_repository, &revs->diffopt.pathspec,
+ 0) < 0) {
+ perror("repo_read_index_preload");
+ return -1;
+ }
+ return run_diff_files(revs, options);
+}
+
+struct symdiff {
+ struct bitmap *skip;
+ int warn;
+ const char *base, *left, *right;
+};
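+
+/*
+ * An illustrative example of the bookkeeping described below (the history
+ * is hypothetical): for "git diff A...B" where A and B have two merge
+ * bases M1 and M2, rev->pending typically holds { M1, M2, A, B }.
+ * symdiff_prepare() picks one base (say M1), marks M2 and A in the skip
+ * bitmap, records base/left/right as M1/A/B, and sets warn because there
+ * is more than one merge base.
+ */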
+
+/*
+ * Check for symmetric-difference arguments, and if present, arrange
+ * everything we need to know to handle them correctly. As a bonus,
+ * weed out all bogus range-based revision specifications: e.g.,
+ * "git diff A..B C..D" and "git diff A..B C" get rejected.
+ *
+ * For an actual symmetric diff, *symdiff is set this way:
+ *
+ * - its skip is non-NULL and marks *all* rev->pending.objects[i]
+ * indices that the caller should ignore (extra merge bases, of
+ * which there might be many, and A in A...B). Note that the
+ * chosen merge base and right side are NOT marked.
+ * - warn is set if there are multiple merge bases.
+ * - base, left, and right point to the names to use in a
+ * warning about multiple merge bases.
+ *
+ * If there is no symmetric diff argument, sym->skip is NULL and
+ * sym->warn is cleared. The remaining fields are not set.
+ */
+static void symdiff_prepare(struct rev_info *rev, struct symdiff *sym)
+{
+ int i, is_symdiff = 0, basecount = 0, othercount = 0;
+ int lpos = -1, rpos = -1, basepos = -1;
+ struct bitmap *map = NULL;
+
+ /*
+ * Use the whence fields to find merge bases and left and
+ * right parts of symmetric difference, so that we do not
+ * depend on the order that revisions are parsed. If there
+ * are any revs that aren't from these sources, we have a
+ * "git diff C A...B" or "git diff A...B C" case. Or we
+ * could even get "git diff A...B C...E", for instance.
+ *
+ * If we don't have just one merge base, we pick one
+ * at random.
+ *
+ * NB: REV_CMD_LEFT, REV_CMD_RIGHT are also used for A..B,
+ * so we must check for SYMMETRIC_LEFT too. The two arrays
+ * rev->pending.objects and rev->cmdline.rev are parallel.
+ */
+ for (i = 0; i < rev->cmdline.nr; i++) {
+ struct object *obj = rev->pending.objects[i].item;
+ switch (rev->cmdline.rev[i].whence) {
+ case REV_CMD_MERGE_BASE:
+ if (basepos < 0)
+ basepos = i;
+ basecount++;
+ break; /* do mark all bases */
+ case REV_CMD_LEFT:
+ if (lpos >= 0)
+ usage(builtin_diff_usage);
+ lpos = i;
+ if (obj->flags & SYMMETRIC_LEFT) {
+ is_symdiff = 1;
+ break; /* do mark A */
+ }
+ continue;
+ case REV_CMD_RIGHT:
+ if (rpos >= 0)
+ usage(builtin_diff_usage);
+ rpos = i;
+ continue; /* don't mark B */
+ case REV_CMD_PARENTS_ONLY:
+ case REV_CMD_REF:
+ case REV_CMD_REV:
+ othercount++;
+ continue;
+ }
+ if (!map)
+ map = bitmap_new();
+ bitmap_set(map, i);
+ }
+
+ /*
+ * Forbid any additional revs for both A...B and A..B.
+ */
+ if (lpos >= 0 && othercount > 0)
+ usage(builtin_diff_usage);
+
+ if (!is_symdiff) {
+ bitmap_free(map);
+ sym->warn = 0;
+ sym->skip = NULL;
+ return;
+ }
+
+ sym->left = rev->pending.objects[lpos].name;
+ sym->right = rev->pending.objects[rpos].name;
+ if (basecount == 0)
+ die(_("%s...%s: no merge base"), sym->left, sym->right);
+ sym->base = rev->pending.objects[basepos].name;
+ bitmap_unset(map, basepos); /* unmark the base we want */
+ sym->warn = basecount > 1;
+ sym->skip = map;
+}
+
+int cmd_diff(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct rev_info rev;
+ struct object_array ent = OBJECT_ARRAY_INIT;
+ int first_non_parent = -1;
+ int blobs = 0, paths = 0;
+ struct object_array_entry *blob[2];
+ int nongit = 0, no_index = 0;
+ int result = 0;
+ struct symdiff sdiff;
+
+ /*
+ * We could get N tree-ish in the rev.pending_objects list.
+ * Also there could be M blobs there, and P pathspecs. --cached may
+ * also be present.
+ *
+ * N=0, M=0:
+ * cache vs files (diff-files)
+ *
+ * N=0, M=0, --cached:
+ * HEAD vs cache (diff-index --cached)
+ *
+ * N=0, M=2:
+ * compare two random blobs. P must be zero.
+ *
+ * N=0, M=1, P=1:
+ * compare a blob with a working tree file.
+ *
+ * N=1, M=0:
+ * tree vs files (diff-index)
+ *
+ * N=1, M=0, --cached:
+ * tree vs cache (diff-index --cached)
+ *
+ * N=2, M=0:
+ * tree vs tree (diff-tree)
+ *
+ * N=0, M=0, P=2:
+ * compare two filesystem entities (aka --no-index).
+ *
+ * Other cases are errors.
+ */
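+
+/*
+ * A few illustrative invocations (revisions and paths are examples only)
+ * and the case each falls into:
+ *
+ *     git diff                         N=0, M=0         -> diff-files
+ *     git diff --cached HEAD           N=1, --cached    -> diff-index --cached
+ *     git diff v2.38.0 v2.39.0         N=2              -> diff-tree
+ *     git diff HEAD:COPYING COPYING    N=0, M=1, P=1    -> blob vs. file
+ */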
+
+ /* Were we asked to do --no-index explicitly? */
+ for (i = 1; i < argc; i++) {
+ if (!strcmp(argv[i], "--")) {
+ i++;
+ break;
+ }
+ if (!strcmp(argv[i], "--no-index"))
+ no_index = DIFF_NO_INDEX_EXPLICIT;
+ if (argv[i][0] != '-')
+ break;
+ }
+
+ prefix = setup_git_directory_gently(&nongit);
+
+ if (!nongit) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
+ if (!no_index) {
+ /*
+ * Treat git diff with at least one path outside of the
+ * repo the same as if the command had been executed
+ * outside of a git repository. In this case it behaves
+ * the same way as "git diff --no-index <a> <b>", which acts
+ * as a colourful "diff" replacement.
+ */
+ if (nongit || ((argc == i + 2) &&
+ (!path_inside_repo(prefix, argv[i]) ||
+ !path_inside_repo(prefix, argv[i + 1]))))
+ no_index = DIFF_NO_INDEX_IMPLICIT;
+ }
+
+ init_diff_ui_defaults();
+ git_config(git_diff_ui_config, NULL);
+ prefix = precompose_argv_prefix(argc, argv, prefix);
+
+ repo_init_revisions(the_repository, &rev, prefix);
+
+ /* Set up defaults that will apply to both no-index and regular diffs. */
+ rev.diffopt.stat_width = -1;
+ rev.diffopt.stat_graph_width = -1;
+ rev.diffopt.flags.allow_external = 1;
+ rev.diffopt.flags.allow_textconv = 1;
+
+ /* If this is a no-index diff, just run it and exit there. */
+ if (no_index)
+ exit(diff_no_index(&rev, no_index == DIFF_NO_INDEX_IMPLICIT,
+ argc, argv));
+
+
+ /*
+ * Otherwise, we are doing the usual "git" diff; set up any
+ * further defaults that apply to regular diffs.
+ */
+ rev.diffopt.skip_stat_unmatch = !!diff_auto_refresh_index;
+
+ /*
+ * Default to intent-to-add entries invisible in the
+ * index. This makes them show up as new files in diff-files
+ * and not at all in diff-cached.
+ */
+ rev.diffopt.ita_invisible_in_index = 1;
+
+ if (nongit)
+ die(_("Not a git repository"));
+ argc = setup_revisions(argc, argv, &rev, NULL);
+ if (!rev.diffopt.output_format) {
+ rev.diffopt.output_format = DIFF_FORMAT_PATCH;
+ diff_setup_done(&rev.diffopt);
+ }
+
+ rev.diffopt.flags.recursive = 1;
+ rev.diffopt.rotate_to_strict = 1;
+
+ setup_diff_pager(&rev.diffopt);
+
+ /*
+ * If we have --cached but do not have a pending object, then
+ * default to HEAD by hand. Eek.
+ */
+ if (!rev.pending.nr) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(arg, "--"))
+ break;
+ else if (!strcmp(arg, "--cached") ||
+ !strcmp(arg, "--staged")) {
+ add_head_to_pending(&rev);
+ if (!rev.pending.nr) {
+ struct tree *tree;
+ tree = lookup_tree(the_repository,
+ the_repository->hash_algo->empty_tree);
+ add_pending_object(&rev, &tree->object, "HEAD");
+ }
+ break;
+ }
+ }
+ }
+
+ symdiff_prepare(&rev, &sdiff);
+ for (i = 0; i < rev.pending.nr; i++) {
+ struct object_array_entry *entry = &rev.pending.objects[i];
+ struct object *obj = entry->item;
+ const char *name = entry->name;
+ int flags = (obj->flags & UNINTERESTING);
+ if (!obj->parsed)
+ obj = parse_object(the_repository, &obj->oid);
+ obj = deref_tag(the_repository, obj, NULL, 0);
+ if (!obj)
+ die(_("invalid object '%s' given."), name);
+ if (obj->type == OBJ_COMMIT)
+ obj = &get_commit_tree(((struct commit *)obj))->object;
+
+ if (obj->type == OBJ_TREE) {
+ if (sdiff.skip && bitmap_get(sdiff.skip, i))
+ continue;
+ obj->flags |= flags;
+ add_object_array(obj, name, &ent);
+ if (first_non_parent < 0 &&
+ (i >= rev.cmdline.nr || /* HEAD by hand. */
+ rev.cmdline.rev[i].whence != REV_CMD_PARENTS_ONLY))
+ first_non_parent = ent.nr - 1;
+ } else if (obj->type == OBJ_BLOB) {
+ if (2 <= blobs)
+ die(_("more than two blobs given: '%s'"), name);
+ blob[blobs] = entry;
+ blobs++;
+
+ } else {
+ die(_("unhandled object '%s' given."), name);
+ }
+ }
+ if (rev.prune_data.nr)
+ paths += rev.prune_data.nr;
+
+ /*
+ * Now, do the arguments look reasonable?
+ */
+ if (!ent.nr) {
+ switch (blobs) {
+ case 0:
+ result = builtin_diff_files(&rev, argc, argv);
+ break;
+ case 1:
+ if (paths != 1)
+ usage(builtin_diff_usage);
+ result = builtin_diff_b_f(&rev, argc, argv, blob);
+ break;
+ case 2:
+ if (paths)
+ usage(builtin_diff_usage);
+ result = builtin_diff_blobs(&rev, argc, argv, blob);
+ break;
+ default:
+ usage(builtin_diff_usage);
+ }
+ }
+ else if (blobs)
+ usage(builtin_diff_usage);
+ else if (ent.nr == 1)
+ result = builtin_diff_index(&rev, argc, argv);
+ else if (ent.nr == 2) {
+ if (sdiff.warn)
+ warning(_("%s...%s: multiple merge bases, using %s"),
+ sdiff.left, sdiff.right, sdiff.base);
+ result = builtin_diff_tree(&rev, argc, argv,
+ &ent.objects[0], &ent.objects[1]);
+ } else
+ result = builtin_diff_combined(&rev, argc, argv,
+ ent.objects, ent.nr,
+ first_non_parent);
+ result = diff_result_code(&rev.diffopt, result);
+ if (1 < rev.diffopt.skip_stat_unmatch)
+ refresh_index_quietly();
+ release_revisions(&rev);
+ UNLEAK(ent);
+ UNLEAK(blob);
+ return result;
+}
diff --git a/builtin/difftool.c b/builtin/difftool.c
new file mode 100644
index 0000000..d9b7622
--- /dev/null
+++ b/builtin/difftool.c
@@ -0,0 +1,778 @@
+/*
+ * "git difftool" builtin command
+ *
+ * This is a wrapper around the GIT_EXTERNAL_DIFF-compatible
+ * git-difftool--helper script.
+ *
+ * This script exports GIT_EXTERNAL_DIFF and GIT_PAGER for use by git.
+ * The GIT_DIFF* variables are exported for use by git-difftool--helper.
+ *
+ * Any arguments that are unknown to this script are forwarded to 'git diff'.
+ *
+ * Copyright (C) 2016 Johannes Schindelin
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "run-command.h"
+#include "exec-cmd.h"
+#include "parse-options.h"
+#include "strvec.h"
+#include "strbuf.h"
+#include "lockfile.h"
+#include "object-store.h"
+#include "dir.h"
+#include "entry.h"
+
+static int trust_exit_code;
+
+static const char *const builtin_difftool_usage[] = {
+ N_("git difftool [<options>] [<commit> [<commit>]] [--] [<path>...]"),
+ NULL
+};
+
+static int difftool_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "difftool.trustexitcode")) {
+ trust_exit_code = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+static int print_tool_help(void)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "mergetool", "--tool-help=diff", NULL);
+ return run_command(&cmd);
+}
+
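+/*
+ * Parse one header record of "git diff --raw --no-abbrev -z" output.  A
+ * typical record (object names abbreviated here for illustration) looks
+ * like
+ *
+ *     :100644 100755 <oid1> <oid2> M
+ *
+ * and is followed by the NUL-terminated path name(s), which the caller
+ * reads separately.
+ */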
+static int parse_index_info(char *p, int *mode1, int *mode2,
+ struct object_id *oid1, struct object_id *oid2,
+ char *status)
+{
+ if (*p != ':')
+ return error("expected ':', got '%c'", *p);
+ *mode1 = (int)strtol(p + 1, &p, 8);
+ if (*p != ' ')
+ return error("expected ' ', got '%c'", *p);
+ *mode2 = (int)strtol(p + 1, &p, 8);
+ if (*p != ' ')
+ return error("expected ' ', got '%c'", *p);
+ if (parse_oid_hex(++p, oid1, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
+ if (*p != ' ')
+ return error("expected ' ', got '%c'", *p);
+ if (parse_oid_hex(++p, oid2, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
+ if (*p != ' ')
+ return error("expected ' ', got '%c'", *p);
+ *status = *++p;
+ if (!*status)
+ return error("missing status");
+ if (p[1] && !isdigit(p[1]))
+ return error("unexpected trailer: '%s'", p + 1);
+ return 0;
+}
+
+/*
+ * Remove any trailing slash from $workdir
+ * before starting to avoid double slashes in symlink targets.
+ */
+static void add_path(struct strbuf *buf, size_t base_len, const char *path)
+{
+ strbuf_setlen(buf, base_len);
+ if (buf->len && buf->buf[buf->len - 1] != '/')
+ strbuf_addch(buf, '/');
+ strbuf_addstr(buf, path);
+}
+
+/*
+ * Determine whether we can simply reuse the file in the worktree.
+ */
+static int use_wt_file(const char *workdir, const char *name,
+ struct object_id *oid)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct stat st;
+ int use = 0;
+
+ strbuf_addstr(&buf, workdir);
+ add_path(&buf, buf.len, name);
+
+ if (!lstat(buf.buf, &st) && !S_ISLNK(st.st_mode)) {
+ struct object_id wt_oid;
+ int fd = open(buf.buf, O_RDONLY);
+
+ if (fd >= 0 &&
+ !index_fd(&the_index, &wt_oid, fd, &st, OBJ_BLOB, name, 0)) {
+ if (is_null_oid(oid)) {
+ oidcpy(oid, &wt_oid);
+ use = 1;
+ } else if (oideq(oid, &wt_oid))
+ use = 1;
+ }
+ }
+
+ strbuf_release(&buf);
+
+ return use;
+}
+
+struct working_tree_entry {
+ struct hashmap_entry entry;
+ char path[FLEX_ARRAY];
+};
+
+static int working_tree_entry_cmp(const void *cmp_data UNUSED,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata UNUSED)
+{
+ const struct working_tree_entry *a, *b;
+
+ a = container_of(eptr, const struct working_tree_entry, entry);
+ b = container_of(entry_or_key, const struct working_tree_entry, entry);
+
+ return strcmp(a->path, b->path);
+}
+
+/*
+ * The `left` and `right` entries hold paths for the symlinks hashmap,
+ * and a SHA-1 surrounded by brief text for submodules.
+ */
+struct pair_entry {
+ struct hashmap_entry entry;
+ char left[PATH_MAX], right[PATH_MAX];
+ const char path[FLEX_ARRAY];
+};
+
+static int pair_cmp(const void *cmp_data UNUSED,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata UNUSED)
+{
+ const struct pair_entry *a, *b;
+
+ a = container_of(eptr, const struct pair_entry, entry);
+ b = container_of(entry_or_key, const struct pair_entry, entry);
+
+ return strcmp(a->path, b->path);
+}
+
+static void add_left_or_right(struct hashmap *map, const char *path,
+ const char *content, int is_right)
+{
+ struct pair_entry *e, *existing;
+
+ FLEX_ALLOC_STR(e, path, path);
+ hashmap_entry_init(&e->entry, strhash(path));
+ existing = hashmap_get_entry(map, e, entry, NULL);
+ if (existing) {
+ free(e);
+ e = existing;
+ } else {
+ e->left[0] = e->right[0] = '\0';
+ hashmap_add(map, &e->entry);
+ }
+ strlcpy(is_right ? e->right : e->left, content, PATH_MAX);
+}
+
+struct path_entry {
+ struct hashmap_entry entry;
+ char path[FLEX_ARRAY];
+};
+
+static int path_entry_cmp(const void *cmp_data UNUSED,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *key)
+{
+ const struct path_entry *a, *b;
+
+ a = container_of(eptr, const struct path_entry, entry);
+ b = container_of(entry_or_key, const struct path_entry, entry);
+
+ return strcmp(a->path, key ? key : b->path);
+}
+
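+/*
+ * Refresh the temporary index at index_path against the given working
+ * directory and collect the paths that "git diff-files" still reports as
+ * modified into the result hashmap.  Used below to decide which files the
+ * external tool touched while the dir-diff was running.
+ */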
+static void changed_files(struct hashmap *result, const char *index_path,
+ const char *workdir)
+{
+ struct child_process update_index = CHILD_PROCESS_INIT;
+ struct child_process diff_files = CHILD_PROCESS_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ const char *git_dir = absolute_path(get_git_dir());
+ FILE *fp;
+
+ strvec_pushl(&update_index.args,
+ "--git-dir", git_dir, "--work-tree", workdir,
+ "update-index", "--really-refresh", "-q",
+ "--unmerged", NULL);
+ update_index.no_stdin = 1;
+ update_index.no_stdout = 1;
+ update_index.no_stderr = 1;
+ update_index.git_cmd = 1;
+ update_index.use_shell = 0;
+ update_index.clean_on_exit = 1;
+ update_index.dir = workdir;
+ strvec_pushf(&update_index.env, "GIT_INDEX_FILE=%s", index_path);
+ /* Ignore any errors of update-index */
+ run_command(&update_index);
+
+ strvec_pushl(&diff_files.args,
+ "--git-dir", git_dir, "--work-tree", workdir,
+ "diff-files", "--name-only", "-z", NULL);
+ diff_files.no_stdin = 1;
+ diff_files.git_cmd = 1;
+ diff_files.use_shell = 0;
+ diff_files.clean_on_exit = 1;
+ diff_files.out = -1;
+ diff_files.dir = workdir;
+ strvec_pushf(&diff_files.env, "GIT_INDEX_FILE=%s", index_path);
+ if (start_command(&diff_files))
+ die("could not obtain raw diff");
+ fp = xfdopen(diff_files.out, "r");
+ while (!strbuf_getline_nul(&buf, fp)) {
+ struct path_entry *entry;
+ FLEX_ALLOC_STR(entry, path, buf.buf);
+ hashmap_entry_init(&entry->entry, strhash(buf.buf));
+ hashmap_add(result, &entry->entry);
+ }
+ fclose(fp);
+ if (finish_command(&diff_files))
+ die("diff-files did not exit properly");
+ strbuf_release(&buf);
+}
+
+static int ensure_leading_directories(char *path)
+{
+ switch (safe_create_leading_directories(path)) {
+ case SCLD_OK:
+ case SCLD_EXISTS:
+ return 0;
+ default:
+ return error(_("could not create leading directories "
+ "of '%s'"), path);
+ }
+}
+
+/*
+ * Unconditional writing of a plain regular file is what
+ * "git difftool --dir-diff" wants to do for symlinks. We are preparing two
+ * temporary directories to be fed to a Git-unaware tool that knows how to
+ * show a diff of two directories (e.g. "diff -r A B").
+ *
+ * Because the tool is Git-unaware, if a symbolic link appears in either of
+ * these temporary directories, it will try to dereference and show the
+ * difference of the target of the symbolic link, which is not what we want,
+ * as the goal of the dir-diff mode is to produce an output that is logically
+ * equivalent to what "git diff" produces.
+ *
+ * Most importantly, we want to get textual comparison of the result of the
+ * readlink(2). get_symlink() provides that---it returns the contents of
+ * the symlink that gets written to a regular file to force the external tool
+ * to compare the readlink(2) result as text, even on a filesystem that is
+ * capable of doing a symbolic link.
+ */
+static char *get_symlink(const struct object_id *oid, const char *path)
+{
+ char *data;
+ if (is_null_oid(oid)) {
+ /* The symlink is unknown to Git so read from the filesystem */
+ struct strbuf link = STRBUF_INIT;
+ if (has_symlinks) {
+ if (strbuf_readlink(&link, path, strlen(path)))
+ die(_("could not read symlink %s"), path);
+ } else if (strbuf_read_file(&link, path, 128))
+ die(_("could not read symlink file %s"), path);
+
+ data = strbuf_detach(&link, NULL);
+ } else {
+ enum object_type type;
+ unsigned long size;
+ data = read_object_file(oid, &type, &size);
+ if (!data)
+ die(_("could not read object %s for symlink %s"),
+ oid_to_hex(oid), path);
+ }
+
+ return data;
+}
+
+static int checkout_path(unsigned mode, struct object_id *oid,
+ const char *path, const struct checkout *state)
+{
+ struct cache_entry *ce;
+ int ret;
+
+ ce = make_transient_cache_entry(mode, oid, path, 0, NULL);
+ ret = checkout_entry(ce, state, NULL, NULL);
+
+ discard_cache_entry(ce);
+ return ret;
+}
+
+static void write_file_in_directory(struct strbuf *dir, size_t dir_len,
+ const char *path, const char *content)
+{
+ add_path(dir, dir_len, path);
+ ensure_leading_directories(dir->buf);
+ unlink(dir->buf);
+ write_file(dir->buf, "%s", content);
+}
+
+/*
+ * Write the file contents for the left and right sides of the difftool
+ * dir-diff representation for submodules and symlinks. Symlinks and submodules
+ * are written as regular text files so that external diff tools can diff them
+ * as text files, resulting in behavior that is analogous to what "git diff"
+ * displays for symlink and submodule diffs.
+ */
+static void write_standin_files(struct pair_entry *entry,
+ struct strbuf *ldir, size_t ldir_len,
+ struct strbuf *rdir, size_t rdir_len)
+{
+ if (*entry->left)
+ write_file_in_directory(ldir, ldir_len, entry->path, entry->left);
+ if (*entry->right)
+ write_file_in_directory(rdir, rdir_len, entry->path, entry->right);
+}
+
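+/*
+ * Rough shape of the temporary layout built below (the directory name is
+ * whatever mkdtemp() returns, shown here for illustration):
+ *
+ *     $TMPDIR/git-difftool.XXXXXX/left/    "old" side of the diff
+ *     $TMPDIR/git-difftool.XXXXXX/right/   "new" side (worktree files or
+ *                                          checked-out blobs)
+ *
+ * The chosen tool is then invoked once on the two directories.
+ */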
+static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
+ struct child_process *child)
+{
+ struct strbuf info = STRBUF_INIT, lpath = STRBUF_INIT;
+ struct strbuf rpath = STRBUF_INIT, buf = STRBUF_INIT;
+ struct strbuf ldir = STRBUF_INIT, rdir = STRBUF_INIT;
+ struct strbuf wtdir = STRBUF_INIT;
+ struct strbuf tmpdir = STRBUF_INIT;
+ char *lbase_dir = NULL, *rbase_dir = NULL;
+ size_t ldir_len, rdir_len, wtdir_len;
+ const char *workdir, *tmp;
+ int ret = 0, i;
+ FILE *fp = NULL;
+ struct hashmap working_tree_dups = HASHMAP_INIT(working_tree_entry_cmp,
+ NULL);
+ struct hashmap submodules = HASHMAP_INIT(pair_cmp, NULL);
+ struct hashmap symlinks2 = HASHMAP_INIT(pair_cmp, NULL);
+ struct hashmap_iter iter;
+ struct pair_entry *entry;
+ struct index_state wtindex;
+ struct checkout lstate, rstate;
+ int err = 0;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct hashmap wt_modified, tmp_modified;
+ int indices_loaded = 0;
+
+ workdir = get_git_work_tree();
+
+ /* Setup temp directories */
+ tmp = getenv("TMPDIR");
+ strbuf_add_absolute_path(&tmpdir, tmp ? tmp : "/tmp");
+ strbuf_trim_trailing_dir_sep(&tmpdir);
+ strbuf_addstr(&tmpdir, "/git-difftool.XXXXXX");
+ if (!mkdtemp(tmpdir.buf)) {
+ ret = error("could not create '%s'", tmpdir.buf);
+ goto finish;
+ }
+ strbuf_addf(&ldir, "%s/left/", tmpdir.buf);
+ strbuf_addf(&rdir, "%s/right/", tmpdir.buf);
+ strbuf_addstr(&wtdir, workdir);
+ if (!wtdir.len || !is_dir_sep(wtdir.buf[wtdir.len - 1]))
+ strbuf_addch(&wtdir, '/');
+ mkdir(ldir.buf, 0700);
+ mkdir(rdir.buf, 0700);
+
+ memset(&wtindex, 0, sizeof(wtindex));
+
+ memset(&lstate, 0, sizeof(lstate));
+ lstate.base_dir = lbase_dir = xstrdup(ldir.buf);
+ lstate.base_dir_len = ldir.len;
+ lstate.force = 1;
+ memset(&rstate, 0, sizeof(rstate));
+ rstate.base_dir = rbase_dir = xstrdup(rdir.buf);
+ rstate.base_dir_len = rdir.len;
+ rstate.force = 1;
+
+ ldir_len = ldir.len;
+ rdir_len = rdir.len;
+ wtdir_len = wtdir.len;
+
+ child->no_stdin = 1;
+ child->git_cmd = 1;
+ child->use_shell = 0;
+ child->clean_on_exit = 1;
+ child->dir = prefix;
+ child->out = -1;
+ if (start_command(child))
+ die("could not obtain raw diff");
+ fp = xfdopen(child->out, "r");
+
+ /* Build index info for left and right sides of the diff */
+ i = 0;
+ while (!strbuf_getline_nul(&info, fp)) {
+ int lmode, rmode;
+ struct object_id loid, roid;
+ char status;
+ const char *src_path, *dst_path;
+
+ if (starts_with(info.buf, "::"))
+ die(N_("combined diff formats ('-c' and '--cc') are "
+ "not supported in\n"
+ "directory diff mode ('-d' and '--dir-diff')."));
+
+ if (parse_index_info(info.buf, &lmode, &rmode, &loid, &roid,
+ &status))
+ break;
+ if (strbuf_getline_nul(&lpath, fp))
+ break;
+ src_path = lpath.buf;
+
+ i++;
+ if (status != 'C' && status != 'R') {
+ dst_path = src_path;
+ } else {
+ if (strbuf_getline_nul(&rpath, fp))
+ break;
+ dst_path = rpath.buf;
+ }
+
+ if (S_ISGITLINK(lmode) || S_ISGITLINK(rmode)) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "Subproject commit %s",
+ oid_to_hex(&loid));
+ add_left_or_right(&submodules, src_path, buf.buf, 0);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "Subproject commit %s",
+ oid_to_hex(&roid));
+ if (oideq(&loid, &roid))
+ strbuf_addstr(&buf, "-dirty");
+ add_left_or_right(&submodules, dst_path, buf.buf, 1);
+ continue;
+ }
+
+ if (S_ISLNK(lmode)) {
+ char *content = get_symlink(&loid, src_path);
+ add_left_or_right(&symlinks2, src_path, content, 0);
+ free(content);
+ }
+
+ if (S_ISLNK(rmode)) {
+ char *content = get_symlink(&roid, dst_path);
+ add_left_or_right(&symlinks2, dst_path, content, 1);
+ free(content);
+ }
+
+ if (lmode && status != 'C') {
+ if (checkout_path(lmode, &loid, src_path, &lstate)) {
+ ret = error("could not write '%s'", src_path);
+ goto finish;
+ }
+ }
+
+ if (rmode && !S_ISLNK(rmode)) {
+ struct working_tree_entry *entry;
+
+ /* Avoid duplicate working_tree entries */
+ FLEX_ALLOC_STR(entry, path, dst_path);
+ hashmap_entry_init(&entry->entry, strhash(dst_path));
+ if (hashmap_get(&working_tree_dups, &entry->entry,
+ NULL)) {
+ free(entry);
+ continue;
+ }
+ hashmap_add(&working_tree_dups, &entry->entry);
+
+ if (!use_wt_file(workdir, dst_path, &roid)) {
+ if (checkout_path(rmode, &roid, dst_path,
+ &rstate)) {
+ ret = error("could not write '%s'",
+ dst_path);
+ goto finish;
+ }
+ } else if (!is_null_oid(&roid)) {
+ /*
+ * Changes in the working tree need special
+ * treatment since they are not part of the
+ * index.
+ */
+ struct cache_entry *ce2 =
+ make_cache_entry(&wtindex, rmode, &roid,
+ dst_path, 0, 0);
+
+ add_index_entry(&wtindex, ce2,
+ ADD_CACHE_JUST_APPEND);
+
+ add_path(&rdir, rdir_len, dst_path);
+ if (ensure_leading_directories(rdir.buf)) {
+ ret = error("could not create "
+ "directory for '%s'",
+ dst_path);
+ goto finish;
+ }
+ add_path(&wtdir, wtdir_len, dst_path);
+ if (symlinks) {
+ if (symlink(wtdir.buf, rdir.buf)) {
+ ret = error_errno("could not symlink '%s' to '%s'", wtdir.buf, rdir.buf);
+ goto finish;
+ }
+ } else {
+ struct stat st;
+ if (stat(wtdir.buf, &st))
+ st.st_mode = 0644;
+ if (copy_file(rdir.buf, wtdir.buf,
+ st.st_mode)) {
+ ret = error("could not copy '%s' to '%s'", wtdir.buf, rdir.buf);
+ goto finish;
+ }
+ }
+ }
+ }
+ }
+
+ fclose(fp);
+ fp = NULL;
+ if (finish_command(child)) {
+ ret = error("error occurred running diff --raw");
+ goto finish;
+ }
+
+ if (!i)
+ goto finish;
+
+ /*
+ * Changes to submodules require special treatment. This loop writes a
+ * temporary file to both the left and right directories to show the
+ * change in the recorded SHA-1 for the submodule.
+ */
+ hashmap_for_each_entry(&submodules, &iter, entry,
+ entry /* member name */) {
+ write_standin_files(entry, &ldir, ldir_len, &rdir, rdir_len);
+ }
+
+ /*
+ * Symbolic links require special treatment. The standard "git diff"
+ * shows only the link itself, not the contents of the link target.
+ * This loop replicates that behavior.
+ */
+ hashmap_for_each_entry(&symlinks2, &iter, entry,
+ entry /* member name */) {
+
+ write_standin_files(entry, &ldir, ldir_len, &rdir, rdir_len);
+ }
+
+ strbuf_setlen(&ldir, ldir_len);
+ strbuf_setlen(&rdir, rdir_len);
+
+ if (extcmd) {
+ strvec_push(&cmd.args, extcmd);
+ } else {
+ strvec_push(&cmd.args, "difftool--helper");
+ cmd.git_cmd = 1;
+ setenv("GIT_DIFFTOOL_DIRDIFF", "true", 1);
+ }
+ strvec_pushl(&cmd.args, ldir.buf, rdir.buf, NULL);
+ ret = run_command(&cmd);
+
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&wtindex);
+
+ /*
+ * If the diff includes working copy files and those
+ * files were modified during the diff, then the changes
+ * should be copied back to the working tree.
+ * Do not copy back files when symlinks are used and the
+ * external tool did not replace the original link with a file.
+ *
+ * These hashes are loaded lazily since they aren't needed
+ * in the common case of --symlinks and the difftool updating
+ * files through the symlink.
+ */
+ hashmap_init(&wt_modified, path_entry_cmp, NULL, wtindex.cache_nr);
+ hashmap_init(&tmp_modified, path_entry_cmp, NULL, wtindex.cache_nr);
+
+ for (i = 0; i < wtindex.cache_nr; i++) {
+ struct hashmap_entry dummy;
+ const char *name = wtindex.cache[i]->name;
+ struct stat st;
+
+ add_path(&rdir, rdir_len, name);
+ if (lstat(rdir.buf, &st))
+ continue;
+
+ if ((symlinks && S_ISLNK(st.st_mode)) || !S_ISREG(st.st_mode))
+ continue;
+
+ if (!indices_loaded) {
+ struct lock_file lock = LOCK_INIT;
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/wtindex", tmpdir.buf);
+ if (hold_lock_file_for_update(&lock, buf.buf, 0) < 0 ||
+ write_locked_index(&wtindex, &lock, COMMIT_LOCK)) {
+ ret = error("could not write %s", buf.buf);
+ goto finish;
+ }
+ changed_files(&wt_modified, buf.buf, workdir);
+ strbuf_setlen(&rdir, rdir_len);
+ changed_files(&tmp_modified, buf.buf, rdir.buf);
+ add_path(&rdir, rdir_len, name);
+ indices_loaded = 1;
+ }
+
+ hashmap_entry_init(&dummy, strhash(name));
+ if (hashmap_get(&tmp_modified, &dummy, name)) {
+ add_path(&wtdir, wtdir_len, name);
+ if (hashmap_get(&wt_modified, &dummy, name)) {
+ warning(_("both files modified: '%s' and '%s'."),
+ wtdir.buf, rdir.buf);
+ warning(_("working tree file has been left."));
+ warning("%s", "");
+ err = 1;
+ } else if (unlink(wtdir.buf) ||
+ copy_file(wtdir.buf, rdir.buf, st.st_mode))
+ warning_errno(_("could not copy '%s' to '%s'"),
+ rdir.buf, wtdir.buf);
+ }
+ }
+
+ if (err) {
+ warning(_("temporary files exist in '%s'."), tmpdir.buf);
+ warning(_("you may want to cleanup or recover these."));
+ ret = 1;
+ } else {
+ remove_dir_recursively(&tmpdir, 0);
+ if (ret)
+ warning(_("failed: %d"), ret);
+ }
+
+finish:
+ if (fp)
+ fclose(fp);
+
+ free(lbase_dir);
+ free(rbase_dir);
+ strbuf_release(&ldir);
+ strbuf_release(&rdir);
+ strbuf_release(&wtdir);
+ strbuf_release(&buf);
+ strbuf_release(&tmpdir);
+
+ return (ret < 0) ? 1 : ret;
+}
+
+static int run_file_diff(int prompt, const char *prefix,
+ struct child_process *child)
+{
+ const char *env[] = {
+ "GIT_PAGER=", "GIT_EXTERNAL_DIFF=git-difftool--helper", NULL,
+ NULL
+ };
+
+ if (prompt > 0)
+ env[2] = "GIT_DIFFTOOL_PROMPT=true";
+ else if (!prompt)
+ env[2] = "GIT_DIFFTOOL_NO_PROMPT=true";
+
+ child->git_cmd = 1;
+ child->dir = prefix;
+ strvec_pushv(&child->env, env);
+
+ return run_command(child);
+}
+
+int cmd_difftool(int argc, const char **argv, const char *prefix)
+{
+ int use_gui_tool = 0, dir_diff = 0, prompt = -1, symlinks = 0,
+ tool_help = 0, no_index = 0;
+ static char *difftool_cmd = NULL, *extcmd = NULL;
+ struct option builtin_difftool_options[] = {
+ OPT_BOOL('g', "gui", &use_gui_tool,
+ N_("use `diff.guitool` instead of `diff.tool`")),
+ OPT_BOOL('d', "dir-diff", &dir_diff,
+ N_("perform a full-directory diff")),
+ OPT_SET_INT_F('y', "no-prompt", &prompt,
+ N_("do not prompt before launching a diff tool"),
+ 0, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "prompt", &prompt, NULL,
+ 1, PARSE_OPT_NONEG | PARSE_OPT_HIDDEN),
+ OPT_BOOL(0, "symlinks", &symlinks,
+ N_("use symlinks in dir-diff mode")),
+ OPT_STRING('t', "tool", &difftool_cmd, N_("tool"),
+ N_("use the specified diff tool")),
+ OPT_BOOL(0, "tool-help", &tool_help,
+ N_("print a list of diff tools that may be used with "
+ "`--tool`")),
+ OPT_BOOL(0, "trust-exit-code", &trust_exit_code,
+ N_("make 'git-difftool' exit when an invoked diff "
+ "tool returns a non-zero exit code")),
+ OPT_STRING('x', "extcmd", &extcmd, N_("command"),
+ N_("specify a custom command for viewing diffs")),
+ OPT_BOOL(0, "no-index", &no_index, N_("passed to `diff`")),
+ OPT_END()
+ };
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ git_config(difftool_config, NULL);
+ symlinks = has_symlinks;
+
+ argc = parse_options(argc, argv, prefix, builtin_difftool_options,
+ builtin_difftool_usage, PARSE_OPT_KEEP_UNKNOWN_OPT |
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (tool_help)
+ return print_tool_help();
+
+ if (!no_index && !startup_info->have_repository)
+ die(_("difftool requires worktree or --no-index"));
+
+ if (!no_index) {
+ setup_work_tree();
+ setenv(GIT_DIR_ENVIRONMENT, absolute_path(get_git_dir()), 1);
+ setenv(GIT_WORK_TREE_ENVIRONMENT, absolute_path(get_git_work_tree()), 1);
+ } else if (dir_diff)
+ die(_("options '%s' and '%s' cannot be used together"), "--dir-diff", "--no-index");
+
+ die_for_incompatible_opt3(use_gui_tool, "--gui",
+ !!difftool_cmd, "--tool",
+ !!extcmd, "--extcmd");
+
+ if (use_gui_tool)
+ setenv("GIT_MERGETOOL_GUI", "true", 1);
+ else if (difftool_cmd) {
+ if (*difftool_cmd)
+ setenv("GIT_DIFF_TOOL", difftool_cmd, 1);
+ else
+ die(_("no <tool> given for --tool=<tool>"));
+ }
+
+ if (extcmd) {
+ if (*extcmd)
+ setenv("GIT_DIFFTOOL_EXTCMD", extcmd, 1);
+ else
+ die(_("no <cmd> given for --extcmd=<cmd>"));
+ }
+
+ setenv("GIT_DIFFTOOL_TRUST_EXIT_CODE",
+ trust_exit_code ? "true" : "false", 1);
+
+ /*
+ * In directory diff mode, 'git-difftool--helper' is called once
+ * to compare the a / b directories. In file diff mode, 'git diff'
+ * will invoke a separate instance of 'git-difftool--helper' for
+ * each file that changed.
+ */
+ strvec_push(&child.args, "diff");
+ if (no_index)
+ strvec_push(&child.args, "--no-index");
+ if (dir_diff)
+ strvec_pushl(&child.args, "--raw", "--no-abbrev", "-z", NULL);
+ strvec_pushv(&child.args, argv);
+
+ if (dir_diff)
+ return run_dir_diff(extcmd, symlinks, prefix, &child);
+ return run_file_diff(prompt, prefix, &child);
+}
diff --git a/builtin/env--helper.c b/builtin/env--helper.c
new file mode 100644
index 0000000..ea04c16
--- /dev/null
+++ b/builtin/env--helper.c
@@ -0,0 +1,100 @@
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+
+static char const * const env__helper_usage[] = {
+ N_("git env--helper --type=[bool|ulong] <options> <env-var>"),
+ NULL
+};
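+
+/*
+ * Example invocations (the environment variable names are illustrative):
+ *
+ *     git env--helper --type=bool --exit-code --default=false GIT_TEST_FOO
+ *     git env--helper --type=ulong --default=0 GIT_TEST_SIZE
+ *
+ * The first exits with status 0 and prints nothing when $GIT_TEST_FOO
+ * evaluates to true; the second prints the parsed unsigned long value.
+ */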
+
+enum cmdmode {
+ ENV_HELPER_TYPE_BOOL = 1,
+ ENV_HELPER_TYPE_ULONG
+};
+
+static int option_parse_type(const struct option *opt, const char *arg,
+ int unset)
+{
+ enum cmdmode *cmdmode = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (!strcmp(arg, "bool"))
+ *cmdmode = ENV_HELPER_TYPE_BOOL;
+ else if (!strcmp(arg, "ulong"))
+ *cmdmode = ENV_HELPER_TYPE_ULONG;
+ else
+ die(_("unrecognized --type argument, %s"), arg);
+
+ return 0;
+}
+
+int cmd_env__helper(int argc, const char **argv, const char *prefix)
+{
+ int exit_code = 0;
+ const char *env_variable = NULL;
+ const char *env_default = NULL;
+ int ret;
+ int ret_int, default_int;
+ unsigned long ret_ulong, default_ulong;
+ enum cmdmode cmdmode = 0;
+ struct option opts[] = {
+ OPT_CALLBACK_F(0, "type", &cmdmode, N_("type"),
+ N_("value is given this type"), PARSE_OPT_NONEG,
+ option_parse_type),
+ OPT_STRING(0, "default", &env_default, N_("value"),
+ N_("default for git_env_*(...) to fall back on")),
+ OPT_BOOL(0, "exit-code", &exit_code,
+ N_("be quiet; only use git_env_*() value as exit code")),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, prefix, opts, env__helper_usage,
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+ if (env_default && !*env_default)
+ usage_with_options(env__helper_usage, opts);
+ if (!cmdmode)
+ usage_with_options(env__helper_usage, opts);
+ if (argc != 1)
+ usage_with_options(env__helper_usage, opts);
+ env_variable = argv[0];
+
+ switch (cmdmode) {
+ case ENV_HELPER_TYPE_BOOL:
+ if (env_default) {
+ default_int = git_parse_maybe_bool(env_default);
+ if (default_int == -1) {
+ error(_("option `--default' expects a boolean value with `--type=bool`, not `%s`"),
+ env_default);
+ usage_with_options(env__helper_usage, opts);
+ }
+ } else {
+ default_int = 0;
+ }
+ ret_int = git_env_bool(env_variable, default_int);
+ if (!exit_code)
+ puts(ret_int ? "true" : "false");
+ ret = ret_int;
+ break;
+ case ENV_HELPER_TYPE_ULONG:
+ if (env_default) {
+ if (!git_parse_ulong(env_default, &default_ulong)) {
+ error(_("option `--default' expects an unsigned long value with `--type=ulong`, not `%s`"),
+ env_default);
+ usage_with_options(env__helper_usage, opts);
+ }
+ } else {
+ default_ulong = 0;
+ }
+ ret_ulong = git_env_ulong(env_variable, default_ulong);
+ if (!exit_code)
+ printf("%lu\n", ret_ulong);
+ ret = ret_ulong;
+ break;
+ default:
+ BUG("unknown <type> value");
+ break;
+ }
+
+ return !ret;
+}
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
new file mode 100644
index 0000000..3b3314e
--- /dev/null
+++ b/builtin/fast-export.c
@@ -0,0 +1,1282 @@
+/*
+ * "git fast-export" builtin command
+ *
+ * Copyright (C) 2007 Johannes E. Schindelin
+ */
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "refs.h"
+#include "refspec.h"
+#include "object-store.h"
+#include "commit.h"
+#include "object.h"
+#include "tag.h"
+#include "diff.h"
+#include "diffcore.h"
+#include "log-tree.h"
+#include "revision.h"
+#include "decorate.h"
+#include "string-list.h"
+#include "utf8.h"
+#include "parse-options.h"
+#include "quote.h"
+#include "remote.h"
+#include "blob.h"
+#include "commit-slab.h"
+
+static const char *fast_export_usage[] = {
+ N_("git fast-export [<rev-list-opts>]"),
+ NULL
+};
+
+static int progress;
+static enum { SIGNED_TAG_ABORT, VERBATIM, WARN, WARN_STRIP, STRIP } signed_tag_mode = SIGNED_TAG_ABORT;
+static enum { TAG_FILTERING_ABORT, DROP, REWRITE } tag_of_filtered_mode = TAG_FILTERING_ABORT;
+static enum { REENCODE_ABORT, REENCODE_YES, REENCODE_NO } reencode_mode = REENCODE_ABORT;
+static int fake_missing_tagger;
+static int use_done_feature;
+static int no_data;
+static int full_tree;
+static int reference_excluded_commits;
+static int show_original_ids;
+static int mark_tags;
+static struct string_list extra_refs = STRING_LIST_INIT_NODUP;
+static struct string_list tag_refs = STRING_LIST_INIT_NODUP;
+static struct refspec refspecs = REFSPEC_INIT_FETCH;
+static int anonymize;
+static struct hashmap anonymized_seeds;
+static struct revision_sources revision_sources;
+
+static int parse_opt_signed_tag_mode(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset || !strcmp(arg, "abort"))
+ signed_tag_mode = SIGNED_TAG_ABORT;
+ else if (!strcmp(arg, "verbatim") || !strcmp(arg, "ignore"))
+ signed_tag_mode = VERBATIM;
+ else if (!strcmp(arg, "warn"))
+ signed_tag_mode = WARN;
+ else if (!strcmp(arg, "warn-strip"))
+ signed_tag_mode = WARN_STRIP;
+ else if (!strcmp(arg, "strip"))
+ signed_tag_mode = STRIP;
+ else
+ return error("Unknown signed-tags mode: %s", arg);
+ return 0;
+}
+
+static int parse_opt_tag_of_filtered_mode(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset || !strcmp(arg, "abort"))
+ tag_of_filtered_mode = TAG_FILTERING_ABORT;
+ else if (!strcmp(arg, "drop"))
+ tag_of_filtered_mode = DROP;
+ else if (!strcmp(arg, "rewrite"))
+ tag_of_filtered_mode = REWRITE;
+ else
+ return error("Unknown tag-of-filtered mode: %s", arg);
+ return 0;
+}
+
+static int parse_opt_reencode_mode(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset) {
+ reencode_mode = REENCODE_ABORT;
+ return 0;
+ }
+
+ switch (git_parse_maybe_bool(arg)) {
+ case 0:
+ reencode_mode = REENCODE_NO;
+ break;
+ case 1:
+ reencode_mode = REENCODE_YES;
+ break;
+ default:
+ if (!strcasecmp(arg, "abort"))
+ reencode_mode = REENCODE_ABORT;
+ else
+ return error("Unknown reencoding mode: %s", arg);
+ }
+
+ return 0;
+}
+
+static struct decoration idnums;
+static uint32_t last_idnum;
+struct anonymized_entry {
+ struct hashmap_entry hash;
+ const char *anon;
+ const char orig[FLEX_ARRAY];
+};
+
+struct anonymized_entry_key {
+ struct hashmap_entry hash;
+ const char *orig;
+ size_t orig_len;
+};
+
+static int anonymized_entry_cmp(const void *cmp_data UNUSED,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata)
+{
+ const struct anonymized_entry *a, *b;
+
+ a = container_of(eptr, const struct anonymized_entry, hash);
+ if (keydata) {
+ const struct anonymized_entry_key *key = keydata;
+ int equal = !strncmp(a->orig, key->orig, key->orig_len) &&
+ !a->orig[key->orig_len];
+ return !equal;
+ }
+
+ b = container_of(entry_or_key, const struct anonymized_entry, hash);
+ return strcmp(a->orig, b->orig);
+}
+
+/*
+ * Basically keep a cache of X->Y so that the same original string is
+ * always replaced with the same anonymized value. The actual generation
+ * is farmed out to the generate function.
+ */
+static const char *anonymize_str(struct hashmap *map,
+ char *(*generate)(void *),
+ const char *orig, size_t len,
+ void *data)
+{
+ struct anonymized_entry_key key;
+ struct anonymized_entry *ret;
+
+ if (!map->cmpfn)
+ hashmap_init(map, anonymized_entry_cmp, NULL, 0);
+
+ hashmap_entry_init(&key.hash, memhash(orig, len));
+ key.orig = orig;
+ key.orig_len = len;
+
+ /* First check if it's a token the user configured manually... */
+ if (anonymized_seeds.cmpfn)
+ ret = hashmap_get_entry(&anonymized_seeds, &key, hash, &key);
+ else
+ ret = NULL;
+
+ /* ...otherwise check if we've already seen it in this context... */
+ if (!ret)
+ ret = hashmap_get_entry(map, &key, hash, &key);
+
+ /* ...and finally generate a new mapping if necessary */
+ if (!ret) {
+ FLEX_ALLOC_MEM(ret, orig, orig, len);
+ hashmap_entry_init(&ret->hash, key.hash.hash);
+ ret->anon = generate(data);
+ hashmap_put(map, &ret->hash);
+ }
+
+ return ret->anon;
+}
+
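+/*
+ * As an illustration of anonymize_str() above (the replacement names are
+ * hypothetical): anonymizing the components of "a/b" and then "a/c"
+ * against the same map could yield "path0/path1" and "path0/path2"; the
+ * shared component "a" hits the cache and keeps the same replacement.
+ */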
+/*
+ * We anonymize each component of a path individually,
+ * so that paths a/b and a/c will share a common root.
+ * The paths are cached via anonymize_str() so that repeated
+ * lookups for "a" will yield the same value.
+ */
+static void anonymize_path(struct strbuf *out, const char *path,
+ struct hashmap *map,
+ char *(*generate)(void *))
+{
+ while (*path) {
+ const char *end_of_component = strchrnul(path, '/');
+ size_t len = end_of_component - path;
+ const char *c = anonymize_str(map, generate, path, len, NULL);
+ strbuf_addstr(out, c);
+ path = end_of_component;
+ if (*path)
+ strbuf_addch(out, *path++);
+ }
+}
+
+static inline void *mark_to_ptr(uint32_t mark)
+{
+ return (void *)(uintptr_t)mark;
+}
+
+static inline uint32_t ptr_to_mark(void * mark)
+{
+ return (uint32_t)(uintptr_t)mark;
+}
+
+static inline void mark_object(struct object *object, uint32_t mark)
+{
+ add_decoration(&idnums, object, mark_to_ptr(mark));
+}
+
+static inline void mark_next_object(struct object *object)
+{
+ mark_object(object, ++last_idnum);
+}
+
+static int get_object_mark(struct object *object)
+{
+ void *decoration = lookup_decoration(&idnums, object);
+ if (!decoration)
+ return 0;
+ return ptr_to_mark(decoration);
+}
+
+static struct commit *rewrite_commit(struct commit *p)
+{
+ for (;;) {
+ if (p->parents && p->parents->next)
+ break;
+ if (p->object.flags & UNINTERESTING)
+ break;
+ if (!(p->object.flags & TREESAME))
+ break;
+ if (!p->parents)
+ return NULL;
+ p = p->parents->item;
+ }
+ return p;
+}
+
+static void show_progress(void)
+{
+ static int counter = 0;
+ if (!progress)
+ return;
+ if ((++counter % progress) == 0)
+ printf("progress %d objects\n", counter);
+}
+
+/*
+ * Ideally we would want some transformation of the blob data here
+ * that is irreversible, but would still be the same size and have
+ * the same data relationship to other blobs (so that we get the same
+ * delta and packing behavior as the original). But the first and last
+ * requirements there are probably mutually exclusive, so let's take
+ * the easy way out for now, and just generate arbitrary content.
+ *
+ * There's no need to cache this result with anonymize_str(), since
+ * we already handle blob content caching with marks.
+ */
+static char *anonymize_blob(unsigned long *size)
+{
+ static int counter;
+ struct strbuf out = STRBUF_INIT;
+ strbuf_addf(&out, "anonymous blob %d", counter++);
+ *size = out.len;
+ return strbuf_detach(&out, NULL);
+}
+
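+/*
+ * Emit one fast-import "blob" record for the given object, e.g. (the mark
+ * and size are illustrative):
+ *
+ *     blob
+ *     mark :1
+ *     data 1234
+ *     <1234 bytes of blob content>
+ */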
+static void export_blob(const struct object_id *oid)
+{
+ unsigned long size;
+ enum object_type type;
+ char *buf;
+ struct object *object;
+ int eaten;
+
+ if (no_data)
+ return;
+
+ if (is_null_oid(oid))
+ return;
+
+ object = lookup_object(the_repository, oid);
+ if (object && object->flags & SHOWN)
+ return;
+
+ if (anonymize) {
+ buf = anonymize_blob(&size);
+ object = (struct object *)lookup_blob(the_repository, oid);
+ eaten = 0;
+ } else {
+ buf = read_object_file(oid, &type, &size);
+ if (!buf)
+ die("could not read blob %s", oid_to_hex(oid));
+ if (check_object_signature(the_repository, oid, buf, size,
+ type) < 0)
+ die("oid mismatch in blob %s", oid_to_hex(oid));
+ object = parse_object_buffer(the_repository, oid, type,
+ size, buf, &eaten);
+ }
+
+ if (!object)
+ die("Could not read blob %s", oid_to_hex(oid));
+
+ mark_next_object(object);
+
+ printf("blob\nmark :%"PRIu32"\n", last_idnum);
+ if (show_original_ids)
+ printf("original-oid %s\n", oid_to_hex(oid));
+ printf("data %"PRIuMAX"\n", (uintmax_t)size);
+ if (size && fwrite(buf, size, 1, stdout) != 1)
+ die_errno("could not write blob '%s'", oid_to_hex(oid));
+ printf("\n");
+
+ show_progress();
+
+ object->flags |= SHOWN;
+ if (!eaten)
+ free(buf);
+}
+
+static int depth_first(const void *a_, const void *b_)
+{
+ const struct diff_filepair *a = *((const struct diff_filepair **)a_);
+ const struct diff_filepair *b = *((const struct diff_filepair **)b_);
+ const char *name_a, *name_b;
+ int len_a, len_b, len;
+ int cmp;
+
+ name_a = a->one ? a->one->path : a->two->path;
+ name_b = b->one ? b->one->path : b->two->path;
+
+ len_a = strlen(name_a);
+ len_b = strlen(name_b);
+ len = (len_a < len_b) ? len_a : len_b;
+
+ /* strcmp will sort 'd' before 'd/e'; we want 'd/e' before 'd' */
+ cmp = memcmp(name_a, name_b, len);
+ if (cmp)
+ return cmp;
+ cmp = len_b - len_a;
+ if (cmp)
+ return cmp;
+ /*
+ * Move 'R'ename entries last so that all references of the file
+ * appear in the output before it is renamed (e.g., when a file
+ * was copied and renamed in the same commit).
+ */
+ return (a->status == 'R') - (b->status == 'R');
+}
+
+static void print_path_1(const char *path)
+{
+ int need_quote = quote_c_style(path, NULL, NULL, 0);
+ if (need_quote)
+ quote_c_style(path, NULL, stdout, 0);
+ else if (strchr(path, ' '))
+ printf("\"%s\"", path);
+ else
+ printf("%s", path);
+}
+
+static char *anonymize_path_component(void *data)
+{
+ static int counter;
+ struct strbuf out = STRBUF_INIT;
+ strbuf_addf(&out, "path%d", counter++);
+ return strbuf_detach(&out, NULL);
+}
+
+static void print_path(const char *path)
+{
+ if (!anonymize)
+ print_path_1(path);
+ else {
+ static struct hashmap paths;
+ static struct strbuf anon = STRBUF_INIT;
+
+ anonymize_path(&anon, path, &paths, anonymize_path_component);
+ print_path_1(anon.buf);
+ strbuf_reset(&anon);
+ }
+}
+
+static char *generate_fake_oid(void *data)
+{
+ static uint32_t counter = 1; /* avoid null oid */
+ const unsigned hashsz = the_hash_algo->rawsz;
+ struct object_id oid;
+ char *hex = xmallocz(GIT_MAX_HEXSZ);
+
+ oidclr(&oid);
+ put_be32(oid.hash + hashsz - 4, counter++);
+ return oid_to_hex_r(hex, &oid);
+}
+
+static const char *anonymize_oid(const char *oid_hex)
+{
+ static struct hashmap objs;
+ size_t len = strlen(oid_hex);
+ return anonymize_str(&objs, generate_fake_oid, oid_hex, len, NULL);
+}
+
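+/*
+ * Emit the fast-import file commands for one commit's diff queue.  For a
+ * modified tracked file this is typically (mark number illustrative)
+ *
+ *     M 100644 :42 path/to/file
+ *
+ * with "D <path>" for deletions and "R <old> <new>" / "C <old> <new>" for
+ * renames and copies.
+ */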
+static void show_filemodify(struct diff_queue_struct *q,
+ struct diff_options *options, void *data)
+{
+ int i;
+ struct string_list *changed = data;
+
+ /*
+ * Handle files below a directory first, in case they are all deleted
+ * and the directory changes to a file or symlink.
+ */
+ QSORT(q->queue, q->nr, depth_first);
+
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filespec *ospec = q->queue[i]->one;
+ struct diff_filespec *spec = q->queue[i]->two;
+
+ switch (q->queue[i]->status) {
+ case DIFF_STATUS_DELETED:
+ printf("D ");
+ print_path(spec->path);
+ string_list_insert(changed, spec->path);
+ putchar('\n');
+ break;
+
+ case DIFF_STATUS_COPIED:
+ case DIFF_STATUS_RENAMED:
+ /*
+ * If a change in the file corresponding to ospec->path
+ * has been observed, we cannot trust its contents
+ * because the diff is calculated based on the prior
+ * contents, not the current contents. So, declare a
+ * copy or rename only if there was no change observed.
+ */
+ if (!string_list_has_string(changed, ospec->path)) {
+ printf("%c ", q->queue[i]->status);
+ print_path(ospec->path);
+ putchar(' ');
+ print_path(spec->path);
+ string_list_insert(changed, spec->path);
+ putchar('\n');
+
+ if (oideq(&ospec->oid, &spec->oid) &&
+ ospec->mode == spec->mode)
+ break;
+ }
+ /* fallthrough */
+
+ case DIFF_STATUS_TYPE_CHANGED:
+ case DIFF_STATUS_MODIFIED:
+ case DIFF_STATUS_ADDED:
+ /*
+ * Links refer to objects in other repositories;
+ * output the SHA-1 verbatim.
+ */
+ if (no_data || S_ISGITLINK(spec->mode))
+ printf("M %06o %s ", spec->mode,
+ anonymize ?
+ anonymize_oid(oid_to_hex(&spec->oid)) :
+ oid_to_hex(&spec->oid));
+ else {
+ struct object *object = lookup_object(the_repository,
+ &spec->oid);
+ printf("M %06o :%d ", spec->mode,
+ get_object_mark(object));
+ }
+ print_path(spec->path);
+ string_list_insert(changed, spec->path);
+ putchar('\n');
+ break;
+
+ default:
+ die("Unexpected comparison status '%c' for %s, %s",
+ q->queue[i]->status,
+ ospec->path ? ospec->path : "none",
+ spec->path ? spec->path : "none");
+ }
+ }
+}
+
+static const char *find_encoding(const char *begin, const char *end)
+{
+ const char *needle = "\nencoding ";
+ char *bol, *eol;
+
+ bol = memmem(begin, end ? end - begin : strlen(begin),
+ needle, strlen(needle));
+ if (!bol)
+ return NULL;
+ bol += strlen(needle);
+ eol = strchrnul(bol, '\n');
+ *eol = '\0';
+ return bol;
+}
+
+static char *anonymize_ref_component(void *data)
+{
+ static int counter;
+ struct strbuf out = STRBUF_INIT;
+ strbuf_addf(&out, "ref%d", counter++);
+ return strbuf_detach(&out, NULL);
+}
+
+static const char *anonymize_refname(const char *refname)
+{
+ /*
+ * If any of these prefixes is found, we will leave it intact
+ * so that tags remain tags and so forth.
+ */
+ static const char *prefixes[] = {
+ "refs/heads/",
+ "refs/tags/",
+ "refs/remotes/",
+ "refs/"
+ };
+ static struct hashmap refs;
+ static struct strbuf anon = STRBUF_INIT;
+ int i;
+
+ strbuf_reset(&anon);
+ for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
+ if (skip_prefix(refname, prefixes[i], &refname)) {
+ strbuf_addstr(&anon, prefixes[i]);
+ break;
+ }
+ }
+
+ anonymize_path(&anon, refname, &refs, anonymize_ref_component);
+ return anon.buf;
+}
+
+/*
+ * We do not even bother to cache commit messages, as they are unlikely
+ * to be repeated verbatim, and it is not that interesting when they are.
+ */
+static char *anonymize_commit_message(const char *old)
+{
+ static int counter;
+ return xstrfmt("subject %d\n\nbody\n", counter++);
+}
+
+static char *anonymize_ident(void *data)
+{
+ static int counter;
+ struct strbuf out = STRBUF_INIT;
+ strbuf_addf(&out, "User %d <user%d@example.com>", counter, counter);
+ counter++;
+ return strbuf_detach(&out, NULL);
+}
+
+/*
+ * Our strategy here is to anonymize the names and email addresses,
+ * but keep timestamps intact, as they influence things like traversal
+ * order (and by themselves should not be too revealing).
+ */
+static void anonymize_ident_line(const char **beg, const char **end)
+{
+ static struct hashmap idents;
+ static struct strbuf buffers[] = { STRBUF_INIT, STRBUF_INIT };
+ static unsigned which_buffer;
+
+ struct strbuf *out;
+ struct ident_split split;
+ const char *end_of_header;
+
+ out = &buffers[which_buffer++];
+ which_buffer %= ARRAY_SIZE(buffers);
+ strbuf_reset(out);
+
+ /* skip "committer", "author", "tagger", etc. */
+ end_of_header = strchr(*beg, ' ');
+ if (!end_of_header)
+ BUG("malformed line fed to anonymize_ident_line: %.*s",
+ (int)(*end - *beg), *beg);
+ end_of_header++;
+ strbuf_add(out, *beg, end_of_header - *beg);
+
+ if (!split_ident_line(&split, end_of_header, *end - end_of_header) &&
+ split.date_begin) {
+ const char *ident;
+ size_t len;
+
+ len = split.mail_end - split.name_begin;
+ ident = anonymize_str(&idents, anonymize_ident,
+ split.name_begin, len, NULL);
+ strbuf_addstr(out, ident);
+ strbuf_addch(out, ' ');
+ strbuf_add(out, split.date_begin, split.tz_end - split.date_begin);
+ } else {
+ strbuf_addstr(out, "Malformed Ident <malformed@example.com> 0 -0000");
+ }
+
+ *beg = out->buf;
+ *end = out->buf + out->len;
+}
+
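+/*
+ * For a single commit the output is, roughly (marks, identities and paths
+ * are illustrative):
+ *
+ *     commit refs/heads/master
+ *     mark :43
+ *     author A U Thor <author@example.com> 1112912173 -0700
+ *     committer C O Mitter <committer@example.com> 1112912173 -0700
+ *     data <byte count of the message>
+ *     <commit message>
+ *     from :41
+ *     M 100644 :42 path/to/file
+ *
+ * followed by a blank line.
+ */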
+static void handle_commit(struct commit *commit, struct rev_info *rev,
+ struct string_list *paths_of_changed_objects)
+{
+ int saved_output_format = rev->diffopt.output_format;
+ const char *commit_buffer;
+ const char *author, *author_end, *committer, *committer_end;
+ const char *encoding, *message;
+ char *reencoded = NULL;
+ struct commit_list *p;
+ const char *refname;
+ int i;
+
+ rev->diffopt.output_format = DIFF_FORMAT_CALLBACK;
+
+ parse_commit_or_die(commit);
+ commit_buffer = get_commit_buffer(commit, NULL);
+ author = strstr(commit_buffer, "\nauthor ");
+ if (!author)
+ die("could not find author in commit %s",
+ oid_to_hex(&commit->object.oid));
+ author++;
+ author_end = strchrnul(author, '\n');
+ committer = strstr(author_end, "\ncommitter ");
+ if (!committer)
+ die("could not find committer in commit %s",
+ oid_to_hex(&commit->object.oid));
+ committer++;
+ committer_end = strchrnul(committer, '\n');
+ message = strstr(committer_end, "\n\n");
+ encoding = find_encoding(committer_end, message);
+ if (message)
+ message += 2;
+
+ if (commit->parents &&
+ (get_object_mark(&commit->parents->item->object) != 0 ||
+ reference_excluded_commits) &&
+ !full_tree) {
+ parse_commit_or_die(commit->parents->item);
+ diff_tree_oid(get_commit_tree_oid(commit->parents->item),
+ get_commit_tree_oid(commit), "", &rev->diffopt);
+ }
+ else
+ diff_root_tree_oid(get_commit_tree_oid(commit),
+ "", &rev->diffopt);
+
+ /* Export the referenced blobs, and remember the marks. */
+ for (i = 0; i < diff_queued_diff.nr; i++)
+ if (!S_ISGITLINK(diff_queued_diff.queue[i]->two->mode))
+ export_blob(&diff_queued_diff.queue[i]->two->oid);
+
+ refname = *revision_sources_at(&revision_sources, commit);
+ /*
+ * FIXME: string_list_remove() below for each ref is overall
+ * O(N^2). Compared to a history walk and diffing trees, this is
+ * just lost in the noise in practice. However, theoretically a
+ * repo may have enough refs for this to become slow.
+ */
+ string_list_remove(&extra_refs, refname, 0);
+ if (anonymize) {
+ refname = anonymize_refname(refname);
+ anonymize_ident_line(&committer, &committer_end);
+ anonymize_ident_line(&author, &author_end);
+ }
+
+ mark_next_object(&commit->object);
+ if (anonymize) {
+ reencoded = anonymize_commit_message(message);
+ } else if (encoding) {
+ switch(reencode_mode) {
+ case REENCODE_YES:
+ reencoded = reencode_string(message, "UTF-8", encoding);
+ break;
+ case REENCODE_NO:
+ break;
+ case REENCODE_ABORT:
+ die("Encountered commit-specific encoding %s in commit "
+ "%s; use --reencode=[yes|no] to handle it",
+ encoding, oid_to_hex(&commit->object.oid));
+ }
+ }
+ if (!commit->parents)
+ printf("reset %s\n", refname);
+ printf("commit %s\nmark :%"PRIu32"\n", refname, last_idnum);
+ if (show_original_ids)
+ printf("original-oid %s\n", oid_to_hex(&commit->object.oid));
+ printf("%.*s\n%.*s\n",
+ (int)(author_end - author), author,
+ (int)(committer_end - committer), committer);
+ if (!reencoded && encoding)
+ printf("encoding %s\n", encoding);
+ printf("data %u\n%s",
+ (unsigned)(reencoded
+ ? strlen(reencoded) : message
+ ? strlen(message) : 0),
+ reencoded ? reencoded : message ? message : "");
+ free(reencoded);
+ unuse_commit_buffer(commit, commit_buffer);
+
+ for (i = 0, p = commit->parents; p; p = p->next) {
+ struct object *obj = &p->item->object;
+ int mark = get_object_mark(obj);
+
+ if (!mark && !reference_excluded_commits)
+ continue;
+ if (i == 0)
+ printf("from ");
+ else
+ printf("merge ");
+ if (mark)
+ printf(":%d\n", mark);
+ else
+ printf("%s\n",
+ anonymize ?
+ anonymize_oid(oid_to_hex(&obj->oid)) :
+ oid_to_hex(&obj->oid));
+ i++;
+ }
+
+ if (full_tree)
+ printf("deleteall\n");
+ log_tree_diff_flush(rev);
+ string_list_clear(paths_of_changed_objects, 0);
+ rev->diffopt.output_format = saved_output_format;
+
+ printf("\n");
+
+ show_progress();
+}
+
+static char *anonymize_tag(void *data)
+{
+ static int counter;
+ struct strbuf out = STRBUF_INIT;
+ strbuf_addf(&out, "tag message %d", counter++);
+ return strbuf_detach(&out, NULL);
+}
+
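+/*
+ * Emit one annotated tag, honouring the configured handling of signed
+ * tags and of tags whose tagged object was filtered out of the export.
+ */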
+static void handle_tag(const char *name, struct tag *tag)
+{
+ unsigned long size;
+ enum object_type type;
+ char *buf;
+ const char *tagger, *tagger_end, *message;
+ size_t message_size = 0;
+ struct object *tagged;
+ int tagged_mark;
+ struct commit *p;
+
+ /* Trees have no identifier in fast-export output, thus we have no way
+ * to output tags of trees, tags of tags of trees, etc. Simply omit
+ * such tags.
+ */
+ tagged = tag->tagged;
+ while (tagged->type == OBJ_TAG) {
+ tagged = ((struct tag *)tagged)->tagged;
+ }
+ if (tagged->type == OBJ_TREE) {
+ warning("Omitting tag %s,\nsince tags of trees (or tags of tags of trees, etc.) are not supported.",
+ oid_to_hex(&tag->object.oid));
+ return;
+ }
+
+ buf = read_object_file(&tag->object.oid, &type, &size);
+ if (!buf)
+ die("could not read tag %s", oid_to_hex(&tag->object.oid));
+ message = memmem(buf, size, "\n\n", 2);
+ if (message) {
+ message += 2;
+ message_size = strlen(message);
+ }
+ tagger = memmem(buf, message ? message - buf : size, "\ntagger ", 8);
+ if (!tagger) {
+ if (fake_missing_tagger)
+ tagger = "tagger Unspecified Tagger "
+ "<unspecified-tagger> 0 +0000";
+ else
+ tagger = "";
+ tagger_end = tagger + strlen(tagger);
+ } else {
+ tagger++;
+ tagger_end = strchrnul(tagger, '\n');
+ if (anonymize)
+ anonymize_ident_line(&tagger, &tagger_end);
+ }
+
+ if (anonymize) {
+ name = anonymize_refname(name);
+ if (message) {
+ static struct hashmap tags;
+ message = anonymize_str(&tags, anonymize_tag,
+ message, message_size, NULL);
+ message_size = strlen(message);
+ }
+ }
+
+ /* handle signed tags */
+ if (message) {
+ const char *signature = strstr(message,
+ "\n-----BEGIN PGP SIGNATURE-----\n");
+ if (signature)
+ switch(signed_tag_mode) {
+ case SIGNED_TAG_ABORT:
+ die("encountered signed tag %s; use "
+ "--signed-tags=<mode> to handle it",
+ oid_to_hex(&tag->object.oid));
+ case WARN:
+ warning("exporting signed tag %s",
+ oid_to_hex(&tag->object.oid));
+ /* fallthru */
+ case VERBATIM:
+ break;
+ case WARN_STRIP:
+ warning("stripping signature from tag %s",
+ oid_to_hex(&tag->object.oid));
+ /* fallthru */
+ case STRIP:
+ message_size = signature + 1 - message;
+ break;
+ }
+ }
+
+ /* handle tag->tagged having been filtered out due to paths specified */
+ tagged = tag->tagged;
+ tagged_mark = get_object_mark(tagged);
+ if (!tagged_mark) {
+ switch(tag_of_filtered_mode) {
+ case TAG_FILTERING_ABORT:
+ die("tag %s tags unexported object; use "
+ "--tag-of-filtered-object=<mode> to handle it",
+ oid_to_hex(&tag->object.oid));
+ case DROP:
+ /* Ignore this tag altogether */
+ free(buf);
+ return;
+ case REWRITE:
+ if (tagged->type == OBJ_TAG && !mark_tags) {
+ die(_("Error: Cannot export nested tags unless --mark-tags is specified."));
+ } else if (tagged->type == OBJ_COMMIT) {
+ p = rewrite_commit((struct commit *)tagged);
+ if (!p) {
+ printf("reset %s\nfrom %s\n\n",
+ name, oid_to_hex(null_oid()));
+ free(buf);
+ return;
+ }
+ tagged_mark = get_object_mark(&p->object);
+ } else {
+ /* tagged->type is either OBJ_BLOB or OBJ_TAG */
+ tagged_mark = get_object_mark(tagged);
+ }
+ }
+ }
+
+ if (tagged->type == OBJ_TAG) {
+ printf("reset %s\nfrom %s\n\n",
+ name, oid_to_hex(null_oid()));
+ }
+ skip_prefix(name, "refs/tags/", &name);
+ printf("tag %s\n", name);
+ if (mark_tags) {
+ mark_next_object(&tag->object);
+ printf("mark :%"PRIu32"\n", last_idnum);
+ }
+ if (tagged_mark)
+ printf("from :%d\n", tagged_mark);
+ else
+ printf("from %s\n", oid_to_hex(&tagged->oid));
+
+ if (show_original_ids)
+ printf("original-oid %s\n", oid_to_hex(&tag->object.oid));
+ printf("%.*s%sdata %d\n%.*s\n",
+ (int)(tagger_end - tagger), tagger,
+ tagger == tagger_end ? "" : "\n",
+ (int)message_size, (int)message_size, message ? message : "");
+ free(buf);
+}
+
+static struct commit *get_commit(struct rev_cmdline_entry *e, char *full_name)
+{
+ switch (e->item->type) {
+ case OBJ_COMMIT:
+ return (struct commit *)e->item;
+ case OBJ_TAG: {
+ struct tag *tag = (struct tag *)e->item;
+
+ /* handle nested tags */
+ while (tag && tag->object.type == OBJ_TAG) {
+ parse_object(the_repository, &tag->object.oid);
+ string_list_append(&tag_refs, full_name)->util = tag;
+ tag = (struct tag *)tag->tagged;
+ }
+ if (!tag)
+ die("Tag %s points nowhere?", e->name);
+ return (struct commit *)tag;
+ }
+ default:
+ return NULL;
+ }
+}
+
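+/*
+ * Walk the revisions given on the command line, remembering which ref
+ * each tip commit came from, exporting blob tips directly, queueing
+ * annotated tags (including nested ones) for later output, and
+ * collecting refs that must be emitted manually at the end of the
+ * stream.
+ */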
+static void get_tags_and_duplicates(struct rev_cmdline_info *info)
+{
+ int i;
+
+ for (i = 0; i < info->nr; i++) {
+ struct rev_cmdline_entry *e = info->rev + i;
+ struct object_id oid;
+ struct commit *commit;
+ char *full_name;
+
+ if (e->flags & UNINTERESTING)
+ continue;
+
+ if (dwim_ref(e->name, strlen(e->name), &oid, &full_name, 0) != 1)
+ continue;
+
+ if (refspecs.nr) {
+ char *private;
+ private = apply_refspecs(&refspecs, full_name);
+ if (private) {
+ free(full_name);
+ full_name = private;
+ }
+ }
+
+ commit = get_commit(e, full_name);
+ if (!commit) {
+ warning("%s: Unexpected object of type %s, skipping.",
+ e->name,
+ type_name(e->item->type));
+ continue;
+ }
+
+ switch(commit->object.type) {
+ case OBJ_COMMIT:
+ break;
+ case OBJ_BLOB:
+ export_blob(&commit->object.oid);
+ continue;
+ default: /* OBJ_TAG (nested tags) is already handled */
+ warning("Tag points to object of unexpected type %s, skipping.",
+ type_name(commit->object.type));
+ continue;
+ }
+
+ /*
+ * Make sure this ref gets properly updated eventually, whether
+ * through a commit or manually at the end.
+ */
+ if (e->item->type != OBJ_TAG)
+ string_list_append(&extra_refs, full_name)->util = commit;
+
+ if (!*revision_sources_at(&revision_sources, commit))
+ *revision_sources_at(&revision_sources, commit) = full_name;
+ }
+
+ string_list_sort(&extra_refs);
+ string_list_remove_duplicates(&extra_refs, 0);
+}
+
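+/*
+ * Emit the refs collected above that were not already updated as a
+ * side effect of exporting a commit: tags go through handle_tag(),
+ * while other refs become "reset" commands pointing at a mark, an
+ * object id, or the null oid (deletion) as appropriate.
+ */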
+static void handle_tags_and_duplicates(struct string_list *extras)
+{
+ struct commit *commit;
+ int i;
+
+ for (i = extras->nr - 1; i >= 0; i--) {
+ const char *name = extras->items[i].string;
+ struct object *object = extras->items[i].util;
+ int mark;
+
+ switch (object->type) {
+ case OBJ_TAG:
+ handle_tag(name, (struct tag *)object);
+ break;
+ case OBJ_COMMIT:
+ if (anonymize)
+ name = anonymize_refname(name);
+ /* create refs pointing to already seen commits */
+ commit = rewrite_commit((struct commit *)object);
+ if (!commit) {
+ /*
+ * Neither this object nor any of its
+ * ancestors touch any relevant paths, so
+ * it has been filtered to nothing. Delete
+ * it.
+ */
+ printf("reset %s\nfrom %s\n\n",
+ name, oid_to_hex(null_oid()));
+ continue;
+ }
+
+ mark = get_object_mark(&commit->object);
+ if (!mark) {
+ /*
+ * Getting here means we have a commit which
+ * was excluded by a negative refspec (e.g.
+ * fast-export ^HEAD HEAD). If we are
+ * referencing excluded commits, set the ref
+ * to the exact commit. Otherwise, the user
+ * wants the branch exported but every commit
+ * in its history to be deleted, which basically
+ * just means deletion of the ref.
+ */
+ if (!reference_excluded_commits) {
+ /* delete the ref */
+ printf("reset %s\nfrom %s\n\n",
+ name, oid_to_hex(null_oid()));
+ continue;
+ }
+ /* set ref to commit using oid, not mark */
+ printf("reset %s\nfrom %s\n\n", name,
+ oid_to_hex(&commit->object.oid));
+ continue;
+ }
+
+ printf("reset %s\nfrom :%d\n\n", name, mark);
+ show_progress();
+ break;
+ }
+ }
+}
+
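+/*
+ * Write the internal mark table to the given file, one ":<mark> <oid>"
+ * line per exported commit.
+ */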
+static void export_marks(char *file)
+{
+ unsigned int i;
+ uint32_t mark;
+ struct decoration_entry *deco = idnums.entries;
+ FILE *f;
+ int e = 0;
+
+ f = fopen_for_writing(file);
+ if (!f)
+ die_errno("Unable to open marks file %s for writing.", file);
+
+ for (i = 0; i < idnums.size; i++) {
+ if (deco->base && deco->base->type == 1) {
+ mark = ptr_to_mark(deco->decoration);
+ if (fprintf(f, ":%"PRIu32" %s\n", mark,
+ oid_to_hex(&deco->base->oid)) < 0) {
+ e = 1;
+ break;
+ }
+ }
+ deco++;
+ }
+
+ e |= ferror(f);
+ e |= fclose(f);
+ if (e)
+ error("Unable to write marks file %s.", file);
+}
+
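+/*
+ * Load marks written by a previous run.  Only commit marks are kept,
+ * and last_idnum is advanced past the highest mark seen so that newly
+ * assigned marks do not collide with imported ones.
+ */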
+static void import_marks(char *input_file, int check_exists)
+{
+ char line[512];
+ FILE *f;
+ struct stat sb;
+
+ if (check_exists && stat(input_file, &sb))
+ return;
+
+ f = xfopen(input_file, "r");
+ while (fgets(line, sizeof(line), f)) {
+ uint32_t mark;
+ char *line_end, *mark_end;
+ struct object_id oid;
+ struct object *object;
+ struct commit *commit;
+ enum object_type type;
+
+ line_end = strchr(line, '\n');
+ if (line[0] != ':' || !line_end)
+ die("corrupt mark line: %s", line);
+ *line_end = '\0';
+
+ mark = strtoumax(line + 1, &mark_end, 10);
+ if (!mark || mark_end == line + 1
+ || *mark_end != ' ' || get_oid_hex(mark_end + 1, &oid))
+ die("corrupt mark line: %s", line);
+
+ if (last_idnum < mark)
+ last_idnum = mark;
+
+ type = oid_object_info(the_repository, &oid, NULL);
+ if (type < 0)
+ die("object not found: %s", oid_to_hex(&oid));
+
+ if (type != OBJ_COMMIT)
+ /* only commits */
+ continue;
+
+ commit = lookup_commit(the_repository, &oid);
+ if (!commit)
+ die("not a commit? can't happen: %s", oid_to_hex(&oid));
+
+ object = &commit->object;
+
+ if (object->flags & SHOWN)
+ error("Object %s already has a mark", oid_to_hex(&oid));
+
+ mark_object(object, mark);
+
+ object->flags |= SHOWN;
+ }
+ fclose(f);
+}
+
+static void handle_deletes(void)
+{
+ int i;
+ for (i = 0; i < refspecs.nr; i++) {
+ struct refspec_item *refspec = &refspecs.items[i];
+ if (*refspec->src)
+ continue;
+
+ printf("reset %s\nfrom %s\n\n",
+ refspec->dst, oid_to_hex(null_oid()));
+ }
+}
+
+static char *anonymize_seed(void *data)
+{
+ return xstrdup(data);
+}
+
+static int parse_opt_anonymize_map(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct hashmap *map = opt->value;
+ const char *delim, *value;
+ size_t keylen;
+
+ BUG_ON_OPT_NEG(unset);
+
+ delim = strchr(arg, ':');
+ if (delim) {
+ keylen = delim - arg;
+ value = delim + 1;
+ } else {
+ keylen = strlen(arg);
+ value = arg;
+ }
+
+ if (!keylen || !*value)
+ return error(_("--anonymize-map token cannot be empty"));
+
+ anonymize_str(map, anonymize_seed, arg, keylen, (void *)value);
+
+ return 0;
+}
+
+int cmd_fast_export(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info revs;
+ struct commit *commit;
+ char *export_filename = NULL,
+ *import_filename = NULL,
+ *import_filename_if_exists = NULL;
+ uint32_t lastimportid;
+ struct string_list refspecs_list = STRING_LIST_INIT_NODUP;
+ struct string_list paths_of_changed_objects = STRING_LIST_INIT_DUP;
+ struct option options[] = {
+ OPT_INTEGER(0, "progress", &progress,
+ N_("show progress after <n> objects")),
+ OPT_CALLBACK(0, "signed-tags", &signed_tag_mode, N_("mode"),
+ N_("select handling of signed tags"),
+ parse_opt_signed_tag_mode),
+ OPT_CALLBACK(0, "tag-of-filtered-object", &tag_of_filtered_mode, N_("mode"),
+ N_("select handling of tags that tag filtered objects"),
+ parse_opt_tag_of_filtered_mode),
+ OPT_CALLBACK(0, "reencode", &reencode_mode, N_("mode"),
+ N_("select handling of commit messages in an alternate encoding"),
+ parse_opt_reencode_mode),
+ OPT_STRING(0, "export-marks", &export_filename, N_("file"),
+ N_("dump marks to this file")),
+ OPT_STRING(0, "import-marks", &import_filename, N_("file"),
+ N_("import marks from this file")),
+ OPT_STRING(0, "import-marks-if-exists",
+ &import_filename_if_exists,
+ N_("file"),
+ N_("import marks from this file if it exists")),
+ OPT_BOOL(0, "fake-missing-tagger", &fake_missing_tagger,
+ N_("fake a tagger when tags lack one")),
+ OPT_BOOL(0, "full-tree", &full_tree,
+ N_("output full tree for each commit")),
+ OPT_BOOL(0, "use-done-feature", &use_done_feature,
+ N_("use the done feature to terminate the stream")),
+ OPT_BOOL(0, "no-data", &no_data, N_("skip output of blob data")),
+ OPT_STRING_LIST(0, "refspec", &refspecs_list, N_("refspec"),
+ N_("apply refspec to exported refs")),
+ OPT_BOOL(0, "anonymize", &anonymize, N_("anonymize output")),
+ OPT_CALLBACK_F(0, "anonymize-map", &anonymized_seeds, N_("from:to"),
+ N_("convert <from> to <to> in anonymized output"),
+ PARSE_OPT_NONEG, parse_opt_anonymize_map),
+ OPT_BOOL(0, "reference-excluded-parents",
+ &reference_excluded_commits, N_("reference parents which are not in fast-export stream by object id")),
+ OPT_BOOL(0, "show-original-ids", &show_original_ids,
+ N_("show original object ids of blobs/commits")),
+ OPT_BOOL(0, "mark-tags", &mark_tags,
+ N_("label tags with mark ids")),
+
+ OPT_END()
+ };
+
+ if (argc == 1)
+ usage_with_options(fast_export_usage, options);
+
+ /* we handle encodings */
+ git_config(git_default_config, NULL);
+
+ repo_init_revisions(the_repository, &revs, prefix);
+ init_revision_sources(&revision_sources);
+ revs.topo_order = 1;
+ revs.sources = &revision_sources;
+ revs.rewrite_parents = 1;
+ argc = parse_options(argc, argv, prefix, options, fast_export_usage,
+ PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT);
+ argc = setup_revisions(argc, argv, &revs, NULL);
+ if (argc > 1)
+ usage_with_options(fast_export_usage, options);
+
+ if (anonymized_seeds.cmpfn && !anonymize)
+ die(_("the option '%s' requires '%s'"), "--anonymize-map", "--anonymize");
+
+ if (refspecs_list.nr) {
+ int i;
+
+ for (i = 0; i < refspecs_list.nr; i++)
+ refspec_append(&refspecs, refspecs_list.items[i].string);
+
+ string_list_clear(&refspecs_list, 1);
+ }
+
+ if (use_done_feature)
+ printf("feature done\n");
+
+ if (import_filename && import_filename_if_exists)
+ die(_("options '%s' and '%s' cannot be used together"), "--import-marks", "--import-marks-if-exists");
+ if (import_filename)
+ import_marks(import_filename, 0);
+ else if (import_filename_if_exists)
+ import_marks(import_filename_if_exists, 1);
+ lastimportid = last_idnum;
+
+ if (import_filename && revs.prune_data.nr)
+ full_tree = 1;
+
+ get_tags_and_duplicates(&revs.cmdline);
+
+ if (prepare_revision_walk(&revs))
+ die("revision walk setup failed");
+
+ revs.reverse = 1;
+ revs.diffopt.format_callback = show_filemodify;
+ revs.diffopt.format_callback_data = &paths_of_changed_objects;
+ revs.diffopt.flags.recursive = 1;
+ revs.diffopt.no_free = 1;
+ while ((commit = get_revision(&revs)))
+ handle_commit(commit, &revs, &paths_of_changed_objects);
+
+ handle_tags_and_duplicates(&extra_refs);
+ handle_tags_and_duplicates(&tag_refs);
+ handle_deletes();
+
+ if (export_filename && lastimportid != last_idnum)
+ export_marks(export_filename);
+
+ if (use_done_feature)
+ printf("done\n");
+
+ refspec_clear(&refspecs);
+ release_revisions(&revs);
+
+ return 0;
+}
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
new file mode 100644
index 0000000..7134683
--- /dev/null
+++ b/builtin/fast-import.c
@@ -0,0 +1,3645 @@
+#include "builtin.h"
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "lockfile.h"
+#include "object.h"
+#include "blob.h"
+#include "tree.h"
+#include "commit.h"
+#include "delta.h"
+#include "pack.h"
+#include "refs.h"
+#include "csum-file.h"
+#include "quote.h"
+#include "dir.h"
+#include "run-command.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "mem-pool.h"
+#include "commit-reach.h"
+#include "khash.h"
+#include "date.h"
+
+#define PACK_ID_BITS 16
+#define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
+#define DEPTH_BITS 13
+#define MAX_DEPTH ((1<<DEPTH_BITS)-1)
+
+/*
+ * We abuse the setuid bit on directories to mean "do not delta".
+ */
+#define NO_DELTA S_ISUID
+
+/*
+ * The amount of additional space required in order to write an object into the
+ * current pack. This is the hash lengths at the end of the pack, plus the
+ * length of one object ID.
+ */
+#define PACK_SIZE_THRESHOLD (the_hash_algo->rawsz * 3)
+
+struct object_entry {
+ struct pack_idx_entry idx;
+ struct hashmap_entry ent;
+ uint32_t type : TYPE_BITS,
+ pack_id : PACK_ID_BITS,
+ depth : DEPTH_BITS;
+};
+
+static int object_entry_hashcmp(const void *map_data UNUSED,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata)
+{
+ const struct object_id *oid = keydata;
+ const struct object_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct object_entry, ent);
+ if (oid)
+ return oidcmp(&e1->idx.oid, oid);
+
+ e2 = container_of(entry_or_key, const struct object_entry, ent);
+ return oidcmp(&e1->idx.oid, &e2->idx.oid);
+}
+
+struct object_entry_pool {
+ struct object_entry_pool *next_pool;
+ struct object_entry *next_free;
+ struct object_entry *end;
+ struct object_entry entries[FLEX_ARRAY]; /* more */
+};
+
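+/*
+ * Sparse table mapping mark numbers to objects.  It is a 1024-way
+ * radix trie: nodes with a non-zero shift hold child sets in
+ * data.sets, while leaf nodes (shift == 0) hold the stored pointers
+ * directly.  See insert_mark() and find_mark().
+ */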
+struct mark_set {
+ union {
+ struct object_id *oids[1024];
+ struct object_entry *marked[1024];
+ struct mark_set *sets[1024];
+ } data;
+ unsigned int shift;
+};
+
+struct last_object {
+ struct strbuf data;
+ off_t offset;
+ unsigned int depth;
+ unsigned no_swap : 1;
+};
+
+struct atom_str {
+ struct atom_str *next_atom;
+ unsigned short str_len;
+ char str_dat[FLEX_ARRAY]; /* more */
+};
+
+struct tree_content;
+struct tree_entry {
+ struct tree_content *tree;
+ struct atom_str *name;
+ struct tree_entry_ms {
+ uint16_t mode;
+ struct object_id oid;
+ } versions[2];
+};
+
+struct tree_content {
+ unsigned int entry_capacity; /* must match avail_tree_content */
+ unsigned int entry_count;
+ unsigned int delta_depth;
+ struct tree_entry *entries[FLEX_ARRAY]; /* more */
+};
+
+struct avail_tree_content {
+ unsigned int entry_capacity; /* must match tree_content */
+ struct avail_tree_content *next_avail;
+};
+
+struct branch {
+ struct branch *table_next_branch;
+ struct branch *active_next_branch;
+ const char *name;
+ struct tree_entry branch_tree;
+ uintmax_t last_commit;
+ uintmax_t num_notes;
+ unsigned active : 1;
+ unsigned delete : 1;
+ unsigned pack_id : PACK_ID_BITS;
+ struct object_id oid;
+};
+
+struct tag {
+ struct tag *next_tag;
+ const char *name;
+ unsigned int pack_id;
+ struct object_id oid;
+};
+
+struct hash_list {
+ struct hash_list *next;
+ struct object_id oid;
+};
+
+typedef enum {
+ WHENSPEC_RAW = 1,
+ WHENSPEC_RAW_PERMISSIVE,
+ WHENSPEC_RFC2822,
+ WHENSPEC_NOW
+} whenspec_type;
+
+struct recent_command {
+ struct recent_command *prev;
+ struct recent_command *next;
+ char *buf;
+};
+
+typedef void (*mark_set_inserter_t)(struct mark_set **s, struct object_id *oid, uintmax_t mark);
+typedef void (*each_mark_fn_t)(uintmax_t mark, void *obj, void *cbp);
+
+/* Configured limits on output */
+static unsigned long max_depth = 50;
+static off_t max_packsize;
+static int unpack_limit = 100;
+static int force_update;
+
+/* Stats and misc. counters */
+static uintmax_t alloc_count;
+static uintmax_t marks_set_count;
+static uintmax_t object_count_by_type[1 << TYPE_BITS];
+static uintmax_t duplicate_count_by_type[1 << TYPE_BITS];
+static uintmax_t delta_count_by_type[1 << TYPE_BITS];
+static uintmax_t delta_count_attempts_by_type[1 << TYPE_BITS];
+static unsigned long object_count;
+static unsigned long branch_count;
+static unsigned long branch_load_count;
+static int failure;
+static FILE *pack_edges;
+static unsigned int show_stats = 1;
+static int global_argc;
+static const char **global_argv;
+
+/* Memory pools */
+static struct mem_pool fi_mem_pool = {
+ .block_alloc = 2*1024*1024 - sizeof(struct mp_block),
+};
+
+/* Atom management */
+static unsigned int atom_table_sz = 4451;
+static unsigned int atom_cnt;
+static struct atom_str **atom_table;
+
+/* The .pack file being generated */
+static struct pack_idx_option pack_idx_opts;
+static unsigned int pack_id;
+static struct hashfile *pack_file;
+static struct packed_git *pack_data;
+static struct packed_git **all_packs;
+static off_t pack_size;
+
+/* Table of objects we've written. */
+static unsigned int object_entry_alloc = 5000;
+static struct object_entry_pool *blocks;
+static struct hashmap object_table;
+static struct mark_set *marks;
+static const char *export_marks_file;
+static const char *import_marks_file;
+static int import_marks_file_from_stream;
+static int import_marks_file_ignore_missing;
+static int import_marks_file_done;
+static int relative_marks_paths;
+
+/* Our last blob */
+static struct last_object last_blob = {
+ .data = STRBUF_INIT,
+ };
+
+/* Tree management */
+static unsigned int tree_entry_alloc = 1000;
+static void *avail_tree_entry;
+static unsigned int avail_tree_table_sz = 100;
+static struct avail_tree_content **avail_tree_table;
+static size_t tree_entry_allocd;
+static struct strbuf old_tree = STRBUF_INIT;
+static struct strbuf new_tree = STRBUF_INIT;
+
+/* Branch data */
+static unsigned long max_active_branches = 5;
+static unsigned long cur_active_branches;
+static unsigned long branch_table_sz = 1039;
+static struct branch **branch_table;
+static struct branch *active_branches;
+
+/* Tag data */
+static struct tag *first_tag;
+static struct tag *last_tag;
+
+/* Input stream parsing */
+static whenspec_type whenspec = WHENSPEC_RAW;
+static struct strbuf command_buf = STRBUF_INIT;
+static int unread_command_buf;
+static struct recent_command cmd_hist = {
+ .prev = &cmd_hist,
+ .next = &cmd_hist,
+};
+static struct recent_command *cmd_tail = &cmd_hist;
+static struct recent_command *rc_free;
+static unsigned int cmd_save = 100;
+static uintmax_t next_mark;
+static struct strbuf new_data = STRBUF_INIT;
+static int seen_data_command;
+static int require_explicit_termination;
+static int allow_unsafe_features;
+
+/* Signal handling */
+static volatile sig_atomic_t checkpoint_requested;
+
+/* Submodule marks */
+static struct string_list sub_marks_from = STRING_LIST_INIT_DUP;
+static struct string_list sub_marks_to = STRING_LIST_INIT_DUP;
+static kh_oid_map_t *sub_oid_map;
+
+/* Where to write output of cat-blob commands */
+static int cat_blob_fd = STDOUT_FILENO;
+
+static void parse_argv(void);
+static void parse_get_mark(const char *p);
+static void parse_cat_blob(const char *p);
+static void parse_ls(const char *p, struct branch *b);
+
+static void for_each_mark(struct mark_set *m, uintmax_t base, each_mark_fn_t callback, void *p)
+{
+ uintmax_t k;
+ if (m->shift) {
+ for (k = 0; k < 1024; k++) {
+ if (m->data.sets[k])
+ for_each_mark(m->data.sets[k], base + (k << m->shift), callback, p);
+ }
+ } else {
+ for (k = 0; k < 1024; k++) {
+ if (m->data.marked[k])
+ callback(base + k, m->data.marked[k], p);
+ }
+ }
+}
+
+static void dump_marks_fn(uintmax_t mark, void *object, void *cbp)
+{
+ struct object_entry *e = object;
+ FILE *f = cbp;
+
+ fprintf(f, ":%" PRIuMAX " %s\n", mark, oid_to_hex(&e->idx.oid));
+}
+
+static void write_branch_report(FILE *rpt, struct branch *b)
+{
+ fprintf(rpt, "%s:\n", b->name);
+
+ fprintf(rpt, " status :");
+ if (b->active)
+ fputs(" active", rpt);
+ if (b->branch_tree.tree)
+ fputs(" loaded", rpt);
+ if (is_null_oid(&b->branch_tree.versions[1].oid))
+ fputs(" dirty", rpt);
+ fputc('\n', rpt);
+
+ fprintf(rpt, " tip commit : %s\n", oid_to_hex(&b->oid));
+ fprintf(rpt, " old tree : %s\n",
+ oid_to_hex(&b->branch_tree.versions[0].oid));
+ fprintf(rpt, " cur tree : %s\n",
+ oid_to_hex(&b->branch_tree.versions[1].oid));
+ fprintf(rpt, " commit clock: %" PRIuMAX "\n", b->last_commit);
+
+ fputs(" last pack : ", rpt);
+ if (b->pack_id < MAX_PACK_ID)
+ fprintf(rpt, "%u", b->pack_id);
+ fputc('\n', rpt);
+
+ fputc('\n', rpt);
+}
+
+static void write_crash_report(const char *err)
+{
+ char *loc = git_pathdup("fast_import_crash_%"PRIuMAX, (uintmax_t) getpid());
+ FILE *rpt = fopen(loc, "w");
+ struct branch *b;
+ unsigned long lu;
+ struct recent_command *rc;
+
+ if (!rpt) {
+ error_errno("can't write crash report %s", loc);
+ free(loc);
+ return;
+ }
+
+ fprintf(stderr, "fast-import: dumping crash report to %s\n", loc);
+
+ fprintf(rpt, "fast-import crash report:\n");
+ fprintf(rpt, " fast-import process: %"PRIuMAX"\n", (uintmax_t) getpid());
+ fprintf(rpt, " parent process : %"PRIuMAX"\n", (uintmax_t) getppid());
+ fprintf(rpt, " at %s\n", show_date(time(NULL), 0, DATE_MODE(ISO8601)));
+ fputc('\n', rpt);
+
+ fputs("fatal: ", rpt);
+ fputs(err, rpt);
+ fputc('\n', rpt);
+
+ fputc('\n', rpt);
+ fputs("Most Recent Commands Before Crash\n", rpt);
+ fputs("---------------------------------\n", rpt);
+ for (rc = cmd_hist.next; rc != &cmd_hist; rc = rc->next) {
+ if (rc->next == &cmd_hist)
+ fputs("* ", rpt);
+ else
+ fputs(" ", rpt);
+ fputs(rc->buf, rpt);
+ fputc('\n', rpt);
+ }
+
+ fputc('\n', rpt);
+ fputs("Active Branch LRU\n", rpt);
+ fputs("-----------------\n", rpt);
+ fprintf(rpt, " active_branches = %lu cur, %lu max\n",
+ cur_active_branches,
+ max_active_branches);
+ fputc('\n', rpt);
+ fputs(" pos clock name\n", rpt);
+ fputs(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", rpt);
+ for (b = active_branches, lu = 0; b; b = b->active_next_branch)
+ fprintf(rpt, " %2lu) %6" PRIuMAX" %s\n",
+ ++lu, b->last_commit, b->name);
+
+ fputc('\n', rpt);
+ fputs("Inactive Branches\n", rpt);
+ fputs("-----------------\n", rpt);
+ for (lu = 0; lu < branch_table_sz; lu++) {
+ for (b = branch_table[lu]; b; b = b->table_next_branch)
+ write_branch_report(rpt, b);
+ }
+
+ if (first_tag) {
+ struct tag *tg;
+ fputc('\n', rpt);
+ fputs("Annotated Tags\n", rpt);
+ fputs("--------------\n", rpt);
+ for (tg = first_tag; tg; tg = tg->next_tag) {
+ fputs(oid_to_hex(&tg->oid), rpt);
+ fputc(' ', rpt);
+ fputs(tg->name, rpt);
+ fputc('\n', rpt);
+ }
+ }
+
+ fputc('\n', rpt);
+ fputs("Marks\n", rpt);
+ fputs("-----\n", rpt);
+ if (export_marks_file)
+ fprintf(rpt, " exported to %s\n", export_marks_file);
+ else
+ for_each_mark(marks, 0, dump_marks_fn, rpt);
+
+ fputc('\n', rpt);
+ fputs("-------------------\n", rpt);
+ fputs("END OF CRASH REPORT\n", rpt);
+ fclose(rpt);
+ free(loc);
+}
+
+static void end_packfile(void);
+static void unkeep_all_packs(void);
+static void dump_marks(void);
+
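+/*
+ * Fatal error handler: dump a crash report, finish the current
+ * packfile, drop the .keep files and write out the marks before
+ * exiting, guarding against recursive death via the 'zombie' flag.
+ */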
+static NORETURN void die_nicely(const char *err, va_list params)
+{
+ va_list cp;
+ static int zombie;
+ report_fn die_message_fn = get_die_message_routine();
+
+ va_copy(cp, params);
+ die_message_fn(err, params);
+
+ if (!zombie) {
+ char message[2 * PATH_MAX];
+
+ zombie = 1;
+ vsnprintf(message, sizeof(message), err, cp);
+ write_crash_report(message);
+ end_packfile();
+ unkeep_all_packs();
+ dump_marks();
+ }
+ exit(128);
+}
+
+#ifndef SIGUSR1 /* Windows, for example */
+
+static void set_checkpoint_signal(void)
+{
+}
+
+#else
+
+static void checkpoint_signal(int signo)
+{
+ checkpoint_requested = 1;
+}
+
+static void set_checkpoint_signal(void)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = checkpoint_signal;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ sigaction(SIGUSR1, &sa, NULL);
+}
+
+#endif
+
+static void alloc_objects(unsigned int cnt)
+{
+ struct object_entry_pool *b;
+
+ b = xmalloc(sizeof(struct object_entry_pool)
+ + cnt * sizeof(struct object_entry));
+ b->next_pool = blocks;
+ b->next_free = b->entries;
+ b->end = b->entries + cnt;
+ blocks = b;
+ alloc_count += cnt;
+}
+
+static struct object_entry *new_object(struct object_id *oid)
+{
+ struct object_entry *e;
+
+ if (blocks->next_free == blocks->end)
+ alloc_objects(object_entry_alloc);
+
+ e = blocks->next_free++;
+ oidcpy(&e->idx.oid, oid);
+ return e;
+}
+
+static struct object_entry *find_object(struct object_id *oid)
+{
+ return hashmap_get_entry_from_hash(&object_table, oidhash(oid), oid,
+ struct object_entry, ent);
+}
+
+static struct object_entry *insert_object(struct object_id *oid)
+{
+ struct object_entry *e;
+ unsigned int hash = oidhash(oid);
+
+ e = hashmap_get_entry_from_hash(&object_table, hash, oid,
+ struct object_entry, ent);
+ if (!e) {
+ e = new_object(oid);
+ e->idx.offset = 0;
+ hashmap_entry_init(&e->ent, hash);
+ hashmap_add(&object_table, &e->ent);
+ }
+
+ return e;
+}
+
+static void invalidate_pack_id(unsigned int id)
+{
+ unsigned long lu;
+ struct tag *t;
+ struct hashmap_iter iter;
+ struct object_entry *e;
+
+ hashmap_for_each_entry(&object_table, &iter, e, ent) {
+ if (e->pack_id == id)
+ e->pack_id = MAX_PACK_ID;
+ }
+
+ for (lu = 0; lu < branch_table_sz; lu++) {
+ struct branch *b;
+
+ for (b = branch_table[lu]; b; b = b->table_next_branch)
+ if (b->pack_id == id)
+ b->pack_id = MAX_PACK_ID;
+ }
+
+ for (t = first_tag; t; t = t->next_tag)
+ if (t->pack_id == id)
+ t->pack_id = MAX_PACK_ID;
+}
+
+static unsigned int hc_str(const char *s, size_t len)
+{
+ unsigned int r = 0;
+ while (len-- > 0)
+ r = r * 31 + *s++;
+ return r;
+}
+
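+/*
+ * Associate mark :idnum with the given object entry.  Extra levels
+ * are added on top of *top until the trie is deep enough to index
+ * idnum; intermediate nodes are then allocated on the way down as
+ * needed.
+ */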
+static void insert_mark(struct mark_set **top, uintmax_t idnum, struct object_entry *oe)
+{
+ struct mark_set *s = *top;
+
+ while ((idnum >> s->shift) >= 1024) {
+ s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
+ s->shift = (*top)->shift + 10;
+ s->data.sets[0] = *top;
+ *top = s;
+ }
+ while (s->shift) {
+ uintmax_t i = idnum >> s->shift;
+ idnum -= i << s->shift;
+ if (!s->data.sets[i]) {
+ s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
+ s->data.sets[i]->shift = s->shift - 10;
+ }
+ s = s->data.sets[i];
+ }
+ if (!s->data.marked[idnum])
+ marks_set_count++;
+ s->data.marked[idnum] = oe;
+}
+
+static void *find_mark(struct mark_set *s, uintmax_t idnum)
+{
+ uintmax_t orig_idnum = idnum;
+ struct object_entry *oe = NULL;
+ if ((idnum >> s->shift) < 1024) {
+ while (s && s->shift) {
+ uintmax_t i = idnum >> s->shift;
+ idnum -= i << s->shift;
+ s = s->data.sets[i];
+ }
+ if (s)
+ oe = s->data.marked[idnum];
+ }
+ if (!oe)
+ die("mark :%" PRIuMAX " not declared", orig_idnum);
+ return oe;
+}
+
+static struct atom_str *to_atom(const char *s, unsigned short len)
+{
+ unsigned int hc = hc_str(s, len) % atom_table_sz;
+ struct atom_str *c;
+
+ for (c = atom_table[hc]; c; c = c->next_atom)
+ if (c->str_len == len && !strncmp(s, c->str_dat, len))
+ return c;
+
+ c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
+ c->str_len = len;
+ memcpy(c->str_dat, s, len);
+ c->str_dat[len] = 0;
+ c->next_atom = atom_table[hc];
+ atom_table[hc] = c;
+ atom_cnt++;
+ return c;
+}
+
+static struct branch *lookup_branch(const char *name)
+{
+ unsigned int hc = hc_str(name, strlen(name)) % branch_table_sz;
+ struct branch *b;
+
+ for (b = branch_table[hc]; b; b = b->table_next_branch)
+ if (!strcmp(name, b->name))
+ return b;
+ return NULL;
+}
+
+static struct branch *new_branch(const char *name)
+{
+ unsigned int hc = hc_str(name, strlen(name)) % branch_table_sz;
+ struct branch *b = lookup_branch(name);
+
+ if (b)
+ die("Invalid attempt to create duplicate branch: %s", name);
+ if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
+ die("Branch name doesn't conform to GIT standards: %s", name);
+
+ b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
+ b->name = mem_pool_strdup(&fi_mem_pool, name);
+ b->table_next_branch = branch_table[hc];
+ b->branch_tree.versions[0].mode = S_IFDIR;
+ b->branch_tree.versions[1].mode = S_IFDIR;
+ b->num_notes = 0;
+ b->active = 0;
+ b->pack_id = MAX_PACK_ID;
+ branch_table[hc] = b;
+ branch_count++;
+ return b;
+}
+
+static unsigned int hc_entries(unsigned int cnt)
+{
+ cnt = cnt & 7 ? (cnt / 8) + 1 : cnt / 8;
+ return cnt < avail_tree_table_sz ? cnt : avail_tree_table_sz - 1;
+}
+
+static struct tree_content *new_tree_content(unsigned int cnt)
+{
+ struct avail_tree_content *f, *l = NULL;
+ struct tree_content *t;
+ unsigned int hc = hc_entries(cnt);
+
+ for (f = avail_tree_table[hc]; f; l = f, f = f->next_avail)
+ if (f->entry_capacity >= cnt)
+ break;
+
+ if (f) {
+ if (l)
+ l->next_avail = f->next_avail;
+ else
+ avail_tree_table[hc] = f->next_avail;
+ } else {
+ cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
+ f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
+ f->entry_capacity = cnt;
+ }
+
+ t = (struct tree_content*)f;
+ t->entry_count = 0;
+ t->delta_depth = 0;
+ return t;
+}
+
+static void release_tree_entry(struct tree_entry *e);
+static void release_tree_content(struct tree_content *t)
+{
+ struct avail_tree_content *f = (struct avail_tree_content*)t;
+ unsigned int hc = hc_entries(f->entry_capacity);
+ f->next_avail = avail_tree_table[hc];
+ avail_tree_table[hc] = f;
+}
+
+static void release_tree_content_recursive(struct tree_content *t)
+{
+ unsigned int i;
+ for (i = 0; i < t->entry_count; i++)
+ release_tree_entry(t->entries[i]);
+ release_tree_content(t);
+}
+
+static struct tree_content *grow_tree_content(
+ struct tree_content *t,
+ int amt)
+{
+ struct tree_content *r = new_tree_content(t->entry_count + amt);
+ r->entry_count = t->entry_count;
+ r->delta_depth = t->delta_depth;
+ COPY_ARRAY(r->entries, t->entries, t->entry_count);
+ release_tree_content(t);
+ return r;
+}
+
+static struct tree_entry *new_tree_entry(void)
+{
+ struct tree_entry *e;
+
+ if (!avail_tree_entry) {
+ unsigned int n = tree_entry_alloc;
+ tree_entry_allocd += n * sizeof(struct tree_entry);
+ ALLOC_ARRAY(e, n);
+ avail_tree_entry = e;
+ while (n-- > 1) {
+ *((void**)e) = e + 1;
+ e++;
+ }
+ *((void**)e) = NULL;
+ }
+
+ e = avail_tree_entry;
+ avail_tree_entry = *((void**)e);
+ return e;
+}
+
+static void release_tree_entry(struct tree_entry *e)
+{
+ if (e->tree)
+ release_tree_content_recursive(e->tree);
+ *((void**)e) = avail_tree_entry;
+ avail_tree_entry = e;
+}
+
+static struct tree_content *dup_tree_content(struct tree_content *s)
+{
+ struct tree_content *d;
+ struct tree_entry *a, *b;
+ unsigned int i;
+
+ if (!s)
+ return NULL;
+ d = new_tree_content(s->entry_count);
+ for (i = 0; i < s->entry_count; i++) {
+ a = s->entries[i];
+ b = new_tree_entry();
+ memcpy(b, a, sizeof(*a));
+ if (a->tree && is_null_oid(&b->versions[1].oid))
+ b->tree = dup_tree_content(a->tree);
+ else
+ b->tree = NULL;
+ d->entries[i] = b;
+ }
+ d->entry_count = s->entry_count;
+ d->delta_depth = s->delta_depth;
+
+ return d;
+}
+
+static void start_packfile(void)
+{
+ struct strbuf tmp_file = STRBUF_INIT;
+ struct packed_git *p;
+ int pack_fd;
+
+ pack_fd = odb_mkstemp(&tmp_file, "pack/tmp_pack_XXXXXX");
+ FLEX_ALLOC_STR(p, pack_name, tmp_file.buf);
+ strbuf_release(&tmp_file);
+
+ p->pack_fd = pack_fd;
+ p->do_not_close = 1;
+ pack_file = hashfd(pack_fd, p->pack_name);
+
+ pack_data = p;
+ pack_size = write_pack_header(pack_file, 0);
+ object_count = 0;
+
+ REALLOC_ARRAY(all_packs, pack_id + 1);
+ all_packs[pack_id] = p;
+}
+
+static const char *create_index(void)
+{
+ const char *tmpfile;
+ struct pack_idx_entry **idx, **c, **last;
+ struct object_entry *e;
+ struct object_entry_pool *o;
+
+ /* Build the table of object IDs. */
+ ALLOC_ARRAY(idx, object_count);
+ c = idx;
+ for (o = blocks; o; o = o->next_pool)
+ for (e = o->next_free; e-- != o->entries;)
+ if (pack_id == e->pack_id)
+ *c++ = &e->idx;
+ last = idx + object_count;
+ if (c != last)
+ die("internal consistency error creating the index");
+
+ tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
+ pack_data->hash);
+ free(idx);
+ return tmpfile;
+}
+
+static char *keep_pack(const char *curr_index_name)
+{
+ static const char *keep_msg = "fast-import";
+ struct strbuf name = STRBUF_INIT;
+ int keep_fd;
+
+ odb_pack_name(&name, pack_data->hash, "keep");
+ keep_fd = odb_pack_keep(name.buf);
+ if (keep_fd < 0)
+ die_errno("cannot create keep file");
+ write_or_die(keep_fd, keep_msg, strlen(keep_msg));
+ if (close(keep_fd))
+ die_errno("failed to write keep file");
+
+ odb_pack_name(&name, pack_data->hash, "pack");
+ if (finalize_object_file(pack_data->pack_name, name.buf))
+ die("cannot store pack file");
+
+ odb_pack_name(&name, pack_data->hash, "idx");
+ if (finalize_object_file(curr_index_name, name.buf))
+ die("cannot store index file");
+ free((void *)curr_index_name);
+ return strbuf_detach(&name, NULL);
+}
+
+static void unkeep_all_packs(void)
+{
+ struct strbuf name = STRBUF_INIT;
+ int k;
+
+ for (k = 0; k < pack_id; k++) {
+ struct packed_git *p = all_packs[k];
+ odb_pack_name(&name, p->hash, "keep");
+ unlink_or_warn(name.buf);
+ }
+ strbuf_release(&name);
+}
+
+static int loosen_small_pack(const struct packed_git *p)
+{
+ struct child_process unpack = CHILD_PROCESS_INIT;
+
+ if (lseek(p->pack_fd, 0, SEEK_SET) < 0)
+ die_errno("Failed seeking to start of '%s'", p->pack_name);
+
+ unpack.in = p->pack_fd;
+ unpack.git_cmd = 1;
+ unpack.stdout_to_stderr = 1;
+ strvec_push(&unpack.args, "unpack-objects");
+ if (!show_stats)
+ strvec_push(&unpack.args, "-q");
+
+ return run_command(&unpack);
+}
+
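+/*
+ * Close out the current packfile: fix up its header and footer, then
+ * either explode it into loose objects (when it holds no more than
+ * unpack_limit objects and unpack-objects succeeds) or index it, mark
+ * it with a .keep file and register it with the object store so its
+ * objects can be read back by this same process.
+ */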
+static void end_packfile(void)
+{
+ static int running;
+
+ if (running || !pack_data)
+ return;
+
+ running = 1;
+ clear_delta_base_cache();
+ if (object_count) {
+ struct packed_git *new_p;
+ struct object_id cur_pack_oid;
+ char *idx_name;
+ int i;
+ struct branch *b;
+ struct tag *t;
+
+ close_pack_windows(pack_data);
+ finalize_hashfile(pack_file, cur_pack_oid.hash, FSYNC_COMPONENT_PACK, 0);
+ fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
+ pack_data->pack_name, object_count,
+ cur_pack_oid.hash, pack_size);
+
+ if (object_count <= unpack_limit) {
+ if (!loosen_small_pack(pack_data)) {
+ invalidate_pack_id(pack_id);
+ goto discard_pack;
+ }
+ }
+
+ close(pack_data->pack_fd);
+ idx_name = keep_pack(create_index());
+
+ /* Register the packfile with core git's machinery. */
+ new_p = add_packed_git(idx_name, strlen(idx_name), 1);
+ if (!new_p)
+ die("core git rejected index %s", idx_name);
+ all_packs[pack_id] = new_p;
+ install_packed_git(the_repository, new_p);
+ free(idx_name);
+
+ /* Print the boundary */
+ if (pack_edges) {
+ fprintf(pack_edges, "%s:", new_p->pack_name);
+ for (i = 0; i < branch_table_sz; i++) {
+ for (b = branch_table[i]; b; b = b->table_next_branch) {
+ if (b->pack_id == pack_id)
+ fprintf(pack_edges, " %s",
+ oid_to_hex(&b->oid));
+ }
+ }
+ for (t = first_tag; t; t = t->next_tag) {
+ if (t->pack_id == pack_id)
+ fprintf(pack_edges, " %s",
+ oid_to_hex(&t->oid));
+ }
+ fputc('\n', pack_edges);
+ fflush(pack_edges);
+ }
+
+ pack_id++;
+ }
+ else {
+discard_pack:
+ close(pack_data->pack_fd);
+ unlink_or_warn(pack_data->pack_name);
+ }
+ FREE_AND_NULL(pack_data);
+ running = 0;
+
+ /* We can't carry a delta across packfiles. */
+ strbuf_release(&last_blob.data);
+ last_blob.offset = 0;
+ last_blob.depth = 0;
+}
+
+static void cycle_packfile(void)
+{
+ end_packfile();
+ start_packfile();
+}
+
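+/*
+ * Write a single object into the current packfile, deltifying it
+ * against *last when possible.  Returns non-zero without writing
+ * anything if the object already exists in this or another pack;
+ * may cycle to a fresh packfile first if the configured size limit
+ * would be exceeded.
+ */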
+static int store_object(
+ enum object_type type,
+ struct strbuf *dat,
+ struct last_object *last,
+ struct object_id *oidout,
+ uintmax_t mark)
+{
+ void *out, *delta;
+ struct object_entry *e;
+ unsigned char hdr[96];
+ struct object_id oid;
+ unsigned long hdrlen, deltalen;
+ git_hash_ctx c;
+ git_zstream s;
+
+ hdrlen = format_object_header((char *)hdr, sizeof(hdr), type,
+ dat->len);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_oid_fn(&oid, &c);
+ if (oidout)
+ oidcpy(oidout, &oid);
+
+ e = insert_object(&oid);
+ if (mark)
+ insert_mark(&marks, mark, e);
+ if (e->idx.offset) {
+ duplicate_count_by_type[type]++;
+ return 1;
+ } else if (find_sha1_pack(oid.hash,
+ get_all_packs(the_repository))) {
+ e->type = type;
+ e->pack_id = MAX_PACK_ID;
+ e->idx.offset = 1; /* just not zero! */
+ duplicate_count_by_type[type]++;
+ return 1;
+ }
+
+ if (last && last->data.len && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
+ delta_count_attempts_by_type[type]++;
+ delta = diff_delta(last->data.buf, last->data.len,
+ dat->buf, dat->len,
+ &deltalen, dat->len - the_hash_algo->rawsz);
+ } else
+ delta = NULL;
+
+ git_deflate_init(&s, pack_compression_level);
+ if (delta) {
+ s.next_in = delta;
+ s.avail_in = deltalen;
+ } else {
+ s.next_in = (void *)dat->buf;
+ s.avail_in = dat->len;
+ }
+ s.avail_out = git_deflate_bound(&s, s.avail_in);
+ s.next_out = out = xmalloc(s.avail_out);
+ while (git_deflate(&s, Z_FINISH) == Z_OK)
+ ; /* nothing */
+ git_deflate_end(&s);
+
+ /* Determine if we should auto-checkpoint. */
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + s.total_out) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + s.total_out) < pack_size) {
+
+ /* This new object needs to *not* have the current pack_id. */
+ e->pack_id = pack_id + 1;
+ cycle_packfile();
+
+ /* We cannot carry a delta into the new pack. */
+ if (delta) {
+ FREE_AND_NULL(delta);
+
+ git_deflate_init(&s, pack_compression_level);
+ s.next_in = (void *)dat->buf;
+ s.avail_in = dat->len;
+ s.avail_out = git_deflate_bound(&s, s.avail_in);
+ s.next_out = out = xrealloc(out, s.avail_out);
+ while (git_deflate(&s, Z_FINISH) == Z_OK)
+ ; /* nothing */
+ git_deflate_end(&s);
+ }
+ }
+
+ e->type = type;
+ e->pack_id = pack_id;
+ e->idx.offset = pack_size;
+ object_count++;
+ object_count_by_type[type]++;
+
+ crc32_begin(pack_file);
+
+ if (delta) {
+ off_t ofs = e->idx.offset - last->offset;
+ unsigned pos = sizeof(hdr) - 1;
+
+ delta_count_by_type[type]++;
+ e->depth = last->depth + 1;
+
+ hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
+ OBJ_OFS_DELTA, deltalen);
+ hashwrite(pack_file, hdr, hdrlen);
+ pack_size += hdrlen;
+
+ hdr[pos] = ofs & 127;
+ while (ofs >>= 7)
+ hdr[--pos] = 128 | (--ofs & 127);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
+ pack_size += sizeof(hdr) - pos;
+ } else {
+ e->depth = 0;
+ hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
+ type, dat->len);
+ hashwrite(pack_file, hdr, hdrlen);
+ pack_size += hdrlen;
+ }
+
+ hashwrite(pack_file, out, s.total_out);
+ pack_size += s.total_out;
+
+ e->idx.crc32 = crc32_end(pack_file);
+
+ free(out);
+ free(delta);
+ if (last) {
+ if (last->no_swap) {
+ last->data = *dat;
+ } else {
+ strbuf_swap(&last->data, dat);
+ }
+ last->offset = e->idx.offset;
+ last->depth = e->depth;
+ }
+ return 0;
+}
+
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
+{
+ if (hashfile_truncate(pack_file, checkpoint))
+ die_errno("cannot truncate pack to skip duplicate");
+ pack_size = checkpoint->offset;
+}
+
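+/*
+ * Stream a blob of known length from the input directly into the
+ * pack without buffering the whole thing in memory.  If the blob
+ * turns out to be a duplicate, the pack is truncated back to the
+ * checkpoint taken before writing started.
+ */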
+static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
+{
+ size_t in_sz = 64 * 1024, out_sz = 64 * 1024;
+ unsigned char *in_buf = xmalloc(in_sz);
+ unsigned char *out_buf = xmalloc(out_sz);
+ struct object_entry *e;
+ struct object_id oid;
+ unsigned long hdrlen;
+ off_t offset;
+ git_hash_ctx c;
+ git_zstream s;
+ struct hashfile_checkpoint checkpoint;
+ int status = Z_OK;
+
+ /* Determine if we should auto-checkpoint. */
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + len) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
+ cycle_packfile();
+
+ hashfile_checkpoint(pack_file, &checkpoint);
+ offset = checkpoint.offset;
+
+ hdrlen = format_object_header((char *)out_buf, out_sz, OBJ_BLOB, len);
+
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
+
+ crc32_begin(pack_file);
+
+ git_deflate_init(&s, pack_compression_level);
+
+ hdrlen = encode_in_pack_object_header(out_buf, out_sz, OBJ_BLOB, len);
+
+ s.next_out = out_buf + hdrlen;
+ s.avail_out = out_sz - hdrlen;
+
+ while (status != Z_STREAM_END) {
+ if (0 < len && !s.avail_in) {
+ size_t cnt = in_sz < len ? in_sz : (size_t)len;
+ size_t n = fread(in_buf, 1, cnt, stdin);
+ if (!n && feof(stdin))
+ die("EOF in data (%" PRIuMAX " bytes remaining)", len);
+
+ the_hash_algo->update_fn(&c, in_buf, n);
+ s.next_in = in_buf;
+ s.avail_in = n;
+ len -= n;
+ }
+
+ status = git_deflate(&s, len ? 0 : Z_FINISH);
+
+ if (!s.avail_out || status == Z_STREAM_END) {
+ size_t n = s.next_out - out_buf;
+ hashwrite(pack_file, out_buf, n);
+ pack_size += n;
+ s.next_out = out_buf;
+ s.avail_out = out_sz;
+ }
+
+ switch (status) {
+ case Z_OK:
+ case Z_BUF_ERROR:
+ case Z_STREAM_END:
+ continue;
+ default:
+ die("unexpected deflate failure: %d", status);
+ }
+ }
+ git_deflate_end(&s);
+ the_hash_algo->final_oid_fn(&oid, &c);
+
+ if (oidout)
+ oidcpy(oidout, &oid);
+
+ e = insert_object(&oid);
+
+ if (mark)
+ insert_mark(&marks, mark, e);
+
+ if (e->idx.offset) {
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(&checkpoint);
+
+ } else if (find_sha1_pack(oid.hash,
+ get_all_packs(the_repository))) {
+ e->type = OBJ_BLOB;
+ e->pack_id = MAX_PACK_ID;
+ e->idx.offset = 1; /* just not zero! */
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(&checkpoint);
+
+ } else {
+ e->depth = 0;
+ e->type = OBJ_BLOB;
+ e->pack_id = pack_id;
+ e->idx.offset = offset;
+ e->idx.crc32 = crc32_end(pack_file);
+ object_count++;
+ object_count_by_type[OBJ_BLOB]++;
+ }
+
+ free(in_buf);
+ free(out_buf);
+}
+
+/* All calls must be guarded by find_object() or find_mark() to
+ * ensure the 'struct object_entry' passed was written by this
+ * process instance. We unpack the entry by the offset, avoiding
+ * the need for the corresponding .idx file. This unpacking rule
+ * works because we only use OBJ_REF_DELTA within the packfiles
+ * created by fast-import.
+ *
+ * oe must not be NULL. Such an oe usually comes from giving
+ * an unknown SHA-1 to find_object() or an undefined mark to
+ * find_mark(). Callers must test for this condition and use
+ * the standard read_object_file() when it happens.
+ *
+ * oe->pack_id must not be MAX_PACK_ID. Such an oe is usually from
+ * find_mark(), where the mark was reloaded from an existing marks
+ * file and is referencing an object that this fast-import process
+ * instance did not write out to a packfile. Callers must test for
+ * this condition and use read_object_file() instead.
+ */
+static void *gfi_unpack_entry(
+ struct object_entry *oe,
+ unsigned long *sizep)
+{
+ enum object_type type;
+ struct packed_git *p = all_packs[oe->pack_id];
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
+ /* The object is stored in the packfile we are writing to
+ * and we have modified it since the last time we scanned
+ * back to read a previously written object. If an old
+ * window covered [p->pack_size, p->pack_size + rawsz) its
+ * data is stale and is not valid. Closing all windows
+ * and updating the packfile length ensures we can read
+ * the newly written data.
+ */
+ close_pack_windows(p);
+ hashflush(pack_file);
+
+ /* We have to offer rawsz bytes additional on the end of
+ * the packfile as the core unpacker code assumes the
+ * footer is present at the file end and must promise
+ * at least rawsz bytes within any window it maps. But
+ * we don't actually create the footer here.
+ */
+ p->pack_size = pack_size + the_hash_algo->rawsz;
+ }
+ return unpack_entry(the_repository, p, oe->idx.offset, &type, sizep);
+}
+
+static const char *get_mode(const char *str, uint16_t *modep)
+{
+ unsigned char c;
+ uint16_t mode = 0;
+
+ while ((c = *str++) != ' ') {
+ if (c < '0' || c > '7')
+ return NULL;
+ mode = (mode << 3) + (c - '0');
+ }
+ *modep = mode;
+ return str;
+}
+
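+/*
+ * Populate root->tree with the entries of the tree object named by
+ * root->versions[1].oid, reading it from the in-progress pack when we
+ * wrote it ourselves and from the object database otherwise.
+ */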
+static void load_tree(struct tree_entry *root)
+{
+ struct object_id *oid = &root->versions[1].oid;
+ struct object_entry *myoe;
+ struct tree_content *t;
+ unsigned long size;
+ char *buf;
+ const char *c;
+
+ root->tree = t = new_tree_content(8);
+ if (is_null_oid(oid))
+ return;
+
+ myoe = find_object(oid);
+ if (myoe && myoe->pack_id != MAX_PACK_ID) {
+ if (myoe->type != OBJ_TREE)
+ die("Not a tree: %s", oid_to_hex(oid));
+ t->delta_depth = myoe->depth;
+ buf = gfi_unpack_entry(myoe, &size);
+ if (!buf)
+ die("Can't load tree %s", oid_to_hex(oid));
+ } else {
+ enum object_type type;
+ buf = read_object_file(oid, &type, &size);
+ if (!buf || type != OBJ_TREE)
+ die("Can't load tree %s", oid_to_hex(oid));
+ }
+
+ c = buf;
+ while (c != (buf + size)) {
+ struct tree_entry *e = new_tree_entry();
+
+ if (t->entry_count == t->entry_capacity)
+ root->tree = t = grow_tree_content(t, t->entry_count);
+ t->entries[t->entry_count++] = e;
+
+ e->tree = NULL;
+ c = get_mode(c, &e->versions[1].mode);
+ if (!c)
+ die("Corrupt mode in %s", oid_to_hex(oid));
+ e->versions[0].mode = e->versions[1].mode;
+ e->name = to_atom(c, strlen(c));
+ c += e->name->str_len + 1;
+ oidread(&e->versions[0].oid, (unsigned char *)c);
+ oidread(&e->versions[1].oid, (unsigned char *)c);
+ c += the_hash_algo->rawsz;
+ }
+ free(buf);
+}
+
+static int tecmp0(const void *_a, const void *_b)
+{
+ struct tree_entry *a = *((struct tree_entry**)_a);
+ struct tree_entry *b = *((struct tree_entry**)_b);
+ return base_name_compare(
+ a->name->str_dat, a->name->str_len, a->versions[0].mode,
+ b->name->str_dat, b->name->str_len, b->versions[0].mode);
+}
+
+static int tecmp1(const void *_a, const void *_b)
+{
+ struct tree_entry *a = *((struct tree_entry**)_a);
+ struct tree_entry *b = *((struct tree_entry**)_b);
+ return base_name_compare(
+ a->name->str_dat, a->name->str_len, a->versions[1].mode,
+ b->name->str_dat, b->name->str_len, b->versions[1].mode);
+}
+
+static void mktree(struct tree_content *t, int v, struct strbuf *b)
+{
+ size_t maxlen = 0;
+ unsigned int i;
+
+ if (!v)
+ QSORT(t->entries, t->entry_count, tecmp0);
+ else
+ QSORT(t->entries, t->entry_count, tecmp1);
+
+ for (i = 0; i < t->entry_count; i++) {
+ if (t->entries[i]->versions[v].mode)
+ maxlen += t->entries[i]->name->str_len + 34;
+ }
+
+ strbuf_reset(b);
+ strbuf_grow(b, maxlen);
+ for (i = 0; i < t->entry_count; i++) {
+ struct tree_entry *e = t->entries[i];
+ if (!e->versions[v].mode)
+ continue;
+ strbuf_addf(b, "%o %s%c",
+ (unsigned int)(e->versions[v].mode & ~NO_DELTA),
+ e->name->str_dat, '\0');
+ strbuf_add(b, e->versions[v].oid.hash, the_hash_algo->rawsz);
+ }
+}
+
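+/*
+ * Write out this tree (and, recursively, any modified subtrees),
+ * deltifying it against the previously stored version when that
+ * version lives in the current pack and deltas are not disabled via
+ * NO_DELTA.  Entries deleted since the last version are compacted
+ * away afterwards.
+ */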
+static void store_tree(struct tree_entry *root)
+{
+ struct tree_content *t;
+ unsigned int i, j, del;
+ struct last_object lo = { STRBUF_INIT, 0, 0, /* no_swap */ 1 };
+ struct object_entry *le = NULL;
+
+ if (!is_null_oid(&root->versions[1].oid))
+ return;
+
+ if (!root->tree)
+ load_tree(root);
+ t = root->tree;
+
+ for (i = 0; i < t->entry_count; i++) {
+ if (t->entries[i]->tree)
+ store_tree(t->entries[i]);
+ }
+
+ if (!(root->versions[0].mode & NO_DELTA))
+ le = find_object(&root->versions[0].oid);
+ if (S_ISDIR(root->versions[0].mode) && le && le->pack_id == pack_id) {
+ mktree(t, 0, &old_tree);
+ lo.data = old_tree;
+ lo.offset = le->idx.offset;
+ lo.depth = t->delta_depth;
+ }
+
+ mktree(t, 1, &new_tree);
+ store_object(OBJ_TREE, &new_tree, &lo, &root->versions[1].oid, 0);
+
+ t->delta_depth = lo.depth;
+ for (i = 0, j = 0, del = 0; i < t->entry_count; i++) {
+ struct tree_entry *e = t->entries[i];
+ if (e->versions[1].mode) {
+ e->versions[0].mode = e->versions[1].mode;
+ oidcpy(&e->versions[0].oid, &e->versions[1].oid);
+ t->entries[j++] = e;
+ } else {
+ release_tree_entry(e);
+ del++;
+ }
+ }
+ t->entry_count -= del;
+}
+
+static void tree_content_replace(
+ struct tree_entry *root,
+ const struct object_id *oid,
+ const uint16_t mode,
+ struct tree_content *newtree)
+{
+ if (!S_ISDIR(mode))
+ die("Root cannot be a non-directory");
+ oidclr(&root->versions[0].oid);
+ oidcpy(&root->versions[1].oid, oid);
+ if (root->tree)
+ release_tree_content_recursive(root->tree);
+ root->tree = newtree;
+}
+
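+/*
+ * Set the entry at path p under root to the given oid and mode
+ * (optionally attaching a preloaded subtree), creating intermediate
+ * directories as needed.  Returns 1 if the tree was modified, 0 if
+ * the entry already had exactly that value.
+ */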
+static int tree_content_set(
+ struct tree_entry *root,
+ const char *p,
+ const struct object_id *oid,
+ const uint16_t mode,
+ struct tree_content *subtree)
+{
+ struct tree_content *t;
+ const char *slash1;
+ unsigned int i, n;
+ struct tree_entry *e;
+
+ slash1 = strchrnul(p, '/');
+ n = slash1 - p;
+ if (!n)
+ die("Empty path component found in input");
+ if (!*slash1 && !S_ISDIR(mode) && subtree)
+ die("Non-directories cannot have subtrees");
+
+ if (!root->tree)
+ load_tree(root);
+ t = root->tree;
+ for (i = 0; i < t->entry_count; i++) {
+ e = t->entries[i];
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
+ if (!*slash1) {
+ if (!S_ISDIR(mode)
+ && e->versions[1].mode == mode
+ && oideq(&e->versions[1].oid, oid))
+ return 0;
+ e->versions[1].mode = mode;
+ oidcpy(&e->versions[1].oid, oid);
+ if (e->tree)
+ release_tree_content_recursive(e->tree);
+ e->tree = subtree;
+
+ /*
+ * We need to leave e->versions[0].sha1 alone
+ * to avoid modifying the preimage tree used
+ * when writing out the parent directory.
+ * But after replacing the subdir with a
+ * completely different one, it's not a good
+ * delta base any more, and besides, we've
+ * thrown away the tree entries needed to
+ * make a delta against it.
+ *
+ * So let's just explicitly disable deltas
+ * for the subtree.
+ */
+ if (S_ISDIR(e->versions[0].mode))
+ e->versions[0].mode |= NO_DELTA;
+
+ oidclr(&root->versions[1].oid);
+ return 1;
+ }
+ if (!S_ISDIR(e->versions[1].mode)) {
+ e->tree = new_tree_content(8);
+ e->versions[1].mode = S_IFDIR;
+ }
+ if (!e->tree)
+ load_tree(e);
+ if (tree_content_set(e, slash1 + 1, oid, mode, subtree)) {
+ oidclr(&root->versions[1].oid);
+ return 1;
+ }
+ return 0;
+ }
+ }
+
+ if (t->entry_count == t->entry_capacity)
+ root->tree = t = grow_tree_content(t, t->entry_count);
+ e = new_tree_entry();
+ e->name = to_atom(p, n);
+ e->versions[0].mode = 0;
+ oidclr(&e->versions[0].oid);
+ t->entries[t->entry_count++] = e;
+ if (*slash1) {
+ e->tree = new_tree_content(8);
+ e->versions[1].mode = S_IFDIR;
+ tree_content_set(e, slash1 + 1, oid, mode, subtree);
+ } else {
+ e->tree = subtree;
+ e->versions[1].mode = mode;
+ oidcpy(&e->versions[1].oid, oid);
+ }
+ oidclr(&root->versions[1].oid);
+ return 1;
+}
+
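+/*
+ * Delete the entry at path p under root.  When backup_leaf is given,
+ * the removed entry is copied into it before being cleared.  Returns
+ * 1 if the path was removed (or provably cannot exist), 0 if it was
+ * not found.
+ */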
+static int tree_content_remove(
+ struct tree_entry *root,
+ const char *p,
+ struct tree_entry *backup_leaf,
+ int allow_root)
+{
+ struct tree_content *t;
+ const char *slash1;
+ unsigned int i, n;
+ struct tree_entry *e;
+
+ slash1 = strchrnul(p, '/');
+ n = slash1 - p;
+
+ if (!root->tree)
+ load_tree(root);
+
+ if (!*p && allow_root) {
+ e = root;
+ goto del_entry;
+ }
+
+ t = root->tree;
+ for (i = 0; i < t->entry_count; i++) {
+ e = t->entries[i];
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
+ if (*slash1 && !S_ISDIR(e->versions[1].mode))
+ /*
+ * If p names a file in some subdirectory, and a
+ * file or symlink matching the name of the
+ * parent directory of p exists, then p cannot
+ * exist and need not be deleted.
+ */
+ return 1;
+ if (!*slash1 || !S_ISDIR(e->versions[1].mode))
+ goto del_entry;
+ if (!e->tree)
+ load_tree(e);
+ if (tree_content_remove(e, slash1 + 1, backup_leaf, 0)) {
+ for (n = 0; n < e->tree->entry_count; n++) {
+ if (e->tree->entries[n]->versions[1].mode) {
+ oidclr(&root->versions[1].oid);
+ return 1;
+ }
+ }
+ backup_leaf = NULL;
+ goto del_entry;
+ }
+ return 0;
+ }
+ }
+ return 0;
+
+del_entry:
+ if (backup_leaf)
+ memcpy(backup_leaf, e, sizeof(*backup_leaf));
+ else if (e->tree)
+ release_tree_content_recursive(e->tree);
+ e->tree = NULL;
+ e->versions[1].mode = 0;
+ oidclr(&e->versions[1].oid);
+ oidclr(&root->versions[1].oid);
+ return 1;
+}
+
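+/*
+ * Look up the entry at path p under root and copy it into *leaf,
+ * duplicating its in-memory subtree when the entry has not yet been
+ * written out.  Returns 1 if found, 0 otherwise.
+ */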
+static int tree_content_get(
+ struct tree_entry *root,
+ const char *p,
+ struct tree_entry *leaf,
+ int allow_root)
+{
+ struct tree_content *t;
+ const char *slash1;
+ unsigned int i, n;
+ struct tree_entry *e;
+
+ slash1 = strchrnul(p, '/');
+ n = slash1 - p;
+ if (!n && !allow_root)
+ die("Empty path component found in input");
+
+ if (!root->tree)
+ load_tree(root);
+
+ if (!n) {
+ e = root;
+ goto found_entry;
+ }
+
+ t = root->tree;
+ for (i = 0; i < t->entry_count; i++) {
+ e = t->entries[i];
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
+ if (!*slash1)
+ goto found_entry;
+ if (!S_ISDIR(e->versions[1].mode))
+ return 0;
+ if (!e->tree)
+ load_tree(e);
+ return tree_content_get(e, slash1 + 1, leaf, 0);
+ }
+ }
+ return 0;
+
+found_entry:
+ memcpy(leaf, e, sizeof(*leaf));
+ if (e->tree && is_null_oid(&e->versions[1].oid))
+ leaf->tree = dup_tree_content(e->tree);
+ else
+ leaf->tree = NULL;
+ return 1;
+}
+
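+/*
+ * Point the branch's ref at its final commit. Unless updates are
+ * forced, only fast-forward updates are allowed: the new tip must
+ * contain the old one.
+ */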
+static int update_branch(struct branch *b)
+{
+ static const char *msg = "fast-import";
+ struct ref_transaction *transaction;
+ struct object_id old_oid;
+ struct strbuf err = STRBUF_INIT;
+
+ if (is_null_oid(&b->oid)) {
+ if (b->delete)
+ delete_ref(NULL, b->name, NULL, 0);
+ return 0;
+ }
+ if (read_ref(b->name, &old_oid))
+ oidclr(&old_oid);
+ if (!force_update && !is_null_oid(&old_oid)) {
+ struct commit *old_cmit, *new_cmit;
+
+ old_cmit = lookup_commit_reference_gently(the_repository,
+ &old_oid, 0);
+ new_cmit = lookup_commit_reference_gently(the_repository,
+ &b->oid, 0);
+ if (!old_cmit || !new_cmit)
+ return error("Branch %s is missing commits.", b->name);
+
+ if (!in_merge_bases(old_cmit, new_cmit)) {
+ warning("Not updating %s"
+ " (new tip %s does not contain %s)",
+ b->name, oid_to_hex(&b->oid),
+ oid_to_hex(&old_oid));
+ return -1;
+ }
+ }
+ transaction = ref_transaction_begin(&err);
+ if (!transaction ||
+ ref_transaction_update(transaction, b->name, &b->oid, &old_oid,
+ 0, msg, &err) ||
+ ref_transaction_commit(transaction, &err)) {
+ ref_transaction_free(transaction);
+ error("%s", err.buf);
+ strbuf_release(&err);
+ return -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+ return 0;
+}
+
+static void dump_branches(void)
+{
+ unsigned int i;
+ struct branch *b;
+
+ for (i = 0; i < branch_table_sz; i++) {
+ for (b = branch_table[i]; b; b = b->table_next_branch)
+ failure |= update_branch(b);
+ }
+}
+
+static void dump_tags(void)
+{
+ static const char *msg = "fast-import";
+ struct tag *t;
+ struct strbuf ref_name = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+ struct ref_transaction *transaction;
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction) {
+ failure |= error("%s", err.buf);
+ goto cleanup;
+ }
+ for (t = first_tag; t; t = t->next_tag) {
+ strbuf_reset(&ref_name);
+ strbuf_addf(&ref_name, "refs/tags/%s", t->name);
+
+ if (ref_transaction_update(transaction, ref_name.buf,
+ &t->oid, NULL, 0, msg, &err)) {
+ failure |= error("%s", err.buf);
+ goto cleanup;
+ }
+ }
+ if (ref_transaction_commit(transaction, &err))
+ failure |= error("%s", err.buf);
+
+ cleanup:
+ ref_transaction_free(transaction);
+ strbuf_release(&ref_name);
+ strbuf_release(&err);
+}
+
+static void dump_marks(void)
+{
+ struct lock_file mark_lock = LOCK_INIT;
+ FILE *f;
+
+ if (!export_marks_file || (import_marks_file && !import_marks_file_done))
+ return;
+
+ if (safe_create_leading_directories_const(export_marks_file)) {
+ failure |= error_errno("unable to create leading directories of %s",
+ export_marks_file);
+ return;
+ }
+
+ if (hold_lock_file_for_update(&mark_lock, export_marks_file, 0) < 0) {
+ failure |= error_errno("Unable to write marks file %s",
+ export_marks_file);
+ return;
+ }
+
+ f = fdopen_lock_file(&mark_lock, "w");
+ if (!f) {
+ int saved_errno = errno;
+ rollback_lock_file(&mark_lock);
+ failure |= error("Unable to write marks file %s: %s",
+ export_marks_file, strerror(saved_errno));
+ return;
+ }
+
+ for_each_mark(marks, 0, dump_marks_fn, f);
+ if (commit_lock_file(&mark_lock)) {
+ failure |= error_errno("Unable to write file %s",
+ export_marks_file);
+ return;
+ }
+}
+
+static void insert_object_entry(struct mark_set **s, struct object_id *oid, uintmax_t mark)
+{
+ struct object_entry *e;
+ e = find_object(oid);
+ if (!e) {
+ enum object_type type = oid_object_info(the_repository,
+ oid, NULL);
+ if (type < 0)
+ die("object not found: %s", oid_to_hex(oid));
+ e = insert_object(oid);
+ e->type = type;
+ e->pack_id = MAX_PACK_ID;
+ e->idx.offset = 1; /* just not zero! */
+ }
+ insert_mark(s, mark, e);
+}
+
+static void insert_oid_entry(struct mark_set **s, struct object_id *oid, uintmax_t mark)
+{
+ insert_mark(s, mark, xmemdupz(oid, sizeof(*oid)));
+}
+
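+/*
+ * Each line of a marks file has the form ":<mark> <hex-oid>", for
+ * example ":1 0123456789abcdef...". Malformed lines are fatal.
+ */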
+static void read_mark_file(struct mark_set **s, FILE *f, mark_set_inserter_t inserter)
+{
+ char line[512];
+ while (fgets(line, sizeof(line), f)) {
+ uintmax_t mark;
+ char *end;
+ struct object_id oid;
+
+ /* Ensure SHA-1 objects are padded with zeros. */
+ memset(oid.hash, 0, sizeof(oid.hash));
+
+ end = strchr(line, '\n');
+ if (line[0] != ':' || !end)
+ die("corrupt mark line: %s", line);
+ *end = 0;
+ mark = strtoumax(line + 1, &end, 10);
+ if (!mark || end == line + 1
+ || *end != ' '
+ || get_oid_hex_any(end + 1, &oid) == GIT_HASH_UNKNOWN)
+ die("corrupt mark line: %s", line);
+ inserter(s, &oid, mark);
+ }
+}
+
+static void read_marks(void)
+{
+ FILE *f = fopen(import_marks_file, "r");
+ if (f)
+ ;
+ else if (import_marks_file_ignore_missing && errno == ENOENT)
+ goto done; /* Marks file does not exist */
+ else
+ die_errno("cannot read '%s'", import_marks_file);
+ read_mark_file(&marks, f, insert_object_entry);
+ fclose(f);
+done:
+ import_marks_file_done = 1;
+}
+
+
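+/*
+ * Read the next input line into command_buf, honoring a previously
+ * unread command, skipping '#' comment lines, and remembering the
+ * command in the recent-command history ring.
+ */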
+static int read_next_command(void)
+{
+ static int stdin_eof = 0;
+
+ if (stdin_eof) {
+ unread_command_buf = 0;
+ return EOF;
+ }
+
+ for (;;) {
+ if (unread_command_buf) {
+ unread_command_buf = 0;
+ } else {
+ struct recent_command *rc;
+
+ stdin_eof = strbuf_getline_lf(&command_buf, stdin);
+ if (stdin_eof)
+ return EOF;
+
+ if (!seen_data_command
+ && !starts_with(command_buf.buf, "feature ")
+ && !starts_with(command_buf.buf, "option ")) {
+ parse_argv();
+ }
+
+ rc = rc_free;
+ if (rc)
+ rc_free = rc->next;
+ else {
+ rc = cmd_hist.next;
+ cmd_hist.next = rc->next;
+ cmd_hist.next->prev = &cmd_hist;
+ free(rc->buf);
+ }
+
+ rc->buf = xstrdup(command_buf.buf);
+ rc->prev = cmd_tail;
+ rc->next = cmd_hist.prev;
+ rc->prev->next = rc;
+ cmd_tail = rc;
+ }
+ if (command_buf.buf[0] == '#')
+ continue;
+ return 0;
+ }
+}
+
+static void skip_optional_lf(void)
+{
+ int term_char = fgetc(stdin);
+ if (term_char != '\n' && term_char != EOF)
+ ungetc(term_char, stdin);
+}
+
+static void parse_mark(void)
+{
+ const char *v;
+ if (skip_prefix(command_buf.buf, "mark :", &v)) {
+ next_mark = strtoumax(v, NULL, 10);
+ read_next_command();
+ }
+ else
+ next_mark = 0;
+}
+
+static void parse_original_identifier(void)
+{
+ const char *v;
+ if (skip_prefix(command_buf.buf, "original-oid ", &v))
+ read_next_command();
+}
+
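+/*
+ * Parse a "data" command. Both the exact-length form ("data <count>")
+ * and the delimited form ("data <<DELIM") are accepted. When a non-zero
+ * limit is given and the declared length exceeds it, the payload is
+ * left unread and its length is reported back through len_res.
+ */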
+static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
+{
+ const char *data;
+ strbuf_reset(sb);
+
+ if (!skip_prefix(command_buf.buf, "data ", &data))
+ die("Expected 'data n' command, found: %s", command_buf.buf);
+
+ if (skip_prefix(data, "<<", &data)) {
+ char *term = xstrdup(data);
+ size_t term_len = command_buf.len - (data - command_buf.buf);
+
+ for (;;) {
+ if (strbuf_getline_lf(&command_buf, stdin) == EOF)
+ die("EOF in data (terminator '%s' not found)", term);
+ if (term_len == command_buf.len
+ && !strcmp(term, command_buf.buf))
+ break;
+ strbuf_addbuf(sb, &command_buf);
+ strbuf_addch(sb, '\n');
+ }
+ free(term);
+ }
+ else {
+ uintmax_t len = strtoumax(data, NULL, 10);
+ size_t n = 0, length = (size_t)len;
+
+ if (limit && limit < len) {
+ *len_res = len;
+ return 0;
+ }
+ if (length < len)
+ die("data is too large to use in this context");
+
+ while (n < length) {
+ size_t s = strbuf_fread(sb, length - n, stdin);
+ if (!s && feof(stdin))
+ die("EOF in data (%lu bytes remaining)",
+ (unsigned long)(length - n));
+ n += s;
+ }
+ }
+
+ skip_optional_lf();
+ return 1;
+}
+
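+/*
+ * A raw date is "<seconds since epoch> <+|-><tz offset>". In strict
+ * mode a timezone offset larger than 1400 is rejected as broken.
+ */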
+static int validate_raw_date(const char *src, struct strbuf *result, int strict)
+{
+ const char *orig_src = src;
+ char *endp;
+ unsigned long num;
+
+ errno = 0;
+
+ num = strtoul(src, &endp, 10);
+ /*
+ * NEEDSWORK: perhaps check for reasonable values? For example, we
+ * could error on values representing times more than a
+ * day in the future.
+ */
+ if (errno || endp == src || *endp != ' ')
+ return -1;
+
+ src = endp + 1;
+ if (*src != '-' && *src != '+')
+ return -1;
+
+ num = strtoul(src + 1, &endp, 10);
+ /*
+ * NEEDSWORK: check for brokenness other than num > 1400, such as
+ * (num % 100) >= 60, or ((num % 100) % 15) != 0 ?
+ */
+ if (errno || endp == src + 1 || *endp || /* did not parse */
+ (strict && (1400 < num)) /* parsed a broken timezone */
+ )
+ return -1;
+
+ strbuf_addstr(result, orig_src);
+ return 0;
+}
+
+static char *parse_ident(const char *buf)
+{
+ const char *ltgt;
+ size_t name_len;
+ struct strbuf ident = STRBUF_INIT;
+
+ /* ensure there is a space delimiter even if there is no name */
+ if (*buf == '<')
+ --buf;
+
+ ltgt = buf + strcspn(buf, "<>");
+ if (*ltgt != '<')
+ die("Missing < in ident string: %s", buf);
+ if (ltgt != buf && ltgt[-1] != ' ')
+ die("Missing space before < in ident string: %s", buf);
+ ltgt = ltgt + 1 + strcspn(ltgt + 1, "<>");
+ if (*ltgt != '>')
+ die("Missing > in ident string: %s", buf);
+ ltgt++;
+ if (*ltgt != ' ')
+ die("Missing space after > in ident string: %s", buf);
+ ltgt++;
+ name_len = ltgt - buf;
+ strbuf_add(&ident, buf, name_len);
+
+ switch (whenspec) {
+ case WHENSPEC_RAW:
+ if (validate_raw_date(ltgt, &ident, 1) < 0)
+ die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
+ break;
+ case WHENSPEC_RAW_PERMISSIVE:
+ if (validate_raw_date(ltgt, &ident, 0) < 0)
+ die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
+ break;
+ case WHENSPEC_RFC2822:
+ if (parse_date(ltgt, &ident) < 0)
+ die("Invalid rfc2822 date \"%s\" in ident: %s", ltgt, buf);
+ break;
+ case WHENSPEC_NOW:
+ if (strcmp("now", ltgt))
+ die("Date in ident must be 'now': %s", buf);
+ datestamp(&ident);
+ break;
+ }
+
+ return strbuf_detach(&ident, NULL);
+}
+
+static void parse_and_store_blob(
+ struct last_object *last,
+ struct object_id *oidout,
+ uintmax_t mark)
+{
+ static struct strbuf buf = STRBUF_INIT;
+ uintmax_t len;
+
+ if (parse_data(&buf, big_file_threshold, &len))
+ store_object(OBJ_BLOB, &buf, last, oidout, mark);
+ else {
+ if (last) {
+ strbuf_release(&last->data);
+ last->offset = 0;
+ last->depth = 0;
+ }
+ stream_blob(len, oidout, mark);
+ skip_optional_lf();
+ }
+}
+
+static void parse_new_blob(void)
+{
+ read_next_command();
+ parse_mark();
+ parse_original_identifier();
+ parse_and_store_blob(&last_blob, NULL, next_mark);
+}
+
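+/*
+ * Evict in-memory branch trees, least recently committed first, until
+ * the number of active branches drops below max_active_branches.
+ */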
+static void unload_one_branch(void)
+{
+ while (cur_active_branches
+ && cur_active_branches >= max_active_branches) {
+ uintmax_t min_commit = ULONG_MAX;
+ struct branch *e, *l = NULL, *p = NULL;
+
+ for (e = active_branches; e; e = e->active_next_branch) {
+ if (e->last_commit < min_commit) {
+ p = l;
+ min_commit = e->last_commit;
+ }
+ l = e;
+ }
+
+ if (p) {
+ e = p->active_next_branch;
+ p->active_next_branch = e->active_next_branch;
+ } else {
+ e = active_branches;
+ active_branches = e->active_next_branch;
+ }
+ e->active = 0;
+ e->active_next_branch = NULL;
+ if (e->branch_tree.tree) {
+ release_tree_content_recursive(e->branch_tree.tree);
+ e->branch_tree.tree = NULL;
+ }
+ cur_active_branches--;
+ }
+}
+
+static void load_branch(struct branch *b)
+{
+ load_tree(&b->branch_tree);
+ if (!b->active) {
+ b->active = 1;
+ b->active_next_branch = active_branches;
+ active_branches = b;
+ cur_active_branches++;
+ branch_load_count++;
+ }
+}
+
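+/*
+ * One fanout level is added for every factor of 256 notes: 1..255
+ * notes map to fanout 0, 256..65535 to fanout 1, and so on.
+ */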
+static unsigned char convert_num_notes_to_fanout(uintmax_t num_notes)
+{
+ unsigned char fanout = 0;
+ while ((num_notes >>= 8))
+ fanout++;
+ return fanout;
+}
+
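+/*
+ * Insert a '/' after every two hex digits for each fanout level, so
+ * with fanout 1 "deadbeef..." becomes "de/adbeef...".
+ */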
+static void construct_path_with_fanout(const char *hex_sha1,
+ unsigned char fanout, char *path)
+{
+ unsigned int i = 0, j = 0;
+ if (fanout >= the_hash_algo->rawsz)
+ die("Too large fanout (%u)", fanout);
+ while (fanout) {
+ path[i++] = hex_sha1[j++];
+ path[i++] = hex_sha1[j++];
+ path[i++] = '/';
+ fanout--;
+ }
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
+}
+
+static uintmax_t do_change_note_fanout(
+ struct tree_entry *orig_root, struct tree_entry *root,
+ char *hex_oid, unsigned int hex_oid_len,
+ char *fullpath, unsigned int fullpath_len,
+ unsigned char fanout)
+{
+ struct tree_content *t;
+ struct tree_entry *e, leaf;
+ unsigned int i, tmp_hex_oid_len, tmp_fullpath_len;
+ uintmax_t num_notes = 0;
+ struct object_id oid;
+ /* hex oid + '/' between each pair of hex digits + NUL */
+ char realpath[GIT_MAX_HEXSZ + ((GIT_MAX_HEXSZ / 2) - 1) + 1];
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ if (!root->tree)
+ load_tree(root);
+ t = root->tree;
+
+ for (i = 0; t && i < t->entry_count; i++) {
+ e = t->entries[i];
+ tmp_hex_oid_len = hex_oid_len + e->name->str_len;
+ tmp_fullpath_len = fullpath_len;
+
+ /*
+		 * We're interested in EITHER existing note entries (entries
+		 * whose path contains a full object name's worth of hex chars,
+		 * not including directory separators), OR directory entries
+		 * that may contain note entries (with fewer hex chars in the
+		 * path).
+ * Also, each path component in a note entry must be a multiple
+ * of 2 chars.
+ */
+ if (!e->versions[1].mode ||
+ tmp_hex_oid_len > hexsz ||
+ e->name->str_len % 2)
+ continue;
+
+ /* This _may_ be a note entry, or a subdir containing notes */
+ memcpy(hex_oid + hex_oid_len, e->name->str_dat,
+ e->name->str_len);
+ if (tmp_fullpath_len)
+ fullpath[tmp_fullpath_len++] = '/';
+ memcpy(fullpath + tmp_fullpath_len, e->name->str_dat,
+ e->name->str_len);
+ tmp_fullpath_len += e->name->str_len;
+ fullpath[tmp_fullpath_len] = '\0';
+
+ if (tmp_hex_oid_len == hexsz && !get_oid_hex(hex_oid, &oid)) {
+ /* This is a note entry */
+ if (fanout == 0xff) {
+ /* Counting mode, no rename */
+ num_notes++;
+ continue;
+ }
+ construct_path_with_fanout(hex_oid, fanout, realpath);
+ if (!strcmp(fullpath, realpath)) {
+ /* Note entry is in correct location */
+ num_notes++;
+ continue;
+ }
+
+ /* Rename fullpath to realpath */
+ if (!tree_content_remove(orig_root, fullpath, &leaf, 0))
+ die("Failed to remove path %s", fullpath);
+ tree_content_set(orig_root, realpath,
+ &leaf.versions[1].oid,
+ leaf.versions[1].mode,
+ leaf.tree);
+ } else if (S_ISDIR(e->versions[1].mode)) {
+ /* This is a subdir that may contain note entries */
+ num_notes += do_change_note_fanout(orig_root, e,
+ hex_oid, tmp_hex_oid_len,
+ fullpath, tmp_fullpath_len, fanout);
+ }
+
+ /* The above may have reallocated the current tree_content */
+ t = root->tree;
+ }
+ return num_notes;
+}
+
+static uintmax_t change_note_fanout(struct tree_entry *root,
+ unsigned char fanout)
+{
+ /*
+ * The size of path is due to one slash between every two hex digits,
+ * plus the terminating NUL. Note that there is no slash at the end, so
+ * the number of slashes is one less than half the number of hex
+ * characters.
+ */
+ char hex_oid[GIT_MAX_HEXSZ], path[GIT_MAX_HEXSZ + (GIT_MAX_HEXSZ / 2) - 1 + 1];
+ return do_change_note_fanout(root, root, hex_oid, 0, path, 0, fanout);
+}
+
+static int parse_mapped_oid_hex(const char *hex, struct object_id *oid, const char **end)
+{
+ int algo;
+ khiter_t it;
+
+ /* Make SHA-1 object IDs have all-zero padding. */
+ memset(oid->hash, 0, sizeof(oid->hash));
+
+ algo = parse_oid_hex_any(hex, oid, end);
+ if (algo == GIT_HASH_UNKNOWN)
+ return -1;
+
+ it = kh_get_oid_map(sub_oid_map, *oid);
+ /* No such object? */
+ if (it == kh_end(sub_oid_map)) {
+ /* If we're using the same algorithm, pass it through. */
+ if (hash_algos[algo].format_id == the_hash_algo->format_id)
+ return 0;
+ return -1;
+ }
+ oidcpy(oid, kh_value(sub_oid_map, it));
+ return 0;
+}
+
+/*
+ * Given a pointer into a string, parse a mark reference:
+ *
+ * idnum ::= ':' bigint;
+ *
+ * Return the first character after the value in *endptr.
+ *
+ * Complain if the following character is not what is expected,
+ * either a space or end of the string.
+ */
+static uintmax_t parse_mark_ref(const char *p, char **endptr)
+{
+ uintmax_t mark;
+
+ assert(*p == ':');
+ p++;
+ mark = strtoumax(p, endptr, 10);
+ if (*endptr == p)
+ die("No value after ':' in mark: %s", command_buf.buf);
+ return mark;
+}
+
+/*
+ * Parse the mark reference, and complain if this is not the end of
+ * the string.
+ */
+static uintmax_t parse_mark_ref_eol(const char *p)
+{
+ char *end;
+ uintmax_t mark;
+
+ mark = parse_mark_ref(p, &end);
+ if (*end != '\0')
+ die("Garbage after mark: %s", command_buf.buf);
+ return mark;
+}
+
+/*
+ * Parse the mark reference, demanding a trailing space. Return a
+ * pointer to the space.
+ */
+static uintmax_t parse_mark_ref_space(const char **p)
+{
+ uintmax_t mark;
+ char *end;
+
+ mark = parse_mark_ref(*p, &end);
+ if (*end++ != ' ')
+ die("Missing space after mark: %s", command_buf.buf);
+ *p = end;
+ return mark;
+}
+
+static void file_change_m(const char *p, struct branch *b)
+{
+ static struct strbuf uq = STRBUF_INIT;
+ const char *endp;
+ struct object_entry *oe;
+ struct object_id oid;
+ uint16_t mode, inline_data = 0;
+
+ p = get_mode(p, &mode);
+ if (!p)
+ die("Corrupt mode: %s", command_buf.buf);
+ switch (mode) {
+ case 0644:
+ case 0755:
+ mode |= S_IFREG;
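+		/* fall through */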
+ case S_IFREG | 0644:
+ case S_IFREG | 0755:
+ case S_IFLNK:
+ case S_IFDIR:
+ case S_IFGITLINK:
+ /* ok */
+ break;
+ default:
+ die("Corrupt mode: %s", command_buf.buf);
+ }
+
+ if (*p == ':') {
+ oe = find_mark(marks, parse_mark_ref_space(&p));
+ oidcpy(&oid, &oe->idx.oid);
+ } else if (skip_prefix(p, "inline ", &p)) {
+ inline_data = 1;
+ oe = NULL; /* not used with inline_data, but makes gcc happy */
+ } else {
+ if (parse_mapped_oid_hex(p, &oid, &p))
+ die("Invalid dataref: %s", command_buf.buf);
+ oe = find_object(&oid);
+ if (*p++ != ' ')
+ die("Missing space after SHA1: %s", command_buf.buf);
+ }
+
+ strbuf_reset(&uq);
+ if (!unquote_c_style(&uq, p, &endp)) {
+ if (*endp)
+ die("Garbage after path in: %s", command_buf.buf);
+ p = uq.buf;
+ }
+
+ /* Git does not track empty, non-toplevel directories. */
+ if (S_ISDIR(mode) && is_empty_tree_oid(&oid) && *p) {
+ tree_content_remove(&b->branch_tree, p, NULL, 0);
+ return;
+ }
+
+ if (S_ISGITLINK(mode)) {
+ if (inline_data)
+ die("Git links cannot be specified 'inline': %s",
+ command_buf.buf);
+ else if (oe) {
+ if (oe->type != OBJ_COMMIT)
+ die("Not a commit (actually a %s): %s",
+ type_name(oe->type), command_buf.buf);
+ }
+ /*
+		 * Accept the sha1 without checking; it is expected to be in
+ * another repository.
+ */
+ } else if (inline_data) {
+ if (S_ISDIR(mode))
+ die("Directories cannot be specified 'inline': %s",
+ command_buf.buf);
+ if (p != uq.buf) {
+ strbuf_addstr(&uq, p);
+ p = uq.buf;
+ }
+ while (read_next_command() != EOF) {
+ const char *v;
+ if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else {
+ parse_and_store_blob(&last_blob, &oid, 0);
+ break;
+ }
+ }
+ } else {
+ enum object_type expected = S_ISDIR(mode) ?
+ OBJ_TREE: OBJ_BLOB;
+ enum object_type type = oe ? oe->type :
+ oid_object_info(the_repository, &oid,
+ NULL);
+ if (type < 0)
+ die("%s not found: %s",
+ S_ISDIR(mode) ? "Tree" : "Blob",
+ command_buf.buf);
+ if (type != expected)
+ die("Not a %s (actually a %s): %s",
+ type_name(expected), type_name(type),
+ command_buf.buf);
+ }
+
+ if (!*p) {
+ tree_content_replace(&b->branch_tree, &oid, mode, NULL);
+ return;
+ }
+ tree_content_set(&b->branch_tree, p, &oid, mode, NULL);
+}
+
+static void file_change_d(const char *p, struct branch *b)
+{
+ static struct strbuf uq = STRBUF_INIT;
+ const char *endp;
+
+ strbuf_reset(&uq);
+ if (!unquote_c_style(&uq, p, &endp)) {
+ if (*endp)
+ die("Garbage after path in: %s", command_buf.buf);
+ p = uq.buf;
+ }
+ tree_content_remove(&b->branch_tree, p, NULL, 1);
+}
+
+static void file_change_cr(const char *s, struct branch *b, int rename)
+{
+ const char *d;
+ static struct strbuf s_uq = STRBUF_INIT;
+ static struct strbuf d_uq = STRBUF_INIT;
+ const char *endp;
+ struct tree_entry leaf;
+
+ strbuf_reset(&s_uq);
+ if (!unquote_c_style(&s_uq, s, &endp)) {
+ if (*endp != ' ')
+ die("Missing space after source: %s", command_buf.buf);
+ } else {
+ endp = strchr(s, ' ');
+ if (!endp)
+ die("Missing space after source: %s", command_buf.buf);
+ strbuf_add(&s_uq, s, endp - s);
+ }
+ s = s_uq.buf;
+
+ endp++;
+ if (!*endp)
+ die("Missing dest: %s", command_buf.buf);
+
+ d = endp;
+ strbuf_reset(&d_uq);
+ if (!unquote_c_style(&d_uq, d, &endp)) {
+ if (*endp)
+ die("Garbage after dest in: %s", command_buf.buf);
+ d = d_uq.buf;
+ }
+
+ memset(&leaf, 0, sizeof(leaf));
+ if (rename)
+ tree_content_remove(&b->branch_tree, s, &leaf, 1);
+ else
+ tree_content_get(&b->branch_tree, s, &leaf, 1);
+ if (!leaf.versions[1].mode)
+ die("Path %s not in branch", s);
+ if (!*d) { /* C "path/to/subdir" "" */
+ tree_content_replace(&b->branch_tree,
+ &leaf.versions[1].oid,
+ leaf.versions[1].mode,
+ leaf.tree);
+ return;
+ }
+ tree_content_set(&b->branch_tree, d,
+ &leaf.versions[1].oid,
+ leaf.versions[1].mode,
+ leaf.tree);
+}
+
+static void note_change_n(const char *p, struct branch *b, unsigned char *old_fanout)
+{
+ static struct strbuf uq = STRBUF_INIT;
+ struct object_entry *oe;
+ struct branch *s;
+ struct object_id oid, commit_oid;
+ char path[GIT_MAX_RAWSZ * 3];
+ uint16_t inline_data = 0;
+ unsigned char new_fanout;
+
+ /*
+ * When loading a branch, we don't traverse its tree to count the real
+ * number of notes (too expensive to do this for all non-note refs).
+ * This means that recently loaded notes refs might incorrectly have
+ * b->num_notes == 0, and consequently, old_fanout might be wrong.
+ *
+ * Fix this by traversing the tree and counting the number of notes
+ * when b->num_notes == 0. If the notes tree is truly empty, the
+ * calculation should not take long.
+ */
+ if (b->num_notes == 0 && *old_fanout == 0) {
+ /* Invoke change_note_fanout() in "counting mode". */
+ b->num_notes = change_note_fanout(&b->branch_tree, 0xff);
+ *old_fanout = convert_num_notes_to_fanout(b->num_notes);
+ }
+
+ /* Now parse the notemodify command. */
+ /* <dataref> or 'inline' */
+ if (*p == ':') {
+ oe = find_mark(marks, parse_mark_ref_space(&p));
+ oidcpy(&oid, &oe->idx.oid);
+ } else if (skip_prefix(p, "inline ", &p)) {
+ inline_data = 1;
+ oe = NULL; /* not used with inline_data, but makes gcc happy */
+ } else {
+ if (parse_mapped_oid_hex(p, &oid, &p))
+ die("Invalid dataref: %s", command_buf.buf);
+ oe = find_object(&oid);
+ if (*p++ != ' ')
+ die("Missing space after SHA1: %s", command_buf.buf);
+ }
+
+ /* <commit-ish> */
+ s = lookup_branch(p);
+ if (s) {
+ if (is_null_oid(&s->oid))
+ die("Can't add a note on empty branch.");
+ oidcpy(&commit_oid, &s->oid);
+ } else if (*p == ':') {
+ uintmax_t commit_mark = parse_mark_ref_eol(p);
+ struct object_entry *commit_oe = find_mark(marks, commit_mark);
+ if (commit_oe->type != OBJ_COMMIT)
+ die("Mark :%" PRIuMAX " not a commit", commit_mark);
+ oidcpy(&commit_oid, &commit_oe->idx.oid);
+ } else if (!get_oid(p, &commit_oid)) {
+ unsigned long size;
+ char *buf = read_object_with_reference(the_repository,
+ &commit_oid,
+ OBJ_COMMIT, &size,
+ &commit_oid);
+ if (!buf || size < the_hash_algo->hexsz + 6)
+ die("Not a valid commit: %s", p);
+ free(buf);
+ } else
+ die("Invalid ref name or SHA1 expression: %s", p);
+
+ if (inline_data) {
+ if (p != uq.buf) {
+ strbuf_addstr(&uq, p);
+ p = uq.buf;
+ }
+ read_next_command();
+ parse_and_store_blob(&last_blob, &oid, 0);
+ } else if (oe) {
+ if (oe->type != OBJ_BLOB)
+ die("Not a blob (actually a %s): %s",
+ type_name(oe->type), command_buf.buf);
+ } else if (!is_null_oid(&oid)) {
+ enum object_type type = oid_object_info(the_repository, &oid,
+ NULL);
+ if (type < 0)
+ die("Blob not found: %s", command_buf.buf);
+ if (type != OBJ_BLOB)
+ die("Not a blob (actually a %s): %s",
+ type_name(type), command_buf.buf);
+ }
+
+ construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
+ if (tree_content_remove(&b->branch_tree, path, NULL, 0))
+ b->num_notes--;
+
+ if (is_null_oid(&oid))
+ return; /* nothing to insert */
+
+ b->num_notes++;
+ new_fanout = convert_num_notes_to_fanout(b->num_notes);
+ construct_path_with_fanout(oid_to_hex(&commit_oid), new_fanout, path);
+ tree_content_set(&b->branch_tree, path, &oid, S_IFREG | 0644, NULL);
+}
+
+static void file_change_deleteall(struct branch *b)
+{
+ release_tree_content_recursive(b->branch_tree.tree);
+ oidclr(&b->branch_tree.versions[0].oid);
+ oidclr(&b->branch_tree.versions[1].oid);
+ load_tree(&b->branch_tree);
+ b->num_notes = 0;
+}
+
+static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
+{
+ if (!buf || size < the_hash_algo->hexsz + 6)
+ die("Not a valid commit: %s", oid_to_hex(&b->oid));
+ if (memcmp("tree ", buf, 5)
+ || get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
+ die("The commit %s is corrupt", oid_to_hex(&b->oid));
+ oidcpy(&b->branch_tree.versions[0].oid,
+ &b->branch_tree.versions[1].oid);
+}
+
+static void parse_from_existing(struct branch *b)
+{
+ if (is_null_oid(&b->oid)) {
+ oidclr(&b->branch_tree.versions[0].oid);
+ oidclr(&b->branch_tree.versions[1].oid);
+ } else {
+ unsigned long size;
+ char *buf;
+
+ buf = read_object_with_reference(the_repository,
+ &b->oid, OBJ_COMMIT, &size,
+ &b->oid);
+ parse_from_commit(b, buf, size);
+ free(buf);
+ }
+}
+
+static int parse_objectish(struct branch *b, const char *objectish)
+{
+ struct branch *s;
+ struct object_id oid;
+
+ oidcpy(&oid, &b->branch_tree.versions[1].oid);
+
+ s = lookup_branch(objectish);
+ if (b == s)
+ die("Can't create a branch from itself: %s", b->name);
+ else if (s) {
+ struct object_id *t = &s->branch_tree.versions[1].oid;
+ oidcpy(&b->oid, &s->oid);
+ oidcpy(&b->branch_tree.versions[0].oid, t);
+ oidcpy(&b->branch_tree.versions[1].oid, t);
+ } else if (*objectish == ':') {
+ uintmax_t idnum = parse_mark_ref_eol(objectish);
+ struct object_entry *oe = find_mark(marks, idnum);
+ if (oe->type != OBJ_COMMIT)
+ die("Mark :%" PRIuMAX " not a commit", idnum);
+ if (!oideq(&b->oid, &oe->idx.oid)) {
+ oidcpy(&b->oid, &oe->idx.oid);
+ if (oe->pack_id != MAX_PACK_ID) {
+ unsigned long size;
+ char *buf = gfi_unpack_entry(oe, &size);
+ parse_from_commit(b, buf, size);
+ free(buf);
+ } else
+ parse_from_existing(b);
+ }
+ } else if (!get_oid(objectish, &b->oid)) {
+ parse_from_existing(b);
+ if (is_null_oid(&b->oid))
+ b->delete = 1;
+ }
+ else
+ die("Invalid ref name or SHA1 expression: %s", objectish);
+
+ if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) {
+ release_tree_content_recursive(b->branch_tree.tree);
+ b->branch_tree.tree = NULL;
+ }
+
+ read_next_command();
+ return 1;
+}
+
+static int parse_from(struct branch *b)
+{
+ const char *from;
+
+ if (!skip_prefix(command_buf.buf, "from ", &from))
+ return 0;
+
+ return parse_objectish(b, from);
+}
+
+static int parse_objectish_with_prefix(struct branch *b, const char *prefix)
+{
+ const char *base;
+
+ if (!skip_prefix(command_buf.buf, prefix, &base))
+ return 0;
+
+ return parse_objectish(b, base);
+}
+
+static struct hash_list *parse_merge(unsigned int *count)
+{
+ struct hash_list *list = NULL, **tail = &list, *n;
+ const char *from;
+ struct branch *s;
+
+ *count = 0;
+ while (skip_prefix(command_buf.buf, "merge ", &from)) {
+ n = xmalloc(sizeof(*n));
+ s = lookup_branch(from);
+ if (s)
+ oidcpy(&n->oid, &s->oid);
+ else if (*from == ':') {
+ uintmax_t idnum = parse_mark_ref_eol(from);
+ struct object_entry *oe = find_mark(marks, idnum);
+ if (oe->type != OBJ_COMMIT)
+ die("Mark :%" PRIuMAX " not a commit", idnum);
+ oidcpy(&n->oid, &oe->idx.oid);
+ } else if (!get_oid(from, &n->oid)) {
+ unsigned long size;
+ char *buf = read_object_with_reference(the_repository,
+ &n->oid,
+ OBJ_COMMIT,
+ &size, &n->oid);
+ if (!buf || size < the_hash_algo->hexsz + 6)
+ die("Not a valid commit: %s", from);
+ free(buf);
+ } else
+ die("Invalid ref name or SHA1 expression: %s", from);
+
+ n->next = NULL;
+ *tail = n;
+ tail = &n->next;
+
+ (*count)++;
+ read_next_command();
+ }
+ return list;
+}
+
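+/*
+ * A commit command carries an optional mark and original-oid, an
+ * optional author, a required committer, an optional encoding, the
+ * commit message, optional "from"/"merge" parents, and finally a run
+ * of file change commands (M, D, R, C, N, deleteall, ls, cat-blob).
+ */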
+static void parse_new_commit(const char *arg)
+{
+ static struct strbuf msg = STRBUF_INIT;
+ struct branch *b;
+ char *author = NULL;
+ char *committer = NULL;
+ char *encoding = NULL;
+ struct hash_list *merge_list = NULL;
+ unsigned int merge_count;
+ unsigned char prev_fanout, new_fanout;
+ const char *v;
+
+ b = lookup_branch(arg);
+ if (!b)
+ b = new_branch(arg);
+
+ read_next_command();
+ parse_mark();
+ parse_original_identifier();
+ if (skip_prefix(command_buf.buf, "author ", &v)) {
+ author = parse_ident(v);
+ read_next_command();
+ }
+ if (skip_prefix(command_buf.buf, "committer ", &v)) {
+ committer = parse_ident(v);
+ read_next_command();
+ }
+ if (!committer)
+ die("Expected committer but didn't get one");
+ if (skip_prefix(command_buf.buf, "encoding ", &v)) {
+ encoding = xstrdup(v);
+ read_next_command();
+ }
+ parse_data(&msg, 0, NULL);
+ read_next_command();
+ parse_from(b);
+ merge_list = parse_merge(&merge_count);
+
+ /* ensure the branch is active/loaded */
+ if (!b->branch_tree.tree || !max_active_branches) {
+ unload_one_branch();
+ load_branch(b);
+ }
+
+ prev_fanout = convert_num_notes_to_fanout(b->num_notes);
+
+ /* file_change* */
+ while (command_buf.len > 0) {
+ if (skip_prefix(command_buf.buf, "M ", &v))
+ file_change_m(v, b);
+ else if (skip_prefix(command_buf.buf, "D ", &v))
+ file_change_d(v, b);
+ else if (skip_prefix(command_buf.buf, "R ", &v))
+ file_change_cr(v, b, 1);
+ else if (skip_prefix(command_buf.buf, "C ", &v))
+ file_change_cr(v, b, 0);
+ else if (skip_prefix(command_buf.buf, "N ", &v))
+ note_change_n(v, b, &prev_fanout);
+ else if (!strcmp("deleteall", command_buf.buf))
+ file_change_deleteall(b);
+ else if (skip_prefix(command_buf.buf, "ls ", &v))
+ parse_ls(v, b);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else {
+ unread_command_buf = 1;
+ break;
+ }
+ if (read_next_command() == EOF)
+ break;
+ }
+
+ new_fanout = convert_num_notes_to_fanout(b->num_notes);
+ if (new_fanout != prev_fanout)
+ b->num_notes = change_note_fanout(&b->branch_tree, new_fanout);
+
+ /* build the tree and the commit */
+ store_tree(&b->branch_tree);
+ oidcpy(&b->branch_tree.versions[0].oid,
+ &b->branch_tree.versions[1].oid);
+
+ strbuf_reset(&new_data);
+ strbuf_addf(&new_data, "tree %s\n",
+ oid_to_hex(&b->branch_tree.versions[1].oid));
+ if (!is_null_oid(&b->oid))
+ strbuf_addf(&new_data, "parent %s\n",
+ oid_to_hex(&b->oid));
+ while (merge_list) {
+ struct hash_list *next = merge_list->next;
+ strbuf_addf(&new_data, "parent %s\n",
+ oid_to_hex(&merge_list->oid));
+ free(merge_list);
+ merge_list = next;
+ }
+ strbuf_addf(&new_data,
+ "author %s\n"
+ "committer %s\n",
+ author ? author : committer, committer);
+ if (encoding)
+ strbuf_addf(&new_data,
+ "encoding %s\n",
+ encoding);
+ strbuf_addch(&new_data, '\n');
+ strbuf_addbuf(&new_data, &msg);
+ free(author);
+ free(committer);
+ free(encoding);
+
+ if (!store_object(OBJ_COMMIT, &new_data, NULL, &b->oid, next_mark))
+ b->pack_id = pack_id;
+ b->last_commit = object_count_by_type[OBJ_COMMIT];
+}
+
+static void parse_new_tag(const char *arg)
+{
+ static struct strbuf msg = STRBUF_INIT;
+ const char *from;
+ char *tagger;
+ struct branch *s;
+ struct tag *t;
+ uintmax_t from_mark = 0;
+ struct object_id oid;
+ enum object_type type;
+ const char *v;
+
+ t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
+ memset(t, 0, sizeof(struct tag));
+ t->name = mem_pool_strdup(&fi_mem_pool, arg);
+ if (last_tag)
+ last_tag->next_tag = t;
+ else
+ first_tag = t;
+ last_tag = t;
+ read_next_command();
+ parse_mark();
+
+ /* from ... */
+ if (!skip_prefix(command_buf.buf, "from ", &from))
+ die("Expected from command, got %s", command_buf.buf);
+ s = lookup_branch(from);
+ if (s) {
+ if (is_null_oid(&s->oid))
+ die("Can't tag an empty branch.");
+ oidcpy(&oid, &s->oid);
+ type = OBJ_COMMIT;
+ } else if (*from == ':') {
+ struct object_entry *oe;
+ from_mark = parse_mark_ref_eol(from);
+ oe = find_mark(marks, from_mark);
+ type = oe->type;
+ oidcpy(&oid, &oe->idx.oid);
+ } else if (!get_oid(from, &oid)) {
+ struct object_entry *oe = find_object(&oid);
+ if (!oe) {
+ type = oid_object_info(the_repository, &oid, NULL);
+ if (type < 0)
+ die("Not a valid object: %s", from);
+ } else
+ type = oe->type;
+ } else
+ die("Invalid ref name or SHA1 expression: %s", from);
+ read_next_command();
+
+ /* original-oid ... */
+ parse_original_identifier();
+
+ /* tagger ... */
+ if (skip_prefix(command_buf.buf, "tagger ", &v)) {
+ tagger = parse_ident(v);
+ read_next_command();
+ } else
+ tagger = NULL;
+
+ /* tag payload/message */
+ parse_data(&msg, 0, NULL);
+
+ /* build the tag object */
+ strbuf_reset(&new_data);
+
+ strbuf_addf(&new_data,
+ "object %s\n"
+ "type %s\n"
+ "tag %s\n",
+ oid_to_hex(&oid), type_name(type), t->name);
+ if (tagger)
+ strbuf_addf(&new_data,
+ "tagger %s\n", tagger);
+ strbuf_addch(&new_data, '\n');
+ strbuf_addbuf(&new_data, &msg);
+ free(tagger);
+
+ if (store_object(OBJ_TAG, &new_data, NULL, &t->oid, next_mark))
+ t->pack_id = MAX_PACK_ID;
+ else
+ t->pack_id = pack_id;
+}
+
+static void parse_reset_branch(const char *arg)
+{
+ struct branch *b;
+ const char *tag_name;
+
+ b = lookup_branch(arg);
+ if (b) {
+ oidclr(&b->oid);
+ oidclr(&b->branch_tree.versions[0].oid);
+ oidclr(&b->branch_tree.versions[1].oid);
+ if (b->branch_tree.tree) {
+ release_tree_content_recursive(b->branch_tree.tree);
+ b->branch_tree.tree = NULL;
+ }
+ }
+ else
+ b = new_branch(arg);
+ read_next_command();
+ parse_from(b);
+ if (b->delete && skip_prefix(b->name, "refs/tags/", &tag_name)) {
+ /*
+ * Elsewhere, we call dump_branches() before dump_tags(),
+ * and dump_branches() will handle ref deletions first, so
+ * in order to make sure the deletion actually takes effect,
+ * we need to remove the tag from our list of tags to update.
+ *
+ * NEEDSWORK: replace list of tags with hashmap for faster
+ * deletion?
+ */
+ struct tag *t, *prev = NULL;
+ for (t = first_tag; t; t = t->next_tag) {
+ if (!strcmp(t->name, tag_name))
+ break;
+ prev = t;
+ }
+ if (t) {
+ if (prev)
+ prev->next_tag = t->next_tag;
+ else
+ first_tag = t->next_tag;
+ if (!t->next_tag)
+ last_tag = prev;
+ /* There is no mem_pool_free(t) function to call. */
+ }
+ }
+ if (command_buf.len > 0)
+ unread_command_buf = 1;
+}
+
+static void cat_blob_write(const char *buf, unsigned long size)
+{
+ if (write_in_full(cat_blob_fd, buf, size) < 0)
+ die_errno("Write to frontend failed");
+}
+
+static void cat_blob(struct object_entry *oe, struct object_id *oid)
+{
+ struct strbuf line = STRBUF_INIT;
+ unsigned long size;
+ enum object_type type = 0;
+ char *buf;
+
+ if (!oe || oe->pack_id == MAX_PACK_ID) {
+ buf = read_object_file(oid, &type, &size);
+ } else {
+ type = oe->type;
+ buf = gfi_unpack_entry(oe, &size);
+ }
+
+ /*
+ * Output based on batch_one_object() from cat-file.c.
+ */
+ if (type <= 0) {
+ strbuf_reset(&line);
+ strbuf_addf(&line, "%s missing\n", oid_to_hex(oid));
+ cat_blob_write(line.buf, line.len);
+ strbuf_release(&line);
+ free(buf);
+ return;
+ }
+ if (!buf)
+ die("Can't read object %s", oid_to_hex(oid));
+ if (type != OBJ_BLOB)
+ die("Object %s is a %s but a blob was expected.",
+ oid_to_hex(oid), type_name(type));
+ strbuf_reset(&line);
+ strbuf_addf(&line, "%s %s %"PRIuMAX"\n", oid_to_hex(oid),
+ type_name(type), (uintmax_t)size);
+ cat_blob_write(line.buf, line.len);
+ strbuf_release(&line);
+ cat_blob_write(buf, size);
+ cat_blob_write("\n", 1);
+ if (oe && oe->pack_id == pack_id) {
+ last_blob.offset = oe->idx.offset;
+ strbuf_attach(&last_blob.data, buf, size, size);
+ last_blob.depth = oe->depth;
+ } else
+ free(buf);
+}
+
+static void parse_get_mark(const char *p)
+{
+ struct object_entry *oe;
+ char output[GIT_MAX_HEXSZ + 2];
+
+ /* get-mark SP <object> LF */
+ if (*p != ':')
+ die("Not a mark: %s", p);
+
+ oe = find_mark(marks, parse_mark_ref_eol(p));
+ if (!oe)
+ die("Unknown mark: %s", command_buf.buf);
+
+ xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
+ cat_blob_write(output, the_hash_algo->hexsz + 1);
+}
+
+static void parse_cat_blob(const char *p)
+{
+ struct object_entry *oe;
+ struct object_id oid;
+
+ /* cat-blob SP <object> LF */
+ if (*p == ':') {
+ oe = find_mark(marks, parse_mark_ref_eol(p));
+ if (!oe)
+ die("Unknown mark: %s", command_buf.buf);
+ oidcpy(&oid, &oe->idx.oid);
+ } else {
+ if (parse_mapped_oid_hex(p, &oid, &p))
+ die("Invalid dataref: %s", command_buf.buf);
+ if (*p)
+ die("Garbage after SHA1: %s", command_buf.buf);
+ oe = find_object(&oid);
+ }
+
+ cat_blob(oe, &oid);
+}
+
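+/*
+ * Peel one layer off a tag or commit, rewriting *oid to the object it
+ * references; trees are returned unchanged and anything else is
+ * rejected as not being a tree-ish.
+ */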
+static struct object_entry *dereference(struct object_entry *oe,
+ struct object_id *oid)
+{
+ unsigned long size;
+ char *buf = NULL;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ if (!oe) {
+ enum object_type type = oid_object_info(the_repository, oid,
+ NULL);
+ if (type < 0)
+ die("object not found: %s", oid_to_hex(oid));
+ /* cache it! */
+ oe = insert_object(oid);
+ oe->type = type;
+ oe->pack_id = MAX_PACK_ID;
+ oe->idx.offset = 1;
+ }
+ switch (oe->type) {
+ case OBJ_TREE: /* easy case. */
+ return oe;
+ case OBJ_COMMIT:
+ case OBJ_TAG:
+ break;
+ default:
+ die("Not a tree-ish: %s", command_buf.buf);
+ }
+
+ if (oe->pack_id != MAX_PACK_ID) { /* in a pack being written */
+ buf = gfi_unpack_entry(oe, &size);
+ } else {
+ enum object_type unused;
+ buf = read_object_file(oid, &unused, &size);
+ }
+ if (!buf)
+ die("Can't load object %s", oid_to_hex(oid));
+
+ /* Peel one layer. */
+ switch (oe->type) {
+ case OBJ_TAG:
+ if (size < hexsz + strlen("object ") ||
+ get_oid_hex(buf + strlen("object "), oid))
+ die("Invalid SHA1 in tag: %s", command_buf.buf);
+ break;
+ case OBJ_COMMIT:
+ if (size < hexsz + strlen("tree ") ||
+ get_oid_hex(buf + strlen("tree "), oid))
+ die("Invalid SHA1 in commit: %s", command_buf.buf);
+ }
+
+ free(buf);
+ return find_object(oid);
+}
+
+static void insert_mapped_mark(uintmax_t mark, void *object, void *cbp)
+{
+ struct object_id *fromoid = object;
+ struct object_id *tooid = find_mark(cbp, mark);
+ int ret;
+ khiter_t it;
+
+ it = kh_put_oid_map(sub_oid_map, *fromoid, &ret);
+ /* We've already seen this object. */
+ if (ret == 0)
+ return;
+ kh_value(sub_oid_map, it) = tooid;
+}
+
+static void build_mark_map_one(struct mark_set *from, struct mark_set *to)
+{
+ for_each_mark(from, 0, insert_mapped_mark, to);
+}
+
+static void build_mark_map(struct string_list *from, struct string_list *to)
+{
+ struct string_list_item *fromp, *top;
+
+ sub_oid_map = kh_init_oid_map();
+
+ for_each_string_list_item(fromp, from) {
+ top = string_list_lookup(to, fromp->string);
+ if (!fromp->util) {
+ die(_("Missing from marks for submodule '%s'"), fromp->string);
+ } else if (!top || !top->util) {
+ die(_("Missing to marks for submodule '%s'"), fromp->string);
+ }
+ build_mark_map_one(fromp->util, top->util);
+ }
+}
+
+static struct object_entry *parse_treeish_dataref(const char **p)
+{
+ struct object_id oid;
+ struct object_entry *e;
+
+ if (**p == ':') { /* <mark> */
+ e = find_mark(marks, parse_mark_ref_space(p));
+ if (!e)
+ die("Unknown mark: %s", command_buf.buf);
+ oidcpy(&oid, &e->idx.oid);
+ } else { /* <sha1> */
+ if (parse_mapped_oid_hex(*p, &oid, p))
+ die("Invalid dataref: %s", command_buf.buf);
+ e = find_object(&oid);
+ if (*(*p)++ != ' ')
+ die("Missing space after tree-ish: %s", command_buf.buf);
+ }
+
+ while (!e || e->type != OBJ_TREE)
+ e = dereference(e, &oid);
+ return e;
+}
+
+static void print_ls(int mode, const unsigned char *hash, const char *path)
+{
+ static struct strbuf line = STRBUF_INIT;
+
+ /* See show_tree(). */
+ const char *type =
+ S_ISGITLINK(mode) ? commit_type :
+ S_ISDIR(mode) ? tree_type :
+ blob_type;
+
+ if (!mode) {
+ /* missing SP path LF */
+ strbuf_reset(&line);
+ strbuf_addstr(&line, "missing ");
+ quote_c_style(path, &line, NULL, 0);
+ strbuf_addch(&line, '\n');
+ } else {
+ /* mode SP type SP object_name TAB path LF */
+ strbuf_reset(&line);
+ strbuf_addf(&line, "%06o %s %s\t",
+ mode & ~NO_DELTA, type, hash_to_hex(hash));
+ quote_c_style(path, &line, NULL, 0);
+ strbuf_addch(&line, '\n');
+ }
+ cat_blob_write(line.buf, line.len);
+}
+
+static void parse_ls(const char *p, struct branch *b)
+{
+ struct tree_entry *root = NULL;
+ struct tree_entry leaf = {NULL};
+
+ /* ls SP (<tree-ish> SP)? <path> */
+ if (*p == '"') {
+ if (!b)
+ die("Not in a commit: %s", command_buf.buf);
+ root = &b->branch_tree;
+ } else {
+ struct object_entry *e = parse_treeish_dataref(&p);
+ root = new_tree_entry();
+ oidcpy(&root->versions[1].oid, &e->idx.oid);
+ if (!is_null_oid(&root->versions[1].oid))
+ root->versions[1].mode = S_IFDIR;
+ load_tree(root);
+ }
+ if (*p == '"') {
+ static struct strbuf uq = STRBUF_INIT;
+ const char *endp;
+ strbuf_reset(&uq);
+ if (unquote_c_style(&uq, p, &endp))
+ die("Invalid path: %s", command_buf.buf);
+ if (*endp)
+ die("Garbage after path in: %s", command_buf.buf);
+ p = uq.buf;
+ }
+ tree_content_get(root, p, &leaf, 1);
+ /*
+ * A directory in preparation would have a sha1 of zero
+ * until it is saved. Save, for simplicity.
+ */
+ if (S_ISDIR(leaf.versions[1].mode))
+ store_tree(&leaf);
+
+ print_ls(leaf.versions[1].mode, leaf.versions[1].oid.hash, p);
+ if (leaf.tree)
+ release_tree_content_recursive(leaf.tree);
+ if (!b || root != &b->branch_tree)
+ release_tree_entry(root);
+}
+
+static void checkpoint(void)
+{
+ checkpoint_requested = 0;
+ if (object_count) {
+ cycle_packfile();
+ }
+ dump_branches();
+ dump_tags();
+ dump_marks();
+}
+
+static void parse_checkpoint(void)
+{
+ checkpoint_requested = 1;
+ skip_optional_lf();
+}
+
+static void parse_progress(void)
+{
+ fwrite(command_buf.buf, 1, command_buf.len, stdout);
+ fputc('\n', stdout);
+ fflush(stdout);
+ skip_optional_lf();
+}
+
+static void parse_alias(void)
+{
+ struct object_entry *e;
+ struct branch b;
+
+ skip_optional_lf();
+ read_next_command();
+
+ /* mark ... */
+ parse_mark();
+ if (!next_mark)
+ die(_("Expected 'mark' command, got %s"), command_buf.buf);
+
+ /* to ... */
+ memset(&b, 0, sizeof(b));
+ if (!parse_objectish_with_prefix(&b, "to "))
+ die(_("Expected 'to' command, got %s"), command_buf.buf);
+ e = find_object(&b.oid);
+ assert(e);
+ insert_mark(&marks, next_mark, e);
+}
+
+static char* make_fast_import_path(const char *path)
+{
+ if (!relative_marks_paths || is_absolute_path(path))
+ return xstrdup(path);
+ return git_pathdup("info/fast-import/%s", path);
+}
+
+static void option_import_marks(const char *marks,
+ int from_stream, int ignore_missing)
+{
+ if (import_marks_file) {
+ if (from_stream)
+ die("Only one import-marks command allowed per stream");
+
+ /* read previous mark file */
+		if (!import_marks_file_from_stream)
+ read_marks();
+ }
+
+ import_marks_file = make_fast_import_path(marks);
+ import_marks_file_from_stream = from_stream;
+ import_marks_file_ignore_missing = ignore_missing;
+}
+
+static void option_date_format(const char *fmt)
+{
+ if (!strcmp(fmt, "raw"))
+ whenspec = WHENSPEC_RAW;
+ else if (!strcmp(fmt, "raw-permissive"))
+ whenspec = WHENSPEC_RAW_PERMISSIVE;
+ else if (!strcmp(fmt, "rfc2822"))
+ whenspec = WHENSPEC_RFC2822;
+ else if (!strcmp(fmt, "now"))
+ whenspec = WHENSPEC_NOW;
+ else
+ die("unknown --date-format argument %s", fmt);
+}
+
+static unsigned long ulong_arg(const char *option, const char *arg)
+{
+ char *endptr;
+ unsigned long rv = strtoul(arg, &endptr, 0);
+ if (strchr(arg, '-') || endptr == arg || *endptr)
+ die("%s: argument must be a non-negative integer", option);
+ return rv;
+}
+
+static void option_depth(const char *depth)
+{
+ max_depth = ulong_arg("--depth", depth);
+ if (max_depth > MAX_DEPTH)
+ die("--depth cannot exceed %u", MAX_DEPTH);
+}
+
+static void option_active_branches(const char *branches)
+{
+ max_active_branches = ulong_arg("--active-branches", branches);
+}
+
+static void option_export_marks(const char *marks)
+{
+ export_marks_file = make_fast_import_path(marks);
+}
+
+static void option_cat_blob_fd(const char *fd)
+{
+ unsigned long n = ulong_arg("--cat-blob-fd", fd);
+ if (n > (unsigned long) INT_MAX)
+ die("--cat-blob-fd cannot exceed %d", INT_MAX);
+ cat_blob_fd = (int) n;
+}
+
+static void option_export_pack_edges(const char *edges)
+{
+ if (pack_edges)
+ fclose(pack_edges);
+ pack_edges = xfopen(edges, "a");
+}
+
+static void option_rewrite_submodules(const char *arg, struct string_list *list)
+{
+ struct mark_set *ms;
+ FILE *fp;
+ char *s = xstrdup(arg);
+ char *f = strchr(s, ':');
+ if (!f)
+ die(_("Expected format name:filename for submodule rewrite option"));
+ *f = '\0';
+ f++;
+ CALLOC_ARRAY(ms, 1);
+
+ fp = fopen(f, "r");
+ if (!fp)
+ die_errno("cannot read '%s'", f);
+ read_mark_file(&ms, fp, insert_oid_entry);
+ fclose(fp);
+
+ string_list_insert(list, s)->util = ms;
+}
+
+static int parse_one_option(const char *option)
+{
+ if (skip_prefix(option, "max-pack-size=", &option)) {
+ unsigned long v;
+ if (!git_parse_ulong(option, &v))
+ return 0;
+ if (v < 8192) {
+ warning("max-pack-size is now in bytes, assuming --max-pack-size=%lum", v);
+ v *= 1024 * 1024;
+ } else if (v < 1024 * 1024) {
+ warning("minimum max-pack-size is 1 MiB");
+ v = 1024 * 1024;
+ }
+ max_packsize = v;
+ } else if (skip_prefix(option, "big-file-threshold=", &option)) {
+ unsigned long v;
+ if (!git_parse_ulong(option, &v))
+ return 0;
+ big_file_threshold = v;
+ } else if (skip_prefix(option, "depth=", &option)) {
+ option_depth(option);
+ } else if (skip_prefix(option, "active-branches=", &option)) {
+ option_active_branches(option);
+ } else if (skip_prefix(option, "export-pack-edges=", &option)) {
+ option_export_pack_edges(option);
+ } else if (!strcmp(option, "quiet")) {
+ show_stats = 0;
+ } else if (!strcmp(option, "stats")) {
+ show_stats = 1;
+ } else if (!strcmp(option, "allow-unsafe-features")) {
+ ; /* already handled during early option parsing */
+ } else {
+ return 0;
+ }
+
+ return 1;
+}
+
+static void check_unsafe_feature(const char *feature, int from_stream)
+{
+ if (from_stream && !allow_unsafe_features)
+ die(_("feature '%s' forbidden in input without --allow-unsafe-features"),
+ feature);
+}
+
+static int parse_one_feature(const char *feature, int from_stream)
+{
+ const char *arg;
+
+ if (skip_prefix(feature, "date-format=", &arg)) {
+ option_date_format(arg);
+ } else if (skip_prefix(feature, "import-marks=", &arg)) {
+ check_unsafe_feature("import-marks", from_stream);
+ option_import_marks(arg, from_stream, 0);
+ } else if (skip_prefix(feature, "import-marks-if-exists=", &arg)) {
+ check_unsafe_feature("import-marks-if-exists", from_stream);
+ option_import_marks(arg, from_stream, 1);
+ } else if (skip_prefix(feature, "export-marks=", &arg)) {
+ check_unsafe_feature(feature, from_stream);
+ option_export_marks(arg);
+ } else if (!strcmp(feature, "alias")) {
+ ; /* Don't die - this feature is supported */
+ } else if (skip_prefix(feature, "rewrite-submodules-to=", &arg)) {
+ option_rewrite_submodules(arg, &sub_marks_to);
+ } else if (skip_prefix(feature, "rewrite-submodules-from=", &arg)) {
+ option_rewrite_submodules(arg, &sub_marks_from);
+ } else if (!strcmp(feature, "get-mark")) {
+ ; /* Don't die - this feature is supported */
+ } else if (!strcmp(feature, "cat-blob")) {
+ ; /* Don't die - this feature is supported */
+ } else if (!strcmp(feature, "relative-marks")) {
+ relative_marks_paths = 1;
+ } else if (!strcmp(feature, "no-relative-marks")) {
+ relative_marks_paths = 0;
+ } else if (!strcmp(feature, "done")) {
+ require_explicit_termination = 1;
+ } else if (!strcmp(feature, "force")) {
+ force_update = 1;
+ } else if (!strcmp(feature, "notes") || !strcmp(feature, "ls")) {
+ ; /* do nothing; we have the feature */
+ } else {
+ return 0;
+ }
+
+ return 1;
+}
+
+static void parse_feature(const char *feature)
+{
+ if (seen_data_command)
+ die("Got feature command '%s' after data command", feature);
+
+ if (parse_one_feature(feature, 1))
+ return;
+
+ die("This version of fast-import does not support feature %s.", feature);
+}
+
+static void parse_option(const char *option)
+{
+ if (seen_data_command)
+ die("Got option command '%s' after data command", option);
+
+ if (parse_one_option(option))
+ return;
+
+ die("This version of fast-import does not support option: %s", option);
+}
+
+static void git_pack_config(void)
+{
+ int indexversion_value;
+ int limit;
+ unsigned long packsizelimit_value;
+
+ if (!git_config_get_ulong("pack.depth", &max_depth)) {
+ if (max_depth > MAX_DEPTH)
+ max_depth = MAX_DEPTH;
+ }
+ if (!git_config_get_int("pack.indexversion", &indexversion_value)) {
+ pack_idx_opts.version = indexversion_value;
+ if (pack_idx_opts.version > 2)
+ git_die_config("pack.indexversion",
+ "bad pack.indexVersion=%"PRIu32, pack_idx_opts.version);
+ }
+ if (!git_config_get_ulong("pack.packsizelimit", &packsizelimit_value))
+ max_packsize = packsizelimit_value;
+
+ if (!git_config_get_int("fastimport.unpacklimit", &limit))
+ unpack_limit = limit;
+ else if (!git_config_get_int("transfer.unpacklimit", &limit))
+ unpack_limit = limit;
+
+ git_config(git_default_config, NULL);
+}
+
+static const char fast_import_usage[] =
+"git fast-import [--date-format=<f>] [--max-pack-size=<n>] [--big-file-threshold=<n>] [--depth=<n>] [--active-branches=<n>] [--export-marks=<marks.file>]";
+
+static void parse_argv(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < global_argc; i++) {
+ const char *a = global_argv[i];
+
+ if (*a != '-' || !strcmp(a, "--"))
+ break;
+
+ if (!skip_prefix(a, "--", &a))
+ die("unknown option %s", a);
+
+ if (parse_one_option(a))
+ continue;
+
+ if (parse_one_feature(a, 0))
+ continue;
+
+ if (skip_prefix(a, "cat-blob-fd=", &a)) {
+ option_cat_blob_fd(a);
+ continue;
+ }
+
+ die("unknown option --%s", a);
+ }
+ if (i != global_argc)
+ usage(fast_import_usage);
+
+ seen_data_command = 1;
+ if (import_marks_file)
+ read_marks();
+ build_mark_map(&sub_marks_from, &sub_marks_to);
+}
+
+int cmd_fast_import(int argc, const char **argv, const char *prefix)
+{
+ unsigned int i;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(fast_import_usage);
+
+ reset_pack_idx_option(&pack_idx_opts);
+ git_pack_config();
+
+ alloc_objects(object_entry_alloc);
+ strbuf_init(&command_buf, 0);
+ CALLOC_ARRAY(atom_table, atom_table_sz);
+ CALLOC_ARRAY(branch_table, branch_table_sz);
+ CALLOC_ARRAY(avail_tree_table, avail_tree_table_sz);
+ marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
+
+ hashmap_init(&object_table, object_entry_hashcmp, NULL, 0);
+
+ /*
+ * We don't parse most options until after we've seen the set of
+ * "feature" lines at the start of the stream (which allows the command
+ * line to override stream data). But we must do an early parse of any
+ * command-line options that impact how we interpret the feature lines.
+ */
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (*arg != '-' || !strcmp(arg, "--"))
+ break;
+ if (!strcmp(arg, "--allow-unsafe-features"))
+ allow_unsafe_features = 1;
+ }
+
+ global_argc = argc;
+ global_argv = argv;
+
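+	/* Chain the preallocated recent-command slots into a free list. */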
+ rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
+ for (i = 0; i < (cmd_save - 1); i++)
+ rc_free[i].next = &rc_free[i + 1];
+ rc_free[cmd_save - 1].next = NULL;
+
+ start_packfile();
+ set_die_routine(die_nicely);
+ set_checkpoint_signal();
+ while (read_next_command() != EOF) {
+ const char *v;
+ if (!strcmp("blob", command_buf.buf))
+ parse_new_blob();
+ else if (skip_prefix(command_buf.buf, "commit ", &v))
+ parse_new_commit(v);
+ else if (skip_prefix(command_buf.buf, "tag ", &v))
+ parse_new_tag(v);
+ else if (skip_prefix(command_buf.buf, "reset ", &v))
+ parse_reset_branch(v);
+ else if (skip_prefix(command_buf.buf, "ls ", &v))
+ parse_ls(v, NULL);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else if (skip_prefix(command_buf.buf, "get-mark ", &v))
+ parse_get_mark(v);
+ else if (!strcmp("checkpoint", command_buf.buf))
+ parse_checkpoint();
+ else if (!strcmp("done", command_buf.buf))
+ break;
+ else if (!strcmp("alias", command_buf.buf))
+ parse_alias();
+ else if (starts_with(command_buf.buf, "progress "))
+ parse_progress();
+ else if (skip_prefix(command_buf.buf, "feature ", &v))
+ parse_feature(v);
+ else if (skip_prefix(command_buf.buf, "option git ", &v))
+ parse_option(v);
+ else if (starts_with(command_buf.buf, "option "))
+			/* ignore non-git options */;
+ else
+ die("Unsupported command: %s", command_buf.buf);
+
+ if (checkpoint_requested)
+ checkpoint();
+ }
+
+ /* argv hasn't been parsed yet, do so */
+ if (!seen_data_command)
+ parse_argv();
+
+ if (require_explicit_termination && feof(stdin))
+ die("stream ends early");
+
+ end_packfile();
+
+ dump_branches();
+ dump_tags();
+ unkeep_all_packs();
+ dump_marks();
+
+ if (pack_edges)
+ fclose(pack_edges);
+
+ if (show_stats) {
+ uintmax_t total_count = 0, duplicate_count = 0;
+ for (i = 0; i < ARRAY_SIZE(object_count_by_type); i++)
+ total_count += object_count_by_type[i];
+ for (i = 0; i < ARRAY_SIZE(duplicate_count_by_type); i++)
+ duplicate_count += duplicate_count_by_type[i];
+
+ fprintf(stderr, "%s statistics:\n", argv[0]);
+ fprintf(stderr, "---------------------------------------------------------------------\n");
+ fprintf(stderr, "Alloc'd objects: %10" PRIuMAX "\n", alloc_count);
+ fprintf(stderr, "Total objects: %10" PRIuMAX " (%10" PRIuMAX " duplicates )\n", total_count, duplicate_count);
+ fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB], delta_count_attempts_by_type[OBJ_BLOB]);
+ fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE], delta_count_attempts_by_type[OBJ_TREE]);
+ fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT], delta_count_attempts_by_type[OBJ_COMMIT]);
+ fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG], delta_count_attempts_by_type[OBJ_TAG]);
+ fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
+ fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
+ fprintf(stderr, " atoms: %10u\n", atom_cnt);
+ fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (tree_entry_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
+ fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((tree_entry_allocd + fi_mem_pool.pool_alloc) /1024));
+ fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
+ fprintf(stderr, "---------------------------------------------------------------------\n");
+ pack_report();
+ fprintf(stderr, "---------------------------------------------------------------------\n");
+ fprintf(stderr, "\n");
+ }
+
+ return failure ? 1 : 0;
+}
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
new file mode 100644
index 0000000..afe6793
--- /dev/null
+++ b/builtin/fetch-pack.c
@@ -0,0 +1,277 @@
+#include "builtin.h"
+#include "pkt-line.h"
+#include "fetch-pack.h"
+#include "remote.h"
+#include "connect.h"
+#include "oid-array.h"
+#include "protocol.h"
+
+static const char fetch_pack_usage[] =
+"git fetch-pack [--all] [--stdin] [--quiet | -q] [--keep | -k] [--thin] "
+"[--include-tag] [--upload-pack=<git-upload-pack>] [--depth=<n>] "
+"[--no-progress] [--diag-url] [-v] [<host>:]<directory> [<refs>...]";
+
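+/*
+ * A sought entry is "<oid> <ref>", a bare "<oid>", or a bare "<ref>";
+ * when only a name is given the stored object id is left null.
+ */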
+static void add_sought_entry(struct ref ***sought, int *nr, int *alloc,
+ const char *name)
+{
+ struct ref *ref;
+ struct object_id oid;
+ const char *p;
+
+ if (!parse_oid_hex(name, &oid, &p)) {
+ if (*p == ' ') {
+ /* <oid> <ref>, find refname */
+ name = p + 1;
+ } else if (*p == '\0') {
+ ; /* <oid>, leave oid as name */
+ } else {
+ /* <ref>, clear cruft from oid */
+ oidclr(&oid);
+ }
+ } else {
+ /* <ref>, clear cruft from get_oid_hex */
+ oidclr(&oid);
+ }
+
+ ref = alloc_ref(name);
+ oidcpy(&ref->old_oid, &oid);
+ (*nr)++;
+ ALLOC_GROW(*sought, *nr, *alloc);
+ (*sought)[*nr - 1] = ref;
+}
+
+int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
+{
+ int i, ret;
+ struct ref *ref = NULL;
+ const char *dest = NULL;
+ struct ref **sought = NULL;
+ int nr_sought = 0, alloc_sought = 0;
+ int fd[2];
+ struct string_list pack_lockfiles = STRING_LIST_INIT_DUP;
+ struct string_list *pack_lockfiles_ptr = NULL;
+ struct child_process *conn;
+ struct fetch_pack_args args;
+ struct oid_array shallow = OID_ARRAY_INIT;
+ struct string_list deepen_not = STRING_LIST_INIT_DUP;
+ struct packet_reader reader;
+ enum protocol_version version;
+
+ fetch_if_missing = 0;
+
+ packet_trace_identity("fetch-pack");
+
+ memset(&args, 0, sizeof(args));
+ list_objects_filter_init(&args.filter_options);
+ args.uploadpack = "git-upload-pack";
+
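+ /*
+ * Hand-rolled option parsing: the loop stops at the first argument
+ * that does not start with '-', and any unrecognized option falls
+ * through to usage() at the bottom of the loop.
+ */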
+ for (i = 1; i < argc && *argv[i] == '-'; i++) {
+ const char *arg = argv[i];
+
+ if (skip_prefix(arg, "--upload-pack=", &arg)) {
+ args.uploadpack = arg;
+ continue;
+ }
+ if (skip_prefix(arg, "--exec=", &arg)) {
+ args.uploadpack = arg;
+ continue;
+ }
+ if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) {
+ args.quiet = 1;
+ continue;
+ }
+ if (!strcmp("--keep", arg) || !strcmp("-k", arg)) {
+ args.lock_pack = args.keep_pack;
+ args.keep_pack = 1;
+ continue;
+ }
+ if (!strcmp("--thin", arg)) {
+ args.use_thin_pack = 1;
+ continue;
+ }
+ if (!strcmp("--include-tag", arg)) {
+ args.include_tag = 1;
+ continue;
+ }
+ if (!strcmp("--all", arg)) {
+ args.fetch_all = 1;
+ continue;
+ }
+ if (!strcmp("--stdin", arg)) {
+ args.stdin_refs = 1;
+ continue;
+ }
+ if (!strcmp("--diag-url", arg)) {
+ args.diag_url = 1;
+ continue;
+ }
+ if (!strcmp("-v", arg)) {
+ args.verbose = 1;
+ continue;
+ }
+ if (skip_prefix(arg, "--depth=", &arg)) {
+ args.depth = strtol(arg, NULL, 0);
+ continue;
+ }
+ if (skip_prefix(arg, "--shallow-since=", &arg)) {
+ args.deepen_since = xstrdup(arg);
+ continue;
+ }
+ if (skip_prefix(arg, "--shallow-exclude=", &arg)) {
+ string_list_append(&deepen_not, arg);
+ continue;
+ }
+ if (!strcmp(arg, "--deepen-relative")) {
+ args.deepen_relative = 1;
+ continue;
+ }
+ if (!strcmp("--no-progress", arg)) {
+ args.no_progress = 1;
+ continue;
+ }
+ if (!strcmp("--stateless-rpc", arg)) {
+ args.stateless_rpc = 1;
+ continue;
+ }
+ if (!strcmp("--lock-pack", arg)) {
+ args.lock_pack = 1;
+ pack_lockfiles_ptr = &pack_lockfiles;
+ continue;
+ }
+ if (!strcmp("--check-self-contained-and-connected", arg)) {
+ args.check_self_contained_and_connected = 1;
+ continue;
+ }
+ if (!strcmp("--cloning", arg)) {
+ args.cloning = 1;
+ continue;
+ }
+ if (!strcmp("--update-shallow", arg)) {
+ args.update_shallow = 1;
+ continue;
+ }
+ if (!strcmp("--from-promisor", arg)) {
+ args.from_promisor = 1;
+ continue;
+ }
+ if (!strcmp("--refetch", arg)) {
+ args.refetch = 1;
+ continue;
+ }
+ if (skip_prefix(arg, ("--filter="), &arg)) {
+ parse_list_objects_filter(&args.filter_options, arg);
+ continue;
+ }
+ if (!strcmp(arg, ("--no-filter"))) {
+ list_objects_filter_set_no_filter(&args.filter_options);
+ continue;
+ }
+ usage(fetch_pack_usage);
+ }
+ if (deepen_not.nr)
+ args.deepen_not = &deepen_not;
+
+ if (i < argc)
+ dest = argv[i++];
+ else
+ usage(fetch_pack_usage);
+
+ /*
+ * Copy refs from cmdline to growable list, then append any
+ * refs from the standard input:
+ */
+ for (; i < argc; i++)
+ add_sought_entry(&sought, &nr_sought, &alloc_sought, argv[i]);
+ if (args.stdin_refs) {
+ if (args.stateless_rpc) {
+ /* in stateless RPC mode we use pkt-line to read
+ * from stdin, until we get a flush packet
+ */
+ for (;;) {
+ char *line = packet_read_line(0, NULL);
+ if (!line)
+ break;
+ add_sought_entry(&sought, &nr_sought, &alloc_sought, line);
+ }
+ }
+ else {
+ /* read from stdin one ref per line, until EOF */
+ struct strbuf line = STRBUF_INIT;
+ while (strbuf_getline_lf(&line, stdin) != EOF)
+ add_sought_entry(&sought, &nr_sought, &alloc_sought, line.buf);
+ strbuf_release(&line);
+ }
+ }
+
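+ /*
+ * In stateless RPC mode the caller has already connected stdin and
+ * stdout to the other side, so reuse fds 0/1 instead of spawning a
+ * connection with git_connect().
+ */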
+ if (args.stateless_rpc) {
+ conn = NULL;
+ fd[0] = 0;
+ fd[1] = 1;
+ } else {
+ int flags = args.verbose ? CONNECT_VERBOSE : 0;
+ if (args.diag_url)
+ flags |= CONNECT_DIAG_URL;
+ conn = git_connect(fd, dest, args.uploadpack,
+ flags);
+ if (!conn)
+ return args.diag_url ? 0 : 1;
+ }
+
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF |
+ PACKET_READ_DIE_ON_ERR_PACKET);
+
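+ /*
+ * Probe which wire protocol the server speaks: protocol v2 fetches the
+ * ref advertisement with a separate ls-refs request, while v0/v1 read
+ * the refs (and shallow info) from the initial advertisement.
+ */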
+ version = discover_version(&reader);
+ switch (version) {
+ case protocol_v2:
+ get_remote_refs(fd[1], &reader, &ref, 0, NULL, NULL,
+ args.stateless_rpc);
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ get_remote_heads(&reader, &ref, 0, NULL, &shallow);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
+
+ ref = fetch_pack(&args, fd, ref, sought, nr_sought,
+ &shallow, pack_lockfiles_ptr, version);
+ if (pack_lockfiles.nr) {
+ int i;
+
+ printf("lock %s\n", pack_lockfiles.items[0].string);
+ fflush(stdout);
+ for (i = 1; i < pack_lockfiles.nr; i++)
+ warning(_("Lockfile created but not reported: %s"),
+ pack_lockfiles.items[i].string);
+ }
+ if (args.check_self_contained_and_connected &&
+ args.self_contained_and_connected) {
+ printf("connectivity-ok\n");
+ fflush(stdout);
+ }
+ close(fd[0]);
+ close(fd[1]);
+ if (finish_connect(conn))
+ return 1;
+
+ ret = !ref;
+
+ /*
+ * If the heads to pull were given, we should have consumed
+ * all of them by matching the remote. Otherwise, 'git fetch
+ * remote no-such-ref' would silently succeed without issuing
+ * an error.
+ */
+ ret |= report_unmatched_refs(sought, nr_sought);
+
+ while (ref) {
+ printf("%s %s\n",
+ oid_to_hex(&ref->old_oid), ref->name);
+ ref = ref->next;
+ }
+
+ return ret;
+}
diff --git a/builtin/fetch.c b/builtin/fetch.c
new file mode 100644
index 0000000..7378caf
--- /dev/null
+++ b/builtin/fetch.c
@@ -0,0 +1,2359 @@
+/*
+ * "git fetch"
+ */
+#include "cache.h"
+#include "config.h"
+#include "repository.h"
+#include "refs.h"
+#include "refspec.h"
+#include "object-store.h"
+#include "oidset.h"
+#include "commit.h"
+#include "builtin.h"
+#include "string-list.h"
+#include "remote.h"
+#include "transport.h"
+#include "run-command.h"
+#include "parse-options.h"
+#include "sigchain.h"
+#include "submodule-config.h"
+#include "submodule.h"
+#include "connected.h"
+#include "strvec.h"
+#include "utf8.h"
+#include "packfile.h"
+#include "list-objects-filter-options.h"
+#include "commit-reach.h"
+#include "branch.h"
+#include "promisor-remote.h"
+#include "commit-graph.h"
+#include "shallow.h"
+#include "worktree.h"
+
+#define FORCED_UPDATES_DELAY_WARNING_IN_MS (10 * 1000)
+
+static const char * const builtin_fetch_usage[] = {
+ N_("git fetch [<options>] [<repository> [<refspec>...]]"),
+ N_("git fetch [<options>] <group>"),
+ N_("git fetch --multiple [<options>] [(<repository> | <group>)...]"),
+ N_("git fetch --all [<options>]"),
+ NULL
+};
+
+enum {
+ TAGS_UNSET = 0,
+ TAGS_DEFAULT = 1,
+ TAGS_SET = 2
+};
+
+static int fetch_prune_config = -1; /* unspecified */
+static int fetch_show_forced_updates = 1;
+static uint64_t forced_updates_ms = 0;
+static int prefetch = 0;
+static int prune = -1; /* unspecified */
+#define PRUNE_BY_DEFAULT 0 /* do we prune by default? */
+
+static int fetch_prune_tags_config = -1; /* unspecified */
+static int prune_tags = -1; /* unspecified */
+#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
+
+static int all, append, dry_run, force, keep, multiple, update_head_ok;
+static int write_fetch_head = 1;
+static int verbosity, deepen_relative, set_upstream, refetch;
+static int progress = -1;
+static int enable_auto_gc = 1;
+static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
+static int max_jobs = -1, submodule_fetch_jobs_config = -1;
+static int fetch_parallel_config = 1;
+static int atomic_fetch;
+static enum transport_family family;
+static const char *depth;
+static const char *deepen_since;
+static const char *upload_pack;
+static struct string_list deepen_not = STRING_LIST_INIT_NODUP;
+static struct strbuf default_rla = STRBUF_INIT;
+static struct transport *gtransport;
+static struct transport *gsecondary;
+static const char *submodule_prefix = "";
+static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
+static int recurse_submodules_cli = RECURSE_SUBMODULES_DEFAULT;
+static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND;
+static int shown_url = 0;
+static struct refspec refmap = REFSPEC_INIT_FETCH;
+static struct list_objects_filter_options filter_options = LIST_OBJECTS_FILTER_INIT;
+static struct string_list server_options = STRING_LIST_INIT_DUP;
+static struct string_list negotiation_tip = STRING_LIST_INIT_NODUP;
+static int fetch_write_commit_graph = -1;
+static int stdin_refspecs = 0;
+static int negotiate_only;
+
+static int git_fetch_config(const char *k, const char *v, void *cb)
+{
+ if (!strcmp(k, "fetch.prune")) {
+ fetch_prune_config = git_config_bool(k, v);
+ return 0;
+ }
+
+ if (!strcmp(k, "fetch.prunetags")) {
+ fetch_prune_tags_config = git_config_bool(k, v);
+ return 0;
+ }
+
+ if (!strcmp(k, "fetch.showforcedupdates")) {
+ fetch_show_forced_updates = git_config_bool(k, v);
+ return 0;
+ }
+
+ if (!strcmp(k, "submodule.recurse")) {
+ int r = git_config_bool(k, v) ?
+ RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
+ recurse_submodules = r;
+ }
+
+ if (!strcmp(k, "submodule.fetchjobs")) {
+ submodule_fetch_jobs_config = parse_submodule_fetchjobs(k, v);
+ return 0;
+ } else if (!strcmp(k, "fetch.recursesubmodules")) {
+ recurse_submodules = parse_fetch_recurse_submodules_arg(k, v);
+ return 0;
+ }
+
+ if (!strcmp(k, "fetch.parallel")) {
+ fetch_parallel_config = git_config_int(k, v);
+ if (fetch_parallel_config < 0)
+ die(_("fetch.parallel cannot be negative"));
+ if (!fetch_parallel_config)
+ fetch_parallel_config = online_cpus();
+ return 0;
+ }
+
+ return git_default_config(k, v, cb);
+}
+
+static int parse_refmap_arg(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ /*
+ * "git fetch --refmap='' origin foo"
+ * can be used to tell the command not to store anywhere
+ */
+ refspec_append(&refmap, arg);
+
+ return 0;
+}
+
+static struct option builtin_fetch_options[] = {
+ OPT__VERBOSITY(&verbosity),
+ OPT_BOOL(0, "all", &all,
+ N_("fetch from all remotes")),
+ OPT_BOOL(0, "set-upstream", &set_upstream,
+ N_("set upstream for git pull/fetch")),
+ OPT_BOOL('a', "append", &append,
+ N_("append to .git/FETCH_HEAD instead of overwriting")),
+ OPT_BOOL(0, "atomic", &atomic_fetch,
+ N_("use atomic transaction to update references")),
+ OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
+ N_("path to upload pack on remote end")),
+ OPT__FORCE(&force, N_("force overwrite of local reference"), 0),
+ OPT_BOOL('m', "multiple", &multiple,
+ N_("fetch from multiple remotes")),
+ OPT_SET_INT('t', "tags", &tags,
+ N_("fetch all tags and associated objects"), TAGS_SET),
+ OPT_SET_INT('n', NULL, &tags,
+ N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
+ OPT_INTEGER('j', "jobs", &max_jobs,
+ N_("number of submodules fetched in parallel")),
+ OPT_BOOL(0, "prefetch", &prefetch,
+ N_("modify the refspec to place all refs within refs/prefetch/")),
+ OPT_BOOL('p', "prune", &prune,
+ N_("prune remote-tracking branches no longer on remote")),
+ OPT_BOOL('P', "prune-tags", &prune_tags,
+ N_("prune local tags no longer on remote and clobber changed tags")),
+ OPT_CALLBACK_F(0, "recurse-submodules", &recurse_submodules_cli, N_("on-demand"),
+ N_("control recursive fetching of submodules"),
+ PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
+ OPT_BOOL(0, "dry-run", &dry_run,
+ N_("dry run")),
+ OPT_BOOL(0, "write-fetch-head", &write_fetch_head,
+ N_("write fetched references to the FETCH_HEAD file")),
+ OPT_BOOL('k', "keep", &keep, N_("keep downloaded pack")),
+ OPT_BOOL('u', "update-head-ok", &update_head_ok,
+ N_("allow updating of HEAD ref")),
+ OPT_BOOL(0, "progress", &progress, N_("force progress reporting")),
+ OPT_STRING(0, "depth", &depth, N_("depth"),
+ N_("deepen history of shallow clone")),
+ OPT_STRING(0, "shallow-since", &deepen_since, N_("time"),
+ N_("deepen history of shallow repository based on time")),
+ OPT_STRING_LIST(0, "shallow-exclude", &deepen_not, N_("revision"),
+ N_("deepen history of shallow clone, excluding rev")),
+ OPT_INTEGER(0, "deepen", &deepen_relative,
+ N_("deepen history of shallow clone")),
+ OPT_SET_INT_F(0, "unshallow", &unshallow,
+ N_("convert to a complete repository"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "refetch", &refetch,
+ N_("re-fetch without negotiating common commits"),
+ 1, PARSE_OPT_NONEG),
+ { OPTION_STRING, 0, "submodule-prefix", &submodule_prefix, N_("dir"),
+ N_("prepend this to submodule path output"), PARSE_OPT_HIDDEN },
+ OPT_CALLBACK_F(0, "recurse-submodules-default",
+ &recurse_submodules_default, N_("on-demand"),
+ N_("default for recursive fetching of submodules "
+ "(lower priority than config files)"),
+ PARSE_OPT_HIDDEN, option_fetch_parse_recurse_submodules),
+ OPT_BOOL(0, "update-shallow", &update_shallow,
+ N_("accept refs that update .git/shallow")),
+ OPT_CALLBACK_F(0, "refmap", NULL, N_("refmap"),
+ N_("specify fetch refmap"), PARSE_OPT_NONEG, parse_refmap_arg),
+ OPT_STRING_LIST('o', "server-option", &server_options, N_("server-specific"), N_("option to transmit")),
+ OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
+ TRANSPORT_FAMILY_IPV4),
+ OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
+ TRANSPORT_FAMILY_IPV6),
+ OPT_STRING_LIST(0, "negotiation-tip", &negotiation_tip, N_("revision"),
+ N_("report that we have only objects reachable from this object")),
+ OPT_BOOL(0, "negotiate-only", &negotiate_only,
+ N_("do not fetch a packfile; instead, print ancestors of negotiation tips")),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_BOOL(0, "auto-maintenance", &enable_auto_gc,
+ N_("run 'maintenance --auto' after fetching")),
+ OPT_BOOL(0, "auto-gc", &enable_auto_gc,
+ N_("run 'maintenance --auto' after fetching")),
+ OPT_BOOL(0, "show-forced-updates", &fetch_show_forced_updates,
+ N_("check for forced-updates on all updated branches")),
+ OPT_BOOL(0, "write-commit-graph", &fetch_write_commit_graph,
+ N_("write the commit-graph after fetching")),
+ OPT_BOOL(0, "stdin", &stdin_refspecs,
+ N_("accept refspecs from stdin")),
+ OPT_END()
+};
+
+static void unlock_pack(unsigned int flags)
+{
+ if (gtransport)
+ transport_unlock_pack(gtransport, flags);
+ if (gsecondary)
+ transport_unlock_pack(gsecondary, flags);
+}
+
+static void unlock_pack_atexit(void)
+{
+ unlock_pack(0);
+}
+
+static void unlock_pack_on_signal(int signo)
+{
+ unlock_pack(TRANSPORT_UNLOCK_PACK_IN_SIGNAL_HANDLER);
+ sigchain_pop(signo);
+ raise(signo);
+}
+
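+/*
+ * Make sure every ref named by branch.<name>.merge ends up in the ref map
+ * marked FETCH_HEAD_MERGE, adding an extra fetch entry when the configured
+ * refspecs did not already cover it.
+ */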
+static void add_merge_config(struct ref **head,
+ const struct ref *remote_refs,
+ struct branch *branch,
+ struct ref ***tail)
+{
+ int i;
+
+ for (i = 0; i < branch->merge_nr; i++) {
+ struct ref *rm, **old_tail = *tail;
+ struct refspec_item refspec;
+
+ for (rm = *head; rm; rm = rm->next) {
+ if (branch_merge_matches(branch, i, rm->name)) {
+ rm->fetch_head_status = FETCH_HEAD_MERGE;
+ break;
+ }
+ }
+ if (rm)
+ continue;
+
+ /*
+ * Not fetched to a remote-tracking branch? We need to fetch
+ * it anyway to allow this branch's "branch.$name.merge"
+ * to be honored by 'git pull', but we do not have to
+ * fail if branch.$name.merge is misconfigured to point
+ * at a nonexisting branch. If we were indeed called by
+ * 'git pull', it will notice the misconfiguration because
+ * there is no entry in the resulting FETCH_HEAD marked
+ * for merging.
+ */
+ memset(&refspec, 0, sizeof(refspec));
+ refspec.src = branch->merge[i]->src;
+ get_fetch_map(remote_refs, &refspec, tail, 1);
+ for (rm = *old_tail; rm; rm = rm->next)
+ rm->fetch_head_status = FETCH_HEAD_MERGE;
+ }
+}
+
+static void create_fetch_oidset(struct ref **head, struct oidset *out)
+{
+ struct ref *rm = *head;
+ while (rm) {
+ oidset_insert(out, &rm->old_oid);
+ rm = rm->next;
+ }
+}
+
+struct refname_hash_entry {
+ struct hashmap_entry ent;
+ struct object_id oid;
+ int ignore;
+ char refname[FLEX_ARRAY];
+};
+
+static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data UNUSED,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata)
+{
+ const struct refname_hash_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct refname_hash_entry, ent);
+ e2 = container_of(entry_or_key, const struct refname_hash_entry, ent);
+ return strcmp(e1->refname, keydata ? keydata : e2->refname);
+}
+
+static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
+ const char *refname,
+ const struct object_id *oid)
+{
+ struct refname_hash_entry *ent;
+ size_t len = strlen(refname);
+
+ FLEX_ALLOC_MEM(ent, refname, refname, len);
+ hashmap_entry_init(&ent->ent, strhash(refname));
+ oidcpy(&ent->oid, oid);
+ hashmap_add(map, &ent->ent);
+ return ent;
+}
+
+static int add_one_refname(const char *refname,
+ const struct object_id *oid,
+ int flag UNUSED, void *cbdata)
+{
+ struct hashmap *refname_map = cbdata;
+
+ (void) refname_hash_add(refname_map, refname, oid);
+ return 0;
+}
+
+static void refname_hash_init(struct hashmap *map)
+{
+ hashmap_init(map, refname_hash_entry_cmp, NULL, 0);
+}
+
+static int refname_hash_exists(struct hashmap *map, const char *refname)
+{
+ return !!hashmap_get_from_hash(map, strhash(refname), refname);
+}
+
+static void clear_item(struct refname_hash_entry *item)
+{
+ item->ignore = 1;
+}
+
+
+static void add_already_queued_tags(const char *refname,
+ const struct object_id *old_oid,
+ const struct object_id *new_oid,
+ void *cb_data)
+{
+ struct hashmap *queued_tags = cb_data;
+ if (starts_with(refname, "refs/tags/") && new_oid)
+ (void) refname_hash_add(queued_tags, refname, new_oid);
+}
+
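+/*
+ * Scan the remote ref advertisement for tags that are neither present
+ * locally nor already queued for update, and append them to the ref map.
+ * The peeled "^{}" entries are used to decide whether an annotated tag's
+ * target object is already available.
+ */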
+static void find_non_local_tags(const struct ref *refs,
+ struct ref_transaction *transaction,
+ struct ref **head,
+ struct ref ***tail)
+{
+ struct hashmap existing_refs;
+ struct hashmap remote_refs;
+ struct oidset fetch_oids = OIDSET_INIT;
+ struct string_list remote_refs_list = STRING_LIST_INIT_NODUP;
+ struct string_list_item *remote_ref_item;
+ const struct ref *ref;
+ struct refname_hash_entry *item = NULL;
+ const int quick_flags = OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT;
+
+ refname_hash_init(&existing_refs);
+ refname_hash_init(&remote_refs);
+ create_fetch_oidset(head, &fetch_oids);
+
+ for_each_ref(add_one_refname, &existing_refs);
+
+ /*
+ * If we already have a transaction, then we need to filter out all
+ * tags which have already been queued up.
+ */
+ if (transaction)
+ ref_transaction_for_each_queued_update(transaction,
+ add_already_queued_tags,
+ &existing_refs);
+
+ for (ref = refs; ref; ref = ref->next) {
+ if (!starts_with(ref->name, "refs/tags/"))
+ continue;
+
+ /*
+ * The peeled ref always follows the matching base
+ * ref, so if we see a peeled ref that we don't want
+ * to fetch then we can mark the ref entry in the list
+ * as one to ignore by setting its "ignore" flag via
+ * clear_item().
+ */
+ if (ends_with(ref->name, "^{}")) {
+ if (item &&
+ !has_object_file_with_flags(&ref->old_oid, quick_flags) &&
+ !oidset_contains(&fetch_oids, &ref->old_oid) &&
+ !has_object_file_with_flags(&item->oid, quick_flags) &&
+ !oidset_contains(&fetch_oids, &item->oid))
+ clear_item(item);
+ item = NULL;
+ continue;
+ }
+
+ /*
+ * If item is non-NULL here, then we previously saw a
+ * ref not followed by a peeled reference, so we need
+ * to check if it is a lightweight tag that we want to
+ * fetch.
+ */
+ if (item &&
+ !has_object_file_with_flags(&item->oid, quick_flags) &&
+ !oidset_contains(&fetch_oids, &item->oid))
+ clear_item(item);
+
+ item = NULL;
+
+ /* skip duplicates and refs that we already have */
+ if (refname_hash_exists(&remote_refs, ref->name) ||
+ refname_hash_exists(&existing_refs, ref->name))
+ continue;
+
+ item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid);
+ string_list_insert(&remote_refs_list, ref->name);
+ }
+ hashmap_clear_and_free(&existing_refs, struct refname_hash_entry, ent);
+
+ /*
+ * We may have a final lightweight tag that needs to be
+ * checked to see if it needs fetching.
+ */
+ if (item &&
+ !has_object_file_with_flags(&item->oid, quick_flags) &&
+ !oidset_contains(&fetch_oids, &item->oid))
+ clear_item(item);
+
+ /*
+ * For all the tags in the remote_refs_list,
+ * add them to the list of refs to be fetched
+ */
+ for_each_string_list_item(remote_ref_item, &remote_refs_list) {
+ const char *refname = remote_ref_item->string;
+ struct ref *rm;
+ unsigned int hash = strhash(refname);
+
+ item = hashmap_get_entry_from_hash(&remote_refs, hash, refname,
+ struct refname_hash_entry, ent);
+ if (!item)
+ BUG("unseen remote ref?");
+
+ /* Unless we have already decided to ignore this item... */
+ if (item->ignore)
+ continue;
+
+ rm = alloc_ref(item->refname);
+ rm->peer_ref = alloc_ref(item->refname);
+ oidcpy(&rm->old_oid, &item->oid);
+ **tail = rm;
+ *tail = &rm->next;
+ }
+ hashmap_clear_and_free(&remote_refs, struct refname_hash_entry, ent);
+ string_list_clear(&remote_refs_list, 0);
+ oidset_clear(&fetch_oids);
+}
+
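+/*
+ * For --prefetch, drop refspecs that have no destination or that target
+ * refs/tags/, and rewrite the remaining destinations to live under
+ * refs/prefetch/, presumably so a background prefetch never moves
+ * user-visible refs.
+ */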
+static void filter_prefetch_refspec(struct refspec *rs)
+{
+ int i;
+
+ if (!prefetch)
+ return;
+
+ for (i = 0; i < rs->nr; i++) {
+ struct strbuf new_dst = STRBUF_INIT;
+ char *old_dst;
+ const char *sub = NULL;
+
+ if (rs->items[i].negative)
+ continue;
+ if (!rs->items[i].dst ||
+ (rs->items[i].src &&
+ !strncmp(rs->items[i].src,
+ ref_namespace[NAMESPACE_TAGS].ref,
+ strlen(ref_namespace[NAMESPACE_TAGS].ref)))) {
+ int j;
+
+ free(rs->items[i].src);
+ free(rs->items[i].dst);
+
+ for (j = i + 1; j < rs->nr; j++) {
+ rs->items[j - 1] = rs->items[j];
+ rs->raw[j - 1] = rs->raw[j];
+ }
+ rs->nr--;
+ i--;
+ continue;
+ }
+
+ old_dst = rs->items[i].dst;
+ strbuf_addstr(&new_dst, ref_namespace[NAMESPACE_PREFETCH].ref);
+
+ /*
+ * If old_dst starts with "refs/", then place
+ * sub after that prefix. Otherwise, start at
+ * the beginning of the string.
+ */
+ if (!skip_prefix(old_dst, "refs/", &sub))
+ sub = old_dst;
+ strbuf_addstr(&new_dst, sub);
+
+ rs->items[i].dst = strbuf_detach(&new_dst, NULL);
+ rs->items[i].force = 1;
+
+ free(old_dst);
+ }
+}
+
+static struct ref *get_ref_map(struct remote *remote,
+ const struct ref *remote_refs,
+ struct refspec *rs,
+ int tags, int *autotags)
+{
+ int i;
+ struct ref *rm;
+ struct ref *ref_map = NULL;
+ struct ref **tail = &ref_map;
+
+ /* opportunistically-updated references: */
+ struct ref *orefs = NULL, **oref_tail = &orefs;
+
+ struct hashmap existing_refs;
+ int existing_refs_populated = 0;
+
+ filter_prefetch_refspec(rs);
+ if (remote)
+ filter_prefetch_refspec(&remote->fetch);
+
+ if (rs->nr) {
+ struct refspec *fetch_refspec;
+
+ for (i = 0; i < rs->nr; i++) {
+ get_fetch_map(remote_refs, &rs->items[i], &tail, 0);
+ if (rs->items[i].dst && rs->items[i].dst[0])
+ *autotags = 1;
+ }
+ /* Merge everything on the command line (but not --tags) */
+ for (rm = ref_map; rm; rm = rm->next)
+ rm->fetch_head_status = FETCH_HEAD_MERGE;
+
+ /*
+ * For any refs that we happen to be fetching via
+ * command-line arguments, the destination ref might
+ * have been missing or have been different than the
+ * remote-tracking ref that would be derived from the
+ * configured refspec. In these cases, we want to
+ * take the opportunity to update their configured
+ * remote-tracking reference. However, we do not want
+ * to mention these entries in FETCH_HEAD at all, as
+ * they would simply be duplicates of existing
+ * entries, so we set them FETCH_HEAD_IGNORE below.
+ *
+ * We compute these entries now, based only on the
+ * refspecs specified on the command line. But we add
+ * them to the list following the refspecs resulting
+ * from the tags option so that one of the latter,
+ * which has FETCH_HEAD_NOT_FOR_MERGE, is not removed
+ * by ref_remove_duplicates() in favor of one of these
+ * opportunistic entries with FETCH_HEAD_IGNORE.
+ */
+ if (refmap.nr)
+ fetch_refspec = &refmap;
+ else
+ fetch_refspec = &remote->fetch;
+
+ for (i = 0; i < fetch_refspec->nr; i++)
+ get_fetch_map(ref_map, &fetch_refspec->items[i], &oref_tail, 1);
+ } else if (refmap.nr) {
+ die("--refmap option is only meaningful with command-line refspec(s)");
+ } else {
+ /* Use the defaults */
+ struct branch *branch = branch_get(NULL);
+ int has_merge = branch_has_merge_config(branch);
+ if (remote &&
+ (remote->fetch.nr ||
+ /* Note: has_merge implies non-NULL branch->remote_name */
+ (has_merge && !strcmp(branch->remote_name, remote->name)))) {
+ for (i = 0; i < remote->fetch.nr; i++) {
+ get_fetch_map(remote_refs, &remote->fetch.items[i], &tail, 0);
+ if (remote->fetch.items[i].dst &&
+ remote->fetch.items[i].dst[0])
+ *autotags = 1;
+ if (!i && !has_merge && ref_map &&
+ !remote->fetch.items[0].pattern)
+ ref_map->fetch_head_status = FETCH_HEAD_MERGE;
+ }
+ /*
+ * if the remote we're fetching from is the same
+ * as given in branch.<name>.remote, we add the
+ * ref given in branch.<name>.merge, too.
+ *
+ * Note: has_merge implies non-NULL branch->remote_name
+ */
+ if (has_merge &&
+ !strcmp(branch->remote_name, remote->name))
+ add_merge_config(&ref_map, remote_refs, branch, &tail);
+ } else if (!prefetch) {
+ ref_map = get_remote_ref(remote_refs, "HEAD");
+ if (!ref_map)
+ die(_("couldn't find remote ref HEAD"));
+ ref_map->fetch_head_status = FETCH_HEAD_MERGE;
+ tail = &ref_map->next;
+ }
+ }
+
+ if (tags == TAGS_SET)
+ /* also fetch all tags */
+ get_fetch_map(remote_refs, tag_refspec, &tail, 0);
+ else if (tags == TAGS_DEFAULT && *autotags)
+ find_non_local_tags(remote_refs, NULL, &ref_map, &tail);
+
+ /* Now append any refs to be updated opportunistically: */
+ *tail = orefs;
+ for (rm = orefs; rm; rm = rm->next) {
+ rm->fetch_head_status = FETCH_HEAD_IGNORE;
+ tail = &rm->next;
+ }
+
+ /*
+ * apply negative refspecs first, before we remove duplicates. This is
+ * necessary as negative refspecs might remove an otherwise conflicting
+ * duplicate.
+ */
+ if (rs->nr)
+ ref_map = apply_negative_refspecs(ref_map, rs);
+ else
+ ref_map = apply_negative_refspecs(ref_map, &remote->fetch);
+
+ ref_map = ref_remove_duplicates(ref_map);
+
+ for (rm = ref_map; rm; rm = rm->next) {
+ if (rm->peer_ref) {
+ const char *refname = rm->peer_ref->name;
+ struct refname_hash_entry *peer_item;
+ unsigned int hash = strhash(refname);
+
+ if (!existing_refs_populated) {
+ refname_hash_init(&existing_refs);
+ for_each_ref(add_one_refname, &existing_refs);
+ existing_refs_populated = 1;
+ }
+
+ peer_item = hashmap_get_entry_from_hash(&existing_refs,
+ hash, refname,
+ struct refname_hash_entry, ent);
+ if (peer_item) {
+ struct object_id *old_oid = &peer_item->oid;
+ oidcpy(&rm->peer_ref->old_oid, old_oid);
+ }
+ }
+ }
+ if (existing_refs_populated)
+ hashmap_clear_and_free(&existing_refs, struct refname_hash_entry, ent);
+
+ return ref_map;
+}
+
+#define STORE_REF_ERROR_OTHER 1
+#define STORE_REF_ERROR_DF_CONFLICT 2
+
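+/*
+ * Queue a single ref update, creating and committing a one-off transaction
+ * when the caller did not supply one. Returns 0 or one of the
+ * STORE_REF_ERROR_* codes defined above; --dry-run short-circuits to 0.
+ */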
+static int s_update_ref(const char *action,
+ struct ref *ref,
+ struct ref_transaction *transaction,
+ int check_old)
+{
+ char *msg;
+ char *rla = getenv("GIT_REFLOG_ACTION");
+ struct ref_transaction *our_transaction = NULL;
+ struct strbuf err = STRBUF_INIT;
+ int ret;
+
+ if (dry_run)
+ return 0;
+ if (!rla)
+ rla = default_rla.buf;
+ msg = xstrfmt("%s: %s", rla, action);
+
+ /*
+ * If no transaction was passed to us, we manage the transaction
+ * ourselves. Otherwise, we trust the caller to handle the transaction
+ * lifecycle.
+ */
+ if (!transaction) {
+ transaction = our_transaction = ref_transaction_begin(&err);
+ if (!transaction) {
+ ret = STORE_REF_ERROR_OTHER;
+ goto out;
+ }
+ }
+
+ ret = ref_transaction_update(transaction, ref->name, &ref->new_oid,
+ check_old ? &ref->old_oid : NULL,
+ 0, msg, &err);
+ if (ret) {
+ ret = STORE_REF_ERROR_OTHER;
+ goto out;
+ }
+
+ if (our_transaction) {
+ switch (ref_transaction_commit(our_transaction, &err)) {
+ case 0:
+ break;
+ case TRANSACTION_NAME_CONFLICT:
+ ret = STORE_REF_ERROR_DF_CONFLICT;
+ goto out;
+ default:
+ ret = STORE_REF_ERROR_OTHER;
+ goto out;
+ }
+ }
+
+out:
+ ref_transaction_free(our_transaction);
+ if (ret)
+ error("%s", err.buf);
+ strbuf_release(&err);
+ free(msg);
+ return ret;
+}
+
+static int refcol_width = 10;
+static int compact_format;
+
+static void adjust_refcol_width(const struct ref *ref)
+{
+ int max, rlen, llen, len;
+
+ /* uptodate lines are only shown on high verbosity level */
+ if (verbosity <= 0 && oideq(&ref->peer_ref->old_oid, &ref->old_oid))
+ return;
+
+ max = term_columns();
+ rlen = utf8_strwidth(prettify_refname(ref->name));
+
+ llen = utf8_strwidth(prettify_refname(ref->peer_ref->name));
+
+ /*
+ * rough estimation to see if the output line is too long and
+ * should not be counted (we can't do precise calculation
+ * anyway because we don't know if the error explanation part
+ * will be printed in update_local_ref)
+ */
+ if (compact_format) {
+ llen = 0;
+ max = max * 2 / 3;
+ }
+ len = 21 /* flag and summary */ + rlen + 4 /* -> */ + llen;
+ if (len >= max)
+ return;
+
+ /*
+ * Not precise calculation for compact mode because '*' can
+ * appear on the left hand side of '->' and shrink the column
+ * back.
+ */
+ if (refcol_width < rlen)
+ refcol_width = rlen;
+}
+
+static void prepare_format_display(struct ref *ref_map)
+{
+ struct ref *rm;
+ const char *format = "full";
+
+ if (verbosity < 0)
+ return;
+
+ git_config_get_string_tmp("fetch.output", &format);
+ if (!strcasecmp(format, "full"))
+ compact_format = 0;
+ else if (!strcasecmp(format, "compact"))
+ compact_format = 1;
+ else
+ die(_("invalid value for '%s': '%s'"),
+ "fetch.output", format);
+
+ for (rm = ref_map; rm; rm = rm->next) {
+ if (rm->status == REF_STATUS_REJECT_SHALLOW ||
+ !rm->peer_ref ||
+ !strcmp(rm->name, "HEAD"))
+ continue;
+
+ adjust_refcol_width(rm);
+ }
+}
+
+static void print_remote_to_local(struct strbuf *display,
+ const char *remote, const char *local)
+{
+ strbuf_addf(display, "%-*s -> %s", refcol_width, remote, local);
+}
+
+static int find_and_replace(struct strbuf *haystack,
+ const char *needle,
+ const char *placeholder)
+{
+ const char *p = NULL;
+ int plen, nlen;
+
+ nlen = strlen(needle);
+ if (ends_with(haystack->buf, needle))
+ p = haystack->buf + haystack->len - nlen;
+ else
+ p = strstr(haystack->buf, needle);
+ if (!p)
+ return 0;
+
+ if (p > haystack->buf && p[-1] != '/')
+ return 0;
+
+ plen = strlen(p);
+ if (plen > nlen && p[nlen] != '/')
+ return 0;
+
+ strbuf_splice(haystack, p - haystack->buf, nlen,
+ placeholder, strlen(placeholder));
+ return 1;
+}
+
+static void print_compact(struct strbuf *display,
+ const char *remote, const char *local)
+{
+ struct strbuf r = STRBUF_INIT;
+ struct strbuf l = STRBUF_INIT;
+
+ if (!strcmp(remote, local)) {
+ strbuf_addf(display, "%-*s -> *", refcol_width, remote);
+ return;
+ }
+
+ strbuf_addstr(&r, remote);
+ strbuf_addstr(&l, local);
+
+ if (!find_and_replace(&r, local, "*"))
+ find_and_replace(&l, remote, "*");
+ print_remote_to_local(display, r.buf, l.buf);
+
+ strbuf_release(&r);
+ strbuf_release(&l);
+}
+
+static void format_display(struct strbuf *display, char code,
+ const char *summary, const char *error,
+ const char *remote, const char *local,
+ int summary_width)
+{
+ int width;
+
+ if (verbosity < 0)
+ return;
+
+ width = (summary_width + strlen(summary) - gettext_width(summary));
+
+ strbuf_addf(display, "%c %-*s ", code, width, summary);
+ if (!compact_format)
+ print_remote_to_local(display, remote, local);
+ else
+ print_compact(display, remote, local);
+ if (error)
+ strbuf_addf(display, " (%s)", error);
+}
+
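+/*
+ * Decide how to update one local ref and describe the outcome for display:
+ * up to date, rejected (checked-out branch, tag clobber, non-fast-forward),
+ * new ref, fast-forward, or forced update. Returns non-zero on rejection
+ * or failure to update.
+ */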
+static int update_local_ref(struct ref *ref,
+ struct ref_transaction *transaction,
+ const char *remote, const struct ref *remote_ref,
+ struct strbuf *display, int summary_width)
+{
+ struct commit *current = NULL, *updated;
+ const char *pretty_ref = prettify_refname(ref->name);
+ int fast_forward = 0;
+
+ if (!repo_has_object_file(the_repository, &ref->new_oid))
+ die(_("object %s not found"), oid_to_hex(&ref->new_oid));
+
+ if (oideq(&ref->old_oid, &ref->new_oid)) {
+ if (verbosity > 0)
+ format_display(display, '=', _("[up to date]"), NULL,
+ remote, pretty_ref, summary_width);
+ return 0;
+ }
+
+ if (!update_head_ok &&
+ !is_null_oid(&ref->old_oid) &&
+ branch_checked_out(ref->name)) {
+ /*
+ * If this is the head, and it's not okay to update
+ * the head, and the old value of the head isn't empty...
+ */
+ format_display(display, '!', _("[rejected]"),
+ _("can't fetch into checked-out branch"),
+ remote, pretty_ref, summary_width);
+ return 1;
+ }
+
+ if (!is_null_oid(&ref->old_oid) &&
+ starts_with(ref->name, "refs/tags/")) {
+ if (force || ref->force) {
+ int r;
+ r = s_update_ref("updating tag", ref, transaction, 0);
+ format_display(display, r ? '!' : 't', _("[tag update]"),
+ r ? _("unable to update local ref") : NULL,
+ remote, pretty_ref, summary_width);
+ return r;
+ } else {
+ format_display(display, '!', _("[rejected]"), _("would clobber existing tag"),
+ remote, pretty_ref, summary_width);
+ return 1;
+ }
+ }
+
+ current = lookup_commit_reference_gently(the_repository,
+ &ref->old_oid, 1);
+ updated = lookup_commit_reference_gently(the_repository,
+ &ref->new_oid, 1);
+ if (!current || !updated) {
+ const char *msg;
+ const char *what;
+ int r;
+ /*
+ * Nicely describe the new ref we're fetching.
+ * Base this on the remote's ref name, as it's
+ * more likely to follow a standard layout.
+ */
+ const char *name = remote_ref ? remote_ref->name : "";
+ if (starts_with(name, "refs/tags/")) {
+ msg = "storing tag";
+ what = _("[new tag]");
+ } else if (starts_with(name, "refs/heads/")) {
+ msg = "storing head";
+ what = _("[new branch]");
+ } else {
+ msg = "storing ref";
+ what = _("[new ref]");
+ }
+
+ r = s_update_ref(msg, ref, transaction, 0);
+ format_display(display, r ? '!' : '*', what,
+ r ? _("unable to update local ref") : NULL,
+ remote, pretty_ref, summary_width);
+ return r;
+ }
+
+ if (fetch_show_forced_updates) {
+ uint64_t t_before = getnanotime();
+ fast_forward = in_merge_bases(current, updated);
+ forced_updates_ms += (getnanotime() - t_before) / 1000000;
+ } else {
+ fast_forward = 1;
+ }
+
+ if (fast_forward) {
+ struct strbuf quickref = STRBUF_INIT;
+ int r;
+
+ strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
+ strbuf_addstr(&quickref, "..");
+ strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
+ r = s_update_ref("fast-forward", ref, transaction, 1);
+ format_display(display, r ? '!' : ' ', quickref.buf,
+ r ? _("unable to update local ref") : NULL,
+ remote, pretty_ref, summary_width);
+ strbuf_release(&quickref);
+ return r;
+ } else if (force || ref->force) {
+ struct strbuf quickref = STRBUF_INIT;
+ int r;
+ strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
+ strbuf_addstr(&quickref, "...");
+ strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
+ r = s_update_ref("forced-update", ref, transaction, 1);
+ format_display(display, r ? '!' : '+', quickref.buf,
+ r ? _("unable to update local ref") : _("forced update"),
+ remote, pretty_ref, summary_width);
+ strbuf_release(&quickref);
+ return r;
+ } else {
+ format_display(display, '!', _("[rejected]"), _("non-fast-forward"),
+ remote, pretty_ref, summary_width);
+ return 1;
+ }
+}
+
+static const struct object_id *iterate_ref_map(void *cb_data)
+{
+ struct ref **rm = cb_data;
+ struct ref *ref = *rm;
+
+ while (ref && ref->status == REF_STATUS_REJECT_SHALLOW)
+ ref = ref->next;
+ if (!ref)
+ return NULL;
+ *rm = ref->next;
+ return &ref->old_oid;
+}
+
+struct fetch_head {
+ FILE *fp;
+ struct strbuf buf;
+};
+
+static int open_fetch_head(struct fetch_head *fetch_head)
+{
+ const char *filename = git_path_fetch_head(the_repository);
+
+ if (write_fetch_head) {
+ fetch_head->fp = fopen(filename, "a");
+ if (!fetch_head->fp)
+ return error_errno(_("cannot open '%s'"), filename);
+ strbuf_init(&fetch_head->buf, 0);
+ } else {
+ fetch_head->fp = NULL;
+ }
+
+ return 0;
+}
+
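+/*
+ * Format one FETCH_HEAD line: "<oid>\t<not-for-merge?>\t<note><url>", with
+ * any newline in the URL escaped as "\n". With --atomic the lines are
+ * buffered and only written once every ref update has succeeded.
+ */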
+static void append_fetch_head(struct fetch_head *fetch_head,
+ const struct object_id *old_oid,
+ enum fetch_head_status fetch_head_status,
+ const char *note,
+ const char *url, size_t url_len)
+{
+ char old_oid_hex[GIT_MAX_HEXSZ + 1];
+ const char *merge_status_marker;
+ size_t i;
+
+ if (!fetch_head->fp)
+ return;
+
+ switch (fetch_head_status) {
+ case FETCH_HEAD_NOT_FOR_MERGE:
+ merge_status_marker = "not-for-merge";
+ break;
+ case FETCH_HEAD_MERGE:
+ merge_status_marker = "";
+ break;
+ default:
+ /* do not write anything to FETCH_HEAD */
+ return;
+ }
+
+ strbuf_addf(&fetch_head->buf, "%s\t%s\t%s",
+ oid_to_hex_r(old_oid_hex, old_oid), merge_status_marker, note);
+ for (i = 0; i < url_len; ++i)
+ if ('\n' == url[i])
+ strbuf_addstr(&fetch_head->buf, "\\n");
+ else
+ strbuf_addch(&fetch_head->buf, url[i]);
+ strbuf_addch(&fetch_head->buf, '\n');
+
+ /*
+ * When using an atomic fetch, we do not want to update FETCH_HEAD if
+ * any of the reference updates fails. We thus have to write all
+ * updates to a buffer first and only commit it as soon as all
+ * references have been successfully updated.
+ */
+ if (!atomic_fetch) {
+ strbuf_write(&fetch_head->buf, fetch_head->fp);
+ strbuf_reset(&fetch_head->buf);
+ }
+}
+
+static void commit_fetch_head(struct fetch_head *fetch_head)
+{
+ if (!fetch_head->fp || !atomic_fetch)
+ return;
+ strbuf_write(&fetch_head->buf, fetch_head->fp);
+}
+
+static void close_fetch_head(struct fetch_head *fetch_head)
+{
+ if (!fetch_head->fp)
+ return;
+
+ fclose(fetch_head->fp);
+ strbuf_release(&fetch_head->buf);
+}
+
+static const char warn_show_forced_updates[] =
+N_("fetch normally indicates which branches had a forced update,\n"
+ "but that check has been disabled; to re-enable, use '--show-forced-updates'\n"
+ "flag or run 'git config fetch.showForcedUpdates true'");
+static const char warn_time_show_forced_updates[] =
+N_("it took %.2f seconds to check forced updates; you can use\n"
+ "'--no-show-forced-updates' or run 'git config fetch.showForcedUpdates false'\n"
+ "to avoid this check\n");
+
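+/*
+ * Walk the ref map, write FETCH_HEAD entries and update the matching local
+ * refs, printing one status line per ref. Returns non-zero when the
+ * connectivity check or any ref update fails.
+ */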
+static int store_updated_refs(const char *raw_url, const char *remote_name,
+ int connectivity_checked,
+ struct ref_transaction *transaction, struct ref *ref_map,
+ struct fetch_head *fetch_head)
+{
+ int url_len, i, rc = 0;
+ struct strbuf note = STRBUF_INIT;
+ const char *what, *kind;
+ struct ref *rm;
+ char *url;
+ int want_status;
+ int summary_width = 0;
+
+ if (verbosity >= 0)
+ summary_width = transport_summary_width(ref_map);
+
+ if (raw_url)
+ url = transport_anonymize_url(raw_url);
+ else
+ url = xstrdup("foreign");
+
+ if (!connectivity_checked) {
+ struct check_connected_options opt = CHECK_CONNECTED_INIT;
+
+ rm = ref_map;
+ if (check_connected(iterate_ref_map, &rm, &opt)) {
+ rc = error(_("%s did not send all necessary objects\n"), url);
+ goto abort;
+ }
+ }
+
+ prepare_format_display(ref_map);
+
+ /*
+ * We do a pass for each fetch_head_status type in their enum order, so
+ * merged entries are written before not-for-merge. That lets readers
+ * use FETCH_HEAD as a refname to refer to the ref to be merged.
+ */
+ for (want_status = FETCH_HEAD_MERGE;
+ want_status <= FETCH_HEAD_IGNORE;
+ want_status++) {
+ for (rm = ref_map; rm; rm = rm->next) {
+ struct ref *ref = NULL;
+
+ if (rm->status == REF_STATUS_REJECT_SHALLOW) {
+ if (want_status == FETCH_HEAD_MERGE)
+ warning(_("rejected %s because shallow roots are not allowed to be updated"),
+ rm->peer_ref ? rm->peer_ref->name : rm->name);
+ continue;
+ }
+
+ /*
+ * When writing FETCH_HEAD we need to determine whether
+ * we already have the commit or not. If not, then the
+ * reference is not for merge and needs to be written
+ * to the reflog after other commits which we already
+ * have. We're not interested in this property though
+ * in case FETCH_HEAD is not to be updated, so we can
+ * skip the classification in that case.
+ */
+ if (fetch_head->fp) {
+ struct commit *commit = NULL;
+
+ /*
+ * References in "refs/tags/" are often going to point
+ * to annotated tags, which are not part of the
+ * commit-graph. We thus only try to look up refs in
+ * the graph which are not in that namespace to not
+ * regress performance in repositories with many
+ * annotated tags.
+ */
+ if (!starts_with(rm->name, "refs/tags/"))
+ commit = lookup_commit_in_graph(the_repository, &rm->old_oid);
+ if (!commit) {
+ commit = lookup_commit_reference_gently(the_repository,
+ &rm->old_oid,
+ 1);
+ if (!commit)
+ rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
+ }
+ }
+
+ if (rm->fetch_head_status != want_status)
+ continue;
+
+ if (rm->peer_ref) {
+ ref = alloc_ref(rm->peer_ref->name);
+ oidcpy(&ref->old_oid, &rm->peer_ref->old_oid);
+ oidcpy(&ref->new_oid, &rm->old_oid);
+ ref->force = rm->peer_ref->force;
+ }
+
+ if (recurse_submodules != RECURSE_SUBMODULES_OFF &&
+ (!rm->peer_ref || !oideq(&ref->old_oid, &ref->new_oid))) {
+ check_for_new_submodule_commits(&rm->old_oid);
+ }
+
+ if (!strcmp(rm->name, "HEAD")) {
+ kind = "";
+ what = "";
+ }
+ else if (skip_prefix(rm->name, "refs/heads/", &what))
+ kind = "branch";
+ else if (skip_prefix(rm->name, "refs/tags/", &what))
+ kind = "tag";
+ else if (skip_prefix(rm->name, "refs/remotes/", &what))
+ kind = "remote-tracking branch";
+ else {
+ kind = "";
+ what = rm->name;
+ }
+
+ url_len = strlen(url);
+ for (i = url_len - 1; url[i] == '/' && 0 <= i; i--)
+ ;
+ url_len = i + 1;
+ if (4 < i && !strncmp(".git", url + i - 3, 4))
+ url_len = i - 3;
+
+ strbuf_reset(&note);
+ if (*what) {
+ if (*kind)
+ strbuf_addf(&note, "%s ", kind);
+ strbuf_addf(&note, "'%s' of ", what);
+ }
+
+ append_fetch_head(fetch_head, &rm->old_oid,
+ rm->fetch_head_status,
+ note.buf, url, url_len);
+
+ strbuf_reset(&note);
+ if (ref) {
+ rc |= update_local_ref(ref, transaction, what,
+ rm, &note, summary_width);
+ free(ref);
+ } else if (write_fetch_head || dry_run) {
+ /*
+ * Display fetches written to FETCH_HEAD (or
+ * would be written to FETCH_HEAD, if --dry-run
+ * is set).
+ */
+ format_display(&note, '*',
+ *kind ? kind : "branch", NULL,
+ *what ? what : "HEAD",
+ "FETCH_HEAD", summary_width);
+ }
+ if (note.len) {
+ if (!shown_url) {
+ fprintf(stderr, _("From %.*s\n"),
+ url_len, url);
+ shown_url = 1;
+ }
+ fprintf(stderr, " %s\n", note.buf);
+ }
+ }
+ }
+
+ if (rc & STORE_REF_ERROR_DF_CONFLICT)
+ error(_("some local refs could not be updated; try running\n"
+ " 'git remote prune %s' to remove any old, conflicting "
+ "branches"), remote_name);
+
+ if (advice_enabled(ADVICE_FETCH_SHOW_FORCED_UPDATES)) {
+ if (!fetch_show_forced_updates) {
+ warning(_(warn_show_forced_updates));
+ } else if (forced_updates_ms > FORCED_UPDATES_DELAY_WARNING_IN_MS) {
+ warning(_(warn_time_show_forced_updates),
+ forced_updates_ms / 1000.0);
+ }
+ }
+
+ abort:
+ strbuf_release(&note);
+ free(url);
+ return rc;
+}
+
+/*
+ * We would want to bypass the object transfer altogether if
+ * everything we are going to fetch already exists and is connected
+ * locally.
+ */
+static int check_exist_and_connected(struct ref *ref_map)
+{
+ struct ref *rm = ref_map;
+ struct check_connected_options opt = CHECK_CONNECTED_INIT;
+ struct ref *r;
+
+ /*
+ * If we are deepening a shallow clone we already have these
+ * objects reachable. Running rev-list here will return with
+ * a good (0) exit status and we'll bypass the fetch that we
+ * really need to perform. Claiming failure now will ensure
+ * we perform the network exchange to deepen our history.
+ */
+ if (deepen)
+ return -1;
+
+ /*
+ * Similarly, if we need to refetch, we always want to perform a full
+ * fetch ignoring existing objects.
+ */
+ if (refetch)
+ return -1;
+
+
+ /*
+ * check_connected() allows objects to merely be promised, but
+ * we need all direct targets to exist.
+ */
+ for (r = rm; r; r = r->next) {
+ if (!has_object_file_with_flags(&r->old_oid,
+ OBJECT_INFO_SKIP_FETCH_OBJECT))
+ return -1;
+ }
+
+ opt.quiet = 1;
+ return check_connected(iterate_ref_map, &rm, &opt);
+}
+
+static int fetch_and_consume_refs(struct transport *transport,
+ struct ref_transaction *transaction,
+ struct ref *ref_map,
+ struct fetch_head *fetch_head)
+{
+ int connectivity_checked = 1;
+ int ret;
+
+ /*
+ * We don't need to perform a fetch in case we can already satisfy all
+ * refs.
+ */
+ ret = check_exist_and_connected(ref_map);
+ if (ret) {
+ trace2_region_enter("fetch", "fetch_refs", the_repository);
+ ret = transport_fetch_refs(transport, ref_map);
+ trace2_region_leave("fetch", "fetch_refs", the_repository);
+ if (ret)
+ goto out;
+ connectivity_checked = transport->smart_options ?
+ transport->smart_options->connectivity_checked : 0;
+ }
+
+ trace2_region_enter("fetch", "consume_refs", the_repository);
+ ret = store_updated_refs(transport->url, transport->remote->name,
+ connectivity_checked, transaction, ref_map,
+ fetch_head);
+ trace2_region_leave("fetch", "consume_refs", the_repository);
+
+out:
+ transport_unlock_pack(transport, 0);
+ return ret;
+}
+
+static int prune_refs(struct refspec *rs,
+ struct ref_transaction *transaction,
+ struct ref *ref_map,
+ const char *raw_url)
+{
+ int url_len, i, result = 0;
+ struct ref *ref, *stale_refs = get_stale_heads(rs, ref_map);
+ struct strbuf err = STRBUF_INIT;
+ char *url;
+ const char *dangling_msg = dry_run
+ ? _(" (%s will become dangling)")
+ : _(" (%s has become dangling)");
+
+ if (raw_url)
+ url = transport_anonymize_url(raw_url);
+ else
+ url = xstrdup("foreign");
+
+ url_len = strlen(url);
+ for (i = url_len - 1; url[i] == '/' && 0 <= i; i--)
+ ;
+
+ url_len = i + 1;
+ if (4 < i && !strncmp(".git", url + i - 3, 4))
+ url_len = i - 3;
+
+ if (!dry_run) {
+ if (transaction) {
+ for (ref = stale_refs; ref; ref = ref->next) {
+ result = ref_transaction_delete(transaction, ref->name, NULL, 0,
+ "fetch: prune", &err);
+ if (result)
+ goto cleanup;
+ }
+ } else {
+ struct string_list refnames = STRING_LIST_INIT_NODUP;
+
+ for (ref = stale_refs; ref; ref = ref->next)
+ string_list_append(&refnames, ref->name);
+
+ result = delete_refs("fetch: prune", &refnames, 0);
+ string_list_clear(&refnames, 0);
+ }
+ }
+
+ if (verbosity >= 0) {
+ int summary_width = transport_summary_width(stale_refs);
+
+ for (ref = stale_refs; ref; ref = ref->next) {
+ struct strbuf sb = STRBUF_INIT;
+ if (!shown_url) {
+ fprintf(stderr, _("From %.*s\n"), url_len, url);
+ shown_url = 1;
+ }
+ format_display(&sb, '-', _("[deleted]"), NULL,
+ _("(none)"), prettify_refname(ref->name),
+ summary_width);
+ fprintf(stderr, " %s\n",sb.buf);
+ strbuf_release(&sb);
+ warn_dangling_symref(stderr, dangling_msg, ref->name);
+ }
+ }
+
+cleanup:
+ strbuf_release(&err);
+ free(url);
+ free_refs(stale_refs);
+ return result;
+}
+
+static void check_not_current_branch(struct ref *ref_map)
+{
+ const char *path;
+ for (; ref_map; ref_map = ref_map->next)
+ if (ref_map->peer_ref &&
+ starts_with(ref_map->peer_ref->name, "refs/heads/") &&
+ (path = branch_checked_out(ref_map->peer_ref->name)))
+ die(_("refusing to fetch into branch '%s' "
+ "checked out at '%s'"),
+ ref_map->peer_ref->name, path);
+}
+
+static int truncate_fetch_head(void)
+{
+ const char *filename = git_path_fetch_head(the_repository);
+ FILE *fp = fopen_for_writing(filename);
+
+ if (!fp)
+ return error_errno(_("cannot open '%s'"), filename);
+ fclose(fp);
+ return 0;
+}
+
+static void set_option(struct transport *transport, const char *name, const char *value)
+{
+ int r = transport_set_option(transport, name, value);
+ if (r < 0)
+ die(_("option \"%s\" value \"%s\" is not valid for %s"),
+ name, value, transport->url);
+ if (r > 0)
+ warning(_("option \"%s\" is ignored for %s\n"),
+ name, transport->url);
+}
+
+
+static int add_oid(const char *refname UNUSED,
+ const struct object_id *oid,
+ int flags UNUSED, void *cb_data)
+{
+ struct oid_array *oids = cb_data;
+
+ oid_array_append(oids, oid);
+ return 0;
+}
+
+static void add_negotiation_tips(struct git_transport_options *smart_options)
+{
+ struct oid_array *oids = xcalloc(1, sizeof(*oids));
+ int i;
+
+ for (i = 0; i < negotiation_tip.nr; i++) {
+ const char *s = negotiation_tip.items[i].string;
+ int old_nr;
+ if (!has_glob_specials(s)) {
+ struct object_id oid;
+ if (get_oid(s, &oid))
+ die(_("%s is not a valid object"), s);
+ if (!has_object(the_repository, &oid, 0))
+ die(_("the object %s does not exist"), s);
+ oid_array_append(oids, &oid);
+ continue;
+ }
+ old_nr = oids->nr;
+ for_each_glob_ref(add_oid, s, oids);
+ if (old_nr == oids->nr)
+ warning("ignoring --negotiation-tip=%s because it does not match any refs",
+ s);
+ }
+ smart_options->negotiation_tips = oids;
+}
+
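+/*
+ * Translate the parsed command-line and configuration state into transport
+ * options on a fresh transport for the given remote.
+ */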
+static struct transport *prepare_transport(struct remote *remote, int deepen)
+{
+ struct transport *transport;
+
+ transport = transport_get(remote, NULL);
+ transport_set_verbosity(transport, verbosity, progress);
+ transport->family = family;
+ if (upload_pack)
+ set_option(transport, TRANS_OPT_UPLOADPACK, upload_pack);
+ if (keep)
+ set_option(transport, TRANS_OPT_KEEP, "yes");
+ if (depth)
+ set_option(transport, TRANS_OPT_DEPTH, depth);
+ if (deepen && deepen_since)
+ set_option(transport, TRANS_OPT_DEEPEN_SINCE, deepen_since);
+ if (deepen && deepen_not.nr)
+ set_option(transport, TRANS_OPT_DEEPEN_NOT,
+ (const char *)&deepen_not);
+ if (deepen_relative)
+ set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
+ if (update_shallow)
+ set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
+ if (refetch)
+ set_option(transport, TRANS_OPT_REFETCH, "yes");
+ if (filter_options.choice) {
+ const char *spec =
+ expand_list_objects_filter_spec(&filter_options);
+ set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, spec);
+ set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
+ if (negotiation_tip.nr) {
+ if (transport->smart_options)
+ add_negotiation_tips(transport->smart_options);
+ else
+ warning("ignoring --negotiation-tip because the protocol does not support it");
+ }
+ return transport;
+}
+
+static int backfill_tags(struct transport *transport,
+ struct ref_transaction *transaction,
+ struct ref *ref_map,
+ struct fetch_head *fetch_head)
+{
+ int retcode, cannot_reuse;
+
+ /*
+ * Once we have set TRANS_OPT_DEEPEN_SINCE, we can't unset it
+ * when remote helper is used (setting it to an empty string
+ * is not unsetting). We could extend the remote helper
+ * protocol for that, but for now, just force a new connection
+ * without deepen-since. Similar story for deepen-not.
+ */
+ cannot_reuse = transport->cannot_reuse ||
+ deepen_since || deepen_not.nr;
+ if (cannot_reuse) {
+ gsecondary = prepare_transport(transport->remote, 0);
+ transport = gsecondary;
+ }
+
+ transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
+ transport_set_option(transport, TRANS_OPT_DEPTH, "0");
+ transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
+ retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head);
+
+ if (gsecondary) {
+ transport_disconnect(gsecondary);
+ gsecondary = NULL;
+ }
+
+ return retcode;
+}
+
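+/*
+ * Drive a fetch from a single remote: obtain the ref advertisement (unless
+ * every refspec names an exact OID), build the ref map, prune if requested,
+ * transfer and store the refs, backfill followed tags, and finally commit
+ * FETCH_HEAD and, with --atomic, the ref transaction.
+ */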
+static int do_fetch(struct transport *transport,
+ struct refspec *rs)
+{
+ struct ref_transaction *transaction = NULL;
+ struct ref *ref_map = NULL;
+ int autotags = (transport->remote->fetch_tags == 1);
+ int retcode = 0;
+ const struct ref *remote_refs;
+ struct transport_ls_refs_options transport_ls_refs_options =
+ TRANSPORT_LS_REFS_OPTIONS_INIT;
+ int must_list_refs = 1;
+ struct fetch_head fetch_head = { 0 };
+ struct strbuf err = STRBUF_INIT;
+
+ if (tags == TAGS_DEFAULT) {
+ if (transport->remote->fetch_tags == 2)
+ tags = TAGS_SET;
+ if (transport->remote->fetch_tags == -1)
+ tags = TAGS_UNSET;
+ }
+
+ /* if not appending, truncate FETCH_HEAD */
+ if (!append && write_fetch_head) {
+ retcode = truncate_fetch_head();
+ if (retcode)
+ goto cleanup;
+ }
+
+ if (rs->nr) {
+ int i;
+
+ refspec_ref_prefixes(rs, &transport_ls_refs_options.ref_prefixes);
+
+ /*
+ * We can avoid listing refs if all of them are exact
+ * OIDs
+ */
+ must_list_refs = 0;
+ for (i = 0; i < rs->nr; i++) {
+ if (!rs->items[i].exact_sha1) {
+ must_list_refs = 1;
+ break;
+ }
+ }
+ } else {
+ struct branch *branch = branch_get(NULL);
+
+ if (transport->remote->fetch.nr)
+ refspec_ref_prefixes(&transport->remote->fetch,
+ &transport_ls_refs_options.ref_prefixes);
+ if (branch_has_merge_config(branch) &&
+ !strcmp(branch->remote_name, transport->remote->name)) {
+ int i;
+ for (i = 0; i < branch->merge_nr; i++) {
+ strvec_push(&transport_ls_refs_options.ref_prefixes,
+ branch->merge[i]->src);
+ }
+ }
+ }
+
+ if (tags == TAGS_SET || tags == TAGS_DEFAULT) {
+ must_list_refs = 1;
+ if (transport_ls_refs_options.ref_prefixes.nr)
+ strvec_push(&transport_ls_refs_options.ref_prefixes,
+ "refs/tags/");
+ }
+
+ if (must_list_refs) {
+ trace2_region_enter("fetch", "remote_refs", the_repository);
+ remote_refs = transport_get_remote_refs(transport,
+ &transport_ls_refs_options);
+ trace2_region_leave("fetch", "remote_refs", the_repository);
+ } else
+ remote_refs = NULL;
+
+ transport_ls_refs_options_release(&transport_ls_refs_options);
+
+ ref_map = get_ref_map(transport->remote, remote_refs, rs,
+ tags, &autotags);
+ if (!update_head_ok)
+ check_not_current_branch(ref_map);
+
+ retcode = open_fetch_head(&fetch_head);
+ if (retcode)
+ goto cleanup;
+
+ if (atomic_fetch) {
+ transaction = ref_transaction_begin(&err);
+ if (!transaction) {
+ retcode = error("%s", err.buf);
+ goto cleanup;
+ }
+ }
+
+ if (tags == TAGS_DEFAULT && autotags)
+ transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
+ if (prune) {
+ /*
+ * We only prune based on refspecs specified
+ * explicitly (via command line or configuration); we
+ * don't care whether --tags was specified.
+ */
+ if (rs->nr) {
+ retcode = prune_refs(rs, transaction, ref_map, transport->url);
+ } else {
+ retcode = prune_refs(&transport->remote->fetch,
+ transaction, ref_map,
+ transport->url);
+ }
+ if (retcode != 0)
+ retcode = 1;
+ }
+
+ if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head)) {
+ retcode = 1;
+ goto cleanup;
+ }
+
+ /*
+ * If neither --no-tags nor --tags was specified, do automated tag
+ * following.
+ */
+ if (tags == TAGS_DEFAULT && autotags) {
+ struct ref *tags_ref_map = NULL, **tail = &tags_ref_map;
+
+ find_non_local_tags(remote_refs, transaction, &tags_ref_map, &tail);
+ if (tags_ref_map) {
+ /*
+ * If backfilling of tags fails then we want to tell
+ * the user so, but we have to continue regardless to
+ * populate upstream information of the references we
+ * have already fetched above. The exception though is
+ * when `--atomic` is passed: in that case we'll abort
+ * the transaction and don't commit anything.
+ */
+ if (backfill_tags(transport, transaction, tags_ref_map,
+ &fetch_head))
+ retcode = 1;
+ }
+
+ free_refs(tags_ref_map);
+ }
+
+ if (transaction) {
+ if (retcode)
+ goto cleanup;
+
+ retcode = ref_transaction_commit(transaction, &err);
+ if (retcode) {
+ error("%s", err.buf);
+ ref_transaction_free(transaction);
+ transaction = NULL;
+ goto cleanup;
+ }
+ }
+
+ commit_fetch_head(&fetch_head);
+
+ if (set_upstream) {
+ struct branch *branch = branch_get("HEAD");
+ struct ref *rm;
+ struct ref *source_ref = NULL;
+
+ /*
+ * We're setting the upstream configuration for the
+ * current branch. The relevant upstream is the
+ * fetched branch that is meant to be merged with the
+ * current one, i.e. the one fetched to FETCH_HEAD.
+ *
+ * When there are several such branches, consider the
+ * request ambiguous and err on the safe side by doing
+ * nothing and just emit a warning.
+ */
+ for (rm = ref_map; rm; rm = rm->next) {
+ if (!rm->peer_ref) {
+ if (source_ref) {
+ warning(_("multiple branches detected, incompatible with --set-upstream"));
+ goto cleanup;
+ } else {
+ source_ref = rm;
+ }
+ }
+ }
+ if (source_ref) {
+ if (!branch) {
+ const char *shortname = source_ref->name;
+ skip_prefix(shortname, "refs/heads/", &shortname);
+
+ warning(_("could not set upstream of HEAD to '%s' from '%s' when "
+ "it does not point to any branch."),
+ shortname, transport->remote->name);
+ goto cleanup;
+ }
+
+ if (!strcmp(source_ref->name, "HEAD") ||
+ starts_with(source_ref->name, "refs/heads/"))
+ install_branch_config(0,
+ branch->name,
+ transport->remote->name,
+ source_ref->name);
+ else if (starts_with(source_ref->name, "refs/remotes/"))
+ warning(_("not setting upstream for a remote remote-tracking branch"));
+ else if (starts_with(source_ref->name, "refs/tags/"))
+ warning(_("not setting upstream for a remote tag"));
+ else
+ warning(_("unknown branch type"));
+ } else {
+ warning(_("no source branch found;\n"
+ "you need to specify exactly one branch with the --set-upstream option"));
+ }
+ }
+
+cleanup:
+ if (retcode && transaction) {
+ ref_transaction_abort(transaction, &err);
+ error("%s", err.buf);
+ }
+
+ close_fetch_head(&fetch_head);
+ strbuf_release(&err);
+ free_refs(ref_map);
+ return retcode;
+}
+
+static int get_one_remote_for_fetch(struct remote *remote, void *priv)
+{
+ struct string_list *list = priv;
+ if (!remote->skip_default_update)
+ string_list_append(list, remote->name);
+ return 0;
+}
+
+struct remote_group_data {
+ const char *name;
+ struct string_list *list;
+};
+
+static int get_remote_group(const char *key, const char *value, void *priv)
+{
+ struct remote_group_data *g = priv;
+
+ if (skip_prefix(key, "remotes.", &key) && !strcmp(key, g->name)) {
+ /* split list by white space */
+ while (*value) {
+ size_t wordlen = strcspn(value, " \t\n");
+
+ if (wordlen >= 1)
+ string_list_append_nodup(g->list,
+ xstrndup(value, wordlen));
+ value += wordlen + (value[wordlen] != '\0');
+ }
+ }
+
+ return 0;
+}
+
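+/*
+ * Expand <name> as a remote group from the remotes.<name> configuration;
+ * if no group matches, fall back to treating it as a single configured
+ * remote. Returns 0 when the name matches neither.
+ */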
+static int add_remote_or_group(const char *name, struct string_list *list)
+{
+ int prev_nr = list->nr;
+ struct remote_group_data g;
+ g.name = name; g.list = list;
+
+ git_config(get_remote_group, &g);
+ if (list->nr == prev_nr) {
+ struct remote *remote = remote_get(name);
+ if (!remote_is_configured(remote, 0))
+ return 0;
+ string_list_append(list, remote->name);
+ }
+ return 1;
+}
+
+static void add_options_to_argv(struct strvec *argv)
+{
+ if (dry_run)
+ strvec_push(argv, "--dry-run");
+ if (prune != -1)
+ strvec_push(argv, prune ? "--prune" : "--no-prune");
+ if (prune_tags != -1)
+ strvec_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags");
+ if (update_head_ok)
+ strvec_push(argv, "--update-head-ok");
+ if (force)
+ strvec_push(argv, "--force");
+ if (keep)
+ strvec_push(argv, "--keep");
+ if (recurse_submodules == RECURSE_SUBMODULES_ON)
+ strvec_push(argv, "--recurse-submodules");
+ else if (recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND)
+ strvec_push(argv, "--recurse-submodules=on-demand");
+ if (tags == TAGS_SET)
+ strvec_push(argv, "--tags");
+ else if (tags == TAGS_UNSET)
+ strvec_push(argv, "--no-tags");
+ if (verbosity >= 2)
+ strvec_push(argv, "-v");
+ if (verbosity >= 1)
+ strvec_push(argv, "-v");
+ else if (verbosity < 0)
+ strvec_push(argv, "-q");
+ if (family == TRANSPORT_FAMILY_IPV4)
+ strvec_push(argv, "--ipv4");
+ else if (family == TRANSPORT_FAMILY_IPV6)
+ strvec_push(argv, "--ipv6");
+}
+
+/* Fetch multiple remotes in parallel */
+
+struct parallel_fetch_state {
+ const char **argv;
+ struct string_list *remotes;
+ int next, result;
+};
+
+static int fetch_next_remote(struct child_process *cp, struct strbuf *out,
+ void *cb, void **task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ char *remote;
+
+ if (state->next < 0 || state->next >= state->remotes->nr)
+ return 0;
+
+ remote = state->remotes->items[state->next++].string;
+ *task_cb = remote;
+
+ strvec_pushv(&cp->args, state->argv);
+ strvec_push(&cp->args, remote);
+ cp->git_cmd = 1;
+
+ if (verbosity >= 0)
+ printf(_("Fetching %s\n"), remote);
+
+ return 1;
+}
+
+static int fetch_failed_to_start(struct strbuf *out, void *cb, void *task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ const char *remote = task_cb;
+
+ state->result = error(_("could not fetch %s"), remote);
+
+ return 0;
+}
+
+static int fetch_finished(int result, struct strbuf *out,
+ void *cb, void *task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ const char *remote = task_cb;
+
+ if (result) {
+ strbuf_addf(out, _("could not fetch '%s' (exit code: %d)\n"),
+ remote, result);
+ state->result = -1;
+ }
+
+ return 0;
+}
+
+static int fetch_multiple(struct string_list *list, int max_children)
+{
+ int i, result = 0;
+ struct strvec argv = STRVEC_INIT;
+
+ if (!append && write_fetch_head) {
+ int errcode = truncate_fetch_head();
+ if (errcode)
+ return errcode;
+ }
+
+ strvec_pushl(&argv, "fetch", "--append", "--no-auto-gc",
+ "--no-write-commit-graph", NULL);
+ add_options_to_argv(&argv);
+
+ if (max_children != 1 && list->nr != 1) {
+ struct parallel_fetch_state state = { argv.v, list, 0, 0 };
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "fetch",
+ .tr2_label = "parallel/fetch",
+
+ .processes = max_children,
+
+ .get_next_task = &fetch_next_remote,
+ .start_failure = &fetch_failed_to_start,
+ .task_finished = &fetch_finished,
+ .data = &state,
+ };
+
+ strvec_push(&argv, "--end-of-options");
+
+ run_processes_parallel(&opts);
+ result = state.result;
+ } else
+ for (i = 0; i < list->nr; i++) {
+ const char *name = list->items[i].string;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushv(&cmd.args, argv.v);
+ strvec_push(&cmd.args, name);
+ if (verbosity >= 0)
+ printf(_("Fetching %s\n"), name);
+ cmd.git_cmd = 1;
+ if (run_command(&cmd)) {
+ error(_("could not fetch %s"), name);
+ result = 1;
+ }
+ }
+
+ strvec_clear(&argv);
+ return !!result;
+}
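+/*
+ * Illustrative note: each child spawned above runs something along the
+ * lines of
+ *
+ *     git fetch --append --no-auto-gc --no-write-commit-graph [<options>] <remote>
+ *
+ * where <options> are whatever add_options_to_argv() forwarded from the
+ * parent invocation.
+ */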
+
+/*
+ * Fetching from the promisor remote should use the given filter-spec
+ * or inherit the default filter-spec from the config.
+ */
+static inline void fetch_one_setup_partial(struct remote *remote)
+{
+ /*
+ * Explicit --no-filter argument overrides everything, regardless
+ * of any prior partial clones and fetches.
+ */
+ if (filter_options.no_filter)
+ return;
+
+ /*
+ * If no prior partial clone/fetch and the current fetch DID NOT
+ * request a partial-fetch, do a normal fetch.
+ */
+ if (!has_promisor_remote() && !filter_options.choice)
+ return;
+
+ /*
+ * If this is a partial-fetch request, we enable partial on
+ * this repo if not already enabled and remember the given
+ * filter-spec as the default for subsequent fetches to this
+ * remote if there is currently no default filter-spec.
+ */
+ if (filter_options.choice) {
+ partial_clone_register(remote->name, &filter_options);
+ return;
+ }
+
+ /*
+ * Do a partial-fetch from the promisor remote using either the
+ * explicitly given filter-spec or inherit the filter-spec from
+ * the config.
+ */
+ if (!filter_options.choice)
+ partial_clone_get_default_filter_spec(&filter_options, remote->name);
+ return;
+}
+
+static int fetch_one(struct remote *remote, int argc, const char **argv,
+ int prune_tags_ok, int use_stdin_refspecs)
+{
+ struct refspec rs = REFSPEC_INIT_FETCH;
+ int i;
+ int exit_code;
+ int maybe_prune_tags;
+ int remote_via_config = remote_is_configured(remote, 0);
+
+ if (!remote)
+ die(_("no remote repository specified; please specify either a URL or a\n"
+ "remote name from which new revisions should be fetched"));
+
+ gtransport = prepare_transport(remote, 1);
+
+ if (prune < 0) {
+ /* no command line request */
+ if (0 <= remote->prune)
+ prune = remote->prune;
+ else if (0 <= fetch_prune_config)
+ prune = fetch_prune_config;
+ else
+ prune = PRUNE_BY_DEFAULT;
+ }
+
+ if (prune_tags < 0) {
+ /* no command line request */
+ if (0 <= remote->prune_tags)
+ prune_tags = remote->prune_tags;
+ else if (0 <= fetch_prune_tags_config)
+ prune_tags = fetch_prune_tags_config;
+ else
+ prune_tags = PRUNE_TAGS_BY_DEFAULT;
+ }
+
+ maybe_prune_tags = prune_tags_ok && prune_tags;
+ if (maybe_prune_tags && remote_via_config)
+ refspec_append(&remote->fetch, TAG_REFSPEC);
+
+ if (maybe_prune_tags && (argc || !remote_via_config))
+ refspec_append(&rs, TAG_REFSPEC);
+
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], "tag")) {
+ i++;
+ if (i >= argc)
+ die(_("you need to specify a tag name"));
+
+ refspec_appendf(&rs, "refs/tags/%s:refs/tags/%s",
+ argv[i], argv[i]);
+ } else {
+ refspec_append(&rs, argv[i]);
+ }
+ }
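+	/*
+	 * For example (tag name is illustrative), "git fetch <remote> tag v1.0"
+	 * arrives here as the two arguments "tag" and "v1.0" and is appended
+	 * above as the refspec "refs/tags/v1.0:refs/tags/v1.0".
+	 */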
+
+ if (use_stdin_refspecs) {
+ struct strbuf line = STRBUF_INIT;
+ while (strbuf_getline_lf(&line, stdin) != EOF)
+ refspec_append(&rs, line.buf);
+ strbuf_release(&line);
+ }
+
+ if (server_options.nr)
+ gtransport->server_options = &server_options;
+
+ sigchain_push_common(unlock_pack_on_signal);
+ atexit(unlock_pack_atexit);
+ sigchain_push(SIGPIPE, SIG_IGN);
+ exit_code = do_fetch(gtransport, &rs);
+ sigchain_pop(SIGPIPE);
+ refspec_clear(&rs);
+ transport_disconnect(gtransport);
+ gtransport = NULL;
+ return exit_code;
+}
+
+int cmd_fetch(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct string_list list = STRING_LIST_INIT_DUP;
+ struct remote *remote = NULL;
+ int result = 0;
+ int prune_tags_ok = 1;
+
+ packet_trace_identity("fetch");
+
+ /* Record the command line for the reflog */
+ strbuf_addstr(&default_rla, "fetch");
+ for (i = 1; i < argc; i++) {
+ /* This handles non-URLs gracefully */
+ char *anon = transport_anonymize_url(argv[i]);
+
+ strbuf_addf(&default_rla, " %s", anon);
+ free(anon);
+ }
+
+ git_config(git_fetch_config, NULL);
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_fetch_options, builtin_fetch_usage, 0);
+
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ recurse_submodules = recurse_submodules_cli;
+
+ if (negotiate_only) {
+ switch (recurse_submodules_cli) {
+ case RECURSE_SUBMODULES_OFF:
+ case RECURSE_SUBMODULES_DEFAULT:
+ /*
+ * --negotiate-only should never recurse into
+ * submodules. Skip it by setting recurse_submodules to
+ * RECURSE_SUBMODULES_OFF.
+ */
+ recurse_submodules = RECURSE_SUBMODULES_OFF;
+ break;
+
+ default:
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--negotiate-only", "--recurse-submodules");
+ }
+ }
+
+ if (recurse_submodules != RECURSE_SUBMODULES_OFF) {
+ int *sfjc = submodule_fetch_jobs_config == -1
+ ? &submodule_fetch_jobs_config : NULL;
+ int *rs = recurse_submodules == RECURSE_SUBMODULES_DEFAULT
+ ? &recurse_submodules : NULL;
+
+ fetch_config_from_gitmodules(sfjc, rs);
+ }
+
+ if (negotiate_only && !negotiation_tip.nr)
+ die(_("--negotiate-only needs one or more --negotiation-tip=*"));
+
+ if (deepen_relative) {
+ if (deepen_relative < 0)
+ die(_("negative depth in --deepen is not supported"));
+ if (depth)
+ die(_("options '%s' and '%s' cannot be used together"), "--deepen", "--depth");
+ depth = xstrfmt("%d", deepen_relative);
+ }
+ if (unshallow) {
+ if (depth)
+ die(_("options '%s' and '%s' cannot be used together"), "--depth", "--unshallow");
+ else if (!is_repository_shallow(the_repository))
+ die(_("--unshallow on a complete repository does not make sense"));
+ else
+ depth = xstrfmt("%d", INFINITE_DEPTH);
+ }
+
+ /* no need to be strict, transport_set_option() will validate it again */
+ if (depth && atoi(depth) < 1)
+ die(_("depth %s is not a positive number"), depth);
+ if (depth || deepen_since || deepen_not.nr)
+ deepen = 1;
+
+ /* FETCH_HEAD never gets updated in --dry-run mode */
+ if (dry_run)
+ write_fetch_head = 0;
+
+ if (all) {
+ if (argc == 1)
+ die(_("fetch --all does not take a repository argument"));
+ else if (argc > 1)
+ die(_("fetch --all does not make sense with refspecs"));
+ (void) for_each_remote(get_one_remote_for_fetch, &list);
+
+ /* do not do fetch_multiple() of one */
+ if (list.nr == 1)
+ remote = remote_get(list.items[0].string);
+ } else if (argc == 0) {
+ /* No arguments -- use default remote */
+ remote = remote_get(NULL);
+ } else if (multiple) {
+ /* All arguments are assumed to be remotes or groups */
+ for (i = 0; i < argc; i++)
+ if (!add_remote_or_group(argv[i], &list))
+ die(_("no such remote or remote group: %s"),
+ argv[i]);
+ } else {
+ /* Single remote or group */
+ (void) add_remote_or_group(argv[0], &list);
+ if (list.nr > 1) {
+ /* More than one remote */
+ if (argc > 1)
+ die(_("fetching a group and specifying refspecs does not make sense"));
+ } else {
+ /* Zero or one remotes */
+ remote = remote_get(argv[0]);
+ prune_tags_ok = (argc == 1);
+ argc--;
+ argv++;
+ }
+ }
+
+ if (negotiate_only) {
+ struct oidset acked_commits = OIDSET_INIT;
+ struct oidset_iter iter;
+ const struct object_id *oid;
+
+ if (!remote)
+ die(_("must supply remote when using --negotiate-only"));
+ gtransport = prepare_transport(remote, 1);
+ if (gtransport->smart_options) {
+ gtransport->smart_options->acked_commits = &acked_commits;
+ } else {
+ warning(_("protocol does not support --negotiate-only, exiting"));
+ result = 1;
+ goto cleanup;
+ }
+ if (server_options.nr)
+ gtransport->server_options = &server_options;
+ result = transport_fetch_refs(gtransport, NULL);
+
+ oidset_iter_init(&acked_commits, &iter);
+ while ((oid = oidset_iter_next(&iter)))
+ printf("%s\n", oid_to_hex(oid));
+ oidset_clear(&acked_commits);
+ } else if (remote) {
+ if (filter_options.choice || has_promisor_remote())
+ fetch_one_setup_partial(remote);
+ result = fetch_one(remote, argc, argv, prune_tags_ok, stdin_refspecs);
+ } else {
+ int max_children = max_jobs;
+
+ if (filter_options.choice)
+ die(_("--filter can only be used with the remote "
+ "configured in extensions.partialclone"));
+
+ if (atomic_fetch)
+ die(_("--atomic can only be used when fetching "
+ "from one remote"));
+
+ if (stdin_refspecs)
+ die(_("--stdin can only be used when fetching "
+ "from one remote"));
+
+ if (max_children < 0)
+ max_children = fetch_parallel_config;
+
+ /* TODO should this also die if we have a previous partial-clone? */
+ result = fetch_multiple(&list, max_children);
+ }
+
+
+ /*
+ * This is only needed after fetch_one(), which does not fetch
+ * submodules by itself.
+ *
+ * When we fetch from multiple remotes, fetch_multiple() has
+ * already updated submodules to grab commits necessary for
+ * the fetched history from each remote, so there is no need
+ * to fetch submodules from here.
+ */
+ if (!result && remote && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
+ struct strvec options = STRVEC_INIT;
+ int max_children = max_jobs;
+
+ if (max_children < 0)
+ max_children = submodule_fetch_jobs_config;
+ if (max_children < 0)
+ max_children = fetch_parallel_config;
+
+ add_options_to_argv(&options);
+ result = fetch_submodules(the_repository,
+ &options,
+ submodule_prefix,
+ recurse_submodules,
+ recurse_submodules_default,
+ verbosity < 0,
+ max_children);
+ strvec_clear(&options);
+ }
+
+ /*
+ * Skip irrelevant tasks because we know objects were not
+ * fetched.
+ *
+ * NEEDSWORK: as a future optimization, we can return early
+ * whenever objects were not fetched e.g. if we already have all
+ * of them.
+ */
+ if (negotiate_only)
+ goto cleanup;
+
+ prepare_repo_settings(the_repository);
+ if (fetch_write_commit_graph > 0 ||
+ (fetch_write_commit_graph < 0 &&
+ the_repository->settings.fetch_write_commit_graph)) {
+ int commit_graph_flags = COMMIT_GRAPH_WRITE_SPLIT;
+
+ if (progress)
+ commit_graph_flags |= COMMIT_GRAPH_WRITE_PROGRESS;
+
+ write_commit_graph_reachable(the_repository->objects->odb,
+ commit_graph_flags,
+ NULL);
+ }
+
+ if (enable_auto_gc) {
+ if (refetch) {
+ /*
+ * Hint auto-maintenance strongly to encourage repacking,
+ * but respect config settings disabling it.
+ */
+ int opt_val;
+ if (git_config_get_int("gc.autopacklimit", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("gc.autoPackLimit=1");
+
+ if (git_config_get_int("maintenance.incremental-repack.auto", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("maintenance.incremental-repack.auto=-1");
+ }
+ run_auto_maintenance(verbosity < 0);
+ }
+
+ cleanup:
+ string_list_clear(&list, 0);
+ return result;
+}
diff --git a/builtin/fmt-merge-msg.c b/builtin/fmt-merge-msg.c
new file mode 100644
index 0000000..8d8fd39
--- /dev/null
+++ b/builtin/fmt-merge-msg.c
@@ -0,0 +1,69 @@
+#include "builtin.h"
+#include "config.h"
+#include "fmt-merge-msg.h"
+#include "parse-options.h"
+
+static const char * const fmt_merge_msg_usage[] = {
+ N_("git fmt-merge-msg [-m <message>] [--log[=<n>] | --no-log] [--file <file>]"),
+ NULL
+};
+
+int cmd_fmt_merge_msg(int argc, const char **argv, const char *prefix)
+{
+ const char *inpath = NULL;
+ const char *message = NULL;
+ char *into_name = NULL;
+ int shortlog_len = -1;
+ struct option options[] = {
+ { OPTION_INTEGER, 0, "log", &shortlog_len, N_("n"),
+ N_("populate log with at most <n> entries from shortlog"),
+ PARSE_OPT_OPTARG, NULL, DEFAULT_MERGE_LOG_LEN },
+ { OPTION_INTEGER, 0, "summary", &shortlog_len, N_("n"),
+ N_("alias for --log (deprecated)"),
+ PARSE_OPT_OPTARG | PARSE_OPT_HIDDEN, NULL,
+ DEFAULT_MERGE_LOG_LEN },
+ OPT_STRING('m', "message", &message, N_("text"),
+ N_("use <text> as start of message")),
+ OPT_STRING(0, "into-name", &into_name, N_("name"),
+ N_("use <name> instead of the real target branch")),
+ OPT_FILENAME('F', "file", &inpath, N_("file to read from")),
+ OPT_END()
+ };
+
+ FILE *in = stdin;
+ struct strbuf input = STRBUF_INIT, output = STRBUF_INIT;
+ int ret;
+ struct fmt_merge_msg_opts opts;
+
+ git_config(fmt_merge_msg_config, NULL);
+ argc = parse_options(argc, argv, prefix, options, fmt_merge_msg_usage,
+ 0);
+ if (argc > 0)
+ usage_with_options(fmt_merge_msg_usage, options);
+ if (shortlog_len < 0)
+ shortlog_len = (merge_log_config > 0) ? merge_log_config : 0;
+
+ if (inpath && strcmp(inpath, "-")) {
+ in = fopen(inpath, "r");
+ if (!in)
+ die_errno("cannot open '%s'", inpath);
+ }
+
+ if (strbuf_read(&input, fileno(in), 0) < 0)
+ die_errno("could not read input file");
+
+ if (message)
+ strbuf_addstr(&output, message);
+
+ memset(&opts, 0, sizeof(opts));
+ opts.add_title = !message;
+ opts.credit_people = 1;
+ opts.shortlog_len = shortlog_len;
+ opts.into_name = into_name;
+
+ ret = fmt_merge_msg(&input, &output, &opts);
+ if (ret)
+ return ret;
+ write_in_full(STDOUT_FILENO, output.buf, output.len);
+ return 0;
+}
diff --git a/builtin/for-each-ref.c b/builtin/for-each-ref.c
new file mode 100644
index 0000000..6f62f40
--- /dev/null
+++ b/builtin/for-each-ref.c
@@ -0,0 +1,101 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "refs.h"
+#include "object.h"
+#include "parse-options.h"
+#include "ref-filter.h"
+
+static char const * const for_each_ref_usage[] = {
+ N_("git for-each-ref [<options>] [<pattern>]"),
+ N_("git for-each-ref [--points-at <object>]"),
+ N_("git for-each-ref [--merged [<commit>]] [--no-merged [<commit>]]"),
+ N_("git for-each-ref [--contains [<commit>]] [--no-contains [<commit>]]"),
+ NULL
+};
+
+int cmd_for_each_ref(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct ref_sorting *sorting;
+ struct string_list sorting_options = STRING_LIST_INIT_DUP;
+ int maxcount = 0, icase = 0;
+ struct ref_array array;
+ struct ref_filter filter;
+ struct ref_format format = REF_FORMAT_INIT;
+ struct strbuf output = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+
+ struct option opts[] = {
+ OPT_BIT('s', "shell", &format.quote_style,
+ N_("quote placeholders suitably for shells"), QUOTE_SHELL),
+ OPT_BIT('p', "perl", &format.quote_style,
+ N_("quote placeholders suitably for perl"), QUOTE_PERL),
+ OPT_BIT(0 , "python", &format.quote_style,
+ N_("quote placeholders suitably for python"), QUOTE_PYTHON),
+ OPT_BIT(0 , "tcl", &format.quote_style,
+ N_("quote placeholders suitably for Tcl"), QUOTE_TCL),
+
+ OPT_GROUP(""),
+ OPT_INTEGER( 0 , "count", &maxcount, N_("show only <n> matched refs")),
+ OPT_STRING( 0 , "format", &format.format, N_("format"), N_("format to use for the output")),
+ OPT__COLOR(&format.use_color, N_("respect format colors")),
+ OPT_REF_SORT(&sorting_options),
+ OPT_CALLBACK(0, "points-at", &filter.points_at,
+ N_("object"), N_("print only refs which points at the given object"),
+ parse_opt_object_name),
+ OPT_MERGED(&filter, N_("print only refs that are merged")),
+ OPT_NO_MERGED(&filter, N_("print only refs that are not merged")),
+ OPT_CONTAINS(&filter.with_commit, N_("print only refs which contain the commit")),
+ OPT_NO_CONTAINS(&filter.no_commit, N_("print only refs which don't contain the commit")),
+ OPT_BOOL(0, "ignore-case", &icase, N_("sorting and filtering are case insensitive")),
+ OPT_END(),
+ };
+
+ memset(&array, 0, sizeof(array));
+ memset(&filter, 0, sizeof(filter));
+
+ format.format = "%(objectname) %(objecttype)\t%(refname)";
+
+ git_config(git_default_config, NULL);
+
+ parse_options(argc, argv, prefix, opts, for_each_ref_usage, 0);
+ if (maxcount < 0) {
+ error("invalid --count argument: `%d'", maxcount);
+ usage_with_options(for_each_ref_usage, opts);
+ }
+ if (HAS_MULTI_BITS(format.quote_style)) {
+ error("more than one quoting style?");
+ usage_with_options(for_each_ref_usage, opts);
+ }
+ if (verify_ref_format(&format))
+ usage_with_options(for_each_ref_usage, opts);
+
+ sorting = ref_sorting_options(&sorting_options);
+ ref_sorting_set_sort_flags_all(sorting, REF_SORTING_ICASE, icase);
+ filter.ignore_case = icase;
+
+ filter.name_patterns = argv;
+ filter.match_as_path = 1;
+ filter_refs(&array, &filter, FILTER_REFS_ALL);
+ ref_array_sort(sorting, &array);
+
+ if (!maxcount || array.nr < maxcount)
+ maxcount = array.nr;
+ for (i = 0; i < maxcount; i++) {
+ strbuf_reset(&err);
+ strbuf_reset(&output);
+ if (format_ref_array_item(array.items[i], &format, &output, &err))
+ die("%s", err.buf);
+ fwrite(output.buf, 1, output.len, stdout);
+ putchar('\n');
+ }
+
+ strbuf_release(&err);
+ strbuf_release(&output);
+ ref_array_clear(&array);
+ free_commit_list(filter.with_commit);
+ free_commit_list(filter.no_commit);
+ ref_sorting_release(sorting);
+ return 0;
+}
diff --git a/builtin/for-each-repo.c b/builtin/for-each-repo.c
new file mode 100644
index 0000000..6aeac37
--- /dev/null
+++ b/builtin/for-each-repo.c
@@ -0,0 +1,62 @@
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "string-list.h"
+
+static const char * const for_each_repo_usage[] = {
+ N_("git for-each-repo --config=<config> [--] <arguments>"),
+ NULL
+};
+
+static int run_command_on_repo(const char *path, int argc, const char ** argv)
+{
+ int i;
+ struct child_process child = CHILD_PROCESS_INIT;
+ char *abspath = interpolate_path(path, 0);
+
+ child.git_cmd = 1;
+ strvec_pushl(&child.args, "-C", abspath, NULL);
+
+ for (i = 0; i < argc; i++)
+ strvec_push(&child.args, argv[i]);
+
+ free(abspath);
+
+ return run_command(&child);
+}
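+/*
+ * Illustrative note: for a configured path like "~/repos/example" the child
+ * process above amounts to running
+ *
+ *     git -C /home/<user>/repos/example <arguments>
+ *
+ * since interpolate_path() expands the leading "~".
+ */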
+
+int cmd_for_each_repo(int argc, const char **argv, const char *prefix)
+{
+ static const char *config_key = NULL;
+ int i, result = 0;
+ const struct string_list *values;
+
+ const struct option options[] = {
+ OPT_STRING(0, "config", &config_key, N_("config"),
+ N_("config key storing a list of repository paths")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, for_each_repo_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (!config_key)
+ die(_("missing --config=<config>"));
+
+ values = repo_config_get_value_multi(the_repository,
+ config_key);
+
+ /*
+ * Do nothing on an empty list, which is equivalent to the case
+ * where the config variable does not exist at all.
+ */
+ if (!values)
+ return 0;
+
+ for (i = 0; !result && i < values->nr; i++)
+ result = run_command_on_repo(values->items[i].string, argc, argv);
+
+ return result;
+}
diff --git a/builtin/fsck.c b/builtin/fsck.c
new file mode 100644
index 0000000..d207bd9
--- /dev/null
+++ b/builtin/fsck.c
@@ -0,0 +1,1018 @@
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "commit.h"
+#include "tree.h"
+#include "blob.h"
+#include "tag.h"
+#include "refs.h"
+#include "pack.h"
+#include "cache-tree.h"
+#include "tree-walk.h"
+#include "fsck.h"
+#include "parse-options.h"
+#include "dir.h"
+#include "progress.h"
+#include "streaming.h"
+#include "decorate.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "resolve-undo.h"
+#include "run-command.h"
+#include "worktree.h"
+
+#define REACHABLE 0x0001
+#define SEEN 0x0002
+#define HAS_OBJ 0x0004
+/* This flag is set if something points to this object. */
+#define USED 0x0008
+
+static int show_root;
+static int show_tags;
+static int show_unreachable;
+static int include_reflogs = 1;
+static int check_full = 1;
+static int connectivity_only;
+static int check_strict;
+static int keep_cache_objects;
+static struct fsck_options fsck_walk_options = FSCK_OPTIONS_DEFAULT;
+static struct fsck_options fsck_obj_options = FSCK_OPTIONS_DEFAULT;
+static int errors_found;
+static int write_lost_and_found;
+static int verbose;
+static int show_progress = -1;
+static int show_dangling = 1;
+static int name_objects;
+#define ERROR_OBJECT 01
+#define ERROR_REACHABLE 02
+#define ERROR_PACK 04
+#define ERROR_REFS 010
+#define ERROR_COMMIT_GRAPH 020
+#define ERROR_MULTI_PACK_INDEX 040
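+/*
+ * errors_found accumulates the ERROR_* bits above and is returned as the
+ * exit code of cmd_fsck(); e.g. an exit status of 5 (an illustrative
+ * combination) would mean ERROR_OBJECT | ERROR_PACK.
+ */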
+
+static const char *describe_object(const struct object_id *oid)
+{
+ return fsck_describe_object(&fsck_walk_options, oid);
+}
+
+static const char *printable_type(const struct object_id *oid,
+ enum object_type type)
+{
+ const char *ret;
+
+ if (type == OBJ_NONE)
+ type = oid_object_info(the_repository, oid, NULL);
+
+ ret = type_name(type);
+ if (!ret)
+ ret = _("unknown");
+
+ return ret;
+}
+
+static int objerror(struct object *obj, const char *err)
+{
+ errors_found |= ERROR_OBJECT;
+ /* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("error in %s %s: %s"),
+ printable_type(&obj->oid, obj->type),
+ describe_object(&obj->oid), err);
+ return -1;
+}
+
+static int fsck_error_func(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
+{
+ switch (msg_type) {
+ case FSCK_WARN:
+ /* TRANSLATORS: e.g. warning in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("warning in %s %s: %s"),
+ printable_type(oid, object_type),
+ describe_object(oid), message);
+ return 0;
+ case FSCK_ERROR:
+ /* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("error in %s %s: %s"),
+ printable_type(oid, object_type),
+ describe_object(oid), message);
+ return 1;
+ default:
+ BUG("%d (FSCK_IGNORE?) should never trigger this callback",
+ msg_type);
+ }
+}
+
+static struct object_array pending;
+
+static int mark_object(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
+{
+ struct object *parent = data;
+
+ /*
+	 * The only case where data is NULL or type is OBJ_ANY is when
+	 * mark_object_reachable() calls us. All the callers of
+	 * that function have a non-NULL obj, hence ...
+ */
+ if (!obj) {
+ /* ... these references to parent->fld are safe here */
+ printf_ln(_("broken link from %7s %s"),
+ printable_type(&parent->oid, parent->type),
+ describe_object(&parent->oid));
+ printf_ln(_("broken link from %7s %s"),
+ (type == OBJ_ANY ? _("unknown") : type_name(type)),
+ _("unknown"));
+ errors_found |= ERROR_REACHABLE;
+ return 1;
+ }
+
+ if (type != OBJ_ANY && obj->type != type)
+ /* ... and the reference to parent is safe here */
+ objerror(parent, _("wrong object type in link"));
+
+ if (obj->flags & REACHABLE)
+ return 0;
+ obj->flags |= REACHABLE;
+
+ if (is_promisor_object(&obj->oid))
+ /*
+ * Further recursion does not need to be performed on this
+ * object since it is a promisor object (so it does not need to
+ * be added to "pending").
+ */
+ return 0;
+
+ if (!(obj->flags & HAS_OBJ)) {
+ if (parent && !has_object(the_repository, &obj->oid, 1)) {
+ printf_ln(_("broken link from %7s %s\n"
+ " to %7s %s"),
+ printable_type(&parent->oid, parent->type),
+ describe_object(&parent->oid),
+ printable_type(&obj->oid, obj->type),
+ describe_object(&obj->oid));
+ errors_found |= ERROR_REACHABLE;
+ }
+ return 1;
+ }
+
+ add_object_array(obj, NULL, &pending);
+ return 0;
+}
+
+static void mark_object_reachable(struct object *obj)
+{
+ mark_object(obj, OBJ_ANY, NULL, NULL);
+}
+
+static int traverse_one_object(struct object *obj)
+{
+ int result = fsck_walk(obj, obj, &fsck_walk_options);
+
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ free_tree_buffer(tree);
+ }
+ return result;
+}
+
+static int traverse_reachable(void)
+{
+ struct progress *progress = NULL;
+ unsigned int nr = 0;
+ int result = 0;
+ if (show_progress)
+ progress = start_delayed_progress(_("Checking connectivity"), 0);
+ while (pending.nr) {
+ result |= traverse_one_object(object_array_pop(&pending));
+ display_progress(progress, ++nr);
+ }
+ stop_progress(&progress);
+ return !!result;
+}
+
+static int mark_used(struct object *obj, enum object_type object_type,
+ void *data, struct fsck_options *options)
+{
+ if (!obj)
+ return 1;
+ obj->flags |= USED;
+ return 0;
+}
+
+static void mark_unreachable_referents(const struct object_id *oid)
+{
+ struct fsck_options options = FSCK_OPTIONS_DEFAULT;
+ struct object *obj = lookup_object(the_repository, oid);
+
+ if (!obj || !(obj->flags & HAS_OBJ))
+ return; /* not part of our original set */
+ if (obj->flags & REACHABLE)
+ return; /* reachable objects already traversed */
+
+ /*
+ * Avoid passing OBJ_NONE to fsck_walk, which will parse the object
+ * (and we want to avoid parsing blobs).
+ */
+ if (obj->type == OBJ_NONE) {
+ enum object_type type = oid_object_info(the_repository,
+ &obj->oid, NULL);
+ if (type > 0)
+ object_as_type(obj, type, 0);
+ }
+
+ options.walk = mark_used;
+ fsck_walk(obj, NULL, &options);
+ if (obj->type == OBJ_TREE)
+ free_tree_buffer((struct tree *)obj);
+}
+
+static int mark_loose_unreachable_referents(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ mark_unreachable_referents(oid);
+ return 0;
+}
+
+static int mark_packed_unreachable_referents(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ mark_unreachable_referents(oid);
+ return 0;
+}
+
+/*
+ * Check a single reachable object
+ */
+static void check_reachable_object(struct object *obj)
+{
+ /*
+ * We obviously want the object to be parsed,
+ * except if it was in a pack-file and we didn't
+ * do a full fsck
+ */
+ if (!(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&obj->oid))
+ return;
+ if (has_object_pack(&obj->oid))
+ return; /* it is in pack - forget about it */
+ printf_ln(_("missing %s %s"),
+ printable_type(&obj->oid, obj->type),
+ describe_object(&obj->oid));
+ errors_found |= ERROR_REACHABLE;
+ return;
+ }
+}
+
+/*
+ * Check a single unreachable object
+ */
+static void check_unreachable_object(struct object *obj)
+{
+ /*
+ * Missing unreachable object? Ignore it. It's not like
+ * we miss it (since it can't be reached), nor do we want
+ * to complain about it being unreachable (since it does
+ * not exist).
+ */
+ if (!(obj->flags & HAS_OBJ))
+ return;
+
+ /*
+ * Unreachable object that exists? Show it if asked to,
+ * since this is something that is prunable.
+ */
+ if (show_unreachable) {
+ printf_ln(_("unreachable %s %s"),
+ printable_type(&obj->oid, obj->type),
+ describe_object(&obj->oid));
+ return;
+ }
+
+ /*
+ * "!USED" means that nothing at all points to it, including
+ * other unreachable objects. In other words, it's the "tip"
+ * of some set of unreachable objects, usually a commit that
+ * got dropped.
+ *
+ * Such starting points are more interesting than some random
+ * set of unreachable objects, so we show them even if the user
+ * hasn't asked for _all_ unreachable objects. If you have
+ * deleted a branch by mistake, this is a prime candidate to
+ * start looking at, for example.
+ */
+ if (!(obj->flags & USED)) {
+ if (show_dangling)
+ printf_ln(_("dangling %s %s"),
+ printable_type(&obj->oid, obj->type),
+ describe_object(&obj->oid));
+ if (write_lost_and_found) {
+ char *filename = git_pathdup("lost-found/%s/%s",
+ obj->type == OBJ_COMMIT ? "commit" : "other",
+ describe_object(&obj->oid));
+ FILE *f;
+
+ if (safe_create_leading_directories_const(filename)) {
+ error(_("could not create lost-found"));
+ free(filename);
+ return;
+ }
+ f = xfopen(filename, "w");
+ if (obj->type == OBJ_BLOB) {
+ if (stream_blob_to_fd(fileno(f), &obj->oid, NULL, 1))
+ die_errno(_("could not write '%s'"), filename);
+ } else
+ fprintf(f, "%s\n", describe_object(&obj->oid));
+ if (fclose(f))
+ die_errno(_("could not finish '%s'"),
+ filename);
+ free(filename);
+ }
+ return;
+ }
+
+ /*
+ * Otherwise? It's there, it's unreachable, and some other unreachable
+ * object points to it. Ignore it - it's not interesting, and we showed
+ * all the interesting cases above.
+ */
+}
+
+static void check_object(struct object *obj)
+{
+ if (verbose)
+ fprintf_ln(stderr, _("Checking %s"), describe_object(&obj->oid));
+
+ if (obj->flags & REACHABLE)
+ check_reachable_object(obj);
+ else
+ check_unreachable_object(obj);
+}
+
+static void check_connectivity(void)
+{
+ int i, max;
+
+ /* Traverse the pending reachable objects */
+ traverse_reachable();
+
+ /*
+ * With --connectivity-only, we won't have actually opened and marked
+ * unreachable objects with USED. Do that now to make --dangling, etc
+ * accurate.
+ */
+ if (connectivity_only && (show_dangling || write_lost_and_found)) {
+ /*
+ * Even though we already have a "struct object" for each of
+ * these in memory, we must not iterate over the internal
+ * object hash as we do below. Our loop would potentially
+ * resize the hash, making our iteration invalid.
+ *
+ * Instead, we'll just go back to the source list of objects,
+ * and ignore any that weren't present in our earlier
+ * traversal.
+ */
+ for_each_loose_object(mark_loose_unreachable_referents, NULL, 0);
+ for_each_packed_object(mark_packed_unreachable_referents, NULL, 0);
+ }
+
+ /* Look up all the requirements, warn about missing objects.. */
+ max = get_max_object_index();
+ if (verbose)
+ fprintf_ln(stderr, _("Checking connectivity (%d objects)"), max);
+
+ for (i = 0; i < max; i++) {
+ struct object *obj = get_indexed_object(i);
+
+ if (obj)
+ check_object(obj);
+ }
+}
+
+static int fsck_obj(struct object *obj, void *buffer, unsigned long size)
+{
+ int err;
+
+ if (obj->flags & SEEN)
+ return 0;
+ obj->flags |= SEEN;
+
+ if (verbose)
+ fprintf_ln(stderr, _("Checking %s %s"),
+ printable_type(&obj->oid, obj->type),
+ describe_object(&obj->oid));
+
+ if (fsck_walk(obj, NULL, &fsck_obj_options))
+ objerror(obj, _("broken links"));
+ err = fsck_object(obj, buffer, size, &fsck_obj_options);
+ if (err)
+ goto out;
+
+ if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
+
+ if (!commit->parents && show_root)
+ printf_ln(_("root %s"),
+ describe_object(&commit->object.oid));
+ }
+
+ if (obj->type == OBJ_TAG) {
+ struct tag *tag = (struct tag *) obj;
+
+ if (show_tags && tag->tagged) {
+ printf_ln(_("tagged %s %s (%s) in %s"),
+ printable_type(&tag->tagged->oid, tag->tagged->type),
+ describe_object(&tag->tagged->oid),
+ tag->tag,
+ describe_object(&tag->object.oid));
+ }
+ }
+
+out:
+ if (obj->type == OBJ_TREE)
+ free_tree_buffer((struct tree *)obj);
+ return err;
+}
+
+static int fsck_obj_buffer(const struct object_id *oid, enum object_type type,
+ unsigned long size, void *buffer, int *eaten)
+{
+ /*
+ * Note, buffer may be NULL if type is OBJ_BLOB. See
+ * verify_packfile(), data_valid variable for details.
+ */
+ struct object *obj;
+ obj = parse_object_buffer(the_repository, oid, type, size, buffer,
+ eaten);
+ if (!obj) {
+ errors_found |= ERROR_OBJECT;
+ return error(_("%s: object corrupt or missing"),
+ oid_to_hex(oid));
+ }
+ obj->flags &= ~(REACHABLE | SEEN);
+ obj->flags |= HAS_OBJ;
+ return fsck_obj(obj, buffer, size);
+}
+
+static int default_refs;
+
+static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
+ timestamp_t timestamp)
+{
+ struct object *obj;
+
+ if (!is_null_oid(oid)) {
+ obj = lookup_object(the_repository, oid);
+ if (obj && (obj->flags & HAS_OBJ)) {
+ if (timestamp)
+ fsck_put_object_name(&fsck_walk_options, oid,
+ "%s@{%"PRItime"}",
+ refname, timestamp);
+ obj->flags |= USED;
+ mark_object_reachable(obj);
+ } else if (!is_promisor_object(oid)) {
+ error(_("%s: invalid reflog entry %s"),
+ refname, oid_to_hex(oid));
+ errors_found |= ERROR_REACHABLE;
+ }
+ }
+}
+
+static int fsck_handle_reflog_ent(struct object_id *ooid, struct object_id *noid,
+ const char *email UNUSED,
+ timestamp_t timestamp, int tz UNUSED,
+ const char *message UNUSED, void *cb_data)
+{
+ const char *refname = cb_data;
+
+ if (verbose)
+ fprintf_ln(stderr, _("Checking reflog %s->%s"),
+ oid_to_hex(ooid), oid_to_hex(noid));
+
+ fsck_handle_reflog_oid(refname, ooid, 0);
+ fsck_handle_reflog_oid(refname, noid, timestamp);
+ return 0;
+}
+
+static int fsck_handle_reflog(const char *logname,
+ const struct object_id *oid UNUSED,
+ int flag UNUSED, void *cb_data)
+{
+ struct strbuf refname = STRBUF_INIT;
+
+ strbuf_worktree_ref(cb_data, &refname, logname);
+ for_each_reflog_ent(refname.buf, fsck_handle_reflog_ent, refname.buf);
+ strbuf_release(&refname);
+ return 0;
+}
+
+static int fsck_handle_ref(const char *refname, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ struct object *obj;
+
+ obj = parse_object(the_repository, oid);
+ if (!obj) {
+ if (is_promisor_object(oid)) {
+ /*
+ * Increment default_refs anyway, because this is a
+ * valid ref.
+ */
+ default_refs++;
+ return 0;
+ }
+ error(_("%s: invalid sha1 pointer %s"),
+ refname, oid_to_hex(oid));
+ errors_found |= ERROR_REACHABLE;
+		/* We'll continue with the rest despite the error. */
+ return 0;
+ }
+ if (obj->type != OBJ_COMMIT && is_branch(refname)) {
+ error(_("%s: not a commit"), refname);
+ errors_found |= ERROR_REFS;
+ }
+ default_refs++;
+ obj->flags |= USED;
+ fsck_put_object_name(&fsck_walk_options,
+ oid, "%s", refname);
+ mark_object_reachable(obj);
+
+ return 0;
+}
+
+static int fsck_head_link(const char *head_ref_name,
+ const char **head_points_at,
+ struct object_id *head_oid);
+
+static void get_default_heads(void)
+{
+ struct worktree **worktrees, **p;
+ const char *head_points_at;
+ struct object_id head_oid;
+
+ for_each_rawref(fsck_handle_ref, NULL);
+
+ worktrees = get_worktrees();
+ for (p = worktrees; *p; p++) {
+ struct worktree *wt = *p;
+ struct strbuf ref = STRBUF_INIT;
+
+ strbuf_worktree_ref(wt, &ref, "HEAD");
+ fsck_head_link(ref.buf, &head_points_at, &head_oid);
+ if (head_points_at && !is_null_oid(&head_oid))
+ fsck_handle_ref(ref.buf, &head_oid, 0, NULL);
+ strbuf_release(&ref);
+
+ if (include_reflogs)
+ refs_for_each_reflog(get_worktree_ref_store(wt),
+ fsck_handle_reflog, wt);
+ }
+ free_worktrees(worktrees);
+
+ /*
+ * Not having any default heads isn't really fatal, but
+ * it does mean that "--unreachable" no longer makes any
+ * sense (since in this case everything will obviously
+	 * be unreachable by definition).
+ *
+ * Showing dangling objects is valid, though (as those
+ * dangling objects are likely lost heads).
+ *
+ * So we just print a warning about it, and clear the
+ * "show_unreachable" flag.
+ */
+ if (!default_refs) {
+ fprintf_ln(stderr, _("notice: No default references"));
+ show_unreachable = 0;
+ }
+}
+
+struct for_each_loose_cb
+{
+ struct progress *progress;
+ struct strbuf obj_type;
+};
+
+static int fsck_loose(const struct object_id *oid, const char *path, void *data)
+{
+ struct for_each_loose_cb *cb_data = data;
+ struct object *obj;
+ enum object_type type = OBJ_NONE;
+ unsigned long size;
+ void *contents = NULL;
+ int eaten;
+ struct object_info oi = OBJECT_INFO_INIT;
+ struct object_id real_oid = *null_oid();
+ int err = 0;
+
+ strbuf_reset(&cb_data->obj_type);
+ oi.type_name = &cb_data->obj_type;
+ oi.sizep = &size;
+ oi.typep = &type;
+
+ if (read_loose_object(path, oid, &real_oid, &contents, &oi) < 0) {
+ if (contents && !oideq(&real_oid, oid))
+ err = error(_("%s: hash-path mismatch, found at: %s"),
+ oid_to_hex(&real_oid), path);
+ else
+ err = error(_("%s: object corrupt or missing: %s"),
+ oid_to_hex(oid), path);
+ }
+ if (type != OBJ_NONE && type < 0)
+ err = error(_("%s: object is of unknown type '%s': %s"),
+ oid_to_hex(&real_oid), cb_data->obj_type.buf,
+ path);
+ if (err < 0) {
+ errors_found |= ERROR_OBJECT;
+ free(contents);
+ return 0; /* keep checking other objects */
+ }
+
+ if (!contents && type != OBJ_BLOB)
+ BUG("read_loose_object streamed a non-blob");
+
+ obj = parse_object_buffer(the_repository, oid, type, size,
+ contents, &eaten);
+
+ if (!obj) {
+ errors_found |= ERROR_OBJECT;
+ error(_("%s: object could not be parsed: %s"),
+ oid_to_hex(oid), path);
+ if (!eaten)
+ free(contents);
+ return 0; /* keep checking other objects */
+ }
+
+ obj->flags &= ~(REACHABLE | SEEN);
+ obj->flags |= HAS_OBJ;
+ if (fsck_obj(obj, contents, size))
+ errors_found |= ERROR_OBJECT;
+
+ if (!eaten)
+ free(contents);
+ return 0; /* keep checking other objects, even if we saw an error */
+}
+
+static int fsck_cruft(const char *basename, const char *path, void *data)
+{
+ if (!starts_with(basename, "tmp_obj_"))
+ fprintf_ln(stderr, _("bad sha1 file: %s"), path);
+ return 0;
+}
+
+static int fsck_subdir(unsigned int nr, const char *path, void *data)
+{
+ struct for_each_loose_cb *cb_data = data;
+ struct progress *progress = cb_data->progress;
+ display_progress(progress, nr + 1);
+ return 0;
+}
+
+static void fsck_object_dir(const char *path)
+{
+ struct progress *progress = NULL;
+ struct for_each_loose_cb cb_data = {
+ .obj_type = STRBUF_INIT,
+ .progress = progress,
+ };
+
+ if (verbose)
+ fprintf_ln(stderr, _("Checking object directory"));
+
+ if (show_progress)
+ progress = start_progress(_("Checking object directories"), 256);
+
+ for_each_loose_file_in_objdir(path, fsck_loose, fsck_cruft, fsck_subdir,
+ &cb_data);
+ display_progress(progress, 256);
+ stop_progress(&progress);
+ strbuf_release(&cb_data.obj_type);
+}
+
+static int fsck_head_link(const char *head_ref_name,
+ const char **head_points_at,
+ struct object_id *head_oid)
+{
+ int null_is_error = 0;
+
+ if (verbose)
+ fprintf_ln(stderr, _("Checking %s link"), head_ref_name);
+
+ *head_points_at = resolve_ref_unsafe(head_ref_name, 0, head_oid, NULL);
+ if (!*head_points_at) {
+ errors_found |= ERROR_REFS;
+ return error(_("invalid %s"), head_ref_name);
+ }
+ if (!strcmp(*head_points_at, head_ref_name))
+ /* detached HEAD */
+ null_is_error = 1;
+ else if (!starts_with(*head_points_at, "refs/heads/")) {
+ errors_found |= ERROR_REFS;
+ return error(_("%s points to something strange (%s)"),
+ head_ref_name, *head_points_at);
+ }
+ if (is_null_oid(head_oid)) {
+ if (null_is_error) {
+ errors_found |= ERROR_REFS;
+ return error(_("%s: detached HEAD points at nothing"),
+ head_ref_name);
+ }
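+		/*
+		 * The "+ 11" below skips the "refs/heads/" prefix so that
+		 * only the branch name is shown.
+		 */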
+ fprintf_ln(stderr,
+ _("notice: %s points to an unborn branch (%s)"),
+ head_ref_name, *head_points_at + 11);
+ }
+ return 0;
+}
+
+static int fsck_cache_tree(struct cache_tree *it)
+{
+ int i;
+ int err = 0;
+
+ if (verbose)
+ fprintf_ln(stderr, _("Checking cache tree"));
+
+ if (0 <= it->entry_count) {
+ struct object *obj = parse_object(the_repository, &it->oid);
+ if (!obj) {
+ error(_("%s: invalid sha1 pointer in cache-tree"),
+ oid_to_hex(&it->oid));
+ errors_found |= ERROR_REFS;
+ return 1;
+ }
+ obj->flags |= USED;
+ fsck_put_object_name(&fsck_walk_options, &it->oid, ":");
+ mark_object_reachable(obj);
+ if (obj->type != OBJ_TREE)
+ err |= objerror(obj, _("non-tree in cache-tree"));
+ }
+ for (i = 0; i < it->subtree_nr; i++)
+ err |= fsck_cache_tree(it->down[i]->cache_tree);
+ return err;
+}
+
+static int fsck_resolve_undo(struct index_state *istate)
+{
+ struct string_list_item *item;
+ struct string_list *resolve_undo = istate->resolve_undo;
+
+ if (!resolve_undo)
+ return 0;
+
+ for_each_string_list_item(item, resolve_undo) {
+ const char *path = item->string;
+ struct resolve_undo_info *ru = item->util;
+ int i;
+
+ if (!ru)
+ continue;
+ for (i = 0; i < 3; i++) {
+ struct object *obj;
+
+ if (!ru->mode[i] || !S_ISREG(ru->mode[i]))
+ continue;
+
+ obj = parse_object(the_repository, &ru->oid[i]);
+ if (!obj) {
+ error(_("%s: invalid sha1 pointer in resolve-undo"),
+ oid_to_hex(&ru->oid[i]));
+ errors_found |= ERROR_REFS;
+ continue;
+ }
+ obj->flags |= USED;
+ fsck_put_object_name(&fsck_walk_options, &ru->oid[i],
+ ":(%d):%s", i, path);
+ mark_object_reachable(obj);
+ }
+ }
+ return 0;
+}
+
+static void mark_object_for_connectivity(const struct object_id *oid)
+{
+ struct object *obj = lookup_unknown_object(the_repository, oid);
+ obj->flags |= HAS_OBJ;
+}
+
+static int mark_loose_for_connectivity(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ mark_object_for_connectivity(oid);
+ return 0;
+}
+
+static int mark_packed_for_connectivity(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ mark_object_for_connectivity(oid);
+ return 0;
+}
+
+static char const * const fsck_usage[] = {
+ N_("git fsck [--tags] [--root] [--unreachable] [--cache] [--no-reflogs]\n"
+ " [--[no-]full] [--strict] [--verbose] [--lost-found]\n"
+ " [--[no-]dangling] [--[no-]progress] [--connectivity-only]\n"
+ " [--[no-]name-objects] [<object>...]"),
+ NULL
+};
+
+static struct option fsck_opts[] = {
+ OPT__VERBOSE(&verbose, N_("be verbose")),
+ OPT_BOOL(0, "unreachable", &show_unreachable, N_("show unreachable objects")),
+ OPT_BOOL(0, "dangling", &show_dangling, N_("show dangling objects")),
+ OPT_BOOL(0, "tags", &show_tags, N_("report tags")),
+ OPT_BOOL(0, "root", &show_root, N_("report root nodes")),
+ OPT_BOOL(0, "cache", &keep_cache_objects, N_("make index objects head nodes")),
+ OPT_BOOL(0, "reflogs", &include_reflogs, N_("make reflogs head nodes (default)")),
+ OPT_BOOL(0, "full", &check_full, N_("also consider packs and alternate objects")),
+ OPT_BOOL(0, "connectivity-only", &connectivity_only, N_("check only connectivity")),
+ OPT_BOOL(0, "strict", &check_strict, N_("enable more strict checking")),
+ OPT_BOOL(0, "lost-found", &write_lost_and_found,
+ N_("write dangling objects in .git/lost-found")),
+ OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
+ OPT_BOOL(0, "name-objects", &name_objects, N_("show verbose names for reachable objects")),
+ OPT_END(),
+};
+
+int cmd_fsck(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct object_directory *odb;
+
+ /* fsck knows how to handle missing promisor objects */
+ fetch_if_missing = 0;
+
+ errors_found = 0;
+ read_replace_refs = 0;
+ save_commit_buffer = 0;
+
+ argc = parse_options(argc, argv, prefix, fsck_opts, fsck_usage, 0);
+
+ fsck_walk_options.walk = mark_object;
+ fsck_obj_options.walk = mark_used;
+ fsck_obj_options.error_func = fsck_error_func;
+ if (check_strict)
+ fsck_obj_options.strict = 1;
+
+ if (show_progress == -1)
+ show_progress = isatty(2);
+ if (verbose)
+ show_progress = 0;
+
+ if (write_lost_and_found) {
+ check_full = 1;
+ include_reflogs = 0;
+ }
+
+ if (name_objects)
+ fsck_enable_object_names(&fsck_walk_options);
+
+ git_config(git_fsck_config, &fsck_obj_options);
+ prepare_repo_settings(the_repository);
+
+ if (connectivity_only) {
+ for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
+ for_each_packed_object(mark_packed_for_connectivity, NULL, 0);
+ } else {
+ prepare_alt_odb(the_repository);
+ for (odb = the_repository->objects->odb; odb; odb = odb->next)
+ fsck_object_dir(odb->path);
+
+ if (check_full) {
+ struct packed_git *p;
+ uint32_t total = 0, count = 0;
+ struct progress *progress = NULL;
+
+ if (show_progress) {
+ for (p = get_all_packs(the_repository); p;
+ p = p->next) {
+ if (open_pack_index(p))
+ continue;
+ total += p->num_objects;
+ }
+
+ progress = start_progress(_("Checking objects"), total);
+ }
+ for (p = get_all_packs(the_repository); p;
+ p = p->next) {
+ /* verify gives error messages itself */
+ if (verify_pack(the_repository,
+ p, fsck_obj_buffer,
+ progress, count))
+ errors_found |= ERROR_PACK;
+ count += p->num_objects;
+ }
+ stop_progress(&progress);
+ }
+
+ if (fsck_finish(&fsck_obj_options))
+ errors_found |= ERROR_OBJECT;
+ }
+
+ for (i = 0; i < argc; i++) {
+ const char *arg = argv[i];
+ struct object_id oid;
+ if (!get_oid(arg, &oid)) {
+ struct object *obj = lookup_object(the_repository,
+ &oid);
+
+ if (!obj || !(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&oid))
+ continue;
+ error(_("%s: object missing"), oid_to_hex(&oid));
+ errors_found |= ERROR_OBJECT;
+ continue;
+ }
+
+ obj->flags |= USED;
+ fsck_put_object_name(&fsck_walk_options, &oid,
+ "%s", arg);
+ mark_object_reachable(obj);
+ continue;
+ }
+ error(_("invalid parameter: expected sha1, got '%s'"), arg);
+ errors_found |= ERROR_OBJECT;
+ }
+
+ /*
+ * If we've not been given any explicit head information, do the
+ * default ones from .git/refs. We also consider the index file
+ * in this case (ie this implies --cache).
+ */
+ if (!argc) {
+ get_default_heads();
+ keep_cache_objects = 1;
+ }
+
+ if (keep_cache_objects) {
+ verify_index_checksum = 1;
+ verify_ce_order = 1;
+ repo_read_index(the_repository);
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+ for (i = 0; i < the_index.cache_nr; i++) {
+ unsigned int mode;
+ struct blob *blob;
+ struct object *obj;
+
+ mode = the_index.cache[i]->ce_mode;
+ if (S_ISGITLINK(mode))
+ continue;
+ blob = lookup_blob(the_repository,
+ &the_index.cache[i]->oid);
+ if (!blob)
+ continue;
+ obj = &blob->object;
+ obj->flags |= USED;
+ fsck_put_object_name(&fsck_walk_options, &obj->oid,
+ ":%s", the_index.cache[i]->name);
+ mark_object_reachable(obj);
+ }
+ if (the_index.cache_tree)
+ fsck_cache_tree(the_index.cache_tree);
+ fsck_resolve_undo(&the_index);
+ }
+
+ check_connectivity();
+
+ if (the_repository->settings.core_commit_graph) {
+ struct child_process commit_graph_verify = CHILD_PROCESS_INIT;
+
+ prepare_alt_odb(the_repository);
+ for (odb = the_repository->objects->odb; odb; odb = odb->next) {
+ child_process_init(&commit_graph_verify);
+ commit_graph_verify.git_cmd = 1;
+ strvec_pushl(&commit_graph_verify.args, "commit-graph",
+ "verify", "--object-dir", odb->path, NULL);
+ if (run_command(&commit_graph_verify))
+ errors_found |= ERROR_COMMIT_GRAPH;
+ }
+ }
+
+ if (the_repository->settings.core_multi_pack_index) {
+ struct child_process midx_verify = CHILD_PROCESS_INIT;
+
+ prepare_alt_odb(the_repository);
+ for (odb = the_repository->objects->odb; odb; odb = odb->next) {
+ child_process_init(&midx_verify);
+ midx_verify.git_cmd = 1;
+ strvec_pushl(&midx_verify.args, "multi-pack-index",
+ "verify", "--object-dir", odb->path, NULL);
+ if (run_command(&midx_verify))
+ errors_found |= ERROR_MULTI_PACK_INDEX;
+ }
+ }
+
+ return errors_found;
+}
diff --git a/builtin/fsmonitor--daemon.c b/builtin/fsmonitor--daemon.c
new file mode 100644
index 0000000..6f30a4f
--- /dev/null
+++ b/builtin/fsmonitor--daemon.c
@@ -0,0 +1,1586 @@
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "fsmonitor.h"
+#include "fsmonitor-ipc.h"
+#include "fsmonitor-path-utils.h"
+#include "compat/fsmonitor/fsm-health.h"
+#include "compat/fsmonitor/fsm-listen.h"
+#include "fsmonitor--daemon.h"
+#include "simple-ipc.h"
+#include "khash.h"
+#include "pkt-line.h"
+
+static const char * const builtin_fsmonitor__daemon_usage[] = {
+ N_("git fsmonitor--daemon start [<options>]"),
+ N_("git fsmonitor--daemon run [<options>]"),
+ "git fsmonitor--daemon stop",
+ "git fsmonitor--daemon status",
+ NULL
+};
+
+#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
+/*
+ * Global state loaded from config.
+ */
+#define FSMONITOR__IPC_THREADS "fsmonitor.ipcthreads"
+static int fsmonitor__ipc_threads = 8;
+
+#define FSMONITOR__START_TIMEOUT "fsmonitor.starttimeout"
+static int fsmonitor__start_timeout_sec = 60;
+
+#define FSMONITOR__ANNOUNCE_STARTUP "fsmonitor.announcestartup"
+static int fsmonitor__announce_startup = 0;
+
+static int fsmonitor_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, FSMONITOR__IPC_THREADS)) {
+ int i = git_config_int(var, value);
+ if (i < 1)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__IPC_THREADS, i);
+ fsmonitor__ipc_threads = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__START_TIMEOUT)) {
+ int i = git_config_int(var, value);
+ if (i < 0)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__START_TIMEOUT, i);
+ fsmonitor__start_timeout_sec = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__ANNOUNCE_STARTUP)) {
+ int is_bool;
+ int i = git_config_bool_or_int(var, value, &is_bool);
+ if (i < 0)
+ return error(_("value of '%s' not bool or int: %d"),
+ var, i);
+ fsmonitor__announce_startup = i;
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+/*
+ * Acting as a CLIENT.
+ *
+ * Send a "quit" command to the `git-fsmonitor--daemon` (if running)
+ * and wait for it to shutdown.
+ */
+static int do_as_client__send_stop(void)
+{
+ struct strbuf answer = STRBUF_INIT;
+ int ret;
+
+ ret = fsmonitor_ipc__send_command("quit", &answer);
+
+ /* The quit command does not return any response data. */
+ strbuf_release(&answer);
+
+ if (ret)
+ return ret;
+
+ trace2_region_enter("fsm_client", "polling-for-daemon-exit", NULL);
+ while (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ sleep_millisec(50);
+ trace2_region_leave("fsm_client", "polling-for-daemon-exit", NULL);
+
+ return 0;
+}
+
+static int do_as_client__status(void)
+{
+ enum ipc_active_state state = fsmonitor_ipc__get_state();
+
+ switch (state) {
+ case IPC_STATE__LISTENING:
+ printf(_("fsmonitor-daemon is watching '%s'\n"),
+ the_repository->worktree);
+ return 0;
+
+ default:
+ printf(_("fsmonitor-daemon is not watching '%s'\n"),
+ the_repository->worktree);
+ return 1;
+ }
+}
+
+enum fsmonitor_cookie_item_result {
+	FCIR_ERROR = -1, /* could not create cookie file? */
+ FCIR_INIT,
+ FCIR_SEEN,
+ FCIR_ABORT,
+};
+
+struct fsmonitor_cookie_item {
+ struct hashmap_entry entry;
+ char *name;
+ enum fsmonitor_cookie_item_result result;
+};
+
+static int cookies_cmp(const void *data, const struct hashmap_entry *he1,
+ const struct hashmap_entry *he2, const void *keydata)
+{
+ const struct fsmonitor_cookie_item *a =
+ container_of(he1, const struct fsmonitor_cookie_item, entry);
+ const struct fsmonitor_cookie_item *b =
+ container_of(he2, const struct fsmonitor_cookie_item, entry);
+
+ return strcmp(a->name, keydata ? keydata : b->name);
+}
+
+static enum fsmonitor_cookie_item_result with_lock__wait_for_cookie(
+ struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ int fd;
+ struct fsmonitor_cookie_item *cookie;
+ struct strbuf cookie_pathname = STRBUF_INIT;
+ struct strbuf cookie_filename = STRBUF_INIT;
+ enum fsmonitor_cookie_item_result result;
+ int my_cookie_seq;
+
+ CALLOC_ARRAY(cookie, 1);
+
+ my_cookie_seq = state->cookie_seq++;
+
+ strbuf_addf(&cookie_filename, "%i-%i", getpid(), my_cookie_seq);
+
+ strbuf_addbuf(&cookie_pathname, &state->path_cookie_prefix);
+ strbuf_addbuf(&cookie_pathname, &cookie_filename);
+
+ cookie->name = strbuf_detach(&cookie_filename, NULL);
+ cookie->result = FCIR_INIT;
+ hashmap_entry_init(&cookie->entry, strhash(cookie->name));
+
+ hashmap_add(&state->cookies, &cookie->entry);
+
+ trace_printf_key(&trace_fsmonitor, "cookie-wait: '%s' '%s'",
+ cookie->name, cookie_pathname.buf);
+
+ /*
+ * Create the cookie file on disk and then wait for a notification
+ * that the listener thread has seen it.
+ */
+ fd = open(cookie_pathname.buf, O_WRONLY | O_CREAT | O_EXCL, 0600);
+ if (fd < 0) {
+ error_errno(_("could not create fsmonitor cookie '%s'"),
+ cookie->name);
+
+ cookie->result = FCIR_ERROR;
+ goto done;
+ }
+
+ /*
+ * Technically, close() and unlink() can fail, but we don't
+ * care here. We only created the file to trigger a watch
+	 * event from the FS so that we know when we're up to date.
+ */
+ close(fd);
+ unlink(cookie_pathname.buf);
+
+ /*
+ * Technically, this is an infinite wait (well, unless another
+ * thread sends us an abort). I'd like to change this to
+ * use `pthread_cond_timedwait()` and return an error/timeout
+ * and let the caller do the trivial response thing, but we
+ * don't have that routine in our thread-utils.
+ *
+ * After extensive beta testing I'm not really worried about
+ * this. Also note that the above open() and unlink() calls
+ * will cause at least two FS events on that path, so the odds
+ * of getting stuck are pretty slim.
+ */
+ while (cookie->result == FCIR_INIT)
+ pthread_cond_wait(&state->cookies_cond,
+ &state->main_lock);
+
+done:
+ hashmap_remove(&state->cookies, &cookie->entry, NULL);
+
+ result = cookie->result;
+
+ free(cookie->name);
+ free(cookie);
+ strbuf_release(&cookie_pathname);
+
+ return result;
+}
+
+/*
+ * Mark these cookies as _SEEN and wake up the corresponding client threads.
+ */
+static void with_lock__mark_cookies_seen(struct fsmonitor_daemon_state *state,
+ const struct string_list *cookie_names)
+{
+ /* assert current thread holding state->main_lock */
+
+ int k;
+ int nr_seen = 0;
+
+ for (k = 0; k < cookie_names->nr; k++) {
+ struct fsmonitor_cookie_item key;
+ struct fsmonitor_cookie_item *cookie;
+
+ key.name = cookie_names->items[k].string;
+ hashmap_entry_init(&key.entry, strhash(key.name));
+
+ cookie = hashmap_get_entry(&state->cookies, &key, entry, NULL);
+ if (cookie) {
+ trace_printf_key(&trace_fsmonitor, "cookie-seen: '%s'",
+ cookie->name);
+ cookie->result = FCIR_SEEN;
+ nr_seen++;
+ }
+ }
+
+ if (nr_seen)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Set _ABORT on all pending cookies and wake up all client threads.
+ */
+static void with_lock__abort_all_cookies(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct hashmap_iter iter;
+ struct fsmonitor_cookie_item *cookie;
+ int nr_aborted = 0;
+
+ hashmap_for_each_entry(&state->cookies, &iter, cookie, entry) {
+ trace_printf_key(&trace_fsmonitor, "cookie-abort: '%s'",
+ cookie->name);
+ cookie->result = FCIR_ABORT;
+ nr_aborted++;
+ }
+
+ if (nr_aborted)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Requests to and from a FSMonitor Protocol V2 provider use an opaque
+ * "token" as a virtual timestamp. Clients can request a summary of all
+ * created/deleted/modified files relative to a token. In the response,
+ * clients receive a new token for the next (relative) request.
+ *
+ *
+ * Token Format
+ * ============
+ *
+ * The contents of the token are private and provider-specific.
+ *
+ * For the built-in fsmonitor--daemon, we define a token as follows:
+ *
+ * "builtin" ":" <token_id> ":" <sequence_nr>
+ *
+ * The "builtin" prefix is used as a namespace to avoid conflicts
+ * with other providers (such as Watchman).
+ *
+ * The <token_id> is an arbitrary OPAQUE string, such as a GUID,
+ * UUID, or {timestamp,pid}. It is used to group all filesystem
+ * events that happened while the daemon was monitoring (and in-sync
+ * with the filesystem).
+ *
+ * Unlike FSMonitor Protocol V1, it is not defined as a timestamp
+ * and does not define less-than/greater-than relationships.
+ * (There are too many race conditions to rely on file system
+ * event timestamps.)
+ *
+ * The <sequence_nr> is a simple integer incremented whenever the
+ * daemon needs to make its state public. For example, if 1000 file
+ * system events come in, but no clients have requested the data,
+ * the daemon can continue to accumulate file changes in the same
+ * bin and does not need to advance the sequence number. However,
+ * as soon as a client does arrive, the daemon needs to start a new
+ * bin and increment the sequence number.
+ *
+ * The sequence number serves as the boundary between 2 sets
+ * of bins -- the older ones that the client has already seen
+ * and the newer ones that it hasn't.
+ *
+ * When a new <token_id> is created, the <sequence_nr> is reset to
+ * zero.
+ *
+ *
+ * About Token Ids
+ * ===============
+ *
+ * A new token_id is created:
+ *
+ * [1] each time the daemon is started.
+ *
+ * [2] any time that the daemon must re-sync with the filesystem
+ * (such as when the kernel drops or we miss events on a very
+ * active volume).
+ *
+ * [3] in response to a client "flush" command (for dropped event
+ * testing).
+ *
+ * When a new token_id is created, the daemon is free to discard all
+ * cached filesystem events associated with any previous token_ids.
+ * Events associated with a non-current token_id will never be sent
+ * to a client. A token_id change implicitly means that the daemon
+ * has a gap in its event history.
+ *
+ * Therefore, clients that present a token with a stale (non-current)
+ * token_id will always be given a trivial response.
+ */
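
To make the token layout described above concrete, here is a minimal standalone
sketch (the token_id and sequence number are invented for illustration; real
values come from fsmonitor_new_token_data() below) that formats and re-splits a
"builtin:<token_id>:<sequence_nr>" string:

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>

    int main(void)
    {
    	char token[128];
    	char parsed_id[128];
    	uint64_t seq_nr = 5, parsed_seq = 0;
    	/* hypothetical token_id: "<flush_count>.<pid>.<utc-timestamp>Z" */
    	const char *token_id = "3.4242.20240407T144753.000123Z";

    	/* Format the token the way the daemon hands it to a client. */
    	snprintf(token, sizeof(token), "builtin:%s:%" PRIu64, token_id, seq_nr);
    	printf("token: %s\n", token);

    	/*
    	 * Clients must treat the token as opaque; splitting it here only
    	 * shows the three colon-separated fields described above.
    	 */
    	if (sscanf(token, "builtin:%127[^:]:%" SCNu64, parsed_id, &parsed_seq) == 2)
    		printf("token_id='%s' seq_nr=%" PRIu64 "\n", parsed_id, parsed_seq);
    	return 0;
    }
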
+struct fsmonitor_token_data {
+ struct strbuf token_id;
+ struct fsmonitor_batch *batch_head;
+ struct fsmonitor_batch *batch_tail;
+ uint64_t client_ref_count;
+};
+
+struct fsmonitor_batch {
+ struct fsmonitor_batch *next;
+ uint64_t batch_seq_nr;
+ const char **interned_paths;
+ size_t nr, alloc;
+ time_t pinned_time;
+};
+
+static struct fsmonitor_token_data *fsmonitor_new_token_data(void)
+{
+ static int test_env_value = -1;
+ static uint64_t flush_count = 0;
+ struct fsmonitor_token_data *token;
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(token, 1);
+ batch = fsmonitor_batch__new();
+
+ strbuf_init(&token->token_id, 0);
+ token->batch_head = batch;
+ token->batch_tail = batch;
+ token->client_ref_count = 0;
+
+ if (test_env_value < 0)
+ test_env_value = git_env_bool("GIT_TEST_FSMONITOR_TOKEN", 0);
+
+ if (!test_env_value) {
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ gmtime_r(&secs, &tm);
+
+ strbuf_addf(&token->token_id,
+ "%"PRIu64".%d.%4d%02d%02dT%02d%02d%02d.%06ldZ",
+ flush_count++,
+ getpid(),
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (long)tv.tv_usec);
+ } else {
+ strbuf_addf(&token->token_id, "test_%08x", test_env_value++);
+ }
+
+ /*
+ * We created a new <token_id> and are starting a new series
+ * of tokens with a zero <seq_nr>.
+ *
+ * Since clients cannot guess our new (non test) <token_id>
+ * they will always receive a trivial response (because of the
+ * mismatch on the <token_id>). The trivial response will
+ * tell them our new <token_id> so that subsequent requests
+ * will be relative to our new series. (And when sending that
+ * response, we pin the current head of the batch list.)
+ *
+ * Even if the client correctly guesses the <token_id>, their
+ * request of "builtin:<token_id>:0" asks for all changes MORE
+ * RECENT than batch/bin 0.
+ *
+ * This implies that it is a waste to accumulate paths in the
+ * initial batch/bin (because they will never be transmitted).
+ *
+ * So the daemon could be running for days and watching the
+ * file system, but doesn't need to actually accumulate any
+ * paths UNTIL we need to set a reference point for a later
+ * relative request.
+ *
+ * However, it is very useful for testing to always have a
+ * reference point set. Pin batch 0 to force early file system
+ * events to accumulate.
+ */
+ if (test_env_value)
+ batch->pinned_time = time(NULL);
+
+ return token;
+}
+
+struct fsmonitor_batch *fsmonitor_batch__new(void)
+{
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(batch, 1);
+
+ return batch;
+}
+
+void fsmonitor_batch__free_list(struct fsmonitor_batch *batch)
+{
+ while (batch) {
+ struct fsmonitor_batch *next = batch->next;
+
+ /*
+ * The actual strings within the array of this batch
+ * are interned, so we don't own them. We only own
+ * the array.
+ */
+ free(batch->interned_paths);
+ free(batch);
+
+ batch = next;
+ }
+}
+
+void fsmonitor_batch__add_path(struct fsmonitor_batch *batch,
+ const char *path)
+{
+ const char *interned_path = strintern(path);
+
+ trace_printf_key(&trace_fsmonitor, "event: %s", interned_path);
+
+ ALLOC_GROW(batch->interned_paths, batch->nr + 1, batch->alloc);
+ batch->interned_paths[batch->nr++] = interned_path;
+}
+
+static void fsmonitor_batch__combine(struct fsmonitor_batch *batch_dest,
+ const struct fsmonitor_batch *batch_src)
+{
+ size_t k;
+
+ ALLOC_GROW(batch_dest->interned_paths,
+ batch_dest->nr + batch_src->nr + 1,
+ batch_dest->alloc);
+
+ for (k = 0; k < batch_src->nr; k++)
+ batch_dest->interned_paths[batch_dest->nr++] =
+ batch_src->interned_paths[k];
+}
+
+/*
+ * To keep the batch list from growing unbounded in response to filesystem
+ * activity, we try to truncate old batches from the end of the list as
+ * they become irrelevant.
+ *
+ * We assume that the .git/index will be updated with the most recent token
+ * any time the index is updated. And future commands will only ask for
+ * recent changes *since* that new token. So as tokens advance into the
+ * future, older batch items will never be requested/needed. So we can
+ * truncate them without loss of functionality.
+ *
+ * However, multiple commands may be talking to the daemon concurrently,
+ * or a single command may be slow, so a little "token skew" is possible.
+ * Therefore, we want this to be a little bit lazy and have a generous
+ * delay.
+ *
+ * The current reader thread walked backwards in time from `token->batch_head`
+ * back to `batch_marker` somewhere in the middle of the batch list.
+ *
+ * Let's walk backwards in time from that marker an arbitrary delay
+ * and truncate the list there. Note that these timestamps are completely
+ * artificial (based on when we pinned the batch item) and not on any
+ * filesystem activity.
+ *
+ * Return the obsolete portion of the list after we have removed it from
+ * the official list so that the caller can free it after leaving the lock.
+ */
+#define MY_TIME_DELAY_SECONDS (5 * 60) /* seconds */
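
A quick sketch of the cutoff test used in the function below, with invented
pinned times: a batch is only truncatable once its pinned_time is at least
MY_TIME_DELAY_SECONDS older than the marker's pinned_time.

    #include <stdio.h>
    #include <time.h>

    #define DELAY_SECONDS (5 * 60)	/* mirrors MY_TIME_DELAY_SECONDS */

    /* Hypothetical helper: may a batch pinned at `batch_pinned` be truncated
     * relative to the marker pinned at `marker_pinned`? */
    static int old_enough(time_t batch_pinned, time_t marker_pinned)
    {
    	return batch_pinned + DELAY_SECONDS <= marker_pinned;
    }

    int main(void)
    {
    	time_t marker = 1000000;	/* arbitrary example timestamp */

    	printf("%d\n", old_enough(marker - 400, marker));	/* 1: more than 5 minutes older */
    	printf("%d\n", old_enough(marker - 100, marker));	/* 0: too close to the marker */
    	return 0;
    }
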
+
+static struct fsmonitor_batch *with_lock__truncate_old_batches(
+ struct fsmonitor_daemon_state *state,
+ const struct fsmonitor_batch *batch_marker)
+{
+ /* assert current thread holding state->main_lock */
+
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder;
+
+ if (!batch_marker)
+ return NULL;
+
+ trace_printf_key(&trace_fsmonitor, "Truncate: mark (%"PRIu64",%"PRIu64")",
+ batch_marker->batch_seq_nr,
+ (uint64_t)batch_marker->pinned_time);
+
+ for (batch = batch_marker; batch; batch = batch->next) {
+ time_t t;
+
+ if (!batch->pinned_time) /* an overflow batch */
+ continue;
+
+ t = batch->pinned_time + MY_TIME_DELAY_SECONDS;
+ if (t > batch_marker->pinned_time) /* too close to marker */
+ continue;
+
+ goto truncate_past_here;
+ }
+
+ return NULL;
+
+truncate_past_here:
+ state->current_token_data->batch_tail = (struct fsmonitor_batch *)batch;
+
+ remainder = ((struct fsmonitor_batch *)batch)->next;
+ ((struct fsmonitor_batch *)batch)->next = NULL;
+
+ return remainder;
+}
+
+static void fsmonitor_free_token_data(struct fsmonitor_token_data *token)
+{
+ if (!token)
+ return;
+
+ assert(token->client_ref_count == 0);
+
+ strbuf_release(&token->token_id);
+
+ fsmonitor_batch__free_list(token->batch_head);
+
+ free(token);
+}
+
+/*
+ * Flush all of our cached data about the filesystem. Call this if we
+ * lose sync with the filesystem and miss some notification events.
+ *
+ * [1] If we are missing events, then we no longer have a complete
+ * history of the directory (relative to our current start token).
+ * We should create a new token and start fresh (as if we just
+ * booted up).
+ *
+ * [2] Some of those lost events may have been for cookie files. We
+ * should assume the worst and abort them rather than letting them starve.
+ *
+ * If there are no concurrent threads reading the current token data
+ * series, we can free it now. Otherwise, let the last reader free
+ * it.
+ *
+ * Either way, the old token data series is no longer associated with
+ * our state data.
+ */
+static void with_lock__do_force_resync(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct fsmonitor_token_data *free_me = NULL;
+ struct fsmonitor_token_data *new_one = NULL;
+
+ new_one = fsmonitor_new_token_data();
+
+ if (state->current_token_data->client_ref_count == 0)
+ free_me = state->current_token_data;
+ state->current_token_data = new_one;
+
+ fsmonitor_free_token_data(free_me);
+
+ with_lock__abort_all_cookies(state);
+}
+
+void fsmonitor_force_resync(struct fsmonitor_daemon_state *state)
+{
+ pthread_mutex_lock(&state->main_lock);
+ with_lock__do_force_resync(state);
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+/*
+ * Format an opaque token string to send to the client.
+ */
+static void with_lock__format_response_token(
+ struct strbuf *response_token,
+ const struct strbuf *response_token_id,
+ const struct fsmonitor_batch *batch)
+{
+ /* assert current thread holding state->main_lock */
+
+ strbuf_reset(response_token);
+ strbuf_addf(response_token, "builtin:%s:%"PRIu64,
+ response_token_id->buf, batch->batch_seq_nr);
+}
+
+/*
+ * Parse an opaque token from the client.
+ * Returns -1 on error.
+ */
+static int fsmonitor_parse_client_token(const char *buf_token,
+ struct strbuf *requested_token_id,
+ uint64_t *seq_nr)
+{
+ const char *p;
+ char *p_end;
+
+ strbuf_reset(requested_token_id);
+ *seq_nr = 0;
+
+ if (!skip_prefix(buf_token, "builtin:", &p))
+ return -1;
+
+ while (*p && *p != ':')
+ strbuf_addch(requested_token_id, *p++);
+ if (!*p++)
+ return -1;
+
+ *seq_nr = (uint64_t)strtoumax(p, &p_end, 10);
+ if (*p_end)
+ return -1;
+
+ return 0;
+}
+
+KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)
+
+static int do_handle_client(struct fsmonitor_daemon_state *state,
+ const char *command,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_token_data *token_data = NULL;
+ struct strbuf response_token = STRBUF_INIT;
+ struct strbuf requested_token_id = STRBUF_INIT;
+ struct strbuf payload = STRBUF_INIT;
+ uint64_t requested_oldest_seq_nr = 0;
+ uint64_t total_response_len = 0;
+ const char *p;
+ const struct fsmonitor_batch *batch_head;
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder = NULL;
+ intmax_t count = 0, duplicates = 0;
+ kh_str_t *shown;
+ int hash_ret;
+ int do_trivial = 0;
+ int do_flush = 0;
+ int do_cookie = 0;
+ enum fsmonitor_cookie_item_result cookie_result;
+
+ /*
+ * We expect `command` to be of the form:
+ *
+ * <command> := quit NUL
+ * | flush NUL
+ * | <V1-time-since-epoch-ns> NUL
+ * | <V2-opaque-fsmonitor-token> NUL
+ */
+
+ if (!strcmp(command, "quit")) {
+ /*
+ * A client has requested over the socket/pipe that the
+ * daemon shutdown.
+ *
+		 * Tell the IPC thread pool to shut down; this completes
+		 * the await in the main thread, which can then stop the
+		 * fsmonitor listener thread.
+ *
+ * There is no reply to the client.
+ */
+ return SIMPLE_IPC_QUIT;
+
+ } else if (!strcmp(command, "flush")) {
+ /*
+ * Flush all of our cached data and generate a new token
+ * just like if we lost sync with the filesystem.
+ *
+ * Then send a trivial response using the new token.
+ */
+ do_flush = 1;
+ do_trivial = 1;
+
+ } else if (!skip_prefix(command, "builtin:", &p)) {
+ /* assume V1 timestamp or garbage */
+
+ char *p_end;
+
+ strtoumax(command, &p_end, 10);
+ trace_printf_key(&trace_fsmonitor,
+ ((*p_end) ?
+ "fsmonitor: invalid command line '%s'" :
+ "fsmonitor: unsupported V1 protocol '%s'"),
+ command);
+ do_trivial = 1;
+
+ } else {
+ /* We have "builtin:*" */
+ if (fsmonitor_parse_client_token(command, &requested_token_id,
+ &requested_oldest_seq_nr)) {
+ trace_printf_key(&trace_fsmonitor,
+ "fsmonitor: invalid V2 protocol token '%s'",
+ command);
+ do_trivial = 1;
+
+ } else {
+ /*
+ * We have a V2 valid token:
+ * "builtin:<token_id>:<seq_nr>"
+ */
+ do_cookie = 1;
+ }
+ }
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (!state->current_token_data)
+ BUG("fsmonitor state does not have a current token");
+
+ /*
+ * Write a cookie file inside the directory being watched in
+ * an effort to flush out existing filesystem events that we
+ * actually care about. Suspend this client thread until we
+ * see the filesystem events for this cookie file.
+ *
+ * Creating the cookie lets us guarantee that our FS listener
+ * thread has drained the kernel queue and we are caught up
+ * with the kernel.
+ *
+ * If we cannot create the cookie (or otherwise guarantee that
+ * we are caught up), we send a trivial response. We have to
+ * assume that there might be some very, very recent activity
+ * on the FS still in flight.
+ */
+ if (do_cookie) {
+ cookie_result = with_lock__wait_for_cookie(state);
+ if (cookie_result != FCIR_SEEN) {
+ error(_("fsmonitor: cookie_result '%d' != SEEN"),
+ cookie_result);
+ do_trivial = 1;
+ }
+ }
+
+ if (do_flush)
+ with_lock__do_force_resync(state);
+
+ /*
+ * We mark the current head of the batch list as "pinned" so
+ * that the listener thread will treat this item as read-only
+ * (and prevent any more paths from being added to it) from
+ * now on.
+ */
+ token_data = state->current_token_data;
+ batch_head = token_data->batch_head;
+ ((struct fsmonitor_batch *)batch_head)->pinned_time = time(NULL);
+
+ /*
+ * FSMonitor Protocol V2 requires that we send a response header
+ * with a "new current token" and then all of the paths that changed
+ * since the "requested token". We send the seq_nr of the just-pinned
+ * head batch so that future requests from a client will be relative
+ * to it.
+ */
+ with_lock__format_response_token(&response_token,
+ &token_data->token_id, batch_head);
+
+ reply(reply_data, response_token.buf, response_token.len + 1);
+ total_response_len += response_token.len + 1;
+
+ trace2_data_string("fsmonitor", the_repository, "response/token",
+ response_token.buf);
+ trace_printf_key(&trace_fsmonitor, "response token: %s",
+ response_token.buf);
+
+ if (!do_trivial) {
+ if (strcmp(requested_token_id.buf, token_data->token_id.buf)) {
+ /*
+ * The client last spoke to a different daemon
+ * instance -OR- the daemon had to resync with
+ * the filesystem (and lost events), so reject.
+ */
+ trace2_data_string("fsmonitor", the_repository,
+ "response/token", "different");
+ do_trivial = 1;
+
+ } else if (requested_oldest_seq_nr <
+ token_data->batch_tail->batch_seq_nr) {
+ /*
+ * The client wants older events than we have for
+ * this token_id. This means that the end of our
+ * batch list was truncated and we cannot give the
+ * client a complete snapshot relative to their
+ * request.
+ */
+ trace_printf_key(&trace_fsmonitor,
+ "client requested truncated data");
+ do_trivial = 1;
+ }
+ }
+
+ if (do_trivial) {
+ pthread_mutex_unlock(&state->main_lock);
+
+ reply(reply_data, "/", 2);
+
+ trace2_data_intmax("fsmonitor", the_repository,
+ "response/trivial", 1);
+
+ goto cleanup;
+ }
+
+ /*
+ * We're going to hold onto a pointer to the current
+ * token-data while we walk the list of batches of files.
+ * During this time, we will NOT be under the lock.
+ * So we ref-count it.
+ *
+ * This allows the listener thread to continue prepending
+ * new batches of items to the token-data (which we'll ignore).
+ *
+ * AND it allows the listener thread to do a token-reset
+ * (and install a new `current_token_data`).
+ */
+ token_data->client_ref_count++;
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ /*
+ * The client request is relative to the token that they sent,
+ * so walk the batch list backwards from the current head back
+ * to the batch (sequence number) they named.
+ *
+ * We use khash to de-dup the list of pathnames.
+ *
+ * NEEDSWORK: each batch contains a list of interned strings,
+ * so we only need to do pointer comparisons here to build the
+ * hash table. Currently, we're still comparing the string
+ * values.
+ */
+ shown = kh_init_str();
+ for (batch = batch_head;
+ batch && batch->batch_seq_nr > requested_oldest_seq_nr;
+ batch = batch->next) {
+ size_t k;
+
+ for (k = 0; k < batch->nr; k++) {
+ const char *s = batch->interned_paths[k];
+ size_t s_len;
+
+ if (kh_get_str(shown, s) != kh_end(shown))
+ duplicates++;
+ else {
+ kh_put_str(shown, s, &hash_ret);
+
+ trace_printf_key(&trace_fsmonitor,
+ "send[%"PRIuMAX"]: %s",
+ count, s);
+
+ /* Each path gets written with a trailing NUL */
+ s_len = strlen(s) + 1;
+
+ if (payload.len + s_len >=
+ LARGE_PACKET_DATA_MAX) {
+ reply(reply_data, payload.buf,
+ payload.len);
+ total_response_len += payload.len;
+ strbuf_reset(&payload);
+ }
+
+ strbuf_add(&payload, s, s_len);
+ count++;
+ }
+ }
+ }
+
+ if (payload.len) {
+ reply(reply_data, payload.buf, payload.len);
+ total_response_len += payload.len;
+ }
+
+ kh_release_str(shown);
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (token_data->client_ref_count > 0)
+ token_data->client_ref_count--;
+
+ if (token_data->client_ref_count == 0) {
+ if (token_data != state->current_token_data) {
+ /*
+ * The listener thread did a token-reset while we were
+ * walking the batch list. Therefore, this token is
+ * stale and can be discarded completely. If we are
+ * the last reader thread using this token, we own
+ * that work.
+ */
+ fsmonitor_free_token_data(token_data);
+ } else if (batch) {
+ /*
+ * We are holding the lock and are the only
+ * reader of the ref-counted portion of the
+ * list, so we get the honor of seeing if the
+ * list can be truncated to save memory.
+ *
+ * The main loop did not walk to the end of the
+ * list, so this batch is the first item in the
+ * batch-list that is older than the requested
+ * end-point sequence number. See if the tail
+ * end of the list is obsolete.
+ */
+ remainder = with_lock__truncate_old_batches(state,
+ batch);
+ }
+ }
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ if (remainder)
+ fsmonitor_batch__free_list(remainder);
+
+ trace2_data_intmax("fsmonitor", the_repository, "response/length", total_response_len);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/files", count);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/duplicates", duplicates);
+
+cleanup:
+ strbuf_release(&response_token);
+ strbuf_release(&requested_token_id);
+ strbuf_release(&payload);
+
+ return 0;
+}
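
For illustration only (the token and pathnames below are invented), the byte
stream a client receives from do_handle_client() for a non-trivial response is
a NUL-terminated token followed by NUL-terminated pathnames; a trivial response
is the token followed by "/" and a NUL. A tiny standalone sketch of that layout:

    #include <stdio.h>

    /* Print a reply buffer, rendering each NUL terminator as a newline. */
    static void show(const char *buf, size_t len)
    {
    	size_t i;

    	for (i = 0; i < len; i++)
    		putchar(buf[i] ? buf[i] : '\n');
    }

    int main(void)
    {
    	static const char reply[] =
    		"builtin:3.4242.20240407T144753.000123Z:9\0"
    		"src/main.c\0"
    		"Makefile\0";

    	show(reply, sizeof(reply) - 1);
    	return 0;
    }
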
+
+static ipc_server_application_cb handle_client;
+
+static int handle_client(void *data,
+ const char *command, size_t command_len,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_daemon_state *state = data;
+ int result;
+
+ /*
+ * The Simple IPC API now supports {char*, len} arguments, but
+ * FSMonitor always uses proper null-terminated strings, so
+ * we can ignore the command_len argument. (Trust, but verify.)
+ */
+ if (command_len != strlen(command))
+ BUG("FSMonitor assumes text messages");
+
+ trace_printf_key(&trace_fsmonitor, "requested token: %s", command);
+
+ trace2_region_enter("fsmonitor", "handle_client", the_repository);
+ trace2_data_string("fsmonitor", the_repository, "request", command);
+
+ result = do_handle_client(state, command, reply, reply_data);
+
+ trace2_region_leave("fsmonitor", "handle_client", the_repository);
+
+ return result;
+}
+
+#define FSMONITOR_DIR "fsmonitor--daemon"
+#define FSMONITOR_COOKIE_DIR "cookies"
+#define FSMONITOR_COOKIE_PREFIX (FSMONITOR_DIR "/" FSMONITOR_COOKIE_DIR "/")
+
+enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
+ const char *rel)
+{
+ if (fspathncmp(rel, ".git", 4))
+ return IS_WORKDIR_PATH;
+ rel += 4;
+
+ if (!*rel)
+ return IS_DOT_GIT;
+ if (*rel != '/')
+ return IS_WORKDIR_PATH; /* e.g. .gitignore */
+ rel++;
+
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_DOT_GIT;
+}
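
A rough standalone sketch of the classification above, simplified to use plain
strncmp() instead of git's fspathncmp() (so it ignores case-folding concerns);
the sample paths are hypothetical and only show how a few worktree-relative
inputs would be classified:

    #include <stdio.h>
    #include <string.h>

    enum kind { K_WORKDIR, K_DOT_GIT, K_INSIDE_DOT_GIT, K_COOKIE };

    static const char *kind_name[] = {
    	"workdir path", ".git itself", "inside .git", "cookie prefix",
    };

    static enum kind classify(const char *rel)
    {
    	if (strncmp(rel, ".git", 4))
    		return K_WORKDIR;
    	rel += 4;
    	if (!*rel)
    		return K_DOT_GIT;
    	if (*rel != '/')
    		return K_WORKDIR;	/* e.g. ".gitignore" */
    	rel++;
    	if (!strncmp(rel, "fsmonitor--daemon/cookies/",
    		     strlen("fsmonitor--daemon/cookies/")))
    		return K_COOKIE;
    	return K_INSIDE_DOT_GIT;
    }

    int main(void)
    {
    	const char *samples[] = {
    		"src/main.c", ".gitignore", ".git", ".git/index",
    		".git/fsmonitor--daemon/cookies/1234-5",
    	};
    	size_t i;

    	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    		printf("%-40s -> %s\n", samples[i], kind_name[classify(samples[i])]);
    	return 0;
    }
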
+
+enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
+ const char *rel)
+{
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_GITDIR;
+}
+
+static enum fsmonitor_path_type try_classify_workdir_abs_path(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+
+ if (fspathncmp(path, state->path_worktree_watch.buf,
+ state->path_worktree_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_worktree_watch.len;
+
+ if (!*rel)
+ return IS_WORKDIR_PATH; /* it is the root dir exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_workdir_relative(rel);
+}
+
+enum fsmonitor_path_type fsmonitor_classify_path_absolute(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+ enum fsmonitor_path_type t;
+
+ t = try_classify_workdir_abs_path(state, path);
+ if (state->nr_paths_watching == 1)
+ return t;
+ if (t != IS_OUTSIDE_CONE)
+ return t;
+
+ if (fspathncmp(path, state->path_gitdir_watch.buf,
+ state->path_gitdir_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_gitdir_watch.len;
+
+ if (!*rel)
+ return IS_GITDIR; /* it is the <gitdir> exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_gitdir_relative(rel);
+}
+
+/*
+ * We try to combine small batches at the front of the batch-list to avoid
+ * having a long list. This hopefully makes the list a little easier to
+ * truncate and maintain. However, we don't want the paths array
+ * to just keep growing and growing with realloc, so we insert an arbitrary
+ * limit.
+ */
+#define MY_COMBINE_LIMIT (1024)
+
+void fsmonitor_publish(struct fsmonitor_daemon_state *state,
+ struct fsmonitor_batch *batch,
+ const struct string_list *cookie_names)
+{
+ if (!batch && !cookie_names->nr)
+ return;
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (batch) {
+ struct fsmonitor_batch *head;
+
+ head = state->current_token_data->batch_head;
+ if (!head) {
+ BUG("token does not have batch");
+ } else if (head->pinned_time) {
+ /*
+ * We cannot alter the current batch list
+ * because:
+ *
+ * [a] it is being transmitted to at least one
+ * client and the handle_client() thread has a
+ * ref-count, but not a lock on the batch list
+ * starting with this item.
+ *
+ * [b] it has been transmitted in the past to
+ * at least one client such that future
+ * requests are relative to this head batch.
+ *
+ * So, we can only prepend a new batch onto
+ * the front of the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else if (!head->batch_seq_nr) {
+ /*
+ * Batch 0 is unpinned. See the note in
+ * `fsmonitor_new_token_data()` about why we
+ * don't need to accumulate these paths.
+ */
+ fsmonitor_batch__free_list(batch);
+ } else if (head->nr + batch->nr > MY_COMBINE_LIMIT) {
+ /*
+ * The head batch in the list has never been
+ * transmitted to a client, but folding the
+ * contents of the new batch onto it would
+ * exceed our arbitrary limit, so just prepend
+ * the new batch onto the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else {
+ /*
+ * We are free to add the paths in the given
+ * batch onto the end of the current head batch.
+ */
+ fsmonitor_batch__combine(head, batch);
+ fsmonitor_batch__free_list(batch);
+ }
+ }
+
+ if (cookie_names->nr)
+ with_lock__mark_cookies_seen(state, cookie_names);
+
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+static void *fsm_health__thread_proc(void *_state)
+{
+ struct fsmonitor_daemon_state *state = _state;
+
+ trace2_thread_start("fsm-health");
+
+ fsm_health__loop(state);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static void *fsm_listen__thread_proc(void *_state)
+{
+ struct fsmonitor_daemon_state *state = _state;
+
+ trace2_thread_start("fsm-listen");
+
+ trace_printf_key(&trace_fsmonitor, "Watching: worktree '%s'",
+ state->path_worktree_watch.buf);
+ if (state->nr_paths_watching > 1)
+ trace_printf_key(&trace_fsmonitor, "Watching: gitdir '%s'",
+ state->path_gitdir_watch.buf);
+
+ fsm_listen__loop(state);
+
+ pthread_mutex_lock(&state->main_lock);
+ if (state->current_token_data &&
+ state->current_token_data->client_ref_count == 0)
+ fsmonitor_free_token_data(state->current_token_data);
+ state->current_token_data = NULL;
+ pthread_mutex_unlock(&state->main_lock);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
+{
+ struct ipc_server_opts ipc_opts = {
+ .nr_threads = fsmonitor__ipc_threads,
+
+ /*
+ * We know that there are no other active threads yet,
+ * so we can let the IPC layer temporarily chdir() if
+ * it needs to when creating the server side of the
+ * Unix domain socket.
+ */
+ .uds_disallow_chdir = 0
+ };
+ int health_started = 0;
+ int listener_started = 0;
+ int err = 0;
+
+ /*
+	 * Start the IPC thread pool before we've started the file
+ * system event listener thread so that we have the IPC handle
+ * before we need it.
+ */
+ if (ipc_server_run_async(&state->ipc_server_data,
+ state->path_ipc.buf, &ipc_opts,
+ handle_client, state))
+ return error_errno(
+ _("could not start IPC thread pool on '%s'"),
+ state->path_ipc.buf);
+
+ /*
+ * Start the fsmonitor listener thread to collect filesystem
+ * events.
+ */
+ if (pthread_create(&state->listener_thread, NULL,
+ fsm_listen__thread_proc, state) < 0) {
+ ipc_server_stop_async(state->ipc_server_data);
+ err = error(_("could not start fsmonitor listener thread"));
+ goto cleanup;
+ }
+ listener_started = 1;
+
+ /*
+ * Start the health thread to watch over our process.
+ */
+ if (pthread_create(&state->health_thread, NULL,
+ fsm_health__thread_proc, state) < 0) {
+ ipc_server_stop_async(state->ipc_server_data);
+ err = error(_("could not start fsmonitor health thread"));
+ goto cleanup;
+ }
+ health_started = 1;
+
+ /*
+ * The daemon is now fully functional in background threads.
+ * Our primary thread should now just wait while the threads
+ * do all the work.
+ */
+cleanup:
+ /*
+ * Wait for the IPC thread pool to shutdown (whether by client
+ * request, from filesystem activity, or an error).
+ */
+ ipc_server_await(state->ipc_server_data);
+
+ /*
+ * The fsmonitor listener thread may have received a shutdown
+ * event from the IPC thread pool, but it doesn't hurt to tell
+ * it again. And wait for it to shutdown.
+ */
+ if (listener_started) {
+ fsm_listen__stop_async(state);
+ pthread_join(state->listener_thread, NULL);
+ }
+
+ if (health_started) {
+ fsm_health__stop_async(state);
+ pthread_join(state->health_thread, NULL);
+ }
+
+ if (err)
+ return err;
+ if (state->listen_error_code)
+ return state->listen_error_code;
+ if (state->health_error_code)
+ return state->health_error_code;
+ return 0;
+}
+
+static int fsmonitor_run_daemon(void)
+{
+ struct fsmonitor_daemon_state state;
+ const char *home;
+ int err;
+
+ memset(&state, 0, sizeof(state));
+
+ hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
+ pthread_mutex_init(&state.main_lock, NULL);
+ pthread_cond_init(&state.cookies_cond, NULL);
+ state.listen_error_code = 0;
+ state.health_error_code = 0;
+ state.current_token_data = fsmonitor_new_token_data();
+
+ /* Prepare to (recursively) watch the <worktree-root> directory. */
+ strbuf_init(&state.path_worktree_watch, 0);
+ strbuf_addstr(&state.path_worktree_watch, absolute_path(get_git_work_tree()));
+ state.nr_paths_watching = 1;
+
+ strbuf_init(&state.alias.alias, 0);
+ strbuf_init(&state.alias.points_to, 0);
+ if ((err = fsmonitor__get_alias(state.path_worktree_watch.buf, &state.alias)))
+ goto done;
+
+ /*
+ * We create and delete cookie files somewhere inside the .git
+ * directory to help us keep sync with the file system. If
+ * ".git" is not a directory, then <gitdir> is not inside the
+ * cone of <worktree-root>, so set up a second watch to watch
+ * the <gitdir> so that we get events for the cookie files.
+ */
+ strbuf_init(&state.path_gitdir_watch, 0);
+ strbuf_addbuf(&state.path_gitdir_watch, &state.path_worktree_watch);
+ strbuf_addstr(&state.path_gitdir_watch, "/.git");
+ if (!is_directory(state.path_gitdir_watch.buf)) {
+ strbuf_reset(&state.path_gitdir_watch);
+ strbuf_addstr(&state.path_gitdir_watch, absolute_path(get_git_dir()));
+ state.nr_paths_watching = 2;
+ }
+
+ /*
+ * We will write filesystem syncing cookie files into
+ * <gitdir>/<fsmonitor-dir>/<cookie-dir>/<pid>-<seq>.
+ *
+ * The extra layers of subdirectories here keep us from
+ * changing the mtime on ".git/" or ".git/foo/" when we create
+ * or delete cookie files.
+ *
+ * There have been problems with some IDEs that do a
+ * non-recursive watch of the ".git/" directory and run a
+ * series of commands any time something happens.
+ *
+ * For example, if we place our cookie files directly in
+ * ".git/" or ".git/foo/" then a `git status` (or similar
+ * command) from the IDE will cause a cookie file to be
+ * created in one of those dirs. This causes the mtime of
+ * those dirs to change. This triggers the IDE's watch
+ * notification. This triggers the IDE to run those commands
+ * again. And the process repeats and the machine never goes
+ * idle.
+ *
+ * Adding the extra layers of subdirectories prevents the
+ * mtime of ".git/" and ".git/foo" from changing when a
+ * cookie file is created.
+ */
+ strbuf_init(&state.path_cookie_prefix, 0);
+ strbuf_addbuf(&state.path_cookie_prefix, &state.path_gitdir_watch);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_COOKIE_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+
+ /*
+ * We create a named-pipe or unix domain socket inside of the
+ * ".git" directory. (Well, on Windows, we base our named
+ * pipe in the NPFS on the absolute path of the git
+ * directory.)
+ */
+ strbuf_init(&state.path_ipc, 0);
+ strbuf_addstr(&state.path_ipc,
+ absolute_path(fsmonitor_ipc__get_path(the_repository)));
+
+ /*
+ * Confirm that we can create platform-specific resources for the
+ * filesystem listener before we bother starting all the threads.
+ */
+ if (fsm_listen__ctor(&state)) {
+ err = error(_("could not initialize listener thread"));
+ goto done;
+ }
+
+ if (fsm_health__ctor(&state)) {
+ err = error(_("could not initialize health thread"));
+ goto done;
+ }
+
+ /*
+ * CD out of the worktree root directory.
+ *
+ * The common Git startup mechanism causes our CWD to be the
+ * root of the worktree. On Windows, this causes our process
+ * to hold a locked handle on the CWD. This prevents the
+ * worktree from being moved or deleted while the daemon is
+ * running.
+ *
+ * We assume that our FS and IPC listener threads have either
+ * opened all of the handles that they need or will do
+ * everything using absolute paths.
+ */
+ home = getenv("HOME");
+ if (home && *home && chdir(home))
+ die_errno(_("could not cd home '%s'"), home);
+
+ err = fsmonitor_run_daemon_1(&state);
+
+done:
+ pthread_cond_destroy(&state.cookies_cond);
+ pthread_mutex_destroy(&state.main_lock);
+ fsm_listen__dtor(&state);
+ fsm_health__dtor(&state);
+
+ ipc_server_free(state.ipc_server_data);
+
+ strbuf_release(&state.path_worktree_watch);
+ strbuf_release(&state.path_gitdir_watch);
+ strbuf_release(&state.path_cookie_prefix);
+ strbuf_release(&state.path_ipc);
+ strbuf_release(&state.alias.alias);
+ strbuf_release(&state.alias.points_to);
+
+ return err;
+}
+
+static int try_to_run_foreground_daemon(int detach_console)
+{
+ /*
+ * Technically, we don't need to probe for an existing daemon
+ * process, since we could just call `fsmonitor_run_daemon()`
+ * and let it fail if the pipe/socket is busy.
+ *
+ * However, this method gives us a nicer error message for a
+ * common error case.
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("running fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+#ifdef GIT_WINDOWS_NATIVE
+ if (detach_console)
+ FreeConsole();
+#endif
+
+ return !!fsmonitor_run_daemon();
+}
+
+static start_bg_wait_cb bg_wait_cb;
+
+static int bg_wait_cb(const struct child_process *cp, void *cb_data)
+{
+ enum ipc_active_state s = fsmonitor_ipc__get_state();
+
+ switch (s) {
+ case IPC_STATE__LISTENING:
+ /* child is "ready" */
+ return 0;
+
+ case IPC_STATE__NOT_LISTENING:
+ case IPC_STATE__PATH_NOT_FOUND:
+ /* give child more time */
+ return 1;
+
+ default:
+ case IPC_STATE__INVALID_PATH:
+ case IPC_STATE__OTHER_ERROR:
+		/* all the time in the world won't help */
+ return -1;
+ }
+}
+
+static int try_to_start_background_daemon(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ enum start_bg_result sbgr;
+
+ /*
+ * Before we try to create a background daemon process, see
+ * if a daemon process is already listening. This makes it
+ * easier for us to report an already-listening error to the
+ * console, since our spawn/daemon can only report the success
+ * of creating the background process (and not whether it
+ * immediately exited).
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("starting fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+ cp.git_cmd = 1;
+
+ strvec_push(&cp.args, "fsmonitor--daemon");
+ strvec_push(&cp.args, "run");
+ strvec_push(&cp.args, "--detach");
+ strvec_pushf(&cp.args, "--ipc-threads=%d", fsmonitor__ipc_threads);
+
+ cp.no_stdin = 1;
+ cp.no_stdout = 1;
+ cp.no_stderr = 1;
+
+ sbgr = start_bg_command(&cp, bg_wait_cb, NULL,
+ fsmonitor__start_timeout_sec);
+
+ switch (sbgr) {
+ case SBGR_READY:
+ return 0;
+
+ default:
+ case SBGR_ERROR:
+ case SBGR_CB_ERROR:
+ return error(_("daemon failed to start"));
+
+ case SBGR_TIMEOUT:
+ return error(_("daemon not online yet"));
+
+ case SBGR_DIED:
+ return error(_("daemon terminated"));
+ }
+}
+
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ const char *subcmd;
+ enum fsmonitor_reason reason;
+ int detach_console = 0;
+
+ struct option options[] = {
+ OPT_BOOL(0, "detach", &detach_console, N_("detach from console")),
+ OPT_INTEGER(0, "ipc-threads",
+ &fsmonitor__ipc_threads,
+ N_("use <n> ipc worker threads")),
+ OPT_INTEGER(0, "start-timeout",
+ &fsmonitor__start_timeout_sec,
+ N_("max seconds to wait for background daemon startup")),
+
+ OPT_END()
+ };
+
+ git_config(fsmonitor_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_fsmonitor__daemon_usage, 0);
+ if (argc != 1)
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+ subcmd = argv[0];
+
+ if (fsmonitor__ipc_threads < 1)
+ die(_("invalid 'ipc-threads' value (%d)"),
+ fsmonitor__ipc_threads);
+
+ prepare_repo_settings(the_repository);
+ /*
+ * If the repo is fsmonitor-compatible, explicitly set IPC-mode
+ * (without bothering to load the `core.fsmonitor` config settings).
+ *
+ * If the repo is not compatible, the repo-settings will be set to
+ * incompatible rather than IPC, so we can use one of the __get
+ * routines to detect the discrepancy.
+ */
+ fsm_settings__set_ipc(the_repository);
+
+ reason = fsm_settings__get_reason(the_repository);
+ if (reason > FSMONITOR_REASON_OK)
+ die("%s",
+ fsm_settings__get_incompatible_msg(the_repository,
+ reason));
+
+ if (!strcmp(subcmd, "start"))
+ return !!try_to_start_background_daemon();
+
+ if (!strcmp(subcmd, "run"))
+ return !!try_to_run_foreground_daemon(detach_console);
+
+ if (!strcmp(subcmd, "stop"))
+ return !!do_as_client__send_stop();
+
+ if (!strcmp(subcmd, "status"))
+ return !!do_as_client__status();
+
+ die(_("Unhandled subcommand '%s'"), subcmd);
+}
+
+#else
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+
+ die(_("fsmonitor--daemon not supported on this platform"));
+}
+#endif
diff --git a/builtin/gc.c b/builtin/gc.c
new file mode 100644
index 0000000..02455fd
--- /dev/null
+++ b/builtin/gc.c
@@ -0,0 +1,2651 @@
+/*
+ * git gc builtin command
+ *
+ * Clean up unreachable files and optimize the repository.
+ *
+ * Copyright (c) 2007 James Bowes
+ *
+ * Based on git-gc.sh, which is
+ *
+ * Copyright (c) 2006 Shawn O. Pearce
+ */
+
+#include "builtin.h"
+#include "repository.h"
+#include "config.h"
+#include "tempfile.h"
+#include "lockfile.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "sigchain.h"
+#include "strvec.h"
+#include "commit.h"
+#include "commit-graph.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "pack.h"
+#include "pack-objects.h"
+#include "blob.h"
+#include "tree.h"
+#include "promisor-remote.h"
+#include "refs.h"
+#include "remote.h"
+#include "exec-cmd.h"
+#include "hook.h"
+
+#define FAILED_RUN "failed to run %s"
+
+static const char * const builtin_gc_usage[] = {
+ N_("git gc [<options>]"),
+ NULL
+};
+
+static int pack_refs = 1;
+static int prune_reflogs = 1;
+static int cruft_packs = -1;
+static int aggressive_depth = 50;
+static int aggressive_window = 250;
+static int gc_auto_threshold = 6700;
+static int gc_auto_pack_limit = 50;
+static int detach_auto = 1;
+static timestamp_t gc_log_expire_time;
+static const char *gc_log_expire = "1.day.ago";
+static const char *prune_expire = "2.weeks.ago";
+static const char *prune_worktrees_expire = "3.months.ago";
+static unsigned long big_pack_threshold;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
+
+static struct strvec reflog = STRVEC_INIT;
+static struct strvec repack = STRVEC_INIT;
+static struct strvec prune = STRVEC_INIT;
+static struct strvec prune_worktrees = STRVEC_INIT;
+static struct strvec rerere = STRVEC_INIT;
+
+static struct tempfile *pidfile;
+static struct lock_file log_lock;
+
+static struct string_list pack_garbage = STRING_LIST_INIT_DUP;
+
+static void clean_pack_garbage(void)
+{
+ int i;
+ for (i = 0; i < pack_garbage.nr; i++)
+ unlink_or_warn(pack_garbage.items[i].string);
+ string_list_clear(&pack_garbage, 0);
+}
+
+static void report_pack_garbage(unsigned seen_bits, const char *path)
+{
+ if (seen_bits == PACKDIR_FILE_IDX)
+ string_list_append(&pack_garbage, path);
+}
+
+static void process_log_file(void)
+{
+ struct stat st;
+ if (fstat(get_lock_file_fd(&log_lock), &st)) {
+ /*
+ * Perhaps there was an i/o error or another
+ * unlikely situation. Try to make a note of
+ * this in gc.log along with any existing
+ * messages.
+ */
+ int saved_errno = errno;
+ fprintf(stderr, _("Failed to fstat %s: %s"),
+ get_lock_file_path(&log_lock),
+ strerror(saved_errno));
+ fflush(stderr);
+ commit_lock_file(&log_lock);
+ errno = saved_errno;
+ } else if (st.st_size) {
+ /* There was some error recorded in the lock file */
+ commit_lock_file(&log_lock);
+ } else {
+ /* No error, clean up any old gc.log */
+ unlink(git_path("gc.log"));
+ rollback_lock_file(&log_lock);
+ }
+}
+
+static void process_log_file_at_exit(void)
+{
+ fflush(stderr);
+ process_log_file();
+}
+
+static void process_log_file_on_signal(int signo)
+{
+ process_log_file();
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+static int gc_config_is_timestamp_never(const char *var)
+{
+ const char *value;
+ timestamp_t expire;
+
+ if (!git_config_get_value(var, &value) && value) {
+ if (parse_expiry_date(value, &expire))
+ die(_("failed to parse '%s' value '%s'"), var, value);
+ return expire == 0;
+ }
+ return 0;
+}
+
+static void gc_config(void)
+{
+ const char *value;
+
+ if (!git_config_get_value("gc.packrefs", &value)) {
+ if (value && !strcmp(value, "notbare"))
+ pack_refs = -1;
+ else
+ pack_refs = git_config_bool("gc.packrefs", value);
+ }
+
+ if (gc_config_is_timestamp_never("gc.reflogexpire") &&
+ gc_config_is_timestamp_never("gc.reflogexpireunreachable"))
+ prune_reflogs = 0;
+
+ git_config_get_int("gc.aggressivewindow", &aggressive_window);
+ git_config_get_int("gc.aggressivedepth", &aggressive_depth);
+ git_config_get_int("gc.auto", &gc_auto_threshold);
+ git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
+ git_config_get_bool("gc.autodetach", &detach_auto);
+ git_config_get_bool("gc.cruftpacks", &cruft_packs);
+ git_config_get_expiry("gc.pruneexpire", &prune_expire);
+ git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
+ git_config_get_expiry("gc.logexpiry", &gc_log_expire);
+
+ git_config_get_ulong("gc.bigpackthreshold", &big_pack_threshold);
+ git_config_get_ulong("pack.deltacachesize", &max_delta_cache_size);
+
+ git_config(git_default_config, NULL);
+}
+
+struct maintenance_run_opts;
+static int maintenance_task_pack_refs(MAYBE_UNUSED struct maintenance_run_opts *opts)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "pack-refs", "--all", "--prune", NULL);
+ return run_command(&cmd);
+}
+
+static int too_many_loose_objects(void)
+{
+ /*
+ * Quickly check if a "gc" is needed, by estimating how
+	 * Quickly check if a "gc" is needed by estimating how
+	 * many loose objects there are. Because SHA-1 is evenly
+	 * distributed, we can check a single fan-out directory
+	 * (objects/17) and get a reasonable estimate.
+ DIR *dir;
+ struct dirent *ent;
+ int auto_threshold;
+ int num_loose = 0;
+ int needed = 0;
+ const unsigned hexsz_loose = the_hash_algo->hexsz - 2;
+
+ dir = opendir(git_path("objects/17"));
+ if (!dir)
+ return 0;
+
+ auto_threshold = DIV_ROUND_UP(gc_auto_threshold, 256);
+ while ((ent = readdir(dir)) != NULL) {
+ if (strspn(ent->d_name, "0123456789abcdef") != hexsz_loose ||
+ ent->d_name[hexsz_loose] != '\0')
+ continue;
+ if (++num_loose > auto_threshold) {
+ needed = 1;
+ break;
+ }
+ }
+ closedir(dir);
+ return needed;
+}
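
As a worked example of the estimate (using the default gc.auto value; the math
mirrors the DIV_ROUND_UP call above):

    #include <stdio.h>

    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    int main(void)
    {
    	int gc_auto_threshold = 6700;	/* default value of gc.auto */
    	int per_dir = DIV_ROUND_UP(gc_auto_threshold, 256);

    	/*
    	 * Loose objects are spread over 256 fan-out directories, so seeing
    	 * more than `per_dir` entries in objects/17 suggests roughly
    	 * 256 * per_dir loose objects overall.
    	 */
    	printf("per-directory threshold: %d (~%d objects total)\n",
    	       per_dir, per_dir * 256);
    	return 0;
    }
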
+
+static struct packed_git *find_base_packs(struct string_list *packs,
+ unsigned long limit)
+{
+ struct packed_git *p, *base = NULL;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!p->pack_local)
+ continue;
+ if (limit) {
+ if (p->pack_size >= limit)
+ string_list_append(packs, p->pack_name);
+ } else if (!base || base->pack_size < p->pack_size) {
+ base = p;
+ }
+ }
+
+ if (base)
+ string_list_append(packs, base->pack_name);
+
+ return base;
+}
+
+static int too_many_packs(void)
+{
+ struct packed_git *p;
+ int cnt;
+
+ if (gc_auto_pack_limit <= 0)
+ return 0;
+
+ for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
+ if (!p->pack_local)
+ continue;
+ if (p->pack_keep)
+ continue;
+ /*
+ * Perhaps check the size of the pack and count only
+ * very small ones here?
+ */
+ cnt++;
+ }
+ return gc_auto_pack_limit < cnt;
+}
+
+static uint64_t total_ram(void)
+{
+#if defined(HAVE_SYSINFO)
+ struct sysinfo si;
+
+ if (!sysinfo(&si))
+ return si.totalram;
+#elif defined(HAVE_BSD_SYSCTL) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM))
+ int64_t physical_memory;
+ int mib[2];
+ size_t length;
+
+ mib[0] = CTL_HW;
+# if defined(HW_MEMSIZE)
+ mib[1] = HW_MEMSIZE;
+# else
+ mib[1] = HW_PHYSMEM;
+# endif
+ length = sizeof(int64_t);
+ if (!sysctl(mib, 2, &physical_memory, &length, NULL, 0))
+ return physical_memory;
+#elif defined(GIT_WINDOWS_NATIVE)
+ MEMORYSTATUSEX memInfo;
+
+ memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+ if (GlobalMemoryStatusEx(&memInfo))
+ return memInfo.ullTotalPhys;
+#endif
+ return 0;
+}
+
+static uint64_t estimate_repack_memory(struct packed_git *pack)
+{
+ unsigned long nr_objects = approximate_object_count();
+ size_t os_cache, heap;
+
+ if (!pack || !nr_objects)
+ return 0;
+
+ /*
+ * First we have to scan through at least one pack.
+ * Assume enough room in OS file cache to keep the entire pack
+ * or we may accidentally evict data of other processes from
+ * the cache.
+ */
+ os_cache = pack->pack_size + pack->index_size;
+	/* then pack-objects needs lots more for bookkeeping */
+ heap = sizeof(struct object_entry) * nr_objects;
+ /*
+ * internal rev-list --all --objects takes up some memory too,
+ * let's say half of it is for blobs
+ */
+ heap += sizeof(struct blob) * nr_objects / 2;
+ /*
+ * and the other half is for trees (commits and tags are
+ * usually insignificant)
+ */
+ heap += sizeof(struct tree) * nr_objects / 2;
+ /* and then obj_hash[], underestimated in fact */
+ heap += sizeof(struct object *) * nr_objects;
+ /* revindex is used also */
+ heap += (sizeof(off_t) + sizeof(uint32_t)) * nr_objects;
+ /*
+ * read_sha1_file() (either at delta calculation phase, or
+ * writing phase) also fills up the delta base cache
+ */
+ heap += delta_base_cache_limit;
+ /* and of course pack-objects has its own delta cache */
+ heap += max_delta_cache_size;
+
+ return os_cache + heap;
+}
+
+static int keep_one_pack(struct string_list_item *item, void *data UNUSED)
+{
+ strvec_pushf(&repack, "--keep-pack=%s", basename(item->string));
+ return 0;
+}
+
+static void add_repack_all_option(struct string_list *keep_pack)
+{
+ if (prune_expire && !strcmp(prune_expire, "now"))
+ strvec_push(&repack, "-a");
+ else if (cruft_packs) {
+ strvec_push(&repack, "--cruft");
+ if (prune_expire)
+ strvec_pushf(&repack, "--cruft-expiration=%s", prune_expire);
+ } else {
+ strvec_push(&repack, "-A");
+ if (prune_expire)
+ strvec_pushf(&repack, "--unpack-unreachable=%s", prune_expire);
+ }
+
+ if (keep_pack)
+ for_each_string_list(keep_pack, keep_one_pack, NULL);
+}
+
+static void add_repack_incremental_option(void)
+{
+ strvec_push(&repack, "--no-write-bitmap-index");
+}
+
+static int need_to_gc(void)
+{
+ /*
+ * Setting gc.auto to 0 or negative can disable the
+ * automatic gc.
+ */
+ if (gc_auto_threshold <= 0)
+ return 0;
+
+ /*
+ * If there are too many loose objects, but not too many
+ * packs, we run "repack -d -l". If there are too many packs,
+ * we run "repack -A -d -l". Otherwise we tell the caller
+ * there is no need.
+ */
+ if (too_many_packs()) {
+ struct string_list keep_pack = STRING_LIST_INIT_NODUP;
+
+ if (big_pack_threshold) {
+ find_base_packs(&keep_pack, big_pack_threshold);
+ if (keep_pack.nr >= gc_auto_pack_limit) {
+ big_pack_threshold = 0;
+ string_list_clear(&keep_pack, 0);
+ find_base_packs(&keep_pack, 0);
+ }
+ } else {
+ struct packed_git *p = find_base_packs(&keep_pack, 0);
+ uint64_t mem_have, mem_want;
+
+ mem_have = total_ram();
+ mem_want = estimate_repack_memory(p);
+
+ /*
+ * Only allow 1/2 of memory for pack-objects, leave
+ * the rest for the OS and other processes in the
+ * system.
+ */
+ if (!mem_have || mem_want < mem_have / 2)
+ string_list_clear(&keep_pack, 0);
+ }
+
+ add_repack_all_option(&keep_pack);
+ string_list_clear(&keep_pack, 0);
+ } else if (too_many_loose_objects())
+ add_repack_incremental_option();
+ else
+ return 0;
+
+ if (run_hooks("pre-auto-gc"))
+ return 0;
+ return 1;
+}
+
+/* return NULL on success, else hostname running the gc */
+static const char *lock_repo_for_gc(int force, pid_t* ret_pid)
+{
+ struct lock_file lock = LOCK_INIT;
+ char my_host[HOST_NAME_MAX + 1];
+ struct strbuf sb = STRBUF_INIT;
+ struct stat st;
+ uintmax_t pid;
+ FILE *fp;
+ int fd;
+ char *pidfile_path;
+
+ if (is_tempfile_active(pidfile))
+ /* already locked */
+ return NULL;
+
+ if (xgethostname(my_host, sizeof(my_host)))
+ xsnprintf(my_host, sizeof(my_host), "unknown");
+
+ pidfile_path = git_pathdup("gc.pid");
+ fd = hold_lock_file_for_update(&lock, pidfile_path,
+ LOCK_DIE_ON_ERROR);
+ if (!force) {
+ static char locking_host[HOST_NAME_MAX + 1];
+ static char *scan_fmt;
+ int should_exit;
+
+ if (!scan_fmt)
+ scan_fmt = xstrfmt("%s %%%ds", "%"SCNuMAX, HOST_NAME_MAX);
+ fp = fopen(pidfile_path, "r");
+ memset(locking_host, 0, sizeof(locking_host));
+ should_exit =
+ fp != NULL &&
+ !fstat(fileno(fp), &st) &&
+ /*
+ * 12 hour limit is very generous as gc should
+ * never take that long. On the other hand we
+ * don't really need a strict limit here,
+ * running gc --auto one day late is not a big
+ * problem. --force can be used in manual gc
+ * after the user verifies that no gc is
+ * running.
+ */
+ time(NULL) - st.st_mtime <= 12 * 3600 &&
+ fscanf(fp, scan_fmt, &pid, locking_host) == 2 &&
+ /* be gentle to concurrent "gc" on remote hosts */
+ (strcmp(locking_host, my_host) || !kill(pid, 0) || errno == EPERM);
+ if (fp)
+ fclose(fp);
+ if (should_exit) {
+ if (fd >= 0)
+ rollback_lock_file(&lock);
+ *ret_pid = pid;
+ free(pidfile_path);
+ return locking_host;
+ }
+ }
+
+ strbuf_addf(&sb, "%"PRIuMAX" %s",
+ (uintmax_t) getpid(), my_host);
+ write_in_full(fd, sb.buf, sb.len);
+ strbuf_release(&sb);
+ commit_lock_file(&lock);
+ pidfile = register_tempfile(pidfile_path);
+ free(pidfile_path);
+ return NULL;
+}
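
A small sketch of the "<pid> <hostname>" payload that lock_repo_for_gc() writes
into gc.pid above and that a later `git gc` reads back with fscanf() to decide
whether another gc still holds the repository (the hostname here is made up):

    #include <stdio.h>
    #include <inttypes.h>
    #include <unistd.h>

    int main(void)
    {
    	const char *my_host = "build-host-01";	/* hypothetical hostname */

    	/* Same "<pid> <hostname>" layout written under the lock. */
    	printf("%" PRIuMAX " %s\n", (uintmax_t)getpid(), my_host);
    	return 0;
    }
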
+
+/*
+ * Returns 0 if there was no previous error and gc can proceed, 1 if
+ * gc should not proceed due to an error in the last run. Prints a
+ * message and returns with a non-[01] status code if an error occurred
+ * while reading gc.log
+ */
+static int report_last_gc_error(void)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+ ssize_t len;
+ struct stat st;
+ char *gc_log_path = git_pathdup("gc.log");
+
+ if (stat(gc_log_path, &st)) {
+ if (errno == ENOENT)
+ goto done;
+
+ ret = die_message_errno(_("cannot stat '%s'"), gc_log_path);
+ goto done;
+ }
+
+ if (st.st_mtime < gc_log_expire_time)
+ goto done;
+
+ len = strbuf_read_file(&sb, gc_log_path, 0);
+ if (len < 0)
+ ret = die_message_errno(_("cannot read '%s'"), gc_log_path);
+ else if (len > 0) {
+ /*
+ * A previous gc failed. Report the error, and don't
+ * bother with an automatic gc run since it is likely
+ * to fail in the same way.
+ */
+ warning(_("The last gc run reported the following. "
+ "Please correct the root cause\n"
+ "and remove %s\n"
+ "Automatic cleanup will not be performed "
+ "until the file is removed.\n\n"
+ "%s"),
+ gc_log_path, sb.buf);
+ ret = 1;
+ }
+ strbuf_release(&sb);
+done:
+ free(gc_log_path);
+ return ret;
+}
+
+static void gc_before_repack(void)
+{
+ /*
+ * We may be called twice, as both the pre- and
+ * post-daemonized phases will call us, but running these
+ * commands more than once is pointless and wasteful.
+ */
+ static int done = 0;
+ if (done++)
+ return;
+
+ if (pack_refs && maintenance_task_pack_refs(NULL))
+ die(FAILED_RUN, "pack-refs");
+
+ if (prune_reflogs) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushv(&cmd.args, reflog.v);
+ if (run_command(&cmd))
+ die(FAILED_RUN, reflog.v[0]);
+ }
+}
+
+int cmd_gc(int argc, const char **argv, const char *prefix)
+{
+ int aggressive = 0;
+ int auto_gc = 0;
+ int quiet = 0;
+ int force = 0;
+ const char *name;
+ pid_t pid;
+ int daemonized = 0;
+ int keep_largest_pack = -1;
+ timestamp_t dummy;
+ struct child_process rerere_cmd = CHILD_PROCESS_INIT;
+
+ struct option builtin_gc_options[] = {
+ OPT__QUIET(&quiet, N_("suppress progress reporting")),
+ { OPTION_STRING, 0, "prune", &prune_expire, N_("date"),
+ N_("prune unreferenced objects"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
+ OPT_BOOL(0, "cruft", &cruft_packs, N_("pack unreferenced objects separately")),
+ OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
+ OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL_F(0, "force", &force,
+ N_("force running gc even if there may be another gc running"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL(0, "keep-largest-pack", &keep_largest_pack,
+ N_("repack all other packs except the largest pack")),
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_gc_usage, builtin_gc_options);
+
+ strvec_pushl(&reflog, "reflog", "expire", "--all", NULL);
+ strvec_pushl(&repack, "repack", "-d", "-l", NULL);
+ strvec_pushl(&prune, "prune", "--expire", NULL);
+ strvec_pushl(&prune_worktrees, "worktree", "prune", "--expire", NULL);
+ strvec_pushl(&rerere, "rerere", "gc", NULL);
+
+ /* default expiry time, overwritten in gc_config */
+ gc_config();
+ if (parse_expiry_date(gc_log_expire, &gc_log_expire_time))
+ die(_("failed to parse gc.logExpiry value %s"), gc_log_expire);
+
+ if (pack_refs < 0)
+ pack_refs = !is_bare_repository();
+
+ argc = parse_options(argc, argv, prefix, builtin_gc_options,
+ builtin_gc_usage, 0);
+ if (argc > 0)
+ usage_with_options(builtin_gc_usage, builtin_gc_options);
+
+ if (prune_expire && parse_expiry_date(prune_expire, &dummy))
+ die(_("failed to parse prune expiry value %s"), prune_expire);
+
+ prepare_repo_settings(the_repository);
+ if (cruft_packs < 0)
+ cruft_packs = the_repository->settings.gc_cruft_packs;
+
+ if (aggressive) {
+ strvec_push(&repack, "-f");
+ if (aggressive_depth > 0)
+ strvec_pushf(&repack, "--depth=%d", aggressive_depth);
+ if (aggressive_window > 0)
+ strvec_pushf(&repack, "--window=%d", aggressive_window);
+ }
+ if (quiet)
+ strvec_push(&repack, "-q");
+
+ if (auto_gc) {
+ /*
+		 * Auto-gc should be as non-intrusive as possible.
+ */
+ if (!need_to_gc())
+ return 0;
+ if (!quiet) {
+ if (detach_auto)
+ fprintf(stderr, _("Auto packing the repository in background for optimum performance.\n"));
+ else
+ fprintf(stderr, _("Auto packing the repository for optimum performance.\n"));
+ fprintf(stderr, _("See \"git help gc\" for manual housekeeping.\n"));
+ }
+ if (detach_auto) {
+ int ret = report_last_gc_error();
+
+ if (ret == 1)
+ /* Last gc --auto failed. Skip this one. */
+ return 0;
+ else if (ret)
+ /* an I/O error occurred, already reported */
+ return ret;
+
+ if (lock_repo_for_gc(force, &pid))
+ return 0;
+ gc_before_repack(); /* dies on failure */
+ delete_tempfile(&pidfile);
+
+ /*
+ * failure to daemonize is ok, we'll continue
+			 * failure to daemonize is ok; we'll continue
+			 * in the foreground
+ daemonized = !daemonize();
+ }
+ } else {
+ struct string_list keep_pack = STRING_LIST_INIT_NODUP;
+
+ if (keep_largest_pack != -1) {
+ if (keep_largest_pack)
+ find_base_packs(&keep_pack, 0);
+ } else if (big_pack_threshold) {
+ find_base_packs(&keep_pack, big_pack_threshold);
+ }
+
+ add_repack_all_option(&keep_pack);
+ string_list_clear(&keep_pack, 0);
+ }
+
+ name = lock_repo_for_gc(force, &pid);
+ if (name) {
+ if (auto_gc)
+ return 0; /* be quiet on --auto */
+ die(_("gc is already running on machine '%s' pid %"PRIuMAX" (use --force if not)"),
+ name, (uintmax_t)pid);
+ }
+
+ if (daemonized) {
+ hold_lock_file_for_update(&log_lock,
+ git_path("gc.log"),
+ LOCK_DIE_ON_ERROR);
+ dup2(get_lock_file_fd(&log_lock), 2);
+ sigchain_push_common(process_log_file_on_signal);
+ atexit(process_log_file_at_exit);
+ }
+
+ gc_before_repack();
+
+ if (!repository_format_precious_objects) {
+ struct child_process repack_cmd = CHILD_PROCESS_INIT;
+
+ repack_cmd.git_cmd = 1;
+ repack_cmd.close_object_store = 1;
+ strvec_pushv(&repack_cmd.args, repack.v);
+ if (run_command(&repack_cmd))
+ die(FAILED_RUN, repack.v[0]);
+
+ if (prune_expire) {
+ struct child_process prune_cmd = CHILD_PROCESS_INIT;
+
+ /* run `git prune` even if using cruft packs */
+ strvec_push(&prune, prune_expire);
+ if (quiet)
+ strvec_push(&prune, "--no-progress");
+ if (has_promisor_remote())
+ strvec_push(&prune,
+ "--exclude-promisor-objects");
+ prune_cmd.git_cmd = 1;
+ strvec_pushv(&prune_cmd.args, prune.v);
+ if (run_command(&prune_cmd))
+ die(FAILED_RUN, prune.v[0]);
+ }
+ }
+
+ if (prune_worktrees_expire) {
+ struct child_process prune_worktrees_cmd = CHILD_PROCESS_INIT;
+
+ strvec_push(&prune_worktrees, prune_worktrees_expire);
+ prune_worktrees_cmd.git_cmd = 1;
+ strvec_pushv(&prune_worktrees_cmd.args, prune_worktrees.v);
+ if (run_command(&prune_worktrees_cmd))
+ die(FAILED_RUN, prune_worktrees.v[0]);
+ }
+
+ rerere_cmd.git_cmd = 1;
+ strvec_pushv(&rerere_cmd.args, rerere.v);
+ if (run_command(&rerere_cmd))
+ die(FAILED_RUN, rerere.v[0]);
+
+ report_garbage = report_pack_garbage;
+ reprepare_packed_git(the_repository);
+ if (pack_garbage.nr > 0) {
+ close_object_store(the_repository->objects);
+ clean_pack_garbage();
+ }
+
+ if (the_repository->settings.gc_write_commit_graph == 1)
+ write_commit_graph_reachable(the_repository->objects->odb,
+ !quiet && !daemonized ? COMMIT_GRAPH_WRITE_PROGRESS : 0,
+ NULL);
+
+ if (auto_gc && too_many_loose_objects())
+ warning(_("There are too many unreachable loose objects; "
+ "run 'git prune' to remove them."));
+
+ if (!daemonized)
+ unlink(git_path("gc.log"));
+
+ return 0;
+}
+
+static const char *const builtin_maintenance_run_usage[] = {
+ N_("git maintenance run [--auto] [--[no-]quiet] [--task=<task>] [--schedule]"),
+ NULL
+};
+
+enum schedule_priority {
+ SCHEDULE_NONE = 0,
+ SCHEDULE_WEEKLY = 1,
+ SCHEDULE_DAILY = 2,
+ SCHEDULE_HOURLY = 3,
+};
+
+static enum schedule_priority parse_schedule(const char *value)
+{
+ if (!value)
+ return SCHEDULE_NONE;
+ if (!strcasecmp(value, "hourly"))
+ return SCHEDULE_HOURLY;
+ if (!strcasecmp(value, "daily"))
+ return SCHEDULE_DAILY;
+ if (!strcasecmp(value, "weekly"))
+ return SCHEDULE_WEEKLY;
+ return SCHEDULE_NONE;
+}
+
+static int maintenance_opt_schedule(const struct option *opt, const char *arg,
+ int unset)
+{
+ enum schedule_priority *priority = opt->value;
+
+ if (unset)
+ die(_("--no-schedule is not allowed"));
+
+ *priority = parse_schedule(arg);
+
+ if (!*priority)
+ die(_("unrecognized --schedule argument '%s'"), arg);
+
+ return 0;
+}
+
+struct maintenance_run_opts {
+ int auto_flag;
+ int quiet;
+ enum schedule_priority schedule;
+};
+
+/* Remember to update object flag allocation in object.h */
+#define SEEN (1u<<0)
+
+struct cg_auto_data {
+ int num_not_in_graph;
+ int limit;
+};
+
+static int dfs_on_ref(const char *refname UNUSED,
+ const struct object_id *oid,
+ int flags UNUSED,
+ void *cb_data)
+{
+ struct cg_auto_data *data = (struct cg_auto_data *)cb_data;
+ int result = 0;
+ struct object_id peeled;
+ struct commit_list *stack = NULL;
+ struct commit *commit;
+
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+ if (oid_object_info(the_repository, oid, NULL) != OBJ_COMMIT)
+ return 0;
+
+ commit = lookup_commit(the_repository, oid);
+ if (!commit)
+ return 0;
+ if (parse_commit(commit) ||
+ commit_graph_position(commit) != COMMIT_NOT_FROM_GRAPH)
+ return 0;
+
+ data->num_not_in_graph++;
+
+ if (data->num_not_in_graph >= data->limit)
+ return 1;
+
+ commit_list_append(commit, &stack);
+
+ while (!result && stack) {
+ struct commit_list *parent;
+
+ commit = pop_commit(&stack);
+
+ for (parent = commit->parents; parent; parent = parent->next) {
+ if (parse_commit(parent->item) ||
+ commit_graph_position(parent->item) != COMMIT_NOT_FROM_GRAPH ||
+ parent->item->object.flags & SEEN)
+ continue;
+
+ parent->item->object.flags |= SEEN;
+ data->num_not_in_graph++;
+
+ if (data->num_not_in_graph >= data->limit) {
+ result = 1;
+ break;
+ }
+
+ commit_list_append(parent->item, &stack);
+ }
+ }
+
+ free_commit_list(stack);
+ return result;
+}
+
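+/*
+ * Decide whether the commit-graph task should run under --auto. The
+ * maintenance.commit-graph.auto setting controls this: 0 disables the
+ * task, a negative value always runs it, and a positive value N
+ * (default 100) runs it once at least N commits reachable from refs
+ * are not yet in the commit-graph, as counted by dfs_on_ref() above.
+ */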
+static int should_write_commit_graph(void)
+{
+ int result;
+ struct cg_auto_data data;
+
+ data.num_not_in_graph = 0;
+ data.limit = 100;
+ git_config_get_int("maintenance.commit-graph.auto",
+ &data.limit);
+
+ if (!data.limit)
+ return 0;
+ if (data.limit < 0)
+ return 1;
+
+ result = for_each_ref(dfs_on_ref, &data);
+
+ repo_clear_commit_marks(the_repository, SEEN);
+
+ return result;
+}
+
+static int run_write_commit_graph(struct maintenance_run_opts *opts)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ child.git_cmd = child.close_object_store = 1;
+ strvec_pushl(&child.args, "commit-graph", "write",
+ "--split", "--reachable", NULL);
+
+ if (opts->quiet)
+ strvec_push(&child.args, "--no-progress");
+
+ return !!run_command(&child);
+}
+
+static int maintenance_task_commit_graph(struct maintenance_run_opts *opts)
+{
+ prepare_repo_settings(the_repository);
+ if (!the_repository->settings.core_commit_graph)
+ return 0;
+
+ if (run_write_commit_graph(opts)) {
+ error(_("failed to write commit-graph"));
+ return 1;
+ }
+
+ return 0;
+}
+
+static int fetch_remote(struct remote *remote, void *cbdata)
+{
+ struct maintenance_run_opts *opts = cbdata;
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ if (remote->skip_default_update)
+ return 0;
+
+ child.git_cmd = 1;
+ strvec_pushl(&child.args, "fetch", remote->name,
+ "--prefetch", "--prune", "--no-tags",
+ "--no-write-fetch-head", "--recurse-submodules=no",
+ NULL);
+
+ if (opts->quiet)
+ strvec_push(&child.args, "--quiet");
+
+ return !!run_command(&child);
+}
+
+static int maintenance_task_prefetch(struct maintenance_run_opts *opts)
+{
+ if (for_each_remote(fetch_remote, opts)) {
+ error(_("failed to prefetch remotes"));
+ return 1;
+ }
+
+ return 0;
+}
+
+static int maintenance_task_gc(struct maintenance_run_opts *opts)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ child.git_cmd = child.close_object_store = 1;
+ strvec_push(&child.args, "gc");
+
+ if (opts->auto_flag)
+ strvec_push(&child.args, "--auto");
+ if (opts->quiet)
+ strvec_push(&child.args, "--quiet");
+ else
+ strvec_push(&child.args, "--no-quiet");
+
+ return run_command(&child);
+}
+
+static int prune_packed(struct maintenance_run_opts *opts)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ child.git_cmd = 1;
+ strvec_push(&child.args, "prune-packed");
+
+ if (opts->quiet)
+ strvec_push(&child.args, "--quiet");
+
+ return !!run_command(&child);
+}
+
+struct write_loose_object_data {
+ FILE *in;
+ int count;
+ int batch_size;
+};
+
+static int loose_object_auto_limit = 100;
+
+static int loose_object_count(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ int *count = (int*)data;
+ if (++(*count) >= loose_object_auto_limit)
+ return 1;
+ return 0;
+}
+
+static int loose_object_auto_condition(void)
+{
+ int count = 0;
+
+ git_config_get_int("maintenance.loose-objects.auto",
+ &loose_object_auto_limit);
+
+ if (!loose_object_auto_limit)
+ return 0;
+ if (loose_object_auto_limit < 0)
+ return 1;
+
+ return for_each_loose_file_in_objdir(the_repository->objects->odb->path,
+ loose_object_count,
+ NULL, NULL, &count);
+}
+
+static int bail_on_loose(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ return 1;
+}
+
+static int write_loose_object_to_stdin(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ struct write_loose_object_data *d = (struct write_loose_object_data *)data;
+
+ fprintf(d->in, "%s\n", oid_to_hex(oid));
+
+ return ++(d->count) > d->batch_size;
+}
+
+static int pack_loose(struct maintenance_run_opts *opts)
+{
+ struct repository *r = the_repository;
+ int result = 0;
+ struct write_loose_object_data data;
+ struct child_process pack_proc = CHILD_PROCESS_INIT;
+
+ /*
+ * Do not start pack-objects process
+ * if there are no loose objects.
+ */
+ if (!for_each_loose_file_in_objdir(r->objects->odb->path,
+ bail_on_loose,
+ NULL, NULL, NULL))
+ return 0;
+
+ pack_proc.git_cmd = 1;
+
+ strvec_push(&pack_proc.args, "pack-objects");
+ if (opts->quiet)
+ strvec_push(&pack_proc.args, "--quiet");
+ strvec_pushf(&pack_proc.args, "%s/pack/loose", r->objects->odb->path);
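+	/*
+	 * pack-objects reads object IDs from its stdin (fed below) and uses
+	 * the base name given above, so the new pack typically lands at
+	 * <objdir>/pack/loose-<pack-hash>.pack alongside its .idx file.
+	 */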
+
+ pack_proc.in = -1;
+
+ if (start_command(&pack_proc)) {
+ error(_("failed to start 'git pack-objects' process"));
+ return 1;
+ }
+
+ data.in = xfdopen(pack_proc.in, "w");
+ data.count = 0;
+ data.batch_size = 50000;
+
+ for_each_loose_file_in_objdir(r->objects->odb->path,
+ write_loose_object_to_stdin,
+ NULL,
+ NULL,
+ &data);
+
+ fclose(data.in);
+
+ if (finish_command(&pack_proc)) {
+ error(_("failed to finish 'git pack-objects' process"));
+ result = 1;
+ }
+
+ return result;
+}
+
+static int maintenance_task_loose_objects(struct maintenance_run_opts *opts)
+{
+ return prune_packed(opts) || pack_loose(opts);
+}
+
+static int incremental_repack_auto_condition(void)
+{
+ struct packed_git *p;
+ int incremental_repack_auto_limit = 10;
+ int count = 0;
+
+ prepare_repo_settings(the_repository);
+ if (!the_repository->settings.core_multi_pack_index)
+ return 0;
+
+ git_config_get_int("maintenance.incremental-repack.auto",
+ &incremental_repack_auto_limit);
+
+ if (!incremental_repack_auto_limit)
+ return 0;
+ if (incremental_repack_auto_limit < 0)
+ return 1;
+
+ for (p = get_packed_git(the_repository);
+ count < incremental_repack_auto_limit && p;
+ p = p->next) {
+ if (!p->multi_pack_index)
+ count++;
+ }
+
+ return count >= incremental_repack_auto_limit;
+}
+
+static int multi_pack_index_write(struct maintenance_run_opts *opts)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ child.git_cmd = 1;
+ strvec_pushl(&child.args, "multi-pack-index", "write", NULL);
+
+ if (opts->quiet)
+ strvec_push(&child.args, "--no-progress");
+
+ if (run_command(&child))
+ return error(_("failed to write multi-pack-index"));
+
+ return 0;
+}
+
+static int multi_pack_index_expire(struct maintenance_run_opts *opts)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ child.git_cmd = child.close_object_store = 1;
+ strvec_pushl(&child.args, "multi-pack-index", "expire", NULL);
+
+ if (opts->quiet)
+ strvec_push(&child.args, "--no-progress");
+
+ if (run_command(&child))
+ return error(_("'git multi-pack-index expire' failed"));
+
+ return 0;
+}
+
+#define TWO_GIGABYTES (INT32_MAX)
+
+static off_t get_auto_pack_size(void)
+{
+ /*
+ * The "auto" value is special: we optimize for
+ * one large pack-file (i.e. from a clone) and
+ * expect the rest to be small and they can be
+ * repacked quickly.
+ *
+ * The strategy we select here is to select a
+ * size that is one more than the second largest
+ * pack-file. This ensures that we will repack
+ * at least two packs if there are three or more
+ * packs.
+ */
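+	/*
+	 * A worked example with made-up sizes: given packs of 6 GB, 1 GB
+	 * and 200 MB, max_size ends up at 6 GB and second_largest_size at
+	 * 1 GB, so this function returns 1 GB plus one byte, which (per the
+	 * strategy above) covers both of the smaller packs but not the big
+	 * pack from the initial clone.
+	 */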
+ off_t max_size = 0;
+ off_t second_largest_size = 0;
+ off_t result_size;
+ struct packed_git *p;
+ struct repository *r = the_repository;
+
+ reprepare_packed_git(r);
+ for (p = get_all_packs(r); p; p = p->next) {
+ if (p->pack_size > max_size) {
+ second_largest_size = max_size;
+ max_size = p->pack_size;
+ } else if (p->pack_size > second_largest_size)
+ second_largest_size = p->pack_size;
+ }
+
+ result_size = second_largest_size + 1;
+
+ /* But limit ourselves to a batch size of 2g */
+ if (result_size > TWO_GIGABYTES)
+ result_size = TWO_GIGABYTES;
+
+ return result_size;
+}
+
+static int multi_pack_index_repack(struct maintenance_run_opts *opts)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ child.git_cmd = child.close_object_store = 1;
+ strvec_pushl(&child.args, "multi-pack-index", "repack", NULL);
+
+ if (opts->quiet)
+ strvec_push(&child.args, "--no-progress");
+
+ strvec_pushf(&child.args, "--batch-size=%"PRIuMAX,
+ (uintmax_t)get_auto_pack_size());
+
+ if (run_command(&child))
+ return error(_("'git multi-pack-index repack' failed"));
+
+ return 0;
+}
+
+static int maintenance_task_incremental_repack(struct maintenance_run_opts *opts)
+{
+ prepare_repo_settings(the_repository);
+ if (!the_repository->settings.core_multi_pack_index) {
+ warning(_("skipping incremental-repack task because core.multiPackIndex is disabled"));
+ return 0;
+ }
+
+ if (multi_pack_index_write(opts))
+ return 1;
+ if (multi_pack_index_expire(opts))
+ return 1;
+ if (multi_pack_index_repack(opts))
+ return 1;
+ return 0;
+}
+
+typedef int maintenance_task_fn(struct maintenance_run_opts *opts);
+
+/*
+ * An auto condition function returns 1 if the task should run
+ * and 0 if the task should NOT run. See need_to_gc() for an
+ * example.
+ */
+typedef int maintenance_auto_fn(void);
+
+struct maintenance_task {
+ const char *name;
+ maintenance_task_fn *fn;
+ maintenance_auto_fn *auto_condition;
+ unsigned enabled:1;
+
+ enum schedule_priority schedule;
+
+ /* -1 if not selected. */
+ int selected_order;
+};
+
+enum maintenance_task_label {
+ TASK_PREFETCH,
+ TASK_LOOSE_OBJECTS,
+ TASK_INCREMENTAL_REPACK,
+ TASK_GC,
+ TASK_COMMIT_GRAPH,
+ TASK_PACK_REFS,
+
+ /* Leave as final value */
+ TASK__COUNT
+};
+
+static struct maintenance_task tasks[] = {
+ [TASK_PREFETCH] = {
+ "prefetch",
+ maintenance_task_prefetch,
+ },
+ [TASK_LOOSE_OBJECTS] = {
+ "loose-objects",
+ maintenance_task_loose_objects,
+ loose_object_auto_condition,
+ },
+ [TASK_INCREMENTAL_REPACK] = {
+ "incremental-repack",
+ maintenance_task_incremental_repack,
+ incremental_repack_auto_condition,
+ },
+ [TASK_GC] = {
+ "gc",
+ maintenance_task_gc,
+ need_to_gc,
+ 1,
+ },
+ [TASK_COMMIT_GRAPH] = {
+ "commit-graph",
+ maintenance_task_commit_graph,
+ should_write_commit_graph,
+ },
+ [TASK_PACK_REFS] = {
+ "pack-refs",
+ maintenance_task_pack_refs,
+ NULL,
+ },
+};
+
+static int compare_tasks_by_selection(const void *a_, const void *b_)
+{
+ const struct maintenance_task *a = a_;
+ const struct maintenance_task *b = b_;
+
+ return b->selected_order - a->selected_order;
+}
+
+static int maintenance_run_tasks(struct maintenance_run_opts *opts)
+{
+ int i, found_selected = 0;
+ int result = 0;
+ struct lock_file lk;
+ struct repository *r = the_repository;
+ char *lock_path = xstrfmt("%s/maintenance", r->objects->odb->path);
+
+ if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) {
+ /*
+ * Another maintenance command is running.
+ *
+ * If --auto was provided, then it is likely due to a
+ * recursive process stack. Do not report an error in
+ * that case.
+ */
+ if (!opts->auto_flag && !opts->quiet)
+ warning(_("lock file '%s' exists, skipping maintenance"),
+ lock_path);
+ free(lock_path);
+ return 0;
+ }
+ free(lock_path);
+
+ for (i = 0; !found_selected && i < TASK__COUNT; i++)
+ found_selected = tasks[i].selected_order >= 0;
+
+ if (found_selected)
+ QSORT(tasks, TASK__COUNT, compare_tasks_by_selection);
+
+ for (i = 0; i < TASK__COUNT; i++) {
+ if (found_selected && tasks[i].selected_order < 0)
+ continue;
+
+ if (!found_selected && !tasks[i].enabled)
+ continue;
+
+ if (opts->auto_flag &&
+ (!tasks[i].auto_condition ||
+ !tasks[i].auto_condition()))
+ continue;
+
+ if (opts->schedule && tasks[i].schedule < opts->schedule)
+ continue;
+
+ trace2_region_enter("maintenance", tasks[i].name, r);
+ if (tasks[i].fn(opts)) {
+ error(_("task '%s' failed"), tasks[i].name);
+ result = 1;
+ }
+ trace2_region_leave("maintenance", tasks[i].name, r);
+ }
+
+ rollback_lock_file(&lk);
+ return result;
+}
+
+static void initialize_maintenance_strategy(void)
+{
+ char *config_str;
+
+ if (git_config_get_string("maintenance.strategy", &config_str))
+ return;
+
+ if (!strcasecmp(config_str, "incremental")) {
+ tasks[TASK_GC].schedule = SCHEDULE_NONE;
+ tasks[TASK_COMMIT_GRAPH].enabled = 1;
+ tasks[TASK_COMMIT_GRAPH].schedule = SCHEDULE_HOURLY;
+ tasks[TASK_PREFETCH].enabled = 1;
+ tasks[TASK_PREFETCH].schedule = SCHEDULE_HOURLY;
+ tasks[TASK_INCREMENTAL_REPACK].enabled = 1;
+ tasks[TASK_INCREMENTAL_REPACK].schedule = SCHEDULE_DAILY;
+ tasks[TASK_LOOSE_OBJECTS].enabled = 1;
+ tasks[TASK_LOOSE_OBJECTS].schedule = SCHEDULE_DAILY;
+ tasks[TASK_PACK_REFS].enabled = 1;
+ tasks[TASK_PACK_REFS].schedule = SCHEDULE_WEEKLY;
+ }
+}
+
+static void initialize_task_config(int schedule)
+{
+ int i;
+ struct strbuf config_name = STRBUF_INIT;
+ gc_config();
+
+ if (schedule)
+ initialize_maintenance_strategy();
+
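+	/*
+	 * Each task can be toggled and scheduled through configuration, for
+	 * example (illustrative values only):
+	 *
+	 *	[maintenance "prefetch"]
+	 *		enabled = true
+	 *		schedule = hourly
+	 *
+	 * which the loop below reads back as maintenance.prefetch.enabled
+	 * and maintenance.prefetch.schedule.
+	 */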
+ for (i = 0; i < TASK__COUNT; i++) {
+ int config_value;
+ char *config_str;
+
+ strbuf_reset(&config_name);
+ strbuf_addf(&config_name, "maintenance.%s.enabled",
+ tasks[i].name);
+
+ if (!git_config_get_bool(config_name.buf, &config_value))
+ tasks[i].enabled = config_value;
+
+ strbuf_reset(&config_name);
+ strbuf_addf(&config_name, "maintenance.%s.schedule",
+ tasks[i].name);
+
+ if (!git_config_get_string(config_name.buf, &config_str)) {
+ tasks[i].schedule = parse_schedule(config_str);
+ free(config_str);
+ }
+ }
+
+ strbuf_release(&config_name);
+}
+
+static int task_option_parse(const struct option *opt,
+ const char *arg, int unset)
+{
+ int i, num_selected = 0;
+ struct maintenance_task *task = NULL;
+
+ BUG_ON_OPT_NEG(unset);
+
+ for (i = 0; i < TASK__COUNT; i++) {
+ if (tasks[i].selected_order >= 0)
+ num_selected++;
+ if (!strcasecmp(tasks[i].name, arg)) {
+ task = &tasks[i];
+ }
+ }
+
+ if (!task) {
+ error(_("'%s' is not a valid task"), arg);
+ return 1;
+ }
+
+ if (task->selected_order >= 0) {
+ error(_("task '%s' cannot be selected multiple times"), arg);
+ return 1;
+ }
+
+ task->selected_order = num_selected + 1;
+
+ return 0;
+}
+
+static int maintenance_run(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct maintenance_run_opts opts;
+ struct option builtin_maintenance_run_options[] = {
+ OPT_BOOL(0, "auto", &opts.auto_flag,
+ N_("run tasks based on the state of the repository")),
+ OPT_CALLBACK(0, "schedule", &opts.schedule, N_("frequency"),
+ N_("run tasks based on frequency"),
+ maintenance_opt_schedule),
+ OPT_BOOL(0, "quiet", &opts.quiet,
+ N_("do not report progress or other information over stderr")),
+ OPT_CALLBACK_F(0, "task", NULL, N_("task"),
+ N_("run a specific task"),
+ PARSE_OPT_NONEG, task_option_parse),
+ OPT_END()
+ };
+ memset(&opts, 0, sizeof(opts));
+
+ opts.quiet = !isatty(2);
+
+ for (i = 0; i < TASK__COUNT; i++)
+ tasks[i].selected_order = -1;
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_maintenance_run_options,
+ builtin_maintenance_run_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (opts.auto_flag && opts.schedule)
+ die(_("use at most one of --auto and --schedule=<frequency>"));
+
+ initialize_task_config(opts.schedule);
+
+ if (argc != 0)
+ usage_with_options(builtin_maintenance_run_usage,
+ builtin_maintenance_run_options);
+ return maintenance_run_tasks(&opts);
+}
+
+static char *get_maintpath(void)
+{
+ struct strbuf sb = STRBUF_INIT;
+ const char *p = the_repository->worktree ?
+ the_repository->worktree : the_repository->gitdir;
+
+ strbuf_realpath(&sb, p, 1);
+ return strbuf_detach(&sb, NULL);
+}
+
+static char const * const builtin_maintenance_register_usage[] = {
+ "git maintenance register [--config-file <path>]",
+ NULL
+};
+
+static int maintenance_register(int argc, const char **argv, const char *prefix)
+{
+ char *config_file = NULL;
+ struct option options[] = {
+ OPT_STRING(0, "config-file", &config_file, N_("file"), N_("use given config file")),
+ OPT_END(),
+ };
+ int found = 0;
+ const char *key = "maintenance.repo";
+ char *config_value;
+ char *maintpath = get_maintpath();
+ struct string_list_item *item;
+ const struct string_list *list;
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_maintenance_register_usage, 0);
+ if (argc)
+ usage_with_options(builtin_maintenance_register_usage,
+ options);
+
+ /* Disable foreground maintenance */
+ git_config_set("maintenance.auto", "false");
+
+ /* Set maintenance strategy, if unset */
+ if (!git_config_get_string("maintenance.strategy", &config_value))
+ free(config_value);
+ else
+ git_config_set("maintenance.strategy", "incremental");
+
+ list = git_config_get_value_multi(key);
+ if (list) {
+ for_each_string_list_item(item, list) {
+ if (!strcmp(maintpath, item->string)) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ int rc;
+ char *user_config = NULL, *xdg_config = NULL;
+
+ if (!config_file) {
+ git_global_config(&user_config, &xdg_config);
+ config_file = user_config;
+ if (!user_config)
+ die(_("$HOME not set"));
+ }
+ rc = git_config_set_multivar_in_file_gently(
+ config_file, "maintenance.repo", maintpath,
+ CONFIG_REGEX_NONE, 0);
+ free(user_config);
+ free(xdg_config);
+
+ if (rc)
+ die(_("unable to add '%s' value of '%s'"),
+ key, maintpath);
+ }
+
+ free(maintpath);
+ return 0;
+}
+
+static char const * const builtin_maintenance_unregister_usage[] = {
+ "git maintenance unregister [--config-file <path>] [--force]",
+ NULL
+};
+
+static int maintenance_unregister(int argc, const char **argv, const char *prefix)
+{
+ int force = 0;
+ char *config_file = NULL;
+ struct option options[] = {
+ OPT_STRING(0, "config-file", &config_file, N_("file"), N_("use given config file")),
+ OPT__FORCE(&force,
+ N_("return success even if repository was not registered"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_END(),
+ };
+ const char *key = "maintenance.repo";
+ char *maintpath = get_maintpath();
+ int found = 0;
+ struct string_list_item *item;
+ const struct string_list *list;
+ struct config_set cs = { { 0 } };
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_maintenance_unregister_usage, 0);
+ if (argc)
+ usage_with_options(builtin_maintenance_unregister_usage,
+ options);
+
+ if (config_file) {
+ git_configset_init(&cs);
+ git_configset_add_file(&cs, config_file);
+ list = git_configset_get_value_multi(&cs, key);
+ } else {
+ list = git_config_get_value_multi(key);
+ }
+ if (list) {
+ for_each_string_list_item(item, list) {
+ if (!strcmp(maintpath, item->string)) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ int rc;
+ char *user_config = NULL, *xdg_config = NULL;
+ if (!config_file) {
+ git_global_config(&user_config, &xdg_config);
+ config_file = user_config;
+ if (!user_config)
+ die(_("$HOME not set"));
+ }
+ rc = git_config_set_multivar_in_file_gently(
+ config_file, key, NULL, maintpath,
+ CONFIG_FLAGS_MULTI_REPLACE | CONFIG_FLAGS_FIXED_VALUE);
+ free(user_config);
+ free(xdg_config);
+
+ if (rc &&
+ (!force || rc == CONFIG_NOTHING_SET))
+ die(_("unable to unset '%s' value of '%s'"),
+ key, maintpath);
+ } else if (!force) {
+ die(_("repository '%s' is not registered"), maintpath);
+ }
+
+ git_configset_clear(&cs);
+ free(maintpath);
+ return 0;
+}
+
+static const char *get_frequency(enum schedule_priority schedule)
+{
+ switch (schedule) {
+ case SCHEDULE_HOURLY:
+ return "hourly";
+ case SCHEDULE_DAILY:
+ return "daily";
+ case SCHEDULE_WEEKLY:
+ return "weekly";
+ default:
+ BUG("invalid schedule %d", schedule);
+ }
+}
+
+/*
+ * `get_schedule_cmd` reads the GIT_TEST_MAINT_SCHEDULER environment variable
+ * to mock the schedulers that `git maintenance start` relies on.
+ *
+ * For testing purposes, GIT_TEST_MAINT_SCHEDULER can be set to a comma-separated
+ * list of colon-separated key/value pairs where each pair contains a scheduler
+ * and its corresponding mock.
+ *
+ * * If $GIT_TEST_MAINT_SCHEDULER is not set, return false and leave the
+ * arguments unmodified.
+ *
+ * * If $GIT_TEST_MAINT_SCHEDULER is set, return true.
+ * In this case, the *cmd value is read as input.
+ *
+ * * If the input value *cmd is the key of one of the comma-separated list
+ *   items, then *is_available is set to true and *cmd is modified to become
+ *   the mock command.
+ *
+ * * If the input value *cmd isn't the key of any of the comma-separated list
+ *   items, then *is_available is set to false.
+ *
+ * Ex.:
+ * GIT_TEST_MAINT_SCHEDULER not set
+ * +-------+-------------------------------------------------+
+ * | Input | Output |
+ * | *cmd | return code | *cmd | *is_available |
+ * +-------+-------------+-------------------+---------------+
+ * | "foo" | false | "foo" (unchanged) | (unchanged) |
+ * +-------+-------------+-------------------+---------------+
+ *
+ * GIT_TEST_MAINT_SCHEDULER set to “foo:./mock_foo.sh,bar:./mock_bar.sh”
+ * +-------+-------------------------------------------------+
+ * | Input | Output |
+ * | *cmd | return code | *cmd | *is_available |
+ * +-------+-------------+-------------------+---------------+
+ * | "foo" | true | "./mock.foo.sh" | true |
+ * | "qux" | true | "qux" (unchanged) | false |
+ * +-------+-------------+-------------------+---------------+
+ */
+static int get_schedule_cmd(const char **cmd, int *is_available)
+{
+ char *testing = xstrdup_or_null(getenv("GIT_TEST_MAINT_SCHEDULER"));
+ struct string_list_item *item;
+ struct string_list list = STRING_LIST_INIT_NODUP;
+
+ if (!testing)
+ return 0;
+
+ if (is_available)
+ *is_available = 0;
+
+ string_list_split_in_place(&list, testing, ',', -1);
+ for_each_string_list_item(item, &list) {
+ struct string_list pair = STRING_LIST_INIT_NODUP;
+
+ if (string_list_split_in_place(&pair, item->string, ':', 2) != 2)
+ continue;
+
+ if (!strcmp(*cmd, pair.items[0].string)) {
+ *cmd = pair.items[1].string;
+ if (is_available)
+ *is_available = 1;
+ string_list_clear(&list, 0);
+ UNLEAK(testing);
+ return 1;
+ }
+ }
+
+ string_list_clear(&list, 0);
+ free(testing);
+ return 1;
+}
+
+static int is_launchctl_available(void)
+{
+ const char *cmd = "launchctl";
+ int is_available;
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+#ifdef __APPLE__
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static char *launchctl_service_name(const char *frequency)
+{
+ struct strbuf label = STRBUF_INIT;
+ strbuf_addf(&label, "org.git-scm.git.%s", frequency);
+ return strbuf_detach(&label, NULL);
+}
+
+static char *launchctl_service_filename(const char *name)
+{
+ char *expanded;
+ struct strbuf filename = STRBUF_INIT;
+ strbuf_addf(&filename, "~/Library/LaunchAgents/%s.plist", name);
+
+ expanded = interpolate_path(filename.buf, 1);
+ if (!expanded)
+ die(_("failed to expand path '%s'"), filename.buf);
+
+ strbuf_release(&filename);
+ return expanded;
+}
+
+static char *launchctl_get_uid(void)
+{
+ return xstrfmt("gui/%d", getuid());
+}
+
+static int launchctl_boot_plist(int enable, const char *filename)
+{
+ const char *cmd = "launchctl";
+ int result;
+ struct child_process child = CHILD_PROCESS_INIT;
+ char *uid = launchctl_get_uid();
+
+ get_schedule_cmd(&cmd, NULL);
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, enable ? "bootstrap" : "bootout", uid,
+ filename, NULL);
+
+ child.no_stderr = 1;
+ child.no_stdout = 1;
+
+ if (start_command(&child))
+ die(_("failed to start launchctl"));
+
+ result = finish_command(&child);
+
+ free(uid);
+ return result;
+}
+
+static int launchctl_remove_plist(enum schedule_priority schedule)
+{
+ const char *frequency = get_frequency(schedule);
+ char *name = launchctl_service_name(frequency);
+ char *filename = launchctl_service_filename(name);
+ int result = launchctl_boot_plist(0, filename);
+ unlink(filename);
+ free(filename);
+ free(name);
+ return result;
+}
+
+static int launchctl_remove_plists(void)
+{
+ return launchctl_remove_plist(SCHEDULE_HOURLY) ||
+ launchctl_remove_plist(SCHEDULE_DAILY) ||
+ launchctl_remove_plist(SCHEDULE_WEEKLY);
+}
+
+static int launchctl_list_contains_plist(const char *name, const char *cmd)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, "list", name, NULL);
+
+ child.no_stderr = 1;
+ child.no_stdout = 1;
+
+ if (start_command(&child))
+ die(_("failed to start launchctl"));
+
+ /* Returns failure if 'name' doesn't exist. */
+ return !finish_command(&child);
+}
+
+static int launchctl_schedule_plist(const char *exec_path, enum schedule_priority schedule)
+{
+ int i, fd;
+ const char *preamble, *repeat;
+ const char *frequency = get_frequency(schedule);
+ char *name = launchctl_service_name(frequency);
+ char *filename = launchctl_service_filename(name);
+ struct lock_file lk = LOCK_INIT;
+ static unsigned long lock_file_timeout_ms = ULONG_MAX;
+ struct strbuf plist = STRBUF_INIT, plist2 = STRBUF_INIT;
+ struct stat st;
+ const char *cmd = "launchctl";
+
+ get_schedule_cmd(&cmd, NULL);
+ preamble = "<?xml version=\"1.0\"?>\n"
+ "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
+ "<plist version=\"1.0\">"
+ "<dict>\n"
+ "<key>Label</key><string>%s</string>\n"
+ "<key>ProgramArguments</key>\n"
+ "<array>\n"
+ "<string>%s/git</string>\n"
+ "<string>--exec-path=%s</string>\n"
+ "<string>for-each-repo</string>\n"
+ "<string>--config=maintenance.repo</string>\n"
+ "<string>maintenance</string>\n"
+ "<string>run</string>\n"
+ "<string>--schedule=%s</string>\n"
+ "</array>\n"
+ "<key>StartCalendarInterval</key>\n"
+ "<array>\n";
+ strbuf_addf(&plist, preamble, name, exec_path, exec_path, frequency);
+
+ switch (schedule) {
+ case SCHEDULE_HOURLY:
+ repeat = "<dict>\n"
+ "<key>Hour</key><integer>%d</integer>\n"
+ "<key>Minute</key><integer>0</integer>\n"
+ "</dict>\n";
+ for (i = 1; i <= 23; i++)
+ strbuf_addf(&plist, repeat, i);
+ break;
+
+ case SCHEDULE_DAILY:
+ repeat = "<dict>\n"
+ "<key>Day</key><integer>%d</integer>\n"
+ "<key>Hour</key><integer>0</integer>\n"
+ "<key>Minute</key><integer>0</integer>\n"
+ "</dict>\n";
+ for (i = 1; i <= 6; i++)
+ strbuf_addf(&plist, repeat, i);
+ break;
+
+ case SCHEDULE_WEEKLY:
+ strbuf_addstr(&plist,
+ "<dict>\n"
+ "<key>Day</key><integer>0</integer>\n"
+ "<key>Hour</key><integer>0</integer>\n"
+ "<key>Minute</key><integer>0</integer>\n"
+ "</dict>\n");
+ break;
+
+ default:
+ /* unreachable */
+ break;
+ }
+ strbuf_addstr(&plist, "</array>\n</dict>\n</plist>\n");
+
+ if (safe_create_leading_directories(filename))
+ die(_("failed to create directories for '%s'"), filename);
+
+ if ((long)lock_file_timeout_ms < 0 &&
+ git_config_get_ulong("gc.launchctlplistlocktimeoutms",
+ &lock_file_timeout_ms))
+ lock_file_timeout_ms = 150;
+
+ fd = hold_lock_file_for_update_timeout(&lk, filename, LOCK_DIE_ON_ERROR,
+ lock_file_timeout_ms);
+
+ /*
+ * Does this file already exist? With the intended contents? Is it
+ * registered already? Then it does not need to be re-registered.
+ */
+ if (!stat(filename, &st) && st.st_size == plist.len &&
+ strbuf_read_file(&plist2, filename, plist.len) == plist.len &&
+ !strbuf_cmp(&plist, &plist2) &&
+ launchctl_list_contains_plist(name, cmd))
+ rollback_lock_file(&lk);
+ else {
+ if (write_in_full(fd, plist.buf, plist.len) < 0 ||
+ commit_lock_file(&lk))
+ die_errno(_("could not write '%s'"), filename);
+
+ /* bootout might fail if not already running, so ignore */
+ launchctl_boot_plist(0, filename);
+ if (launchctl_boot_plist(1, filename))
+ die(_("failed to bootstrap service %s"), filename);
+ }
+
+ free(filename);
+ free(name);
+ strbuf_release(&plist);
+ strbuf_release(&plist2);
+ return 0;
+}
+
+static int launchctl_add_plists(void)
+{
+ const char *exec_path = git_exec_path();
+
+ return launchctl_schedule_plist(exec_path, SCHEDULE_HOURLY) ||
+ launchctl_schedule_plist(exec_path, SCHEDULE_DAILY) ||
+ launchctl_schedule_plist(exec_path, SCHEDULE_WEEKLY);
+}
+
+static int launchctl_update_schedule(int run_maintenance, int fd)
+{
+ if (run_maintenance)
+ return launchctl_add_plists();
+ else
+ return launchctl_remove_plists();
+}
+
+static int is_schtasks_available(void)
+{
+ const char *cmd = "schtasks";
+ int is_available;
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+#ifdef GIT_WINDOWS_NATIVE
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static char *schtasks_task_name(const char *frequency)
+{
+ struct strbuf label = STRBUF_INIT;
+ strbuf_addf(&label, "Git Maintenance (%s)", frequency);
+ return strbuf_detach(&label, NULL);
+}
+
+static int schtasks_remove_task(enum schedule_priority schedule)
+{
+ const char *cmd = "schtasks";
+ struct child_process child = CHILD_PROCESS_INIT;
+ const char *frequency = get_frequency(schedule);
+ char *name = schtasks_task_name(frequency);
+
+ get_schedule_cmd(&cmd, NULL);
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, "/delete", "/tn", name, "/f", NULL);
+ free(name);
+
+ return run_command(&child);
+}
+
+static int schtasks_remove_tasks(void)
+{
+ return schtasks_remove_task(SCHEDULE_HOURLY) ||
+ schtasks_remove_task(SCHEDULE_DAILY) ||
+ schtasks_remove_task(SCHEDULE_WEEKLY);
+}
+
+static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule)
+{
+ const char *cmd = "schtasks";
+ int result;
+ struct child_process child = CHILD_PROCESS_INIT;
+ const char *xml;
+ struct tempfile *tfile;
+ const char *frequency = get_frequency(schedule);
+ char *name = schtasks_task_name(frequency);
+ struct strbuf tfilename = STRBUF_INIT;
+
+ get_schedule_cmd(&cmd, NULL);
+
+ strbuf_addf(&tfilename, "%s/schedule_%s_XXXXXX",
+ get_git_common_dir(), frequency);
+ tfile = xmks_tempfile(tfilename.buf);
+ strbuf_release(&tfilename);
+
+ if (!fdopen_tempfile(tfile, "w"))
+ die(_("failed to create temp xml file"));
+
+ xml = "<?xml version=\"1.0\" ?>\n"
+ "<Task version=\"1.4\" xmlns=\"http://schemas.microsoft.com/windows/2004/02/mit/task\">\n"
+ "<Triggers>\n"
+ "<CalendarTrigger>\n";
+ fputs(xml, tfile->fp);
+
+ switch (schedule) {
+ case SCHEDULE_HOURLY:
+ fprintf(tfile->fp,
+ "<StartBoundary>2020-01-01T01:00:00</StartBoundary>\n"
+ "<Enabled>true</Enabled>\n"
+ "<ScheduleByDay>\n"
+ "<DaysInterval>1</DaysInterval>\n"
+ "</ScheduleByDay>\n"
+ "<Repetition>\n"
+ "<Interval>PT1H</Interval>\n"
+ "<Duration>PT23H</Duration>\n"
+ "<StopAtDurationEnd>false</StopAtDurationEnd>\n"
+ "</Repetition>\n");
+ break;
+
+ case SCHEDULE_DAILY:
+ fprintf(tfile->fp,
+ "<StartBoundary>2020-01-01T00:00:00</StartBoundary>\n"
+ "<Enabled>true</Enabled>\n"
+ "<ScheduleByWeek>\n"
+ "<DaysOfWeek>\n"
+ "<Monday />\n"
+ "<Tuesday />\n"
+ "<Wednesday />\n"
+ "<Thursday />\n"
+ "<Friday />\n"
+ "<Saturday />\n"
+ "</DaysOfWeek>\n"
+ "<WeeksInterval>1</WeeksInterval>\n"
+ "</ScheduleByWeek>\n");
+ break;
+
+ case SCHEDULE_WEEKLY:
+ fprintf(tfile->fp,
+ "<StartBoundary>2020-01-01T00:00:00</StartBoundary>\n"
+ "<Enabled>true</Enabled>\n"
+ "<ScheduleByWeek>\n"
+ "<DaysOfWeek>\n"
+ "<Sunday />\n"
+ "</DaysOfWeek>\n"
+ "<WeeksInterval>1</WeeksInterval>\n"
+ "</ScheduleByWeek>\n");
+ break;
+
+ default:
+ break;
+ }
+
+ xml = "</CalendarTrigger>\n"
+ "</Triggers>\n"
+ "<Principals>\n"
+ "<Principal id=\"Author\">\n"
+ "<LogonType>InteractiveToken</LogonType>\n"
+ "<RunLevel>LeastPrivilege</RunLevel>\n"
+ "</Principal>\n"
+ "</Principals>\n"
+ "<Settings>\n"
+ "<MultipleInstancesPolicy>IgnoreNew</MultipleInstancesPolicy>\n"
+ "<Enabled>true</Enabled>\n"
+ "<Hidden>true</Hidden>\n"
+ "<UseUnifiedSchedulingEngine>true</UseUnifiedSchedulingEngine>\n"
+ "<WakeToRun>false</WakeToRun>\n"
+ "<ExecutionTimeLimit>PT72H</ExecutionTimeLimit>\n"
+ "<Priority>7</Priority>\n"
+ "</Settings>\n"
+ "<Actions Context=\"Author\">\n"
+ "<Exec>\n"
+ "<Command>\"%s\\git.exe\"</Command>\n"
+ "<Arguments>--exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%s</Arguments>\n"
+ "</Exec>\n"
+ "</Actions>\n"
+ "</Task>\n";
+ fprintf(tfile->fp, xml, exec_path, exec_path, frequency);
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, "/create", "/tn", name, "/f", "/xml",
+ get_tempfile_path(tfile), NULL);
+ close_tempfile_gently(tfile);
+
+ child.no_stdout = 1;
+ child.no_stderr = 1;
+
+ if (start_command(&child))
+ die(_("failed to start schtasks"));
+ result = finish_command(&child);
+
+ delete_tempfile(&tfile);
+ free(name);
+ return result;
+}
+
+static int schtasks_schedule_tasks(void)
+{
+ const char *exec_path = git_exec_path();
+
+ return schtasks_schedule_task(exec_path, SCHEDULE_HOURLY) ||
+ schtasks_schedule_task(exec_path, SCHEDULE_DAILY) ||
+ schtasks_schedule_task(exec_path, SCHEDULE_WEEKLY);
+}
+
+static int schtasks_update_schedule(int run_maintenance, int fd)
+{
+ if (run_maintenance)
+ return schtasks_schedule_tasks();
+ else
+ return schtasks_remove_tasks();
+}
+
+MAYBE_UNUSED
+static int check_crontab_process(const char *cmd)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ strvec_split(&child.args, cmd);
+ strvec_push(&child.args, "-l");
+ child.no_stdin = 1;
+ child.no_stdout = 1;
+ child.no_stderr = 1;
+ child.silent_exec_failure = 1;
+
+ if (start_command(&child))
+ return 0;
+ /* Ignore exit code, as an empty crontab will return error. */
+ finish_command(&child);
+ return 1;
+}
+
+static int is_crontab_available(void)
+{
+ const char *cmd = "crontab";
+ int is_available;
+
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+#ifdef __APPLE__
+ /*
+ * macOS has cron, but it requires special permissions and will
+ * create a UI alert when attempting to run this command.
+ */
+ return 0;
+#else
+ return check_crontab_process(cmd);
+#endif
+}
+
+#define BEGIN_LINE "# BEGIN GIT MAINTENANCE SCHEDULE"
+#define END_LINE "# END GIT MAINTENANCE SCHEDULE"
+
+static int crontab_update_schedule(int run_maintenance, int fd)
+{
+ const char *cmd = "crontab";
+ int result = 0;
+ int in_old_region = 0;
+ struct child_process crontab_list = CHILD_PROCESS_INIT;
+ struct child_process crontab_edit = CHILD_PROCESS_INIT;
+ FILE *cron_list, *cron_in;
+ struct strbuf line = STRBUF_INIT;
+ struct tempfile *tmpedit = NULL;
+
+ get_schedule_cmd(&cmd, NULL);
+ strvec_split(&crontab_list.args, cmd);
+ strvec_push(&crontab_list.args, "-l");
+ crontab_list.in = -1;
+ crontab_list.out = dup(fd);
+ crontab_list.git_cmd = 0;
+
+ if (start_command(&crontab_list))
+ return error(_("failed to run 'crontab -l'; your system might not support 'cron'"));
+
+ /* Ignore exit code, as an empty crontab will return error. */
+ finish_command(&crontab_list);
+
+ tmpedit = mks_tempfile_t(".git_cron_edit_tmpXXXXXX");
+ if (!tmpedit) {
+ result = error(_("failed to create crontab temporary file"));
+ goto out;
+ }
+ cron_in = fdopen_tempfile(tmpedit, "w");
+ if (!cron_in) {
+ result = error(_("failed to open temporary file"));
+ goto out;
+ }
+
+ /*
+ * Read from the .lock file, filtering out the old
+ * schedule while appending the new schedule.
+ */
+ cron_list = fdopen(fd, "r");
+ rewind(cron_list);
+
+ while (!strbuf_getline_lf(&line, cron_list)) {
+ if (!in_old_region && !strcmp(line.buf, BEGIN_LINE))
+ in_old_region = 1;
+ else if (in_old_region && !strcmp(line.buf, END_LINE))
+ in_old_region = 0;
+ else if (!in_old_region)
+ fprintf(cron_in, "%s\n", line.buf);
+ }
+ strbuf_release(&line);
+
+ if (run_maintenance) {
+ struct strbuf line_format = STRBUF_INIT;
+ const char *exec_path = git_exec_path();
+
+ fprintf(cron_in, "%s\n", BEGIN_LINE);
+ fprintf(cron_in,
+ "# The following schedule was created by Git\n");
+ fprintf(cron_in, "# Any edits made in this region might be\n");
+ fprintf(cron_in,
+ "# replaced in the future by a Git command.\n\n");
+
+ strbuf_addf(&line_format,
+ "%%s %%s * * %%s \"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%s\n",
+ exec_path, exec_path);
+ fprintf(cron_in, line_format.buf, "0", "1-23", "*", "hourly");
+ fprintf(cron_in, line_format.buf, "0", "0", "1-6", "daily");
+ fprintf(cron_in, line_format.buf, "0", "0", "0", "weekly");
+ strbuf_release(&line_format);
+
+ fprintf(cron_in, "\n%s\n", END_LINE);
+ }
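+	/*
+	 * As a sketch (with an illustrative exec path of
+	 * /usr/libexec/git-core), the region written above expands to:
+	 *
+	 *	# BEGIN GIT MAINTENANCE SCHEDULE
+	 *	0 1-23 * * * "/usr/libexec/git-core/git" --exec-path="/usr/libexec/git-core" for-each-repo --config=maintenance.repo maintenance run --schedule=hourly
+	 *	0 0 * * 1-6 <same command> --schedule=daily
+	 *	0 0 * * 0 <same command> --schedule=weekly
+	 *	# END GIT MAINTENANCE SCHEDULE
+	 *
+	 * plus the explanatory comment lines printed above.
+	 */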
+
+ fflush(cron_in);
+
+ strvec_split(&crontab_edit.args, cmd);
+ strvec_push(&crontab_edit.args, get_tempfile_path(tmpedit));
+ crontab_edit.git_cmd = 0;
+
+ if (start_command(&crontab_edit)) {
+ result = error(_("failed to run 'crontab'; your system might not support 'cron'"));
+ goto out;
+ }
+
+ if (finish_command(&crontab_edit))
+ result = error(_("'crontab' died"));
+ else
+ fclose(cron_list);
+out:
+ delete_tempfile(&tmpedit);
+ return result;
+}
+
+static int real_is_systemd_timer_available(void)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&child.args, "systemctl", "--user", "list-timers", NULL);
+ child.no_stdin = 1;
+ child.no_stdout = 1;
+ child.no_stderr = 1;
+ child.silent_exec_failure = 1;
+
+ if (start_command(&child))
+ return 0;
+ if (finish_command(&child))
+ return 0;
+ return 1;
+}
+
+static int is_systemd_timer_available(void)
+{
+ const char *cmd = "systemctl";
+ int is_available;
+
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+ return real_is_systemd_timer_available();
+}
+
+static char *xdg_config_home_systemd(const char *filename)
+{
+ return xdg_config_home_for("systemd/user", filename);
+}
+
+static int systemd_timer_enable_unit(int enable,
+ enum schedule_priority schedule)
+{
+ const char *cmd = "systemctl";
+ struct child_process child = CHILD_PROCESS_INIT;
+ const char *frequency = get_frequency(schedule);
+
+ /*
+ * Disabling the systemd unit while it is already disabled makes
+ * systemctl print an error.
+ * Let's ignore it since it means we already are in the expected state:
+ * the unit is disabled.
+ *
+ * On the other hand, enabling a systemd unit which is already enabled
+ * produces no error.
+ */
+ if (!enable)
+ child.no_stderr = 1;
+
+ get_schedule_cmd(&cmd, NULL);
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, "--user", enable ? "enable" : "disable",
+ "--now", NULL);
+ strvec_pushf(&child.args, "git-maintenance@%s.timer", frequency);
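+	/*
+	 * At this point child.args holds, for instance, the equivalent of
+	 * "systemctl --user enable --now git-maintenance@hourly.timer" (the
+	 * verb and frequency vary per call); systemd substitutes the
+	 * instance name, here "hourly", for the "%i" specifier in the unit
+	 * templates.
+	 */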
+
+ if (start_command(&child))
+ return error(_("failed to start systemctl"));
+ if (finish_command(&child))
+ /*
+ * Disabling an already disabled systemd unit makes
+ * systemctl fail.
+ * Let's ignore this failure.
+ *
+ * Enabling an enabled systemd unit doesn't fail.
+ */
+ if (enable)
+ return error(_("failed to run systemctl"));
+ return 0;
+}
+
+static int systemd_timer_delete_unit_templates(void)
+{
+ int ret = 0;
+ char *filename = xdg_config_home_systemd("git-maintenance@.timer");
+ if (unlink(filename) && !is_missing_file_error(errno))
+ ret = error_errno(_("failed to delete '%s'"), filename);
+ FREE_AND_NULL(filename);
+
+ filename = xdg_config_home_systemd("git-maintenance@.service");
+ if (unlink(filename) && !is_missing_file_error(errno))
+ ret = error_errno(_("failed to delete '%s'"), filename);
+
+ free(filename);
+ return ret;
+}
+
+static int systemd_timer_delete_units(void)
+{
+ return systemd_timer_enable_unit(0, SCHEDULE_HOURLY) ||
+ systemd_timer_enable_unit(0, SCHEDULE_DAILY) ||
+ systemd_timer_enable_unit(0, SCHEDULE_WEEKLY) ||
+ systemd_timer_delete_unit_templates();
+}
+
+static int systemd_timer_write_unit_templates(const char *exec_path)
+{
+ char *filename;
+ FILE *file;
+ const char *unit;
+
+ filename = xdg_config_home_systemd("git-maintenance@.timer");
+ if (safe_create_leading_directories(filename)) {
+ error(_("failed to create directories for '%s'"), filename);
+ goto error;
+ }
+ file = fopen_or_warn(filename, "w");
+ if (!file)
+ goto error;
+
+ unit = "# This file was created and is maintained by Git.\n"
+ "# Any edits made in this file might be replaced in the future\n"
+ "# by a Git command.\n"
+ "\n"
+ "[Unit]\n"
+ "Description=Optimize Git repositories data\n"
+ "\n"
+ "[Timer]\n"
+ "OnCalendar=%i\n"
+ "Persistent=true\n"
+ "\n"
+ "[Install]\n"
+ "WantedBy=timers.target\n";
+ if (fputs(unit, file) == EOF) {
+ error(_("failed to write to '%s'"), filename);
+ fclose(file);
+ goto error;
+ }
+ if (fclose(file) == EOF) {
+ error_errno(_("failed to flush '%s'"), filename);
+ goto error;
+ }
+ free(filename);
+
+ filename = xdg_config_home_systemd("git-maintenance@.service");
+ file = fopen_or_warn(filename, "w");
+ if (!file)
+ goto error;
+
+ unit = "# This file was created and is maintained by Git.\n"
+ "# Any edits made in this file might be replaced in the future\n"
+ "# by a Git command.\n"
+ "\n"
+ "[Unit]\n"
+ "Description=Optimize Git repositories data\n"
+ "\n"
+ "[Service]\n"
+ "Type=oneshot\n"
+ "ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%i\n"
+ "LockPersonality=yes\n"
+ "MemoryDenyWriteExecute=yes\n"
+ "NoNewPrivileges=yes\n"
+ "RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6\n"
+ "RestrictNamespaces=yes\n"
+ "RestrictRealtime=yes\n"
+ "RestrictSUIDSGID=yes\n"
+ "SystemCallArchitectures=native\n"
+ "SystemCallFilter=@system-service\n";
+ if (fprintf(file, unit, exec_path, exec_path) < 0) {
+ error(_("failed to write to '%s'"), filename);
+ fclose(file);
+ goto error;
+ }
+ if (fclose(file) == EOF) {
+ error_errno(_("failed to flush '%s'"), filename);
+ goto error;
+ }
+ free(filename);
+ return 0;
+
+error:
+ free(filename);
+ systemd_timer_delete_unit_templates();
+ return -1;
+}
+
+static int systemd_timer_setup_units(void)
+{
+ const char *exec_path = git_exec_path();
+
+ int ret = systemd_timer_write_unit_templates(exec_path) ||
+ systemd_timer_enable_unit(1, SCHEDULE_HOURLY) ||
+ systemd_timer_enable_unit(1, SCHEDULE_DAILY) ||
+ systemd_timer_enable_unit(1, SCHEDULE_WEEKLY);
+ if (ret)
+ systemd_timer_delete_units();
+ return ret;
+}
+
+static int systemd_timer_update_schedule(int run_maintenance, int fd)
+{
+ if (run_maintenance)
+ return systemd_timer_setup_units();
+ else
+ return systemd_timer_delete_units();
+}
+
+enum scheduler {
+ SCHEDULER_INVALID = -1,
+ SCHEDULER_AUTO,
+ SCHEDULER_CRON,
+ SCHEDULER_SYSTEMD,
+ SCHEDULER_LAUNCHCTL,
+ SCHEDULER_SCHTASKS,
+};
+
+static const struct {
+ const char *name;
+ int (*is_available)(void);
+ int (*update_schedule)(int run_maintenance, int fd);
+} scheduler_fn[] = {
+ [SCHEDULER_CRON] = {
+ .name = "crontab",
+ .is_available = is_crontab_available,
+ .update_schedule = crontab_update_schedule,
+ },
+ [SCHEDULER_SYSTEMD] = {
+ .name = "systemctl",
+ .is_available = is_systemd_timer_available,
+ .update_schedule = systemd_timer_update_schedule,
+ },
+ [SCHEDULER_LAUNCHCTL] = {
+ .name = "launchctl",
+ .is_available = is_launchctl_available,
+ .update_schedule = launchctl_update_schedule,
+ },
+ [SCHEDULER_SCHTASKS] = {
+ .name = "schtasks",
+ .is_available = is_schtasks_available,
+ .update_schedule = schtasks_update_schedule,
+ },
+};
+
+static enum scheduler parse_scheduler(const char *value)
+{
+ if (!value)
+ return SCHEDULER_INVALID;
+ else if (!strcasecmp(value, "auto"))
+ return SCHEDULER_AUTO;
+ else if (!strcasecmp(value, "cron") || !strcasecmp(value, "crontab"))
+ return SCHEDULER_CRON;
+ else if (!strcasecmp(value, "systemd") ||
+ !strcasecmp(value, "systemd-timer"))
+ return SCHEDULER_SYSTEMD;
+ else if (!strcasecmp(value, "launchctl"))
+ return SCHEDULER_LAUNCHCTL;
+ else if (!strcasecmp(value, "schtasks"))
+ return SCHEDULER_SCHTASKS;
+ else
+ return SCHEDULER_INVALID;
+}
+
+static int maintenance_opt_scheduler(const struct option *opt, const char *arg,
+ int unset)
+{
+ enum scheduler *scheduler = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ *scheduler = parse_scheduler(arg);
+ if (*scheduler == SCHEDULER_INVALID)
+ return error(_("unrecognized --scheduler argument '%s'"), arg);
+ return 0;
+}
+
+struct maintenance_start_opts {
+ enum scheduler scheduler;
+};
+
+static enum scheduler resolve_scheduler(enum scheduler scheduler)
+{
+ if (scheduler != SCHEDULER_AUTO)
+ return scheduler;
+
+#if defined(__APPLE__)
+ return SCHEDULER_LAUNCHCTL;
+
+#elif defined(GIT_WINDOWS_NATIVE)
+ return SCHEDULER_SCHTASKS;
+
+#elif defined(__linux__)
+ if (is_systemd_timer_available())
+ return SCHEDULER_SYSTEMD;
+ else if (is_crontab_available())
+ return SCHEDULER_CRON;
+ else
+ die(_("neither systemd timers nor crontab are available"));
+
+#else
+ return SCHEDULER_CRON;
+#endif
+}
+
+static void validate_scheduler(enum scheduler scheduler)
+{
+ if (scheduler == SCHEDULER_INVALID)
+ BUG("invalid scheduler");
+ if (scheduler == SCHEDULER_AUTO)
+ BUG("resolve_scheduler should have been called before");
+
+ if (!scheduler_fn[scheduler].is_available())
+ die(_("%s scheduler is not available"),
+ scheduler_fn[scheduler].name);
+}
+
+static int update_background_schedule(const struct maintenance_start_opts *opts,
+ int enable)
+{
+ unsigned int i;
+ int result = 0;
+ struct lock_file lk;
+ char *lock_path = xstrfmt("%s/schedule", the_repository->objects->odb->path);
+
+ if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) {
+ free(lock_path);
+ return error(_("another process is scheduling background maintenance"));
+ }
+
+ for (i = 1; i < ARRAY_SIZE(scheduler_fn); i++) {
+ if (enable && opts->scheduler == i)
+ continue;
+ if (!scheduler_fn[i].is_available())
+ continue;
+ scheduler_fn[i].update_schedule(0, get_lock_file_fd(&lk));
+ }
+
+ if (enable)
+ result = scheduler_fn[opts->scheduler].update_schedule(
+ 1, get_lock_file_fd(&lk));
+
+ rollback_lock_file(&lk);
+
+ free(lock_path);
+ return result;
+}
+
+static const char *const builtin_maintenance_start_usage[] = {
+ N_("git maintenance start [--scheduler=<scheduler>]"),
+ NULL
+};
+
+static int maintenance_start(int argc, const char **argv, const char *prefix)
+{
+ struct maintenance_start_opts opts = { 0 };
+ struct option options[] = {
+ OPT_CALLBACK_F(
+ 0, "scheduler", &opts.scheduler, N_("scheduler"),
+ N_("scheduler to trigger git maintenance run"),
+ PARSE_OPT_NONEG, maintenance_opt_scheduler),
+ OPT_END()
+ };
+ const char *register_args[] = { "register", NULL };
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_maintenance_start_usage, 0);
+ if (argc)
+ usage_with_options(builtin_maintenance_start_usage, options);
+
+ opts.scheduler = resolve_scheduler(opts.scheduler);
+ validate_scheduler(opts.scheduler);
+
+ if (maintenance_register(ARRAY_SIZE(register_args)-1, register_args, NULL))
+ warning(_("failed to add repo to global config"));
+ return update_background_schedule(&opts, 1);
+}
+
+static const char *const builtin_maintenance_stop_usage[] = {
+ "git maintenance stop",
+ NULL
+};
+
+static int maintenance_stop(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_maintenance_stop_usage, 0);
+ if (argc)
+ usage_with_options(builtin_maintenance_stop_usage, options);
+ return update_background_schedule(NULL, 0);
+}
+
+static const char * const builtin_maintenance_usage[] = {
+ N_("git maintenance <subcommand> [<options>]"),
+ NULL,
+};
+
+int cmd_maintenance(int argc, const char **argv, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option builtin_maintenance_options[] = {
+ OPT_SUBCOMMAND("run", &fn, maintenance_run),
+ OPT_SUBCOMMAND("start", &fn, maintenance_start),
+ OPT_SUBCOMMAND("stop", &fn, maintenance_stop),
+ OPT_SUBCOMMAND("register", &fn, maintenance_register),
+ OPT_SUBCOMMAND("unregister", &fn, maintenance_unregister),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, prefix, builtin_maintenance_options,
+ builtin_maintenance_usage, 0);
+ return fn(argc, argv, prefix);
+}
diff --git a/builtin/get-tar-commit-id.c b/builtin/get-tar-commit-id.c
new file mode 100644
index 0000000..491af92
--- /dev/null
+++ b/builtin/get-tar-commit-id.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2005, 2006 Rene Scharfe
+ */
+#include "cache.h"
+#include "commit.h"
+#include "tar.h"
+#include "builtin.h"
+#include "quote.h"
+
+static const char builtin_get_tar_commit_id_usage[] =
+"git get-tar-commit-id";
+
+/* ustar header + extended global header content */
+#define RECORDSIZE (512)
+#define HEADERSIZE (2 * RECORDSIZE)
+
+int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix)
+{
+ char buffer[HEADERSIZE];
+ struct ustar_header *header = (struct ustar_header *)buffer;
+ char *content = buffer + RECORDSIZE;
+ const char *comment;
+ ssize_t n;
+ long len;
+ char *end;
+
+ if (argc != 1)
+ usage(builtin_get_tar_commit_id_usage);
+
+ n = read_in_full(0, buffer, HEADERSIZE);
+ if (n < 0)
+ die_errno("git get-tar-commit-id: read error");
+ if (n != HEADERSIZE)
+ die_errno("git get-tar-commit-id: EOF before reading tar header");
+ if (header->typeflag[0] != 'g')
+ return 1;
+
+ len = strtol(content, &end, 10);
+ if (errno == ERANGE || end == content || len < 0)
+ return 1;
+ if (!skip_prefix(end, " comment=", &comment))
+ return 1;
+ len -= comment - content;
+ if (len < 1 || !(len % 2) ||
+ hash_algo_by_length((len - 1) / 2) == GIT_HASH_UNKNOWN)
+ return 1;
+
+ if (write_in_full(1, comment, len) < 0)
+ die_errno("git get-tar-commit-id: write error");
+
+ return 0;
+}
diff --git a/builtin/grep.c b/builtin/grep.c
new file mode 100644
index 0000000..f7821c5
--- /dev/null
+++ b/builtin/grep.c
@@ -0,0 +1,1252 @@
+/*
+ * Builtin "git grep"
+ *
+ * Copyright (c) 2006 Junio C Hamano
+ */
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "blob.h"
+#include "tree.h"
+#include "commit.h"
+#include "tag.h"
+#include "tree-walk.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "run-command.h"
+#include "userdiff.h"
+#include "grep.h"
+#include "quote.h"
+#include "dir.h"
+#include "pathspec.h"
+#include "submodule.h"
+#include "submodule-config.h"
+#include "object-store.h"
+#include "packfile.h"
+
+static const char *grep_prefix;
+
+static char const * const grep_usage[] = {
+ N_("git grep [<options>] [-e] <pattern> [<rev>...] [[--] <path>...]"),
+ NULL
+};
+
+static int recurse_submodules;
+
+static int num_threads;
+
+static pthread_t *threads;
+
+/* We use one producer thread and THREADS consumer
+ * threads. The producer adds struct work_items to 'todo' and the
+ * consumers pick work items from the same array.
+ */
+struct work_item {
+ struct grep_source source;
+ char done;
+ struct strbuf out;
+};
+
+/* In the range [todo_done, todo_start) in 'todo' we have work_items
+ * that have been or are processed by a consumer thread. We haven't
+ * written the result for these to stdout yet.
+ *
+ * The work_items in [todo_start, todo_end) are waiting to be picked
+ * up by a consumer thread.
+ *
+ * The ranges are modulo TODO_SIZE.
+ */
+#define TODO_SIZE 128
+static struct work_item todo[TODO_SIZE];
+static int todo_start;
+static int todo_end;
+static int todo_done;
+
+/* Have all work items been added? */
+static int all_work_added;
+
+static struct repository **repos_to_free;
+static size_t repos_to_free_nr, repos_to_free_alloc;
+
+/* This lock protects all the variables above. */
+static pthread_mutex_t grep_mutex;
+
+static inline void grep_lock(void)
+{
+ pthread_mutex_lock(&grep_mutex);
+}
+
+static inline void grep_unlock(void)
+{
+ pthread_mutex_unlock(&grep_mutex);
+}
+
+/* Signalled when a new work_item is added to todo. */
+static pthread_cond_t cond_add;
+
+/* Signalled when the result from one work_item is written to
+ * stdout.
+ */
+static pthread_cond_t cond_write;
+
+/* Signalled when we are finished with everything. */
+static pthread_cond_t cond_result;
+
+static int skip_first_line;
+
+static void add_work(struct grep_opt *opt, struct grep_source *gs)
+{
+ if (opt->binary != GREP_BINARY_TEXT)
+ grep_source_load_driver(gs, opt->repo->index);
+
+ grep_lock();
+
+ while ((todo_end+1) % ARRAY_SIZE(todo) == todo_done) {
+ pthread_cond_wait(&cond_write, &grep_mutex);
+ }
+
+ todo[todo_end].source = *gs;
+ todo[todo_end].done = 0;
+ strbuf_reset(&todo[todo_end].out);
+ todo_end = (todo_end + 1) % ARRAY_SIZE(todo);
+
+ pthread_cond_signal(&cond_add);
+ grep_unlock();
+}
+
+static struct work_item *get_work(void)
+{
+ struct work_item *ret;
+
+ grep_lock();
+ while (todo_start == todo_end && !all_work_added) {
+ pthread_cond_wait(&cond_add, &grep_mutex);
+ }
+
+ if (todo_start == todo_end && all_work_added) {
+ ret = NULL;
+ } else {
+ ret = &todo[todo_start];
+ todo_start = (todo_start + 1) % ARRAY_SIZE(todo);
+ }
+ grep_unlock();
+ return ret;
+}
+
+static void work_done(struct work_item *w)
+{
+ int old_done;
+
+ grep_lock();
+ w->done = 1;
+ old_done = todo_done;
+ for(; todo[todo_done].done && todo_done != todo_start;
+ todo_done = (todo_done+1) % ARRAY_SIZE(todo)) {
+ w = &todo[todo_done];
+ if (w->out.len) {
+ const char *p = w->out.buf;
+ size_t len = w->out.len;
+
+ /* Skip the leading hunk mark of the first file. */
+ if (skip_first_line) {
+ while (len) {
+ len--;
+ if (*p++ == '\n')
+ break;
+ }
+ skip_first_line = 0;
+ }
+
+ write_or_die(1, p, len);
+ }
+ grep_source_clear(&w->source);
+ }
+
+ if (old_done != todo_done)
+ pthread_cond_signal(&cond_write);
+
+ if (all_work_added && todo_done == todo_end)
+ pthread_cond_signal(&cond_result);
+
+ grep_unlock();
+}
+
+static void free_repos(void)
+{
+ int i;
+
+ for (i = 0; i < repos_to_free_nr; i++) {
+ repo_clear(repos_to_free[i]);
+ free(repos_to_free[i]);
+ }
+ FREE_AND_NULL(repos_to_free);
+ repos_to_free_nr = 0;
+ repos_to_free_alloc = 0;
+}
+
+static void *run(void *arg)
+{
+ int hit = 0;
+ struct grep_opt *opt = arg;
+
+ while (1) {
+ struct work_item *w = get_work();
+ if (!w)
+ break;
+
+ opt->output_priv = w;
+ hit |= grep_source(opt, &w->source);
+ grep_source_clear_data(&w->source);
+ work_done(w);
+ }
+ free_grep_patterns(opt);
+ free(opt);
+
+ return (void*) (intptr_t) hit;
+}
+
+static void strbuf_out(struct grep_opt *opt, const void *buf, size_t size)
+{
+ struct work_item *w = opt->output_priv;
+ strbuf_add(&w->out, buf, size);
+}
+
+static void start_threads(struct grep_opt *opt)
+{
+ int i;
+
+ pthread_mutex_init(&grep_mutex, NULL);
+ pthread_mutex_init(&grep_attr_mutex, NULL);
+ pthread_cond_init(&cond_add, NULL);
+ pthread_cond_init(&cond_write, NULL);
+ pthread_cond_init(&cond_result, NULL);
+ grep_use_locks = 1;
+ enable_obj_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(todo); i++) {
+ strbuf_init(&todo[i].out, 0);
+ }
+
+ CALLOC_ARRAY(threads, num_threads);
+ for (i = 0; i < num_threads; i++) {
+ int err;
+ struct grep_opt *o = grep_opt_dup(opt);
+ o->output = strbuf_out;
+ compile_grep_patterns(o);
+ err = pthread_create(&threads[i], NULL, run, o);
+
+ if (err)
+ die(_("grep: failed to create thread: %s"),
+ strerror(err));
+ }
+}
+
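+/*
+ * Signal that no more work will be added, wait for the queue to drain, wake
+ * the workers so they can exit, join them and tear down the thread machinery.
+ * Returns the combined hit status of all workers.
+ */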
+static int wait_all(void)
+{
+ int hit = 0;
+ int i;
+
+ if (!HAVE_THREADS)
+ BUG("Never call this function unless you have started threads");
+
+ grep_lock();
+ all_work_added = 1;
+
+ /* Wait until all work is done. */
+ while (todo_done != todo_end)
+ pthread_cond_wait(&cond_result, &grep_mutex);
+
+ /* Wake up all the consumer threads so they can see that there
+ * is no more work to do.
+ */
+ pthread_cond_broadcast(&cond_add);
+ grep_unlock();
+
+ for (i = 0; i < num_threads; i++) {
+ void *h;
+ pthread_join(threads[i], &h);
+ hit |= (int) (intptr_t) h;
+ }
+
+ free(threads);
+
+ pthread_mutex_destroy(&grep_mutex);
+ pthread_mutex_destroy(&grep_attr_mutex);
+ pthread_cond_destroy(&cond_add);
+ pthread_cond_destroy(&cond_write);
+ pthread_cond_destroy(&cond_result);
+ grep_use_locks = 0;
+ disable_obj_read_lock();
+
+ return hit;
+}
+
+static int grep_cmd_config(const char *var, const char *value, void *cb)
+{
+ int st = grep_config(var, value, cb);
+ if (git_color_default_config(var, value, NULL) < 0)
+ st = -1;
+
+ if (!strcmp(var, "grep.threads")) {
+ num_threads = git_config_int(var, value);
+ if (num_threads < 0)
+ die(_("invalid number of threads specified (%d) for %s"),
+ num_threads, var);
+ else if (!HAVE_THREADS && num_threads > 1) {
+ /*
+ * TRANSLATORS: %s is the configuration
+ * variable for tweaking threads, currently
+ * grep.threads
+ */
+ warning(_("no threads support, ignoring %s"), var);
+ num_threads = 1;
+ }
+ }
+
+ if (!strcmp(var, "submodule.recurse"))
+ recurse_submodules = git_config_bool(var, value);
+
+ return st;
+}
+
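+/*
+ * Format the name shown for a match: keep any tree prefix, make the path
+ * relative to the prefix the user started from (unless --full-name), and
+ * quote it unless -z/--null output was requested.
+ */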
+static void grep_source_name(struct grep_opt *opt, const char *filename,
+ int tree_name_len, struct strbuf *out)
+{
+ strbuf_reset(out);
+
+ if (opt->null_following_name) {
+ if (opt->relative && grep_prefix) {
+ struct strbuf rel_buf = STRBUF_INIT;
+ const char *rel_name =
+ relative_path(filename + tree_name_len,
+ grep_prefix, &rel_buf);
+
+ if (tree_name_len)
+ strbuf_add(out, filename, tree_name_len);
+
+ strbuf_addstr(out, rel_name);
+ strbuf_release(&rel_buf);
+ } else {
+ strbuf_addstr(out, filename);
+ }
+ return;
+ }
+
+ if (opt->relative && grep_prefix)
+ quote_path(filename + tree_name_len, grep_prefix, out, 0);
+ else
+ quote_c_style(filename + tree_name_len, out, NULL, 0);
+
+ if (tree_name_len)
+ strbuf_insert(out, 0, filename, tree_name_len);
+}
+
+static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
+ const char *filename, int tree_name_len,
+ const char *path)
+{
+ struct strbuf pathbuf = STRBUF_INIT;
+ struct grep_source gs;
+
+ grep_source_name(opt, filename, tree_name_len, &pathbuf);
+ grep_source_init_oid(&gs, pathbuf.buf, path, oid, opt->repo);
+ strbuf_release(&pathbuf);
+
+ if (num_threads > 1) {
+ /*
+ * add_work() copies gs and thus assumes ownership of
+ * its fields, so do not call grep_source_clear()
+ */
+ add_work(opt, &gs);
+ return 0;
+ } else {
+ int hit;
+
+ hit = grep_source(opt, &gs);
+
+ grep_source_clear(&gs);
+ return hit;
+ }
+}
+
+static int grep_file(struct grep_opt *opt, const char *filename)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct grep_source gs;
+
+ grep_source_name(opt, filename, 0, &buf);
+ grep_source_init_file(&gs, buf.buf, filename);
+ strbuf_release(&buf);
+
+ if (num_threads > 1) {
+ /*
+ * add_work() copies gs and thus assumes ownership of
+ * its fields, so do not call grep_source_clear()
+ */
+ add_work(opt, &gs);
+ return 0;
+ } else {
+ int hit;
+
+ hit = grep_source(opt, &gs);
+
+ grep_source_clear(&gs);
+ return hit;
+ }
+}
+
+static void append_path(struct grep_opt *opt, const void *data, size_t len)
+{
+ struct string_list *path_list = opt->output_priv;
+
+ if (len == 1 && *(const char *)data == '\0')
+ return;
+ string_list_append_nodup(path_list, xstrndup(data, len));
+}
+
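+/*
+ * path_list holds the pager command followed by any extra arguments and the
+ * matched paths; run it as a shell command from the directory we started in.
+ */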
+static void run_pager(struct grep_opt *opt, const char *prefix)
+{
+ struct string_list *path_list = opt->output_priv;
+ struct child_process child = CHILD_PROCESS_INIT;
+ int i, status;
+
+ for (i = 0; i < path_list->nr; i++)
+ strvec_push(&child.args, path_list->items[i].string);
+ child.dir = prefix;
+ child.use_shell = 1;
+
+ status = run_command(&child);
+ if (status)
+ exit(status);
+}
+
+static int grep_cache(struct grep_opt *opt,
+ const struct pathspec *pathspec, int cached);
+static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec,
+ struct tree_desc *tree, struct strbuf *base, int tn_len,
+ int check_attr);
+
+static int grep_submodule(struct grep_opt *opt,
+ const struct pathspec *pathspec,
+ const struct object_id *oid,
+ const char *filename, const char *path, int cached)
+{
+ struct repository *subrepo;
+ struct repository *superproject = opt->repo;
+ struct grep_opt subopt;
+ int hit = 0;
+
+ if (!is_submodule_active(superproject, path))
+ return 0;
+
+ subrepo = xmalloc(sizeof(*subrepo));
+ if (repo_submodule_init(subrepo, superproject, path, null_oid())) {
+ free(subrepo);
+ return 0;
+ }
+ ALLOC_GROW(repos_to_free, repos_to_free_nr + 1, repos_to_free_alloc);
+ repos_to_free[repos_to_free_nr++] = subrepo;
+
+ /*
+ * NEEDSWORK: repo_read_gitmodules() might call
+ * add_to_alternates_memory() via config_from_gitmodules(). This
+ * operation causes a race condition with concurrent object readings
+ * performed by the worker threads. That's why we need obj_read_lock()
+ * here. It should be removed once it's no longer necessary to add the
+ * subrepo's odbs to the in-memory alternates list.
+ */
+ obj_read_lock();
+
+ /*
+ * NEEDSWORK: when reading a submodule, the sparsity settings in the
+ * superproject are incorrectly forgotten or misused. For example:
+ *
+ * 1. "command_requires_full_index"
+ * When this setting is turned on for `grep`, only the superproject
+ * knows it. All the submodules are read with their own configs
+ * and get prepare_repo_settings()'d. Therefore, these submodules
+ * "forget" the sparse-index feature switch. As a result, the index
+ * of these submodules are expanded unexpectedly.
+ *
+ * 2. "core_apply_sparse_checkout"
+ * When running `grep` in the superproject, this setting is
+ * populated using the superproject's configs. However, once
+ * initialized, this config is globally accessible and is read by
+ * prepare_repo_settings() for the submodules. For instance, if a
+ * submodule is using a sparse-checkout while the superproject is
+ * not, the config from the superproject will dictate the behavior
+ * for the submodule, making it "forget" its sparse-checkout state.
+ *
+ * 3. "core_sparse_checkout_cone"
+ * ditto.
+ *
+ * Note that this list is not exhaustive.
+ */
+ repo_read_gitmodules(subrepo, 0);
+
+ /*
+ * All code paths tested by test code no longer need submodule ODBs to
+ * be added as alternates, but add it to the list just in case.
+ * Submodule ODBs added through add_submodule_odb_by_path() will be
+ * lazily registered as alternates when needed (and except in an
+ * unexpected code interaction, it won't be needed).
+ */
+ add_submodule_odb_by_path(subrepo->objects->odb->path);
+ obj_read_unlock();
+
+ memcpy(&subopt, opt, sizeof(subopt));
+ subopt.repo = subrepo;
+
+ if (oid) {
+ enum object_type object_type;
+ struct tree_desc tree;
+ void *data;
+ unsigned long size;
+ struct strbuf base = STRBUF_INIT;
+
+ obj_read_lock();
+ object_type = oid_object_info(subrepo, oid, NULL);
+ obj_read_unlock();
+ data = read_object_with_reference(subrepo,
+ oid, OBJ_TREE,
+ &size, NULL);
+ if (!data)
+ die(_("unable to read tree (%s)"), oid_to_hex(oid));
+
+ strbuf_addstr(&base, filename);
+ strbuf_addch(&base, '/');
+
+ init_tree_desc(&tree, data, size);
+ hit = grep_tree(&subopt, pathspec, &tree, &base, base.len,
+ object_type == OBJ_COMMIT);
+ strbuf_release(&base);
+ free(data);
+ } else {
+ hit = grep_cache(&subopt, pathspec, cached);
+ }
+
+ return hit;
+}
+
+static int grep_cache(struct grep_opt *opt,
+ const struct pathspec *pathspec, int cached)
+{
+ struct repository *repo = opt->repo;
+ int hit = 0;
+ int nr;
+ struct strbuf name = STRBUF_INIT;
+ int name_base_len = 0;
+ if (repo->submodule_prefix) {
+ name_base_len = strlen(repo->submodule_prefix);
+ strbuf_addstr(&name, repo->submodule_prefix);
+ }
+
+ if (repo_read_index(repo) < 0)
+ die(_("index file corrupt"));
+
+ for (nr = 0; nr < repo->index->cache_nr; nr++) {
+ const struct cache_entry *ce = repo->index->cache[nr];
+
+ if (!cached && ce_skip_worktree(ce))
+ continue;
+
+ strbuf_setlen(&name, name_base_len);
+ strbuf_addstr(&name, ce->name);
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ enum object_type type;
+ struct tree_desc tree;
+ void *data;
+ unsigned long size;
+
+ data = read_object_file(&ce->oid, &type, &size);
+ init_tree_desc(&tree, data, size);
+
+ hit |= grep_tree(opt, pathspec, &tree, &name, 0, 0);
+ strbuf_setlen(&name, name_base_len);
+ strbuf_addstr(&name, ce->name);
+ free(data);
+ } else if (S_ISREG(ce->ce_mode) &&
+ match_pathspec(repo->index, pathspec, name.buf, name.len, 0, NULL,
+ S_ISDIR(ce->ce_mode) ||
+ S_ISGITLINK(ce->ce_mode))) {
+ /*
+ * If CE_VALID is on, we assume the worktree file and its
+ * cache entry are identical, even if the worktree file has
+ * been modified, so use the cached version instead.
+ */
+ if (cached || (ce->ce_flags & CE_VALID)) {
+ if (ce_stage(ce) || ce_intent_to_add(ce))
+ continue;
+ hit |= grep_oid(opt, &ce->oid, name.buf,
+ 0, name.buf);
+ } else {
+ hit |= grep_file(opt, name.buf);
+ }
+ } else if (recurse_submodules && S_ISGITLINK(ce->ce_mode) &&
+ submodule_path_match(repo->index, pathspec, name.buf, NULL)) {
+ hit |= grep_submodule(opt, pathspec, NULL, ce->name,
+ ce->name, cached);
+ } else {
+ continue;
+ }
+
+ if (ce_stage(ce)) {
+ do {
+ nr++;
+ } while (nr < repo->index->cache_nr &&
+ !strcmp(ce->name, repo->index->cache[nr]->name));
+ nr--; /* compensate for loop control */
+ }
+ if (hit && opt->status_only)
+ break;
+ }
+
+ strbuf_release(&name);
+ return hit;
+}
+
+static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec,
+ struct tree_desc *tree, struct strbuf *base, int tn_len,
+ int check_attr)
+{
+ struct repository *repo = opt->repo;
+ int hit = 0;
+ enum interesting match = entry_not_interesting;
+ struct name_entry entry;
+ int old_baselen = base->len;
+ struct strbuf name = STRBUF_INIT;
+ int name_base_len = 0;
+ if (repo->submodule_prefix) {
+ strbuf_addstr(&name, repo->submodule_prefix);
+ name_base_len = name.len;
+ }
+
+ while (tree_entry(tree, &entry)) {
+ int te_len = tree_entry_len(&entry);
+
+ if (match != all_entries_interesting) {
+ strbuf_addstr(&name, base->buf + tn_len);
+ match = tree_entry_interesting(repo->index,
+ &entry, &name,
+ 0, pathspec);
+ strbuf_setlen(&name, name_base_len);
+
+ if (match == all_entries_not_interesting)
+ break;
+ if (match == entry_not_interesting)
+ continue;
+ }
+
+ strbuf_add(base, entry.path, te_len);
+
+ if (S_ISREG(entry.mode)) {
+ hit |= grep_oid(opt, &entry.oid, base->buf, tn_len,
+ check_attr ? base->buf + tn_len : NULL);
+ } else if (S_ISDIR(entry.mode)) {
+ enum object_type type;
+ struct tree_desc sub;
+ void *data;
+ unsigned long size;
+
+ data = read_object_file(&entry.oid, &type, &size);
+ if (!data)
+ die(_("unable to read tree (%s)"),
+ oid_to_hex(&entry.oid));
+
+ strbuf_addch(base, '/');
+ init_tree_desc(&sub, data, size);
+ hit |= grep_tree(opt, pathspec, &sub, base, tn_len,
+ check_attr);
+ free(data);
+ } else if (recurse_submodules && S_ISGITLINK(entry.mode)) {
+ hit |= grep_submodule(opt, pathspec, &entry.oid,
+ base->buf, base->buf + tn_len,
+ 1); /* ignored */
+ }
+
+ strbuf_setlen(base, old_baselen);
+
+ if (hit && opt->status_only)
+ break;
+ }
+
+ strbuf_release(&name);
+ return hit;
+}
+
+static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
+ struct object *obj, const char *name, const char *path)
+{
+ if (obj->type == OBJ_BLOB)
+ return grep_oid(opt, &obj->oid, name, 0, path);
+ if (obj->type == OBJ_COMMIT || obj->type == OBJ_TREE) {
+ struct tree_desc tree;
+ void *data;
+ unsigned long size;
+ struct strbuf base;
+ int hit, len;
+
+ data = read_object_with_reference(opt->repo,
+ &obj->oid, OBJ_TREE,
+ &size, NULL);
+ if (!data)
+ die(_("unable to read tree (%s)"), oid_to_hex(&obj->oid));
+
+ len = name ? strlen(name) : 0;
+ strbuf_init(&base, PATH_MAX + len + 1);
+ if (len) {
+ strbuf_add(&base, name, len);
+ strbuf_addch(&base, ':');
+ }
+ init_tree_desc(&tree, data, size);
+ hit = grep_tree(opt, pathspec, &tree, &base, base.len,
+ obj->type == OBJ_COMMIT);
+ strbuf_release(&base);
+ free(data);
+ return hit;
+ }
+ die(_("unable to grep from object of type %s"), type_name(obj->type));
+}
+
+static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
+ const struct object_array *list)
+{
+ unsigned int i;
+ int hit = 0;
+ const unsigned int nr = list->nr;
+
+ for (i = 0; i < nr; i++) {
+ struct object *real_obj;
+
+ obj_read_lock();
+ real_obj = deref_tag(opt->repo, list->objects[i].item,
+ NULL, 0);
+ obj_read_unlock();
+
+ if (!real_obj) {
+ char hex[GIT_MAX_HEXSZ + 1];
+ const char *name = list->objects[i].name;
+
+ if (!name) {
+ oid_to_hex_r(hex, &list->objects[i].item->oid);
+ name = hex;
+ }
+ die(_("invalid object '%s' given."), name);
+ }
+
+ /* load the gitmodules file for this rev */
+ if (recurse_submodules) {
+ submodule_free(opt->repo);
+ obj_read_lock();
+ gitmodules_config_oid(&real_obj->oid);
+ obj_read_unlock();
+ }
+ if (grep_object(opt, pathspec, real_obj, list->objects[i].name,
+ list->objects[i].path)) {
+ hit = 1;
+ if (opt->status_only)
+ break;
+ }
+ }
+ return hit;
+}
+
+static int grep_directory(struct grep_opt *opt, const struct pathspec *pathspec,
+ int exc_std, int use_index)
+{
+ struct dir_struct dir = DIR_INIT;
+ int i, hit = 0;
+
+ if (!use_index)
+ dir.flags |= DIR_NO_GITLINKS;
+ if (exc_std)
+ setup_standard_excludes(&dir);
+
+ fill_directory(&dir, opt->repo->index, pathspec);
+ for (i = 0; i < dir.nr; i++) {
+ hit |= grep_file(opt, dir.entries[i]->name);
+ if (hit && opt->status_only)
+ break;
+ }
+ dir_clear(&dir);
+ return hit;
+}
+
+static int context_callback(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct grep_opt *grep_opt = opt->value;
+ int value;
+ const char *endp;
+
+ if (unset) {
+ grep_opt->pre_context = grep_opt->post_context = 0;
+ return 0;
+ }
+ value = strtol(arg, (char **)&endp, 10);
+ if (*endp) {
+ return error(_("switch `%c' expects a numerical value"),
+ opt->short_name);
+ }
+ grep_opt->pre_context = grep_opt->post_context = value;
+ return 0;
+}
+
+static int file_callback(const struct option *opt, const char *arg, int unset)
+{
+ struct grep_opt *grep_opt = opt->value;
+ int from_stdin;
+ FILE *patterns;
+ int lno = 0;
+ struct strbuf sb = STRBUF_INIT;
+
+ BUG_ON_OPT_NEG(unset);
+
+ from_stdin = !strcmp(arg, "-");
+ patterns = from_stdin ? stdin : fopen(arg, "r");
+ if (!patterns)
+ die_errno(_("cannot open '%s'"), arg);
+ while (strbuf_getline(&sb, patterns) == 0) {
+ /* ignore empty line like grep does */
+ if (sb.len == 0)
+ continue;
+
+ append_grep_pat(grep_opt, sb.buf, sb.len, arg, ++lno,
+ GREP_PATTERN);
+ }
+ if (!from_stdin)
+ fclose(patterns);
+ strbuf_release(&sb);
+ return 0;
+}
+
+static int not_callback(const struct option *opt, const char *arg, int unset)
+{
+ struct grep_opt *grep_opt = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ append_grep_pattern(grep_opt, "--not", "command line", 0, GREP_NOT);
+ return 0;
+}
+
+static int and_callback(const struct option *opt, const char *arg, int unset)
+{
+ struct grep_opt *grep_opt = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ append_grep_pattern(grep_opt, "--and", "command line", 0, GREP_AND);
+ return 0;
+}
+
+static int open_callback(const struct option *opt, const char *arg, int unset)
+{
+ struct grep_opt *grep_opt = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ append_grep_pattern(grep_opt, "(", "command line", 0, GREP_OPEN_PAREN);
+ return 0;
+}
+
+static int close_callback(const struct option *opt, const char *arg, int unset)
+{
+ struct grep_opt *grep_opt = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ append_grep_pattern(grep_opt, ")", "command line", 0, GREP_CLOSE_PAREN);
+ return 0;
+}
+
+static int pattern_callback(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct grep_opt *grep_opt = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ append_grep_pattern(grep_opt, arg, "-e option", 0, GREP_PATTERN);
+ return 0;
+}
+
+int cmd_grep(int argc, const char **argv, const char *prefix)
+{
+ int hit = 0;
+ int cached = 0, untracked = 0, opt_exclude = -1;
+ int seen_dashdash = 0;
+ int external_grep_allowed__ignored;
+ const char *show_in_pager = NULL, *default_pager = "dummy";
+ struct grep_opt opt;
+ struct object_array list = OBJECT_ARRAY_INIT;
+ struct pathspec pathspec;
+ struct string_list path_list = STRING_LIST_INIT_DUP;
+ int i;
+ int dummy;
+ int use_index = 1;
+ int allow_revs;
+
+ struct option options[] = {
+ OPT_BOOL(0, "cached", &cached,
+ N_("search in index instead of in the work tree")),
+ OPT_NEGBIT(0, "no-index", &use_index,
+ N_("find in contents not managed by git"), 1),
+ OPT_BOOL(0, "untracked", &untracked,
+ N_("search in both tracked and untracked files")),
+ OPT_SET_INT(0, "exclude-standard", &opt_exclude,
+ N_("ignore files specified via '.gitignore'"), 1),
+ OPT_BOOL(0, "recurse-submodules", &recurse_submodules,
+ N_("recursively search in each submodule")),
+ OPT_GROUP(""),
+ OPT_BOOL('v', "invert-match", &opt.invert,
+ N_("show non-matching lines")),
+ OPT_BOOL('i', "ignore-case", &opt.ignore_case,
+ N_("case insensitive matching")),
+ OPT_BOOL('w', "word-regexp", &opt.word_regexp,
+ N_("match patterns only at word boundaries")),
+ OPT_SET_INT('a', "text", &opt.binary,
+ N_("process binary files as text"), GREP_BINARY_TEXT),
+ OPT_SET_INT('I', NULL, &opt.binary,
+ N_("don't match patterns in binary files"),
+ GREP_BINARY_NOMATCH),
+ OPT_BOOL(0, "textconv", &opt.allow_textconv,
+ N_("process binary files with textconv filters")),
+ OPT_SET_INT('r', "recursive", &opt.max_depth,
+ N_("search in subdirectories (default)"), -1),
+ { OPTION_INTEGER, 0, "max-depth", &opt.max_depth, N_("depth"),
+ N_("descend at most <depth> levels"), PARSE_OPT_NONEG,
+ NULL, 1 },
+ OPT_GROUP(""),
+ OPT_SET_INT('E', "extended-regexp", &opt.pattern_type_option,
+ N_("use extended POSIX regular expressions"),
+ GREP_PATTERN_TYPE_ERE),
+ OPT_SET_INT('G', "basic-regexp", &opt.pattern_type_option,
+ N_("use basic POSIX regular expressions (default)"),
+ GREP_PATTERN_TYPE_BRE),
+ OPT_SET_INT('F', "fixed-strings", &opt.pattern_type_option,
+ N_("interpret patterns as fixed strings"),
+ GREP_PATTERN_TYPE_FIXED),
+ OPT_SET_INT('P', "perl-regexp", &opt.pattern_type_option,
+ N_("use Perl-compatible regular expressions"),
+ GREP_PATTERN_TYPE_PCRE),
+ OPT_GROUP(""),
+ OPT_BOOL('n', "line-number", &opt.linenum, N_("show line numbers")),
+ OPT_BOOL(0, "column", &opt.columnnum, N_("show column number of first match")),
+ OPT_NEGBIT('h', NULL, &opt.pathname, N_("don't show filenames"), 1),
+ OPT_BIT('H', NULL, &opt.pathname, N_("show filenames"), 1),
+ OPT_NEGBIT(0, "full-name", &opt.relative,
+ N_("show filenames relative to top directory"), 1),
+ OPT_BOOL('l', "files-with-matches", &opt.name_only,
+ N_("show only filenames instead of matching lines")),
+ OPT_BOOL(0, "name-only", &opt.name_only,
+ N_("synonym for --files-with-matches")),
+ OPT_BOOL('L', "files-without-match",
+ &opt.unmatch_name_only,
+ N_("show only the names of files without match")),
+ OPT_BOOL_F('z', "null", &opt.null_following_name,
+ N_("print NUL after filenames"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL('o', "only-matching", &opt.only_matching,
+ N_("show only matching parts of a line")),
+ OPT_BOOL('c', "count", &opt.count,
+ N_("show the number of matches instead of matching lines")),
+ OPT__COLOR(&opt.color, N_("highlight matches")),
+ OPT_BOOL(0, "break", &opt.file_break,
+ N_("print empty line between matches from different files")),
+ OPT_BOOL(0, "heading", &opt.heading,
+ N_("show filename only once above matches from same file")),
+ OPT_GROUP(""),
+ OPT_CALLBACK('C', "context", &opt, N_("n"),
+ N_("show <n> context lines before and after matches"),
+ context_callback),
+ OPT_INTEGER('B', "before-context", &opt.pre_context,
+ N_("show <n> context lines before matches")),
+ OPT_INTEGER('A', "after-context", &opt.post_context,
+ N_("show <n> context lines after matches")),
+ OPT_INTEGER(0, "threads", &num_threads,
+ N_("use <n> worker threads")),
+ OPT_NUMBER_CALLBACK(&opt, N_("shortcut for -C NUM"),
+ context_callback),
+ OPT_BOOL('p', "show-function", &opt.funcname,
+ N_("show a line with the function name before matches")),
+ OPT_BOOL('W', "function-context", &opt.funcbody,
+ N_("show the surrounding function")),
+ OPT_GROUP(""),
+ OPT_CALLBACK('f', NULL, &opt, N_("file"),
+ N_("read patterns from file"), file_callback),
+ OPT_CALLBACK_F('e', NULL, &opt, N_("pattern"),
+ N_("match <pattern>"), PARSE_OPT_NONEG, pattern_callback),
+ OPT_CALLBACK_F(0, "and", &opt, NULL,
+ N_("combine patterns specified with -e"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, and_callback),
+ OPT_BOOL(0, "or", &dummy, ""),
+ OPT_CALLBACK_F(0, "not", &opt, NULL, "",
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, not_callback),
+ OPT_CALLBACK_F('(', NULL, &opt, NULL, "",
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_NODASH,
+ open_callback),
+ OPT_CALLBACK_F(')', NULL, &opt, NULL, "",
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_NODASH,
+ close_callback),
+ OPT__QUIET(&opt.status_only,
+ N_("indicate hit with exit status without output")),
+ OPT_BOOL(0, "all-match", &opt.all_match,
+ N_("show only matches from files that match all patterns")),
+ OPT_GROUP(""),
+ { OPTION_STRING, 'O', "open-files-in-pager", &show_in_pager,
+ N_("pager"), N_("show matching files in the pager"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NOCOMPLETE,
+ NULL, (intptr_t)default_pager },
+ OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored,
+ N_("allow calling of grep(1) (ignored by this build)"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_INTEGER('m', "max-count", &opt.max_count,
+ N_("maximum number of results per file")),
+ OPT_END()
+ };
+ grep_prefix = prefix;
+
+ grep_init(&opt, the_repository);
+ git_config(grep_cmd_config, &opt);
+
+ /*
+ * If there is no -- then the paths must exist in the working
+ * tree. If there is no explicit pattern specified with -e or
+ * -f, we take the first unrecognized non-option to be the
+ * pattern, but then what follows it must be zero or more
+ * valid refs up to the -- (if it exists), and then existing
+ * paths. If there is an explicit pattern, then the first
+ * unrecognized non-option is the beginning of the refs list
+ * that continues up to the -- (if it exists), and then paths.
+ */
+ argc = parse_options(argc, argv, prefix, options, grep_usage,
+ PARSE_OPT_KEEP_DASHDASH |
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
+ if (use_index && !startup_info->have_repository) {
+ int fallback = 0;
+ git_config_get_bool("grep.fallbacktonoindex", &fallback);
+ if (fallback)
+ use_index = 0;
+ else
+ /* die the same way as if we did it at the beginning */
+ setup_git_directory();
+ }
+ /* Ignore --recurse-submodules if --no-index is given or implied */
+ if (!use_index)
+ recurse_submodules = 0;
+
+ /*
+ * skip a -- separator; we know it cannot be
+ * separating revisions from pathnames if
+ * we haven't even had any patterns yet
+ */
+ if (argc > 0 && !opt.pattern_list && !strcmp(argv[0], "--")) {
+ argv++;
+ argc--;
+ }
+
+ /* First unrecognized non-option token */
+ if (argc > 0 && !opt.pattern_list) {
+ append_grep_pattern(&opt, argv[0], "command line", 0,
+ GREP_PATTERN);
+ argv++;
+ argc--;
+ }
+
+ if (show_in_pager == default_pager)
+ show_in_pager = git_pager(1);
+ if (show_in_pager) {
+ opt.color = 0;
+ opt.name_only = 1;
+ opt.null_following_name = 1;
+ opt.output_priv = &path_list;
+ opt.output = append_path;
+ string_list_append(&path_list, show_in_pager);
+ }
+
+ if (!opt.pattern_list)
+ die(_("no pattern given"));
+
+ /* --only-matching has no effect with --invert. */
+ if (opt.invert)
+ opt.only_matching = 0;
+
+ /*
+ * We have to find "--" in a separate pass, because its presence
+ * influences how we will parse arguments that come before it.
+ */
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], "--")) {
+ seen_dashdash = 1;
+ break;
+ }
+ }
+
+ /*
+ * Resolve any rev arguments. If we have a dashdash, then everything up
+ * to it must resolve as a rev. If not, then we stop at the first
+ * non-rev and assume everything else is a path.
+ */
+ allow_revs = use_index && !untracked;
+ for (i = 0; i < argc; i++) {
+ const char *arg = argv[i];
+ struct object_id oid;
+ struct object_context oc;
+ struct object *object;
+
+ if (!strcmp(arg, "--")) {
+ i++;
+ break;
+ }
+
+ if (!allow_revs) {
+ if (seen_dashdash)
+ die(_("--no-index or --untracked cannot be used with revs"));
+ break;
+ }
+
+ if (get_oid_with_context(the_repository, arg,
+ GET_OID_RECORD_PATH,
+ &oid, &oc)) {
+ if (seen_dashdash)
+ die(_("unable to resolve revision: %s"), arg);
+ break;
+ }
+
+ object = parse_object_or_die(&oid, arg);
+ if (!seen_dashdash)
+ verify_non_filename(prefix, arg);
+ add_object_array_with_path(object, arg, &list, oc.mode, oc.path);
+ free(oc.path);
+ }
+
+ /*
+ * Anything left over is presumed to be a path. But in the non-dashdash
+ * "do what I mean" case, we verify and complain when that isn't true.
+ */
+ if (!seen_dashdash) {
+ int j;
+ for (j = i; j < argc; j++)
+ verify_filename(prefix, argv[j], j == i && allow_revs);
+ }
+
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_CWD |
+ (opt.max_depth != -1 ? PATHSPEC_MAXDEPTH_VALID : 0),
+ prefix, argv + i);
+ pathspec.max_depth = opt.max_depth;
+ pathspec.recursive = 1;
+ pathspec.recurse_submodules = !!recurse_submodules;
+
+ if (recurse_submodules && untracked)
+ die(_("--untracked not supported with --recurse-submodules"));
+
+ /*
+ * Optimize out the case where the number of matches is limited to zero.
+ * We do this to keep results consistent with GNU grep(1).
+ */
+ if (opt.max_count == 0)
+ return 1;
+
+ if (show_in_pager) {
+ if (num_threads > 1)
+ warning(_("invalid option combination, ignoring --threads"));
+ num_threads = 1;
+ } else if (!HAVE_THREADS && num_threads > 1) {
+ warning(_("no threads support, ignoring --threads"));
+ num_threads = 1;
+ } else if (num_threads < 0)
+ die(_("invalid number of threads specified (%d)"), num_threads);
+ else if (num_threads == 0)
+ num_threads = HAVE_THREADS ? online_cpus() : 1;
+
+ if (num_threads > 1) {
+ if (!HAVE_THREADS)
+ BUG("Somebody got num_threads calculation wrong!");
+ if (!(opt.name_only || opt.unmatch_name_only || opt.count)
+ && (opt.pre_context || opt.post_context ||
+ opt.file_break || opt.funcbody))
+ skip_first_line = 1;
+
+ /*
+ * Pre-read gitmodules (if not read already) and force eager
+ * initialization of packed_git to prevent racy lazy
+ * reading/initialization once worker threads are started.
+ */
+ if (recurse_submodules)
+ repo_read_gitmodules(the_repository, 1);
+ if (startup_info->have_repository)
+ (void)get_packed_git(the_repository);
+
+ start_threads(&opt);
+ } else {
+ /*
+ * The compiled patterns on the main path are only
+ * used when not using threading. Otherwise
+ * start_threads() above calls compile_grep_patterns()
+ * for each thread.
+ */
+ compile_grep_patterns(&opt);
+ }
+
+ if (show_in_pager && (cached || list.nr))
+ die(_("--open-files-in-pager only works on the worktree"));
+
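+ /*
+ * If the pager is "less" or "vi", pass a "+/<pattern>" argument so it
+ * starts at the first match (plus "-I" for a case-insensitive "less").
+ */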
+ if (show_in_pager && opt.pattern_list && !opt.pattern_list->next) {
+ const char *pager = path_list.items[0].string;
+ int len = strlen(pager);
+
+ if (len > 4 && is_dir_sep(pager[len - 5]))
+ pager += len - 4;
+
+ if (opt.ignore_case && !strcmp("less", pager))
+ string_list_append(&path_list, "-I");
+
+ if (!strcmp("less", pager) || !strcmp("vi", pager)) {
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_addf(&buf, "+/%s%s",
+ strcmp("less", pager) ? "" : "*",
+ opt.pattern_list->pattern);
+ string_list_append_nodup(&path_list,
+ strbuf_detach(&buf, NULL));
+ }
+ }
+
+ if (!show_in_pager && !opt.status_only)
+ setup_pager();
+
+ die_for_incompatible_opt3(!use_index, "--no-index",
+ untracked, "--untracked",
+ cached, "--cached");
+
+ if (!use_index || untracked) {
+ int use_exclude = (opt_exclude < 0) ? use_index : !!opt_exclude;
+ hit = grep_directory(&opt, &pathspec, use_exclude, use_index);
+ } else if (0 <= opt_exclude) {
+ die(_("--[no-]exclude-standard cannot be used for tracked contents"));
+ } else if (!list.nr) {
+ if (!cached)
+ setup_work_tree();
+
+ hit = grep_cache(&opt, &pathspec, cached);
+ } else {
+ if (cached)
+ die(_("both --cached and trees are given"));
+
+ hit = grep_objects(&opt, &pathspec, &list);
+ }
+
+ if (num_threads > 1)
+ hit |= wait_all();
+ if (hit && show_in_pager)
+ run_pager(&opt, prefix);
+ clear_pathspec(&pathspec);
+ string_list_clear(&path_list, 0);
+ free_grep_patterns(&opt);
+ object_array_clear(&list);
+ free_repos();
+ return !hit;
+}
diff --git a/builtin/hash-object.c b/builtin/hash-object.c
new file mode 100644
index 0000000..b506381
--- /dev/null
+++ b/builtin/hash-object.c
@@ -0,0 +1,166 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ * Copyright (C) Junio C Hamano, 2005
+ */
+#include "builtin.h"
+#include "config.h"
+#include "object-store.h"
+#include "blob.h"
+#include "quote.h"
+#include "parse-options.h"
+#include "exec-cmd.h"
+
+/*
+ * This is to create corrupt objects for debugging and as such it
+ * needs to bypass the data conversion performed by, and the type
+ * limitation imposed by, index_fd() and its callees.
+ */
+static int hash_literally(struct object_id *oid, int fd, const char *type, unsigned flags)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int ret;
+
+ if (strbuf_read(&buf, fd, 4096) < 0)
+ ret = -1;
+ else
+ ret = write_object_file_literally(buf.buf, buf.len, type, oid,
+ flags);
+ strbuf_release(&buf);
+ return ret;
+}
+
+static void hash_fd(int fd, const char *type, const char *path, unsigned flags,
+ int literally)
+{
+ struct stat st;
+ struct object_id oid;
+
+ if (fstat(fd, &st) < 0 ||
+ (literally
+ ? hash_literally(&oid, fd, type, flags)
+ : index_fd(the_repository->index, &oid, fd, &st,
+ type_from_string(type), path, flags)))
+ die((flags & HASH_WRITE_OBJECT)
+ ? "Unable to add %s to database"
+ : "Unable to hash %s", path);
+ printf("%s\n", oid_to_hex(&oid));
+ maybe_flush_or_die(stdout, "hash to stdout");
+}
+
+static void hash_object(const char *path, const char *type, const char *vpath,
+ unsigned flags, int literally)
+{
+ int fd;
+ fd = xopen(path, O_RDONLY);
+ hash_fd(fd, type, vpath, flags, literally);
+}
+
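+/* Read one path per line from stdin, unquoting C-style quoted lines. */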
+static void hash_stdin_paths(const char *type, int no_filters, unsigned flags,
+ int literally)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
+ die("line is badly quoted");
+ strbuf_swap(&buf, &unquoted);
+ }
+ hash_object(buf.buf, type, no_filters ? NULL : buf.buf, flags,
+ literally);
+ }
+ strbuf_release(&buf);
+ strbuf_release(&unquoted);
+}
+
+int cmd_hash_object(int argc, const char **argv, const char *prefix)
+{
+ static const char * const hash_object_usage[] = {
+ N_("git hash-object [-t <type>] [-w] [--path=<file> | --no-filters]\n"
+ " [--stdin [--literally]] [--] <file>..."),
+ N_("git hash-object [-t <type>] [-w] --stdin-paths [--no-filters]"),
+ NULL
+ };
+ const char *type = blob_type;
+ int hashstdin = 0;
+ int stdin_paths = 0;
+ int no_filters = 0;
+ int literally = 0;
+ int nongit = 0;
+ unsigned flags = HASH_FORMAT_CHECK;
+ const char *vpath = NULL;
+ char *vpath_free = NULL;
+ const struct option hash_object_options[] = {
+ OPT_STRING('t', NULL, &type, N_("type"), N_("object type")),
+ OPT_BIT('w', NULL, &flags, N_("write the object into the object database"),
+ HASH_WRITE_OBJECT),
+ OPT_COUNTUP(0, "stdin", &hashstdin, N_("read the object from stdin")),
+ OPT_BOOL(0, "stdin-paths", &stdin_paths, N_("read file names from stdin")),
+ OPT_BOOL(0, "no-filters", &no_filters, N_("store file as is without filters")),
+ OPT_BOOL(0, "literally", &literally, N_("just hash any random garbage to create corrupt objects for debugging Git")),
+ OPT_STRING(0, "path", &vpath, N_("file"), N_("process file as if it were from this path")),
+ OPT_END()
+ };
+ int i;
+ const char *errstr = NULL;
+
+ argc = parse_options(argc, argv, prefix, hash_object_options,
+ hash_object_usage, 0);
+
+ if (flags & HASH_WRITE_OBJECT)
+ prefix = setup_git_directory();
+ else
+ prefix = setup_git_directory_gently(&nongit);
+
+ if (vpath && prefix) {
+ vpath_free = prefix_filename(prefix, vpath);
+ vpath = vpath_free;
+ }
+
+ git_config(git_default_config, NULL);
+
+ if (stdin_paths) {
+ if (hashstdin)
+ errstr = "Can't use --stdin-paths with --stdin";
+ else if (argc)
+ errstr = "Can't specify files with --stdin-paths";
+ else if (vpath)
+ errstr = "Can't use --stdin-paths with --path";
+ }
+ else {
+ if (hashstdin > 1)
+ errstr = "Multiple --stdin arguments are not supported";
+ if (vpath && no_filters)
+ errstr = "Can't use --path with --no-filters";
+ }
+
+ if (errstr) {
+ error("%s", errstr);
+ usage_with_options(hash_object_usage, hash_object_options);
+ }
+
+ if (hashstdin)
+ hash_fd(0, type, vpath, flags, literally);
+
+ for (i = 0 ; i < argc; i++) {
+ const char *arg = argv[i];
+ char *to_free = NULL;
+
+ if (prefix)
+ arg = to_free = prefix_filename(prefix, arg);
+ hash_object(arg, type, no_filters ? NULL : vpath ? vpath : arg,
+ flags, literally);
+ free(to_free);
+ }
+
+ if (stdin_paths)
+ hash_stdin_paths(type, no_filters, flags, literally);
+
+ free(vpath_free);
+
+ return 0;
+}
diff --git a/builtin/help.c b/builtin/help.c
new file mode 100644
index 0000000..53f2812
--- /dev/null
+++ b/builtin/help.c
@@ -0,0 +1,722 @@
+/*
+ * Builtin help command
+ */
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "exec-cmd.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "config-list.h"
+#include "help.h"
+#include "alias.h"
+
+#ifndef DEFAULT_HELP_FORMAT
+#define DEFAULT_HELP_FORMAT "man"
+#endif
+
+static struct man_viewer_list {
+ struct man_viewer_list *next;
+ char name[FLEX_ARRAY];
+} *man_viewer_list;
+
+static struct man_viewer_info_list {
+ struct man_viewer_info_list *next;
+ const char *info;
+ char name[FLEX_ARRAY];
+} *man_viewer_info_list;
+
+enum help_format {
+ HELP_FORMAT_NONE,
+ HELP_FORMAT_MAN,
+ HELP_FORMAT_INFO,
+ HELP_FORMAT_WEB
+};
+
+enum show_config_type {
+ SHOW_CONFIG_HUMAN,
+ SHOW_CONFIG_VARS,
+ SHOW_CONFIG_SECTIONS,
+};
+
+static enum help_action {
+ HELP_ACTION_ALL = 1,
+ HELP_ACTION_GUIDES,
+ HELP_ACTION_CONFIG,
+ HELP_ACTION_USER_INTERFACES,
+ HELP_ACTION_DEVELOPER_INTERFACES,
+ HELP_ACTION_CONFIG_FOR_COMPLETION,
+ HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION,
+} cmd_mode;
+
+static const char *html_path;
+static int verbose = 1;
+static enum help_format help_format = HELP_FORMAT_NONE;
+static int exclude_guides;
+static int show_external_commands = -1;
+static int show_aliases = -1;
+static struct option builtin_help_options[] = {
+ OPT_CMDMODE('a', "all", &cmd_mode, N_("print all available commands"),
+ HELP_ACTION_ALL),
+ OPT_BOOL(0, "external-commands", &show_external_commands,
+ N_("show external commands in --all")),
+ OPT_BOOL(0, "aliases", &show_aliases, N_("show aliases in --all")),
+ OPT_HIDDEN_BOOL(0, "exclude-guides", &exclude_guides, N_("exclude guides")),
+ OPT_SET_INT('m', "man", &help_format, N_("show man page"), HELP_FORMAT_MAN),
+ OPT_SET_INT('w', "web", &help_format, N_("show manual in web browser"),
+ HELP_FORMAT_WEB),
+ OPT_SET_INT('i', "info", &help_format, N_("show info page"),
+ HELP_FORMAT_INFO),
+ OPT__VERBOSE(&verbose, N_("print command description")),
+
+ OPT_CMDMODE('g', "guides", &cmd_mode, N_("print list of useful guides"),
+ HELP_ACTION_GUIDES),
+ OPT_CMDMODE(0, "user-interfaces", &cmd_mode,
+ N_("print list of user-facing repository, command and file interfaces"),
+ HELP_ACTION_USER_INTERFACES),
+ OPT_CMDMODE(0, "developer-interfaces", &cmd_mode,
+ N_("print list of file formats, protocols and other developer interfaces"),
+ HELP_ACTION_DEVELOPER_INTERFACES),
+ OPT_CMDMODE('c', "config", &cmd_mode, N_("print all configuration variable names"),
+ HELP_ACTION_CONFIG),
+ OPT_CMDMODE_F(0, "config-for-completion", &cmd_mode, "",
+ HELP_ACTION_CONFIG_FOR_COMPLETION, PARSE_OPT_HIDDEN),
+ OPT_CMDMODE_F(0, "config-sections-for-completion", &cmd_mode, "",
+ HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION, PARSE_OPT_HIDDEN),
+
+ OPT_END(),
+};
+
+static const char * const builtin_help_usage[] = {
+ "git help [-a|--all] [--[no-]verbose] [--[no-]external-commands] [--[no-]aliases]",
+ N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>|<doc>]"),
+ "git help [-g|--guides]",
+ "git help [-c|--config]",
+ "git help [--user-interfaces]",
+ "git help [--developer-interfaces]",
+ NULL
+};
+
+struct slot_expansion {
+ const char *prefix;
+ const char *placeholder;
+ void (*fn)(struct string_list *list, const char *prefix);
+ int found;
+};
+
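+/*
+ * Print the known configuration variable names, expanding placeholder slots
+ * such as "color.diff.<slot>" and, depending on "type", collapsing entries to
+ * their section or unique prefix for completion output.
+ */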
+static void list_config_help(enum show_config_type type)
+{
+ struct slot_expansion slot_expansions[] = {
+ { "advice", "*", list_config_advices },
+ { "color.branch", "<slot>", list_config_color_branch_slots },
+ { "color.decorate", "<slot>", list_config_color_decorate_slots },
+ { "color.diff", "<slot>", list_config_color_diff_slots },
+ { "color.grep", "<slot>", list_config_color_grep_slots },
+ { "color.interactive", "<slot>", list_config_color_interactive_slots },
+ { "color.remote", "<slot>", list_config_color_sideband_slots },
+ { "color.status", "<slot>", list_config_color_status_slots },
+ { "fsck", "<msg-id>", list_config_fsck_msg_ids },
+ { "receive.fsck", "<msg-id>", list_config_fsck_msg_ids },
+ { NULL, NULL, NULL }
+ };
+ const char **p;
+ struct slot_expansion *e;
+ struct string_list keys = STRING_LIST_INIT_DUP;
+ struct string_list keys_uniq = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+ int i;
+
+ for (p = config_name_list; *p; p++) {
+ const char *var = *p;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (e = slot_expansions; e->prefix; e++) {
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s.%s", e->prefix, e->placeholder);
+ if (!strcasecmp(var, sb.buf)) {
+ e->fn(&keys, e->prefix);
+ e->found++;
+ break;
+ }
+ }
+ strbuf_release(&sb);
+ if (!e->prefix)
+ string_list_append(&keys, var);
+ }
+
+ for (e = slot_expansions; e->prefix; e++)
+ if (!e->found)
+ BUG("slot_expansion %s.%s is not used",
+ e->prefix, e->placeholder);
+
+ string_list_sort(&keys);
+ for (i = 0; i < keys.nr; i++) {
+ const char *var = keys.items[i].string;
+ const char *wildcard, *tag, *cut;
+ const char *dot = NULL;
+ struct strbuf sb = STRBUF_INIT;
+
+ switch (type) {
+ case SHOW_CONFIG_HUMAN:
+ puts(var);
+ continue;
+ case SHOW_CONFIG_SECTIONS:
+ dot = strchr(var, '.');
+ break;
+ case SHOW_CONFIG_VARS:
+ break;
+ }
+ wildcard = strchr(var, '*');
+ tag = strchr(var, '<');
+
+ if (!dot && !wildcard && !tag) {
+ string_list_append(&keys_uniq, var);
+ continue;
+ }
+
+ if (dot)
+ cut = dot;
+ else if (wildcard && !tag)
+ cut = wildcard;
+ else if (!wildcard && tag)
+ cut = tag;
+ else
+ cut = wildcard < tag ? wildcard : tag;
+
+ strbuf_add(&sb, var, cut - var);
+ string_list_append(&keys_uniq, sb.buf);
+ strbuf_release(&sb);
+
+ }
+ string_list_clear(&keys, 0);
+ string_list_remove_duplicates(&keys_uniq, 0);
+ for_each_string_list_item(item, &keys_uniq)
+ puts(item->string);
+ string_list_clear(&keys_uniq, 0);
+}
+
+static enum help_format parse_help_format(const char *format)
+{
+ if (!strcmp(format, "man"))
+ return HELP_FORMAT_MAN;
+ if (!strcmp(format, "info"))
+ return HELP_FORMAT_INFO;
+ if (!strcmp(format, "web") || !strcmp(format, "html"))
+ return HELP_FORMAT_WEB;
+ /*
+ * Please update _git_config() in git-completion.bash when you
+ * add new help formats.
+ */
+ die(_("unrecognized help format '%s'"), format);
+}
+
+static const char *get_man_viewer_info(const char *name)
+{
+ struct man_viewer_info_list *viewer;
+
+ for (viewer = man_viewer_info_list; viewer; viewer = viewer->next)
+ {
+ if (!strcasecmp(name, viewer->name))
+ return viewer->info;
+ }
+ return NULL;
+}
+
+static int check_emacsclient_version(void)
+{
+ struct strbuf buffer = STRBUF_INIT;
+ struct child_process ec_process = CHILD_PROCESS_INIT;
+ int version;
+
+ /* emacsclient prints its version number on stderr */
+ strvec_pushl(&ec_process.args, "emacsclient", "--version", NULL);
+ ec_process.err = -1;
+ ec_process.stdout_to_stderr = 1;
+ if (start_command(&ec_process))
+ return error(_("Failed to start emacsclient."));
+
+ strbuf_read(&buffer, ec_process.err, 20);
+ close(ec_process.err);
+
+ /*
+ * Don't bother checking the return value, because "emacsclient --version"
+ * seems to always exit with code 1.
+ */
+ finish_command(&ec_process);
+
+ if (!starts_with(buffer.buf, "emacsclient")) {
+ strbuf_release(&buffer);
+ return error(_("Failed to parse emacsclient version."));
+ }
+
+ strbuf_remove(&buffer, 0, strlen("emacsclient"));
+ version = atoi(buffer.buf);
+
+ if (version < 22) {
+ strbuf_release(&buffer);
+ return error(_("emacsclient version '%d' too old (< 22)."),
+ version);
+ }
+
+ strbuf_release(&buffer);
+ return 0;
+}
+
+static void exec_woman_emacs(const char *path, const char *page)
+{
+ if (!check_emacsclient_version()) {
+ /* This works only with emacsclient version >= 22. */
+ struct strbuf man_page = STRBUF_INIT;
+
+ if (!path)
+ path = "emacsclient";
+ strbuf_addf(&man_page, "(woman \"%s\")", page);
+ execlp(path, "emacsclient", "-e", man_page.buf, (char *)NULL);
+ warning_errno(_("failed to exec '%s'"), path);
+ strbuf_release(&man_page);
+ }
+}
+
+static void exec_man_konqueror(const char *path, const char *page)
+{
+ const char *display = getenv("DISPLAY");
+ if (display && *display) {
+ struct strbuf man_page = STRBUF_INIT;
+ const char *filename = "kfmclient";
+
+ /* It's simpler to launch konqueror using kfmclient. */
+ if (path) {
+ size_t len;
+ if (strip_suffix(path, "/konqueror", &len))
+ path = xstrfmt("%.*s/kfmclient", (int)len, path);
+ filename = basename((char *)path);
+ } else
+ path = "kfmclient";
+ strbuf_addf(&man_page, "man:%s(1)", page);
+ execlp(path, filename, "newTab", man_page.buf, (char *)NULL);
+ warning_errno(_("failed to exec '%s'"), path);
+ strbuf_release(&man_page);
+ }
+}
+
+static void exec_man_man(const char *path, const char *page)
+{
+ if (!path)
+ path = "man";
+ execlp(path, "man", page, (char *)NULL);
+ warning_errno(_("failed to exec '%s'"), path);
+}
+
+static void exec_man_cmd(const char *cmd, const char *page)
+{
+ struct strbuf shell_cmd = STRBUF_INIT;
+ strbuf_addf(&shell_cmd, "%s %s", cmd, page);
+ execl(SHELL_PATH, SHELL_PATH, "-c", shell_cmd.buf, (char *)NULL);
+ warning(_("failed to exec '%s'"), cmd);
+ strbuf_release(&shell_cmd);
+}
+
+static void add_man_viewer(const char *name)
+{
+ struct man_viewer_list **p = &man_viewer_list;
+
+ while (*p)
+ p = &((*p)->next);
+ FLEX_ALLOC_STR(*p, name, name);
+}
+
+static int supported_man_viewer(const char *name, size_t len)
+{
+ return (!strncasecmp("man", name, len) ||
+ !strncasecmp("woman", name, len) ||
+ !strncasecmp("konqueror", name, len));
+}
+
+static void do_add_man_viewer_info(const char *name,
+ size_t len,
+ const char *value)
+{
+ struct man_viewer_info_list *new_man_viewer;
+ FLEX_ALLOC_MEM(new_man_viewer, name, name, len);
+ new_man_viewer->info = xstrdup(value);
+ new_man_viewer->next = man_viewer_info_list;
+ man_viewer_info_list = new_man_viewer;
+}
+
+static int add_man_viewer_path(const char *name,
+ size_t len,
+ const char *value)
+{
+ if (supported_man_viewer(name, len))
+ do_add_man_viewer_info(name, len, value);
+ else
+ warning(_("'%s': path for unsupported man viewer.\n"
+ "Please consider using 'man.<tool>.cmd' instead."),
+ name);
+
+ return 0;
+}
+
+static int add_man_viewer_cmd(const char *name,
+ size_t len,
+ const char *value)
+{
+ if (supported_man_viewer(name, len))
+ warning(_("'%s': cmd for supported man viewer.\n"
+ "Please consider using 'man.<tool>.path' instead."),
+ name);
+ else
+ do_add_man_viewer_info(name, len, value);
+
+ return 0;
+}
+
+static int add_man_viewer_info(const char *var, const char *value)
+{
+ const char *name, *subkey;
+ size_t namelen;
+
+ if (parse_config_key(var, "man", &name, &namelen, &subkey) < 0 || !name)
+ return 0;
+
+ if (!strcmp(subkey, "path")) {
+ if (!value)
+ return config_error_nonbool(var);
+ return add_man_viewer_path(name, namelen, value);
+ }
+ if (!strcmp(subkey, "cmd")) {
+ if (!value)
+ return config_error_nonbool(var);
+ return add_man_viewer_cmd(name, namelen, value);
+ }
+
+ return 0;
+}
+
+static int git_help_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "help.format")) {
+ if (!value)
+ return config_error_nonbool(var);
+ help_format = parse_help_format(value);
+ return 0;
+ }
+ if (!strcmp(var, "help.htmlpath")) {
+ if (!value)
+ return config_error_nonbool(var);
+ html_path = xstrdup(value);
+ return 0;
+ }
+ if (!strcmp(var, "man.viewer")) {
+ if (!value)
+ return config_error_nonbool(var);
+ add_man_viewer(value);
+ return 0;
+ }
+ if (starts_with(var, "man."))
+ return add_man_viewer_info(var, value);
+
+ return git_default_config(var, value, cb);
+}
+
+static struct cmdnames main_cmds, other_cmds;
+
+static int is_git_command(const char *s)
+{
+ if (is_builtin(s))
+ return 1;
+
+ load_command_list("git-", &main_cmds, &other_cmds);
+ return is_in_cmdlist(&main_cmds, s) ||
+ is_in_cmdlist(&other_cmds, s);
+}
+
+static const char *cmd_to_page(const char *git_cmd)
+{
+ if (!git_cmd)
+ return "git";
+ else if (starts_with(git_cmd, "git"))
+ return git_cmd;
+ else if (is_git_command(git_cmd))
+ return xstrfmt("git-%s", git_cmd);
+ else if (!strcmp("scalar", git_cmd))
+ return xstrdup(git_cmd);
+ else
+ return xstrfmt("git%s", git_cmd);
+}
+
+static void setup_man_path(void)
+{
+ struct strbuf new_path = STRBUF_INIT;
+ const char *old_path = getenv("MANPATH");
+ char *git_man_path = system_path(GIT_MAN_PATH);
+
+ /* We should always put ':' after our path. If there is no
+ * old_path, the ':' at the end will let 'man' try
+ * system-wide paths after ours to find the manual page. If
+ * there is an old_path, we need ':' as a delimiter. */
+ strbuf_addstr(&new_path, git_man_path);
+ strbuf_addch(&new_path, ':');
+ if (old_path)
+ strbuf_addstr(&new_path, old_path);
+
+ free(git_man_path);
+ setenv("MANPATH", new_path.buf, 1);
+
+ strbuf_release(&new_path);
+}
+
+static void exec_viewer(const char *name, const char *page)
+{
+ const char *info = get_man_viewer_info(name);
+
+ if (!strcasecmp(name, "man"))
+ exec_man_man(info, page);
+ else if (!strcasecmp(name, "woman"))
+ exec_woman_emacs(info, page);
+ else if (!strcasecmp(name, "konqueror"))
+ exec_man_konqueror(info, page);
+ else if (info)
+ exec_man_cmd(info, page);
+ else
+ warning(_("'%s': unknown man viewer."), name);
+}
+
+static void show_man_page(const char *page)
+{
+ struct man_viewer_list *viewer;
+ const char *fallback = getenv("GIT_MAN_VIEWER");
+
+ setup_man_path();
+ for (viewer = man_viewer_list; viewer; viewer = viewer->next)
+ {
+ exec_viewer(viewer->name, page); /* will return when unable */
+ }
+ if (fallback)
+ exec_viewer(fallback, page);
+ exec_viewer("man", page);
+ die(_("no man viewer handled the request"));
+}
+
+static void show_info_page(const char *page)
+{
+ setenv("INFOPATH", system_path(GIT_INFO_PATH), 1);
+ execlp("info", "info", "gitman", page, (char *)NULL);
+ die(_("no info viewer handled the request"));
+}
+
+static void get_html_page_path(struct strbuf *page_path, const char *page)
+{
+ struct stat st;
+ char *to_free = NULL;
+
+ if (!html_path)
+ html_path = to_free = system_path(GIT_HTML_PATH);
+
+ /*
+ * Check that the page we're looking for exists.
+ */
+ if (!strstr(html_path, "://")) {
+ if (stat(mkpath("%s/%s.html", html_path, page), &st)
+ || !S_ISREG(st.st_mode))
+ die("'%s/%s.html': documentation file not found.",
+ html_path, page);
+ }
+
+ strbuf_init(page_path, 0);
+ strbuf_addf(page_path, "%s/%s.html", html_path, page);
+ free(to_free);
+}
+
+static void open_html(const char *path)
+{
+ execl_git_cmd("web--browse", "-c", "help.browser", path, (char *)NULL);
+}
+
+static void show_html_page(const char *page)
+{
+ struct strbuf page_path; /* it leaks but we exec below */
+
+ get_html_page_path(&page_path, page);
+
+ open_html(page_path.buf);
+}
+
+static const char *check_git_cmd(const char* cmd)
+{
+ char *alias;
+
+ if (is_git_command(cmd))
+ return cmd;
+
+ alias = alias_lookup(cmd);
+ if (alias) {
+ const char **argv;
+ int count;
+
+ /*
+ * handle_builtin() in git.c rewrites "git cmd --help"
+ * to "git help --exclude-guides cmd", so we can use
+ * exclude_guides to distinguish "git cmd --help" from
+ * "git help cmd". In the latter case, or if cmd is an
+ * alias for a shell command, just print the alias
+ * definition.
+ */
+ if (!exclude_guides || alias[0] == '!') {
+ printf_ln(_("'%s' is aliased to '%s'"), cmd, alias);
+ free(alias);
+ exit(0);
+ }
+ /*
+ * Otherwise, we pretend that the command was "git
+ * word0 --help". We use split_cmdline() to get the
+ * first word of the alias, to ensure that we use the
+ * same rules as when the alias is actually
+ * used. split_cmdline() modifies alias in-place.
+ */
+ fprintf_ln(stderr, _("'%s' is aliased to '%s'"), cmd, alias);
+ count = split_cmdline(alias, &argv);
+ if (count < 0)
+ die(_("bad alias.%s string: %s"), cmd,
+ split_cmdline_strerror(count));
+ free(argv);
+ UNLEAK(alias);
+ return alias;
+ }
+
+ if (exclude_guides)
+ return help_unknown_cmd(cmd);
+
+ return cmd;
+}
+
+static void no_help_format(const char *opt_mode, enum help_format fmt)
+{
+ const char *opt_fmt;
+
+ switch (fmt) {
+ case HELP_FORMAT_NONE:
+ return;
+ case HELP_FORMAT_MAN:
+ opt_fmt = "--man";
+ break;
+ case HELP_FORMAT_INFO:
+ opt_fmt = "--info";
+ break;
+ case HELP_FORMAT_WEB:
+ opt_fmt = "--web";
+ break;
+ default:
+ BUG("unreachable");
+ }
+
+ usage_msg_optf(_("options '%s' and '%s' cannot be used together"),
+ builtin_help_usage, builtin_help_options, opt_mode,
+ opt_fmt);
+}
+
+static void opt_mode_usage(int argc, const char *opt_mode,
+ enum help_format fmt)
+{
+ if (argc)
+ usage_msg_optf(_("the '%s' option doesn't take any non-option arguments"),
+ builtin_help_usage, builtin_help_options,
+ opt_mode);
+
+ no_help_format(opt_mode, fmt);
+}
+
+int cmd_help(int argc, const char **argv, const char *prefix)
+{
+ int nongit;
+ enum help_format parsed_help_format;
+ const char *page;
+
+ argc = parse_options(argc, argv, prefix, builtin_help_options,
+ builtin_help_usage, 0);
+ parsed_help_format = help_format;
+
+ if (cmd_mode != HELP_ACTION_ALL &&
+ (show_external_commands >= 0 ||
+ show_aliases >= 0))
+ usage_msg_opt(_("the '--no-[external-commands|aliases]' options can only be used with '--all'"),
+ builtin_help_usage, builtin_help_options);
+
+ switch (cmd_mode) {
+ case HELP_ACTION_ALL:
+ opt_mode_usage(argc, "--all", help_format);
+ if (verbose) {
+ setup_pager();
+ list_all_cmds_help(show_external_commands,
+ show_aliases);
+ return 0;
+ }
+ printf(_("usage: %s%s"), _(git_usage_string), "\n\n");
+ load_command_list("git-", &main_cmds, &other_cmds);
+ list_commands(&main_cmds, &other_cmds);
+ printf("%s\n", _(git_more_info_string));
+ break;
+ case HELP_ACTION_GUIDES:
+ opt_mode_usage(argc, "--guides", help_format);
+ list_guides_help();
+ printf("%s\n", _(git_more_info_string));
+ return 0;
+ case HELP_ACTION_CONFIG_FOR_COMPLETION:
+ opt_mode_usage(argc, "--config-for-completion", help_format);
+ list_config_help(SHOW_CONFIG_VARS);
+ return 0;
+ case HELP_ACTION_USER_INTERFACES:
+ opt_mode_usage(argc, "--user-interfaces", help_format);
+ list_user_interfaces_help();
+ return 0;
+ case HELP_ACTION_DEVELOPER_INTERFACES:
+ opt_mode_usage(argc, "--developer-interfaces", help_format);
+ list_developer_interfaces_help();
+ return 0;
+ case HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION:
+ opt_mode_usage(argc, "--config-sections-for-completion",
+ help_format);
+ list_config_help(SHOW_CONFIG_SECTIONS);
+ return 0;
+ case HELP_ACTION_CONFIG:
+ opt_mode_usage(argc, "--config", help_format);
+ setup_pager();
+ list_config_help(SHOW_CONFIG_HUMAN);
+ printf("\n%s\n", _("'git help config' for more information"));
+ return 0;
+ }
+
+ if (!argv[0]) {
+ printf(_("usage: %s%s"), _(git_usage_string), "\n\n");
+ list_common_cmds_help();
+ printf("\n%s\n", _(git_more_info_string));
+ return 0;
+ }
+
+ setup_git_directory_gently(&nongit);
+ git_config(git_help_config, NULL);
+
+ if (parsed_help_format != HELP_FORMAT_NONE)
+ help_format = parsed_help_format;
+ if (help_format == HELP_FORMAT_NONE)
+ help_format = parse_help_format(DEFAULT_HELP_FORMAT);
+
+ argv[0] = check_git_cmd(argv[0]);
+
+ page = cmd_to_page(argv[0]);
+ switch (help_format) {
+ case HELP_FORMAT_NONE:
+ case HELP_FORMAT_MAN:
+ show_man_page(page);
+ break;
+ case HELP_FORMAT_INFO:
+ show_info_page(page);
+ break;
+ case HELP_FORMAT_WEB:
+ show_html_page(page);
+ break;
+ }
+
+ return 0;
+}
diff --git a/builtin/hook.c b/builtin/hook.c
new file mode 100644
index 0000000..b6530d1
--- /dev/null
+++ b/builtin/hook.c
@@ -0,0 +1,80 @@
+#include "cache.h"
+#include "builtin.h"
+#include "config.h"
+#include "hook.h"
+#include "parse-options.h"
+#include "strbuf.h"
+#include "strvec.h"
+
+#define BUILTIN_HOOK_RUN_USAGE \
+ N_("git hook run [--ignore-missing] <hook-name> [-- <hook-args>]")
+
+static const char * const builtin_hook_usage[] = {
+ BUILTIN_HOOK_RUN_USAGE,
+ NULL
+};
+
+static const char * const builtin_hook_run_usage[] = {
+ BUILTIN_HOOK_RUN_USAGE,
+ NULL
+};
+
+static int run(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+ int ignore_missing = 0;
+ const char *hook_name;
+ struct option run_options[] = {
+ OPT_BOOL(0, "ignore-missing", &ignore_missing,
+ N_("silently ignore missing requested <hook-name>")),
+ OPT_END(),
+ };
+ int ret;
+
+ argc = parse_options(argc, argv, prefix, run_options,
+ builtin_hook_run_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (!argc)
+ goto usage;
+
+ /*
+ * Having a -- for "run" when providing <hook-args> is
+ * mandatory.
+ */
+ if (argc > 1 && strcmp(argv[1], "--") &&
+ strcmp(argv[1], "--end-of-options"))
+ goto usage;
+
+ /* Add our arguments, start after -- */
+ for (i = 2 ; i < argc; i++)
+ strvec_push(&opt.args, argv[i]);
+
+ /* Need to take into account core.hooksPath */
+ git_config(git_default_config, NULL);
+
+ hook_name = argv[0];
+ if (!ignore_missing)
+ opt.error_if_missing = 1;
+ ret = run_hooks_opt(hook_name, &opt);
+ if (ret < 0) /* error() return */
+ ret = 1;
+ return ret;
+usage:
+ usage_with_options(builtin_hook_run_usage, run_options);
+}
+
+int cmd_hook(int argc, const char **argv, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option builtin_hook_options[] = {
+ OPT_SUBCOMMAND("run", &fn, run),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL, builtin_hook_options,
+ builtin_hook_usage, 0);
+
+ return fn(argc, argv, prefix);
+}
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
new file mode 100644
index 0000000..6648f2d
--- /dev/null
+++ b/builtin/index-pack.c
@@ -0,0 +1,1959 @@
+#include "builtin.h"
+#include "config.h"
+#include "delta.h"
+#include "pack.h"
+#include "csum-file.h"
+#include "blob.h"
+#include "commit.h"
+#include "tag.h"
+#include "tree.h"
+#include "progress.h"
+#include "fsck.h"
+#include "exec-cmd.h"
+#include "streaming.h"
+#include "thread-utils.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "promisor-remote.h"
+
+static const char index_pack_usage[] =
+"git index-pack [-v] [-o <index-file>] [--keep | --keep=<msg>] [--[no-]rev-index] [--verify] [--strict] (<pack-file> | --stdin [--fix-thin] [<pack-file>])";
+
+struct object_entry {
+ struct pack_idx_entry idx;
+ unsigned long size;
+ unsigned char hdr_size;
+ signed char type;
+ signed char real_type;
+};
+
+struct object_stat {
+ unsigned delta_depth;
+ int base_object_no;
+};
+
+struct base_data {
+ /* Initialized by make_base(). */
+ struct base_data *base;
+ struct object_entry *obj;
+ int ref_first, ref_last;
+ int ofs_first, ofs_last;
+ /*
+ * Threads should increment retain_data if they are about to call
+ * patch_delta() using this struct's data as a base, and decrement this
+ * when they are done. While retain_data is nonzero, this struct's data
+ * will not be freed even if the delta base cache limit is exceeded.
+ */
+ int retain_data;
+ /*
+ * The number of direct children that have not been fully processed
+ * (entered work_head, entered done_head, left done_head). When this
+ * number reaches zero, this struct base_data can be freed.
+ */
+ int children_remaining;
+
+ /* Not initialized by make_base(). */
+ struct list_head list;
+ void *data;
+ unsigned long size;
+};
+
+/*
+ * Stack of struct base_data that have unprocessed children.
+ * threaded_second_pass() uses this as a source of work (the other being the
+ * objects array).
+ *
+ * Guarded by work_mutex.
+ */
+static LIST_HEAD(work_head);
+
+/*
+ * Stack of struct base_data that have children, all of whom have been
+ * processed or are being processed, and at least one child is being processed.
+ * These struct base_data must be kept around until the last child is
+ * processed.
+ *
+ * Guarded by work_mutex.
+ */
+static LIST_HEAD(done_head);
+
+/*
+ * All threads share one delta base cache.
+ *
+ * base_cache_used is guarded by work_mutex, and base_cache_limit is read-only
+ * in a thread.
+ */
+static size_t base_cache_used;
+static size_t base_cache_limit;
+
+struct thread_local {
+ pthread_t thread;
+ int pack_fd;
+};
+
+/* Remember to update object flag allocation in object.h */
+#define FLAG_LINK (1u<<20)
+#define FLAG_CHECKED (1u<<21)
+
+struct ofs_delta_entry {
+ off_t offset;
+ int obj_no;
+};
+
+struct ref_delta_entry {
+ struct object_id oid;
+ int obj_no;
+};
+
+static struct object_entry *objects;
+static struct object_stat *obj_stat;
+static struct ofs_delta_entry *ofs_deltas;
+static struct ref_delta_entry *ref_deltas;
+static struct thread_local nothread_data;
+static int nr_objects;
+static int nr_ofs_deltas;
+static int nr_ref_deltas;
+static int ref_deltas_alloc;
+static int nr_resolved_deltas;
+static int nr_threads;
+
+static int from_stdin;
+static int strict;
+static int do_fsck_object;
+static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
+static int verbose;
+static const char *progress_title;
+static int show_resolving_progress;
+static int show_stat;
+static int check_self_contained_and_connected;
+
+static struct progress *progress;
+
+/* We always read in 4kB chunks. */
+static unsigned char input_buffer[4096];
+static unsigned int input_offset, input_len;
+static off_t consumed_bytes;
+static off_t max_input_size;
+static unsigned deepest_delta;
+static git_hash_ctx input_ctx;
+static uint32_t input_crc32;
+static int input_fd, output_fd;
+static const char *curr_pack;
+
+static struct thread_local *thread_data;
+static int nr_dispatched;
+static int threads_active;
+
+static pthread_mutex_t read_mutex;
+#define read_lock() lock_mutex(&read_mutex)
+#define read_unlock() unlock_mutex(&read_mutex)
+
+static pthread_mutex_t counter_mutex;
+#define counter_lock() lock_mutex(&counter_mutex)
+#define counter_unlock() unlock_mutex(&counter_mutex)
+
+static pthread_mutex_t work_mutex;
+#define work_lock() lock_mutex(&work_mutex)
+#define work_unlock() unlock_mutex(&work_mutex)
+
+static pthread_mutex_t deepest_delta_mutex;
+#define deepest_delta_lock() lock_mutex(&deepest_delta_mutex)
+#define deepest_delta_unlock() unlock_mutex(&deepest_delta_mutex)
+
+static pthread_key_t key;
+
+static inline void lock_mutex(pthread_mutex_t *mutex)
+{
+ if (threads_active)
+ pthread_mutex_lock(mutex);
+}
+
+static inline void unlock_mutex(pthread_mutex_t *mutex)
+{
+ if (threads_active)
+ pthread_mutex_unlock(mutex);
+}
+
+/*
+ * Mutexes and condition variables can't be statically initialized on Windows.
+ */
+static void init_thread(void)
+{
+ int i;
+ init_recursive_mutex(&read_mutex);
+ pthread_mutex_init(&counter_mutex, NULL);
+ pthread_mutex_init(&work_mutex, NULL);
+ if (show_stat)
+ pthread_mutex_init(&deepest_delta_mutex, NULL);
+ pthread_key_create(&key, NULL);
+ CALLOC_ARRAY(thread_data, nr_threads);
+ for (i = 0; i < nr_threads; i++) {
+ thread_data[i].pack_fd = xopen(curr_pack, O_RDONLY);
+ }
+
+ threads_active = 1;
+}
+
+static void cleanup_thread(void)
+{
+ int i;
+ if (!threads_active)
+ return;
+ threads_active = 0;
+ pthread_mutex_destroy(&read_mutex);
+ pthread_mutex_destroy(&counter_mutex);
+ pthread_mutex_destroy(&work_mutex);
+ if (show_stat)
+ pthread_mutex_destroy(&deepest_delta_mutex);
+ for (i = 0; i < nr_threads; i++)
+ close(thread_data[i].pack_fd);
+ pthread_key_delete(key);
+ free(thread_data);
+}
+
+static int mark_link(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
+{
+ if (!obj)
+ return -1;
+
+ if (type != OBJ_ANY && obj->type != type)
+ die(_("object type mismatch at %s"), oid_to_hex(&obj->oid));
+
+ obj->flags |= FLAG_LINK;
+ return 0;
+}
+
+/*
+ * The content of each linked object must have been checked,
+ * or it must already be present in the object database.
+ */
+static unsigned check_object(struct object *obj)
+{
+ if (!obj)
+ return 0;
+
+ if (!(obj->flags & FLAG_LINK))
+ return 0;
+
+ if (!(obj->flags & FLAG_CHECKED)) {
+ unsigned long size;
+ int type = oid_object_info(the_repository, &obj->oid, &size);
+ if (type <= 0)
+ die(_("did not receive expected object %s"),
+ oid_to_hex(&obj->oid));
+ if (type != obj->type)
+ die(_("object %s: expected type %s, found %s"),
+ oid_to_hex(&obj->oid),
+ type_name(obj->type), type_name(type));
+ obj->flags |= FLAG_CHECKED;
+ return 1;
+ }
+
+ return 0;
+}
+
+static unsigned check_objects(void)
+{
+ unsigned i, max, foreign_nr = 0;
+
+ max = get_max_object_index();
+
+ if (verbose)
+ progress = start_delayed_progress(_("Checking objects"), max);
+
+ for (i = 0; i < max; i++) {
+ foreign_nr += check_object(get_indexed_object(i));
+ display_progress(progress, i + 1);
+ }
+
+ stop_progress(&progress);
+ return foreign_nr;
+}
+
+
+/* Discard the used portion of the input buffer (flushing it to the output pack first). */
+static void flush(void)
+{
+ if (input_offset) {
+ if (output_fd >= 0)
+ write_or_die(output_fd, input_buffer, input_offset);
+ the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
+ memmove(input_buffer, input_buffer + input_offset, input_len);
+ input_offset = 0;
+ }
+}
+
+/*
+ * Make sure at least "min" bytes are available in the buffer, and
+ * return the pointer to the buffer.
+ */
+static void *fill(int min)
+{
+ if (min <= input_len)
+ return input_buffer + input_offset;
+ if (min > sizeof(input_buffer))
+ die(Q_("cannot fill %d byte",
+ "cannot fill %d bytes",
+ min),
+ min);
+ flush();
+ do {
+ ssize_t ret = xread(input_fd, input_buffer + input_len,
+ sizeof(input_buffer) - input_len);
+ if (ret <= 0) {
+ if (!ret)
+ die(_("early EOF"));
+ die_errno(_("read error on input"));
+ }
+ input_len += ret;
+ if (from_stdin)
+ display_throughput(progress, consumed_bytes + input_len);
+ } while (input_len < min);
+ return input_buffer;
+}
+
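+/*
+ * Consume "bytes" bytes from the front of the input buffer, folding them
+ * into the running CRC32 and the total consumed-byte count.
+ */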
+static void use(int bytes)
+{
+ if (bytes > input_len)
+ die(_("used more bytes than were available"));
+ input_crc32 = crc32(input_crc32, input_buffer + input_offset, bytes);
+ input_len -= bytes;
+ input_offset += bytes;
+
+ /* make sure off_t is sufficiently large not to wrap */
+ if (signed_add_overflows(consumed_bytes, bytes))
+ die(_("pack too large for current definition of off_t"));
+ consumed_bytes += bytes;
+ if (max_input_size && consumed_bytes > max_input_size) {
+ struct strbuf size_limit = STRBUF_INIT;
+ strbuf_humanise_bytes(&size_limit, max_input_size);
+ die(_("pack exceeds maximum allowed size (%s)"),
+ size_limit.buf);
+ }
+}
+
+static const char *open_pack_file(const char *pack_name)
+{
+ if (from_stdin) {
+ input_fd = 0;
+ if (!pack_name) {
+ struct strbuf tmp_file = STRBUF_INIT;
+ output_fd = odb_mkstemp(&tmp_file,
+ "pack/tmp_pack_XXXXXX");
+ pack_name = strbuf_detach(&tmp_file, NULL);
+ } else {
+ output_fd = xopen(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600);
+ }
+ nothread_data.pack_fd = output_fd;
+ } else {
+ input_fd = xopen(pack_name, O_RDONLY);
+ output_fd = -1;
+ nothread_data.pack_fd = input_fd;
+ }
+ the_hash_algo->init_fn(&input_ctx);
+ return pack_name;
+}
+
+static void parse_pack_header(void)
+{
+ struct pack_header *hdr = fill(sizeof(struct pack_header));
+
+ /* Header consistency check */
+ if (hdr->hdr_signature != htonl(PACK_SIGNATURE))
+ die(_("pack signature mismatch"));
+ if (!pack_version_ok(hdr->hdr_version))
+ die(_("pack version %"PRIu32" unsupported"),
+ ntohl(hdr->hdr_version));
+
+ nr_objects = ntohl(hdr->hdr_entries);
+ use(sizeof(struct pack_header));
+}
+
+__attribute__((format (printf, 2, 3)))
+static NORETURN void bad_object(off_t offset, const char *format, ...)
+{
+ va_list params;
+ char buf[1024];
+
+ va_start(params, format);
+ vsnprintf(buf, sizeof(buf), format, params);
+ va_end(params);
+ die(_("pack has bad object at offset %"PRIuMAX": %s"),
+ (uintmax_t)offset, buf);
+}
+
+static inline struct thread_local *get_thread_data(void)
+{
+ if (HAVE_THREADS) {
+ if (threads_active)
+ return pthread_getspecific(key);
+ assert(!threads_active &&
+ "This should only be reached when all threads are gone");
+ }
+ return &nothread_data;
+}
+
+static void set_thread_data(struct thread_local *data)
+{
+ if (threads_active)
+ pthread_setspecific(key, data);
+}
+
+static void free_base_data(struct base_data *c)
+{
+ if (c->data) {
+ FREE_AND_NULL(c->data);
+ base_cache_used -= c->size;
+ }
+}
+
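+/*
+ * Evict cached delta-base data, oldest entries first (done_head before
+ * work_head), until base_cache_used drops back under base_cache_limit.
+ * Entries with retain_data set, and the "retain" entry itself, are kept.
+ */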
+static void prune_base_data(struct base_data *retain)
+{
+ struct list_head *pos;
+
+ if (base_cache_used <= base_cache_limit)
+ return;
+
+ list_for_each_prev(pos, &done_head) {
+ struct base_data *b = list_entry(pos, struct base_data, list);
+ if (b->retain_data || b == retain)
+ continue;
+ if (b->data) {
+ free_base_data(b);
+ if (base_cache_used <= base_cache_limit)
+ return;
+ }
+ }
+
+ list_for_each_prev(pos, &work_head) {
+ struct base_data *b = list_entry(pos, struct base_data, list);
+ if (b->retain_data || b == retain)
+ continue;
+ if (b->data) {
+ free_base_data(b);
+ if (base_cache_used <= base_cache_limit)
+ return;
+ }
+ }
+}
+
+static int is_delta_type(enum object_type type)
+{
+ return (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA);
+}
+
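+/*
+ * Inflate one object's data from the input stream.  Non-delta objects are
+ * hashed into *oid as they are inflated.  Blobs larger than
+ * big_file_threshold are streamed through a small fixed buffer and NULL
+ * is returned instead of a malloc'd copy; such objects are re-read from
+ * the pack later.
+ */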
+static void *unpack_entry_data(off_t offset, unsigned long size,
+ enum object_type type, struct object_id *oid)
+{
+ static char fixed_buf[8192];
+ int status;
+ git_zstream stream;
+ void *buf;
+ git_hash_ctx c;
+ char hdr[32];
+ int hdrlen;
+
+ if (!is_delta_type(type)) {
+ hdrlen = format_object_header(hdr, sizeof(hdr), type, size);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ } else
+ oid = NULL;
+ if (type == OBJ_BLOB && size > big_file_threshold)
+ buf = fixed_buf;
+ else
+ buf = xmallocz(size);
+
+ memset(&stream, 0, sizeof(stream));
+ git_inflate_init(&stream);
+ stream.next_out = buf;
+ stream.avail_out = buf == fixed_buf ? sizeof(fixed_buf) : size;
+
+ do {
+ unsigned char *last_out = stream.next_out;
+ stream.next_in = fill(1);
+ stream.avail_in = input_len;
+ status = git_inflate(&stream, 0);
+ use(input_len - stream.avail_in);
+ if (oid)
+ the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
+ if (buf == fixed_buf) {
+ stream.next_out = buf;
+ stream.avail_out = sizeof(fixed_buf);
+ }
+ } while (status == Z_OK);
+ if (stream.total_out != size || status != Z_STREAM_END)
+ bad_object(offset, _("inflate returned %d"), status);
+ git_inflate_end(&stream);
+ if (oid)
+ the_hash_algo->final_oid_fn(oid, &c);
+ return buf == fixed_buf ? NULL : buf;
+}
+
+static void *unpack_raw_entry(struct object_entry *obj,
+ off_t *ofs_offset,
+ struct object_id *ref_oid,
+ struct object_id *oid)
+{
+ unsigned char *p;
+ unsigned long size, c;
+ off_t base_offset;
+ unsigned shift;
+ void *data;
+
+ obj->idx.offset = consumed_bytes;
+ input_crc32 = crc32(0, NULL, 0);
+
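+	/*
+	 * Parse the object header: bits 4-6 of the first byte give the type,
+	 * its low 4 bits are the least-significant size bits, and while the
+	 * MSB is set each following byte contributes 7 more size bits.
+	 */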
+ p = fill(1);
+ c = *p;
+ use(1);
+ obj->type = (c >> 4) & 7;
+ size = (c & 15);
+ shift = 4;
+ while (c & 0x80) {
+ p = fill(1);
+ c = *p;
+ use(1);
+ size += (c & 0x7f) << shift;
+ shift += 7;
+ }
+ obj->size = size;
+
+ switch (obj->type) {
+ case OBJ_REF_DELTA:
+ oidread(ref_oid, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
+ break;
+ case OBJ_OFS_DELTA:
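+		/*
+		 * The base is given as a distance back from this object's own
+		 * offset, encoded as a big-endian base-128 varint in which
+		 * each continuation byte also adds 1, so that every distance
+		 * has exactly one encoding.
+		 */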
+ p = fill(1);
+ c = *p;
+ use(1);
+ base_offset = c & 127;
+ while (c & 128) {
+ base_offset += 1;
+ if (!base_offset || MSB(base_offset, 7))
+ bad_object(obj->idx.offset, _("offset value overflow for delta base object"));
+ p = fill(1);
+ c = *p;
+ use(1);
+ base_offset = (base_offset << 7) + (c & 127);
+ }
+ *ofs_offset = obj->idx.offset - base_offset;
+ if (*ofs_offset <= 0 || *ofs_offset >= obj->idx.offset)
+ bad_object(obj->idx.offset, _("delta base offset is out of bound"));
+ break;
+ case OBJ_COMMIT:
+ case OBJ_TREE:
+ case OBJ_BLOB:
+ case OBJ_TAG:
+ break;
+ default:
+ bad_object(obj->idx.offset, _("unknown object type %d"), obj->type);
+ }
+ obj->hdr_size = consumed_bytes - obj->idx.offset;
+
+ data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
+ obj->idx.crc32 = input_crc32;
+ return data;
+}
+
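+/*
+ * Re-read an object's compressed data directly from the pack file and
+ * inflate it.  With a "consume" callback the output is streamed through a
+ * 64 kB window and NULL is returned; without one, the fully inflated
+ * buffer is returned.
+ */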
+static void *unpack_data(struct object_entry *obj,
+ int (*consume)(const unsigned char *, unsigned long, void *),
+ void *cb_data)
+{
+ off_t from = obj[0].idx.offset + obj[0].hdr_size;
+ off_t len = obj[1].idx.offset - from;
+ unsigned char *data, *inbuf;
+ git_zstream stream;
+ int status;
+
+ data = xmallocz(consume ? 64*1024 : obj->size);
+ inbuf = xmalloc((len < 64*1024) ? (int)len : 64*1024);
+
+ memset(&stream, 0, sizeof(stream));
+ git_inflate_init(&stream);
+ stream.next_out = data;
+ stream.avail_out = consume ? 64*1024 : obj->size;
+
+ do {
+ ssize_t n = (len < 64*1024) ? (ssize_t)len : 64*1024;
+ n = xpread(get_thread_data()->pack_fd, inbuf, n, from);
+ if (n < 0)
+ die_errno(_("cannot pread pack file"));
+ if (!n)
+ die(Q_("premature end of pack file, %"PRIuMAX" byte missing",
+ "premature end of pack file, %"PRIuMAX" bytes missing",
+ len),
+ (uintmax_t)len);
+ from += n;
+ len -= n;
+ stream.next_in = inbuf;
+ stream.avail_in = n;
+ if (!consume)
+ status = git_inflate(&stream, 0);
+ else {
+ do {
+ status = git_inflate(&stream, 0);
+ if (consume(data, stream.next_out - data, cb_data)) {
+ free(inbuf);
+ free(data);
+ return NULL;
+ }
+ stream.next_out = data;
+ stream.avail_out = 64*1024;
+ } while (status == Z_OK && stream.avail_in);
+ }
+ } while (len && status == Z_OK && !stream.avail_in);
+
+ /* This has been inflated OK when first encountered, so... */
+ if (status != Z_STREAM_END || stream.total_out != obj->size)
+ die(_("serious inflate inconsistency"));
+
+ git_inflate_end(&stream);
+ free(inbuf);
+ if (consume) {
+ FREE_AND_NULL(data);
+ }
+ return data;
+}
+
+static void *get_data_from_pack(struct object_entry *obj)
+{
+ return unpack_data(obj, NULL, NULL);
+}
+
+static int compare_ofs_delta_bases(off_t offset1, off_t offset2,
+ enum object_type type1,
+ enum object_type type2)
+{
+ int cmp = type1 - type2;
+ if (cmp)
+ return cmp;
+ return offset1 < offset2 ? -1 :
+ offset1 > offset2 ? 1 :
+ 0;
+}
+
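+/*
+ * Binary-search ofs_deltas[] for an entry whose base offset matches;
+ * returns its index, or -first-1 (the negated insertion point) if none match.
+ */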
+static int find_ofs_delta(const off_t offset)
+{
+ int first = 0, last = nr_ofs_deltas;
+
+ while (first < last) {
+ int next = first + (last - first) / 2;
+ struct ofs_delta_entry *delta = &ofs_deltas[next];
+ int cmp;
+
+ cmp = compare_ofs_delta_bases(offset, delta->offset,
+ OBJ_OFS_DELTA,
+ objects[delta->obj_no].type);
+ if (!cmp)
+ return next;
+ if (cmp < 0) {
+ last = next;
+ continue;
+ }
+ first = next+1;
+ }
+ return -first-1;
+}
+
+static void find_ofs_delta_children(off_t offset,
+ int *first_index, int *last_index)
+{
+ int first = find_ofs_delta(offset);
+ int last = first;
+ int end = nr_ofs_deltas - 1;
+
+ if (first < 0) {
+ *first_index = 0;
+ *last_index = -1;
+ return;
+ }
+ while (first > 0 && ofs_deltas[first - 1].offset == offset)
+ --first;
+ while (last < end && ofs_deltas[last + 1].offset == offset)
+ ++last;
+ *first_index = first;
+ *last_index = last;
+}
+
+static int compare_ref_delta_bases(const struct object_id *oid1,
+ const struct object_id *oid2,
+ enum object_type type1,
+ enum object_type type2)
+{
+ int cmp = type1 - type2;
+ if (cmp)
+ return cmp;
+ return oidcmp(oid1, oid2);
+}
+
+static int find_ref_delta(const struct object_id *oid)
+{
+ int first = 0, last = nr_ref_deltas;
+
+ while (first < last) {
+ int next = first + (last - first) / 2;
+ struct ref_delta_entry *delta = &ref_deltas[next];
+ int cmp;
+
+ cmp = compare_ref_delta_bases(oid, &delta->oid,
+ OBJ_REF_DELTA,
+ objects[delta->obj_no].type);
+ if (!cmp)
+ return next;
+ if (cmp < 0) {
+ last = next;
+ continue;
+ }
+ first = next+1;
+ }
+ return -first-1;
+}
+
+static void find_ref_delta_children(const struct object_id *oid,
+ int *first_index, int *last_index)
+{
+ int first = find_ref_delta(oid);
+ int last = first;
+ int end = nr_ref_deltas - 1;
+
+ if (first < 0) {
+ *first_index = 0;
+ *last_index = -1;
+ return;
+ }
+ while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid))
+ --first;
+ while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
+ ++last;
+ *first_index = first;
+ *last_index = last;
+}
+
+struct compare_data {
+ struct object_entry *entry;
+ struct git_istream *st;
+ unsigned char *buf;
+ unsigned long buf_size;
+};
+
+static int compare_objects(const unsigned char *buf, unsigned long size,
+ void *cb_data)
+{
+ struct compare_data *data = cb_data;
+
+ if (data->buf_size < size) {
+ free(data->buf);
+ data->buf = xmalloc(size);
+ data->buf_size = size;
+ }
+
+ while (size) {
+ ssize_t len = read_istream(data->st, data->buf, size);
+ if (len == 0)
+ die(_("SHA1 COLLISION FOUND WITH %s !"),
+ oid_to_hex(&data->entry->idx.oid));
+ if (len < 0)
+ die(_("unable to read %s"),
+ oid_to_hex(&data->entry->idx.oid));
+ if (memcmp(buf, data->buf, len))
+ die(_("SHA1 COLLISION FOUND WITH %s !"),
+ oid_to_hex(&data->entry->idx.oid));
+ size -= len;
+ buf += len;
+ }
+ return 0;
+}
+
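+/*
+ * For blobs too large to keep in memory, stream the packed data and
+ * compare it against the object already in the repository.  Returns 0
+ * when the comparison was performed (dying on any mismatch) and -1 when
+ * it does not apply or the existing object cannot be streamed.
+ */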
+static int check_collision(struct object_entry *entry)
+{
+ struct compare_data data;
+ enum object_type type;
+ unsigned long size;
+
+ if (entry->size <= big_file_threshold || entry->type != OBJ_BLOB)
+ return -1;
+
+ memset(&data, 0, sizeof(data));
+ data.entry = entry;
+ data.st = open_istream(the_repository, &entry->idx.oid, &type, &size,
+ NULL);
+ if (!data.st)
+ return -1;
+ if (size != entry->size || type != entry->type)
+ die(_("SHA1 COLLISION FOUND WITH %s !"),
+ oid_to_hex(&entry->idx.oid));
+ unpack_data(entry, compare_objects, &data);
+ close_istream(data.st);
+ free(data.buf);
+ return 0;
+}
+
+static void sha1_object(const void *data, struct object_entry *obj_entry,
+ unsigned long size, enum object_type type,
+ const struct object_id *oid)
+{
+ void *new_data = NULL;
+ int collision_test_needed = 0;
+
+ assert(data || obj_entry);
+
+ if (startup_info->have_repository) {
+ read_lock();
+ collision_test_needed =
+ has_object_file_with_flags(oid, OBJECT_INFO_QUICK);
+ read_unlock();
+ }
+
+ if (collision_test_needed && !data) {
+ read_lock();
+		if (!check_collision(obj_entry))
+ collision_test_needed = 0;
+ read_unlock();
+ }
+ if (collision_test_needed) {
+ void *has_data;
+ enum object_type has_type;
+ unsigned long has_size;
+ read_lock();
+ has_type = oid_object_info(the_repository, oid, &has_size);
+ if (has_type < 0)
+ die(_("cannot read existing object info %s"), oid_to_hex(oid));
+ if (has_type != type || has_size != size)
+ die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
+ has_data = read_object_file(oid, &has_type, &has_size);
+ read_unlock();
+ if (!data)
+ data = new_data = get_data_from_pack(obj_entry);
+ if (!has_data)
+ die(_("cannot read existing object %s"), oid_to_hex(oid));
+ if (size != has_size || type != has_type ||
+ memcmp(data, has_data, size) != 0)
+ die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
+ free(has_data);
+ }
+
+ if (strict || do_fsck_object) {
+ read_lock();
+ if (type == OBJ_BLOB) {
+ struct blob *blob = lookup_blob(the_repository, oid);
+ if (blob)
+ blob->object.flags |= FLAG_CHECKED;
+ else
+ die(_("invalid blob object %s"), oid_to_hex(oid));
+ if (do_fsck_object &&
+ fsck_object(&blob->object, (void *)data, size, &fsck_options))
+ die(_("fsck error in packed object"));
+ } else {
+ struct object *obj;
+ int eaten;
+ void *buf = (void *) data;
+
+ assert(data && "data can only be NULL for large _blobs_");
+
+ /*
+			 * We do not need to free the memory here, as the
+			 * buffer is freed by the caller.
+ */
+ obj = parse_object_buffer(the_repository, oid, type,
+ size, buf,
+ &eaten);
+ if (!obj)
+ die(_("invalid %s"), type_name(type));
+ if (do_fsck_object &&
+ fsck_object(obj, buf, size, &fsck_options))
+ die(_("fsck error in packed object"));
+ if (strict && fsck_walk(obj, NULL, &fsck_options))
+ die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
+
+ if (obj->type == OBJ_TREE) {
+ struct tree *item = (struct tree *) obj;
+ item->buffer = NULL;
+ obj->parsed = 0;
+ }
+ if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
+ if (detach_commit_buffer(commit, NULL) != data)
+ BUG("parse_object_buffer transmogrified our buffer");
+ }
+ obj->flags |= FLAG_CHECKED;
+ }
+ read_unlock();
+ }
+
+ free(new_data);
+}
+
+/*
+ * Ensure that this node has been reconstructed and return its contents.
+ *
+ * In the typical and best case, this node would already be reconstructed
+ * (through the invocation to resolve_delta() in threaded_second_pass()) and it
+ * would not be pruned. However, if pruning of this node was necessary due to
+ * reaching delta_base_cache_limit, this function will find the closest
+ * ancestor with reconstructed data that has not been pruned (or if there is
+ * none, the ultimate base object), and reconstruct each node in the delta
+ * chain in order to generate the reconstructed data for this node.
+ */
+static void *get_base_data(struct base_data *c)
+{
+ if (!c->data) {
+ struct object_entry *obj = c->obj;
+ struct base_data **delta = NULL;
+ int delta_nr = 0, delta_alloc = 0;
+
+ while (is_delta_type(c->obj->type) && !c->data) {
+ ALLOC_GROW(delta, delta_nr + 1, delta_alloc);
+ delta[delta_nr++] = c;
+ c = c->base;
+ }
+ if (!delta_nr) {
+ c->data = get_data_from_pack(obj);
+ c->size = obj->size;
+ base_cache_used += c->size;
+ prune_base_data(c);
+ }
+ for (; delta_nr > 0; delta_nr--) {
+ void *base, *raw;
+ c = delta[delta_nr - 1];
+ obj = c->obj;
+ base = get_base_data(c->base);
+ raw = get_data_from_pack(obj);
+ c->data = patch_delta(
+ base, c->base->size,
+ raw, obj->size,
+ &c->size);
+ free(raw);
+ if (!c->data)
+ bad_object(obj->idx.offset, _("failed to apply delta"));
+ base_cache_used += c->size;
+ prune_base_data(c);
+ }
+ free(delta);
+ }
+ return c->data;
+}
+
+static struct base_data *make_base(struct object_entry *obj,
+ struct base_data *parent)
+{
+ struct base_data *base = xcalloc(1, sizeof(struct base_data));
+ base->base = parent;
+ base->obj = obj;
+ find_ref_delta_children(&obj->idx.oid,
+ &base->ref_first, &base->ref_last);
+ find_ofs_delta_children(obj->idx.offset,
+ &base->ofs_first, &base->ofs_last);
+ base->children_remaining = base->ref_last - base->ref_first +
+ base->ofs_last - base->ofs_first + 2;
+ return base;
+}
+
+static struct base_data *resolve_delta(struct object_entry *delta_obj,
+ struct base_data *base)
+{
+ void *delta_data, *result_data;
+ struct base_data *result;
+ unsigned long result_size;
+
+ if (show_stat) {
+ int i = delta_obj - objects;
+ int j = base->obj - objects;
+ obj_stat[i].delta_depth = obj_stat[j].delta_depth + 1;
+ deepest_delta_lock();
+ if (deepest_delta < obj_stat[i].delta_depth)
+ deepest_delta = obj_stat[i].delta_depth;
+ deepest_delta_unlock();
+ obj_stat[i].base_object_no = j;
+ }
+ delta_data = get_data_from_pack(delta_obj);
+ assert(base->data);
+ result_data = patch_delta(base->data, base->size,
+ delta_data, delta_obj->size, &result_size);
+ free(delta_data);
+ if (!result_data)
+ bad_object(delta_obj->idx.offset, _("failed to apply delta"));
+ hash_object_file(the_hash_algo, result_data, result_size,
+ delta_obj->real_type, &delta_obj->idx.oid);
+ sha1_object(result_data, NULL, result_size, delta_obj->real_type,
+ &delta_obj->idx.oid);
+
+ result = make_base(delta_obj, base);
+ result->data = result_data;
+ result->size = result_size;
+
+ counter_lock();
+ nr_resolved_deltas++;
+ counter_unlock();
+
+ return result;
+}
+
+static int compare_ofs_delta_entry(const void *a, const void *b)
+{
+ const struct ofs_delta_entry *delta_a = a;
+ const struct ofs_delta_entry *delta_b = b;
+
+ return delta_a->offset < delta_b->offset ? -1 :
+ delta_a->offset > delta_b->offset ? 1 :
+ 0;
+}
+
+static int compare_ref_delta_entry(const void *a, const void *b)
+{
+ const struct ref_delta_entry *delta_a = a;
+ const struct ref_delta_entry *delta_b = b;
+
+ return oidcmp(&delta_a->oid, &delta_b->oid);
+}
+
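+/*
+ * Worker loop for delta resolution.  Each iteration either dispatches a
+ * non-delta object from the objects[] array or takes a pending child from
+ * the base at the top of work_head, resolves it against its parent's
+ * data, and pushes it onto work_head if it has delta children of its own.
+ */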
+static void *threaded_second_pass(void *data)
+{
+ if (data)
+ set_thread_data(data);
+ for (;;) {
+ struct base_data *parent = NULL;
+ struct object_entry *child_obj;
+ struct base_data *child;
+
+ counter_lock();
+ display_progress(progress, nr_resolved_deltas);
+ counter_unlock();
+
+ work_lock();
+ if (list_empty(&work_head)) {
+ /*
+ * Take an object from the object array.
+ */
+ while (nr_dispatched < nr_objects &&
+ is_delta_type(objects[nr_dispatched].type))
+ nr_dispatched++;
+ if (nr_dispatched >= nr_objects) {
+ work_unlock();
+ break;
+ }
+ child_obj = &objects[nr_dispatched++];
+ } else {
+ /*
+ * Peek at the top of the stack, and take a child from
+ * it.
+ */
+ parent = list_first_entry(&work_head, struct base_data,
+ list);
+
+ if (parent->ref_first <= parent->ref_last) {
+ int offset = ref_deltas[parent->ref_first++].obj_no;
+ child_obj = objects + offset;
+ if (child_obj->real_type != OBJ_REF_DELTA)
+ die("REF_DELTA at offset %"PRIuMAX" already resolved (duplicate base %s?)",
+ (uintmax_t) child_obj->idx.offset,
+ oid_to_hex(&parent->obj->idx.oid));
+ child_obj->real_type = parent->obj->real_type;
+ } else {
+ child_obj = objects +
+ ofs_deltas[parent->ofs_first++].obj_no;
+ assert(child_obj->real_type == OBJ_OFS_DELTA);
+ child_obj->real_type = parent->obj->real_type;
+ }
+
+ if (parent->ref_first > parent->ref_last &&
+ parent->ofs_first > parent->ofs_last) {
+ /*
+ * This parent has run out of children, so move
+ * it to done_head.
+ */
+ list_del(&parent->list);
+ list_add(&parent->list, &done_head);
+ }
+
+ /*
+ * Ensure that the parent has data, since we will need
+ * it later.
+ *
+ * NEEDSWORK: If parent data needs to be reloaded, this
+ * prolongs the time that the current thread spends in
+ * the mutex. A mitigating factor is that parent data
+ * needs to be reloaded only if the delta base cache
+ * limit is exceeded, so in the typical case, this does
+ * not happen.
+ */
+ get_base_data(parent);
+ parent->retain_data++;
+ }
+ work_unlock();
+
+ if (parent) {
+ child = resolve_delta(child_obj, parent);
+ if (!child->children_remaining)
+ FREE_AND_NULL(child->data);
+ } else {
+ child = make_base(child_obj, NULL);
+ if (child->children_remaining) {
+ /*
+ * Since this child has its own delta children,
+ * we will need this data in the future.
+ * Inflate now so that future iterations will
+ * have access to this object's data while
+ * outside the work mutex.
+ */
+ child->data = get_data_from_pack(child_obj);
+ child->size = child_obj->size;
+ }
+ }
+
+ work_lock();
+ if (parent)
+ parent->retain_data--;
+ if (child->data) {
+ /*
+ * This child has its own children, so add it to
+ * work_head.
+ */
+ list_add(&child->list, &work_head);
+ base_cache_used += child->size;
+ prune_base_data(NULL);
+ free_base_data(child);
+ } else {
+ /*
+ * This child does not have its own children. It may be
+ * the last descendant of its ancestors; free those
+ * that we can.
+ */
+ struct base_data *p = parent;
+
+ while (p) {
+ struct base_data *next_p;
+
+ p->children_remaining--;
+ if (p->children_remaining)
+ break;
+
+ next_p = p->base;
+ free_base_data(p);
+ list_del(&p->list);
+ free(p);
+
+ p = next_p;
+ }
+ FREE_AND_NULL(child);
+ }
+ work_unlock();
+ }
+ return NULL;
+}
+
+/*
+ * First pass:
+ * - find locations of all objects;
+ * - calculate SHA1 of all non-delta objects;
+ * - remember base (SHA1 or offset) for all deltas.
+ */
+static void parse_pack_objects(unsigned char *hash)
+{
+ int i, nr_delays = 0;
+ struct ofs_delta_entry *ofs_delta = ofs_deltas;
+ struct object_id ref_delta_oid;
+ struct stat st;
+
+ if (verbose)
+ progress = start_progress(
+ progress_title ? progress_title :
+ from_stdin ? _("Receiving objects") : _("Indexing objects"),
+ nr_objects);
+ for (i = 0; i < nr_objects; i++) {
+ struct object_entry *obj = &objects[i];
+ void *data = unpack_raw_entry(obj, &ofs_delta->offset,
+ &ref_delta_oid,
+ &obj->idx.oid);
+ obj->real_type = obj->type;
+ if (obj->type == OBJ_OFS_DELTA) {
+ nr_ofs_deltas++;
+ ofs_delta->obj_no = i;
+ ofs_delta++;
+ } else if (obj->type == OBJ_REF_DELTA) {
+ ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
+ oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
+ ref_deltas[nr_ref_deltas].obj_no = i;
+ nr_ref_deltas++;
+ } else if (!data) {
+ /* large blobs, check later */
+ obj->real_type = OBJ_BAD;
+ nr_delays++;
+ } else
+ sha1_object(data, NULL, obj->size, obj->type,
+ &obj->idx.oid);
+ free(data);
+ display_progress(progress, i+1);
+ }
+ objects[i].idx.offset = consumed_bytes;
+ stop_progress(&progress);
+
+ /* Check pack integrity */
+ flush();
+ the_hash_algo->final_fn(hash, &input_ctx);
+ if (!hasheq(fill(the_hash_algo->rawsz), hash))
+ die(_("pack is corrupted (SHA1 mismatch)"));
+ use(the_hash_algo->rawsz);
+
+ /* If input_fd is a file, we should have reached its end now. */
+ if (fstat(input_fd, &st))
+ die_errno(_("cannot fstat packfile"));
+ if (S_ISREG(st.st_mode) &&
+ lseek(input_fd, 0, SEEK_CUR) - input_len != st.st_size)
+ die(_("pack has junk at the end"));
+
+ for (i = 0; i < nr_objects; i++) {
+ struct object_entry *obj = &objects[i];
+ if (obj->real_type != OBJ_BAD)
+ continue;
+ obj->real_type = obj->type;
+ sha1_object(NULL, obj, obj->size, obj->type,
+ &obj->idx.oid);
+ nr_delays--;
+ }
+ if (nr_delays)
+ die(_("confusion beyond insanity in parse_pack_objects()"));
+}
+
+/*
+ * Second pass:
+ * - for each non-delta object, check whether it is used as a base
+ *   for deltas;
+ * - if used as a base, uncompress the object and apply all deltas,
+ * recursively checking if the resulting object is used as a base
+ * for some more deltas.
+ */
+static void resolve_deltas(void)
+{
+ int i;
+
+ if (!nr_ofs_deltas && !nr_ref_deltas)
+ return;
+
+ /* Sort deltas by base SHA1/offset for fast searching */
+ QSORT(ofs_deltas, nr_ofs_deltas, compare_ofs_delta_entry);
+ QSORT(ref_deltas, nr_ref_deltas, compare_ref_delta_entry);
+
+ if (verbose || show_resolving_progress)
+ progress = start_progress(_("Resolving deltas"),
+ nr_ref_deltas + nr_ofs_deltas);
+
+ nr_dispatched = 0;
+ base_cache_limit = delta_base_cache_limit * nr_threads;
+ if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) {
+ init_thread();
+ for (i = 0; i < nr_threads; i++) {
+ int ret = pthread_create(&thread_data[i].thread, NULL,
+ threaded_second_pass, thread_data + i);
+ if (ret)
+ die(_("unable to create thread: %s"),
+ strerror(ret));
+ }
+ for (i = 0; i < nr_threads; i++)
+ pthread_join(thread_data[i].thread, NULL);
+ cleanup_thread();
+ return;
+ }
+ threaded_second_pass(&nothread_data);
+}
+
+/*
+ * Third pass:
+ * - append objects to convert thin pack to full pack if required
+ * - write the final pack hash
+ */
+static void fix_unresolved_deltas(struct hashfile *f);
+static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
+{
+ if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
+ stop_progress(&progress);
+ /* Flush remaining pack final hash. */
+ flush();
+ return;
+ }
+
+ if (fix_thin_pack) {
+ struct hashfile *f;
+ unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
+ struct strbuf msg = STRBUF_INIT;
+ int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
+ int nr_objects_initial = nr_objects;
+ if (nr_unresolved <= 0)
+ die(_("confusion beyond insanity"));
+ REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
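+		/*
+		 * Zero only the newly added slots; objects[nr_objects] already
+		 * holds the end-of-pack sentinel offset and must stay intact.
+		 */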
+ memset(objects + nr_objects + 1, 0,
+ nr_unresolved * sizeof(*objects));
+ f = hashfd(output_fd, curr_pack);
+ fix_unresolved_deltas(f);
+ strbuf_addf(&msg, Q_("completed with %d local object",
+ "completed with %d local objects",
+ nr_objects - nr_objects_initial),
+ nr_objects - nr_objects_initial);
+ stop_progress_msg(&progress, msg.buf);
+ strbuf_release(&msg);
+ finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0);
+ hashcpy(read_hash, pack_hash);
+ fixup_pack_header_footer(output_fd, pack_hash,
+ curr_pack, nr_objects,
+ read_hash, consumed_bytes-the_hash_algo->rawsz);
+ if (!hasheq(read_hash, tail_hash))
+ die(_("Unexpected tail checksum for %s "
+ "(disk corruption?)"), curr_pack);
+ }
+ if (nr_ofs_deltas + nr_ref_deltas != nr_resolved_deltas)
+ die(Q_("pack has %d unresolved delta",
+ "pack has %d unresolved deltas",
+ nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas),
+ nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
+}
+
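+/*
+ * Deflate the buffer "in" and append it to the pack via the hashfile;
+ * returns the number of compressed bytes written.
+ */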
+static int write_compressed(struct hashfile *f, void *in, unsigned int size)
+{
+ git_zstream stream;
+ int status;
+ unsigned char outbuf[4096];
+
+ git_deflate_init(&stream, zlib_compression_level);
+ stream.next_in = in;
+ stream.avail_in = size;
+
+ do {
+ stream.next_out = outbuf;
+ stream.avail_out = sizeof(outbuf);
+ status = git_deflate(&stream, Z_FINISH);
+ hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
+ } while (status == Z_OK);
+
+ if (status != Z_STREAM_END)
+ die(_("unable to deflate appended object (%d)"), status);
+ size = stream.total_out;
+ git_deflate_end(&stream);
+ return size;
+}
+
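+/*
+ * Append a local object to the end of the pack being indexed (used when
+ * completing a thin pack): write the usual type/size header followed by
+ * the deflated contents, and record the new entry in objects[].
+ */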
+static struct object_entry *append_obj_to_pack(struct hashfile *f,
+ const unsigned char *sha1, void *buf,
+ unsigned long size, enum object_type type)
+{
+ struct object_entry *obj = &objects[nr_objects++];
+ unsigned char header[10];
+ unsigned long s = size;
+ int n = 0;
+ unsigned char c = (type << 4) | (s & 15);
+ s >>= 4;
+ while (s) {
+ header[n++] = c | 0x80;
+ c = s & 0x7f;
+ s >>= 7;
+ }
+ header[n++] = c;
+ crc32_begin(f);
+ hashwrite(f, header, n);
+ obj[0].size = size;
+ obj[0].hdr_size = n;
+ obj[0].type = type;
+ obj[0].real_type = type;
+ obj[1].idx.offset = obj[0].idx.offset + n;
+ obj[1].idx.offset += write_compressed(f, buf, size);
+ obj[0].idx.crc32 = crc32_end(f);
+ hashflush(f);
+ oidread(&obj->idx.oid, sha1);
+ return obj;
+}
+
+static int delta_pos_compare(const void *_a, const void *_b)
+{
+ struct ref_delta_entry *a = *(struct ref_delta_entry **)_a;
+ struct ref_delta_entry *b = *(struct ref_delta_entry **)_b;
+ return a->obj_no - b->obj_no;
+}
+
+static void fix_unresolved_deltas(struct hashfile *f)
+{
+ struct ref_delta_entry **sorted_by_pos;
+ int i;
+
+ /*
+ * Since many unresolved deltas may well be themselves base objects
+ * for more unresolved deltas, we really want to include the
+ * smallest number of base objects that would cover as much delta
+	 * as possible by picking the truncated deltas first, allowing for
+	 * other deltas to resolve without
+ * additional base objects. Since most base objects are to be found
+ * before deltas depending on them, a good heuristic is to start
+ * resolving deltas in the same order as their position in the pack.
+ */
+ ALLOC_ARRAY(sorted_by_pos, nr_ref_deltas);
+ for (i = 0; i < nr_ref_deltas; i++)
+ sorted_by_pos[i] = &ref_deltas[i];
+ QSORT(sorted_by_pos, nr_ref_deltas, delta_pos_compare);
+
+ if (has_promisor_remote()) {
+ /*
+ * Prefetch the delta bases.
+ */
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+ for (i = 0; i < nr_ref_deltas; i++) {
+ struct ref_delta_entry *d = sorted_by_pos[i];
+ if (!oid_object_info_extended(the_repository, &d->oid,
+ NULL,
+ OBJECT_INFO_FOR_PREFETCH))
+ continue;
+ oid_array_append(&to_fetch, &d->oid);
+ }
+ promisor_remote_get_direct(the_repository,
+ to_fetch.oid, to_fetch.nr);
+ oid_array_clear(&to_fetch);
+ }
+
+ for (i = 0; i < nr_ref_deltas; i++) {
+ struct ref_delta_entry *d = sorted_by_pos[i];
+ enum object_type type;
+ void *data;
+ unsigned long size;
+
+ if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
+ continue;
+ data = read_object_file(&d->oid, &type, &size);
+ if (!data)
+ continue;
+
+ if (check_object_signature(the_repository, &d->oid, data, size,
+ type) < 0)
+ die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
+
+ /*
+ * Add this as an object to the objects array and call
+ * threaded_second_pass() (which will pick up the added
+ * object).
+ */
+ append_obj_to_pack(f, d->oid.hash, data, size, type);
+ free(data);
+ threaded_second_pass(NULL);
+
+ display_progress(progress, nr_resolved_deltas);
+ }
+ free(sorted_by_pos);
+}
+
+static const char *derive_filename(const char *pack_name, const char *strip,
+ const char *suffix, struct strbuf *buf)
+{
+ size_t len;
+ if (!strip_suffix(pack_name, strip, &len) || !len ||
+ pack_name[len - 1] != '.')
+ die(_("packfile name '%s' does not end with '.%s'"),
+ pack_name, strip);
+ strbuf_add(buf, pack_name, len);
+ strbuf_addstr(buf, suffix);
+ return buf->buf;
+}
+
+static void write_special_file(const char *suffix, const char *msg,
+ const char *pack_name, const unsigned char *hash,
+ const char **report)
+{
+ struct strbuf name_buf = STRBUF_INIT;
+ const char *filename;
+ int fd;
+ int msg_len = strlen(msg);
+
+ if (pack_name)
+ filename = derive_filename(pack_name, "pack", suffix, &name_buf);
+ else
+ filename = odb_pack_name(&name_buf, hash, suffix);
+
+ fd = odb_pack_keep(filename);
+ if (fd < 0) {
+ if (errno != EEXIST)
+ die_errno(_("cannot write %s file '%s'"),
+ suffix, filename);
+ } else {
+ if (msg_len > 0) {
+ write_or_die(fd, msg, msg_len);
+ write_or_die(fd, "\n", 1);
+ }
+ if (close(fd) != 0)
+ die_errno(_("cannot close written %s file '%s'"),
+ suffix, filename);
+ if (report)
+ *report = suffix;
+ }
+ strbuf_release(&name_buf);
+}
+
+static void rename_tmp_packfile(const char **final_name,
+ const char *curr_name,
+ struct strbuf *name, unsigned char *hash,
+ const char *ext, int make_read_only_if_same)
+{
+ if (*final_name != curr_name) {
+ if (!*final_name)
+ *final_name = odb_pack_name(name, hash, ext);
+ if (finalize_object_file(curr_name, *final_name))
+ die(_("unable to rename temporary '*.%s' file to '%s'"),
+ ext, *final_name);
+ } else if (make_read_only_if_same) {
+ chmod(*final_name, 0444);
+ }
+}
+
+static void final(const char *final_pack_name, const char *curr_pack_name,
+ const char *final_index_name, const char *curr_index_name,
+ const char *final_rev_index_name, const char *curr_rev_index_name,
+ const char *keep_msg, const char *promisor_msg,
+ unsigned char *hash)
+{
+ const char *report = "pack";
+ struct strbuf pack_name = STRBUF_INIT;
+ struct strbuf index_name = STRBUF_INIT;
+ struct strbuf rev_index_name = STRBUF_INIT;
+ int err;
+
+ if (!from_stdin) {
+ close(input_fd);
+ } else {
+ fsync_component_or_die(FSYNC_COMPONENT_PACK, output_fd, curr_pack_name);
+ err = close(output_fd);
+ if (err)
+ die_errno(_("error while closing pack file"));
+ }
+
+ if (keep_msg)
+ write_special_file("keep", keep_msg, final_pack_name, hash,
+ &report);
+ if (promisor_msg)
+ write_special_file("promisor", promisor_msg, final_pack_name,
+ hash, NULL);
+
+ rename_tmp_packfile(&final_pack_name, curr_pack_name, &pack_name,
+ hash, "pack", from_stdin);
+ if (curr_rev_index_name)
+ rename_tmp_packfile(&final_rev_index_name, curr_rev_index_name,
+ &rev_index_name, hash, "rev", 1);
+ rename_tmp_packfile(&final_index_name, curr_index_name, &index_name,
+ hash, "idx", 1);
+
+ if (do_fsck_object) {
+ struct packed_git *p;
+ p = add_packed_git(final_index_name, strlen(final_index_name), 0);
+ if (p)
+ install_packed_git(the_repository, p);
+ }
+
+ if (!from_stdin) {
+ printf("%s\n", hash_to_hex(hash));
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "%s\t%s\n", report, hash_to_hex(hash));
+ write_or_die(1, buf.buf, buf.len);
+ strbuf_release(&buf);
+
+ /*
+ * Let's just mimic git-unpack-objects here and write
+ * the last part of the input buffer to stdout.
+ */
+ while (input_len) {
+ err = xwrite(1, input_buffer + input_offset, input_len);
+ if (err <= 0)
+ break;
+ input_len -= err;
+ input_offset += err;
+ }
+ }
+
+ strbuf_release(&rev_index_name);
+ strbuf_release(&index_name);
+ strbuf_release(&pack_name);
+}
+
+static int git_index_pack_config(const char *k, const char *v, void *cb)
+{
+ struct pack_idx_option *opts = cb;
+
+ if (!strcmp(k, "pack.indexversion")) {
+ opts->version = git_config_int(k, v);
+ if (opts->version > 2)
+ die(_("bad pack.indexVersion=%"PRIu32), opts->version);
+ return 0;
+ }
+ if (!strcmp(k, "pack.threads")) {
+ nr_threads = git_config_int(k, v);
+ if (nr_threads < 0)
+ die(_("invalid number of threads specified (%d)"),
+ nr_threads);
+ if (!HAVE_THREADS && nr_threads != 1) {
+ warning(_("no threads support, ignoring %s"), k);
+ nr_threads = 1;
+ }
+ return 0;
+ }
+ if (!strcmp(k, "pack.writereverseindex")) {
+ if (git_config_bool(k, v))
+ opts->flags |= WRITE_REV;
+ else
+ opts->flags &= ~WRITE_REV;
+ }
+ return git_default_config(k, v, cb);
+}
+
+static int cmp_uint32(const void *a_, const void *b_)
+{
+ uint32_t a = *((uint32_t *)a_);
+ uint32_t b = *((uint32_t *)b_);
+
+ return (a < b) ? -1 : (a != b);
+}
+
+static void read_v2_anomalous_offsets(struct packed_git *p,
+ struct pack_idx_option *opts)
+{
+ const uint32_t *idx1, *idx2;
+ uint32_t i;
+
+ /* The address of the 4-byte offset table */
+ idx1 = (((const uint32_t *)((const uint8_t *)p->index_data + p->crc_offset))
+ + (size_t)p->num_objects /* CRC32 table */
+ );
+
+ /* The address of the 8-byte offset table */
+ idx2 = idx1 + p->num_objects;
+
+ for (i = 0; i < p->num_objects; i++) {
+ uint32_t off = ntohl(idx1[i]);
+ if (!(off & 0x80000000))
+ continue;
+ off = off & 0x7fffffff;
+ check_pack_index_ptr(p, &idx2[off * 2]);
+ if (idx2[off * 2])
+ continue;
+ /*
+ * The real offset is ntohl(idx2[off * 2]) in high 4
+ * octets, and ntohl(idx2[off * 2 + 1]) in low 4
+ * octets. But idx2[off * 2] is Zero!!!
+ */
+ ALLOC_GROW(opts->anomaly, opts->anomaly_nr + 1, opts->anomaly_alloc);
+ opts->anomaly[opts->anomaly_nr++] = ntohl(idx2[off * 2 + 1]);
+ }
+
+ QSORT(opts->anomaly, opts->anomaly_nr, cmp_uint32);
+}
+
+static void read_idx_option(struct pack_idx_option *opts, const char *pack_name)
+{
+ struct packed_git *p = add_packed_git(pack_name, strlen(pack_name), 1);
+
+ if (!p)
+ die(_("Cannot open existing pack file '%s'"), pack_name);
+ if (open_pack_index(p))
+ die(_("Cannot open existing pack idx file for '%s'"), pack_name);
+
+ /* Read the attributes from the existing idx file */
+ opts->version = p->index_version;
+
+ if (opts->version == 2)
+ read_v2_anomalous_offsets(p, opts);
+
+ /*
+ * Get rid of the idx file as we do not need it anymore.
+ * NEEDSWORK: extract this bit from free_pack_by_name() in
+ * object-file.c, perhaps? It shouldn't matter very much as we
+ * know we haven't installed this pack (hence we never have
+ * read anything from it).
+ */
+ close_pack_index(p);
+ free(p);
+}
+
+static void show_pack_info(int stat_only)
+{
+ int i, baseobjects = nr_objects - nr_ref_deltas - nr_ofs_deltas;
+ unsigned long *chain_histogram = NULL;
+
+ if (deepest_delta)
+ CALLOC_ARRAY(chain_histogram, deepest_delta);
+
+ for (i = 0; i < nr_objects; i++) {
+ struct object_entry *obj = &objects[i];
+
+ if (is_delta_type(obj->type))
+ chain_histogram[obj_stat[i].delta_depth - 1]++;
+ if (stat_only)
+ continue;
+ printf("%s %-6s %"PRIuMAX" %"PRIuMAX" %"PRIuMAX,
+ oid_to_hex(&obj->idx.oid),
+ type_name(obj->real_type), (uintmax_t)obj->size,
+ (uintmax_t)(obj[1].idx.offset - obj->idx.offset),
+ (uintmax_t)obj->idx.offset);
+ if (is_delta_type(obj->type)) {
+ struct object_entry *bobj = &objects[obj_stat[i].base_object_no];
+ printf(" %u %s", obj_stat[i].delta_depth,
+ oid_to_hex(&bobj->idx.oid));
+ }
+ putchar('\n');
+ }
+
+ if (baseobjects)
+ printf_ln(Q_("non delta: %d object",
+ "non delta: %d objects",
+ baseobjects),
+ baseobjects);
+ for (i = 0; i < deepest_delta; i++) {
+ if (!chain_histogram[i])
+ continue;
+ printf_ln(Q_("chain length = %d: %lu object",
+ "chain length = %d: %lu objects",
+ chain_histogram[i]),
+ i + 1,
+ chain_histogram[i]);
+ }
+ free(chain_histogram);
+}
+
+int cmd_index_pack(int argc, const char **argv, const char *prefix)
+{
+ int i, fix_thin_pack = 0, verify = 0, stat_only = 0, rev_index;
+ const char *curr_index;
+ const char *curr_rev_index = NULL;
+ const char *index_name = NULL, *pack_name = NULL, *rev_index_name = NULL;
+ const char *keep_msg = NULL;
+ const char *promisor_msg = NULL;
+ struct strbuf index_name_buf = STRBUF_INIT;
+ struct strbuf rev_index_name_buf = STRBUF_INIT;
+ struct pack_idx_entry **idx_objects;
+ struct pack_idx_option opts;
+ unsigned char pack_hash[GIT_MAX_RAWSZ];
+ unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
+ int report_end_of_input = 0;
+ int hash_algo = 0;
+
+ /*
+ * index-pack never needs to fetch missing objects except when
+ * REF_DELTA bases are missing (which are explicitly handled). It only
+ * accesses the repo to do hash collision checks and to check which
+ * REF_DELTA bases need to be fetched.
+ */
+ fetch_if_missing = 0;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(index_pack_usage);
+
+ read_replace_refs = 0;
+ fsck_options.walk = mark_link;
+
+ reset_pack_idx_option(&opts);
+ git_config(git_index_pack_config, &opts);
+ if (prefix && chdir(prefix))
+ die(_("Cannot come back to cwd"));
+
+ if (git_env_bool(GIT_TEST_WRITE_REV_INDEX, 0))
+ rev_index = 1;
+ else
+ rev_index = !!(opts.flags & (WRITE_REV_VERIFY | WRITE_REV));
+
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+
+ if (*arg == '-') {
+ if (!strcmp(arg, "--stdin")) {
+ from_stdin = 1;
+ } else if (!strcmp(arg, "--fix-thin")) {
+ fix_thin_pack = 1;
+ } else if (skip_to_optional_arg(arg, "--strict", &arg)) {
+ strict = 1;
+ do_fsck_object = 1;
+ fsck_set_msg_types(&fsck_options, arg);
+ } else if (!strcmp(arg, "--check-self-contained-and-connected")) {
+ strict = 1;
+ check_self_contained_and_connected = 1;
+ } else if (!strcmp(arg, "--fsck-objects")) {
+ do_fsck_object = 1;
+ } else if (!strcmp(arg, "--verify")) {
+ verify = 1;
+ } else if (!strcmp(arg, "--verify-stat")) {
+ verify = 1;
+ show_stat = 1;
+ } else if (!strcmp(arg, "--verify-stat-only")) {
+ verify = 1;
+ show_stat = 1;
+ stat_only = 1;
+ } else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
+ ; /* nothing to do */
+ } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
+ ; /* already parsed */
+ } else if (starts_with(arg, "--threads=")) {
+ char *end;
+ nr_threads = strtoul(arg+10, &end, 0);
+ if (!arg[10] || *end || nr_threads < 0)
+ usage(index_pack_usage);
+ if (!HAVE_THREADS && nr_threads != 1) {
+ warning(_("no threads support, ignoring %s"), arg);
+ nr_threads = 1;
+ }
+ } else if (starts_with(arg, "--pack_header=")) {
+ struct pack_header *hdr;
+ char *c;
+
+ hdr = (struct pack_header *)input_buffer;
+ hdr->hdr_signature = htonl(PACK_SIGNATURE);
+ hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10));
+ if (*c != ',')
+ die(_("bad %s"), arg);
+ hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10));
+ if (*c)
+ die(_("bad %s"), arg);
+ input_len = sizeof(*hdr);
+ } else if (!strcmp(arg, "-v")) {
+ verbose = 1;
+ } else if (!strcmp(arg, "--progress-title")) {
+ if (progress_title || (i+1) >= argc)
+ usage(index_pack_usage);
+ progress_title = argv[++i];
+ } else if (!strcmp(arg, "--show-resolving-progress")) {
+ show_resolving_progress = 1;
+ } else if (!strcmp(arg, "--report-end-of-input")) {
+ report_end_of_input = 1;
+ } else if (!strcmp(arg, "-o")) {
+ if (index_name || (i+1) >= argc)
+ usage(index_pack_usage);
+ index_name = argv[++i];
+ } else if (starts_with(arg, "--index-version=")) {
+ char *c;
+ opts.version = strtoul(arg + 16, &c, 10);
+ if (opts.version > 2)
+ die(_("bad %s"), arg);
+ if (*c == ',')
+ opts.off32_limit = strtoul(c+1, &c, 0);
+ if (*c || opts.off32_limit & 0x80000000)
+ die(_("bad %s"), arg);
+ } else if (skip_prefix(arg, "--max-input-size=", &arg)) {
+ max_input_size = strtoumax(arg, NULL, 10);
+ } else if (skip_prefix(arg, "--object-format=", &arg)) {
+ hash_algo = hash_algo_by_name(arg);
+ if (hash_algo == GIT_HASH_UNKNOWN)
+ die(_("unknown hash algorithm '%s'"), arg);
+ repo_set_hash_algo(the_repository, hash_algo);
+ } else if (!strcmp(arg, "--rev-index")) {
+ rev_index = 1;
+ } else if (!strcmp(arg, "--no-rev-index")) {
+ rev_index = 0;
+ } else
+ usage(index_pack_usage);
+ continue;
+ }
+
+ if (pack_name)
+ usage(index_pack_usage);
+ pack_name = arg;
+ }
+
+ if (!pack_name && !from_stdin)
+ usage(index_pack_usage);
+ if (fix_thin_pack && !from_stdin)
+ die(_("the option '%s' requires '%s'"), "--fix-thin", "--stdin");
+ if (from_stdin && !startup_info->have_repository)
+ die(_("--stdin requires a git repository"));
+ if (from_stdin && hash_algo)
+ die(_("options '%s' and '%s' cannot be used together"), "--object-format", "--stdin");
+ if (!index_name && pack_name)
+ index_name = derive_filename(pack_name, "pack", "idx", &index_name_buf);
+
+ opts.flags &= ~(WRITE_REV | WRITE_REV_VERIFY);
+ if (rev_index) {
+ opts.flags |= verify ? WRITE_REV_VERIFY : WRITE_REV;
+ if (index_name)
+ rev_index_name = derive_filename(index_name,
+ "idx", "rev",
+ &rev_index_name_buf);
+ }
+
+ if (verify) {
+ if (!index_name)
+ die(_("--verify with no packfile name given"));
+ read_idx_option(&opts, index_name);
+ opts.flags |= WRITE_IDX_VERIFY | WRITE_IDX_STRICT;
+ }
+ if (strict)
+ opts.flags |= WRITE_IDX_STRICT;
+
+ if (HAVE_THREADS && !nr_threads) {
+ nr_threads = online_cpus();
+ /*
+ * Experiments show that going above 20 threads doesn't help,
+ * no matter how many cores you have. Below that, we tend to
+ * max at half the number of online_cpus(), presumably because
+ * half of those are hyperthreads rather than full cores. We'll
+ * never reduce the level below "3", though, to match a
+ * historical value that nobody complained about.
+ */
+ if (nr_threads < 4)
+ ; /* too few cores to consider capping */
+ else if (nr_threads < 6)
+ nr_threads = 3; /* historic cap */
+ else if (nr_threads < 40)
+ nr_threads /= 2;
+ else
+ nr_threads = 20; /* hard cap */
+ }
+
+ curr_pack = open_pack_file(pack_name);
+ parse_pack_header();
+ CALLOC_ARRAY(objects, st_add(nr_objects, 1));
+ if (show_stat)
+ CALLOC_ARRAY(obj_stat, st_add(nr_objects, 1));
+ CALLOC_ARRAY(ofs_deltas, nr_objects);
+ parse_pack_objects(pack_hash);
+ if (report_end_of_input)
+ write_in_full(2, "\0", 1);
+ resolve_deltas();
+ conclude_pack(fix_thin_pack, curr_pack, pack_hash);
+ free(ofs_deltas);
+ free(ref_deltas);
+ if (strict)
+ foreign_nr = check_objects();
+
+ if (show_stat)
+ show_pack_info(stat_only);
+
+ ALLOC_ARRAY(idx_objects, nr_objects);
+ for (i = 0; i < nr_objects; i++)
+ idx_objects[i] = &objects[i].idx;
+ curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
+ if (rev_index)
+ curr_rev_index = write_rev_file(rev_index_name, idx_objects,
+ nr_objects, pack_hash,
+ opts.flags);
+ free(idx_objects);
+
+ if (!verify)
+ final(pack_name, curr_pack,
+ index_name, curr_index,
+ rev_index_name, curr_rev_index,
+ keep_msg, promisor_msg,
+ pack_hash);
+ else
+ close(input_fd);
+
+ if (do_fsck_object && fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
+
+ free(opts.anomaly);
+ free(objects);
+ strbuf_release(&index_name_buf);
+ strbuf_release(&rev_index_name_buf);
+ if (!pack_name)
+ free((void *) curr_pack);
+ if (!index_name)
+ free((void *) curr_index);
+ if (!rev_index_name)
+ free((void *) curr_rev_index);
+
+ /*
+	 * Let the caller know this pack is not self-contained.
+ */
+ if (check_self_contained_and_connected && foreign_nr)
+ return 1;
+
+ return 0;
+}
diff --git a/builtin/init-db.c b/builtin/init-db.c
new file mode 100644
index 0000000..dcaaf10
--- /dev/null
+++ b/builtin/init-db.c
@@ -0,0 +1,699 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "cache.h"
+#include "config.h"
+#include "refs.h"
+#include "builtin.h"
+#include "exec-cmd.h"
+#include "parse-options.h"
+#include "worktree.h"
+
+#ifndef DEFAULT_GIT_TEMPLATE_DIR
+#define DEFAULT_GIT_TEMPLATE_DIR "/usr/share/git-core/templates"
+#endif
+
+#ifdef NO_TRUSTABLE_FILEMODE
+#define TEST_FILEMODE 0
+#else
+#define TEST_FILEMODE 1
+#endif
+
+#define GIT_DEFAULT_HASH_ENVIRONMENT "GIT_DEFAULT_HASH"
+
+static int init_is_bare_repository = 0;
+static int init_shared_repository = -1;
+
+static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
+ DIR *dir)
+{
+ size_t path_baselen = path->len;
+ size_t template_baselen = template_path->len;
+ struct dirent *de;
+
+	/* Note: if a ".git/hooks" file exists in the repository being
+ * re-initialized, /etc/core-git/templates/hooks/update would
+ * cause "git init" to fail here. I think this is sane but
+ * it means that the set of templates we ship by default, along
+ * with the way the namespace under .git/ is organized, should
+ * be really carefully chosen.
+ */
+ safe_create_dir(path->buf, 1);
+ while ((de = readdir(dir)) != NULL) {
+ struct stat st_git, st_template;
+ int exists = 0;
+
+ strbuf_setlen(path, path_baselen);
+ strbuf_setlen(template_path, template_baselen);
+
+ if (de->d_name[0] == '.')
+ continue;
+ strbuf_addstr(path, de->d_name);
+ strbuf_addstr(template_path, de->d_name);
+ if (lstat(path->buf, &st_git)) {
+ if (errno != ENOENT)
+ die_errno(_("cannot stat '%s'"), path->buf);
+ }
+ else
+ exists = 1;
+
+ if (lstat(template_path->buf, &st_template))
+ die_errno(_("cannot stat template '%s'"), template_path->buf);
+
+ if (S_ISDIR(st_template.st_mode)) {
+ DIR *subdir = opendir(template_path->buf);
+ if (!subdir)
+ die_errno(_("cannot opendir '%s'"), template_path->buf);
+ strbuf_addch(path, '/');
+ strbuf_addch(template_path, '/');
+ copy_templates_1(path, template_path, subdir);
+ closedir(subdir);
+ }
+ else if (exists)
+ continue;
+ else if (S_ISLNK(st_template.st_mode)) {
+ struct strbuf lnk = STRBUF_INIT;
+ if (strbuf_readlink(&lnk, template_path->buf,
+ st_template.st_size) < 0)
+ die_errno(_("cannot readlink '%s'"), template_path->buf);
+ if (symlink(lnk.buf, path->buf))
+ die_errno(_("cannot symlink '%s' '%s'"),
+ lnk.buf, path->buf);
+ strbuf_release(&lnk);
+ }
+ else if (S_ISREG(st_template.st_mode)) {
+ if (copy_file(path->buf, template_path->buf, st_template.st_mode))
+ die_errno(_("cannot copy '%s' to '%s'"),
+ template_path->buf, path->buf);
+ }
+ else
+ error(_("ignoring template %s"), template_path->buf);
+ }
+}
+
+static void copy_templates(const char *template_dir, const char *init_template_dir)
+{
+ struct strbuf path = STRBUF_INIT;
+ struct strbuf template_path = STRBUF_INIT;
+ size_t template_len;
+ struct repository_format template_format = REPOSITORY_FORMAT_INIT;
+ struct strbuf err = STRBUF_INIT;
+ DIR *dir;
+ char *to_free = NULL;
+
+ if (!template_dir)
+ template_dir = getenv(TEMPLATE_DIR_ENVIRONMENT);
+ if (!template_dir)
+ template_dir = init_template_dir;
+ if (!template_dir)
+ template_dir = to_free = system_path(DEFAULT_GIT_TEMPLATE_DIR);
+ if (!template_dir[0]) {
+ free(to_free);
+ return;
+ }
+
+ strbuf_addstr(&template_path, template_dir);
+ strbuf_complete(&template_path, '/');
+ template_len = template_path.len;
+
+ dir = opendir(template_path.buf);
+ if (!dir) {
+ warning(_("templates not found in %s"), template_dir);
+ goto free_return;
+ }
+
+ /* Make sure that template is from the correct vintage */
+ strbuf_addstr(&template_path, "config");
+ read_repository_format(&template_format, template_path.buf);
+ strbuf_setlen(&template_path, template_len);
+
+ /*
+ * No mention of version at all is OK, but anything else should be
+ * verified.
+ */
+ if (template_format.version >= 0 &&
+ verify_repository_format(&template_format, &err) < 0) {
+ warning(_("not copying templates from '%s': %s"),
+ template_dir, err.buf);
+ strbuf_release(&err);
+ goto close_free_return;
+ }
+
+ strbuf_addstr(&path, get_git_common_dir());
+ strbuf_complete(&path, '/');
+ copy_templates_1(&path, &template_path, dir);
+close_free_return:
+ closedir(dir);
+free_return:
+ free(to_free);
+ strbuf_release(&path);
+ strbuf_release(&template_path);
+ clear_repository_format(&template_format);
+}
+
+/*
+ * If the git_dir is not directly inside the working tree, then git will not
+ * find it by default, and we need to set the worktree explicitly.
+ */
+static int needs_work_tree_config(const char *git_dir, const char *work_tree)
+{
+ if (!strcmp(work_tree, "/") && !strcmp(git_dir, "/.git"))
+ return 0;
+ if (skip_prefix(git_dir, work_tree, &git_dir) &&
+ !strcmp(git_dir, "/.git"))
+ return 0;
+ return 1;
+}
+
+void initialize_repository_version(int hash_algo, int reinit)
+{
+ char repo_version_string[10];
+ int repo_version = GIT_REPO_VERSION;
+
+ if (hash_algo != GIT_HASH_SHA1)
+ repo_version = GIT_REPO_VERSION_READ;
+
+ /* This forces creation of new config file */
+ xsnprintf(repo_version_string, sizeof(repo_version_string),
+ "%d", repo_version);
+ git_config_set("core.repositoryformatversion", repo_version_string);
+
+ if (hash_algo != GIT_HASH_SHA1)
+ git_config_set("extensions.objectformat",
+ hash_algos[hash_algo].name);
+ else if (reinit)
+ git_config_set_gently("extensions.objectformat", NULL);
+}
+
+static int create_default_files(const char *template_path,
+ const char *original_git_dir,
+ const char *initial_branch,
+ const struct repository_format *fmt,
+ int quiet)
+{
+ struct stat st1;
+ struct strbuf buf = STRBUF_INIT;
+ char *path;
+ char junk[2];
+ int reinit;
+ int filemode;
+ struct strbuf err = STRBUF_INIT;
+ const char *init_template_dir = NULL;
+ const char *work_tree = get_git_work_tree();
+
+ /*
+ * First copy the templates -- we might have the default
+ * config file there, in which case we would want to read
+ * from it after installing.
+ *
+ * Before reading that config, we also need to clear out any cached
+ * values (since we've just potentially changed what's available on
+ * disk).
+ */
+ git_config_get_pathname("init.templatedir", &init_template_dir);
+ copy_templates(template_path, init_template_dir);
+ free((char *)init_template_dir);
+ git_config_clear();
+ reset_shared_repository();
+ git_config(git_default_config, NULL);
+
+ /*
+ * We must make sure command-line options continue to override any
+ * values we might have just re-read from the config.
+ */
+ is_bare_repository_cfg = init_is_bare_repository || !work_tree;
+ if (init_shared_repository != -1)
+ set_shared_repository(init_shared_repository);
+
+ /*
+	 * We would have created the above under the user's umask -- under
+ * shared-repository settings, we would need to fix them up.
+ */
+ if (get_shared_repository()) {
+ adjust_shared_perm(get_git_dir());
+ }
+
+ /*
+ * We need to create a "refs" dir in any case so that older
+ * versions of git can tell that this is a repository.
+ */
+ safe_create_dir(git_path("refs"), 1);
+ adjust_shared_perm(git_path("refs"));
+
+ if (refs_init_db(&err))
+ die("failed to set up refs db: %s", err.buf);
+
+ /*
+	 * Point the HEAD symref to the initial branch if HEAD does
+	 * not yet exist.
+ */
+ path = git_path_buf(&buf, "HEAD");
+ reinit = (!access(path, R_OK)
+ || readlink(path, junk, sizeof(junk)-1) != -1);
+ if (!reinit) {
+ char *ref;
+
+ if (!initial_branch)
+ initial_branch = git_default_branch_name(quiet);
+
+ ref = xstrfmt("refs/heads/%s", initial_branch);
+ if (check_refname_format(ref, 0) < 0)
+ die(_("invalid initial branch name: '%s'"),
+ initial_branch);
+
+ if (create_symref("HEAD", ref, NULL) < 0)
+ exit(1);
+ free(ref);
+ }
+
+ initialize_repository_version(fmt->hash_algo, 0);
+
+ /* Check filemode trustability */
+ path = git_path_buf(&buf, "config");
+ filemode = TEST_FILEMODE;
+ if (TEST_FILEMODE && !lstat(path, &st1)) {
+ struct stat st2;
+ filemode = (!chmod(path, st1.st_mode ^ S_IXUSR) &&
+ !lstat(path, &st2) &&
+ st1.st_mode != st2.st_mode &&
+ !chmod(path, st1.st_mode));
+ if (filemode && !reinit && (st1.st_mode & S_IXUSR))
+ filemode = 0;
+ }
+ git_config_set("core.filemode", filemode ? "true" : "false");
+
+ if (is_bare_repository())
+ git_config_set("core.bare", "true");
+ else {
+ git_config_set("core.bare", "false");
+ /* allow template config file to override the default */
+ if (log_all_ref_updates == LOG_REFS_UNSET)
+ git_config_set("core.logallrefupdates", "true");
+ if (needs_work_tree_config(original_git_dir, work_tree))
+ git_config_set("core.worktree", work_tree);
+ }
+
+ if (!reinit) {
+ /* Check if symlink is supported in the work tree */
+ path = git_path_buf(&buf, "tXXXXXX");
+ if (!close(xmkstemp(path)) &&
+ !unlink(path) &&
+ !symlink("testing", path) &&
+ !lstat(path, &st1) &&
+ S_ISLNK(st1.st_mode))
+ unlink(path); /* good */
+ else
+ git_config_set("core.symlinks", "false");
+
+ /* Check if the filesystem is case-insensitive */
+ path = git_path_buf(&buf, "CoNfIg");
+ if (!access(path, F_OK))
+ git_config_set("core.ignorecase", "true");
+ probe_utf8_pathname_composition();
+ }
+
+ strbuf_release(&buf);
+ return reinit;
+}
+
+static void create_object_directory(void)
+{
+ struct strbuf path = STRBUF_INIT;
+ size_t baselen;
+
+ strbuf_addstr(&path, get_object_directory());
+ baselen = path.len;
+
+ safe_create_dir(path.buf, 1);
+
+ strbuf_setlen(&path, baselen);
+ strbuf_addstr(&path, "/pack");
+ safe_create_dir(path.buf, 1);
+
+ strbuf_setlen(&path, baselen);
+ strbuf_addstr(&path, "/info");
+ safe_create_dir(path.buf, 1);
+
+ strbuf_release(&path);
+}
+
+static void separate_git_dir(const char *git_dir, const char *git_link)
+{
+ struct stat st;
+
+ if (!stat(git_link, &st)) {
+ const char *src;
+
+ if (S_ISREG(st.st_mode))
+ src = read_gitfile(git_link);
+ else if (S_ISDIR(st.st_mode))
+ src = git_link;
+ else
+ die(_("unable to handle file type %d"), (int)st.st_mode);
+
+ if (rename(src, git_dir))
+ die_errno(_("unable to move %s to %s"), src, git_dir);
+ repair_worktrees(NULL, NULL);
+ }
+
+ write_file(git_link, "gitdir: %s", git_dir);
+}
+
+static void validate_hash_algorithm(struct repository_format *repo_fmt, int hash)
+{
+ const char *env = getenv(GIT_DEFAULT_HASH_ENVIRONMENT);
+ /*
+ * If we already have an initialized repo, don't allow the user to
+ * specify a different algorithm, as that could cause corruption.
+ * Otherwise, if the user has specified one on the command line, use it.
+ */
+ if (repo_fmt->version >= 0 && hash != GIT_HASH_UNKNOWN && hash != repo_fmt->hash_algo)
+ die(_("attempt to reinitialize repository with different hash"));
+ else if (hash != GIT_HASH_UNKNOWN)
+ repo_fmt->hash_algo = hash;
+ else if (env) {
+ int env_algo = hash_algo_by_name(env);
+ if (env_algo == GIT_HASH_UNKNOWN)
+ die(_("unknown hash algorithm '%s'"), env);
+ repo_fmt->hash_algo = env_algo;
+ }
+}
+
+int init_db(const char *git_dir, const char *real_git_dir,
+ const char *template_dir, int hash, const char *initial_branch,
+ unsigned int flags)
+{
+ int reinit;
+ int exist_ok = flags & INIT_DB_EXIST_OK;
+ char *original_git_dir = real_pathdup(git_dir, 1);
+ struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT;
+
+ if (real_git_dir) {
+ struct stat st;
+
+ if (!exist_ok && !stat(git_dir, &st))
+ die(_("%s already exists"), git_dir);
+
+ if (!exist_ok && !stat(real_git_dir, &st))
+ die(_("%s already exists"), real_git_dir);
+
+ set_git_dir(real_git_dir, 1);
+ git_dir = get_git_dir();
+ separate_git_dir(git_dir, original_git_dir);
+ }
+ else {
+ set_git_dir(git_dir, 1);
+ git_dir = get_git_dir();
+ }
+ startup_info->have_repository = 1;
+
+ /* Ensure `core.hidedotfiles` is processed */
+ git_config(platform_core_config, NULL);
+
+ safe_create_dir(git_dir, 0);
+
+ init_is_bare_repository = is_bare_repository();
+
+ /* Check to see if the repository version is right.
+	 * Note that a newly created repository does not have a
+	 * config file, so this will not fail. What we are catching
+	 * is an attempt to reinitialize a new repository with an old tool.
+ */
+ check_repository_format(&repo_fmt);
+
+ validate_hash_algorithm(&repo_fmt, hash);
+
+ reinit = create_default_files(template_dir, original_git_dir,
+ initial_branch, &repo_fmt,
+ flags & INIT_DB_QUIET);
+ if (reinit && initial_branch)
+ warning(_("re-init: ignored --initial-branch=%s"),
+ initial_branch);
+
+ create_object_directory();
+
+ if (get_shared_repository()) {
+ char buf[10];
+ /* We do not spell "group" and such, so that
+	 * the configuration can be read by older versions
+	 * of git. Note, we use octal numbers for new share modes,
+ * and compatibility values for PERM_GROUP and
+ * PERM_EVERYBODY.
+ */
+ if (get_shared_repository() < 0)
+ /* force to the mode value */
+ xsnprintf(buf, sizeof(buf), "0%o", -get_shared_repository());
+ else if (get_shared_repository() == PERM_GROUP)
+ xsnprintf(buf, sizeof(buf), "%d", OLD_PERM_GROUP);
+ else if (get_shared_repository() == PERM_EVERYBODY)
+ xsnprintf(buf, sizeof(buf), "%d", OLD_PERM_EVERYBODY);
+ else
+ BUG("invalid value for shared_repository");
+ git_config_set("core.sharedrepository", buf);
+ git_config_set("receive.denyNonFastforwards", "true");
+ }
+
+ if (!(flags & INIT_DB_QUIET)) {
+ int len = strlen(git_dir);
+
+ if (reinit)
+ printf(get_shared_repository()
+ ? _("Reinitialized existing shared Git repository in %s%s\n")
+ : _("Reinitialized existing Git repository in %s%s\n"),
+ git_dir, len && git_dir[len-1] != '/' ? "/" : "");
+ else
+ printf(get_shared_repository()
+ ? _("Initialized empty shared Git repository in %s%s\n")
+ : _("Initialized empty Git repository in %s%s\n"),
+ git_dir, len && git_dir[len-1] != '/' ? "/" : "");
+ }
+
+ free(original_git_dir);
+ return 0;
+}
+
+static int guess_repository_type(const char *git_dir)
+{
+ const char *slash;
+ char *cwd;
+ int cwd_is_git_dir;
+
+ /*
+ * "GIT_DIR=. git init" is always bare.
+ * "GIT_DIR=`pwd` git init" too.
+ */
+ if (!strcmp(".", git_dir))
+ return 1;
+ cwd = xgetcwd();
+ cwd_is_git_dir = !strcmp(git_dir, cwd);
+ free(cwd);
+ if (cwd_is_git_dir)
+ return 1;
+ /*
+	 * "GIT_DIR=.git" or "GIT_DIR=something/.git" is usually not.
+ */
+ if (!strcmp(git_dir, ".git"))
+ return 0;
+ slash = strrchr(git_dir, '/');
+ if (slash && !strcmp(slash, "/.git"))
+ return 0;
+
+ /*
+ * Otherwise it is often bare. At this point
+ * we are just guessing.
+ */
+ return 1;
+}
+
+static int shared_callback(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ *((int *) opt->value) = (arg) ? git_config_perm("arg", arg) : PERM_GROUP;
+ return 0;
+}
+
+static const char *const init_db_usage[] = {
+ N_("git init [-q | --quiet] [--bare] [--template=<template-directory>]\n"
+ " [--separate-git-dir <git-dir>] [--object-format=<format>]\n"
+ " [-b <branch-name> | --initial-branch=<branch-name>]\n"
+ " [--shared[=<permissions>]] [<directory>]"),
+ NULL
+};
+
+/*
+ * If you want to, you can share the DB area with any number of branches.
+ * That has advantages: you can save space by sharing all the SHA1 objects.
+ * On the other hand, it might just make lookup slower and messier. You
+ * be the judge. The default case is to have one DB per managed directory.
+ */
+int cmd_init_db(int argc, const char **argv, const char *prefix)
+{
+ const char *git_dir;
+ const char *real_git_dir = NULL;
+ const char *work_tree;
+ const char *template_dir = NULL;
+ unsigned int flags = 0;
+ const char *object_format = NULL;
+ const char *initial_branch = NULL;
+ int hash_algo = GIT_HASH_UNKNOWN;
+ const struct option init_db_options[] = {
+ OPT_STRING(0, "template", &template_dir, N_("template-directory"),
+ N_("directory from which templates will be used")),
+ OPT_SET_INT(0, "bare", &is_bare_repository_cfg,
+ N_("create a bare repository"), 1),
+ { OPTION_CALLBACK, 0, "shared", &init_shared_repository,
+ N_("permissions"),
+ N_("specify that the git repository is to be shared amongst several users"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG, shared_callback, 0},
+ OPT_BIT('q', "quiet", &flags, N_("be quiet"), INIT_DB_QUIET),
+ OPT_STRING(0, "separate-git-dir", &real_git_dir, N_("gitdir"),
+ N_("separate git dir from working tree")),
+ OPT_STRING('b', "initial-branch", &initial_branch, N_("name"),
+ N_("override the name of the initial branch")),
+ OPT_STRING(0, "object-format", &object_format, N_("hash"),
+ N_("specify the hash algorithm to use")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, init_db_options, init_db_usage, 0);
+
+ if (real_git_dir && is_bare_repository_cfg == 1)
+ die(_("options '%s' and '%s' cannot be used together"), "--separate-git-dir", "--bare");
+
+ if (real_git_dir && !is_absolute_path(real_git_dir))
+ real_git_dir = real_pathdup(real_git_dir, 1);
+
+ if (template_dir && *template_dir && !is_absolute_path(template_dir)) {
+ template_dir = absolute_pathdup(template_dir);
+ UNLEAK(template_dir);
+ }
+
+ if (argc == 1) {
+ int mkdir_tried = 0;
+ retry:
+ if (chdir(argv[0]) < 0) {
+ if (!mkdir_tried) {
+ int saved;
+ /*
+ * At this point we haven't read any configuration,
+ * and we know shared_repository should always be 0;
+ * but just in case we play safe.
+ */
+ saved = get_shared_repository();
+ set_shared_repository(0);
+ switch (safe_create_leading_directories_const(argv[0])) {
+ case SCLD_OK:
+ case SCLD_PERMS:
+ break;
+ case SCLD_EXISTS:
+ errno = EEXIST;
+ /* fallthru */
+ default:
+ die_errno(_("cannot mkdir %s"), argv[0]);
+ break;
+ }
+ set_shared_repository(saved);
+ if (mkdir(argv[0], 0777) < 0)
+ die_errno(_("cannot mkdir %s"), argv[0]);
+ mkdir_tried = 1;
+ goto retry;
+ }
+ die_errno(_("cannot chdir to %s"), argv[0]);
+ }
+ } else if (0 < argc) {
+ usage(init_db_usage[0]);
+ }
+ if (is_bare_repository_cfg == 1) {
+ char *cwd = xgetcwd();
+ setenv(GIT_DIR_ENVIRONMENT, cwd, argc > 0);
+ free(cwd);
+ }
+
+ if (object_format) {
+ hash_algo = hash_algo_by_name(object_format);
+ if (hash_algo == GIT_HASH_UNKNOWN)
+ die(_("unknown hash algorithm '%s'"), object_format);
+ }
+
+ if (init_shared_repository != -1)
+ set_shared_repository(init_shared_repository);
+
+ /*
+ * GIT_WORK_TREE makes sense only in conjunction with GIT_DIR
+ * without --bare. Catch the error early.
+ */
+ git_dir = xstrdup_or_null(getenv(GIT_DIR_ENVIRONMENT));
+ work_tree = xstrdup_or_null(getenv(GIT_WORK_TREE_ENVIRONMENT));
+ if ((!git_dir || is_bare_repository_cfg == 1) && work_tree)
+ die(_("%s (or --work-tree=<directory>) not allowed without "
+ "specifying %s (or --git-dir=<directory>)"),
+ GIT_WORK_TREE_ENVIRONMENT,
+ GIT_DIR_ENVIRONMENT);
+
+ /*
+ * Set up the default .git directory contents
+ */
+ if (!git_dir)
+ git_dir = DEFAULT_GIT_DIR_ENVIRONMENT;
+
+ /*
+ * When --separate-git-dir is used inside a linked worktree, take
+ * care to ensure that the common .git/ directory is relocated, not
+ * the worktree-specific .git/worktrees/<id>/ directory.
+ */
+ if (real_git_dir) {
+ int err;
+ const char *p;
+ struct strbuf sb = STRBUF_INIT;
+
+ p = read_gitfile_gently(git_dir, &err);
+ if (p && get_common_dir(&sb, p)) {
+ struct strbuf mainwt = STRBUF_INIT;
+
+ strbuf_addbuf(&mainwt, &sb);
+ strbuf_strip_suffix(&mainwt, "/.git");
+ if (chdir(mainwt.buf) < 0)
+ die_errno(_("cannot chdir to %s"), mainwt.buf);
+ strbuf_release(&mainwt);
+ git_dir = strbuf_detach(&sb, NULL);
+ }
+ strbuf_release(&sb);
+ }
+
+ if (is_bare_repository_cfg < 0)
+ is_bare_repository_cfg = guess_repository_type(git_dir);
+
+ if (!is_bare_repository_cfg) {
+ const char *git_dir_parent = strrchr(git_dir, '/');
+ if (git_dir_parent) {
+ char *rel = xstrndup(git_dir, git_dir_parent - git_dir);
+ git_work_tree_cfg = real_pathdup(rel, 1);
+ free(rel);
+ }
+ if (!git_work_tree_cfg)
+ git_work_tree_cfg = xgetcwd();
+ if (work_tree)
+ set_git_work_tree(work_tree);
+ else
+ set_git_work_tree(git_work_tree_cfg);
+ if (access(get_git_work_tree(), X_OK))
+ die_errno (_("Cannot access work tree '%s'"),
+ get_git_work_tree());
+ }
+ else {
+ if (real_git_dir)
+ die(_("--separate-git-dir incompatible with bare repository"));
+ if (work_tree)
+ set_git_work_tree(work_tree);
+ }
+
+ UNLEAK(real_git_dir);
+ UNLEAK(git_dir);
+ UNLEAK(work_tree);
+
+ flags |= INIT_DB_EXIST_OK;
+ return init_db(git_dir, real_git_dir, template_dir, hash_algo,
+ initial_branch, flags);
+}
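
For readers skimming the patch, here is a minimal sketch (not part of the
change itself) of how the init_db() entry point added above could be driven by
a caller. It only uses names introduced in this file -- init_db(), the
INIT_DB_QUIET and INIT_DB_EXIST_OK flags, and GIT_HASH_UNKNOWN -- and it
assumes the caller has arranged the git dir and work tree the way
cmd_init_db() does before the call.

/* Illustrative sketch only; error handling and work-tree setup omitted. */
static int example_create_repo(const char *dir)
{
	unsigned int flags = INIT_DB_QUIET | INIT_DB_EXIST_OK;

	/*
	 * No template directory, no separate git dir, default hash
	 * algorithm and default initial branch name.
	 */
	return init_db(dir, NULL, NULL, GIT_HASH_UNKNOWN, NULL, flags);
}
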
diff --git a/builtin/interpret-trailers.c b/builtin/interpret-trailers.c
new file mode 100644
index 0000000..e58627c
--- /dev/null
+++ b/builtin/interpret-trailers.c
@@ -0,0 +1,142 @@
+/*
+ * Builtin "git interpret-trailers"
+ *
+ * Copyright (c) 2013, 2014 Christian Couder <chriscool@tuxfamily.org>
+ *
+ */
+
+#include "cache.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "trailer.h"
+#include "config.h"
+
+static const char * const git_interpret_trailers_usage[] = {
+ N_("git interpret-trailers [--in-place] [--trim-empty]\n"
+ " [(--trailer <token>[(=|:)<value>])...]\n"
+ " [--parse] [<file>...]"),
+ NULL
+};
+
+static enum trailer_where where;
+static enum trailer_if_exists if_exists;
+static enum trailer_if_missing if_missing;
+
+static int option_parse_where(const struct option *opt,
+ const char *arg, int unset)
+{
+ return trailer_set_where(&where, arg);
+}
+
+static int option_parse_if_exists(const struct option *opt,
+ const char *arg, int unset)
+{
+ return trailer_set_if_exists(&if_exists, arg);
+}
+
+static int option_parse_if_missing(const struct option *opt,
+ const char *arg, int unset)
+{
+ return trailer_set_if_missing(&if_missing, arg);
+}
+
+static void new_trailers_clear(struct list_head *trailers)
+{
+ struct list_head *pos, *tmp;
+ struct new_trailer_item *item;
+
+ list_for_each_safe(pos, tmp, trailers) {
+ item = list_entry(pos, struct new_trailer_item, list);
+ list_del(pos);
+ free(item);
+ }
+}
+
+static int option_parse_trailer(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct list_head *trailers = opt->value;
+ struct new_trailer_item *item;
+
+ if (unset) {
+ new_trailers_clear(trailers);
+ return 0;
+ }
+
+ if (!arg)
+ return -1;
+
+ item = xmalloc(sizeof(*item));
+ item->text = arg;
+ item->where = where;
+ item->if_exists = if_exists;
+ item->if_missing = if_missing;
+ list_add_tail(&item->list, trailers);
+ return 0;
+}
+
+static int parse_opt_parse(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct process_trailer_options *v = opt->value;
+ v->only_trailers = 1;
+ v->only_input = 1;
+ v->unfold = 1;
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ return 0;
+}
+
+int cmd_interpret_trailers(int argc, const char **argv, const char *prefix)
+{
+ struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
+ LIST_HEAD(trailers);
+
+ struct option options[] = {
+ OPT_BOOL(0, "in-place", &opts.in_place, N_("edit files in place")),
+ OPT_BOOL(0, "trim-empty", &opts.trim_empty, N_("trim empty trailers")),
+
+ OPT_CALLBACK(0, "where", NULL, N_("action"),
+ N_("where to place the new trailer"), option_parse_where),
+ OPT_CALLBACK(0, "if-exists", NULL, N_("action"),
+ N_("action if trailer already exists"), option_parse_if_exists),
+ OPT_CALLBACK(0, "if-missing", NULL, N_("action"),
+ N_("action if trailer is missing"), option_parse_if_missing),
+
+ OPT_BOOL(0, "only-trailers", &opts.only_trailers, N_("output only the trailers")),
+ OPT_BOOL(0, "only-input", &opts.only_input, N_("do not apply config rules")),
+ OPT_BOOL(0, "unfold", &opts.unfold, N_("join whitespace-continued values")),
+ OPT_CALLBACK_F(0, "parse", &opts, NULL, N_("set parsing options"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, parse_opt_parse),
+ OPT_BOOL(0, "no-divider", &opts.no_divider, N_("do not treat --- specially")),
+ OPT_CALLBACK(0, "trailer", &trailers, N_("trailer"),
+ N_("trailer(s) to add"), option_parse_trailer),
+ OPT_END()
+ };
+
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_interpret_trailers_usage, 0);
+
+ if (opts.only_input && !list_empty(&trailers))
+ usage_msg_opt(
+ _("--trailer with --only-input does not make sense"),
+ git_interpret_trailers_usage,
+ options);
+
+ if (argc) {
+ int i;
+ for (i = 0; i < argc; i++)
+ process_trailers(argv[i], &opts, &trailers);
+ } else {
+ if (opts.in_place)
+ die(_("no input file given for in-place editing"));
+ process_trailers(NULL, &opts, &trailers);
+ }
+
+ new_trailers_clear(&trailers);
+
+ return 0;
+}
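
As a quick illustration of the data flow in the new builtin above (again, not
part of the patch): each --trailer argument becomes a struct new_trailer_item
on a list, and process_trailers() then rewrites the input with those trailers
applied. A hand-rolled equivalent, assuming the WHERE_END,
EXISTS_ADD_IF_DIFFERENT and MISSING_ADD constants declared in trailer.h, would
look roughly like this:

/* Sketch only: mirrors what option_parse_trailer() does for one --trailer. */
static void example_add_signoff(void)
{
	struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
	LIST_HEAD(trailers);
	struct new_trailer_item *item = xmalloc(sizeof(*item));

	item->text = "Signed-off-by: A U Thor <author@example.com>";
	item->where = WHERE_END;                  /* assumed from trailer.h */
	item->if_exists = EXISTS_ADD_IF_DIFFERENT;
	item->if_missing = MISSING_ADD;
	list_add_tail(&item->list, &trailers);

	process_trailers(NULL, &opts, &trailers); /* NULL: read stdin, write stdout */
	new_trailers_clear(&trailers);
}
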
diff --git a/builtin/log.c b/builtin/log.c
new file mode 100644
index 0000000..89447a5
--- /dev/null
+++ b/builtin/log.c
@@ -0,0 +1,2502 @@
+/*
+ * Builtin "git log" and related commands (show, whatchanged)
+ *
+ * (C) Copyright 2006 Linus Torvalds
+ * 2006 Junio Hamano
+ */
+#include "cache.h"
+#include "config.h"
+#include "refs.h"
+#include "object-store.h"
+#include "color.h"
+#include "commit.h"
+#include "diff.h"
+#include "diff-merges.h"
+#include "revision.h"
+#include "log-tree.h"
+#include "builtin.h"
+#include "tag.h"
+#include "reflog-walk.h"
+#include "patch-ids.h"
+#include "run-command.h"
+#include "shortlog.h"
+#include "remote.h"
+#include "string-list.h"
+#include "parse-options.h"
+#include "line-log.h"
+#include "branch.h"
+#include "streaming.h"
+#include "version.h"
+#include "mailmap.h"
+#include "gpg-interface.h"
+#include "progress.h"
+#include "commit-slab.h"
+#include "repository.h"
+#include "commit-reach.h"
+#include "range-diff.h"
+#include "tmp-objdir.h"
+
+#define MAIL_DEFAULT_WRAP 72
+#define COVER_FROM_AUTO_MAX_SUBJECT_LEN 100
+#define FORMAT_PATCH_NAME_MAX_DEFAULT 64
+
+/* Set a default date-time format for git log ("log.date" config variable) */
+static const char *default_date_mode = NULL;
+
+static int default_abbrev_commit;
+static int default_show_root = 1;
+static int default_follow;
+static int default_show_signature;
+static int default_encode_email_headers = 1;
+static int decoration_style;
+static int decoration_given;
+static int use_mailmap_config = 1;
+static unsigned int force_in_body_from;
+static const char *fmt_patch_subject_prefix = "PATCH";
+static int fmt_patch_name_max = FORMAT_PATCH_NAME_MAX_DEFAULT;
+static const char *fmt_pretty;
+
+static const char * const builtin_log_usage[] = {
+ N_("git log [<options>] [<revision-range>] [[--] <path>...]"),
+ N_("git show [<options>] <object>..."),
+ NULL
+};
+
+struct line_opt_callback_data {
+ struct rev_info *rev;
+ const char *prefix;
+ struct string_list args;
+};
+
+static int session_is_interactive(void)
+{
+ return isatty(1) || pager_in_use();
+}
+
+static int auto_decoration_style(void)
+{
+ return session_is_interactive() ? DECORATE_SHORT_REFS : 0;
+}
+
+static int parse_decoration_style(const char *value)
+{
+ switch (git_parse_maybe_bool(value)) {
+ case 1:
+ return DECORATE_SHORT_REFS;
+ case 0:
+ return 0;
+ default:
+ break;
+ }
+ if (!strcmp(value, "full"))
+ return DECORATE_FULL_REFS;
+ else if (!strcmp(value, "short"))
+ return DECORATE_SHORT_REFS;
+ else if (!strcmp(value, "auto"))
+ return auto_decoration_style();
+ /*
+ * Please update _git_log() in git-completion.bash when you
+ * add new decoration styles.
+ */
+ return -1;
+}
+
+static int use_default_decoration_filter = 1;
+static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP;
+static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP;
+static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
+
+static int clear_decorations_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ string_list_clear(&decorate_refs_include, 0);
+ string_list_clear(&decorate_refs_exclude, 0);
+ use_default_decoration_filter = 0;
+ return 0;
+}
+
+static int decorate_callback(const struct option *opt, const char *arg, int unset)
+{
+ if (unset)
+ decoration_style = 0;
+ else if (arg)
+ decoration_style = parse_decoration_style(arg);
+ else
+ decoration_style = DECORATE_SHORT_REFS;
+
+ if (decoration_style < 0)
+ die(_("invalid --decorate option: %s"), arg);
+
+ decoration_given = 1;
+
+ return 0;
+}
+
+static int log_line_range_callback(const struct option *option, const char *arg, int unset)
+{
+ struct line_opt_callback_data *data = option->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (!arg)
+ return -1;
+
+ data->rev->line_level_traverse = 1;
+ string_list_append(&data->args, arg);
+
+ return 0;
+}
+
+static void init_log_defaults(void)
+{
+ init_diff_ui_defaults();
+
+ decoration_style = auto_decoration_style();
+}
+
+static void cmd_log_init_defaults(struct rev_info *rev)
+{
+ if (fmt_pretty)
+ get_commit_format(fmt_pretty, rev);
+ if (default_follow)
+ rev->diffopt.flags.default_follow_renames = 1;
+ rev->verbose_header = 1;
+ rev->diffopt.flags.recursive = 1;
+ rev->diffopt.stat_width = -1; /* use full terminal width */
+ rev->diffopt.stat_graph_width = -1; /* respect statGraphWidth config */
+ rev->abbrev_commit = default_abbrev_commit;
+ rev->show_root_diff = default_show_root;
+ rev->subject_prefix = fmt_patch_subject_prefix;
+ rev->patch_name_max = fmt_patch_name_max;
+ rev->show_signature = default_show_signature;
+ rev->encode_email_headers = default_encode_email_headers;
+ rev->diffopt.flags.allow_textconv = 1;
+
+ if (default_date_mode)
+ parse_date_format(default_date_mode, &rev->date_mode);
+}
+
+static void set_default_decoration_filter(struct decoration_filter *decoration_filter)
+{
+ int i;
+ char *value = NULL;
+ struct string_list *include = decoration_filter->include_ref_pattern;
+ const struct string_list *config_exclude =
+ git_config_get_value_multi("log.excludeDecoration");
+
+ if (config_exclude) {
+ struct string_list_item *item;
+ for_each_string_list_item(item, config_exclude)
+ string_list_append(decoration_filter->exclude_ref_config_pattern,
+ item->string);
+ }
+
+ /*
+ * By default, decorate_all is disabled. Enable it if
+ * log.initialDecorationSet=all. Don't ever disable it by config,
+	 * since the command-line takes precedence.
+ */
+ if (use_default_decoration_filter &&
+ !git_config_get_string("log.initialdecorationset", &value) &&
+ !strcmp("all", value))
+ use_default_decoration_filter = 0;
+ free(value);
+
+ if (!use_default_decoration_filter ||
+ decoration_filter->exclude_ref_pattern->nr ||
+ decoration_filter->include_ref_pattern->nr ||
+ decoration_filter->exclude_ref_config_pattern->nr)
+ return;
+
+ /*
+ * No command-line or config options were given, so
+ * populate with sensible defaults.
+ */
+ for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) {
+ if (!ref_namespace[i].decoration)
+ continue;
+
+ string_list_append(include, ref_namespace[i].ref);
+ }
+}
+
+static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
+ struct rev_info *rev, struct setup_revision_opt *opt)
+{
+ struct userformat_want w;
+ int quiet = 0, source = 0, mailmap;
+ static struct line_opt_callback_data line_cb = {NULL, NULL, STRING_LIST_INIT_DUP};
+ struct decoration_filter decoration_filter = {
+ .exclude_ref_pattern = &decorate_refs_exclude,
+ .include_ref_pattern = &decorate_refs_include,
+ .exclude_ref_config_pattern = &decorate_refs_exclude_config,
+ };
+ static struct revision_sources revision_sources;
+
+ const struct option builtin_log_options[] = {
+ OPT__QUIET(&quiet, N_("suppress diff output")),
+ OPT_BOOL(0, "source", &source, N_("show source")),
+ OPT_BOOL(0, "use-mailmap", &mailmap, N_("use mail map file")),
+ OPT_ALIAS(0, "mailmap", "use-mailmap"),
+ OPT_CALLBACK_F(0, "clear-decorations", NULL, NULL,
+ N_("clear all previously-defined decoration filters"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ clear_decorations_callback),
+ OPT_STRING_LIST(0, "decorate-refs", &decorate_refs_include,
+ N_("pattern"), N_("only decorate refs that match <pattern>")),
+ OPT_STRING_LIST(0, "decorate-refs-exclude", &decorate_refs_exclude,
+ N_("pattern"), N_("do not decorate refs that match <pattern>")),
+ OPT_CALLBACK_F(0, "decorate", NULL, NULL, N_("decorate options"),
+ PARSE_OPT_OPTARG, decorate_callback),
+ OPT_CALLBACK('L', NULL, &line_cb, "range:file",
+ N_("trace the evolution of line range <start>,<end> or function :<funcname> in <file>"),
+ log_line_range_callback),
+ OPT_END()
+ };
+
+ line_cb.rev = rev;
+ line_cb.prefix = prefix;
+
+ mailmap = use_mailmap_config;
+ argc = parse_options(argc, argv, prefix,
+ builtin_log_options, builtin_log_usage,
+ PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT |
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (quiet)
+ rev->diffopt.output_format |= DIFF_FORMAT_NO_OUTPUT;
+ argc = setup_revisions(argc, argv, rev, opt);
+
+ /* Any arguments at this point are not recognized */
+ if (argc > 1)
+ die(_("unrecognized argument: %s"), argv[1]);
+
+ if (rev->line_level_traverse && rev->prune_data.nr)
+ die(_("-L<range>:<file> cannot be used with pathspec"));
+
+ memset(&w, 0, sizeof(w));
+ userformat_find_requirements(NULL, &w);
+
+ if (!rev->show_notes_given && (!rev->pretty_given || w.notes))
+ rev->show_notes = 1;
+ if (rev->show_notes)
+ load_display_notes(&rev->notes_opt);
+
+ if ((rev->diffopt.pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) ||
+ rev->diffopt.filter || rev->diffopt.flags.follow_renames)
+ rev->always_show_header = 0;
+
+ if (source || w.source) {
+ init_revision_sources(&revision_sources);
+ rev->sources = &revision_sources;
+ }
+
+ if (mailmap) {
+ rev->mailmap = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(rev->mailmap);
+ read_mailmap(rev->mailmap);
+ }
+
+ if (rev->pretty_given && rev->commit_format == CMIT_FMT_RAW) {
+ /*
+ * "log --pretty=raw" is special; ignore UI oriented
+ * configuration variables such as decoration.
+ */
+ if (!decoration_given)
+ decoration_style = 0;
+ if (!rev->abbrev_commit_given)
+ rev->abbrev_commit = 0;
+ }
+
+ if (rev->commit_format == CMIT_FMT_USERFORMAT) {
+ if (!w.decorate) {
+ /*
+ * Disable decoration loading if the format will not
+ * show them anyway.
+ */
+ decoration_style = 0;
+ } else if (!decoration_style) {
+ /*
+ * If we are going to show them, make sure we do load
+ * them here, but taking care not to override a
+ * specific style set by config or --decorate.
+ */
+ decoration_style = DECORATE_SHORT_REFS;
+ }
+ }
+
+ if (decoration_style || rev->simplify_by_decoration) {
+ set_default_decoration_filter(&decoration_filter);
+
+ if (decoration_style)
+ rev->show_decorations = 1;
+
+ load_ref_decorations(&decoration_filter, decoration_style);
+ }
+
+ if (rev->line_level_traverse)
+ line_log_init(rev, line_cb.prefix, &line_cb.args);
+
+ setup_pager();
+}
+
+static void cmd_log_init(int argc, const char **argv, const char *prefix,
+ struct rev_info *rev, struct setup_revision_opt *opt)
+{
+ cmd_log_init_defaults(rev);
+ cmd_log_init_finish(argc, argv, prefix, rev, opt);
+}
+
+static int cmd_log_deinit(int ret, struct rev_info *rev)
+{
+ release_revisions(rev);
+ return ret;
+}
+
+/*
+ * This gives a rough estimate for how many commits we
+ * will print out in the list.
+ */
+static int estimate_commit_count(struct commit_list *list)
+{
+ int n = 0;
+
+ while (list) {
+ struct commit *commit = list->item;
+ unsigned int flags = commit->object.flags;
+ list = list->next;
+ if (!(flags & (TREESAME | UNINTERESTING)))
+ n++;
+ }
+ return n;
+}
+
+static void show_early_header(struct rev_info *rev, const char *stage, int nr)
+{
+ if (rev->shown_one) {
+ rev->shown_one = 0;
+ if (rev->commit_format != CMIT_FMT_ONELINE)
+ putchar(rev->diffopt.line_termination);
+ }
+ fprintf(rev->diffopt.file, _("Final output: %d %s\n"), nr, stage);
+}
+
+static struct itimerval early_output_timer;
+
+static void log_show_early(struct rev_info *revs, struct commit_list *list)
+{
+ int i = revs->early_output;
+ int show_header = 1;
+ int no_free = revs->diffopt.no_free;
+
+ revs->diffopt.no_free = 0;
+ sort_in_topological_order(&list, revs->sort_order);
+ while (list && i) {
+ struct commit *commit = list->item;
+ switch (simplify_commit(revs, commit)) {
+ case commit_show:
+ if (show_header) {
+ int n = estimate_commit_count(list);
+ show_early_header(revs, "incomplete", n);
+ show_header = 0;
+ }
+ log_tree_commit(revs, commit);
+ i--;
+ break;
+ case commit_ignore:
+ break;
+ case commit_error:
+ revs->diffopt.no_free = no_free;
+ diff_free(&revs->diffopt);
+ return;
+ }
+ list = list->next;
+ }
+
+ /* Did we already get enough commits for the early output? */
+ if (!i) {
+ revs->diffopt.no_free = 0;
+ diff_free(&revs->diffopt);
+ return;
+ }
+
+ /*
+	 * ..if not, then repeat it twice a second until we
+ * do.
+ *
+ * NOTE! We don't use "it_interval", because if the
+ * reader isn't listening, we want our output to be
+ * throttled by the writing, and not have the timer
+ * trigger every second even if we're blocked on a
+ * reader!
+ */
+ early_output_timer.it_value.tv_sec = 0;
+ early_output_timer.it_value.tv_usec = 500000;
+ setitimer(ITIMER_REAL, &early_output_timer, NULL);
+}
+
+static void early_output(int signal)
+{
+ show_early_output = log_show_early;
+}
+
+static void setup_early_output(void)
+{
+ struct sigaction sa;
+
+ /*
+ * Set up the signal handler, minimally intrusively:
+ * we only set a single volatile integer word (not
+	 * using sig_atomic_t - trying to avoid unnecessary
+ * system dependencies and headers), and using
+ * SA_RESTART.
+ */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = early_output;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ sigaction(SIGALRM, &sa, NULL);
+
+ /*
+ * If we can get the whole output in less than a
+ * tenth of a second, don't even bother doing the
+ * early-output thing..
+ *
+ * This is a one-time-only trigger.
+ */
+ early_output_timer.it_value.tv_sec = 0;
+ early_output_timer.it_value.tv_usec = 100000;
+ setitimer(ITIMER_REAL, &early_output_timer, NULL);
+}
+
+static void finish_early_output(struct rev_info *rev)
+{
+ int n = estimate_commit_count(rev->commits);
+ signal(SIGALRM, SIG_IGN);
+ show_early_header(rev, "done", n);
+}
+
+static int cmd_log_walk_no_free(struct rev_info *rev)
+{
+ struct commit *commit;
+ int saved_nrl = 0;
+ int saved_dcctc = 0;
+
+ if (rev->remerge_diff) {
+ rev->remerge_objdir = tmp_objdir_create("remerge-diff");
+ if (!rev->remerge_objdir)
+ die(_("unable to create temporary object directory"));
+ tmp_objdir_replace_primary_odb(rev->remerge_objdir, 1);
+ }
+
+ if (rev->early_output)
+ setup_early_output();
+
+ if (prepare_revision_walk(rev))
+ die(_("revision walk setup failed"));
+
+ if (rev->early_output)
+ finish_early_output(rev);
+
+ /*
+ * For --check and --exit-code, the exit code is based on CHECK_FAILED
+ * and HAS_CHANGES being accumulated in rev->diffopt, so be careful to
+ * retain that state information if replacing rev->diffopt in this loop
+ */
+ while ((commit = get_revision(rev)) != NULL) {
+ if (!log_tree_commit(rev, commit) && rev->max_count >= 0)
+ /*
+ * We decremented max_count in get_revision,
+ * but we didn't actually show the commit.
+ */
+ rev->max_count++;
+ if (!rev->reflog_info) {
+ /*
+ * We may show a given commit multiple times when
+ * walking the reflogs.
+ */
+ free_commit_buffer(the_repository->parsed_objects,
+ commit);
+ free_commit_list(commit->parents);
+ commit->parents = NULL;
+ }
+ if (saved_nrl < rev->diffopt.needed_rename_limit)
+ saved_nrl = rev->diffopt.needed_rename_limit;
+ if (rev->diffopt.degraded_cc_to_c)
+ saved_dcctc = 1;
+ }
+ rev->diffopt.degraded_cc_to_c = saved_dcctc;
+ rev->diffopt.needed_rename_limit = saved_nrl;
+
+ if (rev->remerge_diff) {
+ tmp_objdir_destroy(rev->remerge_objdir);
+ rev->remerge_objdir = NULL;
+ }
+
+ if (rev->diffopt.output_format & DIFF_FORMAT_CHECKDIFF &&
+ rev->diffopt.flags.check_failed) {
+ return 02;
+ }
+ return diff_result_code(&rev->diffopt, 0);
+}
+
+static int cmd_log_walk(struct rev_info *rev)
+{
+ int retval;
+
+ rev->diffopt.no_free = 1;
+ retval = cmd_log_walk_no_free(rev);
+ rev->diffopt.no_free = 0;
+ diff_free(&rev->diffopt);
+ return retval;
+}
+
+static int git_log_config(const char *var, const char *value, void *cb)
+{
+ const char *slot_name;
+
+ if (!strcmp(var, "format.pretty"))
+ return git_config_string(&fmt_pretty, var, value);
+ if (!strcmp(var, "format.subjectprefix"))
+ return git_config_string(&fmt_patch_subject_prefix, var, value);
+ if (!strcmp(var, "format.filenamemaxlength")) {
+ fmt_patch_name_max = git_config_int(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "format.encodeemailheaders")) {
+ default_encode_email_headers = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "log.abbrevcommit")) {
+ default_abbrev_commit = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "log.date"))
+ return git_config_string(&default_date_mode, var, value);
+ if (!strcmp(var, "log.decorate")) {
+ decoration_style = parse_decoration_style(value);
+ if (decoration_style < 0)
+ decoration_style = 0; /* maybe warn? */
+ return 0;
+ }
+ if (!strcmp(var, "log.diffmerges"))
+ return diff_merges_config(value);
+ if (!strcmp(var, "log.showroot")) {
+ default_show_root = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "log.follow")) {
+ default_follow = git_config_bool(var, value);
+ return 0;
+ }
+ if (skip_prefix(var, "color.decorate.", &slot_name))
+ return parse_decorate_color_config(var, slot_name, value);
+ if (!strcmp(var, "log.mailmap")) {
+ use_mailmap_config = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "log.showsignature")) {
+ default_show_signature = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (git_gpg_config(var, value, cb) < 0)
+ return -1;
+ return git_diff_ui_config(var, value, cb);
+}
+
+int cmd_whatchanged(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info rev;
+ struct setup_revision_opt opt;
+
+ init_log_defaults();
+ git_config(git_log_config, NULL);
+
+ repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
+ rev.diff = 1;
+ rev.simplify_history = 0;
+ memset(&opt, 0, sizeof(opt));
+ opt.def = "HEAD";
+ opt.revarg_opt = REVARG_COMMITTISH;
+ cmd_log_init(argc, argv, prefix, &rev, &opt);
+ if (!rev.diffopt.output_format)
+ rev.diffopt.output_format = DIFF_FORMAT_RAW;
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
+}
+
+static void show_tagger(const char *buf, struct rev_info *rev)
+{
+ struct strbuf out = STRBUF_INIT;
+ struct pretty_print_context pp = {0};
+
+ pp.fmt = rev->commit_format;
+ pp.date_mode = rev->date_mode;
+ pp_user_info(&pp, "Tagger", &out, buf, get_log_output_encoding());
+ fprintf(rev->diffopt.file, "%s", out.buf);
+ strbuf_release(&out);
+}
+
+static int show_blob_object(const struct object_id *oid, struct rev_info *rev, const char *obj_name)
+{
+ struct object_id oidc;
+ struct object_context obj_context;
+ char *buf;
+ unsigned long size;
+
+ fflush(rev->diffopt.file);
+ if (!rev->diffopt.flags.textconv_set_via_cmdline ||
+ !rev->diffopt.flags.allow_textconv)
+ return stream_blob_to_fd(1, oid, NULL, 0);
+
+ if (get_oid_with_context(the_repository, obj_name,
+ GET_OID_RECORD_PATH,
+ &oidc, &obj_context))
+ die(_("not a valid object name %s"), obj_name);
+ if (!obj_context.path ||
+ !textconv_object(the_repository, obj_context.path,
+ obj_context.mode, &oidc, 1, &buf, &size)) {
+ free(obj_context.path);
+ return stream_blob_to_fd(1, oid, NULL, 0);
+ }
+
+ if (!buf)
+ die(_("git show %s: bad file"), obj_name);
+
+ write_or_die(1, buf, size);
+ free(obj_context.path);
+ return 0;
+}
+
+static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
+{
+ unsigned long size;
+ enum object_type type;
+ char *buf = read_object_file(oid, &type, &size);
+ int offset = 0;
+
+ if (!buf)
+ return error(_("could not read object %s"), oid_to_hex(oid));
+
+ assert(type == OBJ_TAG);
+ while (offset < size && buf[offset] != '\n') {
+ int new_offset = offset + 1;
+ const char *ident;
+ while (new_offset < size && buf[new_offset++] != '\n')
+ ; /* do nothing */
+ if (skip_prefix(buf + offset, "tagger ", &ident))
+ show_tagger(ident, rev);
+ offset = new_offset;
+ }
+
+ if (offset < size)
+ fwrite(buf + offset, size - offset, 1, rev->diffopt.file);
+ free(buf);
+ return 0;
+}
+
+static int show_tree_object(const struct object_id *oid UNUSED,
+ struct strbuf *base UNUSED,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ FILE *file = context;
+ fprintf(file, "%s%s\n", pathname, S_ISDIR(mode) ? "/" : "");
+ return 0;
+}
+
+static void show_setup_revisions_tweak(struct rev_info *rev,
+ struct setup_revision_opt *opt)
+{
+ if (rev->first_parent_only)
+ diff_merges_default_to_first_parent(rev);
+ else
+ diff_merges_default_to_dense_combined(rev);
+ if (!rev->diffopt.output_format)
+ rev->diffopt.output_format = DIFF_FORMAT_PATCH;
+}
+
+int cmd_show(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info rev;
+ unsigned int i;
+ struct setup_revision_opt opt;
+ struct pathspec match_all;
+ int ret = 0;
+
+ init_log_defaults();
+ git_config(git_log_config, NULL);
+
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
+ memset(&match_all, 0, sizeof(match_all));
+ repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
+ rev.diff = 1;
+ rev.always_show_header = 1;
+ rev.no_walk = 1;
+ rev.diffopt.stat_width = -1; /* Scale to real terminal size */
+
+ memset(&opt, 0, sizeof(opt));
+ opt.def = "HEAD";
+ opt.tweak = show_setup_revisions_tweak;
+ cmd_log_init(argc, argv, prefix, &rev, &opt);
+
+ if (!rev.no_walk)
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
+
+ rev.diffopt.no_free = 1;
+ for (i = 0; i < rev.pending.nr && !ret; i++) {
+ struct object *o = rev.pending.objects[i].item;
+ const char *name = rev.pending.objects[i].name;
+ switch (o->type) {
+ case OBJ_BLOB:
+ ret = show_blob_object(&o->oid, &rev, name);
+ break;
+ case OBJ_TAG: {
+ struct tag *t = (struct tag *)o;
+ struct object_id *oid = get_tagged_oid(t);
+
+ if (rev.shown_one)
+ putchar('\n');
+ fprintf(rev.diffopt.file, "%stag %s%s\n",
+ diff_get_color_opt(&rev.diffopt, DIFF_COMMIT),
+ t->tag,
+ diff_get_color_opt(&rev.diffopt, DIFF_RESET));
+ ret = show_tag_object(&o->oid, &rev);
+ rev.shown_one = 1;
+ if (ret)
+ break;
+ o = parse_object(the_repository, oid);
+ if (!o)
+ ret = error(_("could not read object %s"),
+ oid_to_hex(oid));
+ rev.pending.objects[i].item = o;
+ i--;
+ break;
+ }
+ case OBJ_TREE:
+ if (rev.shown_one)
+ putchar('\n');
+ fprintf(rev.diffopt.file, "%stree %s%s\n\n",
+ diff_get_color_opt(&rev.diffopt, DIFF_COMMIT),
+ name,
+ diff_get_color_opt(&rev.diffopt, DIFF_RESET));
+ read_tree(the_repository, (struct tree *)o,
+ &match_all, show_tree_object,
+ rev.diffopt.file);
+ rev.shown_one = 1;
+ break;
+ case OBJ_COMMIT:
+ {
+ struct object_array old;
+ struct object_array blank = OBJECT_ARRAY_INIT;
+
+ memcpy(&old, &rev.pending, sizeof(old));
+ memcpy(&rev.pending, &blank, sizeof(rev.pending));
+
+ add_object_array(o, name, &rev.pending);
+ ret = cmd_log_walk_no_free(&rev);
+
+ /*
+ * No need for
+ * object_array_clear(&pending). It was
+ * cleared already in prepare_revision_walk()
+ */
+ memcpy(&rev.pending, &old, sizeof(rev.pending));
+ break;
+ }
+ default:
+ ret = error(_("unknown type: %d"), o->type);
+ }
+ }
+
+ rev.diffopt.no_free = 0;
+ diff_free(&rev.diffopt);
+
+ return cmd_log_deinit(ret, &rev);
+}
+
+/*
+ * This is equivalent to "git log -g --abbrev-commit --pretty=oneline"
+ */
+int cmd_log_reflog(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info rev;
+ struct setup_revision_opt opt;
+
+ init_log_defaults();
+ git_config(git_log_config, NULL);
+
+ repo_init_revisions(the_repository, &rev, prefix);
+ init_reflog_walk(&rev.reflog_info);
+ git_config(grep_config, &rev.grep_filter);
+
+ rev.verbose_header = 1;
+ memset(&opt, 0, sizeof(opt));
+ opt.def = "HEAD";
+ cmd_log_init_defaults(&rev);
+ rev.abbrev_commit = 1;
+ rev.commit_format = CMIT_FMT_ONELINE;
+ rev.use_terminator = 1;
+ rev.always_show_header = 1;
+ cmd_log_init_finish(argc, argv, prefix, &rev, &opt);
+
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
+}
+
+static void log_setup_revisions_tweak(struct rev_info *rev,
+ struct setup_revision_opt *opt)
+{
+ if (rev->diffopt.flags.default_follow_renames &&
+ rev->prune_data.nr == 1)
+ rev->diffopt.flags.follow_renames = 1;
+
+ if (rev->first_parent_only)
+ diff_merges_default_to_first_parent(rev);
+}
+
+int cmd_log(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info rev;
+ struct setup_revision_opt opt;
+
+ init_log_defaults();
+ git_config(git_log_config, NULL);
+
+ repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
+ rev.always_show_header = 1;
+ memset(&opt, 0, sizeof(opt));
+ opt.def = "HEAD";
+ opt.revarg_opt = REVARG_COMMITTISH;
+ opt.tweak = log_setup_revisions_tweak;
+ cmd_log_init(argc, argv, prefix, &rev, &opt);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
+}
+
+/* format-patch */
+
+static const char *fmt_patch_suffix = ".patch";
+static int numbered = 0;
+static int auto_number = 1;
+
+static char *default_attach = NULL;
+
+static struct string_list extra_hdr = STRING_LIST_INIT_NODUP;
+static struct string_list extra_to = STRING_LIST_INIT_NODUP;
+static struct string_list extra_cc = STRING_LIST_INIT_NODUP;
+
+static void add_header(const char *value)
+{
+ struct string_list_item *item;
+ int len = strlen(value);
+ while (len && value[len - 1] == '\n')
+ len--;
+
+ if (!strncasecmp(value, "to: ", 4)) {
+ item = string_list_append(&extra_to, value + 4);
+ len -= 4;
+ } else if (!strncasecmp(value, "cc: ", 4)) {
+ item = string_list_append(&extra_cc, value + 4);
+ len -= 4;
+ } else {
+ item = string_list_append(&extra_hdr, value);
+ }
+
+ item->string[len] = '\0';
+}
+
+enum cover_setting {
+ COVER_UNSET,
+ COVER_OFF,
+ COVER_ON,
+ COVER_AUTO
+};
+
+enum thread_level {
+ THREAD_UNSET,
+ THREAD_SHALLOW,
+ THREAD_DEEP
+};
+
+enum cover_from_description {
+ COVER_FROM_NONE,
+ COVER_FROM_MESSAGE,
+ COVER_FROM_SUBJECT,
+ COVER_FROM_AUTO
+};
+
+enum auto_base_setting {
+ AUTO_BASE_NEVER,
+ AUTO_BASE_ALWAYS,
+ AUTO_BASE_WHEN_ABLE
+};
+
+static enum thread_level thread;
+static int do_signoff;
+static enum auto_base_setting auto_base;
+static char *from;
+static const char *signature = git_version_string;
+static const char *signature_file;
+static enum cover_setting config_cover_letter;
+static const char *config_output_directory;
+static enum cover_from_description cover_from_description_mode = COVER_FROM_MESSAGE;
+static int show_notes;
+static struct display_notes_opt notes_opt;
+
+static enum cover_from_description parse_cover_from_description(const char *arg)
+{
+ if (!arg || !strcmp(arg, "default"))
+ return COVER_FROM_MESSAGE;
+ else if (!strcmp(arg, "none"))
+ return COVER_FROM_NONE;
+ else if (!strcmp(arg, "message"))
+ return COVER_FROM_MESSAGE;
+ else if (!strcmp(arg, "subject"))
+ return COVER_FROM_SUBJECT;
+ else if (!strcmp(arg, "auto"))
+ return COVER_FROM_AUTO;
+ else
+ die(_("%s: invalid cover from description mode"), arg);
+}
+
+static int git_format_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "format.headers")) {
+ if (!value)
+ die(_("format.headers without value"));
+ add_header(value);
+ return 0;
+ }
+ if (!strcmp(var, "format.suffix"))
+ return git_config_string(&fmt_patch_suffix, var, value);
+ if (!strcmp(var, "format.to")) {
+ if (!value)
+ return config_error_nonbool(var);
+ string_list_append(&extra_to, value);
+ return 0;
+ }
+ if (!strcmp(var, "format.cc")) {
+ if (!value)
+ return config_error_nonbool(var);
+ string_list_append(&extra_cc, value);
+ return 0;
+ }
+ if (!strcmp(var, "diff.color") || !strcmp(var, "color.diff") ||
+ !strcmp(var, "color.ui") || !strcmp(var, "diff.submodule")) {
+ return 0;
+ }
+ if (!strcmp(var, "format.numbered")) {
+ if (value && !strcasecmp(value, "auto")) {
+ auto_number = 1;
+ return 0;
+ }
+ numbered = git_config_bool(var, value);
+ auto_number = auto_number && numbered;
+ return 0;
+ }
+ if (!strcmp(var, "format.attach")) {
+ if (value && *value)
+ default_attach = xstrdup(value);
+ else
+ default_attach = xstrdup(git_version_string);
+ return 0;
+ }
+ if (!strcmp(var, "format.thread")) {
+ if (value && !strcasecmp(value, "deep")) {
+ thread = THREAD_DEEP;
+ return 0;
+ }
+ if (value && !strcasecmp(value, "shallow")) {
+ thread = THREAD_SHALLOW;
+ return 0;
+ }
+ thread = git_config_bool(var, value) ? THREAD_SHALLOW : THREAD_UNSET;
+ return 0;
+ }
+ if (!strcmp(var, "format.signoff")) {
+ do_signoff = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "format.signature"))
+ return git_config_string(&signature, var, value);
+ if (!strcmp(var, "format.signaturefile"))
+ return git_config_pathname(&signature_file, var, value);
+ if (!strcmp(var, "format.coverletter")) {
+ if (value && !strcasecmp(value, "auto")) {
+ config_cover_letter = COVER_AUTO;
+ return 0;
+ }
+ config_cover_letter = git_config_bool(var, value) ? COVER_ON : COVER_OFF;
+ return 0;
+ }
+ if (!strcmp(var, "format.outputdirectory"))
+ return git_config_string(&config_output_directory, var, value);
+ if (!strcmp(var, "format.useautobase")) {
+ if (value && !strcasecmp(value, "whenAble")) {
+ auto_base = AUTO_BASE_WHEN_ABLE;
+ return 0;
+ }
+ auto_base = git_config_bool(var, value) ? AUTO_BASE_ALWAYS : AUTO_BASE_NEVER;
+ return 0;
+ }
+ if (!strcmp(var, "format.from")) {
+ int b = git_parse_maybe_bool(value);
+ free(from);
+ if (b < 0)
+ from = xstrdup(value);
+ else if (b)
+ from = xstrdup(git_committer_info(IDENT_NO_DATE));
+ else
+ from = NULL;
+ return 0;
+ }
+ if (!strcmp(var, "format.forceinbodyfrom")) {
+ force_in_body_from = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "format.notes")) {
+ int b = git_parse_maybe_bool(value);
+ if (b < 0)
+ enable_ref_display_notes(&notes_opt, &show_notes, value);
+ else if (b)
+ enable_default_display_notes(&notes_opt, &show_notes);
+ else
+ disable_display_notes(&notes_opt, &show_notes);
+ return 0;
+ }
+ if (!strcmp(var, "format.coverfromdescription")) {
+ cover_from_description_mode = parse_cover_from_description(value);
+ return 0;
+ }
+
+ return git_log_config(var, value, cb);
+}
+
+static const char *output_directory = NULL;
+static int outdir_offset;
+
+static int open_next_file(struct commit *commit, const char *subject,
+ struct rev_info *rev, int quiet)
+{
+ struct strbuf filename = STRBUF_INIT;
+
+ if (output_directory) {
+ strbuf_addstr(&filename, output_directory);
+ strbuf_complete(&filename, '/');
+ }
+
+ if (rev->numbered_files)
+ strbuf_addf(&filename, "%d", rev->nr);
+ else if (commit)
+ fmt_output_commit(&filename, commit, rev);
+ else
+ fmt_output_subject(&filename, subject, rev);
+
+ if (!quiet)
+ printf("%s\n", filename.buf + outdir_offset);
+
+ if (!(rev->diffopt.file = fopen(filename.buf, "w"))) {
+ error_errno(_("cannot open patch file %s"), filename.buf);
+ strbuf_release(&filename);
+ return -1;
+ }
+
+ strbuf_release(&filename);
+ return 0;
+}
+
+static void get_patch_ids(struct rev_info *rev, struct patch_ids *ids)
+{
+ struct rev_info check_rev;
+ struct commit *commit, *c1, *c2;
+ struct object *o1, *o2;
+ unsigned flags1, flags2;
+
+ if (rev->pending.nr != 2)
+ die(_("need exactly one range"));
+
+ o1 = rev->pending.objects[0].item;
+ o2 = rev->pending.objects[1].item;
+ flags1 = o1->flags;
+ flags2 = o2->flags;
+ c1 = lookup_commit_reference(the_repository, &o1->oid);
+ c2 = lookup_commit_reference(the_repository, &o2->oid);
+
+ if ((flags1 & UNINTERESTING) == (flags2 & UNINTERESTING))
+ die(_("not a range"));
+
+ init_patch_ids(the_repository, ids);
+
+ /* given a range a..b get all patch ids for b..a */
+ repo_init_revisions(the_repository, &check_rev, rev->prefix);
+ check_rev.max_parents = 1;
+ o1->flags ^= UNINTERESTING;
+ o2->flags ^= UNINTERESTING;
+ add_pending_object(&check_rev, o1, "o1");
+ add_pending_object(&check_rev, o2, "o2");
+ if (prepare_revision_walk(&check_rev))
+ die(_("revision walk setup failed"));
+
+ while ((commit = get_revision(&check_rev)) != NULL) {
+ add_commit_patch_id(commit, ids);
+ }
+
+ /* reset for next revision walk */
+ clear_commit_marks(c1, SEEN | UNINTERESTING | SHOWN | ADDED);
+ clear_commit_marks(c2, SEEN | UNINTERESTING | SHOWN | ADDED);
+ o1->flags = flags1;
+ o2->flags = flags2;
+}
+
+static void gen_message_id(struct rev_info *info, char *base)
+{
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_addf(&buf, "%s.%"PRItime".git.%s", base,
+ (timestamp_t) time(NULL),
+ git_committer_info(IDENT_NO_NAME|IDENT_NO_DATE|IDENT_STRICT));
+ info->message_id = strbuf_detach(&buf, NULL);
+}
+
+static void print_signature(FILE *file)
+{
+ if (!signature || !*signature)
+ return;
+
+ fprintf(file, "-- \n%s", signature);
+ if (signature[strlen(signature)-1] != '\n')
+ putc('\n', file);
+ putc('\n', file);
+}
+
+static char *find_branch_name(struct rev_info *rev)
+{
+ int i, positive = -1;
+ struct object_id branch_oid;
+ const struct object_id *tip_oid;
+ const char *ref, *v;
+ char *full_ref, *branch = NULL;
+
+ for (i = 0; i < rev->cmdline.nr; i++) {
+ if (rev->cmdline.rev[i].flags & UNINTERESTING)
+ continue;
+ if (positive < 0)
+ positive = i;
+ else
+ return NULL;
+ }
+ if (positive < 0)
+ return NULL;
+ ref = rev->cmdline.rev[positive].name;
+ tip_oid = &rev->cmdline.rev[positive].item->oid;
+ if (dwim_ref(ref, strlen(ref), &branch_oid, &full_ref, 0) &&
+ skip_prefix(full_ref, "refs/heads/", &v) &&
+ oideq(tip_oid, &branch_oid))
+ branch = xstrdup(v);
+ free(full_ref);
+ return branch;
+}
+
+static void show_diffstat(struct rev_info *rev,
+ struct commit *origin, struct commit *head)
+{
+ struct diff_options opts;
+
+ memcpy(&opts, &rev->diffopt, sizeof(opts));
+ opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ diff_setup_done(&opts);
+
+ diff_tree_oid(get_commit_tree_oid(origin),
+ get_commit_tree_oid(head),
+ "", &opts);
+ diffcore_std(&opts);
+ diff_flush(&opts);
+
+ fprintf(rev->diffopt.file, "\n");
+}
+
+static void prepare_cover_text(struct pretty_print_context *pp,
+ const char *branch_name,
+ struct strbuf *sb,
+ const char *encoding,
+ int need_8bit_cte)
+{
+ const char *subject = "*** SUBJECT HERE ***";
+ const char *body = "*** BLURB HERE ***";
+ struct strbuf description_sb = STRBUF_INIT;
+ struct strbuf subject_sb = STRBUF_INIT;
+
+ if (cover_from_description_mode == COVER_FROM_NONE)
+ goto do_pp;
+
+ if (branch_name && *branch_name)
+ read_branch_desc(&description_sb, branch_name);
+ if (!description_sb.len)
+ goto do_pp;
+
+ if (cover_from_description_mode == COVER_FROM_SUBJECT ||
+ cover_from_description_mode == COVER_FROM_AUTO)
+ body = format_subject(&subject_sb, description_sb.buf, " ");
+
+ if (cover_from_description_mode == COVER_FROM_MESSAGE ||
+ (cover_from_description_mode == COVER_FROM_AUTO &&
+ subject_sb.len > COVER_FROM_AUTO_MAX_SUBJECT_LEN))
+ body = description_sb.buf;
+ else
+ subject = subject_sb.buf;
+
+do_pp:
+ pp_title_line(pp, &subject, sb, encoding, need_8bit_cte);
+ pp_remainder(pp, &body, sb, 0);
+
+ strbuf_release(&description_sb);
+ strbuf_release(&subject_sb);
+}
+
+static int get_notes_refs(struct string_list_item *item, void *arg)
+{
+ strvec_pushf(arg, "--notes=%s", item->string);
+ return 0;
+}
+
+static void get_notes_args(struct strvec *arg, struct rev_info *rev)
+{
+ if (!rev->show_notes) {
+ strvec_push(arg, "--no-notes");
+ } else if (rev->notes_opt.use_default_notes > 0 ||
+ (rev->notes_opt.use_default_notes == -1 &&
+ !rev->notes_opt.extra_notes_refs.nr)) {
+ strvec_push(arg, "--notes");
+ } else {
+ for_each_string_list(&rev->notes_opt.extra_notes_refs, get_notes_refs, arg);
+ }
+}
+
+static void make_cover_letter(struct rev_info *rev, int use_separate_file,
+ struct commit *origin,
+ int nr, struct commit **list,
+ const char *branch_name,
+ int quiet)
+{
+ const char *committer;
+ struct shortlog log;
+ struct strbuf sb = STRBUF_INIT;
+ int i;
+ const char *encoding = "UTF-8";
+ int need_8bit_cte = 0;
+ struct pretty_print_context pp = {0};
+ struct commit *head = list[0];
+
+ if (!cmit_fmt_is_mail(rev->commit_format))
+ die(_("cover letter needs email format"));
+
+ committer = git_committer_info(0);
+
+ if (use_separate_file &&
+ open_next_file(NULL, rev->numbered_files ? NULL : "cover-letter", rev, quiet))
+ die(_("failed to create cover-letter file"));
+
+ log_write_email_headers(rev, head, &pp.after_subject, &need_8bit_cte, 0);
+
+ for (i = 0; !need_8bit_cte && i < nr; i++) {
+ const char *buf = get_commit_buffer(list[i], NULL);
+ if (has_non_ascii(buf))
+ need_8bit_cte = 1;
+ unuse_commit_buffer(list[i], buf);
+ }
+
+ if (!branch_name)
+ branch_name = find_branch_name(rev);
+
+ pp.fmt = CMIT_FMT_EMAIL;
+ pp.date_mode.type = DATE_RFC2822;
+ pp.rev = rev;
+ pp.print_email_subject = 1;
+ pp_user_info(&pp, NULL, &sb, committer, encoding);
+ prepare_cover_text(&pp, branch_name, &sb, encoding, need_8bit_cte);
+ fprintf(rev->diffopt.file, "%s\n", sb.buf);
+
+ strbuf_release(&sb);
+
+ shortlog_init(&log);
+ log.wrap_lines = 1;
+ log.wrap = MAIL_DEFAULT_WRAP;
+ log.in1 = 2;
+ log.in2 = 4;
+ log.file = rev->diffopt.file;
+ log.groups = SHORTLOG_GROUP_AUTHOR;
+ shortlog_finish_setup(&log);
+ for (i = 0; i < nr; i++)
+ shortlog_add_commit(&log, list[i]);
+
+ shortlog_output(&log);
+
+ /* We can only do diffstat with a unique reference point */
+ if (origin)
+ show_diffstat(rev, origin, head);
+
+ if (rev->idiff_oid1) {
+ fprintf_ln(rev->diffopt.file, "%s", rev->idiff_title);
+ show_interdiff(rev->idiff_oid1, rev->idiff_oid2, 0,
+ &rev->diffopt);
+ }
+
+ if (rev->rdiff1) {
+ /*
+ * Pass minimum required diff-options to range-diff; others
+ * can be added later if deemed desirable.
+ */
+ struct diff_options opts;
+ struct strvec other_arg = STRVEC_INIT;
+ struct range_diff_options range_diff_opts = {
+ .creation_factor = rev->creation_factor,
+ .dual_color = 1,
+ .diffopt = &opts,
+ .other_arg = &other_arg
+ };
+
+ diff_setup(&opts);
+ opts.file = rev->diffopt.file;
+ opts.use_color = rev->diffopt.use_color;
+ diff_setup_done(&opts);
+ fprintf_ln(rev->diffopt.file, "%s", rev->rdiff_title);
+ get_notes_args(&other_arg, rev);
+ show_range_diff(rev->rdiff1, rev->rdiff2, &range_diff_opts);
+ strvec_clear(&other_arg);
+ }
+}
+
+static const char *clean_message_id(const char *msg_id)
+{
+ char ch;
+ const char *a, *z, *m;
+
+ m = msg_id;
+ while ((ch = *m) && (isspace(ch) || (ch == '<')))
+ m++;
+ a = m;
+ z = NULL;
+ while ((ch = *m)) {
+ if (!isspace(ch) && (ch != '>'))
+ z = m;
+ m++;
+ }
+ if (!z)
+ die(_("insane in-reply-to: %s"), msg_id);
+ if (++z == m)
+ return a;
+ return xmemdupz(a, z - a);
+}
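+
+/*
+ * For illustration: clean_message_id() strips the surrounding whitespace
+ * and angle brackets, so an --in-reply-to argument such as
+ * " <cover.1234.author@example.org> " (a made-up id) ends up recorded as
+ * "cover.1234.author@example.org" for the In-Reply-To/References headers.
+ */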
+
+static const char *set_outdir(const char *prefix, const char *output_directory)
+{
+ if (output_directory && is_absolute_path(output_directory))
+ return output_directory;
+
+ if (!prefix || !*prefix) {
+ if (output_directory)
+ return output_directory;
+ /* The user did not explicitly ask for "./" */
+ outdir_offset = 2;
+ return "./";
+ }
+
+ outdir_offset = strlen(prefix);
+ if (!output_directory)
+ return prefix;
+
+ return prefix_filename(prefix, output_directory);
+}
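+
+/*
+ * For example, running format-patch from a subdirectory "sub/" with
+ * "-o patches" makes set_outdir() return "sub/patches"; an absolute
+ * "-o /tmp/patches" is returned unchanged, and with no -o at all the
+ * patches land in the directory the command was run from.
+ */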
+
+static const char * const builtin_format_patch_usage[] = {
+ N_("git format-patch [<options>] [<since> | <revision-range>]"),
+ NULL
+};
+
+static int keep_subject = 0;
+
+static int keep_callback(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ ((struct rev_info *)opt->value)->total = -1;
+ keep_subject = 1;
+ return 0;
+}
+
+static int subject_prefix = 0;
+
+static int subject_prefix_callback(const struct option *opt, const char *arg,
+ int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ subject_prefix = 1;
+ ((struct rev_info *)opt->value)->subject_prefix = arg;
+ return 0;
+}
+
+static int rfc_callback(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ return subject_prefix_callback(opt, "RFC PATCH", unset);
+}
+
+static int numbered_cmdline_opt = 0;
+
+static int numbered_callback(const struct option *opt, const char *arg,
+ int unset)
+{
+ BUG_ON_OPT_ARG(arg);
+ *(int *)opt->value = numbered_cmdline_opt = unset ? 0 : 1;
+ if (unset)
+ auto_number = 0;
+ return 0;
+}
+
+static int no_numbered_callback(const struct option *opt, const char *arg,
+ int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ return numbered_callback(opt, arg, 1);
+}
+
+static int output_directory_callback(const struct option *opt, const char *arg,
+ int unset)
+{
+ const char **dir = (const char **)opt->value;
+ BUG_ON_OPT_NEG(unset);
+ if (*dir)
+ die(_("two output directories?"));
+ *dir = arg;
+ return 0;
+}
+
+static int thread_callback(const struct option *opt, const char *arg, int unset)
+{
+ enum thread_level *thread = (enum thread_level *)opt->value;
+ if (unset)
+ *thread = THREAD_UNSET;
+ else if (!arg || !strcmp(arg, "shallow"))
+ *thread = THREAD_SHALLOW;
+ else if (!strcmp(arg, "deep"))
+ *thread = THREAD_DEEP;
+ /*
+	 * Please update _git_format_patch() in git-completion.bash
+ * when you add new options.
+ */
+ else
+ return 1;
+ return 0;
+}
+
+static int attach_callback(const struct option *opt, const char *arg, int unset)
+{
+ struct rev_info *rev = (struct rev_info *)opt->value;
+ if (unset)
+ rev->mime_boundary = NULL;
+ else if (arg)
+ rev->mime_boundary = arg;
+ else
+ rev->mime_boundary = git_version_string;
+ rev->no_inline = unset ? 0 : 1;
+ return 0;
+}
+
+static int inline_callback(const struct option *opt, const char *arg, int unset)
+{
+ struct rev_info *rev = (struct rev_info *)opt->value;
+ if (unset)
+ rev->mime_boundary = NULL;
+ else if (arg)
+ rev->mime_boundary = arg;
+ else
+ rev->mime_boundary = git_version_string;
+ rev->no_inline = 0;
+ return 0;
+}
+
+static int header_callback(const struct option *opt, const char *arg, int unset)
+{
+ if (unset) {
+ string_list_clear(&extra_hdr, 0);
+ string_list_clear(&extra_to, 0);
+ string_list_clear(&extra_cc, 0);
+ } else {
+ add_header(arg);
+ }
+ return 0;
+}
+
+static int to_callback(const struct option *opt, const char *arg, int unset)
+{
+ if (unset)
+ string_list_clear(&extra_to, 0);
+ else
+ string_list_append(&extra_to, arg);
+ return 0;
+}
+
+static int cc_callback(const struct option *opt, const char *arg, int unset)
+{
+ if (unset)
+ string_list_clear(&extra_cc, 0);
+ else
+ string_list_append(&extra_cc, arg);
+ return 0;
+}
+
+static int from_callback(const struct option *opt, const char *arg, int unset)
+{
+ char **from = opt->value;
+
+ free(*from);
+
+ if (unset)
+ *from = NULL;
+ else if (arg)
+ *from = xstrdup(arg);
+ else
+ *from = xstrdup(git_committer_info(IDENT_NO_DATE));
+ return 0;
+}
+
+static int base_callback(const struct option *opt, const char *arg, int unset)
+{
+ const char **base_commit = opt->value;
+
+ if (unset) {
+ auto_base = AUTO_BASE_NEVER;
+ *base_commit = NULL;
+ } else if (!strcmp(arg, "auto")) {
+ auto_base = AUTO_BASE_ALWAYS;
+ *base_commit = NULL;
+ } else {
+ auto_base = AUTO_BASE_NEVER;
+ *base_commit = arg;
+ }
+ return 0;
+}
+
+struct base_tree_info {
+ struct object_id base_commit;
+ int nr_patch_id, alloc_patch_id;
+ struct object_id *patch_id;
+};
+
+static struct commit *get_base_commit(const char *base_commit,
+ struct commit **list,
+ int total)
+{
+ struct commit *base = NULL;
+ struct commit **rev;
+ int i = 0, rev_nr = 0, auto_select, die_on_failure;
+
+ switch (auto_base) {
+ case AUTO_BASE_NEVER:
+ if (base_commit) {
+ auto_select = 0;
+ die_on_failure = 1;
+ } else {
+ /* no base information is requested */
+ return NULL;
+ }
+ break;
+ case AUTO_BASE_ALWAYS:
+ case AUTO_BASE_WHEN_ABLE:
+ if (base_commit) {
+ BUG("requested automatic base selection but a commit was provided");
+ } else {
+ auto_select = 1;
+ die_on_failure = auto_base == AUTO_BASE_ALWAYS;
+ }
+ break;
+ default:
+ BUG("unexpected automatic base selection method");
+ }
+
+ if (!auto_select) {
+ base = lookup_commit_reference_by_name(base_commit);
+ if (!base)
+ die(_("unknown commit %s"), base_commit);
+ } else {
+ struct branch *curr_branch = branch_get(NULL);
+ const char *upstream = branch_get_upstream(curr_branch, NULL);
+ if (upstream) {
+ struct commit_list *base_list;
+ struct commit *commit;
+ struct object_id oid;
+
+ if (get_oid(upstream, &oid)) {
+ if (die_on_failure)
+ die(_("failed to resolve '%s' as a valid ref"), upstream);
+ else
+ return NULL;
+ }
+ commit = lookup_commit_or_die(&oid, "upstream base");
+ base_list = get_merge_bases_many(commit, total, list);
+ /* There should be one and only one merge base. */
+ if (!base_list || base_list->next) {
+ if (die_on_failure) {
+ die(_("could not find exact merge base"));
+ } else {
+ free_commit_list(base_list);
+ return NULL;
+ }
+ }
+ base = base_list->item;
+ free_commit_list(base_list);
+ } else {
+ if (die_on_failure)
+ die(_("failed to get upstream, if you want to record base commit automatically,\n"
+ "please use git branch --set-upstream-to to track a remote branch.\n"
+ "Or you could specify base commit by --base=<base-commit-id> manually"));
+ else
+ return NULL;
+ }
+ }
+
+ ALLOC_ARRAY(rev, total);
+ for (i = 0; i < total; i++)
+ rev[i] = list[i];
+
+ rev_nr = total;
+ /*
+ * Get merge base through pair-wise computations
+ * and store it in rev[0].
+ */
+ while (rev_nr > 1) {
+ for (i = 0; i < rev_nr / 2; i++) {
+ struct commit_list *merge_base;
+ merge_base = get_merge_bases(rev[2 * i], rev[2 * i + 1]);
+ if (!merge_base || merge_base->next) {
+ if (die_on_failure) {
+ die(_("failed to find exact merge base"));
+ } else {
+ free(rev);
+ return NULL;
+ }
+ }
+
+ rev[i] = merge_base->item;
+ }
+
+ if (rev_nr % 2)
+ rev[i] = rev[2 * i];
+ rev_nr = DIV_ROUND_UP(rev_nr, 2);
+ }
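+
+	/*
+	 * For example, with four patches the loop above first reduces
+	 * rev[] to the merge bases of the pairs (rev[0],rev[1]) and
+	 * (rev[2],rev[3]), then to a single merge base in rev[0]; the
+	 * check below then insists that the requested base commit is
+	 * an ancestor of that result.
+	 */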
+
+ if (!in_merge_bases(base, rev[0])) {
+ if (die_on_failure) {
+ die(_("base commit should be the ancestor of revision list"));
+ } else {
+ free(rev);
+ return NULL;
+ }
+ }
+
+ for (i = 0; i < total; i++) {
+ if (base == list[i]) {
+ if (die_on_failure) {
+ die(_("base commit shouldn't be in revision list"));
+ } else {
+ free(rev);
+ return NULL;
+ }
+ }
+ }
+
+ free(rev);
+ return base;
+}
+
+define_commit_slab(commit_base, int);
+
+static void prepare_bases(struct base_tree_info *bases,
+ struct commit *base,
+ struct commit **list,
+ int total)
+{
+ struct commit *commit;
+ struct rev_info revs;
+ struct diff_options diffopt;
+ struct commit_base commit_base;
+ int i;
+
+ if (!base)
+ return;
+
+ init_commit_base(&commit_base);
+ repo_diff_setup(the_repository, &diffopt);
+ diffopt.flags.recursive = 1;
+ diff_setup_done(&diffopt);
+
+ oidcpy(&bases->base_commit, &base->object.oid);
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ revs.max_parents = 1;
+ revs.topo_order = 1;
+ for (i = 0; i < total; i++) {
+ list[i]->object.flags &= ~UNINTERESTING;
+ add_pending_object(&revs, &list[i]->object, "rev_list");
+ *commit_base_at(&commit_base, list[i]) = 1;
+ }
+ base->object.flags |= UNINTERESTING;
+ add_pending_object(&revs, &base->object, "base");
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ /*
+ * Traverse the commits list, get prerequisite patch ids
+ * and stuff them in bases structure.
+ */
+ while ((commit = get_revision(&revs)) != NULL) {
+ struct object_id oid;
+ struct object_id *patch_id;
+ if (*commit_base_at(&commit_base, commit))
+ continue;
+ if (commit_patch_id(commit, &diffopt, &oid, 0))
+ die(_("cannot get patch id"));
+ ALLOC_GROW(bases->patch_id, bases->nr_patch_id + 1, bases->alloc_patch_id);
+ patch_id = bases->patch_id + bases->nr_patch_id;
+ oidcpy(patch_id, &oid);
+ bases->nr_patch_id++;
+ }
+ clear_commit_base(&commit_base);
+}
+
+static void print_bases(struct base_tree_info *bases, FILE *file)
+{
+ int i;
+
+ /* Only do this once, either for the cover or for the first one */
+ if (is_null_oid(&bases->base_commit))
+ return;
+
+ /* Show the base commit */
+ fprintf(file, "\nbase-commit: %s\n", oid_to_hex(&bases->base_commit));
+
+ /* Show the prerequisite patches */
+ for (i = bases->nr_patch_id - 1; i >= 0; i--)
+ fprintf(file, "prerequisite-patch-id: %s\n", oid_to_hex(&bases->patch_id[i]));
+
+ free(bases->patch_id);
+ bases->nr_patch_id = 0;
+ bases->alloc_patch_id = 0;
+ oidclr(&bases->base_commit);
+}
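+
+/*
+ * The block printed above ends up at the bottom of the cover letter or
+ * of the first patch, e.g.:
+ *
+ *   base-commit: <full object name of the base commit>
+ *   prerequisite-patch-id: <patch id of a commit the series builds on>
+ *
+ * letting the receiving side reconstruct the exact state the series
+ * applies to.
+ */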
+
+static const char *diff_title(struct strbuf *sb,
+ const char *reroll_count,
+ const char *generic,
+ const char *rerolled)
+{
+ int v;
+
+ /* RFC may be v0, so allow -v1 to diff against v0 */
+ if (reroll_count && !strtol_i(reroll_count, 10, &v) &&
+ v >= 1)
+ strbuf_addf(sb, rerolled, v - 1);
+ else
+ strbuf_addstr(sb, generic);
+ return sb->buf;
+}
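+
+/*
+ * E.g. "git format-patch -v3 --range-diff=<previous>" yields the title
+ * "Range-diff against v2:", while a missing or non-numeric reroll count
+ * falls back to the plain "Range-diff:" (and likewise for "Interdiff:").
+ */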
+
+static void infer_range_diff_ranges(struct strbuf *r1,
+ struct strbuf *r2,
+ const char *prev,
+ struct commit *origin,
+ struct commit *head)
+{
+ const char *head_oid = oid_to_hex(&head->object.oid);
+ int prev_is_range = is_range_diff_range(prev);
+
+ if (prev_is_range)
+ strbuf_addstr(r1, prev);
+ else
+ strbuf_addf(r1, "%s..%s", head_oid, prev);
+
+ if (origin)
+ strbuf_addf(r2, "%s..%s", oid_to_hex(&origin->object.oid), head_oid);
+ else if (prev_is_range)
+ die(_("failed to infer range-diff origin of current series"));
+ else {
+ warning(_("using '%s' as range-diff origin of current series"), prev);
+ strbuf_addf(r2, "%s..%s", prev, head_oid);
+ }
+}
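+
+/*
+ * For example, "--range-diff=v1-branch" (a single revision) compares the
+ * previous series "<head>..v1-branch" against the current
+ * "<origin>..<head>", whereas an explicit range such as
+ * "--range-diff=maint..v1-branch" is taken as-is for the previous series.
+ */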
+
+int cmd_format_patch(int argc, const char **argv, const char *prefix)
+{
+ struct commit *commit;
+ struct commit **list = NULL;
+ struct rev_info rev;
+ char *to_free = NULL;
+ struct setup_revision_opt s_r_opt;
+ int nr = 0, total, i;
+ int use_stdout = 0;
+ int start_number = -1;
+ int just_numbers = 0;
+ int ignore_if_in_upstream = 0;
+ int cover_letter = -1;
+ int boundary_count = 0;
+ int no_binary_diff = 0;
+ int zero_commit = 0;
+ struct commit *origin = NULL;
+ const char *in_reply_to = NULL;
+ struct patch_ids ids;
+ struct strbuf buf = STRBUF_INIT;
+ int use_patch_format = 0;
+ int quiet = 0;
+ const char *reroll_count = NULL;
+ char *cover_from_description_arg = NULL;
+ char *branch_name = NULL;
+ char *base_commit = NULL;
+ struct base_tree_info bases;
+ struct commit *base;
+ int show_progress = 0;
+ struct progress *progress = NULL;
+ struct oid_array idiff_prev = OID_ARRAY_INIT;
+ struct strbuf idiff_title = STRBUF_INIT;
+ const char *rdiff_prev = NULL;
+ struct strbuf rdiff1 = STRBUF_INIT;
+ struct strbuf rdiff2 = STRBUF_INIT;
+ struct strbuf rdiff_title = STRBUF_INIT;
+ int creation_factor = -1;
+
+ const struct option builtin_format_patch_options[] = {
+ OPT_CALLBACK_F('n', "numbered", &numbered, NULL,
+ N_("use [PATCH n/m] even with a single patch"),
+ PARSE_OPT_NOARG, numbered_callback),
+ OPT_CALLBACK_F('N', "no-numbered", &numbered, NULL,
+ N_("use [PATCH] even with multiple patches"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, no_numbered_callback),
+ OPT_BOOL('s', "signoff", &do_signoff, N_("add a Signed-off-by trailer")),
+ OPT_BOOL(0, "stdout", &use_stdout,
+ N_("print patches to standard out")),
+ OPT_BOOL(0, "cover-letter", &cover_letter,
+ N_("generate a cover letter")),
+ OPT_BOOL(0, "numbered-files", &just_numbers,
+ N_("use simple number sequence for output file names")),
+ OPT_STRING(0, "suffix", &fmt_patch_suffix, N_("sfx"),
+ N_("use <sfx> instead of '.patch'")),
+ OPT_INTEGER(0, "start-number", &start_number,
+ N_("start numbering patches at <n> instead of 1")),
+ OPT_STRING('v', "reroll-count", &reroll_count, N_("reroll-count"),
+ N_("mark the series as Nth re-roll")),
+ OPT_INTEGER(0, "filename-max-length", &fmt_patch_name_max,
+ N_("max length of output filename")),
+ OPT_CALLBACK_F(0, "rfc", &rev, NULL,
+ N_("use [RFC PATCH] instead of [PATCH]"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, rfc_callback),
+ OPT_STRING(0, "cover-from-description", &cover_from_description_arg,
+ N_("cover-from-description-mode"),
+ N_("generate parts of a cover letter based on a branch's description")),
+ OPT_CALLBACK_F(0, "subject-prefix", &rev, N_("prefix"),
+ N_("use [<prefix>] instead of [PATCH]"),
+ PARSE_OPT_NONEG, subject_prefix_callback),
+ OPT_CALLBACK_F('o', "output-directory", &output_directory,
+ N_("dir"), N_("store resulting files in <dir>"),
+ PARSE_OPT_NONEG, output_directory_callback),
+ OPT_CALLBACK_F('k', "keep-subject", &rev, NULL,
+ N_("don't strip/add [PATCH]"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, keep_callback),
+ OPT_BOOL(0, "no-binary", &no_binary_diff,
+ N_("don't output binary diffs")),
+ OPT_BOOL(0, "zero-commit", &zero_commit,
+ N_("output all-zero hash in From header")),
+ OPT_BOOL(0, "ignore-if-in-upstream", &ignore_if_in_upstream,
+ N_("don't include a patch matching a commit upstream")),
+ OPT_SET_INT_F('p', "no-stat", &use_patch_format,
+ N_("show patch format instead of default (patch + stat)"),
+ 1, PARSE_OPT_NONEG),
+ OPT_GROUP(N_("Messaging")),
+ OPT_CALLBACK(0, "add-header", NULL, N_("header"),
+ N_("add email header"), header_callback),
+ OPT_CALLBACK(0, "to", NULL, N_("email"), N_("add To: header"), to_callback),
+ OPT_CALLBACK(0, "cc", NULL, N_("email"), N_("add Cc: header"), cc_callback),
+ OPT_CALLBACK_F(0, "from", &from, N_("ident"),
+ N_("set From address to <ident> (or committer ident if absent)"),
+ PARSE_OPT_OPTARG, from_callback),
+ OPT_STRING(0, "in-reply-to", &in_reply_to, N_("message-id"),
+ N_("make first mail a reply to <message-id>")),
+ OPT_CALLBACK_F(0, "attach", &rev, N_("boundary"),
+ N_("attach the patch"), PARSE_OPT_OPTARG,
+ attach_callback),
+ OPT_CALLBACK_F(0, "inline", &rev, N_("boundary"),
+ N_("inline the patch"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ inline_callback),
+ OPT_CALLBACK_F(0, "thread", &thread, N_("style"),
+ N_("enable message threading, styles: shallow, deep"),
+ PARSE_OPT_OPTARG, thread_callback),
+ OPT_STRING(0, "signature", &signature, N_("signature"),
+ N_("add a signature")),
+ OPT_CALLBACK_F(0, "base", &base_commit, N_("base-commit"),
+ N_("add prerequisite tree info to the patch series"),
+ 0, base_callback),
+ OPT_FILENAME(0, "signature-file", &signature_file,
+ N_("add a signature from a file")),
+ OPT__QUIET(&quiet, N_("don't print the patch filenames")),
+ OPT_BOOL(0, "progress", &show_progress,
+ N_("show progress while generating patches")),
+ OPT_CALLBACK(0, "interdiff", &idiff_prev, N_("rev"),
+ N_("show changes against <rev> in cover letter or single patch"),
+ parse_opt_object_name),
+ OPT_STRING(0, "range-diff", &rdiff_prev, N_("refspec"),
+ N_("show changes against <refspec> in cover letter or single patch")),
+ OPT_INTEGER(0, "creation-factor", &creation_factor,
+ N_("percentage by which creation is weighted")),
+ OPT_BOOL(0, "force-in-body-from", &force_in_body_from,
+ N_("show in-body From: even if identical to the e-mail header")),
+ OPT_END()
+ };
+
+ extra_hdr.strdup_strings = 1;
+ extra_to.strdup_strings = 1;
+ extra_cc.strdup_strings = 1;
+
+ init_log_defaults();
+ init_display_notes(&notes_opt);
+ git_config(git_format_config, NULL);
+ repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
+ rev.show_notes = show_notes;
+ memcpy(&rev.notes_opt, &notes_opt, sizeof(notes_opt));
+ rev.commit_format = CMIT_FMT_EMAIL;
+ rev.encode_email_headers = default_encode_email_headers;
+ rev.expand_tabs_in_log_default = 0;
+ rev.verbose_header = 1;
+ rev.diff = 1;
+ rev.max_parents = 1;
+ rev.diffopt.flags.recursive = 1;
+ rev.diffopt.no_free = 1;
+ rev.subject_prefix = fmt_patch_subject_prefix;
+ memset(&s_r_opt, 0, sizeof(s_r_opt));
+ s_r_opt.def = "HEAD";
+ s_r_opt.revarg_opt = REVARG_COMMITTISH;
+
+ if (default_attach) {
+ rev.mime_boundary = default_attach;
+ rev.no_inline = 1;
+ }
+
+ /*
+ * Parse the arguments before setup_revisions(), or something
+ * like "git format-patch -o a123 HEAD^.." may fail; a123 is
+ * possibly a valid SHA1.
+ */
+ argc = parse_options(argc, argv, prefix, builtin_format_patch_options,
+ builtin_format_patch_usage,
+ PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT |
+ PARSE_OPT_KEEP_DASHDASH);
+
+ rev.force_in_body_from = force_in_body_from;
+
+ /* Make sure "0000-$sub.patch" gives non-negative length for $sub */
+ if (fmt_patch_name_max <= strlen("0000-") + strlen(fmt_patch_suffix))
+ fmt_patch_name_max = strlen("0000-") + strlen(fmt_patch_suffix);
+
+ if (cover_from_description_arg)
+ cover_from_description_mode = parse_cover_from_description(cover_from_description_arg);
+
+ if (reroll_count) {
+ struct strbuf sprefix = STRBUF_INIT;
+
+ strbuf_addf(&sprefix, "%s v%s",
+ rev.subject_prefix, reroll_count);
+ rev.reroll_count = reroll_count;
+ rev.subject_prefix = strbuf_detach(&sprefix, NULL);
+ }
+
+ for (i = 0; i < extra_hdr.nr; i++) {
+ strbuf_addstr(&buf, extra_hdr.items[i].string);
+ strbuf_addch(&buf, '\n');
+ }
+
+ if (extra_to.nr)
+ strbuf_addstr(&buf, "To: ");
+ for (i = 0; i < extra_to.nr; i++) {
+ if (i)
+ strbuf_addstr(&buf, " ");
+ strbuf_addstr(&buf, extra_to.items[i].string);
+ if (i + 1 < extra_to.nr)
+ strbuf_addch(&buf, ',');
+ strbuf_addch(&buf, '\n');
+ }
+
+ if (extra_cc.nr)
+ strbuf_addstr(&buf, "Cc: ");
+ for (i = 0; i < extra_cc.nr; i++) {
+ if (i)
+ strbuf_addstr(&buf, " ");
+ strbuf_addstr(&buf, extra_cc.items[i].string);
+ if (i + 1 < extra_cc.nr)
+ strbuf_addch(&buf, ',');
+ strbuf_addch(&buf, '\n');
+ }
+
+ rev.extra_headers = to_free = strbuf_detach(&buf, NULL);
+
+ if (from) {
+ if (split_ident_line(&rev.from_ident, from, strlen(from)))
+ die(_("invalid ident line: %s"), from);
+ }
+
+ if (start_number < 0)
+ start_number = 1;
+
+ /*
+ * If numbered is set solely due to format.numbered in config,
+ * and it would conflict with --keep-subject (-k) from the
+ * command line, reset "numbered".
+ */
+ if (numbered && keep_subject && !numbered_cmdline_opt)
+ numbered = 0;
+
+ if (numbered && keep_subject)
+ die(_("options '%s' and '%s' cannot be used together"), "-n", "-k");
+ if (keep_subject && subject_prefix)
+ die(_("options '%s' and '%s' cannot be used together"), "--subject-prefix/--rfc", "-k");
+ rev.preserve_subject = keep_subject;
+
+ argc = setup_revisions(argc, argv, &rev, &s_r_opt);
+ if (argc > 1)
+ die(_("unrecognized argument: %s"), argv[1]);
+
+ if (rev.diffopt.output_format & DIFF_FORMAT_NAME)
+ die(_("--name-only does not make sense"));
+ if (rev.diffopt.output_format & DIFF_FORMAT_NAME_STATUS)
+ die(_("--name-status does not make sense"));
+ if (rev.diffopt.output_format & DIFF_FORMAT_CHECKDIFF)
+ die(_("--check does not make sense"));
+ if (rev.remerge_diff)
+ die(_("--remerge-diff does not make sense"));
+
+ if (!use_patch_format &&
+ (!rev.diffopt.output_format ||
+ rev.diffopt.output_format == DIFF_FORMAT_PATCH))
+ rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT | DIFF_FORMAT_SUMMARY;
+ if (!rev.diffopt.stat_width)
+ rev.diffopt.stat_width = MAIL_DEFAULT_WRAP;
+
+ /* Always generate a patch */
+ rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
+
+ rev.zero_commit = zero_commit;
+ rev.patch_name_max = fmt_patch_name_max;
+
+ if (!rev.diffopt.flags.text && !no_binary_diff)
+ rev.diffopt.flags.binary = 1;
+
+ if (rev.show_notes)
+ load_display_notes(&rev.notes_opt);
+
+ die_for_incompatible_opt3(use_stdout, "--stdout",
+ rev.diffopt.close_file, "--output",
+ !!output_directory, "--output-directory");
+
+ if (use_stdout) {
+ setup_pager();
+ } else if (!rev.diffopt.close_file) {
+ int saved;
+
+ if (!output_directory)
+ output_directory = config_output_directory;
+ output_directory = set_outdir(prefix, output_directory);
+
+ if (rev.diffopt.use_color != GIT_COLOR_ALWAYS)
+ rev.diffopt.use_color = GIT_COLOR_NEVER;
+ /*
+ * We consider <outdir> as 'outside of gitdir', therefore avoid
+ * applying adjust_shared_perm in s-c-l-d.
+ */
+ saved = get_shared_repository();
+ set_shared_repository(0);
+ switch (safe_create_leading_directories_const(output_directory)) {
+ case SCLD_OK:
+ case SCLD_EXISTS:
+ break;
+ default:
+ die(_("could not create leading directories "
+ "of '%s'"), output_directory);
+ }
+ set_shared_repository(saved);
+ if (mkdir(output_directory, 0777) < 0 && errno != EEXIST)
+ die_errno(_("could not create directory '%s'"),
+ output_directory);
+ }
+
+ if (rev.pending.nr == 1) {
+ int check_head = 0;
+
+ if (rev.max_count < 0 && !rev.show_root_diff) {
+ /*
+ * This is traditional behaviour of "git format-patch
+ * origin" that prepares what the origin side still
+ * does not have.
+ */
+ rev.pending.objects[0].item->flags |= UNINTERESTING;
+ add_head_to_pending(&rev);
+ check_head = 1;
+ }
+ /*
+ * Otherwise, it is "format-patch -22 HEAD", and/or
+ * "format-patch --root HEAD". The user wants
+ * get_revision() to do the usual traversal.
+ */
+
+ if (!strcmp(rev.pending.objects[0].name, "HEAD"))
+ check_head = 1;
+
+ if (check_head) {
+ const char *ref, *v;
+ ref = resolve_ref_unsafe("HEAD", RESOLVE_REF_READING,
+ NULL, NULL);
+ if (ref && skip_prefix(ref, "refs/heads/", &v))
+ branch_name = xstrdup(v);
+ else
+ branch_name = xstrdup(""); /* no branch */
+ }
+ }
+
+ /*
+ * We cannot move this anywhere earlier because we do want to
+ * know if --root was given explicitly from the command line.
+ */
+ rev.show_root_diff = 1;
+
+ if (ignore_if_in_upstream) {
+ /* Don't say anything if head and upstream are the same. */
+ if (rev.pending.nr == 2) {
+ struct object_array_entry *o = rev.pending.objects;
+ if (oideq(&o[0].item->oid, &o[1].item->oid))
+ goto done;
+ }
+ get_patch_ids(&rev, &ids);
+ }
+
+ if (prepare_revision_walk(&rev))
+ die(_("revision walk setup failed"));
+ rev.boundary = 1;
+ while ((commit = get_revision(&rev)) != NULL) {
+ if (commit->object.flags & BOUNDARY) {
+ boundary_count++;
+ origin = (boundary_count == 1) ? commit : NULL;
+ continue;
+ }
+
+ if (ignore_if_in_upstream && has_commit_patch_id(commit, &ids))
+ continue;
+
+ nr++;
+ REALLOC_ARRAY(list, nr);
+ list[nr - 1] = commit;
+ }
+ if (nr == 0)
+ /* nothing to do */
+ goto done;
+ total = nr;
+ if (cover_letter == -1) {
+ if (config_cover_letter == COVER_AUTO)
+ cover_letter = (total > 1);
+ else
+ cover_letter = (config_cover_letter == COVER_ON);
+ }
+ if (!keep_subject && auto_number && (total > 1 || cover_letter))
+ numbered = 1;
+ if (numbered)
+ rev.total = total + start_number - 1;
+
+ if (idiff_prev.nr) {
+ if (!cover_letter && total != 1)
+ die(_("--interdiff requires --cover-letter or single patch"));
+ rev.idiff_oid1 = &idiff_prev.oid[idiff_prev.nr - 1];
+ rev.idiff_oid2 = get_commit_tree_oid(list[0]);
+ rev.idiff_title = diff_title(&idiff_title, reroll_count,
+ _("Interdiff:"),
+ _("Interdiff against v%d:"));
+ }
+
+ if (creation_factor < 0)
+ creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
+ else if (!rdiff_prev)
+ die(_("the option '%s' requires '%s'"), "--creation-factor", "--range-diff");
+
+ if (rdiff_prev) {
+ if (!cover_letter && total != 1)
+ die(_("--range-diff requires --cover-letter or single patch"));
+
+ infer_range_diff_ranges(&rdiff1, &rdiff2, rdiff_prev,
+ origin, list[0]);
+ rev.rdiff1 = rdiff1.buf;
+ rev.rdiff2 = rdiff2.buf;
+ rev.creation_factor = creation_factor;
+ rev.rdiff_title = diff_title(&rdiff_title, reroll_count,
+ _("Range-diff:"),
+ _("Range-diff against v%d:"));
+ }
+
+ if (!signature) {
+ ; /* --no-signature inhibits all signatures */
+ } else if (signature && signature != git_version_string) {
+ ; /* non-default signature already set */
+ } else if (signature_file) {
+ struct strbuf buf = STRBUF_INIT;
+
+ if (strbuf_read_file(&buf, signature_file, 128) < 0)
+ die_errno(_("unable to read signature file '%s'"), signature_file);
+ signature = strbuf_detach(&buf, NULL);
+ }
+
+ memset(&bases, 0, sizeof(bases));
+ base = get_base_commit(base_commit, list, nr);
+ if (base) {
+ reset_revision_walk();
+ clear_object_flags(UNINTERESTING);
+ prepare_bases(&bases, base, list, nr);
+ }
+
+ if (in_reply_to || thread || cover_letter) {
+ rev.ref_message_ids = xmalloc(sizeof(*rev.ref_message_ids));
+ string_list_init_nodup(rev.ref_message_ids);
+ }
+ if (in_reply_to) {
+ const char *msgid = clean_message_id(in_reply_to);
+ string_list_append(rev.ref_message_ids, msgid);
+ }
+ rev.numbered_files = just_numbers;
+ rev.patch_suffix = fmt_patch_suffix;
+ if (cover_letter) {
+ if (thread)
+ gen_message_id(&rev, "cover");
+ make_cover_letter(&rev, !!output_directory,
+ origin, nr, list, branch_name, quiet);
+ print_bases(&bases, rev.diffopt.file);
+ print_signature(rev.diffopt.file);
+ total++;
+ start_number--;
+ /* interdiff/range-diff in cover-letter; omit from patches */
+ rev.idiff_oid1 = NULL;
+ rev.rdiff1 = NULL;
+ }
+ rev.add_signoff = do_signoff;
+
+ if (show_progress)
+ progress = start_delayed_progress(_("Generating patches"), total);
+ while (0 <= --nr) {
+ int shown;
+ display_progress(progress, total - nr);
+ commit = list[nr];
+ rev.nr = total - nr + (start_number - 1);
+ /* Make the second and subsequent mails replies to the first */
+ if (thread) {
+ /* Have we already had a message ID? */
+ if (rev.message_id) {
+ /*
+ * For deep threading: make every mail
+ * a reply to the previous one, no
+ * matter what other options are set.
+ *
+ * For shallow threading:
+ *
+ * Without --cover-letter and
+ * --in-reply-to, make every mail a
+ * reply to the one before.
+ *
+ * With --in-reply-to but no
+ * --cover-letter, make every mail a
+ * reply to the <reply-to>.
+ *
+ * With --cover-letter, make every
+ * mail but the cover letter a reply
+ * to the cover letter. The cover
+ * letter is a reply to the
+ * --in-reply-to, if specified.
+ */
+ if (thread == THREAD_SHALLOW
+ && rev.ref_message_ids->nr > 0
+ && (!cover_letter || rev.nr > 1))
+ free(rev.message_id);
+ else
+ string_list_append(rev.ref_message_ids,
+ rev.message_id);
+ }
+ gen_message_id(&rev, oid_to_hex(&commit->object.oid));
+ }
+
+ if (output_directory &&
+ open_next_file(rev.numbered_files ? NULL : commit, NULL, &rev, quiet))
+ die(_("failed to create output files"));
+ shown = log_tree_commit(&rev, commit);
+ free_commit_buffer(the_repository->parsed_objects,
+ commit);
+
+ /* We put one extra blank line between formatted
+ * patches and this flag is used by log-tree code
+ * to see if it needs to emit a LF before showing
+ * the log; when using one file per patch, we do
+ * not want the extra blank line.
+ */
+ if (output_directory)
+ rev.shown_one = 0;
+ if (shown) {
+ print_bases(&bases, rev.diffopt.file);
+ if (rev.mime_boundary)
+ fprintf(rev.diffopt.file, "\n--%s%s--\n\n\n",
+ mime_boundary_leader,
+ rev.mime_boundary);
+ else
+ print_signature(rev.diffopt.file);
+ }
+ if (output_directory)
+ fclose(rev.diffopt.file);
+ }
+ stop_progress(&progress);
+ free(list);
+ free(branch_name);
+ string_list_clear(&extra_to, 0);
+ string_list_clear(&extra_cc, 0);
+ string_list_clear(&extra_hdr, 0);
+ if (ignore_if_in_upstream)
+ free_patch_ids(&ids);
+
+done:
+ oid_array_clear(&idiff_prev);
+ strbuf_release(&idiff_title);
+ strbuf_release(&rdiff1);
+ strbuf_release(&rdiff2);
+ strbuf_release(&rdiff_title);
+ free(to_free);
+ if (rev.ref_message_ids)
+ string_list_clear(rev.ref_message_ids, 0);
+ free(rev.ref_message_ids);
+ return cmd_log_deinit(0, &rev);
+}
+
+static int add_pending_commit(const char *arg, struct rev_info *revs, int flags)
+{
+ struct object_id oid;
+ if (get_oid(arg, &oid) == 0) {
+ struct commit *commit = lookup_commit_reference(the_repository,
+ &oid);
+ if (commit) {
+ commit->object.flags |= flags;
+ add_pending_object(revs, &commit->object, arg);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static const char * const cherry_usage[] = {
+ N_("git cherry [-v] [<upstream> [<head> [<limit>]]]"),
+ NULL
+};
+
+static void print_commit(char sign, struct commit *commit, int verbose,
+ int abbrev, FILE *file)
+{
+ if (!verbose) {
+ fprintf(file, "%c %s\n", sign,
+ find_unique_abbrev(&commit->object.oid, abbrev));
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
+ fprintf(file, "%c %s %s\n", sign,
+ find_unique_abbrev(&commit->object.oid, abbrev),
+ buf.buf);
+ strbuf_release(&buf);
+ }
+}
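+
+/*
+ * Sample "git cherry" output: commits whose changes are already present
+ * upstream (matched by patch id) get a "-" sign, the others "+", e.g.
+ *
+ *   + 1a2b3c4
+ *   - 5d6e7f8
+ *
+ * and with -v the one-line subject follows the abbreviated object name.
+ */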
+
+int cmd_cherry(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info revs;
+ struct patch_ids ids;
+ struct commit *commit;
+ struct commit_list *list = NULL;
+ struct branch *current_branch;
+ const char *upstream;
+ const char *head = "HEAD";
+ const char *limit = NULL;
+ int verbose = 0, abbrev = 0;
+
+ struct option options[] = {
+ OPT__ABBREV(&abbrev),
+ OPT__VERBOSE(&verbose, N_("be verbose")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, cherry_usage, 0);
+
+ switch (argc) {
+ case 3:
+ limit = argv[2];
+ /* FALLTHROUGH */
+ case 2:
+ head = argv[1];
+ /* FALLTHROUGH */
+ case 1:
+ upstream = argv[0];
+ break;
+ default:
+ current_branch = branch_get(NULL);
+ upstream = branch_get_upstream(current_branch, NULL);
+ if (!upstream) {
+ fprintf(stderr, _("Could not find a tracked"
+ " remote branch, please"
+ " specify <upstream> manually.\n"));
+ usage_with_options(cherry_usage, options);
+ }
+ }
+
+ repo_init_revisions(the_repository, &revs, prefix);
+ revs.max_parents = 1;
+
+ if (add_pending_commit(head, &revs, 0))
+ die(_("unknown commit %s"), head);
+ if (add_pending_commit(upstream, &revs, UNINTERESTING))
+ die(_("unknown commit %s"), upstream);
+
+ /* Don't say anything if head and upstream are the same. */
+ if (revs.pending.nr == 2) {
+ struct object_array_entry *o = revs.pending.objects;
+ if (oideq(&o[0].item->oid, &o[1].item->oid))
+ return 0;
+ }
+
+ get_patch_ids(&revs, &ids);
+
+ if (limit && add_pending_commit(limit, &revs, UNINTERESTING))
+ die(_("unknown commit %s"), limit);
+
+ /* reverse the list of commits */
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ while ((commit = get_revision(&revs)) != NULL) {
+ commit_list_insert(commit, &list);
+ }
+
+ while (list) {
+ char sign = '+';
+
+ commit = list->item;
+ if (has_commit_patch_id(commit, &ids))
+ sign = '-';
+ print_commit(sign, commit, verbose, abbrev, revs.diffopt.file);
+ list = list->next;
+ }
+
+ free_patch_ids(&ids);
+ return 0;
+}
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
new file mode 100644
index 0000000..4cf8a23
--- /dev/null
+++ b/builtin/ls-files.c
@@ -0,0 +1,891 @@
+/*
+ * This merges the file listing in the directory cache index
+ * with the actual working directory list, and shows different
+ * combinations of the two.
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "quote.h"
+#include "dir.h"
+#include "builtin.h"
+#include "strbuf.h"
+#include "tree.h"
+#include "cache-tree.h"
+#include "parse-options.h"
+#include "resolve-undo.h"
+#include "string-list.h"
+#include "pathspec.h"
+#include "run-command.h"
+#include "submodule.h"
+#include "submodule-config.h"
+
+static int abbrev;
+static int show_deleted;
+static int show_cached;
+static int show_others;
+static int show_stage;
+static int show_unmerged;
+static int show_resolve_undo;
+static int show_modified;
+static int show_killed;
+static int show_valid_bit;
+static int show_fsmonitor_bit;
+static int line_terminator = '\n';
+static int debug_mode;
+static int show_eol;
+static int recurse_submodules;
+static int skipping_duplicates;
+static int show_sparse_dirs;
+
+static const char *prefix;
+static int max_prefix_len;
+static int prefix_len;
+static struct pathspec pathspec;
+static int error_unmatch;
+static char *ps_matched;
+static const char *with_tree;
+static int exc_given;
+static int exclude_args;
+static const char *format;
+
+static const char *tag_cached = "";
+static const char *tag_unmerged = "";
+static const char *tag_removed = "";
+static const char *tag_other = "";
+static const char *tag_killed = "";
+static const char *tag_modified = "";
+static const char *tag_skip_worktree = "";
+static const char *tag_resolve_undo = "";
+
+static void write_eolinfo(struct index_state *istate,
+ const struct cache_entry *ce, const char *path)
+{
+ if (show_eol) {
+ struct stat st;
+ const char *i_txt = "";
+ const char *w_txt = "";
+ const char *a_txt = get_convert_attr_ascii(istate, path);
+ if (ce && S_ISREG(ce->ce_mode))
+ i_txt = get_cached_convert_stats_ascii(istate,
+ ce->name);
+ if (!lstat(path, &st) && S_ISREG(st.st_mode))
+ w_txt = get_wt_convert_stats_ascii(path);
+ printf("i/%-5s w/%-5s attr/%-17s\t", i_txt, w_txt, a_txt);
+ }
+}
+
+static void write_name(const char *name)
+{
+ /*
+ * With "--full-name", prefix_len=0; this caller needs to pass
+ * an empty string in that case (a NULL is good for "").
+ */
+ write_name_quoted_relative(name, prefix_len ? prefix : NULL,
+ stdout, line_terminator);
+}
+
+static void write_name_to_buf(struct strbuf *sb, const char *name)
+{
+ const char *rel = relative_path(name, prefix_len ? prefix : NULL, sb);
+
+ if (line_terminator)
+ quote_c_style(rel, sb, NULL, 0);
+ else
+ strbuf_addstr(sb, rel);
+}
+
+static const char *get_tag(const struct cache_entry *ce, const char *tag)
+{
+ static char alttag[4];
+
+ if (tag && *tag && ((show_valid_bit && (ce->ce_flags & CE_VALID)) ||
+ (show_fsmonitor_bit && (ce->ce_flags & CE_FSMONITOR_VALID)))) {
+ memcpy(alttag, tag, 3);
+
+ if (isalpha(tag[0])) {
+ alttag[0] = tolower(tag[0]);
+ } else if (tag[0] == '?') {
+ alttag[0] = '!';
+ } else {
+ alttag[0] = 'v';
+ alttag[1] = tag[0];
+ alttag[2] = ' ';
+ alttag[3] = 0;
+ }
+
+ tag = alttag;
+ }
+
+ return tag;
+}
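+
+/*
+ * Illustration: with "git ls-files -t -v", a cached entry that would be
+ * tagged "H " is shown as "h " once its CE_VALID (assume-unchanged) bit
+ * is set; -f does the same substitution for CE_FSMONITOR_VALID.
+ */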
+
+static void print_debug(const struct cache_entry *ce)
+{
+ if (debug_mode) {
+ const struct stat_data *sd = &ce->ce_stat_data;
+
+ printf(" ctime: %u:%u\n", sd->sd_ctime.sec, sd->sd_ctime.nsec);
+ printf(" mtime: %u:%u\n", sd->sd_mtime.sec, sd->sd_mtime.nsec);
+ printf(" dev: %u\tino: %u\n", sd->sd_dev, sd->sd_ino);
+ printf(" uid: %u\tgid: %u\n", sd->sd_uid, sd->sd_gid);
+ printf(" size: %u\tflags: %x\n", sd->sd_size, ce->ce_flags);
+ }
+}
+
+static void show_dir_entry(struct index_state *istate,
+ const char *tag, struct dir_entry *ent)
+{
+ int len = max_prefix_len;
+
+ if (len > ent->len)
+ die("git ls-files: internal error - directory entry not superset of prefix");
+
+	/* If ps_matched is non-NULL, figure out which pathspec(s) match. */
+ if (ps_matched)
+ dir_path_match(istate, ent, &pathspec, len, ps_matched);
+
+ fputs(tag, stdout);
+ write_eolinfo(istate, NULL, ent->name);
+ write_name(ent->name);
+}
+
+static void show_other_files(struct index_state *istate,
+ const struct dir_struct *dir)
+{
+ int i;
+
+ for (i = 0; i < dir->nr; i++) {
+ struct dir_entry *ent = dir->entries[i];
+ if (!index_name_is_other(istate, ent->name, ent->len))
+ continue;
+ show_dir_entry(istate, tag_other, ent);
+ }
+}
+
+static void show_killed_files(struct index_state *istate,
+ const struct dir_struct *dir)
+{
+ int i;
+ for (i = 0; i < dir->nr; i++) {
+ struct dir_entry *ent = dir->entries[i];
+ char *cp, *sp;
+ int pos, len, killed = 0;
+
+ for (cp = ent->name; cp - ent->name < ent->len; cp = sp + 1) {
+ sp = strchr(cp, '/');
+ if (!sp) {
+				/* If ent->name is a prefix of an entry in the
+ * cache, it will be killed.
+ */
+ pos = index_name_pos(istate, ent->name, ent->len);
+ if (0 <= pos)
+ BUG("killed-file %.*s not found",
+ ent->len, ent->name);
+ pos = -pos - 1;
+ while (pos < istate->cache_nr &&
+ ce_stage(istate->cache[pos]))
+ pos++; /* skip unmerged */
+ if (istate->cache_nr <= pos)
+ break;
+ /* pos points at a name immediately after
+ * ent->name in the cache. Does it expect
+ * ent->name to be a directory?
+ */
+ len = ce_namelen(istate->cache[pos]);
+ if ((ent->len < len) &&
+ !strncmp(istate->cache[pos]->name,
+ ent->name, ent->len) &&
+ istate->cache[pos]->name[ent->len] == '/')
+ killed = 1;
+ break;
+ }
+ if (0 <= index_name_pos(istate, ent->name, sp - ent->name)) {
+ /* If any of the leading directories in
+ * ent->name is registered in the cache,
+ * ent->name will be killed.
+ */
+ killed = 1;
+ break;
+ }
+ }
+ if (killed)
+ show_dir_entry(istate, tag_killed, dir->entries[i]);
+ }
+}
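+
+/*
+ * Example of a "killed" path: if the index has "dir/file" but the working
+ * tree currently has a regular file named "dir", that file would have to
+ * be removed for a checkout to succeed, so "ls-files -k" reports "dir".
+ */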
+
+static void show_files(struct repository *repo, struct dir_struct *dir);
+
+static void show_submodule(struct repository *superproject,
+ struct dir_struct *dir, const char *path)
+{
+ struct repository subrepo;
+
+ if (repo_submodule_init(&subrepo, superproject, path, null_oid()))
+ return;
+
+ if (repo_read_index(&subrepo) < 0)
+ die("index file corrupt");
+
+ show_files(&subrepo, dir);
+
+ repo_clear(&subrepo);
+}
+
+struct show_index_data {
+ const char *pathname;
+ struct index_state *istate;
+ const struct cache_entry *ce;
+};
+
+static size_t expand_show_index(struct strbuf *sb, const char *start,
+ void *context)
+{
+ struct show_index_data *data = context;
+ const char *end;
+ const char *p;
+ size_t len = strbuf_expand_literal_cb(sb, start, NULL);
+ struct stat st;
+
+ if (len)
+ return len;
+ if (*start != '(')
+ die(_("bad ls-files format: element '%s' "
+ "does not start with '('"), start);
+
+ end = strchr(start + 1, ')');
+ if (!end)
+ die(_("bad ls-files format: element '%s' "
+ "does not end in ')'"), start);
+
+ len = end - start + 1;
+ if (skip_prefix(start, "(objectmode)", &p))
+ strbuf_addf(sb, "%06o", data->ce->ce_mode);
+ else if (skip_prefix(start, "(objectname)", &p))
+ strbuf_add_unique_abbrev(sb, &data->ce->oid, abbrev);
+ else if (skip_prefix(start, "(stage)", &p))
+ strbuf_addf(sb, "%d", ce_stage(data->ce));
+ else if (skip_prefix(start, "(eolinfo:index)", &p))
+ strbuf_addstr(sb, S_ISREG(data->ce->ce_mode) ?
+ get_cached_convert_stats_ascii(data->istate,
+ data->ce->name) : "");
+ else if (skip_prefix(start, "(eolinfo:worktree)", &p))
+ strbuf_addstr(sb, !lstat(data->pathname, &st) &&
+ S_ISREG(st.st_mode) ?
+ get_wt_convert_stats_ascii(data->pathname) : "");
+ else if (skip_prefix(start, "(eolattr)", &p))
+ strbuf_addstr(sb, get_convert_attr_ascii(data->istate,
+ data->pathname));
+ else if (skip_prefix(start, "(path)", &p))
+ write_name_to_buf(sb, data->pathname);
+ else
+ die(_("bad ls-files format: %%%.*s"), (int)len, start);
+
+ return len;
+}
+
+static void show_ce_fmt(struct repository *repo, const struct cache_entry *ce,
+ const char *format, const char *fullname) {
+ struct show_index_data data = {
+ .pathname = fullname,
+ .istate = repo->index,
+ .ce = ce,
+ };
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_expand(&sb, format, expand_show_index, &data);
+ strbuf_addch(&sb, line_terminator);
+ fwrite(sb.buf, sb.len, 1, stdout);
+ strbuf_release(&sb);
+}
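+
+/*
+ * The %(...) placeholders above back "ls-files --format"; for instance
+ * "git ls-files --format='%(objectmode) %(objectname) %(path)'" prints
+ * roughly what "--stage" shows, minus the stage number and tag.
+ */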
+
+static void show_ce(struct repository *repo, struct dir_struct *dir,
+ const struct cache_entry *ce, const char *fullname,
+ const char *tag)
+{
+ if (max_prefix_len > strlen(fullname))
+ die("git ls-files: internal error - cache entry not superset of prefix");
+
+ if (recurse_submodules && S_ISGITLINK(ce->ce_mode) &&
+ is_submodule_active(repo, ce->name)) {
+ show_submodule(repo, dir, ce->name);
+ } else if (match_pathspec(repo->index, &pathspec, fullname, strlen(fullname),
+ max_prefix_len, ps_matched,
+ S_ISDIR(ce->ce_mode) ||
+ S_ISGITLINK(ce->ce_mode))) {
+ if (format) {
+ show_ce_fmt(repo, ce, format, fullname);
+ print_debug(ce);
+ return;
+ }
+
+ tag = get_tag(ce, tag);
+
+ if (!show_stage) {
+ fputs(tag, stdout);
+ } else {
+ printf("%s%06o %s %d\t",
+ tag,
+ ce->ce_mode,
+ repo_find_unique_abbrev(repo, &ce->oid, abbrev),
+ ce_stage(ce));
+ }
+ write_eolinfo(repo->index, ce, fullname);
+ write_name(fullname);
+ print_debug(ce);
+ }
+}
+
+static void show_ru_info(struct index_state *istate)
+{
+ struct string_list_item *item;
+
+ if (!istate->resolve_undo)
+ return;
+
+ for_each_string_list_item(item, istate->resolve_undo) {
+ const char *path = item->string;
+ struct resolve_undo_info *ui = item->util;
+ int i, len;
+
+ len = strlen(path);
+ if (len < max_prefix_len)
+ continue; /* outside of the prefix */
+ if (!match_pathspec(istate, &pathspec, path, len,
+ max_prefix_len, ps_matched, 0))
+ continue; /* uninterested */
+ for (i = 0; i < 3; i++) {
+ if (!ui->mode[i])
+ continue;
+ printf("%s%06o %s %d\t", tag_resolve_undo, ui->mode[i],
+ find_unique_abbrev(&ui->oid[i], abbrev),
+ i + 1);
+ write_name(path);
+ }
+ }
+}
+
+static int ce_excluded(struct dir_struct *dir, struct index_state *istate,
+ const char *fullname, const struct cache_entry *ce)
+{
+ int dtype = ce_to_dtype(ce);
+ return is_excluded(dir, istate, fullname, &dtype);
+}
+
+static void construct_fullname(struct strbuf *out, const struct repository *repo,
+ const struct cache_entry *ce)
+{
+ strbuf_reset(out);
+ if (repo->submodule_prefix)
+ strbuf_addstr(out, repo->submodule_prefix);
+ strbuf_addstr(out, ce->name);
+}
+
+static void show_files(struct repository *repo, struct dir_struct *dir)
+{
+ int i;
+ struct strbuf fullname = STRBUF_INIT;
+
+ /* For cached/deleted files we don't need to even do the readdir */
+ if (show_others || show_killed) {
+ if (!show_others)
+ dir->flags |= DIR_COLLECT_KILLED_ONLY;
+ fill_directory(dir, repo->index, &pathspec);
+ if (show_others)
+ show_other_files(repo->index, dir);
+ if (show_killed)
+ show_killed_files(repo->index, dir);
+ }
+
+ if (!(show_cached || show_stage || show_deleted || show_modified))
+ return;
+
+ if (!show_sparse_dirs)
+ ensure_full_index(repo->index);
+
+ for (i = 0; i < repo->index->cache_nr; i++) {
+ const struct cache_entry *ce = repo->index->cache[i];
+ struct stat st;
+ int stat_err;
+
+ construct_fullname(&fullname, repo, ce);
+
+ if ((dir->flags & DIR_SHOW_IGNORED) &&
+ !ce_excluded(dir, repo->index, fullname.buf, ce))
+ continue;
+ if (ce->ce_flags & CE_UPDATE)
+ continue;
+ if ((show_cached || show_stage) &&
+ (!show_unmerged || ce_stage(ce))) {
+ show_ce(repo, dir, ce, fullname.buf,
+ ce_stage(ce) ? tag_unmerged :
+ (ce_skip_worktree(ce) ? tag_skip_worktree :
+ tag_cached));
+ if (skipping_duplicates)
+ goto skip_to_next_name;
+ }
+
+ if (!(show_deleted || show_modified))
+ continue;
+ if (ce_skip_worktree(ce))
+ continue;
+ stat_err = lstat(fullname.buf, &st);
+ if (stat_err && (errno != ENOENT && errno != ENOTDIR))
+ error_errno("cannot lstat '%s'", fullname.buf);
+ if (stat_err && show_deleted) {
+ show_ce(repo, dir, ce, fullname.buf, tag_removed);
+ if (skipping_duplicates)
+ goto skip_to_next_name;
+ }
+ if (show_modified &&
+ (stat_err || ie_modified(repo->index, ce, &st, 0))) {
+ show_ce(repo, dir, ce, fullname.buf, tag_modified);
+ if (skipping_duplicates)
+ goto skip_to_next_name;
+ }
+ continue;
+
+skip_to_next_name:
+ {
+ int j;
+ struct cache_entry **cache = repo->index->cache;
+ for (j = i + 1; j < repo->index->cache_nr; j++)
+ if (strcmp(ce->name, cache[j]->name))
+ break;
+ i = j - 1; /* compensate for the for loop */
+ }
+ }
+
+ strbuf_release(&fullname);
+}
+
+/*
+ * Prune the index to only contain stuff starting with "prefix"
+ */
+static void prune_index(struct index_state *istate,
+ const char *prefix, size_t prefixlen)
+{
+ int pos;
+ unsigned int first, last;
+
+ if (!prefix || !istate->cache_nr)
+ return;
+ pos = index_name_pos(istate, prefix, prefixlen);
+ if (pos < 0)
+ pos = -pos-1;
+ first = pos;
+ last = istate->cache_nr;
+ while (last > first) {
+ int next = first + ((last - first) >> 1);
+ const struct cache_entry *ce = istate->cache[next];
+ if (!strncmp(ce->name, prefix, prefixlen)) {
+ first = next+1;
+ continue;
+ }
+ last = next;
+ }
+ MOVE_ARRAY(istate->cache, istate->cache + pos, last - pos);
+ istate->cache_nr = last - pos;
+}
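+
+/*
+ * E.g. when run from "Documentation/" in git.git, prune_index() locates
+ * the first index entry at or after that prefix, binary-searches for the
+ * end of the run sharing the prefix, and slides that window to the front
+ * of the in-core index so the later loops only see relevant entries.
+ */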
+
+static int get_common_prefix_len(const char *common_prefix)
+{
+ int common_prefix_len;
+
+ if (!common_prefix)
+ return 0;
+
+ common_prefix_len = strlen(common_prefix);
+
+ /*
+	 * If the prefix has a trailing slash, strip it so that submodules won't
+ * be pruned from the index.
+ */
+ if (common_prefix[common_prefix_len - 1] == '/')
+ common_prefix_len--;
+
+ return common_prefix_len;
+}
+
+static int read_one_entry_opt(struct index_state *istate,
+ const struct object_id *oid,
+ struct strbuf *base,
+ const char *pathname,
+ unsigned mode, int opt)
+{
+ int len;
+ struct cache_entry *ce;
+
+ if (S_ISDIR(mode))
+ return READ_TREE_RECURSIVE;
+
+ len = strlen(pathname);
+ ce = make_empty_cache_entry(istate, base->len + len);
+
+ ce->ce_mode = create_ce_mode(mode);
+ ce->ce_flags = create_ce_flags(1);
+ ce->ce_namelen = base->len + len;
+ memcpy(ce->name, base->buf, base->len);
+ memcpy(ce->name + base->len, pathname, len+1);
+ oidcpy(&ce->oid, oid);
+ return add_index_entry(istate, ce, opt);
+}
+
+static int read_one_entry(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ struct index_state *istate = context;
+ return read_one_entry_opt(istate, oid, base, pathname,
+ mode,
+ ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+}
+
+/*
+ * This is used when the caller knows there are no existing entries at
+ * the stage that will conflict with the entry being added.
+ */
+static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ struct index_state *istate = context;
+ return read_one_entry_opt(istate, oid, base, pathname,
+ mode, ADD_CACHE_JUST_APPEND);
+}
+
+/*
+ * Read the tree specified with --with-tree option
+ * (typically, HEAD) into stage #1 and then
+ * squash them down to stage #0. This is used for
+ * --error-unmatch to list and check the path patterns
+ * that were given from the command line. We are not
+ * going to write this index out.
+ */
+void overlay_tree_on_index(struct index_state *istate,
+ const char *tree_name, const char *prefix)
+{
+ struct tree *tree;
+ struct object_id oid;
+ struct pathspec pathspec;
+ struct cache_entry *last_stage0 = NULL;
+ int i;
+ read_tree_fn_t fn = NULL;
+ int err;
+
+ if (get_oid(tree_name, &oid))
+ die("tree-ish %s not found.", tree_name);
+ tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die("bad tree-ish %s", tree_name);
+
+ /* Hoist the unmerged entries up to stage #3 to make room */
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(istate);
+ for (i = 0; i < istate->cache_nr; i++) {
+ struct cache_entry *ce = istate->cache[i];
+ if (!ce_stage(ce))
+ continue;
+ ce->ce_flags |= CE_STAGEMASK;
+ }
+
+ if (prefix) {
+ static const char *(matchbuf[1]);
+ matchbuf[0] = NULL;
+ parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC,
+ PATHSPEC_PREFER_CWD, prefix, matchbuf);
+ } else
+ memset(&pathspec, 0, sizeof(pathspec));
+
+ /*
+	 * See if we have a cache entry at the stage. If so,
+	 * do it the original slow way; otherwise, append and then
+	 * sort at the end.
+ */
+ for (i = 0; !fn && i < istate->cache_nr; i++) {
+ const struct cache_entry *ce = istate->cache[i];
+ if (ce_stage(ce) == 1)
+ fn = read_one_entry;
+ }
+
+ if (!fn)
+ fn = read_one_entry_quick;
+ err = read_tree(the_repository, tree, &pathspec, fn, istate);
+ if (err)
+ die("unable to read tree entries %s", tree_name);
+
+ /*
+	 * Sort the cache entries -- we need to nuke the cache tree, though.
+ */
+ if (fn == read_one_entry_quick) {
+ cache_tree_free(&istate->cache_tree);
+ QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
+ }
+
+ for (i = 0; i < istate->cache_nr; i++) {
+ struct cache_entry *ce = istate->cache[i];
+ switch (ce_stage(ce)) {
+ case 0:
+ last_stage0 = ce;
+ /* fallthru */
+ default:
+ continue;
+ case 1:
+ /*
+			 * If there is a stage #0 entry for this, we do not
+ * need to show it. We use CE_UPDATE bit to mark
+ * such an entry.
+ */
+ if (last_stage0 &&
+ !strcmp(last_stage0->name, ce->name))
+ ce->ce_flags |= CE_UPDATE;
+ }
+ }
+}
+
+static const char * const ls_files_usage[] = {
+ N_("git ls-files [<options>] [<file>...]"),
+ NULL
+};
+
+static int option_parse_exclude(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct string_list *exclude_list = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ exc_given = 1;
+ string_list_append(exclude_list, arg);
+
+ return 0;
+}
+
+static int option_parse_exclude_from(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct dir_struct *dir = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ exc_given = 1;
+ add_patterns_from_file(dir, arg);
+
+ return 0;
+}
+
+static int option_parse_exclude_standard(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct dir_struct *dir = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ exc_given = 1;
+ setup_standard_excludes(dir);
+
+ return 0;
+}
+
+int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
+{
+ int require_work_tree = 0, show_tag = 0, i;
+ char *max_prefix;
+ struct dir_struct dir = DIR_INIT;
+ struct pattern_list *pl;
+ struct string_list exclude_list = STRING_LIST_INIT_NODUP;
+ struct option builtin_ls_files_options[] = {
+ /* Think twice before adding "--nul" synonym to this */
+ OPT_SET_INT('z', NULL, &line_terminator,
+ N_("separate paths with the NUL character"), '\0'),
+ OPT_BOOL('t', NULL, &show_tag,
+ N_("identify the file status with tags")),
+ OPT_BOOL('v', NULL, &show_valid_bit,
+ N_("use lowercase letters for 'assume unchanged' files")),
+ OPT_BOOL('f', NULL, &show_fsmonitor_bit,
+ N_("use lowercase letters for 'fsmonitor clean' files")),
+ OPT_BOOL('c', "cached", &show_cached,
+ N_("show cached files in the output (default)")),
+ OPT_BOOL('d', "deleted", &show_deleted,
+ N_("show deleted files in the output")),
+ OPT_BOOL('m', "modified", &show_modified,
+ N_("show modified files in the output")),
+ OPT_BOOL('o', "others", &show_others,
+ N_("show other files in the output")),
+ OPT_BIT('i', "ignored", &dir.flags,
+ N_("show ignored files in the output"),
+ DIR_SHOW_IGNORED),
+ OPT_BOOL('s', "stage", &show_stage,
+ N_("show staged contents' object name in the output")),
+ OPT_BOOL('k', "killed", &show_killed,
+ N_("show files on the filesystem that need to be removed")),
+ OPT_BIT(0, "directory", &dir.flags,
+ N_("show 'other' directories' names only"),
+ DIR_SHOW_OTHER_DIRECTORIES),
+ OPT_BOOL(0, "eol", &show_eol, N_("show line endings of files")),
+ OPT_NEGBIT(0, "empty-directory", &dir.flags,
+ N_("don't show empty directories"),
+ DIR_HIDE_EMPTY_DIRECTORIES),
+ OPT_BOOL('u', "unmerged", &show_unmerged,
+ N_("show unmerged files in the output")),
+ OPT_BOOL(0, "resolve-undo", &show_resolve_undo,
+ N_("show resolve-undo information")),
+ OPT_CALLBACK_F('x', "exclude", &exclude_list, N_("pattern"),
+ N_("skip files matching pattern"),
+ PARSE_OPT_NONEG, option_parse_exclude),
+ OPT_CALLBACK_F('X', "exclude-from", &dir, N_("file"),
+ N_("read exclude patterns from <file>"),
+ PARSE_OPT_NONEG, option_parse_exclude_from),
+ OPT_STRING(0, "exclude-per-directory", &dir.exclude_per_dir, N_("file"),
+ N_("read additional per-directory exclude patterns in <file>")),
+ OPT_CALLBACK_F(0, "exclude-standard", &dir, NULL,
+ N_("add the standard git exclusions"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ option_parse_exclude_standard),
+ OPT_SET_INT_F(0, "full-name", &prefix_len,
+ N_("make the output relative to the project top directory"),
+ 0, PARSE_OPT_NONEG),
+ OPT_BOOL(0, "recurse-submodules", &recurse_submodules,
+ N_("recurse through submodules")),
+ OPT_BOOL(0, "error-unmatch", &error_unmatch,
+ N_("if any <file> is not in the index, treat this as an error")),
+ OPT_STRING(0, "with-tree", &with_tree, N_("tree-ish"),
+ N_("pretend that paths removed since <tree-ish> are still present")),
+ OPT__ABBREV(&abbrev),
+ OPT_BOOL(0, "debug", &debug_mode, N_("show debugging data")),
+ OPT_BOOL(0, "deduplicate", &skipping_duplicates,
+ N_("suppress duplicate entries")),
+ OPT_BOOL(0, "sparse", &show_sparse_dirs,
+ N_("show sparse directories in the presence of a sparse index")),
+ OPT_STRING_F(0, "format", &format, N_("format"),
+ N_("format to use for the output"),
+ PARSE_OPT_NONEG),
+ OPT_END()
+ };
+ int ret = 0;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(ls_files_usage, builtin_ls_files_options);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ prefix = cmd_prefix;
+ if (prefix)
+ prefix_len = strlen(prefix);
+ git_config(git_default_config, NULL);
+
+ if (repo_read_index(the_repository) < 0)
+ die("index file corrupt");
+
+ argc = parse_options(argc, argv, prefix, builtin_ls_files_options,
+ ls_files_usage, 0);
+ pl = add_pattern_list(&dir, EXC_CMDL, "--exclude option");
+ for (i = 0; i < exclude_list.nr; i++) {
+ add_pattern(exclude_list.items[i].string, "", 0, pl, --exclude_args);
+ }
+
+ if (format && (show_stage || show_others || show_killed ||
+ show_resolve_undo || skipping_duplicates || show_eol || show_tag))
+ usage_msg_opt(_("--format cannot be used with -s, -o, -k, -t, "
+ "--resolve-undo, --deduplicate, --eol"),
+ ls_files_usage, builtin_ls_files_options);
+
+ if (show_tag || show_valid_bit || show_fsmonitor_bit) {
+ tag_cached = "H ";
+ tag_unmerged = "M ";
+ tag_removed = "R ";
+ tag_modified = "C ";
+ tag_other = "? ";
+ tag_killed = "K ";
+ tag_skip_worktree = "S ";
+ tag_resolve_undo = "U ";
+ }
+ if (show_modified || show_others || show_deleted || (dir.flags & DIR_SHOW_IGNORED) || show_killed)
+ require_work_tree = 1;
+ if (show_unmerged)
+ /*
+ * There's no point in showing unmerged unless
+ * you also show the stage information.
+ */
+ show_stage = 1;
+ if (show_tag || show_stage)
+ skipping_duplicates = 0;
+ if (dir.exclude_per_dir)
+ exc_given = 1;
+
+ if (require_work_tree && !is_inside_work_tree())
+ setup_work_tree();
+
+ if (recurse_submodules &&
+ (show_deleted || show_others || show_unmerged ||
+ show_killed || show_modified || show_resolve_undo || with_tree))
+ die("ls-files --recurse-submodules unsupported mode");
+
+ if (recurse_submodules && error_unmatch)
+ die("ls-files --recurse-submodules does not support "
+ "--error-unmatch");
+
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_CWD,
+ prefix, argv);
+
+ /*
+	 * Find the common prefix for all pathspecs.
+ * This is used as a performance optimization which unfortunately cannot
+ * be done when recursing into submodules because when a pathspec is
+ * given which spans repository boundaries you can't simply remove the
+ * submodule entry because the pathspec may match something inside the
+ * submodule.
+ */
+ if (recurse_submodules)
+ max_prefix = NULL;
+ else
+ max_prefix = common_prefix(&pathspec);
+ max_prefix_len = get_common_prefix_len(max_prefix);
+
+ prune_index(the_repository->index, max_prefix, max_prefix_len);
+
+ /* Treat unmatching pathspec elements as errors */
+ if (pathspec.nr && error_unmatch)
+ ps_matched = xcalloc(pathspec.nr, 1);
+
+ if ((dir.flags & DIR_SHOW_IGNORED) && !show_others && !show_cached)
+ die("ls-files -i must be used with either -o or -c");
+
+ if ((dir.flags & DIR_SHOW_IGNORED) && !exc_given)
+ die("ls-files --ignored needs some exclude pattern");
+
+ /* With no flags, we default to showing the cached files */
+ if (!(show_stage || show_deleted || show_others || show_unmerged ||
+ show_killed || show_modified || show_resolve_undo))
+ show_cached = 1;
+
+ if (with_tree) {
+ /*
+ * Basic sanity check; show-stages and show-unmerged
+ * would not make any sense with this option.
+ */
+ if (show_stage || show_unmerged)
+ die(_("options '%s' and '%s' cannot be used together"), "ls-files --with-tree", "-s/-u");
+ overlay_tree_on_index(the_repository->index, with_tree, max_prefix);
+ }
+
+ show_files(the_repository, &dir);
+
+ if (show_resolve_undo)
+ show_ru_info(the_repository->index);
+
+ if (ps_matched && report_path_error(ps_matched, &pathspec)) {
+ fprintf(stderr, "Did you forget to 'git add'?\n");
+ ret = 1;
+ }
+
+ string_list_clear(&exclude_list, 0);
+ dir_clear(&dir);
+ free(max_prefix);
+ return ret;
+}
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
new file mode 100644
index 0000000..5d5ac03
--- /dev/null
+++ b/builtin/ls-remote.c
@@ -0,0 +1,161 @@
+#include "builtin.h"
+#include "cache.h"
+#include "transport.h"
+#include "ref-filter.h"
+#include "remote.h"
+#include "refs.h"
+
+static const char * const ls_remote_usage[] = {
+ N_("git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>]\n"
+ " [-q | --quiet] [--exit-code] [--get-url] [--sort=<key>]\n"
+ " [--symref] [<repository> [<refs>...]]"),
+ NULL
+};
+
+/*
+ * Is there one among the list of patterns that matches the tail part
+ * of the path?
+ */
+static int tail_match(const char **pattern, const char *path)
+{
+ const char *p;
+ char *pathbuf;
+
+ if (!pattern)
+ return 1; /* no restriction */
+
+ pathbuf = xstrfmt("/%s", path);
+ while ((p = *(pattern++)) != NULL) {
+ if (!wildmatch(p, pathbuf, 0)) {
+ free(pathbuf);
+ return 1;
+ }
+ }
+ free(pathbuf);
+ return 0;
+}
+
+int cmd_ls_remote(int argc, const char **argv, const char *prefix)
+{
+ const char *dest = NULL;
+ unsigned flags = 0;
+ int get_url = 0;
+ int quiet = 0;
+ int status = 0;
+ int show_symref_target = 0;
+ const char *uploadpack = NULL;
+ const char **pattern = NULL;
+ struct transport_ls_refs_options transport_options =
+ TRANSPORT_LS_REFS_OPTIONS_INIT;
+ int i;
+ struct string_list server_options = STRING_LIST_INIT_DUP;
+
+ struct remote *remote;
+ struct transport *transport;
+ const struct ref *ref;
+ struct ref_array ref_array;
+ struct string_list sorting_options = STRING_LIST_INIT_DUP;
+
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("do not print remote URL")),
+ OPT_STRING(0, "upload-pack", &uploadpack, N_("exec"),
+ N_("path of git-upload-pack on the remote host")),
+ { OPTION_STRING, 0, "exec", &uploadpack, N_("exec"),
+ N_("path of git-upload-pack on the remote host"),
+ PARSE_OPT_HIDDEN },
+ OPT_BIT('t', "tags", &flags, N_("limit to tags"), REF_TAGS),
+ OPT_BIT('h', "heads", &flags, N_("limit to heads"), REF_HEADS),
+ OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
+ OPT_BOOL(0, "get-url", &get_url,
+ N_("take url.<base>.insteadOf into account")),
+ OPT_REF_SORT(&sorting_options),
+ OPT_SET_INT_F(0, "exit-code", &status,
+ N_("exit with exit code 2 if no matching refs are found"),
+ 2, PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL(0, "symref", &show_symref_target,
+ N_("show underlying ref in addition to the object pointed by it")),
+ OPT_STRING_LIST('o', "server-option", &server_options, N_("server-specific"), N_("option to transmit")),
+ OPT_END()
+ };
+
+ memset(&ref_array, 0, sizeof(ref_array));
+
+ argc = parse_options(argc, argv, prefix, options, ls_remote_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ dest = argv[0];
+
+ packet_trace_identity("ls-remote");
+
+ if (argc > 1) {
+ int i;
+ CALLOC_ARRAY(pattern, argc);
+ for (i = 1; i < argc; i++) {
+ pattern[i - 1] = xstrfmt("*/%s", argv[i]);
+ }
+ }
+
+ if (flags & REF_TAGS)
+ strvec_push(&transport_options.ref_prefixes, "refs/tags/");
+ if (flags & REF_HEADS)
+ strvec_push(&transport_options.ref_prefixes, "refs/heads/");
+
+ remote = remote_get(dest);
+ if (!remote) {
+ if (dest)
+ die("bad repository '%s'", dest);
+ die("No remote configured to list refs from.");
+ }
+ if (!remote->url_nr)
+ die("remote %s has no configured URL", dest);
+
+ if (get_url) {
+ printf("%s\n", *remote->url);
+ return 0;
+ }
+
+ transport = transport_get(remote, NULL);
+ if (uploadpack)
+ transport_set_option(transport, TRANS_OPT_UPLOADPACK, uploadpack);
+ if (server_options.nr)
+ transport->server_options = &server_options;
+
+ ref = transport_get_remote_refs(transport, &transport_options);
+ if (ref) {
+ int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
+ repo_set_hash_algo(the_repository, hash_algo);
+ }
+
+ if (!dest && !quiet)
+ fprintf(stderr, "From %s\n", *remote->url);
+ for ( ; ref; ref = ref->next) {
+ struct ref_array_item *item;
+ if (!check_ref_type(ref, flags))
+ continue;
+ if (!tail_match(pattern, ref->name))
+ continue;
+ item = ref_array_push(&ref_array, ref->name, &ref->old_oid);
+ item->symref = xstrdup_or_null(ref->symref);
+ }
+
+ if (sorting_options.nr) {
+ struct ref_sorting *sorting;
+
+ sorting = ref_sorting_options(&sorting_options);
+ ref_array_sort(sorting, &ref_array);
+ ref_sorting_release(sorting);
+ }
+
+ for (i = 0; i < ref_array.nr; i++) {
+ const struct ref_array_item *ref = ref_array.items[i];
+ if (show_symref_target && ref->symref)
+ printf("ref: %s\t%s\n", ref->symref, ref->refname);
+ printf("%s\t%s\n", oid_to_hex(&ref->objectname), ref->refname);
+ status = 0; /* we found something */
+ }
+
+ ref_array_clear(&ref_array);
+ if (transport_disconnect(transport))
+ status = 1;
+ transport_ls_refs_options_release(&transport_options);
+ return status;
+}
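
tail_match() above anchors each command-line <ref> pattern at a path-component
boundary by prepending "*/" to the pattern and "/" to the ref name before
calling wildmatch(), so "v1.0" matches refs/tags/v1.0 but not refs/tags/xv1.0.
A rough stand-alone sketch of the same idea, using POSIX fnmatch(3) as a
stand-in for git's wildmatch() (illustrative only, not part of the upstream
file):

    #include <stdio.h>
    #include <fnmatch.h>

    /* does 'pattern' match a trailing path-component sequence of 'ref'? */
    static int tail_matches(const char *pattern, const char *ref)
    {
        char pat[256], path[256];

        snprintf(pat, sizeof(pat), "*/%s", pattern);
        snprintf(path, sizeof(path), "/%s", ref);
        return fnmatch(pat, path, 0) == 0;   /* fnmatch() returns 0 on a match */
    }

    int main(void)
    {
        printf("%d\n", tail_matches("v1.0", "refs/tags/v1.0"));     /* 1 */
        printf("%d\n", tail_matches("v1.0", "refs/tags/xv1.0"));    /* 0 */
        printf("%d\n", tail_matches("heads/*", "refs/heads/main")); /* 1 */
        return 0;
    }
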
diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c
new file mode 100644
index 0000000..c3ea092
--- /dev/null
+++ b/builtin/ls-tree.c
@@ -0,0 +1,437 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "cache.h"
+#include "config.h"
+#include "object-store.h"
+#include "blob.h"
+#include "tree.h"
+#include "commit.h"
+#include "quote.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "pathspec.h"
+
+static int line_termination = '\n';
+#define LS_RECURSIVE 1
+#define LS_TREE_ONLY (1 << 1)
+#define LS_SHOW_TREES (1 << 2)
+static int abbrev;
+static int ls_options;
+static struct pathspec pathspec;
+static int chomp_prefix;
+static const char *ls_tree_prefix;
+static const char *format;
+struct show_tree_data {
+ unsigned mode;
+ enum object_type type;
+ const struct object_id *oid;
+ const char *pathname;
+ struct strbuf *base;
+};
+
+static const char * const ls_tree_usage[] = {
+ N_("git ls-tree [<options>] <tree-ish> [<path>...]"),
+ NULL
+};
+
+static enum ls_tree_cmdmode {
+ MODE_DEFAULT = 0,
+ MODE_LONG,
+ MODE_NAME_ONLY,
+ MODE_NAME_STATUS,
+ MODE_OBJECT_ONLY,
+} cmdmode;
+
+static void expand_objectsize(struct strbuf *line, const struct object_id *oid,
+ const enum object_type type, unsigned int padded)
+{
+ if (type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, oid, &size) < 0)
+ die(_("could not get object info about '%s'"),
+ oid_to_hex(oid));
+ if (padded)
+ strbuf_addf(line, "%7"PRIuMAX, (uintmax_t)size);
+ else
+ strbuf_addf(line, "%"PRIuMAX, (uintmax_t)size);
+ } else if (padded) {
+ strbuf_addf(line, "%7s", "-");
+ } else {
+ strbuf_addstr(line, "-");
+ }
+}
+
+static size_t expand_show_tree(struct strbuf *sb, const char *start,
+ void *context)
+{
+ struct show_tree_data *data = context;
+ const char *end;
+ const char *p;
+ unsigned int errlen;
+ size_t len = strbuf_expand_literal_cb(sb, start, NULL);
+
+ if (len)
+ return len;
+ if (*start != '(')
+ die(_("bad ls-tree format: element '%s' does not start with '('"), start);
+
+ end = strchr(start + 1, ')');
+ if (!end)
+ die(_("bad ls-tree format: element '%s' does not end in ')'"), start);
+
+ len = end - start + 1;
+ if (skip_prefix(start, "(objectmode)", &p)) {
+ strbuf_addf(sb, "%06o", data->mode);
+ } else if (skip_prefix(start, "(objecttype)", &p)) {
+ strbuf_addstr(sb, type_name(data->type));
+ } else if (skip_prefix(start, "(objectsize:padded)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 1);
+ } else if (skip_prefix(start, "(objectsize)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 0);
+ } else if (skip_prefix(start, "(objectname)", &p)) {
+ strbuf_add_unique_abbrev(sb, data->oid, abbrev);
+ } else if (skip_prefix(start, "(path)", &p)) {
+ const char *name = data->base->buf;
+ const char *prefix = chomp_prefix ? ls_tree_prefix : NULL;
+ struct strbuf quoted = STRBUF_INIT;
+ struct strbuf sbuf = STRBUF_INIT;
+ strbuf_addstr(data->base, data->pathname);
+ name = relative_path(data->base->buf, prefix, &sbuf);
+ quote_c_style(name, &quoted, NULL, 0);
+ strbuf_addbuf(sb, &quoted);
+ strbuf_release(&sbuf);
+ strbuf_release(&quoted);
+ } else {
+ errlen = (unsigned long)len;
+ die(_("bad ls-tree format: %%%.*s"), errlen, start);
+ }
+ return len;
+}
+
+static int show_recursive(const char *base, size_t baselen, const char *pathname)
+{
+ int i;
+
+ if (ls_options & LS_RECURSIVE)
+ return 1;
+
+ if (!pathspec.nr)
+ return 0;
+
+ for (i = 0; i < pathspec.nr; i++) {
+ const char *spec = pathspec.items[i].match;
+ size_t len, speclen;
+
+ if (strncmp(base, spec, baselen))
+ continue;
+ len = strlen(pathname);
+ spec += baselen;
+ speclen = strlen(spec);
+ if (speclen <= len)
+ continue;
+ if (spec[len] && spec[len] != '/')
+ continue;
+ if (memcmp(pathname, spec, len))
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+static int show_tree_fmt(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context UNUSED)
+{
+ size_t baselen;
+ int recurse = 0;
+ struct strbuf sb = STRBUF_INIT;
+ enum object_type type = object_type(mode);
+
+ struct show_tree_data data = {
+ .mode = mode,
+ .type = type,
+ .oid = oid,
+ .pathname = pathname,
+ .base = base,
+ };
+
+ if (type == OBJ_TREE && show_recursive(base->buf, base->len, pathname))
+ recurse = READ_TREE_RECURSIVE;
+ if (type == OBJ_TREE && recurse && !(ls_options & LS_SHOW_TREES))
+ return recurse;
+ if (type == OBJ_BLOB && (ls_options & LS_TREE_ONLY))
+ return 0;
+
+ baselen = base->len;
+ strbuf_expand(&sb, format, expand_show_tree, &data);
+ strbuf_addch(&sb, line_termination);
+ fwrite(sb.buf, sb.len, 1, stdout);
+ strbuf_release(&sb);
+ strbuf_setlen(base, baselen);
+ return recurse;
+}
+
+static int show_tree_common(struct show_tree_data *data, int *recurse,
+ const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode)
+{
+ enum object_type type = object_type(mode);
+ int ret = -1;
+
+ *recurse = 0;
+ data->mode = mode;
+ data->type = type;
+ data->oid = oid;
+ data->pathname = pathname;
+ data->base = base;
+
+ if (type == OBJ_BLOB) {
+ if (ls_options & LS_TREE_ONLY)
+ ret = 0;
+ } else if (type == OBJ_TREE &&
+ show_recursive(base->buf, base->len, pathname)) {
+ *recurse = READ_TREE_RECURSIVE;
+ if (!(ls_options & LS_SHOW_TREES))
+ ret = *recurse;
+ }
+
+ return ret;
+}
+
+static void show_tree_common_default_long(struct strbuf *base,
+ const char *pathname,
+ const size_t baselen)
+{
+ strbuf_addstr(base, pathname);
+ write_name_quoted_relative(base->buf,
+ chomp_prefix ? ls_tree_prefix : NULL, stdout,
+ line_termination);
+ strbuf_setlen(base, baselen);
+}
+
+static int show_tree_default(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context UNUSED)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%06o %s %s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev));
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_long(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context UNUSED)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+ char size_text[24];
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ if (data.type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, data.oid, &size) == OBJ_BAD)
+ xsnprintf(size_text, sizeof(size_text), "BAD");
+ else
+ xsnprintf(size_text, sizeof(size_text),
+ "%" PRIuMAX, (uintmax_t)size);
+ } else {
+ xsnprintf(size_text, sizeof(size_text), "-");
+ }
+
+ printf("%06o %s %s %7s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev), size_text);
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_name_only(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context UNUSED)
+{
+ int early;
+ int recurse;
+ const size_t baselen = base->len;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ strbuf_addstr(base, pathname);
+ write_name_quoted_relative(base->buf,
+ chomp_prefix ? ls_tree_prefix : NULL,
+ stdout, line_termination);
+ strbuf_setlen(base, baselen);
+ return recurse;
+}
+
+static int show_tree_object(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context UNUSED)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%s%c", find_unique_abbrev(oid, abbrev), line_termination);
+ return recurse;
+}
+
+struct ls_tree_cmdmode_to_fmt {
+ enum ls_tree_cmdmode mode;
+ const char *const fmt;
+ read_tree_fn_t fn;
+};
+
+static struct ls_tree_cmdmode_to_fmt ls_tree_cmdmode_format[] = {
+ {
+ .mode = MODE_DEFAULT,
+ .fmt = "%(objectmode) %(objecttype) %(objectname)%x09%(path)",
+ .fn = show_tree_default,
+ },
+ {
+ .mode = MODE_LONG,
+ .fmt = "%(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)",
+ .fn = show_tree_long,
+ },
+ {
+ .mode = MODE_NAME_ONLY, /* And MODE_NAME_STATUS */
+ .fmt = "%(path)",
+ .fn = show_tree_name_only,
+ },
+ {
+ .mode = MODE_OBJECT_ONLY,
+ .fmt = "%(objectname)",
+ .fn = show_tree_object
+ },
+ {
+ /* fallback */
+ .fn = show_tree_default,
+ },
+};
+
+int cmd_ls_tree(int argc, const char **argv, const char *prefix)
+{
+ struct object_id oid;
+ struct tree *tree;
+ int i, full_tree = 0;
+ read_tree_fn_t fn = NULL;
+ const struct option ls_tree_options[] = {
+ OPT_BIT('d', NULL, &ls_options, N_("only show trees"),
+ LS_TREE_ONLY),
+ OPT_BIT('r', NULL, &ls_options, N_("recurse into subtrees"),
+ LS_RECURSIVE),
+ OPT_BIT('t', NULL, &ls_options, N_("show trees when recursing"),
+ LS_SHOW_TREES),
+ OPT_SET_INT('z', NULL, &line_termination,
+ N_("terminate entries with NUL byte"), 0),
+ OPT_CMDMODE('l', "long", &cmdmode, N_("include object size"),
+ MODE_LONG),
+ OPT_CMDMODE(0, "name-only", &cmdmode, N_("list only filenames"),
+ MODE_NAME_ONLY),
+ OPT_CMDMODE(0, "name-status", &cmdmode, N_("list only filenames"),
+ MODE_NAME_STATUS),
+ OPT_CMDMODE(0, "object-only", &cmdmode, N_("list only objects"),
+ MODE_OBJECT_ONLY),
+ OPT_SET_INT(0, "full-name", &chomp_prefix,
+ N_("use full path names"), 0),
+ OPT_BOOL(0, "full-tree", &full_tree,
+ N_("list entire tree; not just current directory "
+ "(implies --full-name)")),
+ OPT_STRING_F(0, "format", &format, N_("format"),
+ N_("format to use for the output"),
+ PARSE_OPT_NONEG),
+ OPT__ABBREV(&abbrev),
+ OPT_END()
+ };
+ struct ls_tree_cmdmode_to_fmt *m2f = ls_tree_cmdmode_format;
+
+ git_config(git_default_config, NULL);
+ ls_tree_prefix = prefix;
+ if (prefix)
+ chomp_prefix = strlen(prefix);
+
+ argc = parse_options(argc, argv, prefix, ls_tree_options,
+ ls_tree_usage, 0);
+ if (full_tree) {
+ ls_tree_prefix = prefix = NULL;
+ chomp_prefix = 0;
+ }
+ /*
+ * We wanted to detect conflicts between --name-only and
+ * --name-status, but once we're done with that subsequent
+ * code should only need to check the primary name.
+ */
+ if (cmdmode == MODE_NAME_STATUS)
+ cmdmode = MODE_NAME_ONLY;
+
+ /* -d -r should imply -t, but -d by itself should not have to. */
+ if ( (LS_TREE_ONLY|LS_RECURSIVE) ==
+ ((LS_TREE_ONLY|LS_RECURSIVE) & ls_options))
+ ls_options |= LS_SHOW_TREES;
+
+ if (format && cmdmode)
+ usage_msg_opt(
+ _("--format can't be combined with other format-altering options"),
+ ls_tree_usage, ls_tree_options);
+ if (argc < 1)
+ usage_with_options(ls_tree_usage, ls_tree_options);
+ if (get_oid(argv[0], &oid))
+ die("Not a valid object name %s", argv[0]);
+
+ /*
+ * show_recursive() rolls its own matching code and is
+ * generally ignorant of 'struct pathspec'. The magic mask
+ * cannot be lifted until it is converted to use
+ * match_pathspec() or tree_entry_interesting()
+ */
+ parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC &
+ ~(PATHSPEC_FROMTOP | PATHSPEC_LITERAL),
+ PATHSPEC_PREFER_CWD,
+ prefix, argv + 1);
+ for (i = 0; i < pathspec.nr; i++)
+ pathspec.items[i].nowildcard_len = pathspec.items[i].len;
+ pathspec.has_wildcard = 0;
+ tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die("not a tree object");
+ /*
+ * The generic show_tree_fmt() is slower than show_tree(), so
+ * take the fast path if possible.
+ */
+ while (m2f) {
+ if (!m2f->fmt) {
+ fn = format ? show_tree_fmt : show_tree_default;
+ } else if (format && !strcmp(format, m2f->fmt)) {
+ cmdmode = m2f->mode;
+ fn = m2f->fn;
+ } else if (!format && cmdmode == m2f->mode) {
+ fn = m2f->fn;
+ } else {
+ m2f++;
+ continue;
+ }
+ break;
+ }
+
+ return !!read_tree(the_repository, tree, &pathspec, fn, NULL);
+}
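
The ls_tree_cmdmode_format[] table above pairs each output mode with the
--format string it is equivalent to, and cmd_ls_tree() scans it so that a
user-supplied --format which exactly matches one of the built-in strings still
takes the fast hand-written printer instead of the generic strbuf_expand()
path. A rough stand-alone sketch of that table scan (the printer names are
invented for illustration):

    #include <stdio.h>
    #include <string.h>

    typedef void (*printer_fn)(void);

    static void print_default(void) { puts("fast default printer"); }
    static void print_long(void)    { puts("fast --long printer"); }
    static void print_generic(void) { puts("generic format expander"); }

    struct mode_to_fmt {
        const char *fmt;   /* canonical format string; NULL marks the fallback */
        printer_fn fn;
    };

    static const struct mode_to_fmt table[] = {
        { "%(objectmode) %(objecttype) %(objectname)%x09%(path)", print_default },
        { "%(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)", print_long },
        { NULL, print_generic },   /* arbitrary user formats fall through here */
    };

    static printer_fn pick_printer(const char *format)
    {
        const struct mode_to_fmt *m = table;

        while (m->fmt && strcmp(format, m->fmt))
            m++;
        return m->fn;
    }

    int main(void)
    {
        pick_printer("%(objectmode) %(objecttype) %(objectname)%x09%(path)")();
        pick_printer("%(path) %(objectname)")();
        return 0;
    }
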
diff --git a/builtin/mailinfo.c b/builtin/mailinfo.c
new file mode 100644
index 0000000..01d16ef
--- /dev/null
+++ b/builtin/mailinfo.c
@@ -0,0 +1,114 @@
+/*
+ * Another stupid program, this one parsing the headers of an
+ * email to figure out authorship and subject
+ */
+#include "cache.h"
+#include "builtin.h"
+#include "utf8.h"
+#include "strbuf.h"
+#include "mailinfo.h"
+#include "parse-options.h"
+
+static const char * const mailinfo_usage[] = {
+ /* TRANSLATORS: keep <> in "<" mail ">" info. */
+ N_("git mailinfo [<options>] <msg> <patch> < mail >info"),
+ NULL,
+};
+
+struct metainfo_charset {
+ enum {
+ CHARSET_DEFAULT,
+ CHARSET_NO_REENCODE,
+ CHARSET_EXPLICIT,
+ } policy;
+ const char *charset;
+};
+
+static int parse_opt_explicit_encoding(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct metainfo_charset *meta_charset = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ meta_charset->policy = CHARSET_EXPLICIT;
+ meta_charset->charset = arg;
+
+ return 0;
+}
+
+static int parse_opt_quoted_cr(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ if (mailinfo_parse_quoted_cr_action(arg, opt->value) != 0)
+ return error(_("bad action '%s' for '%s'"), arg, "--quoted-cr");
+ return 0;
+}
+
+int cmd_mailinfo(int argc, const char **argv, const char *prefix)
+{
+ struct metainfo_charset meta_charset;
+ struct mailinfo mi;
+ int status;
+ char *msgfile, *patchfile;
+
+ struct option options[] = {
+ OPT_BOOL('k', NULL, &mi.keep_subject, N_("keep subject")),
+ OPT_BOOL('b', NULL, &mi.keep_non_patch_brackets_in_subject,
+ N_("keep non patch brackets in subject")),
+ OPT_BOOL('m', "message-id", &mi.add_message_id,
+ N_("copy Message-ID to the end of commit message")),
+ OPT_SET_INT_F('u', NULL, &meta_charset.policy,
+ N_("re-code metadata to i18n.commitEncoding"),
+ CHARSET_DEFAULT, PARSE_OPT_NONEG),
+ OPT_SET_INT_F('n', NULL, &meta_charset.policy,
+ N_("disable charset re-coding of metadata"),
+ CHARSET_NO_REENCODE, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "encoding", &meta_charset, N_("encoding"),
+ N_("re-code metadata to this encoding"),
+ PARSE_OPT_NONEG, parse_opt_explicit_encoding),
+ OPT_BOOL(0, "scissors", &mi.use_scissors, N_("use scissors")),
+ OPT_CALLBACK_F(0, "quoted-cr", &mi.quoted_cr, N_("<action>"),
+ N_("action when quoted CR is found"),
+ PARSE_OPT_NONEG, parse_opt_quoted_cr),
+ OPT_HIDDEN_BOOL(0, "inbody-headers", &mi.use_inbody_headers,
+ N_("use headers in message's body")),
+ OPT_END()
+ };
+
+ setup_mailinfo(&mi);
+ meta_charset.policy = CHARSET_DEFAULT;
+
+ argc = parse_options(argc, argv, prefix, options, mailinfo_usage, 0);
+
+ if (argc != 2)
+ usage_with_options(mailinfo_usage, options);
+
+ switch (meta_charset.policy) {
+ case CHARSET_DEFAULT:
+ mi.metainfo_charset = get_commit_output_encoding();
+ break;
+ case CHARSET_NO_REENCODE:
+ mi.metainfo_charset = NULL;
+ break;
+ case CHARSET_EXPLICIT:
+ break;
+ default:
+ BUG("invalid meta_charset.policy");
+ }
+
+ mi.input = stdin;
+ mi.output = stdout;
+
+ msgfile = prefix_filename(prefix, argv[0]);
+ patchfile = prefix_filename(prefix, argv[1]);
+
+ status = !!mailinfo(&mi, msgfile, patchfile);
+ clear_mailinfo(&mi);
+
+ free(msgfile);
+ free(patchfile);
+ return status;
+}
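
The metainfo_charset struct above only records which of -u, -n or
--encoding=<charset> was given; cmd_mailinfo() then maps that policy onto the
encoding the metadata should be re-coded to. A minimal stand-alone sketch of
the same three-way decision (the "UTF-8" default here is an assumption standing
in for the configured i18n.commitEncoding):

    #include <stdio.h>

    enum charset_policy { POLICY_DEFAULT, POLICY_NO_REENCODE, POLICY_EXPLICIT };

    static const char *target_charset(enum charset_policy policy, const char *requested)
    {
        switch (policy) {
        case POLICY_DEFAULT:
            return "UTF-8";     /* stand-in for i18n.commitEncoding */
        case POLICY_NO_REENCODE:
            return NULL;        /* leave the metadata bytes untouched */
        case POLICY_EXPLICIT:
            return requested;   /* whatever --encoding asked for */
        }
        return NULL;
    }

    int main(void)
    {
        const char *cs = target_charset(POLICY_EXPLICIT, "ISO-8859-1");
        printf("re-encode to: %s\n", cs ? cs : "(none)");
        return 0;
    }
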
diff --git a/builtin/mailsplit.c b/builtin/mailsplit.c
new file mode 100644
index 0000000..73509f6
--- /dev/null
+++ b/builtin/mailsplit.c
@@ -0,0 +1,370 @@
+/*
+ * Totally braindamaged mbox splitter program.
+ *
+ * It just splits a mbox into a list of files: "0001" "0002" ..
+ * so you can process them further from there.
+ */
+#include "cache.h"
+#include "builtin.h"
+#include "string-list.h"
+#include "strbuf.h"
+
+static const char git_mailsplit_usage[] =
+"git mailsplit [-d<prec>] [-f<n>] [-b] [--keep-cr] -o<directory> [(<mbox>|<Maildir>)...]";
+
+static int is_from_line(const char *line, int len)
+{
+ const char *colon;
+
+ if (len < 20 || memcmp("From ", line, 5))
+ return 0;
+
+ colon = line + len - 2;
+ line += 5;
+ for (;;) {
+ if (colon < line)
+ return 0;
+ if (*--colon == ':')
+ break;
+ }
+
+ if (!isdigit(colon[-4]) ||
+ !isdigit(colon[-2]) ||
+ !isdigit(colon[-1]) ||
+ !isdigit(colon[ 1]) ||
+ !isdigit(colon[ 2]))
+ return 0;
+
+ /* year */
+ if (strtol(colon+3, NULL, 10) <= 90)
+ return 0;
+
+ /* Ok, close enough */
+ return 1;
+}
+
+static struct strbuf buf = STRBUF_INIT;
+static int keep_cr;
+static int mboxrd;
+
+static int is_gtfrom(const struct strbuf *buf)
+{
+ size_t min = strlen(">From ");
+ size_t ngt;
+
+ if (buf->len < min)
+ return 0;
+
+ ngt = strspn(buf->buf, ">");
+ return ngt && starts_with(buf->buf + ngt, "From ");
+}
+
+/* Called with the first line (potentially partial)
+ * already in buf[] -- normally that should begin with
+ * the Unix "From " line. Write it into the specified
+ * file.
+ */
+static int split_one(FILE *mbox, const char *name, int allow_bare)
+{
+ FILE *output;
+ int fd;
+ int status = 0;
+ int is_bare = !is_from_line(buf.buf, buf.len);
+
+ if (is_bare && !allow_bare) {
+ fprintf(stderr, "corrupt mailbox\n");
+ exit(1);
+ }
+ fd = xopen(name, O_WRONLY | O_CREAT | O_EXCL, 0666);
+ output = xfdopen(fd, "w");
+
+ /* Copy it out, while searching for a line that begins with
+ * "From " and having something that looks like a date format.
+ */
+ for (;;) {
+ if (!keep_cr && buf.len > 1 && buf.buf[buf.len-1] == '\n' &&
+ buf.buf[buf.len-2] == '\r') {
+ strbuf_setlen(&buf, buf.len-2);
+ strbuf_addch(&buf, '\n');
+ }
+
+ if (mboxrd && is_gtfrom(&buf))
+ strbuf_remove(&buf, 0, 1);
+
+ if (fwrite(buf.buf, 1, buf.len, output) != buf.len)
+ die_errno("cannot write output");
+
+ if (strbuf_getwholeline(&buf, mbox, '\n')) {
+ if (feof(mbox)) {
+ status = 1;
+ break;
+ }
+ die_errno("cannot read mbox");
+ }
+ if (!is_bare && is_from_line(buf.buf, buf.len))
+ break; /* done with one message */
+ }
+ fclose(output);
+ return status;
+}
+
+static int populate_maildir_list(struct string_list *list, const char *path)
+{
+ DIR *dir;
+ struct dirent *dent;
+ char *name = NULL;
+ char *subs[] = { "cur", "new", NULL };
+ char **sub;
+ int ret = -1;
+
+ for (sub = subs; *sub; ++sub) {
+ free(name);
+ name = xstrfmt("%s/%s", path, *sub);
+ if (!(dir = opendir(name))) {
+ if (errno == ENOENT)
+ continue;
+ error_errno("cannot opendir %s", name);
+ goto out;
+ }
+
+ while ((dent = readdir(dir)) != NULL) {
+ if (dent->d_name[0] == '.')
+ continue;
+ free(name);
+ name = xstrfmt("%s/%s", *sub, dent->d_name);
+ string_list_insert(list, name);
+ }
+
+ closedir(dir);
+ }
+
+ ret = 0;
+
+out:
+ free(name);
+ return ret;
+}
+
+static int maildir_filename_cmp(const char *a, const char *b)
+{
+ while (*a && *b) {
+ if (isdigit(*a) && isdigit(*b)) {
+ long int na, nb;
+ na = strtol(a, (char **)&a, 10);
+ nb = strtol(b, (char **)&b, 10);
+ if (na != nb)
+ return na - nb;
+ /* strtol advanced our pointers */
+ }
+ else {
+ if (*a != *b)
+ return (unsigned char)*a - (unsigned char)*b;
+ a++;
+ b++;
+ }
+ }
+ return (unsigned char)*a - (unsigned char)*b;
+}
+
+static int split_maildir(const char *maildir, const char *dir,
+ int nr_prec, int skip)
+{
+ char *file = NULL;
+ FILE *f = NULL;
+ int ret = -1;
+ int i;
+ struct string_list list = STRING_LIST_INIT_DUP;
+
+ list.cmp = maildir_filename_cmp;
+
+ if (populate_maildir_list(&list, maildir) < 0)
+ goto out;
+
+ for (i = 0; i < list.nr; i++) {
+ char *name;
+
+ free(file);
+ file = xstrfmt("%s/%s", maildir, list.items[i].string);
+
+ f = fopen(file, "r");
+ if (!f) {
+ error_errno("cannot open mail %s", file);
+ goto out;
+ }
+
+ if (strbuf_getwholeline(&buf, f, '\n')) {
+ error_errno("cannot read mail %s", file);
+ goto out;
+ }
+
+ name = xstrfmt("%s/%0*d", dir, nr_prec, ++skip);
+ split_one(f, name, 1);
+ free(name);
+
+ fclose(f);
+ f = NULL;
+ }
+
+ ret = skip;
+out:
+ if (f)
+ fclose(f);
+ free(file);
+ string_list_clear(&list, 1);
+ return ret;
+}
+
+static int split_mbox(const char *file, const char *dir, int allow_bare,
+ int nr_prec, int skip)
+{
+ int ret = -1;
+ int peek;
+
+ FILE *f = !strcmp(file, "-") ? stdin : fopen(file, "r");
+ int file_done = 0;
+
+ if (!f) {
+ error_errno("cannot open mbox %s", file);
+ goto out;
+ }
+
+ /* check for a tty only after fopen() is known to have succeeded */
+ if (isatty(fileno(f)))
+ warning(_("reading patches from stdin/tty..."));
+
+ do {
+ peek = fgetc(f);
+ if (peek == EOF) {
+ if (f == stdin)
+ /* empty stdin is OK */
+ ret = skip;
+ else {
+ fclose(f);
+ error(_("empty mbox: '%s'"), file);
+ }
+ goto out;
+ }
+ } while (isspace(peek));
+ ungetc(peek, f);
+
+ if (strbuf_getwholeline(&buf, f, '\n')) {
+ /* empty stdin is OK */
+ if (f != stdin) {
+ error("cannot read mbox %s", file);
+ goto out;
+ }
+ file_done = 1;
+ }
+
+ while (!file_done) {
+ char *name = xstrfmt("%s/%0*d", dir, nr_prec, ++skip);
+ file_done = split_one(f, name, allow_bare);
+ free(name);
+ }
+
+ if (f != stdin)
+ fclose(f);
+
+ ret = skip;
+out:
+ return ret;
+}
+
+int cmd_mailsplit(int argc, const char **argv, const char *prefix)
+{
+ int nr = 0, nr_prec = 4, num = 0;
+ int allow_bare = 0;
+ const char *dir = NULL;
+ const char **argp;
+ static const char *stdin_only[] = { "-", NULL };
+
+ for (argp = argv+1; *argp; argp++) {
+ const char *arg = *argp;
+
+ if (arg[0] != '-')
+ break;
+ /* do flags here */
+ if ( arg[1] == 'd' ) {
+ nr_prec = strtol(arg+2, NULL, 10);
+ if (nr_prec < 3 || 10 <= nr_prec)
+ usage(git_mailsplit_usage);
+ continue;
+ } else if ( arg[1] == 'f' ) {
+ nr = strtol(arg+2, NULL, 10);
+ } else if ( arg[1] == 'h' ) {
+ usage(git_mailsplit_usage);
+ } else if ( arg[1] == 'b' && !arg[2] ) {
+ allow_bare = 1;
+ } else if (!strcmp(arg, "--keep-cr")) {
+ keep_cr = 1;
+ } else if ( arg[1] == 'o' && arg[2] ) {
+ dir = arg+2;
+ } else if (!strcmp(arg, "--mboxrd")) {
+ mboxrd = 1;
+ } else if ( arg[1] == '-' && !arg[2] ) {
+ argp++; /* -- marks end of options */
+ break;
+ } else {
+ die("unknown option: %s", arg);
+ }
+ }
+
+ if ( !dir ) {
+ /* Backwards compatibility: if no -o specified, accept
+ <mbox> <dir> or just <dir> */
+ switch (argc - (argp-argv)) {
+ case 1:
+ dir = argp[0];
+ argp = stdin_only;
+ break;
+ case 2:
+ stdin_only[0] = argp[0];
+ dir = argp[1];
+ argp = stdin_only;
+ break;
+ default:
+ usage(git_mailsplit_usage);
+ }
+ } else {
+ /* New usage: if no more argument, parse stdin */
+ if ( !*argp )
+ argp = stdin_only;
+ }
+
+ while (*argp) {
+ const char *arg = *argp++;
+ struct stat argstat;
+ int ret = 0;
+
+ if (arg[0] == '-' && arg[1] == 0) {
+ ret = split_mbox(arg, dir, allow_bare, nr_prec, nr);
+ if (ret < 0) {
+ error("cannot split patches from stdin");
+ return 1;
+ }
+ num += (ret - nr);
+ nr = ret;
+ continue;
+ }
+
+ if (stat(arg, &argstat) == -1) {
+ error_errno("cannot stat %s", arg);
+ return 1;
+ }
+
+ if (S_ISDIR(argstat.st_mode))
+ ret = split_maildir(arg, dir, nr_prec, nr);
+ else
+ ret = split_mbox(arg, dir, allow_bare, nr_prec, nr);
+
+ if (ret < 0) {
+ error("cannot split patches from %s", arg);
+ return 1;
+ }
+ num += (ret - nr);
+ nr = ret;
+ }
+
+ printf("%d\n", num);
+
+ return 0;
+}
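
maildir_filename_cmp() above compares runs of digits numerically so that
maildir entries are processed in delivery order ("2" before "10"), which plain
strcmp() would get wrong. A stand-alone re-implementation of that comparison
with a small demonstration (not part of the upstream file):

    #include <stdio.h>
    #include <stdlib.h>
    #include <ctype.h>

    static int numeric_aware_cmp(const char *a, const char *b)
    {
        while (*a && *b) {
            if (isdigit((unsigned char)*a) && isdigit((unsigned char)*b)) {
                long na = strtol(a, (char **)&a, 10);
                long nb = strtol(b, (char **)&b, 10);
                if (na != nb)
                    return na < nb ? -1 : 1;
                /* equal numbers: strtol already advanced both pointers */
            } else {
                if (*a != *b)
                    return (unsigned char)*a - (unsigned char)*b;
                a++;
                b++;
            }
        }
        return (unsigned char)*a - (unsigned char)*b;
    }

    int main(void)
    {
        printf("%d\n", numeric_aware_cmp("msg.2", "msg.10") < 0);   /* 1: 2 sorts first */
        printf("%d\n", numeric_aware_cmp("msg.10", "msg.2") > 0);   /* 1 */
        return 0;
    }
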
diff --git a/builtin/merge-base.c b/builtin/merge-base.c
new file mode 100644
index 0000000..6f3941f
--- /dev/null
+++ b/builtin/merge-base.c
@@ -0,0 +1,193 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "commit.h"
+#include "refs.h"
+#include "diff.h"
+#include "revision.h"
+#include "parse-options.h"
+#include "repository.h"
+#include "commit-reach.h"
+
+static int show_merge_base(struct commit **rev, int rev_nr, int show_all)
+{
+ struct commit_list *result, *r;
+
+ result = get_merge_bases_many_dirty(rev[0], rev_nr - 1, rev + 1);
+
+ if (!result)
+ return 1;
+
+ for (r = result; r; r = r->next) {
+ printf("%s\n", oid_to_hex(&r->item->object.oid));
+ if (!show_all)
+ break;
+ }
+
+ free_commit_list(result);
+ return 0;
+}
+
+static const char * const merge_base_usage[] = {
+ N_("git merge-base [-a | --all] <commit> <commit>..."),
+ N_("git merge-base [-a | --all] --octopus <commit>..."),
+ N_("git merge-base --is-ancestor <commit> <commit>"),
+ N_("git merge-base --independent <commit>..."),
+ N_("git merge-base --fork-point <ref> [<commit>]"),
+ NULL
+};
+
+static struct commit *get_commit_reference(const char *arg)
+{
+ struct object_id revkey;
+ struct commit *r;
+
+ if (get_oid(arg, &revkey))
+ die("Not a valid object name %s", arg);
+ r = lookup_commit_reference(the_repository, &revkey);
+ if (!r)
+ die("Not a valid commit name %s", arg);
+
+ return r;
+}
+
+static int handle_independent(int count, const char **args)
+{
+ struct commit_list *revs = NULL, *rev;
+ int i;
+
+ for (i = count - 1; i >= 0; i--)
+ commit_list_insert(get_commit_reference(args[i]), &revs);
+
+ reduce_heads_replace(&revs);
+
+ if (!revs)
+ return 1;
+
+ for (rev = revs; rev; rev = rev->next)
+ printf("%s\n", oid_to_hex(&rev->item->object.oid));
+
+ free_commit_list(revs);
+ return 0;
+}
+
+static int handle_octopus(int count, const char **args, int show_all)
+{
+ struct commit_list *revs = NULL;
+ struct commit_list *result, *rev;
+ int i;
+
+ for (i = count - 1; i >= 0; i--)
+ commit_list_insert(get_commit_reference(args[i]), &revs);
+
+ result = get_octopus_merge_bases(revs);
+ free_commit_list(revs);
+ reduce_heads_replace(&result);
+
+ if (!result)
+ return 1;
+
+ for (rev = result; rev; rev = rev->next) {
+ printf("%s\n", oid_to_hex(&rev->item->object.oid));
+ if (!show_all)
+ break;
+ }
+
+ free_commit_list(result);
+ return 0;
+}
+
+static int handle_is_ancestor(int argc, const char **argv)
+{
+ struct commit *one, *two;
+
+ if (argc != 2)
+ die("--is-ancestor takes exactly two commits");
+ one = get_commit_reference(argv[0]);
+ two = get_commit_reference(argv[1]);
+ if (in_merge_bases(one, two))
+ return 0;
+ else
+ return 1;
+}
+
+static int handle_fork_point(int argc, const char **argv)
+{
+ struct object_id oid;
+ struct commit *derived, *fork_point;
+ const char *commitname;
+
+ commitname = (argc == 2) ? argv[1] : "HEAD";
+ if (get_oid(commitname, &oid))
+ die("Not a valid object name: '%s'", commitname);
+
+ derived = lookup_commit_reference(the_repository, &oid);
+
+ fork_point = get_fork_point(argv[0], derived);
+
+ if (!fork_point)
+ return 1;
+
+ printf("%s\n", oid_to_hex(&fork_point->object.oid));
+ return 0;
+}
+
+int cmd_merge_base(int argc, const char **argv, const char *prefix)
+{
+ struct commit **rev;
+ int rev_nr = 0;
+ int show_all = 0;
+ int cmdmode = 0;
+ int ret;
+
+ struct option options[] = {
+ OPT_BOOL('a', "all", &show_all, N_("output all common ancestors")),
+ OPT_CMDMODE(0, "octopus", &cmdmode,
+ N_("find ancestors for a single n-way merge"), 'o'),
+ OPT_CMDMODE(0, "independent", &cmdmode,
+ N_("list revs not reachable from others"), 'r'),
+ OPT_CMDMODE(0, "is-ancestor", &cmdmode,
+ N_("is the first one ancestor of the other?"), 'a'),
+ OPT_CMDMODE(0, "fork-point", &cmdmode,
+ N_("find where <commit> forked from reflog of <ref>"), 'f'),
+ OPT_END()
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, options, merge_base_usage, 0);
+
+ if (cmdmode == 'a') {
+ if (argc < 2)
+ usage_with_options(merge_base_usage, options);
+ if (show_all)
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--is-ancestor", "--all");
+ return handle_is_ancestor(argc, argv);
+ }
+
+ if (cmdmode == 'r' && show_all)
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--independent", "--all");
+
+ if (cmdmode == 'o')
+ return handle_octopus(argc, argv, show_all);
+
+ if (cmdmode == 'r')
+ return handle_independent(argc, argv);
+
+ if (cmdmode == 'f') {
+ if (argc < 1 || 2 < argc)
+ usage_with_options(merge_base_usage, options);
+ return handle_fork_point(argc, argv);
+ }
+
+ if (argc < 2)
+ usage_with_options(merge_base_usage, options);
+
+ ALLOC_ARRAY(rev, argc);
+ while (argc-- > 0)
+ rev[rev_nr++] = get_commit_reference(*argv++);
+ ret = show_merge_base(rev, rev_nr, show_all);
+ free(rev);
+ return ret;
+}
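
All of the handlers above share one exit-status convention: 0 when something
was found (a merge base, an ancestor relationship, a fork point) and 1 when
not, so shell scripts can use the command directly in a conditional. A tiny
stand-alone sketch of mapping a predicate onto that convention (the toy
relation below is purely illustrative; it is not how in_merge_bases() works):

    #include <stdio.h>

    /* toy stand-in for "is 'a' an ancestor of 'b'?" */
    static int is_ancestor(int a, int b)
    {
        return a <= b;
    }

    int main(void)
    {
        int found = is_ancestor(3, 5);

        if (!found)
            fprintf(stderr, "not an ancestor\n");
        return found ? 0 : 1;   /* 0 = yes, 1 = no, like --is-ancestor */
    }
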
diff --git a/builtin/merge-file.c b/builtin/merge-file.c
new file mode 100644
index 0000000..c923bbf
--- /dev/null
+++ b/builtin/merge-file.c
@@ -0,0 +1,124 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "xdiff/xdiff.h"
+#include "xdiff-interface.h"
+#include "parse-options.h"
+
+static const char *const merge_file_usage[] = {
+ N_("git merge-file [<options>] [-L <name1> [-L <orig> [-L <name2>]]] <file1> <orig-file> <file2>"),
+ NULL
+};
+
+static int label_cb(const struct option *opt, const char *arg, int unset)
+{
+ static int label_count = 0;
+ const char **names = (const char **)opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (label_count >= 3)
+ return error("too many labels on the command line");
+ names[label_count++] = arg;
+ return 0;
+}
+
+int cmd_merge_file(int argc, const char **argv, const char *prefix)
+{
+ const char *names[3] = { 0 };
+ mmfile_t mmfs[3] = { 0 };
+ mmbuffer_t result = { 0 };
+ xmparam_t xmp = { 0 };
+ int ret = 0, i = 0, to_stdout = 0;
+ int quiet = 0;
+ struct option options[] = {
+ OPT_BOOL('p', "stdout", &to_stdout, N_("send results to standard output")),
+ OPT_SET_INT(0, "diff3", &xmp.style, N_("use a diff3 based merge"), XDL_MERGE_DIFF3),
+ OPT_SET_INT(0, "zdiff3", &xmp.style, N_("use a zealous diff3 based merge"),
+ XDL_MERGE_ZEALOUS_DIFF3),
+ OPT_SET_INT(0, "ours", &xmp.favor, N_("for conflicts, use our version"),
+ XDL_MERGE_FAVOR_OURS),
+ OPT_SET_INT(0, "theirs", &xmp.favor, N_("for conflicts, use their version"),
+ XDL_MERGE_FAVOR_THEIRS),
+ OPT_SET_INT(0, "union", &xmp.favor, N_("for conflicts, use a union version"),
+ XDL_MERGE_FAVOR_UNION),
+ OPT_INTEGER(0, "marker-size", &xmp.marker_size,
+ N_("for conflicts, use this marker size")),
+ OPT__QUIET(&quiet, N_("do not warn about conflicts")),
+ OPT_CALLBACK('L', NULL, names, N_("name"),
+ N_("set labels for file1/orig-file/file2"), &label_cb),
+ OPT_END(),
+ };
+
+ xmp.level = XDL_MERGE_ZEALOUS_ALNUM;
+ xmp.style = 0;
+ xmp.favor = 0;
+
+ if (startup_info->have_repository) {
+ /* Read the configuration file */
+ git_config(git_xmerge_config, NULL);
+ if (0 <= git_xmerge_style)
+ xmp.style = git_xmerge_style;
+ }
+
+ argc = parse_options(argc, argv, prefix, options, merge_file_usage, 0);
+ if (argc != 3)
+ usage_with_options(merge_file_usage, options);
+ if (quiet) {
+ if (!freopen("/dev/null", "w", stderr))
+ return error_errno("failed to redirect stderr to /dev/null");
+ }
+
+ for (i = 0; i < 3; i++) {
+ char *fname;
+ mmfile_t *mmf = mmfs + i;
+
+ if (!names[i])
+ names[i] = argv[i];
+
+ fname = prefix_filename(prefix, argv[i]);
+
+ if (read_mmfile(mmf, fname))
+ ret = -1;
+ else if (mmf->size > MAX_XDIFF_SIZE ||
+ buffer_is_binary(mmf->ptr, mmf->size))
+ ret = error("Cannot merge binary files: %s",
+ argv[i]);
+
+ free(fname);
+ if (ret)
+ goto cleanup;
+
+ }
+
+ xmp.ancestor = names[1];
+ xmp.file1 = names[0];
+ xmp.file2 = names[2];
+ ret = xdl_merge(mmfs + 1, mmfs + 0, mmfs + 2, &xmp, &result);
+
+ if (ret >= 0) {
+ const char *filename = argv[0];
+ char *fpath = prefix_filename(prefix, argv[0]);
+ FILE *f = to_stdout ? stdout : fopen(fpath, "wb");
+
+ if (!f)
+ ret = error_errno("Could not open %s for writing",
+ filename);
+ else if (result.size &&
+ fwrite(result.ptr, result.size, 1, f) != 1)
+ ret = error_errno("Could not write to %s", filename);
+ else if (fclose(f))
+ ret = error_errno("Could not close %s", filename);
+ free(result.ptr);
+ free(fpath);
+ }
+
+ if (ret > 127)
+ ret = 127;
+
+cleanup:
+ for (i = 0; i < 3; i++)
+ free(mmfs[i].ptr);
+
+ return ret;
+}
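
cmd_merge_file() above returns the conflict-hunk count reported by xdl_merge(),
clamped to 127 so the value still fits in the exit status a shell can observe
(larger values would collide with the 128+signal convention). A minimal
stand-alone sketch of that clamping (mapping errors to 255 is an assumption
made for the sketch, not what git itself does):

    #include <stdio.h>

    static int conflicts_to_exit_code(int conflicts)
    {
        if (conflicts < 0)
            return 255;        /* some error happened; pick a distinct code */
        if (conflicts > 127)
            return 127;        /* cap so the shell still sees "had conflicts" */
        return conflicts;
    }

    int main(void)
    {
        printf("%d\n", conflicts_to_exit_code(0));     /* clean merge */
        printf("%d\n", conflicts_to_exit_code(3));     /* three conflicts */
        printf("%d\n", conflicts_to_exit_code(4000));  /* capped at 127 */
        return 0;
    }
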
diff --git a/builtin/merge-index.c b/builtin/merge-index.c
new file mode 100644
index 0000000..452f833
--- /dev/null
+++ b/builtin/merge-index.c
@@ -0,0 +1,118 @@
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "run-command.h"
+
+static const char *pgm;
+static int one_shot, quiet;
+static int err;
+
+static int merge_entry(int pos, const char *path)
+{
+ int found;
+ const char *arguments[] = { pgm, "", "", "", path, "", "", "", NULL };
+ char hexbuf[4][GIT_MAX_HEXSZ + 1];
+ char ownbuf[4][60];
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ if (pos >= the_index.cache_nr)
+ die("git merge-index: %s not in the cache", path);
+ found = 0;
+ do {
+ const struct cache_entry *ce = the_index.cache[pos];
+ int stage = ce_stage(ce);
+
+ if (strcmp(ce->name, path))
+ break;
+ found++;
+ oid_to_hex_r(hexbuf[stage], &ce->oid);
+ xsnprintf(ownbuf[stage], sizeof(ownbuf[stage]), "%o", ce->ce_mode);
+ arguments[stage] = hexbuf[stage];
+ arguments[stage + 4] = ownbuf[stage];
+ } while (++pos < the_index.cache_nr);
+ if (!found)
+ die("git merge-index: %s not in the cache", path);
+
+ strvec_pushv(&cmd.args, arguments);
+ if (run_command(&cmd)) {
+ if (one_shot)
+ err++;
+ else {
+ if (!quiet)
+ die("merge program failed");
+ exit(1);
+ }
+ }
+ return found;
+}
+
+static void merge_one_path(const char *path)
+{
+ int pos = index_name_pos(&the_index, path, strlen(path));
+
+ /*
+ * If it already exists in the cache as stage0, it's
+ * already merged and there is nothing to do.
+ */
+ if (pos < 0)
+ merge_entry(-pos-1, path);
+}
+
+static void merge_all(void)
+{
+ int i;
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+ for (i = 0; i < the_index.cache_nr; i++) {
+ const struct cache_entry *ce = the_index.cache[i];
+ if (!ce_stage(ce))
+ continue;
+ i += merge_entry(i, ce->name)-1;
+ }
+}
+
+int cmd_merge_index(int argc, const char **argv, const char *prefix)
+{
+ int i, force_file = 0;
+
+ /* Without this we cannot rely on waitpid() to tell
+ * what happened to our children.
+ */
+ signal(SIGCHLD, SIG_DFL);
+
+ if (argc < 3)
+ usage("git merge-index [-o] [-q] <merge-program> (-a | [--] [<filename>...])");
+
+ repo_read_index(the_repository);
+
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+
+ i = 1;
+ if (!strcmp(argv[i], "-o")) {
+ one_shot = 1;
+ i++;
+ }
+ if (!strcmp(argv[i], "-q")) {
+ quiet = 1;
+ i++;
+ }
+ pgm = argv[i++];
+ for (; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!force_file && *arg == '-') {
+ if (!strcmp(arg, "--")) {
+ force_file = 1;
+ continue;
+ }
+ if (!strcmp(arg, "-a")) {
+ merge_all();
+ continue;
+ }
+ die("git merge-index: unknown option %s", arg);
+ }
+ merge_one_path(arg);
+ }
+ if (err && !quiet)
+ die("merge program failed");
+ return err;
+}
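
merge_entry() above invokes the user-supplied merge program with a fixed
argument layout: argv[1..3] carry the blob ids for stages 1 (base), 2 (ours)
and 3 (theirs), argv[4] is the path, and argv[5..7] carry the corresponding
octal modes, with empty strings for stages that are absent. A stand-alone
sketch that builds the same layout from made-up values (the ids and modes are
placeholders, not real objects):

    #include <stdio.h>

    int main(void)
    {
        const char *args[] = {
            "git-merge-one-file",                        /* argv[0]: merge program */
            "1111111111111111111111111111111111111111",  /* stage 1 (base) blob id */
            "2222222222222222222222222222222222222222",  /* stage 2 (ours) blob id */
            "",                                          /* stage 3 missing        */
            "path/to/file.c",                            /* argv[4]: the path      */
            "100644", "100644", "",                      /* argv[5..7]: modes 1..3 */
            NULL
        };
        int i;

        for (i = 0; args[i]; i++)
            printf("argv[%d] = '%s'\n", i, args[i]);
        return 0;
    }
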
diff --git a/builtin/merge-ours.c b/builtin/merge-ours.c
new file mode 100644
index 0000000..284eb48
--- /dev/null
+++ b/builtin/merge-ours.c
@@ -0,0 +1,32 @@
+/*
+ * Implementation of git-merge-ours.sh as builtin
+ *
+ * Copyright (c) 2007 Thomas Harning Jr
+ * Original:
+ * Original Copyright (c) 2005 Junio C Hamano
+ *
+ * Pretend we resolved the heads, but declare our tree trumps everybody else.
+ */
+#include "git-compat-util.h"
+#include "builtin.h"
+#include "diff.h"
+
+static const char builtin_merge_ours_usage[] =
+ "git merge-ours <base>... -- HEAD <remote>...";
+
+int cmd_merge_ours(int argc, const char **argv, const char *prefix)
+{
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(builtin_merge_ours_usage);
+
+ /*
+ * The contents of the current index becomes the tree we
+ * commit. The index must match HEAD, or this merge cannot go
+ * through.
+ */
+ if (repo_read_index(the_repository) < 0)
+ die_errno("read_cache failed");
+ if (index_differs_from(the_repository, "HEAD", NULL, 0))
+ return 2;
+ return 0;
+}
diff --git a/builtin/merge-recursive.c b/builtin/merge-recursive.c
new file mode 100644
index 0000000..b9acbf5
--- /dev/null
+++ b/builtin/merge-recursive.c
@@ -0,0 +1,92 @@
+#include "cache.h"
+#include "builtin.h"
+#include "commit.h"
+#include "tag.h"
+#include "merge-recursive.h"
+#include "xdiff-interface.h"
+
+static const char builtin_merge_recursive_usage[] =
+ "git %s <base>... -- <head> <remote> ...";
+
+static char *better_branch_name(const char *branch)
+{
+ static char githead_env[8 + GIT_MAX_HEXSZ + 1];
+ char *name;
+
+ if (strlen(branch) != the_hash_algo->hexsz)
+ return xstrdup(branch);
+ xsnprintf(githead_env, sizeof(githead_env), "GITHEAD_%s", branch);
+ name = getenv(githead_env);
+ return xstrdup(name ? name : branch);
+}
+
+int cmd_merge_recursive(int argc, const char **argv, const char *prefix)
+{
+ const struct object_id *bases[21];
+ unsigned bases_count = 0;
+ int i, failed;
+ struct object_id h1, h2;
+ struct merge_options o;
+ char *better1, *better2;
+ struct commit *result;
+
+ init_merge_options(&o, the_repository);
+ if (argv[0] && ends_with(argv[0], "-subtree"))
+ o.subtree_shift = "";
+
+ if (argc < 4)
+ usagef(builtin_merge_recursive_usage, argv[0]);
+
+ for (i = 1; i < argc; ++i) {
+ const char *arg = argv[i];
+
+ if (starts_with(arg, "--")) {
+ if (!arg[2])
+ break;
+ if (parse_merge_opt(&o, arg + 2))
+ die(_("unknown option %s"), arg);
+ continue;
+ }
+ if (bases_count < ARRAY_SIZE(bases)-1) {
+ struct object_id *oid = xmalloc(sizeof(struct object_id));
+ if (get_oid(argv[i], oid))
+ die(_("could not parse object '%s'"), argv[i]);
+ bases[bases_count++] = oid;
+ }
+ else
+ warning(Q_("cannot handle more than %d base. "
+ "Ignoring %s.",
+ "cannot handle more than %d bases. "
+ "Ignoring %s.",
+ ARRAY_SIZE(bases)-1),
+ (int)ARRAY_SIZE(bases)-1, argv[i]);
+ }
+ if (argc - i != 3) /* "--" "<head>" "<remote>" */
+ die(_("not handling anything other than two heads merge."));
+
+ if (repo_read_index_unmerged(the_repository))
+ die_resolve_conflict("merge");
+
+ o.branch1 = argv[++i];
+ o.branch2 = argv[++i];
+
+ if (get_oid(o.branch1, &h1))
+ die(_("could not resolve ref '%s'"), o.branch1);
+ if (get_oid(o.branch2, &h2))
+ die(_("could not resolve ref '%s'"), o.branch2);
+
+ o.branch1 = better1 = better_branch_name(o.branch1);
+ o.branch2 = better2 = better_branch_name(o.branch2);
+
+ if (o.verbosity >= 3)
+ printf(_("Merging %s with %s\n"), o.branch1, o.branch2);
+
+ failed = merge_recursive_generic(&o, &h1, &h2, bases_count, bases, &result);
+
+ free(better1);
+ free(better2);
+
+ if (failed < 0)
+ return 128; /* die() error code */
+ return failed;
+}
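
better_branch_name() above depends on the caller (git merge) exporting
GITHEAD_<commit-id> environment variables that map a raw object name back to a
human-readable branch name; anything that is not a full hex id is passed
through unchanged. A rough stand-alone sketch of that lookup (the fixed
40-character check stands in for the_hash_algo->hexsz):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const char *friendly_name(const char *branch)
    {
        static char env_name[8 + 64 + 1];   /* "GITHEAD_" + hex id + NUL */
        const char *mapped;

        if (strlen(branch) != 40)           /* not a full id: already friendly */
            return branch;
        snprintf(env_name, sizeof(env_name), "GITHEAD_%s", branch);
        mapped = getenv(env_name);
        return mapped ? mapped : branch;
    }

    int main(void)
    {
        /* pretend git merge exported this before running the strategy */
        setenv("GITHEAD_2222222222222222222222222222222222222222", "topic", 1);
        puts(friendly_name("2222222222222222222222222222222222222222")); /* topic */
        puts(friendly_name("main"));                                     /* main  */
        return 0;
    }
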
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
new file mode 100644
index 0000000..e376708
--- /dev/null
+++ b/builtin/merge-tree.c
@@ -0,0 +1,587 @@
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "tree-walk.h"
+#include "xdiff-interface.h"
+#include "help.h"
+#include "commit-reach.h"
+#include "merge-ort.h"
+#include "object-store.h"
+#include "parse-options.h"
+#include "repository.h"
+#include "blob.h"
+#include "exec-cmd.h"
+#include "merge-blobs.h"
+#include "quote.h"
+
+static int line_termination = '\n';
+
+struct merge_list {
+ struct merge_list *next;
+ struct merge_list *link; /* other stages for this object */
+
+ unsigned int stage : 2;
+ unsigned int mode;
+ const char *path;
+ struct blob *blob;
+};
+
+static struct merge_list *merge_result, **merge_result_end = &merge_result;
+
+static void add_merge_entry(struct merge_list *entry)
+{
+ *merge_result_end = entry;
+ merge_result_end = &entry->next;
+}
+
+static void trivial_merge_trees(struct tree_desc t[3], const char *base);
+
+static const char *explanation(struct merge_list *entry)
+{
+ switch (entry->stage) {
+ case 0:
+ return "merged";
+ case 3:
+ return "added in remote";
+ case 2:
+ if (entry->link)
+ return "added in both";
+ return "added in local";
+ }
+
+ /* Existed in base */
+ entry = entry->link;
+ if (!entry)
+ return "removed in both";
+
+ if (entry->link)
+ return "changed in both";
+
+ if (entry->stage == 3)
+ return "removed in local";
+ return "removed in remote";
+}
+
+static void *result(struct merge_list *entry, unsigned long *size)
+{
+ enum object_type type;
+ struct blob *base, *our, *their;
+ const char *path = entry->path;
+
+ if (!entry->stage)
+ return read_object_file(&entry->blob->object.oid, &type, size);
+ base = NULL;
+ if (entry->stage == 1) {
+ base = entry->blob;
+ entry = entry->link;
+ }
+ our = NULL;
+ if (entry && entry->stage == 2) {
+ our = entry->blob;
+ entry = entry->link;
+ }
+ their = NULL;
+ if (entry)
+ their = entry->blob;
+ return merge_blobs(the_repository->index, path,
+ base, our, their, size);
+}
+
+static void *origin(struct merge_list *entry, unsigned long *size)
+{
+ enum object_type type;
+ while (entry) {
+ if (entry->stage == 2)
+ return read_object_file(&entry->blob->object.oid,
+ &type, size);
+ entry = entry->link;
+ }
+ return NULL;
+}
+
+static int show_outf(void *priv_, mmbuffer_t *mb, int nbuf)
+{
+ int i;
+ for (i = 0; i < nbuf; i++)
+ printf("%.*s", (int) mb[i].size, mb[i].ptr);
+ return 0;
+}
+
+static void show_diff(struct merge_list *entry)
+{
+ unsigned long size;
+ mmfile_t src, dst;
+ xpparam_t xpp;
+ xdemitconf_t xecfg;
+ xdemitcb_t ecb = { .out_line = show_outf };
+
+ memset(&xpp, 0, sizeof(xpp));
+ xpp.flags = 0;
+ memset(&xecfg, 0, sizeof(xecfg));
+ xecfg.ctxlen = 3;
+
+ src.ptr = origin(entry, &size);
+ if (!src.ptr)
+ size = 0;
+ src.size = size;
+ dst.ptr = result(entry, &size);
+ if (!dst.ptr)
+ size = 0;
+ dst.size = size;
+ if (xdi_diff(&src, &dst, &xpp, &xecfg, &ecb))
+ die("unable to generate diff");
+ free(src.ptr);
+ free(dst.ptr);
+}
+
+static void show_result_list(struct merge_list *entry)
+{
+ printf("%s\n", explanation(entry));
+ do {
+ struct merge_list *link = entry->link;
+ static const char *desc[4] = { "result", "base", "our", "their" };
+ printf(" %-6s %o %s %s\n", desc[entry->stage], entry->mode, oid_to_hex(&entry->blob->object.oid), entry->path);
+ entry = link;
+ } while (entry);
+}
+
+static void show_result(void)
+{
+ struct merge_list *walk;
+
+ walk = merge_result;
+ while (walk) {
+ show_result_list(walk);
+ show_diff(walk);
+ walk = walk->next;
+ }
+}
+
+/* An empty entry never compares same, not even to another empty entry */
+static int same_entry(struct name_entry *a, struct name_entry *b)
+{
+ return !is_null_oid(&a->oid) &&
+ !is_null_oid(&b->oid) &&
+ oideq(&a->oid, &b->oid) &&
+ a->mode == b->mode;
+}
+
+static int both_empty(struct name_entry *a, struct name_entry *b)
+{
+ return is_null_oid(&a->oid) && is_null_oid(&b->oid);
+}
+
+static struct merge_list *create_entry(unsigned stage, unsigned mode, const struct object_id *oid, const char *path)
+{
+ struct merge_list *res = xcalloc(1, sizeof(*res));
+
+ res->stage = stage;
+ res->path = path;
+ res->mode = mode;
+ res->blob = lookup_blob(the_repository, oid);
+ return res;
+}
+
+static char *traverse_path(const struct traverse_info *info, const struct name_entry *n)
+{
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_make_traverse_path(&buf, info, n->path, n->pathlen);
+ return strbuf_detach(&buf, NULL);
+}
+
+static void resolve(const struct traverse_info *info, struct name_entry *ours, struct name_entry *result)
+{
+ struct merge_list *orig, *final;
+ const char *path;
+
+ /* If it's already ours, don't bother showing it */
+ if (!ours)
+ return;
+
+ path = traverse_path(info, result);
+ orig = create_entry(2, ours->mode, &ours->oid, path);
+ final = create_entry(0, result->mode, &result->oid, path);
+
+ final->link = orig;
+
+ add_merge_entry(final);
+}
+
+static void unresolved_directory(const struct traverse_info *info,
+ struct name_entry n[3])
+{
+ struct repository *r = the_repository;
+ char *newbase;
+ struct name_entry *p;
+ struct tree_desc t[3];
+ void *buf0, *buf1, *buf2;
+
+ for (p = n; p < n + 3; p++) {
+ if (p->mode && S_ISDIR(p->mode))
+ break;
+ }
+ if (n + 3 <= p)
+ return; /* there is no tree here */
+
+ newbase = traverse_path(info, p);
+
+#define ENTRY_OID(e) (((e)->mode && S_ISDIR((e)->mode)) ? &(e)->oid : NULL)
+ buf0 = fill_tree_descriptor(r, t + 0, ENTRY_OID(n + 0));
+ buf1 = fill_tree_descriptor(r, t + 1, ENTRY_OID(n + 1));
+ buf2 = fill_tree_descriptor(r, t + 2, ENTRY_OID(n + 2));
+#undef ENTRY_OID
+
+ trivial_merge_trees(t, newbase);
+
+ free(buf0);
+ free(buf1);
+ free(buf2);
+ free(newbase);
+}
+
+
+static struct merge_list *link_entry(unsigned stage, const struct traverse_info *info, struct name_entry *n, struct merge_list *entry)
+{
+ const char *path;
+ struct merge_list *link;
+
+ if (!n->mode)
+ return entry;
+ if (entry)
+ path = entry->path;
+ else
+ path = traverse_path(info, n);
+ link = create_entry(stage, n->mode, &n->oid, path);
+ link->link = entry;
+ return link;
+}
+
+static void unresolved(const struct traverse_info *info, struct name_entry n[3])
+{
+ struct merge_list *entry = NULL;
+ int i;
+ unsigned dirmask = 0, mask = 0;
+
+ for (i = 0; i < 3; i++) {
+ mask |= (1 << i);
+ /*
+ * Treat missing entries as directories so that we return
+ * after unresolved_directory has handled this.
+ */
+ if (!n[i].mode || S_ISDIR(n[i].mode))
+ dirmask |= (1 << i);
+ }
+
+ unresolved_directory(info, n);
+
+ if (dirmask == mask)
+ return;
+
+ if (n[2].mode && !S_ISDIR(n[2].mode))
+ entry = link_entry(3, info, n + 2, entry);
+ if (n[1].mode && !S_ISDIR(n[1].mode))
+ entry = link_entry(2, info, n + 1, entry);
+ if (n[0].mode && !S_ISDIR(n[0].mode))
+ entry = link_entry(1, info, n + 0, entry);
+
+ add_merge_entry(entry);
+}
+
+/*
+ * Merge two trees together (t[1] and t[2]), using a common base (t[0])
+ * as the origin.
+ *
+ * This walks the (sorted) trees in lock-step, checking every possible
+ * name. Note that directories automatically sort differently from other
+ * files (see "base_name_compare"), so you'll never see file/directory
+ * conflicts, because they won't ever compare the same.
+ *
+ * IOW, if a directory changes to a filename, it will automatically be
+ * seen as the directory going away, and the filename being created.
+ *
+ * Think of this as a three-way diff.
+ *
+ * The output will be either:
+ * - successful merge
+ * "0 mode sha1 filename"
+ * NOTE NOTE NOTE! FIXME! We really really need to walk the index
+ * in parallel with this too!
+ *
+ * - conflict:
+ * "1 mode sha1 filename"
+ * "2 mode sha1 filename"
+ * "3 mode sha1 filename"
+ * where not all of the 1/2/3 lines may exist, of course.
+ *
+ * The successful merge rules are the same as for the three-way merge
+ * in git-read-tree.
+ */
+static int threeway_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *info)
+{
+ /* Same in both? */
+ if (same_entry(entry+1, entry+2) || both_empty(entry+1, entry+2)) {
+ /* Modified, added or removed identically */
+ resolve(info, NULL, entry+1);
+ return mask;
+ }
+
+ if (same_entry(entry+0, entry+1)) {
+ if (!is_null_oid(&entry[2].oid) && !S_ISDIR(entry[2].mode)) {
+ /* We did not touch, they modified -- take theirs */
+ resolve(info, entry+1, entry+2);
+ return mask;
+ }
+ /*
+ * If we did not touch a directory but they made it
+ * into a file, we fall through and unresolved()
+ * recurses down. Likewise for the opposite case.
+ */
+ }
+
+ if (same_entry(entry+0, entry+2) || both_empty(entry+0, entry+2)) {
+ /* We added, modified or removed, they did not touch -- take ours */
+ resolve(info, NULL, entry+1);
+ return mask;
+ }
+
+ unresolved(info, entry);
+ return mask;
+}
+
+static void trivial_merge_trees(struct tree_desc t[3], const char *base)
+{
+ struct traverse_info info;
+
+ setup_traverse_info(&info, base);
+ info.fn = threeway_callback;
+ traverse_trees(&the_index, 3, t, &info);
+}
+
+static void *get_tree_descriptor(struct repository *r,
+ struct tree_desc *desc,
+ const char *rev)
+{
+ struct object_id oid;
+ void *buf;
+
+ if (repo_get_oid(r, rev, &oid))
+ die("unknown rev %s", rev);
+ buf = fill_tree_descriptor(r, desc, &oid);
+ if (!buf)
+ die("%s is not a tree", rev);
+ return buf;
+}
+
+static int trivial_merge(const char *base,
+ const char *branch1,
+ const char *branch2)
+{
+ struct repository *r = the_repository;
+ struct tree_desc t[3];
+ void *buf1, *buf2, *buf3;
+
+ buf1 = get_tree_descriptor(r, t+0, base);
+ buf2 = get_tree_descriptor(r, t+1, branch1);
+ buf3 = get_tree_descriptor(r, t+2, branch2);
+ trivial_merge_trees(t, "");
+ free(buf1);
+ free(buf2);
+ free(buf3);
+
+ show_result();
+ return 0;
+}
+
+enum mode {
+ MODE_UNKNOWN,
+ MODE_TRIVIAL,
+ MODE_REAL,
+};
+
+struct merge_tree_options {
+ int mode;
+ int allow_unrelated_histories;
+ int show_messages;
+ int name_only;
+ int use_stdin;
+};
+
+static int real_merge(struct merge_tree_options *o,
+ const char *branch1, const char *branch2,
+ const char *prefix)
+{
+ struct commit *parent1, *parent2;
+ struct commit_list *merge_bases = NULL;
+ struct merge_options opt;
+ struct merge_result result = { 0 };
+ int show_messages = o->show_messages;
+
+ parent1 = get_merge_parent(branch1);
+ if (!parent1)
+ help_unknown_ref(branch1, "merge-tree",
+ _("not something we can merge"));
+
+ parent2 = get_merge_parent(branch2);
+ if (!parent2)
+ help_unknown_ref(branch2, "merge-tree",
+ _("not something we can merge"));
+
+ init_merge_options(&opt, the_repository);
+
+ opt.show_rename_progress = 0;
+
+ opt.branch1 = branch1;
+ opt.branch2 = branch2;
+
+ /*
+ * Get the merge bases, in reverse order; see comment above
+ * merge_incore_recursive in merge-ort.h
+ */
+ merge_bases = get_merge_bases(parent1, parent2);
+ if (!merge_bases && !o->allow_unrelated_histories)
+ die(_("refusing to merge unrelated histories"));
+ merge_bases = reverse_commit_list(merge_bases);
+
+ merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result);
+ if (result.clean < 0)
+ die(_("failure to merge"));
+
+ if (show_messages == -1)
+ show_messages = !result.clean;
+
+ if (o->use_stdin)
+ printf("%d%c", result.clean, line_termination);
+ printf("%s%c", oid_to_hex(&result.tree->object.oid), line_termination);
+ if (!result.clean) {
+ struct string_list conflicted_files = STRING_LIST_INIT_NODUP;
+ const char *last = NULL;
+ int i;
+
+ merge_get_conflicted_files(&result, &conflicted_files);
+ for (i = 0; i < conflicted_files.nr; i++) {
+ const char *name = conflicted_files.items[i].string;
+ struct stage_info *c = conflicted_files.items[i].util;
+ if (!o->name_only)
+ printf("%06o %s %d\t",
+ c->mode, oid_to_hex(&c->oid), c->stage);
+ else if (last && !strcmp(last, name))
+ continue;
+ write_name_quoted_relative(
+ name, prefix, stdout, line_termination);
+ last = name;
+ }
+ string_list_clear(&conflicted_files, 1);
+ }
+ if (show_messages) {
+ putchar(line_termination);
+ merge_display_update_messages(&opt, line_termination == '\0',
+ &result);
+ }
+ if (o->use_stdin)
+ putchar(line_termination);
+ merge_finalize(&opt, &result);
+ return !result.clean; /* result.clean < 0 handled above */
+}
+
+int cmd_merge_tree(int argc, const char **argv, const char *prefix)
+{
+ struct merge_tree_options o = { .show_messages = -1 };
+ int expected_remaining_argc;
+ int original_argc;
+
+ const char * const merge_tree_usage[] = {
+ N_("git merge-tree [--write-tree] [<options>] <branch1> <branch2>"),
+ N_("git merge-tree [--trivial-merge] <base-tree> <branch1> <branch2>"),
+ NULL
+ };
+ struct option mt_options[] = {
+ OPT_CMDMODE(0, "write-tree", &o.mode,
+ N_("do a real merge instead of a trivial merge"),
+ MODE_REAL),
+ OPT_CMDMODE(0, "trivial-merge", &o.mode,
+ N_("do a trivial merge only"), MODE_TRIVIAL),
+ OPT_BOOL(0, "messages", &o.show_messages,
+ N_("also show informational/conflict messages")),
+ OPT_SET_INT('z', NULL, &line_termination,
+ N_("separate paths with the NUL character"), '\0'),
+ OPT_BOOL_F(0, "name-only",
+ &o.name_only,
+ N_("list filenames without modes/oids/stages"),
+ PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "allow-unrelated-histories",
+ &o.allow_unrelated_histories,
+ N_("allow merging unrelated histories"),
+ PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "stdin",
+ &o.use_stdin,
+ N_("perform multiple merges, one per line of input"),
+ PARSE_OPT_NONEG),
+ OPT_END()
+ };
+
+ /* Parse arguments */
+ original_argc = argc - 1; /* ignoring argv[0] */
+ argc = parse_options(argc, argv, prefix, mt_options,
+ merge_tree_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+
+ /* Handle --stdin */
+ if (o.use_stdin) {
+ struct strbuf buf = STRBUF_INIT;
+
+ if (o.mode == MODE_TRIVIAL)
+ die(_("--trivial-merge is incompatible with all other options"));
+ line_termination = '\0';
+ while (strbuf_getline_lf(&buf, stdin) != EOF) {
+ struct strbuf **split;
+ int result;
+
+ split = strbuf_split(&buf, ' ');
+ if (!split[0] || !split[1] || split[2])
+ die(_("malformed input line: '%s'."), buf.buf);
+ strbuf_rtrim(split[0]);
+ result = real_merge(&o, split[0]->buf, split[1]->buf, prefix);
+ if (result < 0)
+ die(_("merging cannot continue; got unclean result of %d"), result);
+ strbuf_list_free(split);
+ }
+ strbuf_release(&buf);
+ return 0;
+ }
+
+ /* Figure out which mode to use */
+ switch (o.mode) {
+ default:
+ BUG("unexpected command mode %d", o.mode);
+ case MODE_UNKNOWN:
+ switch (argc) {
+ default:
+ usage_with_options(merge_tree_usage, mt_options);
+ case 2:
+ o.mode = MODE_REAL;
+ break;
+ case 3:
+ o.mode = MODE_TRIVIAL;
+ break;
+ }
+ expected_remaining_argc = argc;
+ break;
+ case MODE_REAL:
+ expected_remaining_argc = 2;
+ break;
+ case MODE_TRIVIAL:
+ expected_remaining_argc = 3;
+ /* Removal of `--trivial-merge` is expected */
+ original_argc--;
+ break;
+ }
+ if (o.mode == MODE_TRIVIAL && argc < original_argc)
+ die(_("--trivial-merge is incompatible with all other options"));
+
+ if (argc != expected_remaining_argc)
+ usage_with_options(merge_tree_usage, mt_options);
+
+ /* Do the relevant type of merge */
+ if (o.mode == MODE_REAL)
+ return real_merge(&o, argv[0], argv[1], prefix);
+ else
+ return trivial_merge(argv[0], argv[1], argv[2]);
+}
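+
+/*
+ * A usage sketch for the --stdin mode handled above (the branch names
+ * are only examples):
+ *
+ *     printf 'topic1 main\ntopic2 main\n' |
+ *     git merge-tree --write-tree --stdin
+ *
+ * Each input line names exactly two branches separated by a single
+ * space.  For every line the command emits the clean flag and the
+ * resulting tree OID, followed by any conflict and message sections,
+ * with NUL used as the terminator throughout.
+ */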
diff --git a/builtin/merge.c b/builtin/merge.c
new file mode 100644
index 0000000..dd47437
--- /dev/null
+++ b/builtin/merge.c
@@ -0,0 +1,1793 @@
+/*
+ * Builtin "git merge"
+ *
+ * Copyright (c) 2008 Miklos Vajna <vmiklos@frugalware.org>
+ *
+ * Based on git-merge.sh by Junio C Hamano.
+ */
+
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "cache.h"
+#include "config.h"
+#include "parse-options.h"
+#include "builtin.h"
+#include "lockfile.h"
+#include "run-command.h"
+#include "hook.h"
+#include "diff.h"
+#include "diff-merges.h"
+#include "refs.h"
+#include "refspec.h"
+#include "commit.h"
+#include "diffcore.h"
+#include "revision.h"
+#include "unpack-trees.h"
+#include "cache-tree.h"
+#include "dir.h"
+#include "utf8.h"
+#include "log-tree.h"
+#include "color.h"
+#include "rerere.h"
+#include "help.h"
+#include "merge-recursive.h"
+#include "merge-ort-wrappers.h"
+#include "resolve-undo.h"
+#include "remote.h"
+#include "fmt-merge-msg.h"
+#include "gpg-interface.h"
+#include "sequencer.h"
+#include "string-list.h"
+#include "packfile.h"
+#include "tag.h"
+#include "alias.h"
+#include "branch.h"
+#include "commit-reach.h"
+#include "wt-status.h"
+#include "commit-graph.h"
+
+#define DEFAULT_TWOHEAD (1<<0)
+#define DEFAULT_OCTOPUS (1<<1)
+#define NO_FAST_FORWARD (1<<2)
+#define NO_TRIVIAL (1<<3)
+
+struct strategy {
+ const char *name;
+ unsigned attr;
+};
+
+static const char * const builtin_merge_usage[] = {
+ N_("git merge [<options>] [<commit>...]"),
+ "git merge --abort",
+ "git merge --continue",
+ NULL
+};
+
+static int show_diffstat = 1, shortlog_len = -1, squash;
+static int option_commit = -1;
+static int option_edit = -1;
+static int allow_trivial = 1, have_message, verify_signatures;
+static int check_trust_level = 1;
+static int overwrite_ignore = 1;
+static struct strbuf merge_msg = STRBUF_INIT;
+static struct strategy **use_strategies;
+static size_t use_strategies_nr, use_strategies_alloc;
+static const char **xopts;
+static size_t xopts_nr, xopts_alloc;
+static const char *branch;
+static char *branch_mergeoptions;
+static int verbosity;
+static int allow_rerere_auto;
+static int abort_current_merge;
+static int quit_current_merge;
+static int continue_current_merge;
+static int allow_unrelated_histories;
+static int show_progress = -1;
+static int default_to_upstream = 1;
+static int signoff;
+static const char *sign_commit;
+static int autostash;
+static int no_verify;
+static char *into_name;
+
+static struct strategy all_strategy[] = {
+ { "recursive", NO_TRIVIAL },
+ { "octopus", DEFAULT_OCTOPUS },
+ { "ort", DEFAULT_TWOHEAD | NO_TRIVIAL },
+ { "resolve", 0 },
+ { "ours", NO_FAST_FORWARD | NO_TRIVIAL },
+ { "subtree", NO_FAST_FORWARD | NO_TRIVIAL },
+};
+
+static const char *pull_twohead, *pull_octopus;
+
+enum ff_type {
+ FF_NO,
+ FF_ALLOW,
+ FF_ONLY
+};
+
+static enum ff_type fast_forward = FF_ALLOW;
+
+static const char *cleanup_arg;
+static enum commit_msg_cleanup_mode cleanup_mode;
+
+static int option_parse_message(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct strbuf *buf = opt->value;
+
+ if (unset)
+ strbuf_setlen(buf, 0);
+ else if (arg) {
+ strbuf_addf(buf, "%s%s", buf->len ? "\n\n" : "", arg);
+ have_message = 1;
+ } else
+ return error(_("switch `m' requires a value"));
+ return 0;
+}
+
+static enum parse_opt_result option_read_message(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg_not_used,
+ int unset)
+{
+ struct strbuf *buf = opt->value;
+ const char *arg;
+
+ BUG_ON_OPT_ARG(arg_not_used);
+ if (unset)
+ BUG("-F cannot be negated");
+
+ if (ctx->opt) {
+ arg = ctx->opt;
+ ctx->opt = NULL;
+ } else if (ctx->argc > 1) {
+ ctx->argc--;
+ arg = *++ctx->argv;
+ } else
+ return error(_("option `%s' requires a value"), opt->long_name);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ if (ctx->prefix && !is_absolute_path(arg))
+ arg = prefix_filename(ctx->prefix, arg);
+ if (strbuf_read_file(buf, arg, 0) < 0)
+ return error(_("could not read file '%s'"), arg);
+ have_message = 1;
+
+ return 0;
+}
+
+static struct strategy *get_strategy(const char *name)
+{
+ int i;
+ struct strategy *ret;
+ static struct cmdnames main_cmds, other_cmds;
+ static int loaded;
+ char *default_strategy = getenv("GIT_TEST_MERGE_ALGORITHM");
+
+ if (!name)
+ return NULL;
+
+ if (default_strategy &&
+ !strcmp(default_strategy, "ort") &&
+ !strcmp(name, "recursive")) {
+ name = "ort";
+ }
+
+ for (i = 0; i < ARRAY_SIZE(all_strategy); i++)
+ if (!strcmp(name, all_strategy[i].name))
+ return &all_strategy[i];
+
+ if (!loaded) {
+ struct cmdnames not_strategies;
+ loaded = 1;
+
+ memset(&not_strategies, 0, sizeof(struct cmdnames));
+ load_command_list("git-merge-", &main_cmds, &other_cmds);
+ for (i = 0; i < main_cmds.cnt; i++) {
+ int j, found = 0;
+ struct cmdname *ent = main_cmds.names[i];
+ for (j = 0; j < ARRAY_SIZE(all_strategy); j++)
+ if (!strncmp(ent->name, all_strategy[j].name, ent->len)
+ && !all_strategy[j].name[ent->len])
+ found = 1;
+ if (!found)
+ add_cmdname(&not_strategies, ent->name, ent->len);
+ }
+ exclude_cmds(&main_cmds, &not_strategies);
+ }
+ if (!is_in_cmdlist(&main_cmds, name) && !is_in_cmdlist(&other_cmds, name)) {
+ fprintf(stderr, _("Could not find merge strategy '%s'.\n"), name);
+ fprintf(stderr, _("Available strategies are:"));
+ for (i = 0; i < main_cmds.cnt; i++)
+ fprintf(stderr, " %s", main_cmds.names[i]->name);
+ fprintf(stderr, ".\n");
+ if (other_cmds.cnt) {
+ fprintf(stderr, _("Available custom strategies are:"));
+ for (i = 0; i < other_cmds.cnt; i++)
+ fprintf(stderr, " %s", other_cmds.names[i]->name);
+ fprintf(stderr, ".\n");
+ }
+ exit(1);
+ }
+
+ CALLOC_ARRAY(ret, 1);
+ ret->name = xstrdup(name);
+ ret->attr = NO_TRIVIAL;
+ return ret;
+}
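+
+/*
+ * For a name that is not built in, the lookup above falls back to
+ * external commands: a hypothetical "-s theirs" would only be accepted
+ * if a "git-merge-theirs" executable can be found through the normal
+ * command discovery, and such a strategy is treated as NO_TRIVIAL.
+ */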
+
+static void append_strategy(struct strategy *s)
+{
+ ALLOC_GROW(use_strategies, use_strategies_nr + 1, use_strategies_alloc);
+ use_strategies[use_strategies_nr++] = s;
+}
+
+static int option_parse_strategy(const struct option *opt,
+ const char *name, int unset)
+{
+ if (unset)
+ return 0;
+
+ append_strategy(get_strategy(name));
+ return 0;
+}
+
+static int option_parse_x(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset)
+ return 0;
+
+ ALLOC_GROW(xopts, xopts_nr + 1, xopts_alloc);
+ xopts[xopts_nr++] = xstrdup(arg);
+ return 0;
+}
+
+static int option_parse_n(const struct option *opt,
+ const char *arg, int unset)
+{
+ BUG_ON_OPT_ARG(arg);
+ show_diffstat = unset;
+ return 0;
+}
+
+static struct option builtin_merge_options[] = {
+ OPT_CALLBACK_F('n', NULL, NULL, NULL,
+ N_("do not show a diffstat at the end of the merge"),
+ PARSE_OPT_NOARG, option_parse_n),
+ OPT_BOOL(0, "stat", &show_diffstat,
+ N_("show a diffstat at the end of the merge")),
+ OPT_BOOL(0, "summary", &show_diffstat, N_("(synonym to --stat)")),
+ { OPTION_INTEGER, 0, "log", &shortlog_len, N_("n"),
+ N_("add (at most <n>) entries from shortlog to merge commit message"),
+ PARSE_OPT_OPTARG, NULL, DEFAULT_MERGE_LOG_LEN },
+ OPT_BOOL(0, "squash", &squash,
+ N_("create a single commit instead of doing a merge")),
+ OPT_BOOL(0, "commit", &option_commit,
+ N_("perform a commit if the merge succeeds (default)")),
+ OPT_BOOL('e', "edit", &option_edit,
+ N_("edit message before committing")),
+ OPT_CLEANUP(&cleanup_arg),
+ OPT_SET_INT(0, "ff", &fast_forward, N_("allow fast-forward (default)"), FF_ALLOW),
+ OPT_SET_INT_F(0, "ff-only", &fast_forward,
+ N_("abort if fast-forward is not possible"),
+ FF_ONLY, PARSE_OPT_NONEG),
+ OPT_RERERE_AUTOUPDATE(&allow_rerere_auto),
+ OPT_BOOL(0, "verify-signatures", &verify_signatures,
+ N_("verify that the named commit has a valid GPG signature")),
+ OPT_CALLBACK('s', "strategy", &use_strategies, N_("strategy"),
+ N_("merge strategy to use"), option_parse_strategy),
+ OPT_CALLBACK('X', "strategy-option", &xopts, N_("option=value"),
+ N_("option for selected merge strategy"), option_parse_x),
+ OPT_CALLBACK('m', "message", &merge_msg, N_("message"),
+ N_("merge commit message (for a non-fast-forward merge)"),
+ option_parse_message),
+ { OPTION_LOWLEVEL_CALLBACK, 'F', "file", &merge_msg, N_("path"),
+ N_("read message from file"), PARSE_OPT_NONEG,
+ NULL, 0, option_read_message },
+ OPT_STRING(0, "into-name", &into_name, N_("name"),
+ N_("use <name> instead of the real target")),
+ OPT__VERBOSITY(&verbosity),
+ OPT_BOOL(0, "abort", &abort_current_merge,
+ N_("abort the current in-progress merge")),
+ OPT_BOOL(0, "quit", &quit_current_merge,
+ N_("--abort but leave index and working tree alone")),
+ OPT_BOOL(0, "continue", &continue_current_merge,
+ N_("continue the current in-progress merge")),
+ OPT_BOOL(0, "allow-unrelated-histories", &allow_unrelated_histories,
+ N_("allow merging unrelated histories")),
+ OPT_SET_INT(0, "progress", &show_progress, N_("force progress reporting"), 1),
+ { OPTION_STRING, 'S', "gpg-sign", &sign_commit, N_("key-id"),
+ N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_AUTOSTASH(&autostash),
+ OPT_BOOL(0, "overwrite-ignore", &overwrite_ignore, N_("update ignored files (default)")),
+ OPT_BOOL(0, "signoff", &signoff, N_("add a Signed-off-by trailer")),
+ OPT_BOOL(0, "no-verify", &no_verify, N_("bypass pre-merge-commit and commit-msg hooks")),
+ OPT_END()
+};
+
+static int save_state(struct object_id *stash)
+{
+ int len;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf buffer = STRBUF_INIT;
+ struct lock_file lock_file = LOCK_INIT;
+ int fd;
+ int rc = -1;
+
+ fd = repo_hold_locked_index(the_repository, &lock_file, 0);
+ refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
+ if (0 <= fd)
+ repo_update_index_if_able(the_repository, &lock_file);
+ rollback_lock_file(&lock_file);
+
+ strvec_pushl(&cp.args, "stash", "create", NULL);
+ cp.out = -1;
+ cp.git_cmd = 1;
+
+ if (start_command(&cp))
+ die(_("could not run stash."));
+ len = strbuf_read(&buffer, cp.out, 1024);
+ close(cp.out);
+
+ if (finish_command(&cp) || len < 0)
+ die(_("stash failed"));
+ else if (!len) /* no changes */
+ goto out;
+ strbuf_setlen(&buffer, buffer.len-1);
+ if (get_oid(buffer.buf, stash))
+ die(_("not a valid object: %s"), buffer.buf);
+ rc = 0;
+out:
+ strbuf_release(&buffer);
+ return rc;
+}
+
+static void read_empty(const struct object_id *oid)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&cmd.args, "read-tree", "-m", "-u", empty_tree_oid_hex(),
+ oid_to_hex(oid), NULL);
+ cmd.git_cmd = 1;
+
+ if (run_command(&cmd))
+ die(_("read-tree failed"));
+}
+
+static void reset_hard(const struct object_id *oid)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&cmd.args, "read-tree", "-v", "--reset", "-u",
+ oid_to_hex(oid), NULL);
+ cmd.git_cmd = 1;
+
+ if (run_command(&cmd))
+ die(_("read-tree failed"));
+}
+
+static void restore_state(const struct object_id *head,
+ const struct object_id *stash)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ reset_hard(head);
+
+ if (is_null_oid(stash))
+ goto refresh_cache;
+
+ strvec_pushl(&cmd.args, "stash", "apply", "--index", "--quiet", NULL);
+ strvec_push(&cmd.args, oid_to_hex(stash));
+
+ /*
+	 * It is OK to ignore errors here, for example when there was
+ * nothing to restore.
+ */
+ cmd.git_cmd = 1;
+ run_command(&cmd);
+
+refresh_cache:
+ discard_cache();
+ if (read_cache() < 0)
+ die(_("could not read index"));
+}
+
+/* This is called when no merge was necessary. */
+static void finish_up_to_date(void)
+{
+ if (verbosity >= 0) {
+ if (squash)
+ puts(_("Already up to date. (nothing to squash)"));
+ else
+ puts(_("Already up to date."));
+ }
+ remove_merge_branch_state(the_repository);
+}
+
+static void squash_message(struct commit *commit, struct commit_list *remoteheads)
+{
+ struct rev_info rev;
+ struct strbuf out = STRBUF_INIT;
+ struct commit_list *j;
+ struct pretty_print_context ctx = {0};
+
+ printf(_("Squash commit -- not updating HEAD\n"));
+
+ repo_init_revisions(the_repository, &rev, NULL);
+ diff_merges_suppress(&rev);
+ rev.commit_format = CMIT_FMT_MEDIUM;
+
+ commit->object.flags |= UNINTERESTING;
+ add_pending_object(&rev, &commit->object, NULL);
+
+ for (j = remoteheads; j; j = j->next)
+ add_pending_object(&rev, &j->item->object, NULL);
+
+ setup_revisions(0, NULL, &rev, NULL);
+ if (prepare_revision_walk(&rev))
+ die(_("revision walk setup failed"));
+
+ ctx.abbrev = rev.abbrev;
+ ctx.date_mode = rev.date_mode;
+ ctx.fmt = rev.commit_format;
+
+ strbuf_addstr(&out, "Squashed commit of the following:\n");
+ while ((commit = get_revision(&rev)) != NULL) {
+ strbuf_addch(&out, '\n');
+ strbuf_addf(&out, "commit %s\n",
+ oid_to_hex(&commit->object.oid));
+ pretty_print_commit(&ctx, commit, &out);
+ }
+ write_file_buf(git_path_squash_msg(the_repository), out.buf, out.len);
+ strbuf_release(&out);
+ release_revisions(&rev);
+}
+
+static void finish(struct commit *head_commit,
+ struct commit_list *remoteheads,
+ const struct object_id *new_head, const char *msg)
+{
+ struct strbuf reflog_message = STRBUF_INIT;
+ const struct object_id *head = &head_commit->object.oid;
+
+ if (!msg)
+ strbuf_addstr(&reflog_message, getenv("GIT_REFLOG_ACTION"));
+ else {
+ if (verbosity >= 0)
+ printf("%s\n", msg);
+ strbuf_addf(&reflog_message, "%s: %s",
+ getenv("GIT_REFLOG_ACTION"), msg);
+ }
+ if (squash) {
+ squash_message(head_commit, remoteheads);
+ } else {
+ if (verbosity >= 0 && !merge_msg.len)
+ printf(_("No merge message -- not updating HEAD\n"));
+ else {
+ update_ref(reflog_message.buf, "HEAD", new_head, head,
+ 0, UPDATE_REFS_DIE_ON_ERR);
+ /*
+			 * We ignore the exit status of 'gc --auto' here,
+			 * since any errors are reported directly to the
+			 * user by the child process.
+ */
+ run_auto_maintenance(verbosity < 0);
+ }
+ }
+ if (new_head && show_diffstat) {
+ struct diff_options opts;
+ repo_diff_setup(the_repository, &opts);
+ opts.stat_width = -1; /* use full terminal width */
+ opts.stat_graph_width = -1; /* respect statGraphWidth config */
+ opts.output_format |=
+ DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ opts.detect_rename = DIFF_DETECT_RENAME;
+ diff_setup_done(&opts);
+ diff_tree_oid(head, new_head, "", &opts);
+ diffcore_std(&opts);
+ diff_flush(&opts);
+ }
+
+ /* Run a post-merge hook */
+ run_hooks_l("post-merge", squash ? "1" : "0", NULL);
+
+ if (new_head)
+ apply_autostash(git_path_merge_autostash(the_repository));
+ strbuf_release(&reflog_message);
+}
+
+/* Get the name for the merge commit's message. */
+static void merge_name(const char *remote, struct strbuf *msg)
+{
+ struct commit *remote_head;
+ struct object_id branch_head;
+ struct strbuf bname = STRBUF_INIT;
+ struct merge_remote_desc *desc;
+ const char *ptr;
+ char *found_ref = NULL;
+ int len, early;
+
+ strbuf_branchname(&bname, remote, 0);
+ remote = bname.buf;
+
+ oidclr(&branch_head);
+ remote_head = get_merge_parent(remote);
+ if (!remote_head)
+ die(_("'%s' does not point to a commit"), remote);
+
+ if (dwim_ref(remote, strlen(remote), &branch_head, &found_ref, 0) > 0) {
+ if (starts_with(found_ref, "refs/heads/")) {
+ strbuf_addf(msg, "%s\t\tbranch '%s' of .\n",
+ oid_to_hex(&branch_head), remote);
+ goto cleanup;
+ }
+ if (starts_with(found_ref, "refs/tags/")) {
+ strbuf_addf(msg, "%s\t\ttag '%s' of .\n",
+ oid_to_hex(&branch_head), remote);
+ goto cleanup;
+ }
+ if (starts_with(found_ref, "refs/remotes/")) {
+ strbuf_addf(msg, "%s\t\tremote-tracking branch '%s' of .\n",
+ oid_to_hex(&branch_head), remote);
+ goto cleanup;
+ }
+ }
+
+ /* See if remote matches <name>^^^.. or <name>~<number> */
+ for (len = 0, ptr = remote + strlen(remote);
+ remote < ptr && ptr[-1] == '^';
+ ptr--)
+ len++;
+ if (len)
+ early = 1;
+ else {
+ early = 0;
+ ptr = strrchr(remote, '~');
+ if (ptr) {
+ int seen_nonzero = 0;
+
+ len++; /* count ~ */
+ while (*++ptr && isdigit(*ptr)) {
+ seen_nonzero |= (*ptr != '0');
+ len++;
+ }
+ if (*ptr)
+ len = 0; /* not ...~<number> */
+ else if (seen_nonzero)
+ early = 1;
+ else if (len == 1)
+ early = 1; /* "name~" is "name~1"! */
+ }
+ }
+ if (len) {
+ struct strbuf truname = STRBUF_INIT;
+ strbuf_addf(&truname, "refs/heads/%s", remote);
+ strbuf_setlen(&truname, truname.len - len);
+ if (ref_exists(truname.buf)) {
+ strbuf_addf(msg,
+ "%s\t\tbranch '%s'%s of .\n",
+ oid_to_hex(&remote_head->object.oid),
+ truname.buf + 11,
+ (early ? " (early part)" : ""));
+ strbuf_release(&truname);
+ goto cleanup;
+ }
+ strbuf_release(&truname);
+ }
+
+ desc = merge_remote_util(remote_head);
+ if (desc && desc->obj && desc->obj->type == OBJ_TAG) {
+ strbuf_addf(msg, "%s\t\t%s '%s'\n",
+ oid_to_hex(&desc->obj->oid),
+ type_name(desc->obj->type),
+ remote);
+ goto cleanup;
+ }
+
+ strbuf_addf(msg, "%s\t\tcommit '%s'\n",
+ oid_to_hex(&remote_head->object.oid), remote);
+cleanup:
+ free(found_ref);
+ strbuf_release(&bname);
+}
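+
+/*
+ * Examples of the lines generated above (the ref names are purely
+ * illustrative): "<oid>\t\tbranch 'topic' of .",
+ * "<oid>\t\ttag 'v1.0' of .",
+ * "<oid>\t\tremote-tracking branch 'origin/topic' of .", and, for an
+ * input like "topic~3", "<oid>\t\tbranch 'topic' (early part) of .".
+ */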
+
+static void parse_branch_merge_options(char *bmo)
+{
+ const char **argv;
+ int argc;
+
+ if (!bmo)
+ return;
+ argc = split_cmdline(bmo, &argv);
+ if (argc < 0)
+ die(_("Bad branch.%s.mergeoptions string: %s"), branch,
+ _(split_cmdline_strerror(argc)));
+ REALLOC_ARRAY(argv, argc + 2);
+ MOVE_ARRAY(argv + 1, argv, argc + 1);
+ argc++;
+ argv[0] = "branch.*.mergeoptions";
+ parse_options(argc, argv, NULL, builtin_merge_options,
+ builtin_merge_usage, 0);
+ free(argv);
+}
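+
+/*
+ * For example (the branch name is only illustrative), a configuration
+ * of
+ *
+ *     [branch "topic"]
+ *             mergeoptions = --no-ff --log
+ *
+ * is split into words by the function above and re-parsed through the
+ * same option table, with a synthetic argv[0] of
+ * "branch.*.mergeoptions".
+ */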
+
+static int git_merge_config(const char *k, const char *v, void *cb)
+{
+ int status;
+ const char *str;
+
+ if (branch &&
+ skip_prefix(k, "branch.", &str) &&
+ skip_prefix(str, branch, &str) &&
+ !strcmp(str, ".mergeoptions")) {
+ free(branch_mergeoptions);
+ branch_mergeoptions = xstrdup(v);
+ return 0;
+ }
+
+ if (!strcmp(k, "merge.diffstat") || !strcmp(k, "merge.stat"))
+ show_diffstat = git_config_bool(k, v);
+ else if (!strcmp(k, "merge.verifysignatures"))
+ verify_signatures = git_config_bool(k, v);
+ else if (!strcmp(k, "pull.twohead"))
+ return git_config_string(&pull_twohead, k, v);
+ else if (!strcmp(k, "pull.octopus"))
+ return git_config_string(&pull_octopus, k, v);
+ else if (!strcmp(k, "commit.cleanup"))
+ return git_config_string(&cleanup_arg, k, v);
+ else if (!strcmp(k, "merge.ff")) {
+ int boolval = git_parse_maybe_bool(v);
+ if (0 <= boolval) {
+ fast_forward = boolval ? FF_ALLOW : FF_NO;
+ } else if (v && !strcmp(v, "only")) {
+ fast_forward = FF_ONLY;
+ } /* do not barf on values from future versions of git */
+ return 0;
+ } else if (!strcmp(k, "merge.defaulttoupstream")) {
+ default_to_upstream = git_config_bool(k, v);
+ return 0;
+ } else if (!strcmp(k, "commit.gpgsign")) {
+ sign_commit = git_config_bool(k, v) ? "" : NULL;
+ return 0;
+ } else if (!strcmp(k, "gpg.mintrustlevel")) {
+ check_trust_level = 0;
+ } else if (!strcmp(k, "merge.autostash")) {
+ autostash = git_config_bool(k, v);
+ return 0;
+ }
+
+ status = fmt_merge_msg_config(k, v, cb);
+ if (status)
+ return status;
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+ return git_diff_ui_config(k, v, cb);
+}
+
+static int read_tree_trivial(struct object_id *common, struct object_id *head,
+ struct object_id *one)
+{
+ int i, nr_trees = 0;
+ struct tree *trees[MAX_UNPACK_TREES];
+ struct tree_desc t[MAX_UNPACK_TREES];
+ struct unpack_trees_options opts;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.head_idx = 2;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ opts.update = 1;
+ opts.verbose_update = 1;
+ opts.trivial_merges_only = 1;
+ opts.merge = 1;
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ trees[nr_trees] = parse_tree_indirect(common);
+ if (!trees[nr_trees++])
+ return -1;
+ trees[nr_trees] = parse_tree_indirect(head);
+ if (!trees[nr_trees++])
+ return -1;
+ trees[nr_trees] = parse_tree_indirect(one);
+ if (!trees[nr_trees++])
+ return -1;
+ opts.fn = threeway_merge;
+ cache_tree_free(&the_index.cache_tree);
+ for (i = 0; i < nr_trees; i++) {
+ parse_tree(trees[i]);
+ init_tree_desc(t+i, trees[i]->buffer, trees[i]->size);
+ }
+ if (unpack_trees(nr_trees, t, &opts))
+ return -1;
+ return 0;
+}
+
+static void write_tree_trivial(struct object_id *oid)
+{
+ if (write_cache_as_tree(oid, 0, NULL))
+ die(_("git write-tree failed to write a tree"));
+}
+
+static int try_merge_strategy(const char *strategy, struct commit_list *common,
+ struct commit_list *remoteheads,
+ struct commit *head)
+{
+ const char *head_arg = "HEAD";
+
+ if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET,
+ SKIP_IF_UNCHANGED, 0, NULL, NULL,
+ NULL) < 0)
+ return error(_("Unable to write index."));
+
+ if (!strcmp(strategy, "recursive") || !strcmp(strategy, "subtree") ||
+ !strcmp(strategy, "ort")) {
+ struct lock_file lock = LOCK_INIT;
+ int clean, x;
+ struct commit *result;
+ struct commit_list *reversed = NULL;
+ struct merge_options o;
+ struct commit_list *j;
+
+ if (remoteheads->next) {
+			error(_("Not handling anything other than a two-head merge."));
+ return 2;
+ }
+
+ init_merge_options(&o, the_repository);
+ if (!strcmp(strategy, "subtree"))
+ o.subtree_shift = "";
+
+ o.show_rename_progress =
+ show_progress == -1 ? isatty(2) : show_progress;
+
+ for (x = 0; x < xopts_nr; x++)
+ if (parse_merge_opt(&o, xopts[x]))
+ die(_("unknown strategy option: -X%s"), xopts[x]);
+
+ o.branch1 = head_arg;
+ o.branch2 = merge_remote_util(remoteheads->item)->name;
+
+ for (j = common; j; j = j->next)
+ commit_list_insert(j->item, &reversed);
+
+ repo_hold_locked_index(the_repository, &lock,
+ LOCK_DIE_ON_ERROR);
+ if (!strcmp(strategy, "ort"))
+ clean = merge_ort_recursive(&o, head, remoteheads->item,
+ reversed, &result);
+ else
+ clean = merge_recursive(&o, head, remoteheads->item,
+ reversed, &result);
+ if (clean < 0) {
+ rollback_lock_file(&lock);
+ return 2;
+ }
+ if (write_locked_index(&the_index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("unable to write %s"), get_index_file());
+ return clean ? 0 : 1;
+ } else {
+ return try_merge_command(the_repository,
+ strategy, xopts_nr, xopts,
+ common, head_arg, remoteheads);
+ }
+}
+
+static void count_diff_files(struct diff_queue_struct *q,
+ struct diff_options *opt, void *data)
+{
+ int *count = data;
+
+ (*count) += q->nr;
+}
+
+static int count_unmerged_entries(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < the_index.cache_nr; i++)
+ if (ce_stage(the_index.cache[i]))
+ ret++;
+
+ return ret;
+}
+
+static void add_strategies(const char *string, unsigned attr)
+{
+ int i;
+
+ if (string) {
+ struct string_list list = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+ string_list_split(&list, string, ' ', -1);
+ for_each_string_list_item(item, &list)
+ append_strategy(get_strategy(item->string));
+ string_list_clear(&list, 0);
+ return;
+ }
+ for (i = 0; i < ARRAY_SIZE(all_strategy); i++)
+ if (all_strategy[i].attr & attr)
+ append_strategy(&all_strategy[i]);
+
+}
+
+static void read_merge_msg(struct strbuf *msg)
+{
+ const char *filename = git_path_merge_msg(the_repository);
+ strbuf_reset(msg);
+ if (strbuf_read_file(msg, filename, 0) < 0)
+ die_errno(_("Could not read from '%s'"), filename);
+}
+
+static void write_merge_state(struct commit_list *);
+static void abort_commit(struct commit_list *remoteheads, const char *err_msg)
+{
+ if (err_msg)
+ error("%s", err_msg);
+ fprintf(stderr,
+ _("Not committing merge; use 'git commit' to complete the merge.\n"));
+ write_merge_state(remoteheads);
+ exit(1);
+}
+
+static const char merge_editor_comment[] =
+N_("Please enter a commit message to explain why this merge is necessary,\n"
+ "especially if it merges an updated upstream into a topic branch.\n"
+ "\n");
+
+static const char scissors_editor_comment[] =
+N_("An empty message aborts the commit.\n");
+
+static const char no_scissors_editor_comment[] =
+N_("Lines starting with '%c' will be ignored, and an empty message aborts\n"
+ "the commit.\n");
+
+static void write_merge_heads(struct commit_list *);
+static void prepare_to_commit(struct commit_list *remoteheads)
+{
+ struct strbuf msg = STRBUF_INIT;
+ const char *index_file = get_index_file();
+
+ if (!no_verify) {
+ int invoked_hook;
+
+ if (run_commit_hook(0 < option_edit, index_file, &invoked_hook,
+ "pre-merge-commit", NULL))
+ abort_commit(remoteheads, NULL);
+ /*
+		 * Re-read the index, as the pre-merge-commit hook could have
+		 * updated it; we must do this before we invoke the editor.
+ */
+ if (invoked_hook)
+ discard_index(&the_index);
+ }
+ read_index_from(&the_index, index_file, get_git_dir());
+ strbuf_addbuf(&msg, &merge_msg);
+ if (squash)
+ BUG("the control must not reach here under --squash");
+ if (0 < option_edit) {
+ strbuf_addch(&msg, '\n');
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) {
+ wt_status_append_cut_line(&msg);
+ strbuf_commented_addf(&msg, "\n");
+ }
+ strbuf_commented_addf(&msg, _(merge_editor_comment));
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
+ strbuf_commented_addf(&msg, _(scissors_editor_comment));
+ else
+ strbuf_commented_addf(&msg,
+ _(no_scissors_editor_comment), comment_line_char);
+ }
+ if (signoff)
+ append_signoff(&msg, ignore_non_trailer(msg.buf, msg.len), 0);
+ write_merge_heads(remoteheads);
+ write_file_buf(git_path_merge_msg(the_repository), msg.buf, msg.len);
+ if (run_commit_hook(0 < option_edit, get_index_file(), NULL,
+ "prepare-commit-msg",
+ git_path_merge_msg(the_repository), "merge", NULL))
+ abort_commit(remoteheads, NULL);
+ if (0 < option_edit) {
+ if (launch_editor(git_path_merge_msg(the_repository), NULL, NULL))
+ abort_commit(remoteheads, NULL);
+ }
+
+ if (!no_verify && run_commit_hook(0 < option_edit, get_index_file(),
+ NULL, "commit-msg",
+ git_path_merge_msg(the_repository), NULL))
+ abort_commit(remoteheads, NULL);
+
+ read_merge_msg(&msg);
+ cleanup_message(&msg, cleanup_mode, 0);
+ if (!msg.len)
+ abort_commit(remoteheads, _("Empty commit message."));
+ strbuf_release(&merge_msg);
+ strbuf_addbuf(&merge_msg, &msg);
+ strbuf_release(&msg);
+}
+
+static int merge_trivial(struct commit *head, struct commit_list *remoteheads)
+{
+ struct object_id result_tree, result_commit;
+ struct commit_list *parents, **pptr = &parents;
+
+ if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET,
+ SKIP_IF_UNCHANGED, 0, NULL, NULL,
+ NULL) < 0)
+ return error(_("Unable to write index."));
+
+ write_tree_trivial(&result_tree);
+ printf(_("Wonderful.\n"));
+ pptr = commit_list_append(head, pptr);
+ pptr = commit_list_append(remoteheads->item, pptr);
+ prepare_to_commit(remoteheads);
+ if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents,
+ &result_commit, NULL, sign_commit))
+ die(_("failed to write commit object"));
+ finish(head, remoteheads, &result_commit, "In-index merge");
+ remove_merge_branch_state(the_repository);
+ return 0;
+}
+
+static int finish_automerge(struct commit *head,
+ int head_subsumed,
+ struct commit_list *common,
+ struct commit_list *remoteheads,
+ struct object_id *result_tree,
+ const char *wt_strategy)
+{
+ struct commit_list *parents = NULL;
+ struct strbuf buf = STRBUF_INIT;
+ struct object_id result_commit;
+
+ write_tree_trivial(result_tree);
+ free_commit_list(common);
+ parents = remoteheads;
+ if (!head_subsumed || fast_forward == FF_NO)
+ commit_list_insert(head, &parents);
+ prepare_to_commit(remoteheads);
+ if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+ &result_commit, NULL, sign_commit))
+ die(_("failed to write commit object"));
+ strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
+ finish(head, remoteheads, &result_commit, buf.buf);
+ strbuf_release(&buf);
+ remove_merge_branch_state(the_repository);
+ return 0;
+}
+
+static int suggest_conflicts(void)
+{
+ const char *filename;
+ FILE *fp;
+ struct strbuf msgbuf = STRBUF_INIT;
+
+ filename = git_path_merge_msg(the_repository);
+ fp = xfopen(filename, "a");
+
+ /*
+ * We can't use cleanup_mode because if we're not using the editor,
+ * get_cleanup_mode will return COMMIT_MSG_CLEANUP_SPACE instead, even
+ * though the message is meant to be processed later by git-commit.
+ * Thus, we will get the cleanup mode which is returned when we _are_
+ * using an editor.
+ */
+ append_conflicts_hint(&the_index, &msgbuf,
+ get_cleanup_mode(cleanup_arg, 1));
+ fputs(msgbuf.buf, fp);
+ strbuf_release(&msgbuf);
+ fclose(fp);
+ repo_rerere(the_repository, allow_rerere_auto);
+ printf(_("Automatic merge failed; "
+ "fix conflicts and then commit the result.\n"));
+ return 1;
+}
+
+static int evaluate_result(void)
+{
+ int cnt = 0;
+ struct rev_info rev;
+
+ /* Check how many files differ. */
+ repo_init_revisions(the_repository, &rev, "");
+ setup_revisions(0, NULL, &rev, NULL);
+ rev.diffopt.output_format |=
+ DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = count_diff_files;
+ rev.diffopt.format_callback_data = &cnt;
+ run_diff_files(&rev, 0);
+
+ /*
+ * Check how many unmerged entries are
+ * there.
+ */
+ cnt += count_unmerged_entries();
+
+ release_revisions(&rev);
+ return cnt;
+}
+
+/*
+ * Pretend as if the user told us to merge with the remote-tracking
+ * branch we have for the upstream of the current branch
+ */
+static int setup_with_upstream(const char ***argv)
+{
+ struct branch *branch = branch_get(NULL);
+ int i;
+ const char **args;
+
+ if (!branch)
+ die(_("No current branch."));
+ if (!branch->remote_name)
+ die(_("No remote for the current branch."));
+ if (!branch->merge_nr)
+ die(_("No default upstream defined for the current branch."));
+
+ args = xcalloc(st_add(branch->merge_nr, 1), sizeof(char *));
+ for (i = 0; i < branch->merge_nr; i++) {
+ if (!branch->merge[i]->dst)
+ die(_("No remote-tracking branch for %s from %s"),
+ branch->merge[i]->src, branch->remote_name);
+ args[i] = branch->merge[i]->dst;
+ }
+ args[i] = NULL;
+ *argv = args;
+ return i;
+}
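+
+/*
+ * For example (remote and branch names are only illustrative): with
+ * branch.topic.remote=origin and branch.topic.merge=refs/heads/main
+ * configured, and the usual fetch refspec, a bare "git merge" on
+ * 'topic' behaves as if the remote-tracking ref
+ * "refs/remotes/origin/main" had been passed on the command line.
+ */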
+
+static void write_merge_heads(struct commit_list *remoteheads)
+{
+ struct commit_list *j;
+ struct strbuf buf = STRBUF_INIT;
+
+ for (j = remoteheads; j; j = j->next) {
+ struct object_id *oid;
+ struct commit *c = j->item;
+ struct merge_remote_desc *desc;
+
+ desc = merge_remote_util(c);
+ if (desc && desc->obj) {
+ oid = &desc->obj->oid;
+ } else {
+ oid = &c->object.oid;
+ }
+ strbuf_addf(&buf, "%s\n", oid_to_hex(oid));
+ }
+ write_file_buf(git_path_merge_head(the_repository), buf.buf, buf.len);
+
+ strbuf_reset(&buf);
+ if (fast_forward == FF_NO)
+ strbuf_addstr(&buf, "no-ff");
+ write_file_buf(git_path_merge_mode(the_repository), buf.buf, buf.len);
+ strbuf_release(&buf);
+}
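+
+/*
+ * Resulting state files, roughly: MERGE_HEAD holds one hex object ID
+ * per head being merged (the tag object's ID rather than the peeled
+ * commit when a tag was named), and MERGE_MODE is either empty or the
+ * single word "no-ff" when fast-forwarding has been disabled.
+ */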
+
+static void write_merge_state(struct commit_list *remoteheads)
+{
+ write_merge_heads(remoteheads);
+ strbuf_addch(&merge_msg, '\n');
+ write_file_buf(git_path_merge_msg(the_repository), merge_msg.buf,
+ merge_msg.len);
+}
+
+static int default_edit_option(void)
+{
+ static const char name[] = "GIT_MERGE_AUTOEDIT";
+ const char *e = getenv(name);
+ struct stat st_stdin, st_stdout;
+
+ if (have_message)
+ /* an explicit -m msg without --[no-]edit */
+ return 0;
+
+ if (e) {
+ int v = git_parse_maybe_bool(e);
+ if (v < 0)
+ die(_("Bad value '%s' in environment '%s'"), e, name);
+ return v;
+ }
+
+	/* Use the editor if stdin and stdout refer to the same device and are a tty */
+ return (!fstat(0, &st_stdin) &&
+ !fstat(1, &st_stdout) &&
+ isatty(0) && isatty(1) &&
+ st_stdin.st_dev == st_stdout.st_dev &&
+ st_stdin.st_ino == st_stdout.st_ino &&
+ st_stdin.st_mode == st_stdout.st_mode);
+}
+
+static struct commit_list *reduce_parents(struct commit *head_commit,
+ int *head_subsumed,
+ struct commit_list *remoteheads)
+{
+ struct commit_list *parents, **remotes;
+
+ /*
+ * Is the current HEAD reachable from another commit being
+ * merged? If so we do not want to record it as a parent of
+ * the resulting merge, unless --no-ff is given. We will flip
+ * this variable to 0 when we find HEAD among the independent
+ * tips being merged.
+ */
+ *head_subsumed = 1;
+
+ /* Find what parents to record by checking independent ones. */
+ parents = reduce_heads(remoteheads);
+ free_commit_list(remoteheads);
+
+ remoteheads = NULL;
+ remotes = &remoteheads;
+ while (parents) {
+ struct commit *commit = pop_commit(&parents);
+ if (commit == head_commit)
+ *head_subsumed = 0;
+ else
+ remotes = &commit_list_insert(commit, remotes)->next;
+ }
+ return remoteheads;
+}
+
+static void prepare_merge_message(struct strbuf *merge_names, struct strbuf *merge_msg)
+{
+ struct fmt_merge_msg_opts opts;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.add_title = !have_message;
+ opts.shortlog_len = shortlog_len;
+ opts.credit_people = (0 < option_edit);
+ opts.into_name = into_name;
+
+ fmt_merge_msg(merge_names, merge_msg, &opts);
+ if (merge_msg->len)
+ strbuf_setlen(merge_msg, merge_msg->len - 1);
+}
+
+static void handle_fetch_head(struct commit_list **remotes, struct strbuf *merge_names)
+{
+ const char *filename;
+ int fd, pos, npos;
+ struct strbuf fetch_head_file = STRBUF_INIT;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ if (!merge_names)
+ merge_names = &fetch_head_file;
+
+ filename = git_path_fetch_head(the_repository);
+ fd = xopen(filename, O_RDONLY);
+
+ if (strbuf_read(merge_names, fd, 0) < 0)
+ die_errno(_("could not read '%s'"), filename);
+ if (close(fd) < 0)
+ die_errno(_("could not close '%s'"), filename);
+
+ for (pos = 0; pos < merge_names->len; pos = npos) {
+ struct object_id oid;
+ char *ptr;
+ struct commit *commit;
+
+ ptr = strchr(merge_names->buf + pos, '\n');
+ if (ptr)
+ npos = ptr - merge_names->buf + 1;
+ else
+ npos = merge_names->len;
+
+ if (npos - pos < hexsz + 2 ||
+ get_oid_hex(merge_names->buf + pos, &oid))
+ commit = NULL; /* bad */
+ else if (memcmp(merge_names->buf + pos + hexsz, "\t\t", 2))
+ continue; /* not-for-merge */
+ else {
+ char saved = merge_names->buf[pos + hexsz];
+ merge_names->buf[pos + hexsz] = '\0';
+ commit = get_merge_parent(merge_names->buf + pos);
+ merge_names->buf[pos + hexsz] = saved;
+ }
+ if (!commit) {
+ if (ptr)
+ *ptr = '\0';
+ die(_("not something we can merge in %s: %s"),
+ filename, merge_names->buf + pos);
+ }
+ remotes = &commit_list_insert(commit, remotes)->next;
+ }
+
+ if (merge_names == &fetch_head_file)
+ strbuf_release(&fetch_head_file);
+}
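+
+/*
+ * FETCH_HEAD lines look roughly like (branch names and URL are only
+ * examples):
+ *
+ *     <oid>\t\tbranch 'topic' of https://example.com/repo
+ *     <oid>\tnot-for-merge\tbranch 'other' of https://example.com/repo
+ *
+ * Only entries whose object ID is immediately followed by two tabs are
+ * picked up as merge parents above; everything else is skipped.
+ */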
+
+static struct commit_list *collect_parents(struct commit *head_commit,
+ int *head_subsumed,
+ int argc, const char **argv,
+ struct strbuf *merge_msg)
+{
+ int i;
+ struct commit_list *remoteheads = NULL;
+ struct commit_list **remotes = &remoteheads;
+ struct strbuf merge_names = STRBUF_INIT, *autogen = NULL;
+
+ if (merge_msg && (!have_message || shortlog_len))
+ autogen = &merge_names;
+
+ if (head_commit)
+ remotes = &commit_list_insert(head_commit, remotes)->next;
+
+ if (argc == 1 && !strcmp(argv[0], "FETCH_HEAD")) {
+ handle_fetch_head(remotes, autogen);
+ remoteheads = reduce_parents(head_commit, head_subsumed, remoteheads);
+ } else {
+ for (i = 0; i < argc; i++) {
+ struct commit *commit = get_merge_parent(argv[i]);
+ if (!commit)
+ help_unknown_ref(argv[i], "merge",
+ _("not something we can merge"));
+ remotes = &commit_list_insert(commit, remotes)->next;
+ }
+ remoteheads = reduce_parents(head_commit, head_subsumed, remoteheads);
+ if (autogen) {
+ struct commit_list *p;
+ for (p = remoteheads; p; p = p->next)
+ merge_name(merge_remote_util(p->item)->name, autogen);
+ }
+ }
+
+ if (autogen) {
+ prepare_merge_message(autogen, merge_msg);
+ strbuf_release(autogen);
+ }
+
+ return remoteheads;
+}
+
+static int merging_a_throwaway_tag(struct commit *commit)
+{
+ char *tag_ref;
+ struct object_id oid;
+ int is_throwaway_tag = 0;
+
+ /* Are we merging a tag? */
+ if (!merge_remote_util(commit) ||
+ !merge_remote_util(commit)->obj ||
+ merge_remote_util(commit)->obj->type != OBJ_TAG)
+ return is_throwaway_tag;
+
+ /*
+ * Now we know we are merging a tag object. Are we downstream
+ * and following the tags from upstream? If so, we must have
+ * the tag object pointed at by "refs/tags/$T" where $T is the
+ * tagname recorded in the tag object. We want to allow such
+ * a "just to catch up" merge to fast-forward.
+ *
+ * Otherwise, we are playing an integrator's role, making a
+ * merge with a throw-away tag from a contributor with
+ * something like "git pull $contributor $signed_tag".
+ * We want to forbid such a merge from fast-forwarding
+ * by default; otherwise we would not keep the signature
+ * anywhere.
+ */
+ tag_ref = xstrfmt("refs/tags/%s",
+ ((struct tag *)merge_remote_util(commit)->obj)->tag);
+ if (!read_ref(tag_ref, &oid) &&
+ oideq(&oid, &merge_remote_util(commit)->obj->oid))
+ is_throwaway_tag = 0;
+ else
+ is_throwaway_tag = 1;
+ free(tag_ref);
+ return is_throwaway_tag;
+}
+
+int cmd_merge(int argc, const char **argv, const char *prefix)
+{
+ struct object_id result_tree, stash, head_oid;
+ struct commit *head_commit;
+ struct strbuf buf = STRBUF_INIT;
+ int i, ret = 0, head_subsumed;
+ int best_cnt = -1, merge_was_ok = 0, automerge_was_ok = 0;
+ struct commit_list *common = NULL;
+ const char *best_strategy = NULL, *wt_strategy = NULL;
+ struct commit_list *remoteheads = NULL, *p;
+ void *branch_to_free;
+ int orig_argc = argc;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_merge_usage, builtin_merge_options);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ /*
+ * Check if we are _not_ on a detached HEAD, i.e. if there is a
+ * current branch.
+ */
+ branch = branch_to_free = resolve_refdup("HEAD", 0, &head_oid, NULL);
+ if (branch)
+ skip_prefix(branch, "refs/heads/", &branch);
+
+ if (!pull_twohead) {
+ char *default_strategy = getenv("GIT_TEST_MERGE_ALGORITHM");
+ if (default_strategy && !strcmp(default_strategy, "ort"))
+ pull_twohead = "ort";
+ }
+
+ init_diff_ui_defaults();
+ git_config(git_merge_config, NULL);
+
+ if (!branch || is_null_oid(&head_oid))
+ head_commit = NULL;
+ else
+ head_commit = lookup_commit_or_die(&head_oid, "HEAD");
+
+ if (branch_mergeoptions)
+ parse_branch_merge_options(branch_mergeoptions);
+ argc = parse_options(argc, argv, prefix, builtin_merge_options,
+ builtin_merge_usage, 0);
+ if (shortlog_len < 0)
+ shortlog_len = (merge_log_config > 0) ? merge_log_config : 0;
+
+ if (verbosity < 0 && show_progress == -1)
+ show_progress = 0;
+
+ if (abort_current_merge) {
+ int nargc = 2;
+ const char *nargv[] = {"reset", "--merge", NULL};
+ struct strbuf stash_oid = STRBUF_INIT;
+
+ if (orig_argc != 2)
+ usage_msg_opt(_("--abort expects no arguments"),
+ builtin_merge_usage, builtin_merge_options);
+
+ if (!file_exists(git_path_merge_head(the_repository)))
+ die(_("There is no merge to abort (MERGE_HEAD missing)."));
+
+ if (read_oneliner(&stash_oid, git_path_merge_autostash(the_repository),
+ READ_ONELINER_SKIP_IF_EMPTY))
+ unlink(git_path_merge_autostash(the_repository));
+
+ /* Invoke 'git reset --merge' */
+ ret = cmd_reset(nargc, nargv, prefix);
+
+ if (stash_oid.len)
+ apply_autostash_oid(stash_oid.buf);
+
+ strbuf_release(&stash_oid);
+ goto done;
+ }
+
+ if (quit_current_merge) {
+ if (orig_argc != 2)
+ usage_msg_opt(_("--quit expects no arguments"),
+ builtin_merge_usage,
+ builtin_merge_options);
+
+ remove_merge_branch_state(the_repository);
+ goto done;
+ }
+
+ if (continue_current_merge) {
+ int nargc = 1;
+ const char *nargv[] = {"commit", NULL};
+
+ if (orig_argc != 2)
+ usage_msg_opt(_("--continue expects no arguments"),
+ builtin_merge_usage, builtin_merge_options);
+
+ if (!file_exists(git_path_merge_head(the_repository)))
+ die(_("There is no merge in progress (MERGE_HEAD missing)."));
+
+ /* Invoke 'git commit' */
+ ret = cmd_commit(nargc, nargv, prefix);
+ goto done;
+ }
+
+ if (repo_read_index_unmerged(the_repository))
+ die_resolve_conflict("merge");
+
+ if (file_exists(git_path_merge_head(the_repository))) {
+ /*
+ * There is no unmerged entry, don't advise 'git
+ * add/rm <file>', just 'git commit'.
+ */
+ if (advice_enabled(ADVICE_RESOLVE_CONFLICT))
+ die(_("You have not concluded your merge (MERGE_HEAD exists).\n"
+ "Please, commit your changes before you merge."));
+ else
+ die(_("You have not concluded your merge (MERGE_HEAD exists)."));
+ }
+ if (ref_exists("CHERRY_PICK_HEAD")) {
+ if (advice_enabled(ADVICE_RESOLVE_CONFLICT))
+ die(_("You have not concluded your cherry-pick (CHERRY_PICK_HEAD exists).\n"
+ "Please, commit your changes before you merge."));
+ else
+ die(_("You have not concluded your cherry-pick (CHERRY_PICK_HEAD exists)."));
+ }
+ resolve_undo_clear_index(&the_index);
+
+ if (option_edit < 0)
+ option_edit = default_edit_option();
+
+ cleanup_mode = get_cleanup_mode(cleanup_arg, 0 < option_edit);
+
+ if (verbosity < 0)
+ show_diffstat = 0;
+
+ if (squash) {
+ if (fast_forward == FF_NO)
+			die(_("options '%s' and '%s' cannot be used together"), "--squash", "--no-ff");
+ if (option_commit > 0)
+			die(_("options '%s' and '%s' cannot be used together"), "--squash", "--commit");
+ /*
+ * squash can now silently disable option_commit - this is not
+ * a problem as it is only overriding the default, not a user
+ * supplied option.
+ */
+ option_commit = 0;
+ }
+
+ if (option_commit < 0)
+ option_commit = 1;
+
+ if (!argc) {
+ if (default_to_upstream)
+ argc = setup_with_upstream(&argv);
+ else
+ die(_("No commit specified and merge.defaultToUpstream not set."));
+ } else if (argc == 1 && !strcmp(argv[0], "-")) {
+ argv[0] = "@{-1}";
+ }
+
+ if (!argc)
+ usage_with_options(builtin_merge_usage,
+ builtin_merge_options);
+
+ if (!head_commit) {
+ /*
+ * If the merged head is a valid one there is no reason
+ * to forbid "git merge" into a branch yet to be born.
+ * We do the same for "git pull".
+ */
+ struct object_id *remote_head_oid;
+ if (squash)
+ die(_("Squash commit into empty head not supported yet"));
+ if (fast_forward == FF_NO)
+ die(_("Non-fast-forward commit does not make sense into "
+ "an empty head"));
+ remoteheads = collect_parents(head_commit, &head_subsumed,
+ argc, argv, NULL);
+ if (!remoteheads)
+ die(_("%s - not something we can merge"), argv[0]);
+ if (remoteheads->next)
+ die(_("Can merge only exactly one commit into empty head"));
+
+ if (verify_signatures)
+ verify_merge_signature(remoteheads->item, verbosity,
+ check_trust_level);
+
+ remote_head_oid = &remoteheads->item->object.oid;
+ read_empty(remote_head_oid);
+ update_ref("initial pull", "HEAD", remote_head_oid, NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
+ goto done;
+ }
+
+ /*
+ * All the rest are the commits being merged; prepare
+ * the standard merge summary message to be appended
+ * to the given message.
+ */
+ remoteheads = collect_parents(head_commit, &head_subsumed,
+ argc, argv, &merge_msg);
+
+ if (!head_commit || !argc)
+ usage_with_options(builtin_merge_usage,
+ builtin_merge_options);
+
+ if (verify_signatures) {
+ for (p = remoteheads; p; p = p->next) {
+ verify_merge_signature(p->item, verbosity,
+ check_trust_level);
+ }
+ }
+
+ strbuf_addstr(&buf, "merge");
+ for (p = remoteheads; p; p = p->next)
+ strbuf_addf(&buf, " %s", merge_remote_util(p->item)->name);
+ setenv("GIT_REFLOG_ACTION", buf.buf, 0);
+ strbuf_reset(&buf);
+
+ for (p = remoteheads; p; p = p->next) {
+ struct commit *commit = p->item;
+ strbuf_addf(&buf, "GITHEAD_%s",
+ oid_to_hex(&commit->object.oid));
+ setenv(buf.buf, merge_remote_util(commit)->name, 1);
+ strbuf_reset(&buf);
+ if (fast_forward != FF_ONLY && merging_a_throwaway_tag(commit))
+ fast_forward = FF_NO;
+ }
+
+ if (!use_strategies && !pull_twohead &&
+ remoteheads && !remoteheads->next) {
+ char *default_strategy = getenv("GIT_TEST_MERGE_ALGORITHM");
+ if (default_strategy)
+ append_strategy(get_strategy(default_strategy));
+ }
+ if (!use_strategies) {
+ if (!remoteheads)
+ ; /* already up-to-date */
+ else if (!remoteheads->next)
+ add_strategies(pull_twohead, DEFAULT_TWOHEAD);
+ else
+ add_strategies(pull_octopus, DEFAULT_OCTOPUS);
+ }
+
+ for (i = 0; i < use_strategies_nr; i++) {
+ if (use_strategies[i]->attr & NO_FAST_FORWARD)
+ fast_forward = FF_NO;
+ if (use_strategies[i]->attr & NO_TRIVIAL)
+ allow_trivial = 0;
+ }
+
+ if (!remoteheads)
+ ; /* already up-to-date */
+ else if (!remoteheads->next)
+ common = get_merge_bases(head_commit, remoteheads->item);
+ else {
+ struct commit_list *list = remoteheads;
+ commit_list_insert(head_commit, &list);
+ common = get_octopus_merge_bases(list);
+ free(list);
+ }
+
+ update_ref("updating ORIG_HEAD", "ORIG_HEAD",
+ &head_commit->object.oid, NULL, 0, UPDATE_REFS_DIE_ON_ERR);
+
+ if (remoteheads && !common) {
+ /* No common ancestors found. */
+ if (!allow_unrelated_histories)
+ die(_("refusing to merge unrelated histories"));
+ /* otherwise, we need a real merge. */
+ } else if (!remoteheads ||
+ (!remoteheads->next && !common->next &&
+ common->item == remoteheads->item)) {
+ /*
+		 * If HEAD can already reach everything being merged, we are
+		 * up to date; this also covers the most common case of
+		 * merging a single remote.
+ */
+ finish_up_to_date();
+ goto done;
+ } else if (fast_forward != FF_NO && !remoteheads->next &&
+ !common->next &&
+ oideq(&common->item->object.oid, &head_commit->object.oid)) {
+ /* Again the most common case of merging one remote. */
+ struct strbuf msg = STRBUF_INIT;
+ struct commit *commit;
+
+ if (verbosity >= 0) {
+ printf(_("Updating %s..%s\n"),
+ find_unique_abbrev(&head_commit->object.oid,
+ DEFAULT_ABBREV),
+ find_unique_abbrev(&remoteheads->item->object.oid,
+ DEFAULT_ABBREV));
+ }
+ strbuf_addstr(&msg, "Fast-forward");
+ if (have_message)
+ strbuf_addstr(&msg,
+ " (no commit created; -m option ignored)");
+ commit = remoteheads->item;
+ if (!commit) {
+ ret = 1;
+ goto done;
+ }
+
+ if (autostash)
+ create_autostash(the_repository,
+ git_path_merge_autostash(the_repository));
+ if (checkout_fast_forward(the_repository,
+ &head_commit->object.oid,
+ &commit->object.oid,
+ overwrite_ignore)) {
+ apply_autostash(git_path_merge_autostash(the_repository));
+ ret = 1;
+ goto done;
+ }
+
+ finish(head_commit, remoteheads, &commit->object.oid, msg.buf);
+ remove_merge_branch_state(the_repository);
+ strbuf_release(&msg);
+ goto done;
+ } else if (!remoteheads->next && common->next)
+ ;
+ /*
+ * We are not doing octopus and not fast-forward. Need
+ * a real merge.
+ */
+ else if (!remoteheads->next && !common->next && option_commit) {
+ /*
+ * We are not doing octopus, not fast-forward, and have
+ * only one common.
+ */
+ refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
+ if (allow_trivial && fast_forward != FF_ONLY) {
+ /*
+ * Must first ensure that index matches HEAD before
+ * attempting a trivial merge.
+ */
+ struct tree *head_tree = get_commit_tree(head_commit);
+ struct strbuf sb = STRBUF_INIT;
+
+ if (repo_index_has_changes(the_repository, head_tree,
+ &sb)) {
+ error(_("Your local changes to the following files would be overwritten by merge:\n %s"),
+ sb.buf);
+ strbuf_release(&sb);
+ return 2;
+ }
+
+ /* See if it is really trivial. */
+ git_committer_info(IDENT_STRICT);
+ printf(_("Trying really trivial in-index merge...\n"));
+ if (!read_tree_trivial(&common->item->object.oid,
+ &head_commit->object.oid,
+ &remoteheads->item->object.oid)) {
+ ret = merge_trivial(head_commit, remoteheads);
+ goto done;
+ }
+ printf(_("Nope.\n"));
+ }
+ } else {
+ /*
+		 * An octopus merge. If HEAD can reach all of the remote
+		 * heads, we are up to date.
+ */
+ int up_to_date = 1;
+ struct commit_list *j;
+
+ for (j = remoteheads; j; j = j->next) {
+ struct commit_list *common_one;
+
+ /*
+ * Here we *have* to calculate the individual
+ * merge_bases again, otherwise "git merge HEAD^
+ * HEAD^^" would be missed.
+ */
+ common_one = get_merge_bases(head_commit, j->item);
+ if (!oideq(&common_one->item->object.oid, &j->item->object.oid)) {
+ up_to_date = 0;
+ break;
+ }
+ }
+ if (up_to_date) {
+ finish_up_to_date();
+ goto done;
+ }
+ }
+
+ if (fast_forward == FF_ONLY)
+ die_ff_impossible();
+
+ if (autostash)
+ create_autostash(the_repository,
+ git_path_merge_autostash(the_repository));
+
+ /* We are going to make a new commit. */
+ git_committer_info(IDENT_STRICT);
+
+ /*
+ * At this point, we need a real merge. No matter what strategy
+ * we use, it would operate on the index, possibly affecting the
+ * working tree, and when resolved cleanly, have the desired
+ * tree in the index -- this means that the index must be in
+ * sync with the head commit. The strategies are responsible
+ * to ensure this.
+ *
+ * Stash away the local changes so that we can try more than one
+ * and/or recover from merge strategies bailing while leaving the
+ * index and working tree polluted.
+ */
+ if (save_state(&stash))
+ oidclr(&stash);
+
+ for (i = 0; i < use_strategies_nr; i++) {
+ int ret, cnt;
+ if (i) {
+ printf(_("Rewinding the tree to pristine...\n"));
+ restore_state(&head_commit->object.oid, &stash);
+ }
+ if (use_strategies_nr != 1)
+ printf(_("Trying merge strategy %s...\n"),
+ use_strategies[i]->name);
+ /*
+ * Remember which strategy left the state in the working
+ * tree.
+ */
+ wt_strategy = use_strategies[i]->name;
+
+ ret = try_merge_strategy(wt_strategy,
+ common, remoteheads,
+ head_commit);
+ /*
+ * The backend exits with 1 when conflicts are
+ * left to be resolved, with 2 when it does not
+ * handle the given merge at all.
+ */
+ if (ret < 2) {
+ if (!ret) {
+ /*
+ * This strategy worked; no point in trying
+ * another.
+ */
+ merge_was_ok = 1;
+ best_strategy = wt_strategy;
+ break;
+ }
+ cnt = (use_strategies_nr > 1) ? evaluate_result() : 0;
+ if (best_cnt <= 0 || cnt <= best_cnt) {
+ best_strategy = wt_strategy;
+ best_cnt = cnt;
+ }
+ }
+ }
+
+ /*
+ * If we have a resulting tree, that means the strategy module
+ * auto resolved the merge cleanly.
+ */
+ if (merge_was_ok && option_commit) {
+ automerge_was_ok = 1;
+ ret = finish_automerge(head_commit, head_subsumed,
+ common, remoteheads,
+ &result_tree, wt_strategy);
+ goto done;
+ }
+
+ /*
+ * Pick the result from the best strategy and have the user fix
+ * it up.
+ */
+ if (!best_strategy) {
+ restore_state(&head_commit->object.oid, &stash);
+ if (use_strategies_nr > 1)
+ fprintf(stderr,
+ _("No merge strategy handled the merge.\n"));
+ else
+ fprintf(stderr, _("Merge with strategy %s failed.\n"),
+ use_strategies[0]->name);
+ apply_autostash(git_path_merge_autostash(the_repository));
+ ret = 2;
+ goto done;
+ } else if (best_strategy == wt_strategy)
+ ; /* We already have its result in the working tree. */
+ else {
+ printf(_("Rewinding the tree to pristine...\n"));
+ restore_state(&head_commit->object.oid, &stash);
+ printf(_("Using the %s strategy to prepare resolving by hand.\n"),
+ best_strategy);
+ try_merge_strategy(best_strategy, common, remoteheads,
+ head_commit);
+ }
+
+ if (squash) {
+ finish(head_commit, remoteheads, NULL, NULL);
+
+ git_test_write_commit_graph_or_die();
+ } else
+ write_merge_state(remoteheads);
+
+ if (merge_was_ok)
+ fprintf(stderr, _("Automatic merge went well; "
+ "stopped before committing as requested\n"));
+ else
+ ret = suggest_conflicts();
+ if (autostash)
+ printf(_("When finished, apply stashed changes with `git stash pop`\n"));
+
+done:
+ if (!automerge_was_ok) {
+ free_commit_list(common);
+ free_commit_list(remoteheads);
+ }
+ strbuf_release(&buf);
+ free(branch_to_free);
+ return ret;
+}
diff --git a/builtin/mktag.c b/builtin/mktag.c
new file mode 100644
index 0000000..5d22909
--- /dev/null
+++ b/builtin/mktag.c
@@ -0,0 +1,108 @@
+#include "builtin.h"
+#include "parse-options.h"
+#include "tag.h"
+#include "replace-object.h"
+#include "object-store.h"
+#include "fsck.h"
+#include "config.h"
+
+static char const * const builtin_mktag_usage[] = {
+ "git mktag",
+ NULL
+};
+static int option_strict = 1;
+
+static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
+
+static int mktag_fsck_error_func(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
+{
+ switch (msg_type) {
+ case FSCK_WARN:
+ if (!option_strict) {
+ fprintf_ln(stderr, _("warning: tag input does not pass fsck: %s"), message);
+ return 0;
+
+ }
+ /* fallthrough */
+ case FSCK_ERROR:
+ /*
+		 * We treat both warnings and errors as errors: things
+		 * like missing "tagger" lines are "only" warnings
+		 * under fsck, but we've always considered them an error.
+ */
+ fprintf_ln(stderr, _("error: tag input does not pass fsck: %s"), message);
+ return 1;
+ default:
+ BUG(_("%d (FSCK_IGNORE?) should never trigger this callback"),
+ msg_type);
+ }
+}
+
+static int verify_object_in_tag(struct object_id *tagged_oid, int *tagged_type)
+{
+ int ret;
+ enum object_type type;
+ unsigned long size;
+ void *buffer;
+ const struct object_id *repl;
+
+ buffer = read_object_file(tagged_oid, &type, &size);
+ if (!buffer)
+ die(_("could not read tagged object '%s'"),
+ oid_to_hex(tagged_oid));
+ if (type != *tagged_type)
+ die(_("object '%s' tagged as '%s', but is a '%s' type"),
+ oid_to_hex(tagged_oid),
+ type_name(*tagged_type), type_name(type));
+
+ repl = lookup_replace_object(the_repository, tagged_oid);
+ ret = check_object_signature(the_repository, repl, buffer, size,
+ *tagged_type);
+ free(buffer);
+
+ return ret;
+}
+
+int cmd_mktag(int argc, const char **argv, const char *prefix)
+{
+ static struct option builtin_mktag_options[] = {
+ OPT_BOOL(0, "strict", &option_strict,
+ N_("enable more strict checking")),
+ OPT_END(),
+ };
+ struct strbuf buf = STRBUF_INIT;
+ struct object_id tagged_oid;
+ int tagged_type;
+ struct object_id result;
+
+ argc = parse_options(argc, argv, NULL,
+ builtin_mktag_options,
+ builtin_mktag_usage, 0);
+
+ if (strbuf_read(&buf, 0, 0) < 0)
+ die_errno(_("could not read from stdin"));
+
+ fsck_options.error_func = mktag_fsck_error_func;
+ fsck_set_msg_type_from_ids(&fsck_options, FSCK_MSG_EXTRA_HEADER_ENTRY,
+ FSCK_WARN);
+ /* config might set fsck.extraHeaderEntry=* again */
+ git_config(git_fsck_config, &fsck_options);
+ if (fsck_tag_standalone(NULL, buf.buf, buf.len, &fsck_options,
+ &tagged_oid, &tagged_type))
+ die(_("tag on stdin did not pass our strict fsck check"));
+
+ if (verify_object_in_tag(&tagged_oid, &tagged_type) < 0)
+ die(_("tag on stdin did not refer to a valid object"));
+
+ if (write_object_file(buf.buf, buf.len, OBJ_TAG, &result) < 0)
+ die(_("unable to write tag file"));
+
+ strbuf_release(&buf);
+ puts(oid_to_hex(&result));
+ return 0;
+}
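+
+/*
+ * A rough usage sketch (all names and the object ID are placeholders):
+ *
+ *     git mktag <<EOF
+ *     object <oid-of-an-existing-commit>
+ *     type commit
+ *     tag v1.0.0
+ *     tagger A U Thor <author@example.com> 1234567890 +0000
+ *
+ *     release message
+ *     EOF
+ *
+ * On success the object ID of the newly written tag is printed; input
+ * that does not pass the strict fsck checks above is rejected.
+ */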
diff --git a/builtin/mktree.c b/builtin/mktree.c
new file mode 100644
index 0000000..06d8140
--- /dev/null
+++ b/builtin/mktree.c
@@ -0,0 +1,200 @@
+/*
+ * GIT - the stupid content tracker
+ *
+ * Copyright (c) Junio C Hamano, 2006, 2009
+ */
+#include "builtin.h"
+#include "quote.h"
+#include "tree.h"
+#include "parse-options.h"
+#include "object-store.h"
+
+static struct treeent {
+ unsigned mode;
+ struct object_id oid;
+ int len;
+ char name[FLEX_ARRAY];
+} **entries;
+static int alloc, used;
+
+static void append_to_tree(unsigned mode, struct object_id *oid, char *path)
+{
+ struct treeent *ent;
+ size_t len = strlen(path);
+ if (strchr(path, '/'))
+ die("path %s contains slash", path);
+
+ FLEX_ALLOC_MEM(ent, name, path, len);
+ ent->mode = mode;
+ ent->len = len;
+ oidcpy(&ent->oid, oid);
+
+ ALLOC_GROW(entries, used + 1, alloc);
+ entries[used++] = ent;
+}
+
+static int ent_compare(const void *a_, const void *b_)
+{
+ struct treeent *a = *(struct treeent **)a_;
+ struct treeent *b = *(struct treeent **)b_;
+ return base_name_compare(a->name, a->len, a->mode,
+ b->name, b->len, b->mode);
+}
+
+static void write_tree(struct object_id *oid)
+{
+ struct strbuf buf;
+ size_t size;
+ int i;
+
+ QSORT(entries, used, ent_compare);
+ for (size = i = 0; i < used; i++)
+ size += 32 + entries[i]->len;
+
+ strbuf_init(&buf, size);
+ for (i = 0; i < used; i++) {
+ struct treeent *ent = entries[i];
+ strbuf_addf(&buf, "%o %s%c", ent->mode, ent->name, '\0');
+ strbuf_add(&buf, ent->oid.hash, the_hash_algo->rawsz);
+ }
+
+ write_object_file(buf.buf, buf.len, OBJ_TREE, oid);
+ strbuf_release(&buf);
+}
+
+static const char *mktree_usage[] = {
+ "git mktree [-z] [--missing] [--batch]",
+ NULL
+};
+
+static void mktree_line(char *buf, int nul_term_line, int allow_missing)
+{
+ char *ptr, *ntr;
+ const char *p;
+ unsigned mode;
+ enum object_type mode_type; /* object type derived from mode */
+ enum object_type obj_type; /* object type derived from sha */
+ struct object_info oi = OBJECT_INFO_INIT;
+ char *path, *to_free = NULL;
+ struct object_id oid;
+
+ ptr = buf;
+ /*
+ * Read non-recursive ls-tree output format:
+ * mode SP type SP sha1 TAB name
+ */
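+ /*
+ * e.g. an illustrative entry (the blob id shown is the empty blob):
+ * 100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391<TAB>README
+ */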
+ mode = strtoul(ptr, &ntr, 8);
+ if (ptr == ntr || !ntr || *ntr != ' ')
+ die("input format error: %s", buf);
+ ptr = ntr + 1; /* type */
+ ntr = strchr(ptr, ' ');
+ if (!ntr || parse_oid_hex(ntr + 1, &oid, &p) ||
+ *p != '\t')
+ die("input format error: %s", buf);
+
+ /* It is perfectly normal if we do not have a commit from a submodule */
+ if (S_ISGITLINK(mode))
+ allow_missing = 1;
+
+ *ntr++ = 0; /* now at the beginning of SHA1 */
+
+ path = (char *)p + 1; /* at the beginning of name */
+ if (!nul_term_line && path[0] == '"') {
+ struct strbuf p_uq = STRBUF_INIT;
+ if (unquote_c_style(&p_uq, path, NULL))
+ die("invalid quoting");
+ path = to_free = strbuf_detach(&p_uq, NULL);
+ }
+
+ /*
+ * Object type is redundantly derivable three ways.
+ * These should all agree.
+ */
+ mode_type = object_type(mode);
+ if (mode_type != type_from_string(ptr)) {
+ die("entry '%s' object type (%s) doesn't match mode type (%s)",
+ path, ptr, type_name(mode_type));
+ }
+
+ /* Check the type of object identified by oid without fetching objects */
+ oi.typep = &obj_type;
+ if (oid_object_info_extended(the_repository, &oid, &oi,
+ OBJECT_INFO_LOOKUP_REPLACE |
+ OBJECT_INFO_QUICK |
+ OBJECT_INFO_SKIP_FETCH_OBJECT) < 0)
+ obj_type = -1;
+
+ if (obj_type < 0) {
+ if (allow_missing) {
+ ; /* no problem - missing objects are presumed to be of the right type */
+ } else {
+ die("entry '%s' object %s is unavailable", path, oid_to_hex(&oid));
+ }
+ } else {
+ if (obj_type != mode_type) {
+ /*
+ * The object exists but is of the wrong type.
+ * This is a problem regardless of allow_missing
+ * because the new tree entry will never be correct.
+ */
+ die("entry '%s' object %s is a %s but specified type was (%s)",
+ path, oid_to_hex(&oid), type_name(obj_type), type_name(mode_type));
+ }
+ }
+
+ append_to_tree(mode, &oid, path);
+ free(to_free);
+}
+
+int cmd_mktree(int ac, const char **av, const char *prefix)
+{
+ struct strbuf sb = STRBUF_INIT;
+ struct object_id oid;
+ int nul_term_line = 0;
+ int allow_missing = 0;
+ int is_batch_mode = 0;
+ int got_eof = 0;
+ strbuf_getline_fn getline_fn;
+
+ const struct option option[] = {
+ OPT_BOOL('z', NULL, &nul_term_line, N_("input is NUL terminated")),
+ OPT_SET_INT( 0 , "missing", &allow_missing, N_("allow missing objects"), 1),
+ OPT_SET_INT( 0 , "batch", &is_batch_mode, N_("allow creation of more than one tree"), 1),
+ OPT_END()
+ };
+
+ ac = parse_options(ac, av, prefix, option, mktree_usage, 0);
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+
+ while (!got_eof) {
+ while (1) {
+ if (getline_fn(&sb, stdin) == EOF) {
+ got_eof = 1;
+ break;
+ }
+ if (sb.buf[0] == '\0') {
+ /* empty lines denote tree boundaries in batch mode */
+ if (is_batch_mode)
+ break;
+ die("input format error: (blank line only valid in batch mode)");
+ }
+ mktree_line(sb.buf, nul_term_line, allow_missing);
+ }
+ if (is_batch_mode && got_eof && used < 1) {
+ /*
+ * Execution gets here if the last tree entry is terminated with a
+ * new-line. The final new-line has been made optional to be
+ * consistent with the original non-batch behaviour of mktree.
+ */
+ ; /* skip creating an empty tree */
+ } else {
+ write_tree(&oid);
+ puts(oid_to_hex(&oid));
+ fflush(stdout);
+ }
+ used = 0; /* reset tree entry buffer for re-use in batch mode */
+ }
+ strbuf_release(&sb);
+ return 0;
+}
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
new file mode 100644
index 0000000..9a18a82
--- /dev/null
+++ b/builtin/multi-pack-index.c
@@ -0,0 +1,287 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "parse-options.h"
+#include "midx.h"
+#include "trace2.h"
+#include "object-store.h"
+
+#define BUILTIN_MIDX_WRITE_USAGE \
+ N_("git multi-pack-index [<options>] write [--preferred-pack=<pack>] " \
+ "[--refs-snapshot=<path>]")
+
+#define BUILTIN_MIDX_VERIFY_USAGE \
+ N_("git multi-pack-index [<options>] verify")
+
+#define BUILTIN_MIDX_EXPIRE_USAGE \
+ N_("git multi-pack-index [<options>] expire")
+
+#define BUILTIN_MIDX_REPACK_USAGE \
+ N_("git multi-pack-index [<options>] repack [--batch-size=<size>]")
+
+static char const * const builtin_multi_pack_index_write_usage[] = {
+ BUILTIN_MIDX_WRITE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_verify_usage[] = {
+ BUILTIN_MIDX_VERIFY_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_expire_usage[] = {
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_repack_usage[] = {
+ BUILTIN_MIDX_REPACK_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_usage[] = {
+ BUILTIN_MIDX_WRITE_USAGE,
+ BUILTIN_MIDX_VERIFY_USAGE,
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ BUILTIN_MIDX_REPACK_USAGE,
+ NULL
+};
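+/*
+ * Illustrative invocations of the subcommands declared above:
+ *   git multi-pack-index write --bitmap
+ *   git multi-pack-index verify
+ *   git multi-pack-index expire
+ */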
+
+static struct opts_multi_pack_index {
+ char *object_dir;
+ const char *preferred_pack;
+ const char *refs_snapshot;
+ unsigned long batch_size;
+ unsigned flags;
+ int stdin_packs;
+} opts;
+
+
+static int parse_object_dir(const struct option *opt, const char *arg,
+ int unset)
+{
+ char **value = opt->value;
+ free(*value);
+ if (unset)
+ *value = xstrdup(get_object_directory());
+ else
+ *value = real_pathdup(arg, 1);
+ return 0;
+}
+
+static struct option common_opts[] = {
+ OPT_CALLBACK(0, "object-dir", &opts.object_dir,
+ N_("directory"),
+ N_("object directory containing set of packfile and pack-index pairs"),
+ parse_object_dir),
+ OPT_END(),
+};
+
+static struct option *add_common_options(struct option *prev)
+{
+ return parse_options_concat(common_opts, prev);
+}
+
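+/*
+ * Illustrative configuration consumed by the callback below:
+ *
+ *   [pack]
+ *       writeBitmapHashCache = true
+ *       writeBitmapLookupTable = false
+ */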
+static int git_multi_pack_index_write_config(const char *var, const char *value,
+ void *cb UNUSED)
+{
+ if (!strcmp(var, "pack.writebitmaphashcache")) {
+ if (git_config_bool(var, value))
+ opts.flags |= MIDX_WRITE_BITMAP_HASH_CACHE;
+ else
+ opts.flags &= ~MIDX_WRITE_BITMAP_HASH_CACHE;
+ }
+
+ if (!strcmp(var, "pack.writebitmaplookuptable")) {
+ if (git_config_bool(var, value))
+ opts.flags |= MIDX_WRITE_BITMAP_LOOKUP_TABLE;
+ else
+ opts.flags &= ~MIDX_WRITE_BITMAP_LOOKUP_TABLE;
+ }
+
+ /*
+ * We should never make a fall-back call to 'git_default_config', since
+ * this was already called in 'cmd_multi_pack_index()'.
+ */
+ return 0;
+}
+
+static void read_packs_from_stdin(struct string_list *to)
+{
+ struct strbuf buf = STRBUF_INIT;
+ while (strbuf_getline(&buf, stdin) != EOF)
+ string_list_append(to, buf.buf);
+ string_list_sort(to);
+
+ strbuf_release(&buf);
+}
+
+static int cmd_multi_pack_index_write(int argc, const char **argv,
+ const char *prefix)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_write_options[] = {
+ OPT_STRING(0, "preferred-pack", &opts.preferred_pack,
+ N_("preferred-pack"),
+ N_("pack for reuse when computing a multi-pack bitmap")),
+ OPT_BIT(0, "bitmap", &opts.flags, N_("write multi-pack bitmap"),
+ MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX),
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_BOOL(0, "stdin-packs", &opts.stdin_packs,
+ N_("write multi-pack index containing only given indexes")),
+ OPT_FILENAME(0, "refs-snapshot", &opts.refs_snapshot,
+ N_("refs snapshot for selecting bitmap commits")),
+ OPT_END(),
+ };
+
+ opts.flags |= MIDX_WRITE_BITMAP_HASH_CACHE;
+
+ git_config(git_multi_pack_index_write_config, NULL);
+
+ options = add_common_options(builtin_multi_pack_index_write_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
+ argc = parse_options(argc, argv, prefix,
+ options, builtin_multi_pack_index_write_usage,
+ 0);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_write_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ if (opts.stdin_packs) {
+ struct string_list packs = STRING_LIST_INIT_DUP;
+ int ret;
+
+ read_packs_from_stdin(&packs);
+
+ ret = write_midx_file_only(opts.object_dir, &packs,
+ opts.preferred_pack,
+ opts.refs_snapshot, opts.flags);
+
+ string_list_clear(&packs, 0);
+
+ return ret;
+
+ }
+ return write_midx_file(opts.object_dir, opts.preferred_pack,
+ opts.refs_snapshot, opts.flags);
+}
+
+static int cmd_multi_pack_index_verify(int argc, const char **argv,
+ const char *prefix)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_verify_options[] = {
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+ };
+ options = add_common_options(builtin_multi_pack_index_verify_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
+ argc = parse_options(argc, argv, prefix,
+ options, builtin_multi_pack_index_verify_usage,
+ 0);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_verify_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return verify_midx_file(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_expire(int argc, const char **argv,
+ const char *prefix)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_expire_options[] = {
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+ };
+ options = add_common_options(builtin_multi_pack_index_expire_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
+ argc = parse_options(argc, argv, prefix,
+ options, builtin_multi_pack_index_expire_usage,
+ 0);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_expire_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return expire_midx_packs(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_repack(int argc, const char **argv,
+ const char *prefix)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_repack_options[] = {
+ OPT_MAGNITUDE(0, "batch-size", &opts.batch_size,
+ N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")),
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+ };
+
+ options = add_common_options(builtin_multi_pack_index_repack_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
+ argc = parse_options(argc, argv, prefix,
+ options,
+ builtin_multi_pack_index_repack_usage,
+ 0);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_repack_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return midx_repack(the_repository, opts.object_dir,
+ (size_t)opts.batch_size, opts.flags);
+}
+
+int cmd_multi_pack_index(int argc, const char **argv,
+ const char *prefix)
+{
+ int res;
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option builtin_multi_pack_index_options[] = {
+ OPT_SUBCOMMAND("repack", &fn, cmd_multi_pack_index_repack),
+ OPT_SUBCOMMAND("write", &fn, cmd_multi_pack_index_write),
+ OPT_SUBCOMMAND("verify", &fn, cmd_multi_pack_index_verify),
+ OPT_SUBCOMMAND("expire", &fn, cmd_multi_pack_index_expire),
+ OPT_END(),
+ };
+ struct option *options = parse_options_concat(builtin_multi_pack_index_options, common_opts);
+
+ git_config(git_default_config, NULL);
+
+ if (the_repository &&
+ the_repository->objects &&
+ the_repository->objects->odb)
+ opts.object_dir = xstrdup(the_repository->objects->odb->path);
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_multi_pack_index_usage, 0);
+ FREE_AND_NULL(options);
+
+ res = fn(argc, argv, prefix);
+
+ free(opts.object_dir);
+ return res;
+}
diff --git a/builtin/mv.c b/builtin/mv.c
new file mode 100644
index 0000000..19790ce
--- /dev/null
+++ b/builtin/mv.c
@@ -0,0 +1,565 @@
+/*
+ * "git mv" builtin command
+ *
+ * Copyright (C) 2006 Johannes Schindelin
+ */
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "builtin.h"
+#include "config.h"
+#include "pathspec.h"
+#include "lockfile.h"
+#include "dir.h"
+#include "cache-tree.h"
+#include "string-list.h"
+#include "parse-options.h"
+#include "submodule.h"
+#include "entry.h"
+
+static const char * const builtin_mv_usage[] = {
+ N_("git mv [<options>] <source>... <destination>"),
+ NULL
+};
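+/*
+ * Illustrative invocations (paths made up):
+ *   git mv COPYING COPYING.txt
+ *   git mv README.md docs/
+ */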
+
+enum update_mode {
+ WORKING_DIRECTORY = (1 << 1),
+ INDEX = (1 << 2),
+ SPARSE = (1 << 3),
+ SKIP_WORKTREE_DIR = (1 << 4),
+};
+
+#define DUP_BASENAME 1
+#define KEEP_TRAILING_SLASH 2
+
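+/*
+ * Illustrative effect (paths made up): run from subdirectory "sub/", the
+ * argument "file.c" becomes the repository-relative "sub/file.c"; with
+ * DUP_BASENAME, a source "dir/name.c" contributes only "name.c" before
+ * the prefix is applied.
+ */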
+static const char **internal_prefix_pathspec(const char *prefix,
+ const char **pathspec,
+ int count, unsigned flags)
+{
+ int i;
+ const char **result;
+ int prefixlen = prefix ? strlen(prefix) : 0;
+ ALLOC_ARRAY(result, count + 1);
+
+ /* Create an intermediate copy of the pathspec based on the flags */
+ for (i = 0; i < count; i++) {
+ int length = strlen(pathspec[i]);
+ int to_copy = length;
+ char *it;
+ while (!(flags & KEEP_TRAILING_SLASH) &&
+ to_copy > 0 && is_dir_sep(pathspec[i][to_copy - 1]))
+ to_copy--;
+
+ it = xmemdupz(pathspec[i], to_copy);
+ if (flags & DUP_BASENAME) {
+ result[i] = xstrdup(basename(it));
+ free(it);
+ } else {
+ result[i] = it;
+ }
+ }
+ result[count] = NULL;
+
+ /* Prefix the pathspec and free the old intermediate strings */
+ for (i = 0; i < count; i++) {
+ const char *match = prefix_path(prefix, prefixlen, result[i]);
+ free((char *) result[i]);
+ result[i] = match;
+ }
+
+ return result;
+}
+
+static const char *add_slash(const char *path)
+{
+ size_t len = strlen(path);
+ if (len && path[len - 1] != '/') {
+ char *with_slash = xmalloc(st_add(len, 2));
+ memcpy(with_slash, path, len);
+ with_slash[len++] = '/';
+ with_slash[len] = 0;
+ return with_slash;
+ }
+ return path;
+}
+
+#define SUBMODULE_WITH_GITDIR ((const char *)1)
+
+static void prepare_move_submodule(const char *src, int first,
+ const char **submodule_gitfile)
+{
+ struct strbuf submodule_dotgit = STRBUF_INIT;
+ if (!S_ISGITLINK(the_index.cache[first]->ce_mode))
+ die(_("Directory %s is in index and no submodule?"), src);
+ if (!is_staging_gitmodules_ok(&the_index))
+ die(_("Please stage your changes to .gitmodules or stash them to proceed"));
+ strbuf_addf(&submodule_dotgit, "%s/.git", src);
+ *submodule_gitfile = read_gitfile(submodule_dotgit.buf);
+ if (*submodule_gitfile)
+ *submodule_gitfile = xstrdup(*submodule_gitfile);
+ else
+ *submodule_gitfile = SUBMODULE_WITH_GITDIR;
+ strbuf_release(&submodule_dotgit);
+}
+
+static int index_range_of_same_dir(const char *src, int length,
+ int *first_p, int *last_p)
+{
+ const char *src_w_slash = add_slash(src);
+ int first, last, len_w_slash = length + 1;
+
+ first = index_name_pos(&the_index, src_w_slash, len_w_slash);
+ if (first >= 0)
+ die(_("%.*s is in index"), len_w_slash, src_w_slash);
+
+ first = -1 - first;
+ for (last = first; last < the_index.cache_nr; last++) {
+ const char *path = the_index.cache[last]->name;
+ if (strncmp(path, src_w_slash, len_w_slash))
+ break;
+ }
+ if (src_w_slash != src)
+ free((char *)src_w_slash);
+ *first_p = first;
+ *last_p = last;
+ return last - first;
+}
+
+/*
+ * Given the path of a directory that does not exist on-disk, check whether the
+ * directory contains any entries in the index with the SKIP_WORKTREE flag
+ * enabled.
+ * Return 1 if such index entries exist.
+ * Return 0 otherwise.
+ */
+static int empty_dir_has_sparse_contents(const char *name)
+{
+ int ret = 0;
+ const char *with_slash = add_slash(name);
+ int length = strlen(with_slash);
+
+ int pos = index_name_pos(&the_index, with_slash, length);
+ const struct cache_entry *ce;
+
+ if (pos < 0) {
+ pos = -pos - 1;
+ if (pos >= the_index.cache_nr)
+ goto free_return;
+ ce = the_index.cache[pos];
+ if (strncmp(with_slash, ce->name, length))
+ goto free_return;
+ if (ce_skip_worktree(ce))
+ ret = 1;
+ }
+
+free_return:
+ if (with_slash != name)
+ free((char *)with_slash);
+ return ret;
+}
+
+int cmd_mv(int argc, const char **argv, const char *prefix)
+{
+ int i, flags, gitmodules_modified = 0;
+ int verbose = 0, show_only = 0, force = 0, ignore_errors = 0, ignore_sparse = 0;
+ struct option builtin_mv_options[] = {
+ OPT__VERBOSE(&verbose, N_("be verbose")),
+ OPT__DRY_RUN(&show_only, N_("dry run")),
+ OPT__FORCE(&force, N_("force move/rename even if target exists"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL('k', NULL, &ignore_errors, N_("skip move/rename errors")),
+ OPT_BOOL(0, "sparse", &ignore_sparse, N_("allow updating entries outside of the sparse-checkout cone")),
+ OPT_END(),
+ };
+ const char **source, **destination, **dest_path, **submodule_gitfile;
+ const char *dst_w_slash;
+ const char **src_dir = NULL;
+ int src_dir_nr = 0, src_dir_alloc = 0;
+ struct strbuf a_src_dir = STRBUF_INIT;
+ enum update_mode *modes, dst_mode = 0;
+ struct stat st;
+ struct string_list src_for_dst = STRING_LIST_INIT_NODUP;
+ struct lock_file lock_file = LOCK_INIT;
+ struct cache_entry *ce;
+ struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP;
+ struct string_list dirty_paths = STRING_LIST_INIT_NODUP;
+
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, builtin_mv_options,
+ builtin_mv_usage, 0);
+ if (--argc < 1)
+ usage_with_options(builtin_mv_usage, builtin_mv_options);
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+ if (repo_read_index(the_repository) < 0)
+ die(_("index file corrupt"));
+
+ source = internal_prefix_pathspec(prefix, argv, argc, 0);
+ CALLOC_ARRAY(modes, argc);
+
+ /*
+ * Keep trailing slash, needed to let
+ * "git mv file no-such-dir/" error out, except in the case
+ * "git mv directory no-such-dir/".
+ */
+ flags = KEEP_TRAILING_SLASH;
+ if (argc == 1 && is_directory(argv[0]) && !is_directory(argv[1]))
+ flags = 0;
+ dest_path = internal_prefix_pathspec(prefix, argv + argc, 1, flags);
+ dst_w_slash = add_slash(dest_path[0]);
+ submodule_gitfile = xcalloc(argc, sizeof(char *));
+
+ if (dest_path[0][0] == '\0')
+ /* special case: "." was normalized to "" */
+ destination = internal_prefix_pathspec(dest_path[0], argv, argc, DUP_BASENAME);
+ else if (!lstat(dest_path[0], &st) &&
+ S_ISDIR(st.st_mode)) {
+ destination = internal_prefix_pathspec(dst_w_slash, argv, argc, DUP_BASENAME);
+ } else {
+ if (!path_in_sparse_checkout(dst_w_slash, &the_index) &&
+ empty_dir_has_sparse_contents(dst_w_slash)) {
+ destination = internal_prefix_pathspec(dst_w_slash, argv, argc, DUP_BASENAME);
+ dst_mode = SKIP_WORKTREE_DIR;
+ } else if (argc != 1) {
+ die(_("destination '%s' is not a directory"), dest_path[0]);
+ } else {
+ destination = dest_path;
+ /*
+ * <destination> is a file outside of sparse-checkout
+ * cone. Insist on cone mode here for backward
+ * compatibility. We don't want dst_mode to be assigned
+ * for a file when the repo is using no-cone mode (which
+ * is deprecated at this point) sparse-checkout. As
+ * SPARSE here is only considering cone-mode situation.
+ */
+ if (!path_in_cone_mode_sparse_checkout(destination[0], &the_index))
+ dst_mode = SPARSE;
+ }
+ }
+ if (dst_w_slash != dest_path[0]) {
+ free((char *)dst_w_slash);
+ dst_w_slash = NULL;
+ }
+
+ /* Checking */
+ for (i = 0; i < argc; i++) {
+ const char *src = source[i], *dst = destination[i];
+ int length;
+ const char *bad = NULL;
+ int skip_sparse = 0;
+
+ if (show_only)
+ printf(_("Checking rename of '%s' to '%s'\n"), src, dst);
+
+ length = strlen(src);
+ if (lstat(src, &st) < 0) {
+ int pos;
+ const struct cache_entry *ce;
+
+ pos = index_name_pos(&the_index, src, length);
+ if (pos < 0) {
+ const char *src_w_slash = add_slash(src);
+ if (!path_in_sparse_checkout(src_w_slash, &the_index) &&
+ empty_dir_has_sparse_contents(src)) {
+ modes[i] |= SKIP_WORKTREE_DIR;
+ goto dir_check;
+ }
+ /* only error if existence is expected. */
+ if (!(modes[i] & SPARSE))
+ bad = _("bad source");
+ goto act_on_entry;
+ }
+ ce = the_index.cache[pos];
+ if (!ce_skip_worktree(ce)) {
+ bad = _("bad source");
+ goto act_on_entry;
+ }
+ if (!ignore_sparse) {
+ string_list_append(&only_match_skip_worktree, src);
+ goto act_on_entry;
+ }
+ /* Check if dst exists in index */
+ if (index_name_pos(&the_index, dst, strlen(dst)) < 0) {
+ modes[i] |= SPARSE;
+ goto act_on_entry;
+ }
+ if (!force) {
+ bad = _("destination exists");
+ goto act_on_entry;
+ }
+ modes[i] |= SPARSE;
+ goto act_on_entry;
+ }
+ if (!strncmp(src, dst, length) &&
+ (dst[length] == 0 || dst[length] == '/')) {
+ bad = _("can not move directory into itself");
+ goto act_on_entry;
+ }
+ if (S_ISDIR(st.st_mode)
+ && lstat(dst, &st) == 0) {
+ bad = _("cannot move directory over file");
+ goto act_on_entry;
+ }
+
+dir_check:
+ if (S_ISDIR(st.st_mode)) {
+ int j, dst_len, n;
+ int first = index_name_pos(&the_index, src, length), last;
+
+ if (first >= 0) {
+ prepare_move_submodule(src, first,
+ submodule_gitfile + i);
+ goto act_on_entry;
+ } else if (index_range_of_same_dir(src, length,
+ &first, &last) < 1) {
+ bad = _("source directory is empty");
+ goto act_on_entry;
+ }
+
+ /* last - first >= 1 */
+ modes[i] |= WORKING_DIRECTORY;
+
+ ALLOC_GROW(src_dir, src_dir_nr + 1, src_dir_alloc);
+ src_dir[src_dir_nr++] = src;
+
+ n = argc + last - first;
+ REALLOC_ARRAY(source, n);
+ REALLOC_ARRAY(destination, n);
+ REALLOC_ARRAY(modes, n);
+ REALLOC_ARRAY(submodule_gitfile, n);
+
+ dst = add_slash(dst);
+ dst_len = strlen(dst);
+
+ for (j = 0; j < last - first; j++) {
+ const struct cache_entry *ce = the_index.cache[first + j];
+ const char *path = ce->name;
+ source[argc + j] = path;
+ destination[argc + j] =
+ prefix_path(dst, dst_len, path + length + 1);
+ memset(modes + argc + j, 0, sizeof(enum update_mode));
+ modes[argc + j] |= ce_skip_worktree(ce) ? SPARSE : INDEX;
+ submodule_gitfile[argc + j] = NULL;
+ }
+ argc += last - first;
+ goto act_on_entry;
+ }
+ if (!(ce = index_file_exists(&the_index, src, length, 0))) {
+ bad = _("not under version control");
+ goto act_on_entry;
+ }
+ if (ce_stage(ce)) {
+ bad = _("conflicted");
+ goto act_on_entry;
+ }
+ if (lstat(dst, &st) == 0 &&
+ (!ignore_case || strcasecmp(src, dst))) {
+ bad = _("destination exists");
+ if (force) {
+ /*
+ * only files can overwrite each other:
+ * check both source and destination
+ */
+ if (S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) {
+ if (verbose)
+ warning(_("overwriting '%s'"), dst);
+ bad = NULL;
+ } else
+ bad = _("Cannot overwrite");
+ }
+ goto act_on_entry;
+ }
+ if (string_list_has_string(&src_for_dst, dst)) {
+ bad = _("multiple sources for the same target");
+ goto act_on_entry;
+ }
+ if (is_dir_sep(dst[strlen(dst) - 1])) {
+ bad = _("destination directory does not exist");
+ goto act_on_entry;
+ }
+
+ if (ignore_sparse &&
+ (dst_mode & (SKIP_WORKTREE_DIR | SPARSE)) &&
+ index_entry_exists(&the_index, dst, strlen(dst))) {
+ bad = _("destination exists in the index");
+ if (force) {
+ if (verbose)
+ warning(_("overwriting '%s'"), dst);
+ bad = NULL;
+ } else {
+ goto act_on_entry;
+ }
+ }
+ /*
+ * We check if the paths are in the sparse-checkout
+ * definition as a very final check, since that
+ * allows us to point the user to the --sparse
+ * option as a way to have a successful run.
+ */
+ if (!ignore_sparse &&
+ !path_in_sparse_checkout(src, &the_index)) {
+ string_list_append(&only_match_skip_worktree, src);
+ skip_sparse = 1;
+ }
+ if (!ignore_sparse &&
+ !path_in_sparse_checkout(dst, &the_index)) {
+ string_list_append(&only_match_skip_worktree, dst);
+ skip_sparse = 1;
+ }
+
+ if (skip_sparse)
+ goto remove_entry;
+
+ string_list_insert(&src_for_dst, dst);
+
+act_on_entry:
+ if (!bad)
+ continue;
+ if (!ignore_errors)
+ die(_("%s, source=%s, destination=%s"),
+ bad, src, dst);
+remove_entry:
+ if (--argc > 0) {
+ int n = argc - i;
+ MOVE_ARRAY(source + i, source + i + 1, n);
+ MOVE_ARRAY(destination + i, destination + i + 1, n);
+ MOVE_ARRAY(modes + i, modes + i + 1, n);
+ MOVE_ARRAY(submodule_gitfile + i,
+ submodule_gitfile + i + 1, n);
+ i--;
+ }
+ }
+
+ if (only_match_skip_worktree.nr) {
+ advise_on_updating_sparse_paths(&only_match_skip_worktree);
+ if (!ignore_errors)
+ return 1;
+ }
+
+ for (i = 0; i < argc; i++) {
+ const char *src = source[i], *dst = destination[i];
+ enum update_mode mode = modes[i];
+ int pos;
+ int sparse_and_dirty = 0;
+ struct checkout state = CHECKOUT_INIT;
+ state.istate = &the_index;
+
+ if (force)
+ state.force = 1;
+ if (show_only || verbose)
+ printf(_("Renaming %s to %s\n"), src, dst);
+ if (show_only)
+ continue;
+ if (!(mode & (INDEX | SPARSE | SKIP_WORKTREE_DIR)) &&
+ !(dst_mode & (SKIP_WORKTREE_DIR | SPARSE)) &&
+ rename(src, dst) < 0) {
+ if (ignore_errors)
+ continue;
+ die_errno(_("renaming '%s' failed"), src);
+ }
+ if (submodule_gitfile[i]) {
+ if (!update_path_in_gitmodules(src, dst))
+ gitmodules_modified = 1;
+ if (submodule_gitfile[i] != SUBMODULE_WITH_GITDIR)
+ connect_work_tree_and_git_dir(dst,
+ submodule_gitfile[i],
+ 1);
+ }
+
+ if (mode & (WORKING_DIRECTORY | SKIP_WORKTREE_DIR))
+ continue;
+
+ pos = index_name_pos(&the_index, src, strlen(src));
+ assert(pos >= 0);
+ if (!(mode & SPARSE) && !lstat(src, &st))
+ sparse_and_dirty = ie_modified(&the_index,
+ the_index.cache[pos],
+ &st,
+ 0);
+ rename_index_entry_at(&the_index, pos, dst);
+
+ if (ignore_sparse &&
+ core_apply_sparse_checkout &&
+ core_sparse_checkout_cone) {
+ /*
+ * NEEDSWORK: we are *not* paying attention to
+ * "out-to-out" move (<source> is out-of-cone and
+ * <destination> is out-of-cone) at this point. It
+ * should be added in a future patch.
+ */
+ if ((mode & SPARSE) &&
+ path_in_sparse_checkout(dst, &the_index)) {
+ /* from out-of-cone to in-cone */
+ int dst_pos = cache_name_pos(dst, strlen(dst));
+ struct cache_entry *dst_ce = the_index.cache[dst_pos];
+
+ dst_ce->ce_flags &= ~CE_SKIP_WORKTREE;
+
+ if (checkout_entry(dst_ce, &state, NULL, NULL))
+ die(_("cannot checkout %s"), dst_ce->name);
+ } else if ((dst_mode & (SKIP_WORKTREE_DIR | SPARSE)) &&
+ !(mode & SPARSE) &&
+ !path_in_sparse_checkout(dst, &the_index)) {
+ /* from in-cone to out-of-cone */
+ int dst_pos = cache_name_pos(dst, strlen(dst));
+ struct cache_entry *dst_ce = the_index.cache[dst_pos];
+
+ /*
+ * if src is clean, it will suffice to remove it
+ */
+ if (!sparse_and_dirty) {
+ dst_ce->ce_flags |= CE_SKIP_WORKTREE;
+ unlink_or_warn(src);
+ } else {
+ /*
+ * if src is dirty, move it to the
+ * destination and create leading
+ * dirs if necessary
+ */
+ char *dst_dup = xstrdup(dst);
+ string_list_append(&dirty_paths, dst);
+ safe_create_leading_directories(dst_dup);
+ FREE_AND_NULL(dst_dup);
+ rename(src, dst);
+ }
+ }
+ }
+ }
+
+ /*
+ * clean up the empty src_dirs
+ */
+ for (i = 0; i < src_dir_nr; i++) {
+ int dummy;
+ strbuf_addstr(&a_src_dir, src_dir[i]);
+ /*
+ * if entries under a_src_dir are all moved away,
+ * recursively remove a_src_dir to clean up
+ */
+ if (index_range_of_same_dir(a_src_dir.buf, a_src_dir.len,
+ &dummy, &dummy) < 1) {
+ remove_dir_recursively(&a_src_dir, 0);
+ }
+ strbuf_reset(&a_src_dir);
+ }
+
+ strbuf_release(&a_src_dir);
+ free(src_dir);
+
+ if (dirty_paths.nr)
+ advise_on_moving_dirty_path(&dirty_paths);
+
+ if (gitmodules_modified)
+ stage_updated_gitmodules(&the_index);
+
+ if (write_locked_index(&the_index, &lock_file,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("Unable to write new index file"));
+
+ string_list_clear(&src_for_dst, 0);
+ string_list_clear(&dirty_paths, 0);
+ UNLEAK(source);
+ UNLEAK(dest_path);
+ free(submodule_gitfile);
+ free(modes);
+ return 0;
+}
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
new file mode 100644
index 0000000..15535e9
--- /dev/null
+++ b/builtin/name-rev.c
@@ -0,0 +1,686 @@
+#include "builtin.h"
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "commit.h"
+#include "tag.h"
+#include "refs.h"
+#include "parse-options.h"
+#include "prio-queue.h"
+#include "hash-lookup.h"
+#include "commit-slab.h"
+#include "commit-graph.h"
+
+/*
+ * One day. See the 'name a rev shortly after epoch' test in t6120 when
+ * changing this value
+ */
+#define CUTOFF_DATE_SLOP 86400
+
+struct rev_name {
+ const char *tip_name;
+ timestamp_t taggerdate;
+ int generation;
+ int distance;
+ int from_tag;
+};
+
+define_commit_slab(commit_rev_name, struct rev_name);
+
+static timestamp_t generation_cutoff = GENERATION_NUMBER_INFINITY;
+static timestamp_t cutoff = TIME_MAX;
+static struct commit_rev_name rev_names;
+
+/* Disable the cutoff checks entirely */
+static void disable_cutoff(void)
+{
+ generation_cutoff = 0;
+ cutoff = 0;
+}
+
+/* Cut off searching any commits older than this one */
+static void set_commit_cutoff(struct commit *commit)
+{
+ if (cutoff > commit->date)
+ cutoff = commit->date;
+
+ if (generation_cutoff) {
+ timestamp_t generation = commit_graph_generation(commit);
+
+ if (generation_cutoff > generation)
+ generation_cutoff = generation;
+ }
+}
+
+/*
+ * Adjust the commit date cutoff with a slop to allow for slightly incorrect
+ * commit timestamps in case of clock skew.
+ */
+static void adjust_cutoff_timestamp_for_slop(void)
+{
+ if (cutoff) {
+ /* check for underflow */
+ if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP)
+ cutoff = cutoff - CUTOFF_DATE_SLOP;
+ else
+ cutoff = TIME_MIN;
+ }
+}
+
+/*
+ * Check if a commit is before the cutoff. Prioritize generation numbers
+ * first, but use the commit timestamp if we lack generation data.
+ */
+static int commit_is_before_cutoff(struct commit *commit)
+{
+ if (generation_cutoff < GENERATION_NUMBER_INFINITY)
+ return generation_cutoff &&
+ commit_graph_generation(commit) < generation_cutoff;
+
+ return commit->date < cutoff;
+}
+
+/* How many generations are maximally preferred over _one_ merge traversal? */
+#define MERGE_TRAVERSAL_WEIGHT 65535
+
+static int is_valid_rev_name(const struct rev_name *name)
+{
+ return name && name->tip_name;
+}
+
+static struct rev_name *get_commit_rev_name(const struct commit *commit)
+{
+ struct rev_name *name = commit_rev_name_peek(&rev_names, commit);
+
+ return is_valid_rev_name(name) ? name : NULL;
+}
+
+static int effective_distance(int distance, int generation)
+{
+ return distance + (generation > 0 ? MERGE_TRAVERSAL_WEIGHT : 0);
+}
+
+static int is_better_name(struct rev_name *name,
+ timestamp_t taggerdate,
+ int generation,
+ int distance,
+ int from_tag)
+{
+ int name_distance = effective_distance(name->distance, name->generation);
+ int new_distance = effective_distance(distance, generation);
+
+ /*
+ * When comparing names based on tags, prefer names
+ * based on the older tag, even if it is farther away.
+ */
+ if (from_tag && name->from_tag)
+ return (name->taggerdate > taggerdate ||
+ (name->taggerdate == taggerdate &&
+ name_distance > new_distance));
+
+ /*
+ * We know that at least one of them is a non-tag at this point.
+ * Favor a tag over a non-tag.
+ */
+ if (name->from_tag != from_tag)
+ return from_tag;
+
+ /*
+ * We are now looking at two non-tags. Tiebreak to favor
+ * shorter hops.
+ */
+ if (name_distance != new_distance)
+ return name_distance > new_distance;
+
+ /* ... or tiebreak to favor older date */
+ if (name->taggerdate != taggerdate)
+ return name->taggerdate > taggerdate;
+
+ /* keep the current one if we cannot decide */
+ return 0;
+}
+
+static struct rev_name *create_or_update_name(struct commit *commit,
+ timestamp_t taggerdate,
+ int generation, int distance,
+ int from_tag)
+{
+ struct rev_name *name = commit_rev_name_at(&rev_names, commit);
+
+ if (is_valid_rev_name(name) &&
+ !is_better_name(name, taggerdate, generation, distance, from_tag))
+ return NULL;
+
+ name->taggerdate = taggerdate;
+ name->generation = generation;
+ name->distance = distance;
+ name->from_tag = from_tag;
+
+ return name;
+}
+
+static char *get_parent_name(const struct rev_name *name, int parent_number)
+{
+ struct strbuf sb = STRBUF_INIT;
+ size_t len;
+
+ strip_suffix(name->tip_name, "^0", &len);
+ if (name->generation > 0) {
+ strbuf_grow(&sb, len +
+ 1 + decimal_width(name->generation) +
+ 1 + decimal_width(parent_number));
+ strbuf_addf(&sb, "%.*s~%d^%d", (int)len, name->tip_name,
+ name->generation, parent_number);
+ } else {
+ strbuf_grow(&sb, len +
+ 1 + decimal_width(parent_number));
+ strbuf_addf(&sb, "%.*s^%d", (int)len, name->tip_name,
+ parent_number);
+ }
+ return strbuf_detach(&sb, NULL);
+}
+
+static void name_rev(struct commit *start_commit,
+ const char *tip_name, timestamp_t taggerdate,
+ int from_tag, int deref)
+{
+ struct prio_queue queue;
+ struct commit *commit;
+ struct commit **parents_to_queue = NULL;
+ size_t parents_to_queue_nr, parents_to_queue_alloc = 0;
+ struct rev_name *start_name;
+
+ parse_commit(start_commit);
+ if (commit_is_before_cutoff(start_commit))
+ return;
+
+ start_name = create_or_update_name(start_commit, taggerdate, 0, 0,
+ from_tag);
+ if (!start_name)
+ return;
+ if (deref)
+ start_name->tip_name = xstrfmt("%s^0", tip_name);
+ else
+ start_name->tip_name = xstrdup(tip_name);
+
+ memset(&queue, 0, sizeof(queue)); /* Use the prio_queue as LIFO */
+ prio_queue_put(&queue, start_commit);
+
+ while ((commit = prio_queue_get(&queue))) {
+ struct rev_name *name = get_commit_rev_name(commit);
+ struct commit_list *parents;
+ int parent_number = 1;
+
+ parents_to_queue_nr = 0;
+
+ for (parents = commit->parents;
+ parents;
+ parents = parents->next, parent_number++) {
+ struct commit *parent = parents->item;
+ struct rev_name *parent_name;
+ int generation, distance;
+
+ parse_commit(parent);
+ if (commit_is_before_cutoff(parent))
+ continue;
+
+ if (parent_number > 1) {
+ generation = 0;
+ distance = name->distance + MERGE_TRAVERSAL_WEIGHT;
+ } else {
+ generation = name->generation + 1;
+ distance = name->distance + 1;
+ }
+
+ parent_name = create_or_update_name(parent, taggerdate,
+ generation,
+ distance, from_tag);
+ if (parent_name) {
+ if (parent_number > 1)
+ parent_name->tip_name =
+ get_parent_name(name,
+ parent_number);
+ else
+ parent_name->tip_name = name->tip_name;
+ ALLOC_GROW(parents_to_queue,
+ parents_to_queue_nr + 1,
+ parents_to_queue_alloc);
+ parents_to_queue[parents_to_queue_nr] = parent;
+ parents_to_queue_nr++;
+ }
+ }
+
+ /* The first parent must come out first from the prio_queue */
+ while (parents_to_queue_nr)
+ prio_queue_put(&queue,
+ parents_to_queue[--parents_to_queue_nr]);
+ }
+
+ clear_prio_queue(&queue);
+ free(parents_to_queue);
+}
+
+static int subpath_matches(const char *path, const char *filter)
+{
+ const char *subpath = path;
+
+ while (subpath) {
+ if (!wildmatch(filter, subpath, 0))
+ return subpath - path;
+ subpath = strchr(subpath, '/');
+ if (subpath)
+ subpath++;
+ }
+ return -1;
+}
+
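+/*
+ * Illustrative abbreviations (ref names made up):
+ *   "refs/heads/main" -> "main"
+ *   "refs/tags/v2.0"  -> "tags/v2.0", or "v2.0" with unambiguous shortening
+ */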
+static const char *name_ref_abbrev(const char *refname, int shorten_unambiguous)
+{
+ if (shorten_unambiguous)
+ refname = shorten_unambiguous_ref(refname, 0);
+ else if (skip_prefix(refname, "refs/heads/", &refname))
+ ; /* refname already advanced */
+ else
+ skip_prefix(refname, "refs/", &refname);
+ return refname;
+}
+
+struct name_ref_data {
+ int tags_only;
+ int name_only;
+ struct string_list ref_filters;
+ struct string_list exclude_filters;
+};
+
+static struct tip_table {
+ struct tip_table_entry {
+ struct object_id oid;
+ const char *refname;
+ struct commit *commit;
+ timestamp_t taggerdate;
+ unsigned int from_tag:1;
+ unsigned int deref:1;
+ } *table;
+ int nr;
+ int alloc;
+ int sorted;
+} tip_table;
+
+static void add_to_tip_table(const struct object_id *oid, const char *refname,
+ int shorten_unambiguous, struct commit *commit,
+ timestamp_t taggerdate, int from_tag, int deref)
+{
+ refname = name_ref_abbrev(refname, shorten_unambiguous);
+
+ ALLOC_GROW(tip_table.table, tip_table.nr + 1, tip_table.alloc);
+ oidcpy(&tip_table.table[tip_table.nr].oid, oid);
+ tip_table.table[tip_table.nr].refname = xstrdup(refname);
+ tip_table.table[tip_table.nr].commit = commit;
+ tip_table.table[tip_table.nr].taggerdate = taggerdate;
+ tip_table.table[tip_table.nr].from_tag = from_tag;
+ tip_table.table[tip_table.nr].deref = deref;
+ tip_table.nr++;
+ tip_table.sorted = 0;
+}
+
+static int tipcmp(const void *a_, const void *b_)
+{
+ const struct tip_table_entry *a = a_, *b = b_;
+ return oidcmp(&a->oid, &b->oid);
+}
+
+static int cmp_by_tag_and_age(const void *a_, const void *b_)
+{
+ const struct tip_table_entry *a = a_, *b = b_;
+ int cmp;
+
+ /* Prefer tags. */
+ cmp = b->from_tag - a->from_tag;
+ if (cmp)
+ return cmp;
+
+ /* Older is better. */
+ if (a->taggerdate < b->taggerdate)
+ return -1;
+ return a->taggerdate != b->taggerdate;
+}
+
+static int name_ref(const char *path, const struct object_id *oid,
+ int flags UNUSED, void *cb_data)
+{
+ struct object *o = parse_object(the_repository, oid);
+ struct name_ref_data *data = cb_data;
+ int can_abbreviate_output = data->tags_only && data->name_only;
+ int deref = 0;
+ int from_tag = 0;
+ struct commit *commit = NULL;
+ timestamp_t taggerdate = TIME_MAX;
+
+ if (data->tags_only && !starts_with(path, "refs/tags/"))
+ return 0;
+
+ if (data->exclude_filters.nr) {
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &data->exclude_filters) {
+ if (subpath_matches(path, item->string) >= 0)
+ return 0;
+ }
+ }
+
+ if (data->ref_filters.nr) {
+ struct string_list_item *item;
+ int matched = 0;
+
+ /* See if any of the patterns match. */
+ for_each_string_list_item(item, &data->ref_filters) {
+ /*
+ * Check all patterns even after finding a match, so
+ * that we can see if a match with a subpath exists.
+ * When a user asked for 'refs/tags/v*' and 'v1.*',
+ * both of which match, the user is showing her
+ * willingness to accept a shortened output by having
+ * the 'v1.*' in the acceptable refnames, so we
+ * shouldn't stop when seeing 'refs/tags/v1.4' matches
+ * 'refs/tags/v*'. We should show it as 'v1.4'.
+ */
+ switch (subpath_matches(path, item->string)) {
+ case -1: /* did not match */
+ break;
+ case 0: /* matched fully */
+ matched = 1;
+ break;
+ default: /* matched subpath */
+ matched = 1;
+ can_abbreviate_output = 1;
+ break;
+ }
+ }
+
+ /* If none of the patterns matched, stop now */
+ if (!matched)
+ return 0;
+ }
+
+ while (o && o->type == OBJ_TAG) {
+ struct tag *t = (struct tag *) o;
+ if (!t->tagged)
+ break; /* broken repository */
+ o = parse_object(the_repository, &t->tagged->oid);
+ deref = 1;
+ taggerdate = t->date;
+ }
+ if (o && o->type == OBJ_COMMIT) {
+ commit = (struct commit *)o;
+ from_tag = starts_with(path, "refs/tags/");
+ if (taggerdate == TIME_MAX)
+ taggerdate = commit->date;
+ }
+
+ add_to_tip_table(oid, path, can_abbreviate_output, commit, taggerdate,
+ from_tag, deref);
+ return 0;
+}
+
+static void name_tips(void)
+{
+ int i;
+
+ /*
+ * Try to set better names first, so that worse ones spread
+ * less.
+ */
+ QSORT(tip_table.table, tip_table.nr, cmp_by_tag_and_age);
+ for (i = 0; i < tip_table.nr; i++) {
+ struct tip_table_entry *e = &tip_table.table[i];
+ if (e->commit) {
+ name_rev(e->commit, e->refname, e->taggerdate,
+ e->from_tag, e->deref);
+ }
+ }
+}
+
+static const struct object_id *nth_tip_table_ent(size_t ix, const void *table_)
+{
+ const struct tip_table_entry *table = table_;
+ return &table[ix].oid;
+}
+
+static const char *get_exact_ref_match(const struct object *o)
+{
+ int found;
+
+ if (!tip_table.table || !tip_table.nr)
+ return NULL;
+
+ if (!tip_table.sorted) {
+ QSORT(tip_table.table, tip_table.nr, tipcmp);
+ tip_table.sorted = 1;
+ }
+
+ found = oid_pos(&o->oid, tip_table.table, tip_table.nr,
+ nth_tip_table_ent);
+ if (0 <= found)
+ return tip_table.table[found].refname;
+ return NULL;
+}
+
+/* may return a constant string or use "buf" as scratch space */
+static const char *get_rev_name(const struct object *o, struct strbuf *buf)
+{
+ struct rev_name *n;
+ const struct commit *c;
+
+ if (o->type != OBJ_COMMIT)
+ return get_exact_ref_match(o);
+ c = (const struct commit *) o;
+ n = get_commit_rev_name(c);
+ if (!n)
+ return NULL;
+
+ if (!n->generation)
+ return n->tip_name;
+ else {
+ strbuf_reset(buf);
+ strbuf_addstr(buf, n->tip_name);
+ strbuf_strip_suffix(buf, "^0");
+ strbuf_addf(buf, "~%d", n->generation);
+ return buf->buf;
+ }
+}
+
+static void show_name(const struct object *obj,
+ const char *caller_name,
+ int always, int allow_undefined, int name_only)
+{
+ const char *name;
+ const struct object_id *oid = &obj->oid;
+ struct strbuf buf = STRBUF_INIT;
+
+ if (!name_only)
+ printf("%s ", caller_name ? caller_name : oid_to_hex(oid));
+ name = get_rev_name(obj, &buf);
+ if (name)
+ printf("%s\n", name);
+ else if (allow_undefined)
+ printf("undefined\n");
+ else if (always)
+ printf("%s\n", find_unique_abbrev(oid, DEFAULT_ABBREV));
+ else
+ die("cannot describe '%s'", oid_to_hex(oid));
+ strbuf_release(&buf);
+}
+
+static char const * const name_rev_usage[] = {
+ N_("git name-rev [<options>] <commit>..."),
+ N_("git name-rev [<options>] --all"),
+ N_("git name-rev [<options>] --annotate-stdin"),
+ NULL
+};
+
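+/*
+ * Illustrative --annotate-stdin behaviour (hash and name made up): an input
+ * line such as
+ *   commit 0123456789abcdef0123456789abcdef01234567
+ * is echoed with the resolved name appended:
+ *   commit 0123456789abcdef0123456789abcdef01234567 (v2.39.0~7)
+ * while --name-only substitutes the name for the hash instead.
+ */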
+static void name_rev_line(char *p, struct name_ref_data *data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int counter = 0;
+ char *p_start;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ for (p_start = p; *p; p++) {
+#define ishex(x) (isdigit((x)) || ((x) >= 'a' && (x) <= 'f'))
+ if (!ishex(*p))
+ counter = 0;
+ else if (++counter == hexsz &&
+ !ishex(*(p+1))) {
+ struct object_id oid;
+ const char *name = NULL;
+ char c = *(p+1);
+ int p_len = p - p_start + 1;
+
+ counter = 0;
+
+ *(p+1) = 0;
+ if (!get_oid(p - (hexsz - 1), &oid)) {
+ struct object *o =
+ lookup_object(the_repository, &oid);
+ if (o)
+ name = get_rev_name(o, &buf);
+ }
+ *(p+1) = c;
+
+ if (!name)
+ continue;
+
+ if (data->name_only)
+ printf("%.*s%s", p_len - hexsz, p_start, name);
+ else
+ printf("%.*s (%s)", p_len, p_start, name);
+ p_start = p + 1;
+ }
+ }
+
+ /* flush */
+ if (p_start != p)
+ fwrite(p_start, p - p_start, 1, stdout);
+
+ strbuf_release(&buf);
+}
+
+int cmd_name_rev(int argc, const char **argv, const char *prefix)
+{
+ struct object_array revs = OBJECT_ARRAY_INIT;
+ int all = 0, annotate_stdin = 0, transform_stdin = 0, allow_undefined = 1, always = 0, peel_tag = 0;
+ struct name_ref_data data = { 0, 0, STRING_LIST_INIT_NODUP, STRING_LIST_INIT_NODUP };
+ struct option opts[] = {
+ OPT_BOOL(0, "name-only", &data.name_only, N_("print only ref-based names (no object names)")),
+ OPT_BOOL(0, "tags", &data.tags_only, N_("only use tags to name the commits")),
+ OPT_STRING_LIST(0, "refs", &data.ref_filters, N_("pattern"),
+ N_("only use refs matching <pattern>")),
+ OPT_STRING_LIST(0, "exclude", &data.exclude_filters, N_("pattern"),
+ N_("ignore refs matching <pattern>")),
+ OPT_GROUP(""),
+ OPT_BOOL(0, "all", &all, N_("list all commits reachable from all refs")),
+ OPT_BOOL(0, "stdin", &transform_stdin, N_("deprecated: use --annotate-stdin instead")),
+ OPT_BOOL(0, "annotate-stdin", &annotate_stdin, N_("annotate text from stdin")),
+ OPT_BOOL(0, "undefined", &allow_undefined, N_("allow to print `undefined` names (default)")),
+ OPT_BOOL(0, "always", &always,
+ N_("show abbreviated commit object as fallback")),
+ {
+ /* A Hidden OPT_BOOL */
+ OPTION_SET_INT, 0, "peel-tag", &peel_tag, NULL,
+ N_("dereference tags in the input (internal use)"),
+ PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, NULL, 1,
+ },
+ OPT_END(),
+ };
+
+ init_commit_rev_name(&rev_names);
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, opts, name_rev_usage, 0);
+
+ if (transform_stdin) {
+ warning("--stdin is deprecated. Please use --annotate-stdin instead, "
+ "which is functionally equivalent.\n"
+ "This option will be removed in a future release.");
+ annotate_stdin = 1;
+ }
+
+ if (all + annotate_stdin + !!argc > 1) {
+ error("Specify either a list, or --all, not both!");
+ usage_with_options(name_rev_usage, opts);
+ }
+ if (all || annotate_stdin)
+ disable_cutoff();
+
+ for (; argc; argc--, argv++) {
+ struct object_id oid;
+ struct object *object;
+ struct commit *commit;
+
+ if (get_oid(*argv, &oid)) {
+ fprintf(stderr, "Could not get sha1 for %s. Skipping.\n",
+ *argv);
+ continue;
+ }
+
+ commit = NULL;
+ object = parse_object(the_repository, &oid);
+ if (object) {
+ struct object *peeled = deref_tag(the_repository,
+ object, *argv, 0);
+ if (peeled && peeled->type == OBJ_COMMIT)
+ commit = (struct commit *)peeled;
+ }
+
+ if (!object) {
+ fprintf(stderr, "Could not get object for %s. Skipping.\n",
+ *argv);
+ continue;
+ }
+
+ if (commit)
+ set_commit_cutoff(commit);
+
+ if (peel_tag) {
+ if (!commit) {
+ fprintf(stderr, "Could not get commit for %s. Skipping.\n",
+ *argv);
+ continue;
+ }
+ object = (struct object *)commit;
+ }
+ add_object_array(object, *argv, &revs);
+ }
+
+ adjust_cutoff_timestamp_for_slop();
+
+ for_each_ref(name_ref, &data);
+ name_tips();
+
+ if (annotate_stdin) {
+ struct strbuf sb = STRBUF_INIT;
+
+ while (strbuf_getline(&sb, stdin) != EOF) {
+ strbuf_addch(&sb, '\n');
+ name_rev_line(sb.buf, &data);
+ }
+ strbuf_release(&sb);
+ } else if (all) {
+ int i, max;
+
+ max = get_max_object_index();
+ for (i = 0; i < max; i++) {
+ struct object *obj = get_indexed_object(i);
+ if (!obj || obj->type != OBJ_COMMIT)
+ continue;
+ show_name(obj, NULL,
+ always, allow_undefined, data.name_only);
+ }
+ } else {
+ int i;
+ for (i = 0; i < revs.nr; i++)
+ show_name(revs.objects[i].item, revs.objects[i].name,
+ always, allow_undefined, data.name_only);
+ }
+
+ UNLEAK(revs);
+ return 0;
+}
diff --git a/builtin/notes.c b/builtin/notes.c
new file mode 100644
index 0000000..80d9dfd
--- /dev/null
+++ b/builtin/notes.c
@@ -0,0 +1,1035 @@
+/*
+ * Builtin "git notes"
+ *
+ * Copyright (c) 2010 Johan Herland <johan@herland.net>
+ *
+ * Based on git-notes.sh by Johannes Schindelin,
+ * and builtin/tag.c by Kristian Høgsberg and Carlos Rica.
+ */
+
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "notes.h"
+#include "object-store.h"
+#include "repository.h"
+#include "blob.h"
+#include "pretty.h"
+#include "refs.h"
+#include "exec-cmd.h"
+#include "run-command.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "notes-merge.h"
+#include "notes-utils.h"
+#include "worktree.h"
+
+static const char * const git_notes_usage[] = {
+ N_("git notes [--ref <notes-ref>] [list [<object>]]"),
+ N_("git notes [--ref <notes-ref>] add [-f] [--allow-empty] [-m <msg> | -F <file> | (-c | -C) <object>] [<object>]"),
+ N_("git notes [--ref <notes-ref>] copy [-f] <from-object> <to-object>"),
+ N_("git notes [--ref <notes-ref>] append [--allow-empty] [-m <msg> | -F <file> | (-c | -C) <object>] [<object>]"),
+ N_("git notes [--ref <notes-ref>] edit [--allow-empty] [<object>]"),
+ N_("git notes [--ref <notes-ref>] show [<object>]"),
+ N_("git notes [--ref <notes-ref>] merge [-v | -q] [-s <strategy>] <notes-ref>"),
+ "git notes merge --commit [-v | -q]",
+ "git notes merge --abort [-v | -q]",
+ N_("git notes [--ref <notes-ref>] remove [<object>...]"),
+ N_("git notes [--ref <notes-ref>] prune [-n] [-v]"),
+ N_("git notes [--ref <notes-ref>] get-ref"),
+ NULL
+};
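+/*
+ * Illustrative use (message made up):
+ *   git notes add -m "Tested-by: A U Thor" HEAD
+ *   git notes show HEAD
+ */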
+
+static const char * const git_notes_list_usage[] = {
+ N_("git notes [list [<object>]]"),
+ NULL
+};
+
+static const char * const git_notes_add_usage[] = {
+ N_("git notes add [<options>] [<object>]"),
+ NULL
+};
+
+static const char * const git_notes_copy_usage[] = {
+ N_("git notes copy [<options>] <from-object> <to-object>"),
+ N_("git notes copy --stdin [<from-object> <to-object>]..."),
+ NULL
+};
+
+static const char * const git_notes_append_usage[] = {
+ N_("git notes append [<options>] [<object>]"),
+ NULL
+};
+
+static const char * const git_notes_edit_usage[] = {
+ N_("git notes edit [<object>]"),
+ NULL
+};
+
+static const char * const git_notes_show_usage[] = {
+ N_("git notes show [<object>]"),
+ NULL
+};
+
+static const char * const git_notes_merge_usage[] = {
+ N_("git notes merge [<options>] <notes-ref>"),
+ N_("git notes merge --commit [<options>]"),
+ N_("git notes merge --abort [<options>]"),
+ NULL
+};
+
+static const char * const git_notes_remove_usage[] = {
+ N_("git notes remove [<object>]"),
+ NULL
+};
+
+static const char * const git_notes_prune_usage[] = {
+ N_("git notes prune [<options>]"),
+ NULL
+};
+
+static const char * const git_notes_get_ref_usage[] = {
+ "git notes get-ref",
+ NULL
+};
+
+static const char note_template[] =
+ N_("Write/edit the notes for the following object:");
+
+struct note_data {
+ int given;
+ int use_editor;
+ char *edit_path;
+ struct strbuf buf;
+};
+
+static void free_note_data(struct note_data *d)
+{
+ if (d->edit_path) {
+ unlink_or_warn(d->edit_path);
+ free(d->edit_path);
+ }
+ strbuf_release(&d->buf);
+}
+
+static int list_each_note(const struct object_id *object_oid,
+ const struct object_id *note_oid, char *note_path,
+ void *cb_data)
+{
+ printf("%s %s\n", oid_to_hex(note_oid), oid_to_hex(object_oid));
+ return 0;
+}
+
+static void copy_obj_to_fd(int fd, const struct object_id *oid)
+{
+ unsigned long size;
+ enum object_type type;
+ char *buf = read_object_file(oid, &type, &size);
+ if (buf) {
+ if (size)
+ write_or_die(fd, buf, size);
+ free(buf);
+ }
+}
+
+static void write_commented_object(int fd, const struct object_id *object)
+{
+ struct child_process show = CHILD_PROCESS_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf cbuf = STRBUF_INIT;
+
+ /* Invoke "git show --stat --no-notes $object" */
+ strvec_pushl(&show.args, "show", "--stat", "--no-notes",
+ oid_to_hex(object), NULL);
+ show.no_stdin = 1;
+ show.out = -1;
+ show.err = 0;
+ show.git_cmd = 1;
+ if (start_command(&show))
+ die(_("unable to start 'show' for object '%s'"),
+ oid_to_hex(object));
+
+ if (strbuf_read(&buf, show.out, 0) < 0)
+ die_errno(_("could not read 'show' output"));
+ strbuf_add_commented_lines(&cbuf, buf.buf, buf.len);
+ write_or_die(fd, cbuf.buf, cbuf.len);
+
+ strbuf_release(&cbuf);
+ strbuf_release(&buf);
+
+ if (finish_command(&show))
+ die(_("failed to finish 'show' for object '%s'"),
+ oid_to_hex(object));
+}
+
+static void prepare_note_data(const struct object_id *object, struct note_data *d,
+ const struct object_id *old_note)
+{
+ if (d->use_editor || !d->given) {
+ int fd;
+ struct strbuf buf = STRBUF_INIT;
+
+ /* write the template message before editing: */
+ d->edit_path = git_pathdup("NOTES_EDITMSG");
+ fd = xopen(d->edit_path, O_CREAT | O_TRUNC | O_WRONLY, 0600);
+
+ if (d->given)
+ write_or_die(fd, d->buf.buf, d->buf.len);
+ else if (old_note)
+ copy_obj_to_fd(fd, old_note);
+
+ strbuf_addch(&buf, '\n');
+ strbuf_add_commented_lines(&buf, "\n", strlen("\n"));
+ strbuf_add_commented_lines(&buf, _(note_template), strlen(_(note_template)));
+ strbuf_add_commented_lines(&buf, "\n", strlen("\n"));
+ write_or_die(fd, buf.buf, buf.len);
+
+ write_commented_object(fd, object);
+
+ close(fd);
+ strbuf_release(&buf);
+ strbuf_reset(&d->buf);
+
+ if (launch_editor(d->edit_path, &d->buf, NULL)) {
+ die(_("please supply the note contents using either -m or -F option"));
+ }
+ strbuf_stripspace(&d->buf, 1);
+ }
+}
+
+static void write_note_data(struct note_data *d, struct object_id *oid)
+{
+ if (write_object_file(d->buf.buf, d->buf.len, OBJ_BLOB, oid)) {
+ int status = die_message(_("unable to write note object"));
+
+ if (d->edit_path)
+ die_message(_("the note contents have been left in %s"),
+ d->edit_path);
+ exit(status);
+ }
+}
+
+static int parse_msg_arg(const struct option *opt, const char *arg, int unset)
+{
+ struct note_data *d = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ strbuf_grow(&d->buf, strlen(arg) + 2);
+ if (d->buf.len)
+ strbuf_addch(&d->buf, '\n');
+ strbuf_addstr(&d->buf, arg);
+ strbuf_stripspace(&d->buf, 0);
+
+ d->given = 1;
+ return 0;
+}
+
+static int parse_file_arg(const struct option *opt, const char *arg, int unset)
+{
+ struct note_data *d = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (d->buf.len)
+ strbuf_addch(&d->buf, '\n');
+ if (!strcmp(arg, "-")) {
+ if (strbuf_read(&d->buf, 0, 1024) < 0)
+ die_errno(_("cannot read '%s'"), arg);
+ } else if (strbuf_read_file(&d->buf, arg, 1024) < 0)
+ die_errno(_("could not open or read '%s'"), arg);
+ strbuf_stripspace(&d->buf, 0);
+
+ d->given = 1;
+ return 0;
+}
+
+static int parse_reuse_arg(const struct option *opt, const char *arg, int unset)
+{
+ struct note_data *d = opt->value;
+ char *buf;
+ struct object_id object;
+ enum object_type type;
+ unsigned long len;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (d->buf.len)
+ strbuf_addch(&d->buf, '\n');
+
+ if (get_oid(arg, &object))
+ die(_("failed to resolve '%s' as a valid ref."), arg);
+ if (!(buf = read_object_file(&object, &type, &len)))
+ die(_("failed to read object '%s'."), arg);
+ if (type != OBJ_BLOB) {
+ free(buf);
+ die(_("cannot read note data from non-blob object '%s'."), arg);
+ }
+ strbuf_add(&d->buf, buf, len);
+ free(buf);
+
+ d->given = 1;
+ return 0;
+}
+
+static int parse_reedit_arg(const struct option *opt, const char *arg, int unset)
+{
+ struct note_data *d = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ d->use_editor = 1;
+ return parse_reuse_arg(opt, arg, unset);
+}
+
+static int notes_copy_from_stdin(int force, const char *rewrite_cmd)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct notes_rewrite_cfg *c = NULL;
+ struct notes_tree *t = NULL;
+ int ret = 0;
+ const char *msg = "Notes added by 'git notes copy'";
+
+ if (rewrite_cmd) {
+ c = init_copy_notes_for_rewrite(rewrite_cmd);
+ if (!c)
+ return 0;
+ } else {
+ init_notes(NULL, NULL, NULL, NOTES_INIT_WRITABLE);
+ t = &default_notes_tree;
+ }
+
+ while (strbuf_getline_lf(&buf, stdin) != EOF) {
+ struct object_id from_obj, to_obj;
+ struct strbuf **split;
+ int err;
+
+ split = strbuf_split(&buf, ' ');
+ if (!split[0] || !split[1])
+ die(_("malformed input line: '%s'."), buf.buf);
+ strbuf_rtrim(split[0]);
+ strbuf_rtrim(split[1]);
+ if (get_oid(split[0]->buf, &from_obj))
+ die(_("failed to resolve '%s' as a valid ref."), split[0]->buf);
+ if (get_oid(split[1]->buf, &to_obj))
+ die(_("failed to resolve '%s' as a valid ref."), split[1]->buf);
+
+ if (rewrite_cmd)
+ err = copy_note_for_rewrite(c, &from_obj, &to_obj);
+ else
+ err = copy_note(t, &from_obj, &to_obj, force,
+ combine_notes_overwrite);
+
+ if (err) {
+ error(_("failed to copy notes from '%s' to '%s'"),
+ split[0]->buf, split[1]->buf);
+ ret = 1;
+ }
+
+ strbuf_list_free(split);
+ }
+
+ if (!rewrite_cmd) {
+ commit_notes(the_repository, t, msg);
+ free_notes(t);
+ } else {
+ finish_copy_notes_for_rewrite(the_repository, c, msg);
+ }
+ strbuf_release(&buf);
+ return ret;
+}
+
+static struct notes_tree *init_notes_check(const char *subcommand,
+ int flags)
+{
+ struct notes_tree *t;
+ const char *ref;
+ init_notes(NULL, NULL, NULL, flags);
+ t = &default_notes_tree;
+
+ ref = (flags & NOTES_INIT_WRITABLE) ? t->update_ref : t->ref;
+ if (!starts_with(ref, "refs/notes/"))
+ /*
+ * TRANSLATORS: the first %s will be replaced by a git
+ * notes command: 'add', 'merge', 'remove', etc.
+ */
+ die(_("refusing to %s notes in %s (outside of refs/notes/)"),
+ subcommand, ref);
+ return t;
+}
+
+static int list(int argc, const char **argv, const char *prefix)
+{
+ struct notes_tree *t;
+ struct object_id object;
+ const struct object_id *note;
+ int retval = -1;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ if (argc)
+ argc = parse_options(argc, argv, prefix, options,
+ git_notes_list_usage, 0);
+
+ if (1 < argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_list_usage, options);
+ }
+
+ t = init_notes_check("list", 0);
+ if (argc) {
+ if (get_oid(argv[0], &object))
+ die(_("failed to resolve '%s' as a valid ref."), argv[0]);
+ note = get_note(t, &object);
+ if (note) {
+ puts(oid_to_hex(note));
+ retval = 0;
+ } else
+ retval = error(_("no note found for object %s."),
+ oid_to_hex(&object));
+ } else
+ retval = for_each_note(t, 0, list_each_note, NULL);
+
+ free_notes(t);
+ return retval;
+}
+
+static int append_edit(int argc, const char **argv, const char *prefix);
+
+static int add(int argc, const char **argv, const char *prefix)
+{
+ int force = 0, allow_empty = 0;
+ const char *object_ref;
+ struct notes_tree *t;
+ struct object_id object, new_note;
+ const struct object_id *note;
+ struct note_data d = { 0, 0, NULL, STRBUF_INIT };
+ struct option options[] = {
+ OPT_CALLBACK_F('m', "message", &d, N_("message"),
+ N_("note contents as a string"), PARSE_OPT_NONEG,
+ parse_msg_arg),
+ OPT_CALLBACK_F('F', "file", &d, N_("file"),
+ N_("note contents in a file"), PARSE_OPT_NONEG,
+ parse_file_arg),
+ OPT_CALLBACK_F('c', "reedit-message", &d, N_("object"),
+ N_("reuse and edit specified note object"), PARSE_OPT_NONEG,
+ parse_reedit_arg),
+ OPT_CALLBACK_F('C', "reuse-message", &d, N_("object"),
+ N_("reuse specified note object"), PARSE_OPT_NONEG,
+ parse_reuse_arg),
+ OPT_BOOL(0, "allow-empty", &allow_empty,
+ N_("allow storing empty note")),
+ OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, git_notes_add_usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ if (2 < argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_add_usage, options);
+ }
+
+ object_ref = argc > 1 ? argv[1] : "HEAD";
+
+ if (get_oid(object_ref, &object))
+ die(_("failed to resolve '%s' as a valid ref."), object_ref);
+
+ t = init_notes_check("add", NOTES_INIT_WRITABLE);
+ note = get_note(t, &object);
+
+ if (note) {
+ if (!force) {
+ free_notes(t);
+ if (d.given) {
+ free_note_data(&d);
+ return error(_("Cannot add notes. "
+ "Found existing notes for object %s. "
+ "Use '-f' to overwrite existing notes"),
+ oid_to_hex(&object));
+ }
+ /*
+ * Redirect to "edit" subcommand.
+ *
+ * We only end up here if none of -m/-F/-c/-C or -f are
+ * given. The original args are therefore still in
+ * argv[0-1].
+ */
+ argv[0] = "edit";
+ return append_edit(argc, argv, prefix);
+ }
+ fprintf(stderr, _("Overwriting existing notes for object %s\n"),
+ oid_to_hex(&object));
+ }
+
+ prepare_note_data(&object, &d, note);
+ if (d.buf.len || allow_empty) {
+ write_note_data(&d, &new_note);
+ if (add_note(t, &object, &new_note, combine_notes_overwrite))
+ BUG("combine_notes_overwrite failed");
+ commit_notes(the_repository, t,
+ "Notes added by 'git notes add'");
+ } else {
+ fprintf(stderr, _("Removing note for object %s\n"),
+ oid_to_hex(&object));
+ remove_note(t, object.hash);
+ commit_notes(the_repository, t,
+ "Notes removed by 'git notes add'");
+ }
+
+ free_note_data(&d);
+ free_notes(t);
+ return 0;
+}
+
+static int copy(int argc, const char **argv, const char *prefix)
+{
+ int retval = 0, force = 0, from_stdin = 0;
+ const struct object_id *from_note, *note;
+ const char *object_ref;
+ struct object_id object, from_obj;
+ struct notes_tree *t;
+ const char *rewrite_cmd = NULL;
+ struct option options[] = {
+ OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL(0, "stdin", &from_stdin, N_("read objects from stdin")),
+ OPT_STRING(0, "for-rewrite", &rewrite_cmd, N_("command"),
+ N_("load rewriting config for <command> (implies "
+ "--stdin)")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, git_notes_copy_usage,
+ 0);
+
+ if (from_stdin || rewrite_cmd) {
+ if (argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_copy_usage, options);
+ } else {
+ return notes_copy_from_stdin(force, rewrite_cmd);
+ }
+ }
+
+ if (argc < 1) {
+ error(_("too few arguments"));
+ usage_with_options(git_notes_copy_usage, options);
+ }
+ if (2 < argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_copy_usage, options);
+ }
+
+ if (get_oid(argv[0], &from_obj))
+ die(_("failed to resolve '%s' as a valid ref."), argv[0]);
+
+ object_ref = 1 < argc ? argv[1] : "HEAD";
+
+ if (get_oid(object_ref, &object))
+ die(_("failed to resolve '%s' as a valid ref."), object_ref);
+
+ t = init_notes_check("copy", NOTES_INIT_WRITABLE);
+ note = get_note(t, &object);
+
+ if (note) {
+ if (!force) {
+ retval = error(_("Cannot copy notes. Found existing "
+ "notes for object %s. Use '-f' to "
+ "overwrite existing notes"),
+ oid_to_hex(&object));
+ goto out;
+ }
+ fprintf(stderr, _("Overwriting existing notes for object %s\n"),
+ oid_to_hex(&object));
+ }
+
+ from_note = get_note(t, &from_obj);
+ if (!from_note) {
+ retval = error(_("missing notes on source object %s. Cannot "
+ "copy."), oid_to_hex(&from_obj));
+ goto out;
+ }
+
+ if (add_note(t, &object, from_note, combine_notes_overwrite))
+ BUG("combine_notes_overwrite failed");
+ commit_notes(the_repository, t,
+ "Notes added by 'git notes copy'");
+out:
+ free_notes(t);
+ return retval;
+}
+
+static int append_edit(int argc, const char **argv, const char *prefix)
+{
+ int allow_empty = 0;
+ const char *object_ref;
+ struct notes_tree *t;
+ struct object_id object, new_note;
+ const struct object_id *note;
+ char *logmsg;
+ const char * const *usage;
+ struct note_data d = { 0, 0, NULL, STRBUF_INIT };
+ struct option options[] = {
+ OPT_CALLBACK_F('m', "message", &d, N_("message"),
+ N_("note contents as a string"), PARSE_OPT_NONEG,
+ parse_msg_arg),
+ OPT_CALLBACK_F('F', "file", &d, N_("file"),
+ N_("note contents in a file"), PARSE_OPT_NONEG,
+ parse_file_arg),
+ OPT_CALLBACK_F('c', "reedit-message", &d, N_("object"),
+ N_("reuse and edit specified note object"), PARSE_OPT_NONEG,
+ parse_reedit_arg),
+ OPT_CALLBACK_F('C', "reuse-message", &d, N_("object"),
+ N_("reuse specified note object"), PARSE_OPT_NONEG,
+ parse_reuse_arg),
+ OPT_BOOL(0, "allow-empty", &allow_empty,
+ N_("allow storing empty note")),
+ OPT_END()
+ };
+ int edit = !strcmp(argv[0], "edit");
+
+ usage = edit ? git_notes_edit_usage : git_notes_append_usage;
+ argc = parse_options(argc, argv, prefix, options, usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ if (2 < argc) {
+ error(_("too many arguments"));
+ usage_with_options(usage, options);
+ }
+
+ if (d.given && edit)
+ fprintf(stderr, _("The -m/-F/-c/-C options have been deprecated "
+ "for the 'edit' subcommand.\n"
+ "Please use 'git notes add -f -m/-F/-c/-C' instead.\n"));
+
+ object_ref = 1 < argc ? argv[1] : "HEAD";
+
+ if (get_oid(object_ref, &object))
+ die(_("failed to resolve '%s' as a valid ref."), object_ref);
+
+ t = init_notes_check(argv[0], NOTES_INIT_WRITABLE);
+ note = get_note(t, &object);
+
+ prepare_note_data(&object, &d, edit && note ? note : NULL);
+
+ if (note && !edit) {
+ /* Append buf to previous note contents */
+ unsigned long size;
+ enum object_type type;
+ char *prev_buf = read_object_file(note, &type, &size);
+
+ strbuf_grow(&d.buf, size + 1);
+ if (d.buf.len && prev_buf && size)
+ strbuf_insertstr(&d.buf, 0, "\n");
+ if (prev_buf && size)
+ strbuf_insert(&d.buf, 0, prev_buf, size);
+ free(prev_buf);
+ }
+
+ if (d.buf.len || allow_empty) {
+ write_note_data(&d, &new_note);
+ if (add_note(t, &object, &new_note, combine_notes_overwrite))
+ BUG("combine_notes_overwrite failed");
+ logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
+ } else {
+ fprintf(stderr, _("Removing note for object %s\n"),
+ oid_to_hex(&object));
+ remove_note(t, object.hash);
+ logmsg = xstrfmt("Notes removed by 'git notes %s'", argv[0]);
+ }
+ commit_notes(the_repository, t, logmsg);
+
+ free(logmsg);
+ free_note_data(&d);
+ free_notes(t);
+ return 0;
+}
+
+static int show(int argc, const char **argv, const char *prefix)
+{
+ const char *object_ref;
+ struct notes_tree *t;
+ struct object_id object;
+ const struct object_id *note;
+ int retval;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, git_notes_show_usage,
+ 0);
+
+ if (1 < argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_show_usage, options);
+ }
+
+ object_ref = argc ? argv[0] : "HEAD";
+
+ if (get_oid(object_ref, &object))
+ die(_("failed to resolve '%s' as a valid ref."), object_ref);
+
+ t = init_notes_check("show", 0);
+ note = get_note(t, &object);
+
+ if (!note)
+ retval = error(_("no note found for object %s."),
+ oid_to_hex(&object));
+ else {
+ const char *show_args[3] = {"show", oid_to_hex(note), NULL};
+ retval = execv_git_cmd(show_args);
+ }
+ free_notes(t);
+ return retval;
+}
+
+static int merge_abort(struct notes_merge_options *o)
+{
+ int ret = 0;
+
+ /*
+ * Remove .git/NOTES_MERGE_PARTIAL and .git/NOTES_MERGE_REF, and call
+ * notes_merge_abort() to remove .git/NOTES_MERGE_WORKTREE.
+ */
+
+ if (delete_ref(NULL, "NOTES_MERGE_PARTIAL", NULL, 0))
+ ret += error(_("failed to delete ref NOTES_MERGE_PARTIAL"));
+ if (delete_ref(NULL, "NOTES_MERGE_REF", NULL, REF_NO_DEREF))
+ ret += error(_("failed to delete ref NOTES_MERGE_REF"));
+ if (notes_merge_abort(o))
+ ret += error(_("failed to remove 'git notes merge' worktree"));
+ return ret;
+}
+
+static int merge_commit(struct notes_merge_options *o)
+{
+ struct strbuf msg = STRBUF_INIT;
+ struct object_id oid, parent_oid;
+ struct notes_tree *t;
+ struct commit *partial;
+ struct pretty_print_context pretty_ctx;
+ void *local_ref_to_free;
+ int ret;
+
+ /*
+ * Read partial merge result from .git/NOTES_MERGE_PARTIAL,
+ * and target notes ref from .git/NOTES_MERGE_REF.
+ */
+
+ if (get_oid("NOTES_MERGE_PARTIAL", &oid))
+ die(_("failed to read ref NOTES_MERGE_PARTIAL"));
+ else if (!(partial = lookup_commit_reference(the_repository, &oid)))
+ die(_("could not find commit from NOTES_MERGE_PARTIAL."));
+ else if (parse_commit(partial))
+ die(_("could not parse commit from NOTES_MERGE_PARTIAL."));
+
+ if (partial->parents)
+ oidcpy(&parent_oid, &partial->parents->item->object.oid);
+ else
+ oidclr(&parent_oid);
+
+ CALLOC_ARRAY(t, 1);
+ init_notes(t, "NOTES_MERGE_PARTIAL", combine_notes_overwrite, 0);
+
+ o->local_ref = local_ref_to_free =
+ resolve_refdup("NOTES_MERGE_REF", 0, &oid, NULL);
+ if (!o->local_ref)
+ die(_("failed to resolve NOTES_MERGE_REF"));
+
+ if (notes_merge_commit(o, t, partial, &oid))
+ die(_("failed to finalize notes merge"));
+
+ /* Reuse existing commit message in reflog message */
+ memset(&pretty_ctx, 0, sizeof(pretty_ctx));
+ format_commit_message(partial, "%s", &msg, &pretty_ctx);
+ strbuf_trim(&msg);
+ strbuf_insertstr(&msg, 0, "notes: ");
+ update_ref(msg.buf, o->local_ref, &oid,
+ is_null_oid(&parent_oid) ? NULL : &parent_oid,
+ 0, UPDATE_REFS_DIE_ON_ERR);
+
+ free_notes(t);
+ strbuf_release(&msg);
+ ret = merge_abort(o);
+ free(local_ref_to_free);
+ return ret;
+}
+
+static int git_config_get_notes_strategy(const char *key,
+ enum notes_merge_strategy *strategy)
+{
+ char *value;
+
+ if (git_config_get_string(key, &value))
+ return 1;
+ if (parse_notes_merge_strategy(value, strategy))
+ git_die_config(key, _("unknown notes merge strategy %s"), value);
+
+ free(value);
+ return 0;
+}
+
+static int merge(int argc, const char **argv, const char *prefix)
+{
+ struct strbuf remote_ref = STRBUF_INIT, msg = STRBUF_INIT;
+ struct object_id result_oid;
+ struct notes_tree *t;
+ struct notes_merge_options o;
+ int do_merge = 0, do_commit = 0, do_abort = 0;
+ int verbosity = 0, result;
+ const char *strategy = NULL;
+ struct option options[] = {
+ OPT_GROUP(N_("General options")),
+ OPT__VERBOSITY(&verbosity),
+ OPT_GROUP(N_("Merge options")),
+ OPT_STRING('s', "strategy", &strategy, N_("strategy"),
+ N_("resolve notes conflicts using the given strategy "
+ "(manual/ours/theirs/union/cat_sort_uniq)")),
+ OPT_GROUP(N_("Committing unmerged notes")),
+ OPT_SET_INT_F(0, "commit", &do_commit,
+ N_("finalize notes merge by committing unmerged notes"),
+ 1, PARSE_OPT_NONEG),
+ OPT_GROUP(N_("Aborting notes merge resolution")),
+ OPT_SET_INT_F(0, "abort", &do_abort,
+ N_("abort notes merge"),
+ 1, PARSE_OPT_NONEG),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_notes_merge_usage, 0);
+
+ if (strategy || do_commit + do_abort == 0)
+ do_merge = 1;
+ if (do_merge + do_commit + do_abort != 1) {
+ error(_("cannot mix --commit, --abort or -s/--strategy"));
+ usage_with_options(git_notes_merge_usage, options);
+ }
+
+ if (do_merge && argc != 1) {
+ error(_("must specify a notes ref to merge"));
+ usage_with_options(git_notes_merge_usage, options);
+ } else if (!do_merge && argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_merge_usage, options);
+ }
+
+ init_notes_merge_options(the_repository, &o);
+ o.verbosity = verbosity + NOTES_MERGE_VERBOSITY_DEFAULT;
+
+ if (do_abort)
+ return merge_abort(&o);
+ if (do_commit)
+ return merge_commit(&o);
+
+ o.local_ref = default_notes_ref();
+ strbuf_addstr(&remote_ref, argv[0]);
+ expand_loose_notes_ref(&remote_ref);
+ o.remote_ref = remote_ref.buf;
+
+ t = init_notes_check("merge", NOTES_INIT_WRITABLE);
+
+ if (strategy) {
+ if (parse_notes_merge_strategy(strategy, &o.strategy)) {
+ error(_("unknown -s/--strategy: %s"), strategy);
+ usage_with_options(git_notes_merge_usage, options);
+ }
+ } else {
+ struct strbuf merge_key = STRBUF_INIT;
+ const char *short_ref = NULL;
+
+ if (!skip_prefix(o.local_ref, "refs/notes/", &short_ref))
+ BUG("local ref %s is outside of refs/notes/",
+ o.local_ref);
+
+ strbuf_addf(&merge_key, "notes.%s.mergeStrategy", short_ref);
+
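+ /* A ref-specific notes.<name>.mergeStrategy setting takes
+ * precedence over the generic notes.mergeStrategy. */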
+ if (git_config_get_notes_strategy(merge_key.buf, &o.strategy))
+ git_config_get_notes_strategy("notes.mergeStrategy", &o.strategy);
+
+ strbuf_release(&merge_key);
+ }
+
+ strbuf_addf(&msg, "notes: Merged notes from %s into %s",
+ remote_ref.buf, default_notes_ref());
+ strbuf_add(&(o.commit_msg), msg.buf + 7, msg.len - 7); /* skip "notes: " */
+
+ result = notes_merge(&o, t, &result_oid);
+
+ if (result >= 0) /* Merge resulted (trivially) in result_oid */
+ /* Update default notes ref with new commit */
+ update_ref(msg.buf, default_notes_ref(), &result_oid, NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
+ else { /* Merge has unresolved conflicts */
+ struct worktree **worktrees;
+ const struct worktree *wt;
+ /* Update .git/NOTES_MERGE_PARTIAL with partial merge result */
+ update_ref(msg.buf, "NOTES_MERGE_PARTIAL", &result_oid, NULL,
+ 0, UPDATE_REFS_DIE_ON_ERR);
+ /* Store ref-to-be-updated into .git/NOTES_MERGE_REF */
+ worktrees = get_worktrees();
+ wt = find_shared_symref(worktrees, "NOTES_MERGE_REF",
+ default_notes_ref());
+ if (wt)
+ die(_("a notes merge into %s is already in-progress at %s"),
+ default_notes_ref(), wt->path);
+ free_worktrees(worktrees);
+ if (create_symref("NOTES_MERGE_REF", default_notes_ref(), NULL))
+ die(_("failed to store link to current notes ref (%s)"),
+ default_notes_ref());
+ fprintf(stderr, _("Automatic notes merge failed. Fix conflicts in %s "
+ "and commit the result with 'git notes merge --commit', "
+ "or abort the merge with 'git notes merge --abort'.\n"),
+ git_path(NOTES_MERGE_WORKTREE));
+ }
+
+ free_notes(t);
+ strbuf_release(&remote_ref);
+ strbuf_release(&msg);
+ return result < 0; /* return non-zero on conflicts */
+}
+
+#define IGNORE_MISSING 1
+
+static int remove_one_note(struct notes_tree *t, const char *name, unsigned flag)
+{
+ int status;
+ struct object_id oid;
+ if (get_oid(name, &oid))
+ return error(_("Failed to resolve '%s' as a valid ref."), name);
+ status = remove_note(t, oid.hash);
+ if (status)
+ fprintf(stderr, _("Object %s has no note\n"), name);
+ else
+ fprintf(stderr, _("Removing note for object %s\n"), name);
+ return (flag & IGNORE_MISSING) ? 0 : status;
+}
+
+static int remove_cmd(int argc, const char **argv, const char *prefix)
+{
+ unsigned flag = 0;
+ int from_stdin = 0;
+ struct option options[] = {
+ OPT_BIT(0, "ignore-missing", &flag,
+ N_("attempt to remove non-existent note is not an error"),
+ IGNORE_MISSING),
+ OPT_BOOL(0, "stdin", &from_stdin,
+ N_("read object names from the standard input")),
+ OPT_END()
+ };
+ struct notes_tree *t;
+ int retval = 0;
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_notes_remove_usage, 0);
+
+ t = init_notes_check("remove", NOTES_INIT_WRITABLE);
+
+ if (!argc && !from_stdin) {
+ retval = remove_one_note(t, "HEAD", flag);
+ } else {
+ while (*argv) {
+ retval |= remove_one_note(t, *argv, flag);
+ argv++;
+ }
+ }
+ if (from_stdin) {
+ struct strbuf sb = STRBUF_INIT;
+ while (strbuf_getwholeline(&sb, stdin, '\n') != EOF) {
+ strbuf_rtrim(&sb);
+ retval |= remove_one_note(t, sb.buf, flag);
+ }
+ strbuf_release(&sb);
+ }
+ if (!retval)
+ commit_notes(the_repository, t,
+ "Notes removed by 'git notes remove'");
+ free_notes(t);
+ return retval;
+}
+
+static int prune(int argc, const char **argv, const char *prefix)
+{
+ struct notes_tree *t;
+ int show_only = 0, verbose = 0;
+ struct option options[] = {
+ OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
+ OPT__VERBOSE(&verbose, N_("report pruned notes")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, git_notes_prune_usage,
+ 0);
+
+ if (argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_prune_usage, options);
+ }
+
+ t = init_notes_check("prune", NOTES_INIT_WRITABLE);
+
+ prune_notes(t, (verbose ? NOTES_PRUNE_VERBOSE : 0) |
+ (show_only ? NOTES_PRUNE_VERBOSE|NOTES_PRUNE_DRYRUN : 0) );
+ if (!show_only)
+ commit_notes(the_repository, t,
+ "Notes removed by 'git notes prune'");
+ free_notes(t);
+ return 0;
+}
+
+static int get_ref(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = { OPT_END() };
+ argc = parse_options(argc, argv, prefix, options,
+ git_notes_get_ref_usage, 0);
+
+ if (argc) {
+ error(_("too many arguments"));
+ usage_with_options(git_notes_get_ref_usage, options);
+ }
+
+ puts(default_notes_ref());
+ return 0;
+}
+
+int cmd_notes(int argc, const char **argv, const char *prefix)
+{
+ const char *override_notes_ref = NULL;
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option options[] = {
+ OPT_STRING(0, "ref", &override_notes_ref, N_("notes-ref"),
+ N_("use notes from <notes-ref>")),
+ OPT_SUBCOMMAND("list", &fn, list),
+ OPT_SUBCOMMAND("add", &fn, add),
+ OPT_SUBCOMMAND("copy", &fn, copy),
+ OPT_SUBCOMMAND("append", &fn, append_edit),
+ OPT_SUBCOMMAND("edit", &fn, append_edit),
+ OPT_SUBCOMMAND("show", &fn, show),
+ OPT_SUBCOMMAND("merge", &fn, merge),
+ OPT_SUBCOMMAND("remove", &fn, remove_cmd),
+ OPT_SUBCOMMAND("prune", &fn, prune),
+ OPT_SUBCOMMAND("get-ref", &fn, get_ref),
+ OPT_END()
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, options, git_notes_usage,
+ PARSE_OPT_SUBCOMMAND_OPTIONAL);
+ if (!fn) {
+ if (argc) {
+ error(_("unknown subcommand: `%s'"), argv[0]);
+ usage_with_options(git_notes_usage, options);
+ }
+ fn = list;
+ }
+
+ if (override_notes_ref) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, override_notes_ref);
+ expand_notes_ref(&sb);
+ setenv("GIT_NOTES_REF", sb.buf, 1);
+ strbuf_release(&sb);
+ }
+
+ return !!fn(argc, argv, prefix);
+}
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
new file mode 100644
index 0000000..573d0b2
--- /dev/null
+++ b/builtin/pack-objects.c
@@ -0,0 +1,4519 @@
+#include "builtin.h"
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "attr.h"
+#include "object.h"
+#include "blob.h"
+#include "commit.h"
+#include "tag.h"
+#include "tree.h"
+#include "delta.h"
+#include "pack.h"
+#include "pack-revindex.h"
+#include "csum-file.h"
+#include "tree-walk.h"
+#include "diff.h"
+#include "revision.h"
+#include "list-objects.h"
+#include "list-objects-filter.h"
+#include "list-objects-filter-options.h"
+#include "pack-objects.h"
+#include "progress.h"
+#include "refs.h"
+#include "streaming.h"
+#include "thread-utils.h"
+#include "pack-bitmap.h"
+#include "delta-islands.h"
+#include "reachable.h"
+#include "oid-array.h"
+#include "strvec.h"
+#include "list.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "dir.h"
+#include "midx.h"
+#include "trace2.h"
+#include "shallow.h"
+#include "promisor-remote.h"
+#include "pack-mtimes.h"
+
+/*
+ * Objects we are going to pack are collected in the `to_pack` structure.
+ * It contains an array (dynamically expanded) of the object data, and a map
+ * that can resolve SHA1s to their position in the array.
+ */
+static struct packing_data to_pack;
+
+static inline struct object_entry *oe_delta(
+ const struct packing_data *pack,
+ const struct object_entry *e)
+{
+ if (!e->delta_idx)
+ return NULL;
+ if (e->ext_base)
+ return &pack->ext_bases[e->delta_idx - 1];
+ else
+ return &pack->objects[e->delta_idx - 1];
+}
+
+static inline unsigned long oe_delta_size(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ if (e->delta_size_valid)
+ return e->delta_size_;
+
+ /*
+ * pack->delta_size[] can't be NULL because oe_set_delta_size()
+ * must have been called when a new delta is saved with
+ * oe_set_delta().
+ * If oe_delta() returns NULL (i.e. default state, which means
+ * delta_size_valid is also false), then the caller must never
+ * call oe_delta_size().
+ */
+ return pack->delta_size[e - pack->objects];
+}
+
+unsigned long oe_get_size_slow(struct packing_data *pack,
+ const struct object_entry *e);
+
+static inline unsigned long oe_size(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ if (e->size_valid)
+ return e->size_;
+
+ return oe_get_size_slow(pack, e);
+}
+
+static inline void oe_set_delta(struct packing_data *pack,
+ struct object_entry *e,
+ struct object_entry *delta)
+{
+ if (delta)
+ e->delta_idx = (delta - pack->objects) + 1;
+ else
+ e->delta_idx = 0;
+}
+
+static inline struct object_entry *oe_delta_sibling(
+ const struct packing_data *pack,
+ const struct object_entry *e)
+{
+ if (e->delta_sibling_idx)
+ return &pack->objects[e->delta_sibling_idx - 1];
+ return NULL;
+}
+
+static inline struct object_entry *oe_delta_child(
+ const struct packing_data *pack,
+ const struct object_entry *e)
+{
+ if (e->delta_child_idx)
+ return &pack->objects[e->delta_child_idx - 1];
+ return NULL;
+}
+
+static inline void oe_set_delta_child(struct packing_data *pack,
+ struct object_entry *e,
+ struct object_entry *delta)
+{
+ if (delta)
+ e->delta_child_idx = (delta - pack->objects) + 1;
+ else
+ e->delta_child_idx = 0;
+}
+
+static inline void oe_set_delta_sibling(struct packing_data *pack,
+ struct object_entry *e,
+ struct object_entry *delta)
+{
+ if (delta)
+ e->delta_sibling_idx = (delta - pack->objects) + 1;
+ else
+ e->delta_sibling_idx = 0;
+}
+
+static inline void oe_set_size(struct packing_data *pack,
+ struct object_entry *e,
+ unsigned long size)
+{
+ if (size < pack->oe_size_limit) {
+ e->size_ = size;
+ e->size_valid = 1;
+ } else {
+ e->size_valid = 0;
+ if (oe_get_size_slow(pack, e) != size)
+ BUG("'size' is supposed to be the object size!");
+ }
+}
+
+static inline void oe_set_delta_size(struct packing_data *pack,
+ struct object_entry *e,
+ unsigned long size)
+{
+ if (size < pack->oe_delta_size_limit) {
+ e->delta_size_ = size;
+ e->delta_size_valid = 1;
+ } else {
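+ /* The delta size does not fit in the bitfield; spill it to a
+ * lazily allocated side array indexed by object position. The
+ * lock guards the one-time allocation against racing delta
+ * search threads. */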
+ packing_data_lock(pack);
+ if (!pack->delta_size)
+ ALLOC_ARRAY(pack->delta_size, pack->nr_alloc);
+ packing_data_unlock(pack);
+
+ pack->delta_size[e - pack->objects] = size;
+ e->delta_size_valid = 0;
+ }
+}
+
+#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
+#define SIZE(obj) oe_size(&to_pack, obj)
+#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
+#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
+#define DELTA(obj) oe_delta(&to_pack, obj)
+#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
+#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
+#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
+#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
+#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
+#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
+
+static const char *pack_usage[] = {
+ N_("git pack-objects --stdout [<options>] [< <ref-list> | < <object-list>]"),
+ N_("git pack-objects [<options>] <base-name> [< <ref-list> | < <object-list>]"),
+ NULL
+};
+
+static struct pack_idx_entry **written_list;
+static uint32_t nr_result, nr_written, nr_seen;
+static struct bitmap_index *bitmap_git;
+static uint32_t write_layer;
+
+static int non_empty;
+static int reuse_delta = 1, reuse_object = 1;
+static int keep_unreachable, unpack_unreachable, include_tag;
+static timestamp_t unpack_unreachable_expiration;
+static int pack_loose_unreachable;
+static int cruft;
+static timestamp_t cruft_expiration;
+static int local;
+static int have_non_local_packs;
+static int incremental;
+static int ignore_packed_keep_on_disk;
+static int ignore_packed_keep_in_core;
+static int allow_ofs_delta;
+static struct pack_idx_option pack_idx_opts;
+static const char *base_name;
+static int progress = 1;
+static int window = 10;
+static unsigned long pack_size_limit;
+static int depth = 50;
+static int delta_search_threads;
+static int pack_to_stdout;
+static int sparse;
+static int thin;
+static int num_preferred_base;
+static struct progress *progress_state;
+
+static struct packed_git *reuse_packfile;
+static uint32_t reuse_packfile_objects;
+static struct bitmap *reuse_packfile_bitmap;
+
+static int use_bitmap_index_default = 1;
+static int use_bitmap_index = -1;
+static int allow_pack_reuse = 1;
+static enum {
+ WRITE_BITMAP_FALSE = 0,
+ WRITE_BITMAP_QUIET,
+ WRITE_BITMAP_TRUE,
+} write_bitmap_index;
+static uint16_t write_bitmap_options = BITMAP_OPT_HASH_CACHE;
+
+static int exclude_promisor_objects;
+
+static int use_delta_islands;
+
+static unsigned long delta_cache_size = 0;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
+static unsigned long cache_max_small_delta_size = 1000;
+
+static unsigned long window_memory_limit = 0;
+
+static struct string_list uri_protocols = STRING_LIST_INIT_NODUP;
+
+enum missing_action {
+ MA_ERROR = 0, /* fail if any missing objects are encountered */
+ MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
+};
+static enum missing_action arg_missing_action;
+static show_object_fn fn_show_object;
+
+struct configured_exclusion {
+ struct oidmap_entry e;
+ char *pack_hash_hex;
+ char *uri;
+};
+static struct oidmap configured_exclusions;
+
+static struct oidset excluded_by_config;
+
+/*
+ * stats
+ */
+static uint32_t written, written_delta;
+static uint32_t reused, reused_delta;
+
+/*
+ * Indexed commits
+ */
+static struct commit **indexed_commits;
+static unsigned int indexed_commits_nr;
+static unsigned int indexed_commits_alloc;
+
+static void index_commit_for_bitmap(struct commit *commit)
+{
+ if (indexed_commits_nr >= indexed_commits_alloc) {
+ indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
+ REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
+ }
+
+ indexed_commits[indexed_commits_nr++] = commit;
+}
+
+static void *get_delta(struct object_entry *entry)
+{
+ unsigned long size, base_size, delta_size;
+ void *buf, *base_buf, *delta_buf;
+ enum object_type type;
+
+ buf = read_object_file(&entry->idx.oid, &type, &size);
+ if (!buf)
+ die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
+ base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
+ &base_size);
+ if (!base_buf)
+ die("unable to read %s",
+ oid_to_hex(&DELTA(entry)->idx.oid));
+ delta_buf = diff_delta(base_buf, base_size,
+ buf, size, &delta_size, 0);
+ /*
+ * We successfully computed this delta once but dropped it for
+ * memory reasons. Something is very wrong if this time we
+ * recompute and create a different delta.
+ */
+ if (!delta_buf || delta_size != DELTA_SIZE(entry))
+ BUG("delta size changed");
+ free(buf);
+ free(base_buf);
+ return delta_buf;
+}
+
+static unsigned long do_compress(void **pptr, unsigned long size)
+{
+ git_zstream stream;
+ void *in, *out;
+ unsigned long maxsize;
+
+ git_deflate_init(&stream, pack_compression_level);
+ maxsize = git_deflate_bound(&stream, size);
+
+ in = *pptr;
+ out = xmalloc(maxsize);
+ *pptr = out;
+
+ stream.next_in = in;
+ stream.avail_in = size;
+ stream.next_out = out;
+ stream.avail_out = maxsize;
+ while (git_deflate(&stream, Z_FINISH) == Z_OK)
+ ; /* nothing */
+ git_deflate_end(&stream);
+
+ free(in);
+ return stream.total_out;
+}
+
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
+ const struct object_id *oid)
+{
+ git_zstream stream;
+ unsigned char ibuf[1024 * 16];
+ unsigned char obuf[1024 * 16];
+ unsigned long olen = 0;
+
+ git_deflate_init(&stream, pack_compression_level);
+
+ for (;;) {
+ ssize_t readlen;
+ int zret = Z_OK;
+ readlen = read_istream(st, ibuf, sizeof(ibuf));
+ if (readlen == -1)
+ die(_("unable to read %s"), oid_to_hex(oid));
+
+ stream.next_in = ibuf;
+ stream.avail_in = readlen;
+ while ((stream.avail_in || readlen == 0) &&
+ (zret == Z_OK || zret == Z_BUF_ERROR)) {
+ stream.next_out = obuf;
+ stream.avail_out = sizeof(obuf);
+ zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
+ hashwrite(f, obuf, stream.next_out - obuf);
+ olen += stream.next_out - obuf;
+ }
+ if (stream.avail_in)
+ die(_("deflate error (%d)"), zret);
+ if (readlen == 0) {
+ if (zret != Z_STREAM_END)
+ die(_("deflate error (%d)"), zret);
+ break;
+ }
+ }
+ git_deflate_end(&stream);
+ return olen;
+}
+
+/*
+ * we are going to reuse the existing object data as is. make
+ * sure it is not corrupt.
+ */
+static int check_pack_inflate(struct packed_git *p,
+ struct pack_window **w_curs,
+ off_t offset,
+ off_t len,
+ unsigned long expect)
+{
+ git_zstream stream;
+ unsigned char fakebuf[4096], *in;
+ int st;
+
+ memset(&stream, 0, sizeof(stream));
+ git_inflate_init(&stream);
+ do {
+ in = use_pack(p, w_curs, offset, &stream.avail_in);
+ stream.next_in = in;
+ stream.next_out = fakebuf;
+ stream.avail_out = sizeof(fakebuf);
+ st = git_inflate(&stream, Z_FINISH);
+ offset += stream.next_in - in;
+ } while (st == Z_OK || st == Z_BUF_ERROR);
+ git_inflate_end(&stream);
+ return (st == Z_STREAM_END &&
+ stream.total_out == expect &&
+ stream.total_in == len) ? 0 : -1;
+}
+
+static void copy_pack_data(struct hashfile *f,
+ struct packed_git *p,
+ struct pack_window **w_curs,
+ off_t offset,
+ off_t len)
+{
+ unsigned char *in;
+ unsigned long avail;
+
+ while (len) {
+ in = use_pack(p, w_curs, offset, &avail);
+ if (avail > len)
+ avail = (unsigned long)len;
+ hashwrite(f, in, avail);
+ offset += avail;
+ len -= avail;
+ }
+}
+
+static inline int oe_size_greater_than(struct packing_data *pack,
+ const struct object_entry *lhs,
+ unsigned long rhs)
+{
+ if (lhs->size_valid)
+ return lhs->size_ > rhs;
+ if (rhs < pack->oe_size_limit) /* rhs < 2^x <= lhs ? */
+ return 1;
+ return oe_get_size_slow(pack, lhs) > rhs;
+}
+
+/* Return 0 if we will bust the pack-size limit */
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
+{
+ unsigned long size, datalen;
+ unsigned char header[MAX_PACK_OBJECT_HEADER],
+ dheader[MAX_PACK_OBJECT_HEADER];
+ unsigned hdrlen;
+ enum object_type type;
+ void *buf;
+ struct git_istream *st = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
+
+ if (!usable_delta) {
+ if (oe_type(entry) == OBJ_BLOB &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
+ (st = open_istream(the_repository, &entry->idx.oid, &type,
+ &size, NULL)) != NULL)
+ buf = NULL;
+ else {
+ buf = read_object_file(&entry->idx.oid, &type, &size);
+ if (!buf)
+ die(_("unable to read %s"),
+ oid_to_hex(&entry->idx.oid));
+ }
+ /*
+ * make sure no cached delta data remains from a
+ * previous attempt before a pack split occurred.
+ */
+ FREE_AND_NULL(entry->delta_data);
+ entry->z_delta_size = 0;
+ } else if (entry->delta_data) {
+ size = DELTA_SIZE(entry);
+ buf = entry->delta_data;
+ entry->delta_data = NULL;
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ } else {
+ buf = get_delta(entry);
+ size = DELTA_SIZE(entry);
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ }
+
+ if (st) /* large blob case, just assume we don't compress well */
+ datalen = size;
+ else if (entry->z_delta_size)
+ datalen = entry->z_delta_size;
+ else
+ datalen = do_compress(&buf, size);
+
+ /*
+ * The object header is a byte of 'type' followed by zero or
+ * more bytes of length.
+ */
+ hdrlen = encode_in_pack_object_header(header, sizeof(header),
+ type, size);
+
+ if (type == OBJ_OFS_DELTA) {
+ /*
+ * Deltas with relative base contain an additional
+ * encoding of the relative offset for the delta
+ * base from this object's position in the pack.
+ */
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
+ unsigned pos = sizeof(dheader) - 1;
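+ /* Encode the offset in the OFS_DELTA varint format: base-128
+ * digits, most significant first, with the high bit marking a
+ * continuation byte and the "--ofs" bias keeping encodings of
+ * different lengths from overlapping. */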
+ dheader[pos] = ofs & 127;
+ while (ofs >>= 7)
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
+ if (st)
+ close_istream(st);
+ free(buf);
+ return 0;
+ }
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
+ } else if (type == OBJ_REF_DELTA) {
+ /*
+ * Deltas with a base reference contain
+ * additional bytes for the base object ID.
+ */
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
+ if (st)
+ close_istream(st);
+ free(buf);
+ return 0;
+ }
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
+ } else {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
+ if (st)
+ close_istream(st);
+ free(buf);
+ return 0;
+ }
+ hashwrite(f, header, hdrlen);
+ }
+ if (st) {
+ datalen = write_large_blob_data(st, f, &entry->idx.oid);
+ close_istream(st);
+ } else {
+ hashwrite(f, buf, datalen);
+ free(buf);
+ }
+
+ return hdrlen + datalen;
+}
+
+/* Return 0 if we will bust the pack-size limit */
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
+{
+ struct packed_git *p = IN_PACK(entry);
+ struct pack_window *w_curs = NULL;
+ uint32_t pos;
+ off_t offset;
+ enum object_type type = oe_type(entry);
+ off_t datalen;
+ unsigned char header[MAX_PACK_OBJECT_HEADER],
+ dheader[MAX_PACK_OBJECT_HEADER];
+ unsigned hdrlen;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ unsigned long entry_size = SIZE(entry);
+
+ if (DELTA(entry))
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ hdrlen = encode_in_pack_object_header(header, sizeof(header),
+ type, entry_size);
+
+ offset = entry->in_pack_offset;
+ if (offset_to_pack_pos(p, offset, &pos) < 0)
+ die(_("write_reuse_object: could not locate %s, expected at "
+ "offset %"PRIuMAX" in pack %s"),
+ oid_to_hex(&entry->idx.oid), (uintmax_t)offset,
+ p->pack_name);
+ datalen = pack_pos_to_offset(p, pos + 1) - offset;
+ if (!pack_to_stdout && p->index_version > 1 &&
+ check_pack_crc(p, &w_curs, offset, datalen,
+ pack_pos_to_index(p, pos))) {
+ error(_("bad packed object CRC for %s"),
+ oid_to_hex(&entry->idx.oid));
+ unuse_pack(&w_curs);
+ return write_no_reuse_object(f, entry, limit, usable_delta);
+ }
+
+ offset += entry->in_pack_header_size;
+ datalen -= entry->in_pack_header_size;
+
+ if (!pack_to_stdout && p->index_version == 1 &&
+ check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
+ error(_("corrupt packed object for %s"),
+ oid_to_hex(&entry->idx.oid));
+ unuse_pack(&w_curs);
+ return write_no_reuse_object(f, entry, limit, usable_delta);
+ }
+
+ if (type == OBJ_OFS_DELTA) {
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
+ unsigned pos = sizeof(dheader) - 1;
+ dheader[pos] = ofs & 127;
+ while (ofs >>= 7)
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
+ reused_delta++;
+ } else if (type == OBJ_REF_DELTA) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
+ reused_delta++;
+ } else {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ hashwrite(f, header, hdrlen);
+ }
+ copy_pack_data(f, p, &w_curs, offset, datalen);
+ unuse_pack(&w_curs);
+ reused++;
+ return hdrlen + datalen;
+}
+
+/* Return 0 if we will bust the pack-size limit */
+static off_t write_object(struct hashfile *f,
+ struct object_entry *entry,
+ off_t write_offset)
+{
+ unsigned long limit;
+ off_t len;
+ int usable_delta, to_reuse;
+
+ if (!pack_to_stdout)
+ crc32_begin(f);
+
+ /* apply size limit if limited packsize and not first object */
+ if (!pack_size_limit || !nr_written)
+ limit = 0;
+ else if (pack_size_limit <= write_offset)
+ /*
+ * the earlier object did not fit the limit; avoid
+ * mistaking this for unlimited (i.e. limit = 0).
+ */
+ limit = 1;
+ else
+ limit = pack_size_limit - write_offset;
+
+ if (!DELTA(entry))
+ usable_delta = 0; /* no delta */
+ else if (!pack_size_limit)
+ usable_delta = 1; /* unlimited packfile */
+ else if (DELTA(entry)->idx.offset == (off_t)-1)
+ usable_delta = 0; /* base was written to another pack */
+ else if (DELTA(entry)->idx.offset)
+ usable_delta = 1; /* base already exists in this pack */
+ else
+ usable_delta = 0; /* base could end up in another pack */
+
+ if (!reuse_object)
+ to_reuse = 0; /* explicit */
+ else if (!IN_PACK(entry))
+ to_reuse = 0; /* can't reuse what we don't have */
+ else if (oe_type(entry) == OBJ_REF_DELTA ||
+ oe_type(entry) == OBJ_OFS_DELTA)
+ /* check_object() decided it for us ... */
+ to_reuse = usable_delta;
+ /* ... but pack split may override that */
+ else if (oe_type(entry) != entry->in_pack_type)
+ to_reuse = 0; /* pack has delta which is unusable */
+ else if (DELTA(entry))
+ to_reuse = 0; /* we want to pack afresh */
+ else
+ to_reuse = 1; /* we have it in-pack undeltified,
+ * and we do not need to deltify it.
+ */
+
+ if (!to_reuse)
+ len = write_no_reuse_object(f, entry, limit, usable_delta);
+ else
+ len = write_reuse_object(f, entry, limit, usable_delta);
+ if (!len)
+ return 0;
+
+ if (usable_delta)
+ written_delta++;
+ written++;
+ if (!pack_to_stdout)
+ entry->idx.crc32 = crc32_end(f);
+ return len;
+}
+
+enum write_one_status {
+ WRITE_ONE_SKIP = -1, /* already written */
+ WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
+ WRITE_ONE_WRITTEN = 1, /* normal */
+ WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
+};
+
+static enum write_one_status write_one(struct hashfile *f,
+ struct object_entry *e,
+ off_t *offset)
+{
+ off_t size;
+ int recursing;
+
+ /*
+ * we set offset to 1 (which is an impossible value) to mark
+ * the fact that this object is involved in "write its base
+ * first before writing a deltified object" recursion.
+ */
+ recursing = (e->idx.offset == 1);
+ if (recursing) {
+ warning(_("recursive delta detected for object %s"),
+ oid_to_hex(&e->idx.oid));
+ return WRITE_ONE_RECURSIVE;
+ } else if (e->idx.offset || e->preferred_base) {
+ /* offset is non-zero if object is written already. */
+ return WRITE_ONE_SKIP;
+ }
+
+ /* if we are deltified, write out base object first. */
+ if (DELTA(e)) {
+ e->idx.offset = 1; /* now recurse */
+ switch (write_one(f, DELTA(e), offset)) {
+ case WRITE_ONE_RECURSIVE:
+ /* we cannot depend on this one */
+ SET_DELTA(e, NULL);
+ break;
+ default:
+ break;
+ case WRITE_ONE_BREAK:
+ e->idx.offset = recursing;
+ return WRITE_ONE_BREAK;
+ }
+ }
+
+ e->idx.offset = *offset;
+ size = write_object(f, e, *offset);
+ if (!size) {
+ e->idx.offset = recursing;
+ return WRITE_ONE_BREAK;
+ }
+ written_list[nr_written++] = &e->idx;
+
+ /* make sure off_t is sufficiently large not to wrap */
+ if (signed_add_overflows(*offset, size))
+ die(_("pack too large for current definition of off_t"));
+ *offset += size;
+ return WRITE_ONE_WRITTEN;
+}
+
+static int mark_tagged(const char *path UNUSED, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ struct object_id peeled;
+ struct object_entry *entry = packlist_find(&to_pack, oid);
+
+ if (entry)
+ entry->tagged = 1;
+ if (!peel_iterated_oid(oid, &peeled)) {
+ entry = packlist_find(&to_pack, &peeled);
+ if (entry)
+ entry->tagged = 1;
+ }
+ return 0;
+}
+
+static inline unsigned char oe_layer(struct packing_data *pack,
+ struct object_entry *e)
+{
+ if (!pack->layer)
+ return 0;
+ return pack->layer[e - pack->objects];
+}
+
+static inline void add_to_write_order(struct object_entry **wo,
+ unsigned int *endp,
+ struct object_entry *e)
+{
+ if (e->filled || oe_layer(&to_pack, e) != write_layer)
+ return;
+ wo[(*endp)++] = e;
+ e->filled = 1;
+}
+
+static void add_descendants_to_write_order(struct object_entry **wo,
+ unsigned int *endp,
+ struct object_entry *e)
+{
+ int add_to_order = 1;
+ while (e) {
+ if (add_to_order) {
+ struct object_entry *s;
+ /* add this node... */
+ add_to_write_order(wo, endp, e);
+ /* all its siblings... */
+ for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
+ add_to_write_order(wo, endp, s);
+ }
+ }
+ /* drop down a level to add left subtree nodes if possible */
+ if (DELTA_CHILD(e)) {
+ add_to_order = 1;
+ e = DELTA_CHILD(e);
+ } else {
+ add_to_order = 0;
+ /* our sibling might have some children, it is next */
+ if (DELTA_SIBLING(e)) {
+ e = DELTA_SIBLING(e);
+ continue;
+ }
+ /* go back to our parent node */
+ e = DELTA(e);
+ while (e && !DELTA_SIBLING(e)) {
+ /* we're on the right side of a subtree, keep
+ * going up until we can go right again */
+ e = DELTA(e);
+ }
+ if (!e) {
+ /* done - we hit our original root node */
+ return;
+ }
+ /* pass it off to sibling at this level */
+ e = DELTA_SIBLING(e);
+ }
+ };
+}
+
+static void add_family_to_write_order(struct object_entry **wo,
+ unsigned int *endp,
+ struct object_entry *e)
+{
+ struct object_entry *root;
+
+ for (root = e; DELTA(root); root = DELTA(root))
+ ; /* nothing */
+ add_descendants_to_write_order(wo, endp, root);
+}
+
+static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
+{
+ unsigned int i, last_untagged;
+ struct object_entry *objects = to_pack.objects;
+
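+ /* First add the leading run of untagged objects in their
+ * original order, stopping at the first tagged one. */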
+ for (i = 0; i < to_pack.nr_objects; i++) {
+ if (objects[i].tagged)
+ break;
+ add_to_write_order(wo, wo_end, &objects[i]);
+ }
+ last_untagged = i;
+
+ /*
+ * Then fill all the tagged tips.
+ */
+ for (; i < to_pack.nr_objects; i++) {
+ if (objects[i].tagged)
+ add_to_write_order(wo, wo_end, &objects[i]);
+ }
+
+ /*
+ * And then all remaining commits and tags.
+ */
+ for (i = last_untagged; i < to_pack.nr_objects; i++) {
+ if (oe_type(&objects[i]) != OBJ_COMMIT &&
+ oe_type(&objects[i]) != OBJ_TAG)
+ continue;
+ add_to_write_order(wo, wo_end, &objects[i]);
+ }
+
+ /*
+ * And then all the trees.
+ */
+ for (i = last_untagged; i < to_pack.nr_objects; i++) {
+ if (oe_type(&objects[i]) != OBJ_TREE)
+ continue;
+ add_to_write_order(wo, wo_end, &objects[i]);
+ }
+
+ /*
+ * Finally all the rest in really tight order
+ */
+ for (i = last_untagged; i < to_pack.nr_objects; i++) {
+ if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
+ add_family_to_write_order(wo, wo_end, &objects[i]);
+ }
+}
+
+static struct object_entry **compute_write_order(void)
+{
+ uint32_t max_layers = 1;
+ unsigned int i, wo_end;
+
+ struct object_entry **wo;
+ struct object_entry *objects = to_pack.objects;
+
+ for (i = 0; i < to_pack.nr_objects; i++) {
+ objects[i].tagged = 0;
+ objects[i].filled = 0;
+ SET_DELTA_CHILD(&objects[i], NULL);
+ SET_DELTA_SIBLING(&objects[i], NULL);
+ }
+
+ /*
+ * Fully connect delta_child/delta_sibling network.
+ * Make sure delta_sibling is sorted in the original
+ * recency order.
+ */
+ for (i = to_pack.nr_objects; i > 0;) {
+ struct object_entry *e = &objects[--i];
+ if (!DELTA(e))
+ continue;
+ /* Mark me as the first child */
+ e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+ SET_DELTA_CHILD(DELTA(e), e);
+ }
+
+ /*
+ * Mark objects that are at the tip of tags.
+ */
+ for_each_tag_ref(mark_tagged, NULL);
+
+ if (use_delta_islands)
+ max_layers = compute_pack_layers(&to_pack);
+
+ ALLOC_ARRAY(wo, to_pack.nr_objects);
+ wo_end = 0;
+
+ for (; write_layer < max_layers; ++write_layer)
+ compute_layer_order(wo, &wo_end);
+
+ if (wo_end != to_pack.nr_objects)
+ die(_("ordered %u objects, expected %"PRIu32),
+ wo_end, to_pack.nr_objects);
+
+ return wo;
+}
+
+
+/*
+ * A reused set of objects. All objects in a chunk have the same
+ * relative position in the original packfile and the generated
+ * packfile.
+ */
+
+static struct reused_chunk {
+ /* The offset of the first object of this chunk in the original
+ * packfile. */
+ off_t original;
+ /* "original" minus the offset of the first object of this chunk in the
+ * generated packfile, i.e. how far the chunk's objects shifted. */
+ off_t difference;
+} *reused_chunks;
+static int reused_chunks_nr;
+static int reused_chunks_alloc;
+
+static void record_reused_object(off_t where, off_t offset)
+{
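+ /* Consecutive reused objects that kept the same old-to-new offset
+ * shift belong to the same chunk; only record a new chunk when the
+ * shift changes. */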
+ if (reused_chunks_nr && reused_chunks[reused_chunks_nr-1].difference == offset)
+ return;
+
+ ALLOC_GROW(reused_chunks, reused_chunks_nr + 1,
+ reused_chunks_alloc);
+ reused_chunks[reused_chunks_nr].original = where;
+ reused_chunks[reused_chunks_nr].difference = offset;
+ reused_chunks_nr++;
+}
+
+/*
+ * Binary search to find the chunk that "where" is in. Note
+ * that we're not looking for an exact match, just the first
+ * chunk that contains it (which implicitly ends at the start
+ * of the next chunk).
+ */
+static off_t find_reused_offset(off_t where)
+{
+ int lo = 0, hi = reused_chunks_nr;
+ while (lo < hi) {
+ int mi = lo + ((hi - lo) / 2);
+ if (where == reused_chunks[mi].original)
+ return reused_chunks[mi].difference;
+ if (where < reused_chunks[mi].original)
+ hi = mi;
+ else
+ lo = mi + 1;
+ }
+
+ /*
+ * The first chunk starts at zero, so we can't have gone below
+ * there.
+ */
+ assert(lo);
+ return reused_chunks[lo-1].difference;
+}
+
+static void write_reused_pack_one(size_t pos, struct hashfile *out,
+ struct pack_window **w_curs)
+{
+ off_t offset, next, cur;
+ enum object_type type;
+ unsigned long size;
+
+ offset = pack_pos_to_offset(reuse_packfile, pos);
+ next = pack_pos_to_offset(reuse_packfile, pos + 1);
+
+ record_reused_object(offset, offset - hashfile_total(out));
+
+ cur = offset;
+ type = unpack_object_header(reuse_packfile, w_curs, &cur, &size);
+ assert(type >= 0);
+
+ if (type == OBJ_OFS_DELTA) {
+ off_t base_offset;
+ off_t fixup;
+
+ unsigned char header[MAX_PACK_OBJECT_HEADER];
+ unsigned len;
+
+ base_offset = get_delta_base(reuse_packfile, w_curs, &cur, type, offset);
+ assert(base_offset != 0);
+
+ /* Convert to REF_DELTA if we must... */
+ if (!allow_ofs_delta) {
+ uint32_t base_pos;
+ struct object_id base_oid;
+
+ if (offset_to_pack_pos(reuse_packfile, base_offset, &base_pos) < 0)
+ die(_("expected object at offset %"PRIuMAX" "
+ "in pack %s"),
+ (uintmax_t)base_offset,
+ reuse_packfile->pack_name);
+
+ nth_packed_object_id(&base_oid, reuse_packfile,
+ pack_pos_to_index(reuse_packfile, base_pos));
+
+ len = encode_in_pack_object_header(header, sizeof(header),
+ OBJ_REF_DELTA, size);
+ hashwrite(out, header, len);
+ hashwrite(out, base_oid.hash, the_hash_algo->rawsz);
+ copy_pack_data(out, reuse_packfile, w_curs, cur, next - cur);
+ return;
+ }
+
+ /* Otherwise see if we need to rewrite the offset... */
+ fixup = find_reused_offset(offset) -
+ find_reused_offset(base_offset);
+ if (fixup) {
+ unsigned char ofs_header[10];
+ unsigned i, ofs_len;
+ off_t ofs = offset - base_offset - fixup;
+
+ len = encode_in_pack_object_header(header, sizeof(header),
+ OBJ_OFS_DELTA, size);
+
+ i = sizeof(ofs_header) - 1;
+ ofs_header[i] = ofs & 127;
+ while (ofs >>= 7)
+ ofs_header[--i] = 128 | (--ofs & 127);
+
+ ofs_len = sizeof(ofs_header) - i;
+
+ hashwrite(out, header, len);
+ hashwrite(out, ofs_header + sizeof(ofs_header) - ofs_len, ofs_len);
+ copy_pack_data(out, reuse_packfile, w_curs, cur, next - cur);
+ return;
+ }
+
+ /* ...otherwise we have no fixup, and can write it verbatim */
+ }
+
+ copy_pack_data(out, reuse_packfile, w_curs, offset, next - offset);
+}
+
+static size_t write_reused_pack_verbatim(struct hashfile *out,
+ struct pack_window **w_curs)
+{
+ size_t pos = 0;
+
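+ /* Count the leading bitmap words that are all ones: those objects
+ * form a contiguous prefix of the source pack and can be copied
+ * out in a single block (excluding the pack header). */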
+ while (pos < reuse_packfile_bitmap->word_alloc &&
+ reuse_packfile_bitmap->words[pos] == (eword_t)~0)
+ pos++;
+
+ if (pos) {
+ off_t to_write;
+
+ written = (pos * BITS_IN_EWORD);
+ to_write = pack_pos_to_offset(reuse_packfile, written)
+ - sizeof(struct pack_header);
+
+ /* We're recording one chunk, not one object. */
+ record_reused_object(sizeof(struct pack_header), 0);
+ hashflush(out);
+ copy_pack_data(out, reuse_packfile, w_curs,
+ sizeof(struct pack_header), to_write);
+
+ display_progress(progress_state, written);
+ }
+ return pos;
+}
+
+static void write_reused_pack(struct hashfile *f)
+{
+ size_t i = 0;
+ uint32_t offset;
+ struct pack_window *w_curs = NULL;
+
+ if (allow_ofs_delta)
+ i = write_reused_pack_verbatim(f, &w_curs);
+
+ for (; i < reuse_packfile_bitmap->word_alloc; ++i) {
+ eword_t word = reuse_packfile_bitmap->words[i];
+ size_t pos = (i * BITS_IN_EWORD);
+
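+ /* Visit each set bit in this word: a zero remainder means no more
+ * reused objects here, and ctz skips straight to the next one. */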
+ for (offset = 0; offset < BITS_IN_EWORD; ++offset) {
+ if ((word >> offset) == 0)
+ break;
+
+ offset += ewah_bit_ctz64(word >> offset);
+ /*
+ * Can use bit positions directly, even for MIDX
+ * bitmaps. See comment in try_partial_reuse()
+ * for why.
+ */
+ write_reused_pack_one(pos + offset, f, &w_curs);
+ display_progress(progress_state, ++written);
+ }
+ }
+
+ unuse_pack(&w_curs);
+}
+
+static void write_excluded_by_configs(void)
+{
+ struct oidset_iter iter;
+ const struct object_id *oid;
+
+ oidset_iter_init(&excluded_by_config, &iter);
+ while ((oid = oidset_iter_next(&iter))) {
+ struct configured_exclusion *ex =
+ oidmap_get(&configured_exclusions, oid);
+
+ if (!ex)
+ BUG("configured exclusion wasn't configured");
+ write_in_full(1, ex->pack_hash_hex, strlen(ex->pack_hash_hex));
+ write_in_full(1, " ", 1);
+ write_in_full(1, ex->uri, strlen(ex->uri));
+ write_in_full(1, "\n", 1);
+ }
+}
+
+static const char no_split_warning[] = N_(
+"disabling bitmap writing, packs are split due to pack.packSizeLimit"
+);
+
+static void write_pack_file(void)
+{
+ uint32_t i = 0, j;
+ struct hashfile *f;
+ off_t offset;
+ uint32_t nr_remaining = nr_result;
+ time_t last_mtime = 0;
+ struct object_entry **write_order;
+
+ if (progress > pack_to_stdout)
+ progress_state = start_progress(_("Writing objects"), nr_result);
+ ALLOC_ARRAY(written_list, to_pack.nr_objects);
+ write_order = compute_write_order();
+
+ do {
+ unsigned char hash[GIT_MAX_RAWSZ];
+ char *pack_tmp_name = NULL;
+
+ if (pack_to_stdout)
+ f = hashfd_throughput(1, "<stdout>", progress_state);
+ else
+ f = create_tmp_packfile(&pack_tmp_name);
+
+ offset = write_pack_header(f, nr_remaining);
+
+ if (reuse_packfile) {
+ assert(pack_to_stdout);
+ write_reused_pack(f);
+ offset = hashfile_total(f);
+ }
+
+ nr_written = 0;
+ for (; i < to_pack.nr_objects; i++) {
+ struct object_entry *e = write_order[i];
+ if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
+ break;
+ display_progress(progress_state, written);
+ }
+
+ if (pack_to_stdout) {
+ /*
+ * We never fsync when writing to stdout since we may
+ * not be writing to an actual pack file. For instance,
+ * the upload-pack code passes a pipe here. Calling
+ * fsync on a pipe results in unnecessary
+ * synchronization with the reader on some platforms.
+ */
+ finalize_hashfile(f, hash, FSYNC_COMPONENT_NONE,
+ CSUM_HASH_IN_STREAM | CSUM_CLOSE);
+ } else if (nr_written == nr_remaining) {
+ finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
+ } else {
+ /*
+ * If we wrote the wrong number of entries in the
+ * header, rewrite it like in fast-import.
+ */
+
+ int fd = finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK, 0);
+ fixup_pack_header_footer(fd, hash, pack_tmp_name,
+ nr_written, hash, offset);
+ close(fd);
+ if (write_bitmap_index) {
+ if (write_bitmap_index != WRITE_BITMAP_QUIET)
+ warning(_(no_split_warning));
+ write_bitmap_index = 0;
+ }
+ }
+
+ if (!pack_to_stdout) {
+ struct stat st;
+ struct strbuf tmpname = STRBUF_INIT;
+ char *idx_tmp_name = NULL;
+
+ /*
+ * Packs are accessed at runtime in their mtime
+ * order since newer packs are more likely to contain
+ * younger objects. So if we are creating multiple
+ * packs then we should modify the mtime of later ones
+ * to preserve this property.
+ */
+ if (stat(pack_tmp_name, &st) < 0) {
+ warning_errno(_("failed to stat %s"), pack_tmp_name);
+ } else if (!last_mtime) {
+ last_mtime = st.st_mtime;
+ } else {
+ struct utimbuf utb;
+ utb.actime = st.st_atime;
+ utb.modtime = --last_mtime;
+ if (utime(pack_tmp_name, &utb) < 0)
+ warning_errno(_("failed utime() on %s"), pack_tmp_name);
+ }
+
+ strbuf_addf(&tmpname, "%s-%s.", base_name,
+ hash_to_hex(hash));
+
+ if (write_bitmap_index) {
+ bitmap_writer_set_checksum(hash);
+ bitmap_writer_build_type_index(
+ &to_pack, written_list, nr_written);
+ }
+
+ if (cruft)
+ pack_idx_opts.flags |= WRITE_MTIMES;
+
+ stage_tmp_packfiles(&tmpname, pack_tmp_name,
+ written_list, nr_written,
+ &to_pack, &pack_idx_opts, hash,
+ &idx_tmp_name);
+
+ if (write_bitmap_index) {
+ size_t tmpname_len = tmpname.len;
+
+ strbuf_addstr(&tmpname, "bitmap");
+ stop_progress(&progress_state);
+
+ bitmap_writer_show_progress(progress);
+ bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
+ if (bitmap_writer_build(&to_pack) < 0)
+ die(_("failed to write bitmap index"));
+ bitmap_writer_finish(written_list, nr_written,
+ tmpname.buf, write_bitmap_options);
+ write_bitmap_index = 0;
+ strbuf_setlen(&tmpname, tmpname_len);
+ }
+
+ rename_tmp_packfile_idx(&tmpname, &idx_tmp_name);
+
+ free(idx_tmp_name);
+ strbuf_release(&tmpname);
+ free(pack_tmp_name);
+ puts(hash_to_hex(hash));
+ }
+
+ /* mark written objects as written to previous pack */
+ for (j = 0; j < nr_written; j++) {
+ written_list[j]->offset = (off_t)-1;
+ }
+ nr_remaining -= nr_written;
+ } while (nr_remaining && i < to_pack.nr_objects);
+
+ free(written_list);
+ free(write_order);
+ stop_progress(&progress_state);
+ if (written != nr_result)
+ die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
+ written, nr_result);
+ trace2_data_intmax("pack-objects", the_repository,
+ "write_pack_file/wrote", nr_result);
+}
+
+static int no_try_delta(const char *path)
+{
+ static struct attr_check *check;
+
+ if (!check)
+ check = attr_check_initl("delta", NULL);
+ git_check_attr(the_repository->index, path, check);
+ if (ATTR_FALSE(check->items[0].value))
+ return 1;
+ return 0;
+}
+
+/*
+ * When adding an object, check whether we have already added it
+ * to our packing list. If so, we can skip. However, if we are
+ * being asked to exclude it, but the previous mention was to include
+ * it, make sure to adjust its flags and tweak our numbers accordingly.
+ */
+static int have_duplicate_entry(const struct object_id *oid,
+ int exclude)
+{
+ struct object_entry *entry;
+
+ if (reuse_packfile_bitmap &&
+ bitmap_walk_contains(bitmap_git, reuse_packfile_bitmap, oid))
+ return 1;
+
+ entry = packlist_find(&to_pack, oid);
+ if (!entry)
+ return 0;
+
+ if (exclude) {
+ if (!entry->preferred_base)
+ nr_result--;
+ entry->preferred_base = 1;
+ }
+
+ return 1;
+}
+
+static int want_found_object(const struct object_id *oid, int exclude,
+ struct packed_git *p)
+{
+ if (exclude)
+ return 1;
+ if (incremental)
+ return 0;
+
+ if (!is_pack_valid(p))
+ return -1;
+
+ /*
+ * When asked to do --local (do not include an object that appears in a
+ * pack we borrow from elsewhere) or --honor-pack-keep (do not include
+ * an object that appears in a pack marked with .keep), finding a pack
+ * that matches the criteria is sufficient for us to decide to omit it.
+ * However, even if this pack does not satisfy the criteria, we need to
+ * make sure no copy of this object appears in _any_ pack that makes us
+ * to omit the object, so we need to check all the packs.
+ *
+ * We can however first check whether these options can possibly matter;
+ * if they do not matter, we know we want the object in the generated pack.
+ * Otherwise, we signal "-1" at the end to tell the caller that we do
+ * not know either way, and it needs to check more packs.
+ */
+
+ /*
+ * Objects in packs borrowed from elsewhere are discarded regardless of
+ * whether they appear in other packs that weren't borrowed.
+ */
+ if (local && !p->pack_local)
+ return 0;
+
+ /*
+ * Then handle .keep first, as we have a fast(er) path there.
+ */
+ if (ignore_packed_keep_on_disk || ignore_packed_keep_in_core) {
+ /*
+ * Set the flags for the kept-pack cache to be the ones we want
+ * to ignore.
+ *
+ * That is, if we are ignoring objects in on-disk keep packs,
+ * then we want to search through the on-disk keep and ignore
+ * the in-core ones.
+ */
+ unsigned flags = 0;
+ if (ignore_packed_keep_on_disk)
+ flags |= ON_DISK_KEEP_PACKS;
+ if (ignore_packed_keep_in_core)
+ flags |= IN_CORE_KEEP_PACKS;
+
+ if (ignore_packed_keep_on_disk && p->pack_keep)
+ return 0;
+ if (ignore_packed_keep_in_core && p->pack_keep_in_core)
+ return 0;
+ if (has_object_kept_pack(oid, flags))
+ return 0;
+ }
+
+ /*
+ * At this point we know definitively that either we don't care about
+ * keep-packs, or the object is not in one. Keep checking other
+ * conditions...
+ */
+ if (!local || !have_non_local_packs)
+ return 1;
+
+ /* we don't know yet; keep looking for more packs */
+ return -1;
+}
+
+static int want_object_in_pack_one(struct packed_git *p,
+ const struct object_id *oid,
+ int exclude,
+ struct packed_git **found_pack,
+ off_t *found_offset)
+{
+ off_t offset;
+
+ if (p == *found_pack)
+ offset = *found_offset;
+ else
+ offset = find_pack_entry_one(oid->hash, p);
+
+ if (offset) {
+ if (!*found_pack) {
+ if (!is_pack_valid(p))
+ return -1;
+ *found_offset = offset;
+ *found_pack = p;
+ }
+ return want_found_object(oid, exclude, p);
+ }
+ return -1;
+}
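+
+/*
+ * Both helpers above follow the same tri-state convention: 1 means the
+ * object is definitely wanted, 0 means it is definitely not wanted, and
+ * -1 means this pack alone cannot decide, so the caller must keep
+ * scanning further packs (which is exactly what want_object_in_pack()
+ * below does over the MIDX and the pack MRU list).
+ */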
+
+/*
+ * Check whether we want the object in the pack (e.g., we do not want
+ * objects found in non-local stores if the "--local" option was used).
+ *
+ * If the caller already knows an existing pack it wants to take the object
+ * from, that is passed in *found_pack and *found_offset; otherwise this
+ * function finds if there is any pack that has the object and returns the pack
+ * and its offset in these variables.
+ */
+static int want_object_in_pack(const struct object_id *oid,
+ int exclude,
+ struct packed_git **found_pack,
+ off_t *found_offset)
+{
+ int want;
+ struct list_head *pos;
+ struct multi_pack_index *m;
+
+ if (!exclude && local && has_loose_object_nonlocal(oid))
+ return 0;
+
+ /*
+ * If we already know the pack the object lives in, start checks from
+ * that pack - in the usual case when neither --local was given nor
+ * .keep files are present, we will determine the answer right now.
+ */
+ if (*found_pack) {
+ want = want_found_object(oid, exclude, *found_pack);
+ if (want != -1)
+ return want;
+
+ *found_pack = NULL;
+ *found_offset = 0;
+ }
+
+ for (m = get_multi_pack_index(the_repository); m; m = m->next) {
+ struct pack_entry e;
+ if (fill_midx_entry(the_repository, oid, &e, m)) {
+ want = want_object_in_pack_one(e.p, oid, exclude, found_pack, found_offset);
+ if (want != -1)
+ return want;
+ }
+ }
+
+ list_for_each(pos, get_packed_git_mru(the_repository)) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset);
+ if (!exclude && want > 0)
+ list_move(&p->mru,
+ get_packed_git_mru(the_repository));
+ if (want != -1)
+ return want;
+ }
+
+ if (uri_protocols.nr) {
+ struct configured_exclusion *ex =
+ oidmap_get(&configured_exclusions, oid);
+ int i;
+ const char *p;
+
+ if (ex) {
+ for (i = 0; i < uri_protocols.nr; i++) {
+ if (skip_prefix(ex->uri,
+ uri_protocols.items[i].string,
+ &p) &&
+ *p == ':') {
+ oidset_insert(&excluded_by_config, oid);
+ return 0;
+ }
+ }
+ }
+ }
+
+ return 1;
+}
+
+static struct object_entry *create_object_entry(const struct object_id *oid,
+ enum object_type type,
+ uint32_t hash,
+ int exclude,
+ int no_try_delta,
+ struct packed_git *found_pack,
+ off_t found_offset)
+{
+ struct object_entry *entry;
+
+ entry = packlist_alloc(&to_pack, oid);
+ entry->hash = hash;
+ oe_set_type(entry, type);
+ if (exclude)
+ entry->preferred_base = 1;
+ else
+ nr_result++;
+ if (found_pack) {
+ oe_set_in_pack(&to_pack, entry, found_pack);
+ entry->in_pack_offset = found_offset;
+ }
+
+ entry->no_try_delta = no_try_delta;
+
+ return entry;
+}
+
+static const char no_closure_warning[] = N_(
+"disabling bitmap writing, as some objects are not being packed"
+);
+
+static int add_object_entry(const struct object_id *oid, enum object_type type,
+ const char *name, int exclude)
+{
+ struct packed_git *found_pack = NULL;
+ off_t found_offset = 0;
+
+ display_progress(progress_state, ++nr_seen);
+
+ if (have_duplicate_entry(oid, exclude))
+ return 0;
+
+ if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
+ /* The pack is missing an object, so it will not have closure */
+ if (write_bitmap_index) {
+ if (write_bitmap_index != WRITE_BITMAP_QUIET)
+ warning(_(no_closure_warning));
+ write_bitmap_index = 0;
+ }
+ return 0;
+ }
+
+ create_object_entry(oid, type, pack_name_hash(name),
+ exclude, name && no_try_delta(name),
+ found_pack, found_offset);
+ return 1;
+}
+
+static int add_object_entry_from_bitmap(const struct object_id *oid,
+ enum object_type type,
+ int flags, uint32_t name_hash,
+ struct packed_git *pack, off_t offset)
+{
+ display_progress(progress_state, ++nr_seen);
+
+ if (have_duplicate_entry(oid, 0))
+ return 0;
+
+ if (!want_object_in_pack(oid, 0, &pack, &offset))
+ return 0;
+
+ create_object_entry(oid, type, name_hash, 0, 0, pack, offset);
+ return 1;
+}
+
+struct pbase_tree_cache {
+ struct object_id oid;
+ int ref;
+ int temporary;
+ void *tree_data;
+ unsigned long tree_size;
+};
+
+static struct pbase_tree_cache *(pbase_tree_cache[256]);
+static int pbase_tree_cache_ix(const struct object_id *oid)
+{
+ return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
+}
+static int pbase_tree_cache_ix_incr(int ix)
+{
+ return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
+}
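+
+/*
+ * A small illustration of the probing scheme used by pbase_tree_get()
+ * below (hypothetical value): an object whose first hash byte is 0xd3
+ * maps to slot 0xd3 == 211, and a lookup probes slots 211, 212, ... for
+ * up to 8 steps, wrapping around at 256, before falling back to reading
+ * the tree from the object store.
+ */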
+
+static struct pbase_tree {
+ struct pbase_tree *next;
+ /* This is a phony "cache" entry; we are not
+ * going to evict it or find it through the _get()
+ * mechanism -- this is for the top-level node that
+ * would almost always change with any commit.
+ */
+ struct pbase_tree_cache pcache;
+} *pbase_tree;
+
+static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
+{
+ struct pbase_tree_cache *ent, *nent;
+ void *data;
+ unsigned long size;
+ enum object_type type;
+ int neigh;
+ int my_ix = pbase_tree_cache_ix(oid);
+ int available_ix = -1;
+
+ /* pbase_tree_cache acts as a limited hash table: if the object
+ * is cached, it will be found at its index or within a few
+ * slots after that slot.
+ */
+ for (neigh = 0; neigh < 8; neigh++) {
+ ent = pbase_tree_cache[my_ix];
+ if (ent && oideq(&ent->oid, oid)) {
+ ent->ref++;
+ return ent;
+ }
+ else if (((available_ix < 0) && (!ent || !ent->ref)) ||
+ ((0 <= available_ix) &&
+ (!ent && pbase_tree_cache[available_ix])))
+ available_ix = my_ix;
+ if (!ent)
+ break;
+ my_ix = pbase_tree_cache_ix_incr(my_ix);
+ }
+
+ /* Did not find one. Either we got a bogus request or
+ * we need to read and perhaps cache.
+ */
+ data = read_object_file(oid, &type, &size);
+ if (!data)
+ return NULL;
+ if (type != OBJ_TREE) {
+ free(data);
+ return NULL;
+ }
+
+ /* We need to either cache or return a throwaway copy */
+
+ if (available_ix < 0)
+ ent = NULL;
+ else {
+ ent = pbase_tree_cache[available_ix];
+ my_ix = available_ix;
+ }
+
+ if (!ent) {
+ nent = xmalloc(sizeof(*nent));
+ nent->temporary = (available_ix < 0);
+ }
+ else {
+ /* evict and reuse */
+ free(ent->tree_data);
+ nent = ent;
+ }
+ oidcpy(&nent->oid, oid);
+ nent->tree_data = data;
+ nent->tree_size = size;
+ nent->ref = 1;
+ if (!nent->temporary)
+ pbase_tree_cache[my_ix] = nent;
+ return nent;
+}
+
+static void pbase_tree_put(struct pbase_tree_cache *cache)
+{
+ if (!cache->temporary) {
+ cache->ref--;
+ return;
+ }
+ free(cache->tree_data);
+ free(cache);
+}
+
+static int name_cmp_len(const char *name)
+{
+ int i;
+ for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
+ ;
+ return i;
+}
+
+static void add_pbase_object(struct tree_desc *tree,
+ const char *name,
+ int cmplen,
+ const char *fullname)
+{
+ struct name_entry entry;
+ int cmp;
+
+ while (tree_entry(tree,&entry)) {
+ if (S_ISGITLINK(entry.mode))
+ continue;
+ cmp = tree_entry_len(&entry) != cmplen ? 1 :
+ memcmp(name, entry.path, cmplen);
+ if (cmp > 0)
+ continue;
+ if (cmp < 0)
+ return;
+ if (name[cmplen] != '/') {
+ add_object_entry(&entry.oid,
+ object_type(entry.mode),
+ fullname, 1);
+ return;
+ }
+ if (S_ISDIR(entry.mode)) {
+ struct tree_desc sub;
+ struct pbase_tree_cache *tree;
+ const char *down = name+cmplen+1;
+ int downlen = name_cmp_len(down);
+
+ tree = pbase_tree_get(&entry.oid);
+ if (!tree)
+ return;
+ init_tree_desc(&sub, tree->tree_data, tree->tree_size);
+
+ add_pbase_object(&sub, down, downlen, fullname);
+ pbase_tree_put(tree);
+ }
+ }
+}
+
+static unsigned *done_pbase_paths;
+static int done_pbase_paths_num;
+static int done_pbase_paths_alloc;
+static int done_pbase_path_pos(unsigned hash)
+{
+ int lo = 0;
+ int hi = done_pbase_paths_num;
+ while (lo < hi) {
+ int mi = lo + (hi - lo) / 2;
+ if (done_pbase_paths[mi] == hash)
+ return mi;
+ if (done_pbase_paths[mi] < hash)
+ hi = mi;
+ else
+ lo = mi + 1;
+ }
+ return -lo-1;
+}
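+
+/*
+ * Note that the comparison above keeps done_pbase_paths[] sorted in
+ * descending order, and a miss is reported as -(insertion point)-1 so
+ * that check_pbase_path() below can both detect the miss and know where
+ * to insert the new hash while keeping the array sorted.
+ */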
+
+static int check_pbase_path(unsigned hash)
+{
+ int pos = done_pbase_path_pos(hash);
+ if (0 <= pos)
+ return 1;
+ pos = -pos - 1;
+ ALLOC_GROW(done_pbase_paths,
+ done_pbase_paths_num + 1,
+ done_pbase_paths_alloc);
+ done_pbase_paths_num++;
+ if (pos < done_pbase_paths_num)
+ MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
+ done_pbase_paths_num - pos - 1);
+ done_pbase_paths[pos] = hash;
+ return 0;
+}
+
+static void add_preferred_base_object(const char *name)
+{
+ struct pbase_tree *it;
+ int cmplen;
+ unsigned hash = pack_name_hash(name);
+
+ if (!num_preferred_base || check_pbase_path(hash))
+ return;
+
+ cmplen = name_cmp_len(name);
+ for (it = pbase_tree; it; it = it->next) {
+ if (cmplen == 0) {
+ add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
+ }
+ else {
+ struct tree_desc tree;
+ init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
+ add_pbase_object(&tree, name, cmplen, name);
+ }
+ }
+}
+
+static void add_preferred_base(struct object_id *oid)
+{
+ struct pbase_tree *it;
+ void *data;
+ unsigned long size;
+ struct object_id tree_oid;
+
+ if (window <= num_preferred_base++)
+ return;
+
+ data = read_object_with_reference(the_repository, oid,
+ OBJ_TREE, &size, &tree_oid);
+ if (!data)
+ return;
+
+ for (it = pbase_tree; it; it = it->next) {
+ if (oideq(&it->pcache.oid, &tree_oid)) {
+ free(data);
+ return;
+ }
+ }
+
+ CALLOC_ARRAY(it, 1);
+ it->next = pbase_tree;
+ pbase_tree = it;
+
+ oidcpy(&it->pcache.oid, &tree_oid);
+ it->pcache.tree_data = data;
+ it->pcache.tree_size = size;
+}
+
+static void cleanup_preferred_base(void)
+{
+ struct pbase_tree *it;
+ unsigned i;
+
+ it = pbase_tree;
+ pbase_tree = NULL;
+ while (it) {
+ struct pbase_tree *tmp = it;
+ it = tmp->next;
+ free(tmp->pcache.tree_data);
+ free(tmp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
+ if (!pbase_tree_cache[i])
+ continue;
+ free(pbase_tree_cache[i]->tree_data);
+ FREE_AND_NULL(pbase_tree_cache[i]);
+ }
+
+ FREE_AND_NULL(done_pbase_paths);
+ done_pbase_paths_num = done_pbase_paths_alloc = 0;
+}
+
+/*
+ * Return 1 iff the object specified by "delta" can be sent
+ * literally as a delta against the base in "base_oid". If
+ * so, then *base_out will point to the entry in our packing
+ * list, or NULL if we must use the external-base list.
+ *
+ * Depth value does not matter - find_deltas() will
+ * never consider reused delta as the base object to
+ * deltify other objects against, in order to avoid
+ * circular deltas.
+ */
+static int can_reuse_delta(const struct object_id *base_oid,
+ struct object_entry *delta,
+ struct object_entry **base_out)
+{
+ struct object_entry *base;
+
+ /*
+ * First see if we're already sending the base (or it's explicitly in
+ * our "excluded" list).
+ */
+ base = packlist_find(&to_pack, base_oid);
+ if (base) {
+ if (!in_same_island(&delta->idx.oid, &base->idx.oid))
+ return 0;
+ *base_out = base;
+ return 1;
+ }
+
+ /*
+ * Otherwise, reachability bitmaps may tell us if the receiver has it,
+ * even if it was buried too deep in history to make it into the
+ * packing list.
+ */
+ if (thin && bitmap_has_oid_in_uninteresting(bitmap_git, base_oid)) {
+ if (use_delta_islands) {
+ if (!in_same_island(&delta->idx.oid, base_oid))
+ return 0;
+ }
+ *base_out = NULL;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void prefetch_to_pack(uint32_t object_index_start) {
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+ uint32_t i;
+
+ for (i = object_index_start; i < to_pack.nr_objects; i++) {
+ struct object_entry *entry = to_pack.objects + i;
+
+ if (!oid_object_info_extended(the_repository,
+ &entry->idx.oid,
+ NULL,
+ OBJECT_INFO_FOR_PREFETCH))
+ continue;
+ oid_array_append(&to_fetch, &entry->idx.oid);
+ }
+ promisor_remote_get_direct(the_repository,
+ to_fetch.oid, to_fetch.nr);
+ oid_array_clear(&to_fetch);
+}
+
+static void check_object(struct object_entry *entry, uint32_t object_index)
+{
+ unsigned long canonical_size;
+ enum object_type type;
+ struct object_info oi = {.typep = &type, .sizep = &canonical_size};
+
+ if (IN_PACK(entry)) {
+ struct packed_git *p = IN_PACK(entry);
+ struct pack_window *w_curs = NULL;
+ int have_base = 0;
+ struct object_id base_ref;
+ struct object_entry *base_entry;
+ unsigned long used, used_0;
+ unsigned long avail;
+ off_t ofs;
+ unsigned char *buf, c;
+ enum object_type type;
+ unsigned long in_pack_size;
+
+ buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
+
+ /*
+ * We want in_pack_type even if we do not reuse delta
+ * since non-delta representations could still be reused.
+ */
+ used = unpack_object_header_buffer(buf, avail,
+ &type,
+ &in_pack_size);
+ if (used == 0)
+ goto give_up;
+
+ if (type < 0)
+ BUG("invalid type %d", type);
+ entry->in_pack_type = type;
+
+ /*
+ * Determine if this is a delta and if so whether we can
+ * reuse it or not. Otherwise let's find out as cheaply as
+ * possible what the actual type and size for this object is.
+ */
+ switch (entry->in_pack_type) {
+ default:
+ /* Not a delta hence we've already got all we need. */
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size);
+ entry->in_pack_header_size = used;
+ if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
+ goto give_up;
+ unuse_pack(&w_curs);
+ return;
+ case OBJ_REF_DELTA:
+ if (reuse_delta && !entry->preferred_base) {
+ oidread(&base_ref,
+ use_pack(p, &w_curs,
+ entry->in_pack_offset + used,
+ NULL));
+ have_base = 1;
+ }
+ entry->in_pack_header_size = used + the_hash_algo->rawsz;
+ break;
+ case OBJ_OFS_DELTA:
+ buf = use_pack(p, &w_curs,
+ entry->in_pack_offset + used, NULL);
+ used_0 = 0;
+ c = buf[used_0++];
+ ofs = c & 127;
+ while (c & 128) {
+ ofs += 1;
+ if (!ofs || MSB(ofs, 7)) {
+ error(_("delta base offset overflow in pack for %s"),
+ oid_to_hex(&entry->idx.oid));
+ goto give_up;
+ }
+ c = buf[used_0++];
+ ofs = (ofs << 7) + (c & 127);
+ }
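+ /*
+ * For illustration (hypothetical bytes): the two-byte sequence
+ * 0x91 0x2e decodes as 17, then (17 + 1) * 128 + 46 = 2350, so
+ * the base object starts 2350 bytes before this entry. The
+ * "ofs += 1" per continuation byte keeps the encoding
+ * unambiguous by ruling out shorter encodings of the same value.
+ */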
+ ofs = entry->in_pack_offset - ofs;
+ if (ofs <= 0 || ofs >= entry->in_pack_offset) {
+ error(_("delta base offset out of bound for %s"),
+ oid_to_hex(&entry->idx.oid));
+ goto give_up;
+ }
+ if (reuse_delta && !entry->preferred_base) {
+ uint32_t pos;
+ if (offset_to_pack_pos(p, ofs, &pos) < 0)
+ goto give_up;
+ if (!nth_packed_object_id(&base_ref, p,
+ pack_pos_to_index(p, pos)))
+ have_base = 1;
+ }
+ entry->in_pack_header_size = used + used_0;
+ break;
+ }
+
+ if (have_base &&
+ can_reuse_delta(&base_ref, entry, &base_entry)) {
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size); /* delta size */
+ SET_DELTA_SIZE(entry, in_pack_size);
+
+ if (base_entry) {
+ SET_DELTA(entry, base_entry);
+ entry->delta_sibling_idx = base_entry->delta_child_idx;
+ SET_DELTA_CHILD(base_entry, entry);
+ } else {
+ SET_DELTA_EXT(entry, &base_ref);
+ }
+
+ unuse_pack(&w_curs);
+ return;
+ }
+
+ if (oe_type(entry)) {
+ off_t delta_pos;
+
+ /*
+ * This must be a delta and we already know what the
+ * final object type is. Let's extract the actual
+ * object size from the delta header.
+ */
+ delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
+ canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
+ if (canonical_size == 0)
+ goto give_up;
+ SET_SIZE(entry, canonical_size);
+ unuse_pack(&w_curs);
+ return;
+ }
+
+ /*
+ * No choice but to fall back to the recursive delta walk
+ * with oid_object_info() to find out the object type
+ * at this point...
+ */
+ give_up:
+ unuse_pack(&w_curs);
+ }
+
+ if (oid_object_info_extended(the_repository, &entry->idx.oid, &oi,
+ OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_LOOKUP_REPLACE) < 0) {
+ if (has_promisor_remote()) {
+ prefetch_to_pack(object_index);
+ if (oid_object_info_extended(the_repository, &entry->idx.oid, &oi,
+ OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_LOOKUP_REPLACE) < 0)
+ type = -1;
+ } else {
+ type = -1;
+ }
+ }
+ oe_set_type(entry, type);
+ if (entry->type_valid) {
+ SET_SIZE(entry, canonical_size);
+ } else {
+ /*
+ * Bad object type is checked in prepare_pack(). This is
+ * to permit a missing preferred base object to be ignored
+ * as a preferred base. Doing so can result in a larger
+ * pack file, but the transfer will still take place.
+ */
+ }
+}
+
+static int pack_offset_sort(const void *_a, const void *_b)
+{
+ const struct object_entry *a = *(struct object_entry **)_a;
+ const struct object_entry *b = *(struct object_entry **)_b;
+ const struct packed_git *a_in_pack = IN_PACK(a);
+ const struct packed_git *b_in_pack = IN_PACK(b);
+
+ /* avoid filesystem thrashing with loose objects */
+ if (!a_in_pack && !b_in_pack)
+ return oidcmp(&a->idx.oid, &b->idx.oid);
+
+ if (a_in_pack < b_in_pack)
+ return -1;
+ if (a_in_pack > b_in_pack)
+ return 1;
+ return a->in_pack_offset < b->in_pack_offset ? -1 :
+ (a->in_pack_offset > b->in_pack_offset);
+}
+
+/*
+ * Drop an on-disk delta we were planning to reuse. Naively, this would
+ * just involve blanking out the "delta" field, but we have to deal
+ * with some extra book-keeping:
+ *
+ * 1. Removing ourselves from the delta_sibling linked list.
+ *
+ * 2. Updating our size/type to the non-delta representation. These were
+ * either not recorded initially (size) or overwritten with the delta type
+ * (type) when check_object() decided to reuse the delta.
+ *
+ * 3. Resetting our delta depth, as we are now a base object.
+ */
+static void drop_reused_delta(struct object_entry *entry)
+{
+ unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
+ struct object_info oi = OBJECT_INFO_INIT;
+ enum object_type type;
+ unsigned long size;
+
+ while (*idx) {
+ struct object_entry *oe = &to_pack.objects[*idx - 1];
+
+ if (oe == entry)
+ *idx = oe->delta_sibling_idx;
+ else
+ idx = &oe->delta_sibling_idx;
+ }
+ SET_DELTA(entry, NULL);
+ entry->depth = 0;
+
+ oi.sizep = &size;
+ oi.typep = &type;
+ if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
+ /*
+ * We failed to get the info from this pack for some reason;
+ * fall back to oid_object_info, which may find another copy.
+ * And if that fails, the error will be recorded in oe_type(entry)
+ * and dealt with in prepare_pack().
+ */
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &size));
+ } else {
+ oe_set_type(entry, type);
+ }
+ SET_SIZE(entry, size);
+}
+
+/*
+ * Follow the chain of deltas from this entry onward, throwing away any links
+ * that cause us to hit a cycle (as determined by the DFS state flags in
+ * the entries).
+ *
+ * We also detect too-long reused chains that would violate our --depth
+ * limit.
+ */
+static void break_delta_chains(struct object_entry *entry)
+{
+ /*
+ * The actual depth of each object we will write is stored as an int,
+ * as it cannot exceed our int "depth" limit. But before we break
+ * anything based on that limit, we may potentially go as deep as the
+ * number of objects, which is elsewhere bounded to a uint32_t.
+ */
+ uint32_t total_depth;
+ struct object_entry *cur, *next;
+
+ for (cur = entry, total_depth = 0;
+ cur;
+ cur = DELTA(cur), total_depth++) {
+ if (cur->dfs_state == DFS_DONE) {
+ /*
+ * We've already seen this object and know it isn't
+ * part of a cycle. We do need to append its depth
+ * to our count.
+ */
+ total_depth += cur->depth;
+ break;
+ }
+
+ /*
+ * We break cycles before looping, so an ACTIVE state (or any
+ * other cruft which made its way into the state variable)
+ * is a bug.
+ */
+ if (cur->dfs_state != DFS_NONE)
+ BUG("confusing delta dfs state in first pass: %d",
+ cur->dfs_state);
+
+ /*
+ * Now we know this is the first time we've seen the object. If
+ * it's not a delta, we're done traversing, but we'll mark it
+ * done to save time on future traversals.
+ */
+ if (!DELTA(cur)) {
+ cur->dfs_state = DFS_DONE;
+ break;
+ }
+
+ /*
+ * Mark ourselves as active and see if the next step causes
+ * us to cycle to another active object. It's important to do
+ * this _before_ we loop, because it impacts where we make the
+ * cut, and thus how our total_depth counter works.
+ * E.g., We may see a partial loop like:
+ *
+ * A -> B -> C -> D -> B
+ *
+ * Cutting B->C breaks the cycle. But now the depth of A is
+ * only 1, and our total_depth counter is at 3. The size of the
+ * error is always one less than the size of the cycle we
+ * broke. Commits C and D were "lost" from A's chain.
+ *
+ * If we instead cut D->B, then the depth of A is correct at 3.
+ * We keep all commits in the chain that we examined.
+ */
+ cur->dfs_state = DFS_ACTIVE;
+ if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
+ drop_reused_delta(cur);
+ cur->dfs_state = DFS_DONE;
+ break;
+ }
+ }
+
+ /*
+ * And now that we've gone all the way to the bottom of the chain, we
+ * need to clear the active flags and set the depth fields as
+ * appropriate. Unlike the loop above, which can quit when it drops a
+ * delta, we need to keep going to look for more depth cuts. So we need
+ * an extra "next" pointer to keep going after we reset cur->delta.
+ */
+ for (cur = entry; cur; cur = next) {
+ next = DELTA(cur);
+
+ /*
+ * We should have a chain of zero or more ACTIVE states down to
+ * a final DONE. We can quit after the DONE, because either it
+ * has no bases, or we've already handled them in a previous
+ * call.
+ */
+ if (cur->dfs_state == DFS_DONE)
+ break;
+ else if (cur->dfs_state != DFS_ACTIVE)
+ BUG("confusing delta dfs state in second pass: %d",
+ cur->dfs_state);
+
+ /*
+ * If the total_depth is more than depth, then we need to snip
+ * the chain into two or more smaller chains that don't exceed
+ * the maximum depth. Most of the resulting chains will contain
+ * (depth + 1) entries (i.e., depth deltas plus one base), and
+ * the last chain (i.e., the one containing entry) will contain
+ * whatever entries are left over, namely
+ * (total_depth % (depth + 1)) of them.
+ *
+ * Since we are iterating towards decreasing depth, we need to
+ * decrement total_depth as we go, and we need to write to the
+ * entry what its final depth will be after all of the
+ * snipping. Since we're snipping into chains of length (depth
+ * + 1) entries, the final depth of an entry will be its
+ * original depth modulo (depth + 1). Any time we encounter an
+ * entry whose final depth is supposed to be zero, we snip it
+ * from its delta base, thereby making it so.
+ */
+ cur->depth = (total_depth--) % (depth + 1);
+ if (!cur->depth)
+ drop_reused_delta(cur);
+
+ cur->dfs_state = DFS_DONE;
+ }
+}
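+
+/*
+ * A worked example of the snipping above (hypothetical chain): with
+ * --depth=2 and a reused chain A -> B -> C -> D -> E (A deltified
+ * against B, and so on down to the base E), the first pass measures
+ * total_depth = 4. The second pass then assigns depths 4%3=1 to A,
+ * 3%3=0 to B, 2%3=2 to C and 1%3=1 to D; since B's depth hits zero its
+ * reused delta is dropped, leaving two chains, A -> B and C -> D -> E,
+ * each within the depth limit.
+ */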
+
+static void get_object_details(void)
+{
+ uint32_t i;
+ struct object_entry **sorted_by_offset;
+
+ if (progress)
+ progress_state = start_progress(_("Counting objects"),
+ to_pack.nr_objects);
+
+ CALLOC_ARRAY(sorted_by_offset, to_pack.nr_objects);
+ for (i = 0; i < to_pack.nr_objects; i++)
+ sorted_by_offset[i] = to_pack.objects + i;
+ QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
+
+ for (i = 0; i < to_pack.nr_objects; i++) {
+ struct object_entry *entry = sorted_by_offset[i];
+ check_object(entry, i);
+ if (entry->type_valid &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold))
+ entry->no_try_delta = 1;
+ display_progress(progress_state, i + 1);
+ }
+ stop_progress(&progress_state);
+
+ /*
+ * This must happen in a second pass, since we rely on the delta
+ * information for the whole list being completed.
+ */
+ for (i = 0; i < to_pack.nr_objects; i++)
+ break_delta_chains(&to_pack.objects[i]);
+
+ free(sorted_by_offset);
+}
+
+/*
+ * We search for deltas in a list sorted by type, by filename hash, and then
+ * by size, so that we see progressively smaller and smaller files.
+ * That's because we prefer deltas to be from the bigger file
+ * to the smaller -- deletes are potentially cheaper, but perhaps
+ * more importantly, the bigger file is likely the more recent
+ * one. The deepest deltas are therefore the oldest objects, which are
+ * less likely to be accessed often.
+ */
+static int type_size_sort(const void *_a, const void *_b)
+{
+ const struct object_entry *a = *(struct object_entry **)_a;
+ const struct object_entry *b = *(struct object_entry **)_b;
+ const enum object_type a_type = oe_type(a);
+ const enum object_type b_type = oe_type(b);
+ const unsigned long a_size = SIZE(a);
+ const unsigned long b_size = SIZE(b);
+
+ if (a_type > b_type)
+ return -1;
+ if (a_type < b_type)
+ return 1;
+ if (a->hash > b->hash)
+ return -1;
+ if (a->hash < b->hash)
+ return 1;
+ if (a->preferred_base > b->preferred_base)
+ return -1;
+ if (a->preferred_base < b->preferred_base)
+ return 1;
+ if (use_delta_islands) {
+ const int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid);
+ if (island_cmp)
+ return island_cmp;
+ }
+ if (a_size > b_size)
+ return -1;
+ if (a_size < b_size)
+ return 1;
+ return a < b ? -1 : (a > b); /* newest first */
+}
+
+struct unpacked {
+ struct object_entry *entry;
+ void *data;
+ struct delta_index *index;
+ unsigned depth;
+};
+
+static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
+ unsigned long delta_size)
+{
+ if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
+ return 0;
+
+ if (delta_size < cache_max_small_delta_size)
+ return 1;
+
+ /* cache delta, if objects are large enough compared to delta size */
+ if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
+ return 1;
+
+ return 0;
+}
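+
+/*
+ * A rough reading of the last heuristic above: cache the delta when it
+ * is smaller than about src_size/1024 + trg_size/2048. For example
+ * (hypothetical sizes), a 4 MiB source and a 2 MiB target allow deltas
+ * of up to roughly 5 KiB to stay in the cache.
+ */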
+
+/* Protect delta_cache_size */
+static pthread_mutex_t cache_mutex;
+#define cache_lock() pthread_mutex_lock(&cache_mutex)
+#define cache_unlock() pthread_mutex_unlock(&cache_mutex)
+
+/*
+ * Protect object list partitioning (e.g. struct thread_params) and
+ * progress_state
+ */
+static pthread_mutex_t progress_mutex;
+#define progress_lock() pthread_mutex_lock(&progress_mutex)
+#define progress_unlock() pthread_mutex_unlock(&progress_mutex)
+
+/*
+ * Access to struct object_entry is unprotected since each thread owns
+ * a portion of the main object list. Just don't access object entries
+ * ahead in the list because they can be stolen and would need
+ * progress_mutex for protection.
+ */
+
+static inline int oe_size_less_than(struct packing_data *pack,
+ const struct object_entry *lhs,
+ unsigned long rhs)
+{
+ if (lhs->size_valid)
+ return lhs->size_ < rhs;
+ if (rhs < pack->oe_size_limit) /* rhs < 2^x <= lhs ? */
+ return 0;
+ return oe_get_size_slow(pack, lhs) < rhs;
+}
+
+static inline void oe_set_tree_depth(struct packing_data *pack,
+ struct object_entry *e,
+ unsigned int tree_depth)
+{
+ if (!pack->tree_depth)
+ CALLOC_ARRAY(pack->tree_depth, pack->nr_alloc);
+ pack->tree_depth[e - pack->objects] = tree_depth;
+}
+
+/*
+ * Return the size of the object without doing any delta
+ * reconstruction (so non-deltas are true object sizes, but deltas
+ * return the size of the delta data).
+ */
+unsigned long oe_get_size_slow(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ struct packed_git *p;
+ struct pack_window *w_curs;
+ unsigned char *buf;
+ enum object_type type;
+ unsigned long used, avail, size;
+
+ if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
+ packing_data_lock(&to_pack);
+ if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+ die(_("unable to get size of %s"),
+ oid_to_hex(&e->idx.oid));
+ packing_data_unlock(&to_pack);
+ return size;
+ }
+
+ p = oe_in_pack(pack, e);
+ if (!p)
+ BUG("when e->type is a delta, it must belong to a pack");
+
+ packing_data_lock(&to_pack);
+ w_curs = NULL;
+ buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
+ used = unpack_object_header_buffer(buf, avail, &type, &size);
+ if (used == 0)
+ die(_("unable to parse object header of %s"),
+ oid_to_hex(&e->idx.oid));
+
+ unuse_pack(&w_curs);
+ packing_data_unlock(&to_pack);
+ return size;
+}
+
+static int try_delta(struct unpacked *trg, struct unpacked *src,
+ unsigned max_depth, unsigned long *mem_usage)
+{
+ struct object_entry *trg_entry = trg->entry;
+ struct object_entry *src_entry = src->entry;
+ unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
+ unsigned ref_depth;
+ enum object_type type;
+ void *delta_buf;
+
+ /* Don't bother doing diffs between different types */
+ if (oe_type(trg_entry) != oe_type(src_entry))
+ return -1;
+
+ /*
+ * We do not bother to try a delta that we discarded on an
+ * earlier try, but only when reusing delta data. Note that
+ * src_entry that is marked as the preferred_base should always
+ * be considered, as even if we produce a suboptimal delta against
+ * it, we will still save the transfer cost, as we already know
+ * the other side has it and we won't send src_entry at all.
+ */
+ if (reuse_delta && IN_PACK(trg_entry) &&
+ IN_PACK(trg_entry) == IN_PACK(src_entry) &&
+ !src_entry->preferred_base &&
+ trg_entry->in_pack_type != OBJ_REF_DELTA &&
+ trg_entry->in_pack_type != OBJ_OFS_DELTA)
+ return 0;
+
+ /* Let's not bust the allowed depth. */
+ if (src->depth >= max_depth)
+ return 0;
+
+ /* Now some size filtering heuristics. */
+ trg_size = SIZE(trg_entry);
+ if (!DELTA(trg_entry)) {
+ max_size = trg_size/2 - the_hash_algo->rawsz;
+ ref_depth = 1;
+ } else {
+ max_size = DELTA_SIZE(trg_entry);
+ ref_depth = trg->depth;
+ }
+ max_size = (uint64_t)max_size * (max_depth - src->depth) /
+ (max_depth - ref_depth + 1);
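+ /*
+ * For example (hypothetical numbers): with max_depth = 10, an
+ * existing 1000-byte delta whose base chain is at ref_depth = 4
+ * and a candidate base sitting at depth 6 scale max_size down to
+ * 1000 * (10 - 6) / (10 - 4 + 1) = 571 bytes, so a deeper delta
+ * is only accepted if it is also substantially smaller.
+ */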
+ if (max_size == 0)
+ return 0;
+ src_size = SIZE(src_entry);
+ sizediff = src_size < trg_size ? trg_size - src_size : 0;
+ if (sizediff >= max_size)
+ return 0;
+ if (trg_size < src_size / 32)
+ return 0;
+
+ if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
+ return 0;
+
+ /* Load data if not already done */
+ if (!trg->data) {
+ packing_data_lock(&to_pack);
+ trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
+ packing_data_unlock(&to_pack);
+ if (!trg->data)
+ die(_("object %s cannot be read"),
+ oid_to_hex(&trg_entry->idx.oid));
+ if (sz != trg_size)
+ die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
+ oid_to_hex(&trg_entry->idx.oid), (uintmax_t)sz,
+ (uintmax_t)trg_size);
+ *mem_usage += sz;
+ }
+ if (!src->data) {
+ packing_data_lock(&to_pack);
+ src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
+ packing_data_unlock(&to_pack);
+ if (!src->data) {
+ if (src_entry->preferred_base) {
+ static int warned = 0;
+ if (!warned++)
+ warning(_("object %s cannot be read"),
+ oid_to_hex(&src_entry->idx.oid));
+ /*
+ * Those objects are not included in the
+ * resulting pack. Be resilient and ignore
+ * them if they can't be read, in case the
+ * pack could be created nevertheless.
+ */
+ return 0;
+ }
+ die(_("object %s cannot be read"),
+ oid_to_hex(&src_entry->idx.oid));
+ }
+ if (sz != src_size)
+ die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
+ oid_to_hex(&src_entry->idx.oid), (uintmax_t)sz,
+ (uintmax_t)src_size);
+ *mem_usage += sz;
+ }
+ if (!src->index) {
+ src->index = create_delta_index(src->data, src_size);
+ if (!src->index) {
+ static int warned = 0;
+ if (!warned++)
+ warning(_("suboptimal pack - out of memory"));
+ return 0;
+ }
+ *mem_usage += sizeof_delta_index(src->index);
+ }
+
+ delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
+ if (!delta_buf)
+ return 0;
+
+ if (DELTA(trg_entry)) {
+ /* Prefer only shallower same-sized deltas. */
+ if (delta_size == DELTA_SIZE(trg_entry) &&
+ src->depth + 1 >= trg->depth) {
+ free(delta_buf);
+ return 0;
+ }
+ }
+
+ /*
+ * Handle memory allocation outside of the cache
+ * accounting lock. The compiler will optimize the strangeness
+ * away when NO_PTHREADS is defined.
+ */
+ free(trg_entry->delta_data);
+ cache_lock();
+ if (trg_entry->delta_data) {
+ delta_cache_size -= DELTA_SIZE(trg_entry);
+ trg_entry->delta_data = NULL;
+ }
+ if (delta_cacheable(src_size, trg_size, delta_size)) {
+ delta_cache_size += delta_size;
+ cache_unlock();
+ trg_entry->delta_data = xrealloc(delta_buf, delta_size);
+ } else {
+ cache_unlock();
+ free(delta_buf);
+ }
+
+ SET_DELTA(trg_entry, src_entry);
+ SET_DELTA_SIZE(trg_entry, delta_size);
+ trg->depth = src->depth + 1;
+
+ return 1;
+}
+
+static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
+{
+ struct object_entry *child = DELTA_CHILD(me);
+ unsigned int m = n;
+ while (child) {
+ const unsigned int c = check_delta_limit(child, n + 1);
+ if (m < c)
+ m = c;
+ child = DELTA_SIBLING(child);
+ }
+ return m;
+}
+
+static unsigned long free_unpacked(struct unpacked *n)
+{
+ unsigned long freed_mem = sizeof_delta_index(n->index);
+ free_delta_index(n->index);
+ n->index = NULL;
+ if (n->data) {
+ freed_mem += SIZE(n->entry);
+ FREE_AND_NULL(n->data);
+ }
+ n->entry = NULL;
+ n->depth = 0;
+ return freed_mem;
+}
+
+static void find_deltas(struct object_entry **list, unsigned *list_size,
+ int window, int depth, unsigned *processed)
+{
+ uint32_t i, idx = 0, count = 0;
+ struct unpacked *array;
+ unsigned long mem_usage = 0;
+
+ CALLOC_ARRAY(array, window);
+
+ for (;;) {
+ struct object_entry *entry;
+ struct unpacked *n = array + idx;
+ int j, max_depth, best_base = -1;
+
+ progress_lock();
+ if (!*list_size) {
+ progress_unlock();
+ break;
+ }
+ entry = *list++;
+ (*list_size)--;
+ if (!entry->preferred_base) {
+ (*processed)++;
+ display_progress(progress_state, *processed);
+ }
+ progress_unlock();
+
+ mem_usage -= free_unpacked(n);
+ n->entry = entry;
+
+ while (window_memory_limit &&
+ mem_usage > window_memory_limit &&
+ count > 1) {
+ const uint32_t tail = (idx + window - count) % window;
+ mem_usage -= free_unpacked(array + tail);
+ count--;
+ }
+
+ /* We do not compute a delta to *create* objects we are not
+ * going to pack.
+ */
+ if (entry->preferred_base)
+ goto next;
+
+ /*
+ * If the current object is at the pack edge, take the depth of
+ * the objects that depend on the current object into account;
+ * otherwise they would become too deep.
+ */
+ max_depth = depth;
+ if (DELTA_CHILD(entry)) {
+ max_depth -= check_delta_limit(entry, 0);
+ if (max_depth <= 0)
+ goto next;
+ }
+
+ j = window;
+ while (--j > 0) {
+ int ret;
+ uint32_t other_idx = idx + j;
+ struct unpacked *m;
+ if (other_idx >= window)
+ other_idx -= window;
+ m = array + other_idx;
+ if (!m->entry)
+ break;
+ ret = try_delta(n, m, max_depth, &mem_usage);
+ if (ret < 0)
+ break;
+ else if (ret > 0)
+ best_base = other_idx;
+ }
+
+ /*
+ * If we decided to cache the delta data, then it is best
+ * to compress it right away. First because we have to do
+ * it anyway, and doing it here while we're threaded will
+ * save a lot of time in the non-threaded write phase,
+ * as well as allow for caching more deltas within
+ * the same cache size limit.
+ * ...
+ * But only if not writing to stdout, since in that case
+ * the network is most likely throttling writes anyway,
+ * and therefore it is best to go to the write phase ASAP
+ * instead, as we can afford spending more time compressing
+ * between writes at that moment.
+ */
+ if (entry->delta_data && !pack_to_stdout) {
+ unsigned long size;
+
+ size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
+ if (size < (1U << OE_Z_DELTA_BITS)) {
+ entry->z_delta_size = size;
+ cache_lock();
+ delta_cache_size -= DELTA_SIZE(entry);
+ delta_cache_size += entry->z_delta_size;
+ cache_unlock();
+ } else {
+ FREE_AND_NULL(entry->delta_data);
+ entry->z_delta_size = 0;
+ }
+ }
+
+ /* if we made n a delta, and if n is already at max
+ * depth, leaving it in the window is pointless. we
+ * should evict it first.
+ */
+ if (DELTA(entry) && max_depth <= n->depth)
+ continue;
+
+ /*
+ * Move the best delta base up in the window, after the
+ * currently deltified object, to keep it longer. It will
+ * be the first base object to be attempted next.
+ */
+ if (DELTA(entry)) {
+ struct unpacked swap = array[best_base];
+ int dist = (window + idx - best_base) % window;
+ int dst = best_base;
+ while (dist--) {
+ int src = (dst + 1) % window;
+ array[dst] = array[src];
+ dst = src;
+ }
+ array[dst] = swap;
+ }
+
+ next:
+ idx++;
+ if (count + 1 < window)
+ count++;
+ if (idx >= window)
+ idx = 0;
+ }
+
+ for (i = 0; i < window; ++i) {
+ free_delta_index(array[i].index);
+ free(array[i].data);
+ }
+ free(array);
+}
+
+/*
+ * The main object list is split into smaller lists, each of which is
+ * handed to one worker.
+ *
+ * The main thread waits on the condition that (at least) one of the workers
+ * has stopped working (which is indicated in the .working member of
+ * struct thread_params).
+ *
+ * When a work thread has completed its work, it sets .working to 0 and
+ * signals the main thread and waits on the condition that .data_ready
+ * becomes 1.
+ *
+ * The main thread steals half of the work from the worker that has
+ * most work left to hand it to the idle worker.
+ */
+
+struct thread_params {
+ pthread_t thread;
+ struct object_entry **list;
+ unsigned list_size;
+ unsigned remaining;
+ int window;
+ int depth;
+ int working;
+ int data_ready;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ unsigned *processed;
+};
+
+static pthread_cond_t progress_cond;
+
+/*
+ * Mutex and condition variable can't be statically initialized on Windows.
+ */
+static void init_threaded_search(void)
+{
+ pthread_mutex_init(&cache_mutex, NULL);
+ pthread_mutex_init(&progress_mutex, NULL);
+ pthread_cond_init(&progress_cond, NULL);
+}
+
+static void cleanup_threaded_search(void)
+{
+ pthread_cond_destroy(&progress_cond);
+ pthread_mutex_destroy(&cache_mutex);
+ pthread_mutex_destroy(&progress_mutex);
+}
+
+static void *threaded_find_deltas(void *arg)
+{
+ struct thread_params *me = arg;
+
+ progress_lock();
+ while (me->remaining) {
+ progress_unlock();
+
+ find_deltas(me->list, &me->remaining,
+ me->window, me->depth, me->processed);
+
+ progress_lock();
+ me->working = 0;
+ pthread_cond_signal(&progress_cond);
+ progress_unlock();
+
+ /*
+ * We must not set ->data_ready before we wait on the
+ * condition because the main thread may have set it to 1
+ * before we get here. In order to be sure that new
+ * work is available if we see 1 in ->data_ready, it
+ * was initialized to 0 before this thread was spawned
+ * and we reset it to 0 right away.
+ */
+ pthread_mutex_lock(&me->mutex);
+ while (!me->data_ready)
+ pthread_cond_wait(&me->cond, &me->mutex);
+ me->data_ready = 0;
+ pthread_mutex_unlock(&me->mutex);
+
+ progress_lock();
+ }
+ progress_unlock();
+ /* leave ->working 1 so that this doesn't get more work assigned */
+ return NULL;
+}
+
+static void ll_find_deltas(struct object_entry **list, unsigned list_size,
+ int window, int depth, unsigned *processed)
+{
+ struct thread_params *p;
+ int i, ret, active_threads = 0;
+
+ init_threaded_search();
+
+ if (delta_search_threads <= 1) {
+ find_deltas(list, &list_size, window, depth, processed);
+ cleanup_threaded_search();
+ return;
+ }
+ if (progress > pack_to_stdout)
+ fprintf_ln(stderr, _("Delta compression using up to %d threads"),
+ delta_search_threads);
+ CALLOC_ARRAY(p, delta_search_threads);
+
+ /* Partition the work amongst work threads. */
+ for (i = 0; i < delta_search_threads; i++) {
+ unsigned sub_size = list_size / (delta_search_threads - i);
+
+ /* don't use too small segments or no deltas will be found */
+ if (sub_size < 2*window && i+1 < delta_search_threads)
+ sub_size = 0;
+
+ p[i].window = window;
+ p[i].depth = depth;
+ p[i].processed = processed;
+ p[i].working = 1;
+ p[i].data_ready = 0;
+
+ /* try to split chunks on "path" boundaries */
+ while (sub_size && sub_size < list_size &&
+ list[sub_size]->hash &&
+ list[sub_size]->hash == list[sub_size-1]->hash)
+ sub_size++;
+
+ p[i].list = list;
+ p[i].list_size = sub_size;
+ p[i].remaining = sub_size;
+
+ list += sub_size;
+ list_size -= sub_size;
+ }
+
+ /* Start work threads. */
+ for (i = 0; i < delta_search_threads; i++) {
+ if (!p[i].list_size)
+ continue;
+ pthread_mutex_init(&p[i].mutex, NULL);
+ pthread_cond_init(&p[i].cond, NULL);
+ ret = pthread_create(&p[i].thread, NULL,
+ threaded_find_deltas, &p[i]);
+ if (ret)
+ die(_("unable to create thread: %s"), strerror(ret));
+ active_threads++;
+ }
+
+ /*
+ * Now let's wait for work completion. Each time a thread is done
+ * with its work, we steal half of the remaining work from the
+ * thread with the largest number of unprocessed objects and give
+ * it to that newly idle thread. This ensures good load balancing
+ * until the remaining object list segments are simply too short
+ * to be worth splitting anymore.
+ */
+ while (active_threads) {
+ struct thread_params *target = NULL;
+ struct thread_params *victim = NULL;
+ unsigned sub_size = 0;
+
+ progress_lock();
+ for (;;) {
+ for (i = 0; !target && i < delta_search_threads; i++)
+ if (!p[i].working)
+ target = &p[i];
+ if (target)
+ break;
+ pthread_cond_wait(&progress_cond, &progress_mutex);
+ }
+
+ for (i = 0; i < delta_search_threads; i++)
+ if (p[i].remaining > 2*window &&
+ (!victim || victim->remaining < p[i].remaining))
+ victim = &p[i];
+ if (victim) {
+ sub_size = victim->remaining / 2;
+ list = victim->list + victim->list_size - sub_size;
+ while (sub_size && list[0]->hash &&
+ list[0]->hash == list[-1]->hash) {
+ list++;
+ sub_size--;
+ }
+ if (!sub_size) {
+ /*
+ * It is possible for some "paths" to have
+ * so many objects that no hash boundary
+ * might be found. Let's just steal the
+ * exact half in that case.
+ */
+ sub_size = victim->remaining / 2;
+ list -= sub_size;
+ }
+ target->list = list;
+ victim->list_size -= sub_size;
+ victim->remaining -= sub_size;
+ }
+ target->list_size = sub_size;
+ target->remaining = sub_size;
+ target->working = 1;
+ progress_unlock();
+
+ pthread_mutex_lock(&target->mutex);
+ target->data_ready = 1;
+ pthread_cond_signal(&target->cond);
+ pthread_mutex_unlock(&target->mutex);
+
+ if (!sub_size) {
+ pthread_join(target->thread, NULL);
+ pthread_cond_destroy(&target->cond);
+ pthread_mutex_destroy(&target->mutex);
+ active_threads--;
+ }
+ }
+ cleanup_threaded_search();
+ free(p);
+}
+
+static int obj_is_packed(const struct object_id *oid)
+{
+ return packlist_find(&to_pack, oid) ||
+ (reuse_packfile_bitmap &&
+ bitmap_walk_contains(bitmap_git, reuse_packfile_bitmap, oid));
+}
+
+static void add_tag_chain(const struct object_id *oid)
+{
+ struct tag *tag;
+
+ /*
+ * We catch duplicates already in add_object_entry(), but we'd
+ * prefer to do this extra check to avoid having to parse the
+ * tag at all if we already know that it's being packed (e.g., if
+ * it was included via bitmaps, we would not have parsed it
+ * previously).
+ */
+ if (obj_is_packed(oid))
+ return;
+
+ tag = lookup_tag(the_repository, oid);
+ while (1) {
+ if (!tag || parse_tag(tag) || !tag->tagged)
+ die(_("unable to pack objects reachable from tag %s"),
+ oid_to_hex(oid));
+
+ add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
+
+ if (tag->tagged->type != OBJ_TAG)
+ return;
+
+ tag = (struct tag *)tag->tagged;
+ }
+}
+
+static int add_ref_tag(const char *tag UNUSED, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ struct object_id peeled;
+
+ if (!peel_iterated_oid(oid, &peeled) && obj_is_packed(&peeled))
+ add_tag_chain(oid);
+ return 0;
+}
+
+static void prepare_pack(int window, int depth)
+{
+ struct object_entry **delta_list;
+ uint32_t i, nr_deltas;
+ unsigned n;
+
+ if (use_delta_islands)
+ resolve_tree_islands(the_repository, progress, &to_pack);
+
+ get_object_details();
+
+ /*
+ * If we're locally repacking then we need to be doubly careful
+ * from now on in order to make sure no stealth corruption gets
+ * propagated to the new pack. Clients receiving streamed packs
+ * should validate everything they get anyway so no need to incur
+ * the additional cost here in that case.
+ */
+ if (!pack_to_stdout)
+ do_check_packed_object_crc = 1;
+
+ if (!to_pack.nr_objects || !window || !depth)
+ return;
+
+ ALLOC_ARRAY(delta_list, to_pack.nr_objects);
+ nr_deltas = n = 0;
+
+ for (i = 0; i < to_pack.nr_objects; i++) {
+ struct object_entry *entry = to_pack.objects + i;
+
+ if (DELTA(entry))
+ /* This happens if we decided to reuse an existing
+ * delta from a pack. "reuse_delta &&" is implied.
+ */
+ continue;
+
+ if (!entry->type_valid ||
+ oe_size_less_than(&to_pack, entry, 50))
+ continue;
+
+ if (entry->no_try_delta)
+ continue;
+
+ if (!entry->preferred_base) {
+ nr_deltas++;
+ if (oe_type(entry) < 0)
+ die(_("unable to get type of object %s"),
+ oid_to_hex(&entry->idx.oid));
+ } else {
+ if (oe_type(entry) < 0) {
+ /*
+ * This object is not found, but we
+ * don't have to include it anyway.
+ */
+ continue;
+ }
+ }
+
+ delta_list[n++] = entry;
+ }
+
+ if (nr_deltas && n > 1) {
+ unsigned nr_done = 0;
+
+ if (progress)
+ progress_state = start_progress(_("Compressing objects"),
+ nr_deltas);
+ QSORT(delta_list, n, type_size_sort);
+ ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
+ stop_progress(&progress_state);
+ if (nr_done != nr_deltas)
+ die(_("inconsistency with delta count"));
+ }
+ free(delta_list);
+}
+
+static int git_pack_config(const char *k, const char *v, void *cb)
+{
+ if (!strcmp(k, "pack.window")) {
+ window = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.windowmemory")) {
+ window_memory_limit = git_config_ulong(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.depth")) {
+ depth = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.deltacachesize")) {
+ max_delta_cache_size = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.deltacachelimit")) {
+ cache_max_small_delta_size = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.writebitmaphashcache")) {
+ if (git_config_bool(k, v))
+ write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
+ else
+ write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
+ }
+
+ if (!strcmp(k, "pack.writebitmaplookuptable")) {
+ if (git_config_bool(k, v))
+ write_bitmap_options |= BITMAP_OPT_LOOKUP_TABLE;
+ else
+ write_bitmap_options &= ~BITMAP_OPT_LOOKUP_TABLE;
+ }
+
+ if (!strcmp(k, "pack.usebitmaps")) {
+ use_bitmap_index_default = git_config_bool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.allowpackreuse")) {
+ allow_pack_reuse = git_config_bool(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.threads")) {
+ delta_search_threads = git_config_int(k, v);
+ if (delta_search_threads < 0)
+ die(_("invalid number of threads specified (%d)"),
+ delta_search_threads);
+ if (!HAVE_THREADS && delta_search_threads != 1) {
+ warning(_("no threads support, ignoring %s"), k);
+ delta_search_threads = 0;
+ }
+ return 0;
+ }
+ if (!strcmp(k, "pack.indexversion")) {
+ pack_idx_opts.version = git_config_int(k, v);
+ if (pack_idx_opts.version > 2)
+ die(_("bad pack.indexVersion=%"PRIu32),
+ pack_idx_opts.version);
+ return 0;
+ }
+ if (!strcmp(k, "pack.writereverseindex")) {
+ if (git_config_bool(k, v))
+ pack_idx_opts.flags |= WRITE_REV;
+ else
+ pack_idx_opts.flags &= ~WRITE_REV;
+ return 0;
+ }
+ if (!strcmp(k, "uploadpack.blobpackfileuri")) {
+ struct configured_exclusion *ex = xmalloc(sizeof(*ex));
+ const char *oid_end, *pack_end;
+ /*
+ * Stores the pack hash. This is not a true object ID, but is
+ * of the same form.
+ */
+ struct object_id pack_hash;
+
+ if (parse_oid_hex(v, &ex->e.oid, &oid_end) ||
+ *oid_end != ' ' ||
+ parse_oid_hex(oid_end + 1, &pack_hash, &pack_end) ||
+ *pack_end != ' ')
+ die(_("value of uploadpack.blobpackfileuri must be "
+ "of the form '<object-hash> <pack-hash> <uri>' (got '%s')"), v);
+ if (oidmap_get(&configured_exclusions, &ex->e.oid))
+ die(_("object already configured in another "
+ "uploadpack.blobpackfileuri (got '%s')"), v);
+ ex->pack_hash_hex = xcalloc(1, pack_end - oid_end);
+ memcpy(ex->pack_hash_hex, oid_end + 1, pack_end - oid_end - 1);
+ ex->uri = xstrdup(pack_end + 1);
+ oidmap_put(&configured_exclusions, ex);
+ }
+ return git_default_config(k, v, cb);
+}
+
+/* Counters for trace2 output when in --stdin-packs mode. */
+static int stdin_packs_found_nr;
+static int stdin_packs_hints_nr;
+
+static int add_object_entry_from_pack(const struct object_id *oid,
+ struct packed_git *p,
+ uint32_t pos,
+ void *_data)
+{
+ off_t ofs;
+ enum object_type type = OBJ_NONE;
+
+ display_progress(progress_state, ++nr_seen);
+
+ if (have_duplicate_entry(oid, 0))
+ return 0;
+
+ ofs = nth_packed_object_offset(p, pos);
+ if (!want_object_in_pack(oid, 0, &p, &ofs))
+ return 0;
+
+ if (p) {
+ struct rev_info *revs = _data;
+ struct object_info oi = OBJECT_INFO_INIT;
+
+ oi.typep = &type;
+ if (packed_object_info(the_repository, p, ofs, &oi) < 0) {
+ die(_("could not get type of object %s in pack %s"),
+ oid_to_hex(oid), p->pack_name);
+ } else if (type == OBJ_COMMIT) {
+ /*
+ * commits in included packs are used as starting points for the
+ * subsequent revision walk
+ */
+ add_pending_oid(revs, NULL, oid, 0);
+ }
+
+ stdin_packs_found_nr++;
+ }
+
+ create_object_entry(oid, type, 0, 0, 0, p, ofs);
+
+ return 0;
+}
+
+static void show_commit_pack_hint(struct commit *commit, void *_data)
+{
+ /* nothing to do; commits don't have a namehash */
+}
+
+static void show_object_pack_hint(struct object *object, const char *name,
+ void *_data)
+{
+ struct object_entry *oe = packlist_find(&to_pack, &object->oid);
+ if (!oe)
+ return;
+
+ /*
+ * Our 'to_pack' list was constructed by iterating all objects packed in
+ * included packs, and so doesn't have a non-zero hash field that you
+ * would typically pick up during a reachability traversal.
+ *
+ * Make a best-effort attempt to fill in the ->hash and ->no_try_delta
+ * here using the name in order to perhaps improve the delta selection
+ * process.
+ */
+ oe->hash = pack_name_hash(name);
+ oe->no_try_delta = name && no_try_delta(name);
+
+ stdin_packs_hints_nr++;
+}
+
+static int pack_mtime_cmp(const void *_a, const void *_b)
+{
+ struct packed_git *a = ((const struct string_list_item*)_a)->util;
+ struct packed_git *b = ((const struct string_list_item*)_b)->util;
+
+ /*
+ * order packs by descending mtime so that objects are laid out
+ * roughly as newest-to-oldest
+ */
+ if (a->mtime < b->mtime)
+ return 1;
+ else if (b->mtime < a->mtime)
+ return -1;
+ else
+ return 0;
+}
+
+static void read_packs_list_from_stdin(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list include_packs = STRING_LIST_INIT_DUP;
+ struct string_list exclude_packs = STRING_LIST_INIT_DUP;
+ struct string_list_item *item = NULL;
+
+ struct packed_git *p;
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ /*
+ * Use a revision walk to fill in the namehash of objects in the include
+ * packs. To save time, we'll avoid traversing through objects that are
+ * in excluded packs.
+ *
+ * That may cause us to avoid populating all of the namehash fields of
+ * all included objects, but our goal is best-effort, since this is only
+ * an optimization during delta selection.
+ */
+ revs.no_kept_objects = 1;
+ revs.keep_pack_cache_flags |= IN_CORE_KEEP_PACKS;
+ revs.blob_objects = 1;
+ revs.tree_objects = 1;
+ revs.tag_objects = 1;
+ revs.ignore_missing_links = 1;
+
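+ /*
+ * Each input line names a pack to include or, with a leading '^',
+ * a pack whose objects must be left out, e.g. (hypothetical
+ * names):
+ *
+ * pack-1234abcd.pack
+ * ^pack-99887766.pack
+ */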
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (!buf.len)
+ continue;
+
+ if (*buf.buf == '^')
+ string_list_append(&exclude_packs, buf.buf + 1);
+ else
+ string_list_append(&include_packs, buf.buf);
+
+ strbuf_reset(&buf);
+ }
+
+ string_list_sort(&include_packs);
+ string_list_sort(&exclude_packs);
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ const char *pack_name = pack_basename(p);
+
+ item = string_list_lookup(&include_packs, pack_name);
+ if (!item)
+ item = string_list_lookup(&exclude_packs, pack_name);
+
+ if (item)
+ item->util = p;
+ }
+
+ /*
+ * Arguments we got on stdin may not even be packs. First
+ * check that to avoid segfaulting later on in
+ * e.g. pack_mtime_cmp(); excluded packs are handled below.
+ *
+ * Since we first parsed our STDIN and then sorted the input
+ * lines, the pack we error on will be whatever line happens to
+ * sort first. This is lazy; it's enough that we report one
+ * bad case here, we don't need to report the first/last one,
+ * or all of them.
+ */
+ for_each_string_list_item(item, &include_packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ if (!is_pack_valid(p))
+ die(_("packfile %s cannot be accessed"), p->pack_name);
+ }
+
+ /*
+ * Then, handle all of the excluded packs, marking them as
+ * kept in-core so that later calls to add_object_entry()
+ * discard any objects that are also found in excluded packs.
+ */
+ for_each_string_list_item(item, &exclude_packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ p->pack_keep_in_core = 1;
+ }
+
+ /*
+ * Order packs by descending mtime; use QSORT directly to access the
+ * string_list_item's ->util pointer, which string_list_sort() does not
+ * provide.
+ */
+ QSORT(include_packs.items, include_packs.nr, pack_mtime_cmp);
+
+ for_each_string_list_item(item, &include_packs) {
+ struct packed_git *p = item->util;
+ for_each_object_in_pack(p,
+ add_object_entry_from_pack,
+ &revs,
+ FOR_EACH_OBJECT_PACK_ORDER);
+ }
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ traverse_commit_list(&revs,
+ show_commit_pack_hint,
+ show_object_pack_hint,
+ NULL);
+
+ trace2_data_intmax("pack-objects", the_repository, "stdin_packs_found",
+ stdin_packs_found_nr);
+ trace2_data_intmax("pack-objects", the_repository, "stdin_packs_hints",
+ stdin_packs_hints_nr);
+
+ strbuf_release(&buf);
+ string_list_clear(&include_packs, 0);
+ string_list_clear(&exclude_packs, 0);
+}
+
+static void add_cruft_object_entry(const struct object_id *oid, enum object_type type,
+ struct packed_git *pack, off_t offset,
+ const char *name, uint32_t mtime)
+{
+ struct object_entry *entry;
+
+ display_progress(progress_state, ++nr_seen);
+
+ entry = packlist_find(&to_pack, oid);
+ if (entry) {
+ if (name) {
+ entry->hash = pack_name_hash(name);
+ entry->no_try_delta = no_try_delta(name);
+ }
+ } else {
+ if (!want_object_in_pack(oid, 0, &pack, &offset))
+ return;
+ if (!pack && type == OBJ_BLOB && !has_loose_object(oid)) {
+ /*
+ * If a traversed tree has a missing blob then we want
+ * to avoid adding that missing object to our pack.
+ *
+ * This only applies to missing blobs, not trees,
+ * because the traversal needs to parse sub-trees but
+ * not blobs.
+ *
+ * Note we only perform this check when we couldn't
+ * already find the object in a pack, so we're really
+ * limited to "ensure non-tip blobs which don't exist in
+ * packs do exist via loose objects". Confused?
+ */
+ return;
+ }
+
+ entry = create_object_entry(oid, type, pack_name_hash(name),
+ 0, name && no_try_delta(name),
+ pack, offset);
+ }
+
+ if (mtime > oe_cruft_mtime(&to_pack, entry))
+ oe_set_cruft_mtime(&to_pack, entry, mtime);
+ return;
+}
+
+static void show_cruft_object(struct object *obj, const char *name, void *data)
+{
+ /*
+ * if we did not record it earlier, it's at least as old as our
+ * expiration value. Rather than find it exactly, just use that
+ * value. This may bump it forward from its real mtime, but it
+ * will still be "too old" next time we run with the same
+ * expiration.
+ *
+ * if obj does appear in the packing list, this call is a noop (or may
+ * set the namehash).
+ */
+ add_cruft_object_entry(&obj->oid, obj->type, NULL, 0, name, cruft_expiration);
+}
+
+static void show_cruft_commit(struct commit *commit, void *data)
+{
+ show_cruft_object((struct object*)commit, NULL, data);
+}
+
+static int cruft_include_check_obj(struct object *obj, void *data)
+{
+ return !has_object_kept_pack(&obj->oid, IN_CORE_KEEP_PACKS);
+}
+
+static int cruft_include_check(struct commit *commit, void *data)
+{
+ return cruft_include_check_obj((struct object*)commit, data);
+}
+
+static void set_cruft_mtime(const struct object *object,
+ struct packed_git *pack,
+ off_t offset, time_t mtime)
+{
+ add_cruft_object_entry(&object->oid, object->type, pack, offset, NULL,
+ mtime);
+}
+
+static void mark_pack_kept_in_core(struct string_list *packs, unsigned keep)
+{
+ struct string_list_item *item = NULL;
+ for_each_string_list_item(item, packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ p->pack_keep_in_core = keep;
+ }
+}
+
+static void add_unreachable_loose_objects(void);
+static void add_objects_in_unpacked_packs(void);
+
+static void enumerate_cruft_objects(void)
+{
+ if (progress)
+ progress_state = start_progress(_("Enumerating cruft objects"), 0);
+
+ add_objects_in_unpacked_packs();
+ add_unreachable_loose_objects();
+
+ stop_progress(&progress_state);
+}
+
+static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
+{
+ struct packed_git *p;
+ struct rev_info revs;
+ int ret;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+
+ revs.tag_objects = 1;
+ revs.tree_objects = 1;
+ revs.blob_objects = 1;
+
+ revs.include_check = cruft_include_check;
+ revs.include_check_obj = cruft_include_check_obj;
+
+ revs.ignore_missing_links = 1;
+
+ if (progress)
+ progress_state = start_progress(_("Enumerating cruft objects"), 0);
+ ret = add_unseen_recent_objects_to_traversal(&revs, cruft_expiration,
+ set_cruft_mtime, 1);
+ stop_progress(&progress_state);
+
+ if (ret)
+ die(_("unable to add cruft objects"));
+
+ /*
+ * Re-mark only the fresh packs as kept so that objects in
+ * unknown packs do not halt the reachability traversal early.
+ */
+ for (p = get_all_packs(the_repository); p; p = p->next)
+ p->pack_keep_in_core = 0;
+ mark_pack_kept_in_core(fresh_packs, 1);
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ if (progress)
+ progress_state = start_progress(_("Traversing cruft objects"), 0);
+ nr_seen = 0;
+ traverse_commit_list(&revs, show_cruft_commit, show_cruft_object, NULL);
+
+ stop_progress(&progress_state);
+}
+
+static void read_cruft_objects(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list discard_packs = STRING_LIST_INIT_DUP;
+ struct string_list fresh_packs = STRING_LIST_INIT_DUP;
+ struct packed_git *p;
+
+ ignore_packed_keep_in_core = 1;
+
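+ /*
+ * Each line on stdin names a pack: a leading '-' marks a pack the
+ * caller intends to delete (so its objects are candidates for the
+ * cruft pack); any other named pack is "fresh" and its objects are
+ * left out of the cruft pack.
+ */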
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (!buf.len)
+ continue;
+
+ if (*buf.buf == '-')
+ string_list_append(&discard_packs, buf.buf + 1);
+ else
+ string_list_append(&fresh_packs, buf.buf);
+ strbuf_reset(&buf);
+ }
+
+ string_list_sort(&discard_packs);
+ string_list_sort(&fresh_packs);
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ const char *pack_name = pack_basename(p);
+ struct string_list_item *item;
+
+ item = string_list_lookup(&fresh_packs, pack_name);
+ if (!item)
+ item = string_list_lookup(&discard_packs, pack_name);
+
+ if (item) {
+ item->util = p;
+ } else {
+ /*
+ * This pack wasn't mentioned in either the "fresh" or
+ * "discard" list, so the caller didn't know about it.
+ *
+ * Mark it as kept so that its objects are ignored by
+ * add_unseen_recent_objects_to_traversal(). We'll
+ * unmark it before starting the traversal so it doesn't
+ * halt the traversal early.
+ */
+ p->pack_keep_in_core = 1;
+ }
+ }
+
+ mark_pack_kept_in_core(&fresh_packs, 1);
+ mark_pack_kept_in_core(&discard_packs, 0);
+
+ if (cruft_expiration)
+ enumerate_and_traverse_cruft_objects(&fresh_packs);
+ else
+ enumerate_cruft_objects();
+
+ strbuf_release(&buf);
+ string_list_clear(&discard_packs, 0);
+ string_list_clear(&fresh_packs, 0);
+}
+
+static void read_object_list_from_stdin(void)
+{
+ char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
+ struct object_id oid;
+ const char *p;
+
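+ /*
+ * Each input line is "<oid> <path>"; a line of the form "-<oid>"
+ * names an edge object that is only used as a preferred delta base
+ * and is not itself added to the pack.
+ */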
+ for (;;) {
+ if (!fgets(line, sizeof(line), stdin)) {
+ if (feof(stdin))
+ break;
+ if (!ferror(stdin))
+ BUG("fgets returned NULL, not EOF, not error!");
+ if (errno != EINTR)
+ die_errno("fgets");
+ clearerr(stdin);
+ continue;
+ }
+ if (line[0] == '-') {
+ if (get_oid_hex(line+1, &oid))
+ die(_("expected edge object ID, got garbage:\n %s"),
+ line);
+ add_preferred_base(&oid);
+ continue;
+ }
+ if (parse_oid_hex(line, &oid, &p))
+ die(_("expected object ID, got garbage:\n %s"), line);
+
+ add_preferred_base_object(p + 1);
+ add_object_entry(&oid, OBJ_NONE, p + 1, 0);
+ }
+}
+
+static void show_commit(struct commit *commit, void *data)
+{
+ add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
+
+ if (write_bitmap_index)
+ index_commit_for_bitmap(commit);
+
+ if (use_delta_islands)
+ propagate_island_marks(commit);
+}
+
+static void show_object(struct object *obj, const char *name, void *data)
+{
+ add_preferred_base_object(name);
+ add_object_entry(&obj->oid, obj->type, name, 0);
+
+ if (use_delta_islands) {
+ const char *p;
+ unsigned depth;
+ struct object_entry *ent;
+
+ /* the empty string is a root tree, which is depth 0 */
+ depth = *name ? 1 : 0;
+ for (p = strchr(name, '/'); p; p = strchr(p + 1, '/'))
+ depth++;
+
+ ent = packlist_find(&to_pack, &obj->oid);
+ if (ent && depth > oe_tree_depth(&to_pack, ent))
+ oe_set_tree_depth(&to_pack, ent, depth);
+ }
+}
+
+static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_ANY);
+
+ /*
+ * Quietly ignore ALL missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
+ */
+ if (!has_object(the_repository, &obj->oid, 0))
+ return;
+
+ show_object(obj, name, data);
+}
+
+static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_PROMISOR);
+
+ /*
+ * Quietly ignore EXPECTED missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
+ */
+ if (!has_object(the_repository, &obj->oid, 0) && is_promisor_object(&obj->oid))
+ return;
+
+ show_object(obj, name, data);
+}
+
+static int option_parse_missing_action(const struct option *opt,
+ const char *arg, int unset)
+{
+ assert(arg);
+ assert(!unset);
+
+ if (!strcmp(arg, "error")) {
+ arg_missing_action = MA_ERROR;
+ fn_show_object = show_object;
+ return 0;
+ }
+
+ if (!strcmp(arg, "allow-any")) {
+ arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_any;
+ return 0;
+ }
+
+ if (!strcmp(arg, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_promisor;
+ return 0;
+ }
+
+ die(_("invalid value for '%s': '%s'"), "--missing", arg);
+ return 0;
+}
+
+static void show_edge(struct commit *commit)
+{
+ add_preferred_base(&commit->object.oid);
+}
+
+static int add_object_in_unpacked_pack(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *_data)
+{
+ if (cruft) {
+ off_t offset;
+ time_t mtime;
+
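+ /*
+ * Cruft packs carry per-object mtimes in their ".mtimes" file;
+ * for ordinary packs, fall back to the mtime of the pack itself.
+ */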
+ if (pack->is_cruft) {
+ if (load_pack_mtimes(pack) < 0)
+ die(_("could not load cruft pack .mtimes"));
+ mtime = nth_packed_mtime(pack, pos);
+ } else {
+ mtime = pack->mtime;
+ }
+ offset = nth_packed_object_offset(pack, pos);
+
+ add_cruft_object_entry(oid, OBJ_NONE, pack, offset,
+ NULL, mtime);
+ } else {
+ add_object_entry(oid, OBJ_NONE, "", 0);
+ }
+ return 0;
+}
+
+static void add_objects_in_unpacked_packs(void)
+{
+ if (for_each_packed_object(add_object_in_unpacked_pack, NULL,
+ FOR_EACH_OBJECT_PACK_ORDER |
+ FOR_EACH_OBJECT_LOCAL_ONLY |
+ FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS |
+ FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS))
+ die(_("cannot open pack index"));
+}
+
+static int add_loose_object(const struct object_id *oid, const char *path,
+ void *data)
+{
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
+
+ if (type < 0) {
+ warning(_("loose object at %s could not be examined"), path);
+ return 0;
+ }
+
+ if (cruft) {
+ struct stat st;
+ if (stat(path, &st) < 0) {
+ if (errno == ENOENT)
+ return 0;
+ return error_errno("unable to stat %s", oid_to_hex(oid));
+ }
+
+ add_cruft_object_entry(oid, type, NULL, 0, NULL,
+ st.st_mtime);
+ } else {
+ add_object_entry(oid, type, "", 0);
+ }
+ return 0;
+}
+
+/*
+ * We actually don't even have to worry about reachability here.
+ * add_object_entry will weed out duplicates, so we just add every
+ * loose object we find.
+ */
+static void add_unreachable_loose_objects(void)
+{
+ for_each_loose_file_in_objdir(get_object_directory(),
+ add_loose_object,
+ NULL, NULL, NULL);
+}
+
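+/*
+ * Check whether the object is present in a pack we are not going to
+ * repack: a non-local pack, a pack with a .keep file, or an in-core
+ * kept pack. The pack that produced the last hit is remembered and
+ * tried first, since consecutive lookups tend to land in the same pack.
+ */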
+static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
+{
+ static struct packed_git *last_found = (void *)1;
+ struct packed_git *p;
+
+ p = (last_found != (void *)1) ? last_found :
+ get_all_packs(the_repository);
+
+ while (p) {
+ if ((!p->pack_local || p->pack_keep ||
+ p->pack_keep_in_core) &&
+ find_pack_entry_one(oid->hash, p)) {
+ last_found = p;
+ return 1;
+ }
+ if (p == last_found)
+ p = get_all_packs(the_repository);
+ else
+ p = p->next;
+ if (p == last_found)
+ p = p->next;
+ }
+ return 0;
+}
+
+/*
+ * Store a list of object IDs that should not be discarded
+ * because they are either written too recently, or are
+ * reachable from another object that was.
+ *
+ * This is filled by get_object_list.
+ */
+static struct oid_array recent_objects;
+
+static int loosened_object_can_be_discarded(const struct object_id *oid,
+ timestamp_t mtime)
+{
+ if (!unpack_unreachable_expiration)
+ return 0;
+ if (mtime > unpack_unreachable_expiration)
+ return 0;
+ if (oid_array_lookup(&recent_objects, oid) >= 0)
+ return 0;
+ return 1;
+}
+
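+/*
+ * For every local, non-kept pack, write out as a loose object each
+ * object that will not be in the new pack and is not available from a
+ * kept or non-local pack, unless it is old enough (and not otherwise
+ * recent) to be discarded outright.
+ */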
+static void loosen_unused_packed_objects(void)
+{
+ struct packed_git *p;
+ uint32_t i;
+ uint32_t loosened_objects_nr = 0;
+ struct object_id oid;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
+ continue;
+
+ if (open_pack_index(p))
+ die(_("cannot open pack index"));
+
+ for (i = 0; i < p->num_objects; i++) {
+ nth_packed_object_id(&oid, p, i);
+ if (!packlist_find(&to_pack, &oid) &&
+ !has_sha1_pack_kept_or_nonlocal(&oid) &&
+ !loosened_object_can_be_discarded(&oid, p->mtime)) {
+ if (force_object_loose(&oid, p->mtime))
+ die(_("unable to force loose object"));
+ loosened_objects_nr++;
+ }
+ }
+ }
+
+ trace2_data_intmax("pack-objects", the_repository,
+ "loosen_unused_packed_objects/loosened", loosened_objects_nr);
+}
+
+/*
+ * This tracks any options which pack-reuse code expects to be on, or which a
+ * reader of the pack might not understand, and which would therefore prevent
+ * blind reuse of what we have on disk.
+ */
+static int pack_options_allow_reuse(void)
+{
+ return allow_pack_reuse &&
+ pack_to_stdout &&
+ !ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
+ (!local || !have_non_local_packs) &&
+ !incremental;
+}
+
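+/*
+ * Enumerate the objects via the reachability bitmaps. When the
+ * conditions checked by pack_options_allow_reuse() hold, a portion of
+ * an existing packfile is reused verbatim instead of re-adding its
+ * objects one by one.
+ */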
+static int get_object_list_from_bitmap(struct rev_info *revs)
+{
+ if (!(bitmap_git = prepare_bitmap_walk(revs, 0)))
+ return -1;
+
+ if (pack_options_allow_reuse() &&
+ !reuse_partial_packfile_from_bitmap(
+ bitmap_git,
+ &reuse_packfile,
+ &reuse_packfile_objects,
+ &reuse_packfile_bitmap)) {
+ assert(reuse_packfile_objects);
+ nr_result += reuse_packfile_objects;
+ nr_seen += reuse_packfile_objects;
+ display_progress(progress_state, nr_seen);
+ }
+
+ traverse_bitmap_commit_list(bitmap_git, revs,
+ &add_object_entry_from_bitmap);
+ return 0;
+}
+
+static void record_recent_object(struct object *obj,
+ const char *name,
+ void *data)
+{
+ oid_array_append(&recent_objects, &obj->oid);
+}
+
+static void record_recent_commit(struct commit *commit, void *data)
+{
+ oid_array_append(&recent_objects, &commit->object.oid);
+}
+
+static int mark_bitmap_preferred_tip(const char *refname,
+ const struct object_id *oid,
+ int flags UNUSED,
+ void *data UNUSED)
+{
+ struct object_id peeled;
+ struct object *object;
+
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+
+ object = parse_object_or_die(oid, refname);
+ if (object->type == OBJ_COMMIT)
+ object->flags |= NEEDS_BITMAP;
+
+ return 0;
+}
+
+static void mark_bitmap_preferred_tips(void)
+{
+ struct string_list_item *item;
+ const struct string_list *preferred_tips;
+
+ preferred_tips = bitmap_preferred_tips(the_repository);
+ if (!preferred_tips)
+ return;
+
+ for_each_string_list_item(item, preferred_tips) {
+ for_each_ref_in(item->string, mark_bitmap_preferred_tip, NULL);
+ }
+}
+
+static void get_object_list(struct rev_info *revs, int ac, const char **av)
+{
+ struct setup_revision_opt s_r_opt = {
+ .allow_exclude_promisor_objects = 1,
+ };
+ char line[1000];
+ int flags = 0;
+ int save_warning;
+
+ save_commit_buffer = 0;
+ setup_revisions(ac, av, revs, &s_r_opt);
+
+ /* make sure shallows are read */
+ is_repository_shallow(the_repository);
+
+ save_warning = warn_on_object_refname_ambiguity;
+ warn_on_object_refname_ambiguity = 0;
+
+ while (fgets(line, sizeof(line), stdin) != NULL) {
+ int len = strlen(line);
+ if (len && line[len - 1] == '\n')
+ line[--len] = 0;
+ if (!len)
+ break;
+ if (*line == '-') {
+ if (!strcmp(line, "--not")) {
+ flags ^= UNINTERESTING;
+ write_bitmap_index = 0;
+ continue;
+ }
+ if (starts_with(line, "--shallow ")) {
+ struct object_id oid;
+ if (get_oid_hex(line + 10, &oid))
+ die("not an object name '%s'", line + 10);
+ register_shallow(the_repository, &oid);
+ use_bitmap_index = 0;
+ continue;
+ }
+ die(_("not a rev '%s'"), line);
+ }
+ if (handle_revision_arg(line, revs, flags, REVARG_CANNOT_BE_FILENAME))
+ die(_("bad revision '%s'"), line);
+ }
+
+ warn_on_object_refname_ambiguity = save_warning;
+
+ if (use_bitmap_index && !get_object_list_from_bitmap(revs))
+ return;
+
+ if (use_delta_islands)
+ load_delta_islands(the_repository, progress);
+
+ if (write_bitmap_index)
+ mark_bitmap_preferred_tips();
+
+ if (prepare_revision_walk(revs))
+ die(_("revision walk setup failed"));
+ mark_edges_uninteresting(revs, show_edge, sparse);
+
+ if (!fn_show_object)
+ fn_show_object = show_object;
+ traverse_commit_list(revs,
+ show_commit, fn_show_object,
+ NULL);
+
+ if (unpack_unreachable_expiration) {
+ revs->ignore_missing_links = 1;
+ if (add_unseen_recent_objects_to_traversal(revs,
+ unpack_unreachable_expiration, NULL, 0))
+ die(_("unable to add recent objects"));
+ if (prepare_revision_walk(revs))
+ die(_("revision walk setup failed"));
+ traverse_commit_list(revs, record_recent_commit,
+ record_recent_object, NULL);
+ }
+
+ if (keep_unreachable)
+ add_objects_in_unpacked_packs();
+ if (pack_loose_unreachable)
+ add_unreachable_loose_objects();
+ if (unpack_unreachable)
+ loosen_unused_packed_objects();
+
+ oid_array_clear(&recent_objects);
+}
+
+static void add_extra_kept_packs(const struct string_list *names)
+{
+ struct packed_git *p;
+
+ if (!names->nr)
+ return;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ const char *name = basename(p->pack_name);
+ int i;
+
+ if (!p->pack_local)
+ continue;
+
+ for (i = 0; i < names->nr; i++)
+ if (!fspathcmp(name, names->items[i].string))
+ break;
+
+ if (i < names->nr) {
+ p->pack_keep_in_core = 1;
+ ignore_packed_keep_in_core = 1;
+ continue;
+ }
+ }
+}
+
+static int option_parse_index_version(const struct option *opt,
+ const char *arg, int unset)
+{
+ char *c;
+ const char *val = arg;
+
+ BUG_ON_OPT_NEG(unset);
+
+ pack_idx_opts.version = strtoul(val, &c, 10);
+ if (pack_idx_opts.version > 2)
+ die(_("unsupported index version %s"), val);
+ if (*c == ',' && c[1])
+ pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
+ if (*c || pack_idx_opts.off32_limit & 0x80000000)
+ die(_("bad index version '%s'"), val);
+ return 0;
+}
+
+static int option_parse_unpack_unreachable(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset) {
+ unpack_unreachable = 0;
+ unpack_unreachable_expiration = 0;
+ }
+ else {
+ unpack_unreachable = 1;
+ if (arg)
+ unpack_unreachable_expiration = approxidate(arg);
+ }
+ return 0;
+}
+
+static int option_parse_cruft_expiration(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset) {
+ cruft = 0;
+ cruft_expiration = 0;
+ } else {
+ cruft = 1;
+ if (arg)
+ cruft_expiration = approxidate(arg);
+ }
+ return 0;
+}
+
+struct po_filter_data {
+ unsigned have_revs:1;
+ struct rev_info revs;
+};
+
+static struct list_objects_filter_options *po_filter_revs_init(void *value)
+{
+ struct po_filter_data *data = value;
+
+ repo_init_revisions(the_repository, &data->revs, NULL);
+ data->have_revs = 1;
+
+ return &data->revs.filter;
+}
+
+int cmd_pack_objects(int argc, const char **argv, const char *prefix)
+{
+ int use_internal_rev_list = 0;
+ int shallow = 0;
+ int all_progress_implied = 0;
+ struct strvec rp = STRVEC_INIT;
+ int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
+ int rev_list_index = 0;
+ int stdin_packs = 0;
+ struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
+ struct po_filter_data pfd = { .have_revs = 0 };
+
+ struct option pack_objects_options[] = {
+ OPT_SET_INT('q', "quiet", &progress,
+ N_("do not show progress meter"), 0),
+ OPT_SET_INT(0, "progress", &progress,
+ N_("show progress meter"), 1),
+ OPT_SET_INT(0, "all-progress", &progress,
+ N_("show progress meter during object writing phase"), 2),
+ OPT_BOOL(0, "all-progress-implied",
+ &all_progress_implied,
+ N_("similar to --all-progress when progress meter is shown")),
+ OPT_CALLBACK_F(0, "index-version", NULL, N_("<version>[,<offset>]"),
+ N_("write the pack index file in the specified idx format version"),
+ PARSE_OPT_NONEG, option_parse_index_version),
+ OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
+ N_("maximum size of each output pack file")),
+ OPT_BOOL(0, "local", &local,
+ N_("ignore borrowed objects from alternate object store")),
+ OPT_BOOL(0, "incremental", &incremental,
+ N_("ignore packed objects")),
+ OPT_INTEGER(0, "window", &window,
+ N_("limit pack window by objects")),
+ OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
+ N_("limit pack window by memory in addition to object limit")),
+ OPT_INTEGER(0, "depth", &depth,
+ N_("maximum length of delta chain allowed in the resulting pack")),
+ OPT_BOOL(0, "reuse-delta", &reuse_delta,
+ N_("reuse existing deltas")),
+ OPT_BOOL(0, "reuse-object", &reuse_object,
+ N_("reuse existing objects")),
+ OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
+ N_("use OFS_DELTA objects")),
+ OPT_INTEGER(0, "threads", &delta_search_threads,
+ N_("use threads when searching for best delta matches")),
+ OPT_BOOL(0, "non-empty", &non_empty,
+ N_("do not create an empty pack output")),
+ OPT_BOOL(0, "revs", &use_internal_rev_list,
+ N_("read revision arguments from standard input")),
+ OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
+ N_("limit the objects to those that are not yet packed"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "all", &rev_list_all,
+ N_("include objects reachable from any reference"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
+ N_("include objects referred by reflog entries"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
+ N_("include objects referred to by the index"),
+ 1, PARSE_OPT_NONEG),
+ OPT_BOOL(0, "stdin-packs", &stdin_packs,
+ N_("read packs from stdin")),
+ OPT_BOOL(0, "stdout", &pack_to_stdout,
+ N_("output pack to stdout")),
+ OPT_BOOL(0, "include-tag", &include_tag,
+ N_("include tag objects that refer to objects to be packed")),
+ OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
+ N_("keep unreachable objects")),
+ OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
+ N_("pack loose unreachable objects")),
+ OPT_CALLBACK_F(0, "unpack-unreachable", NULL, N_("time"),
+ N_("unpack unreachable objects newer than <time>"),
+ PARSE_OPT_OPTARG, option_parse_unpack_unreachable),
+ OPT_BOOL(0, "cruft", &cruft, N_("create a cruft pack")),
+ OPT_CALLBACK_F(0, "cruft-expiration", NULL, N_("time"),
+ N_("expire cruft objects older than <time>"),
+ PARSE_OPT_OPTARG, option_parse_cruft_expiration),
+ OPT_BOOL(0, "sparse", &sparse,
+ N_("use the sparse reachability algorithm")),
+ OPT_BOOL(0, "thin", &thin,
+ N_("create thin packs")),
+ OPT_BOOL(0, "shallow", &shallow,
+ N_("create packs suitable for shallow fetches")),
+ OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
+ N_("ignore packs that have companion .keep file")),
+ OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+ N_("ignore this pack")),
+ OPT_INTEGER(0, "compression", &pack_compression_level,
+ N_("pack compression level")),
+ OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
+ N_("do not hide commits by grafts"), 0),
+ OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
+ N_("use a bitmap index if available to speed up counting objects")),
+ OPT_SET_INT(0, "write-bitmap-index", &write_bitmap_index,
+ N_("write a bitmap index together with the pack index"),
+ WRITE_BITMAP_TRUE),
+ OPT_SET_INT_F(0, "write-bitmap-index-quiet",
+ &write_bitmap_index,
+ N_("write a bitmap index if possible"),
+ WRITE_BITMAP_QUIET, PARSE_OPT_HIDDEN),
+ OPT_PARSE_LIST_OBJECTS_FILTER_INIT(&pfd, po_filter_revs_init),
+ OPT_CALLBACK_F(0, "missing", NULL, N_("action"),
+ N_("handling for missing objects"), PARSE_OPT_NONEG,
+ option_parse_missing_action),
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("do not pack objects in promisor packfiles")),
+ OPT_BOOL(0, "delta-islands", &use_delta_islands,
+ N_("respect islands during delta compression")),
+ OPT_STRING_LIST(0, "uri-protocol", &uri_protocols,
+ N_("protocol"),
+ N_("exclude any configured uploadpack.blobpackfileuri with this protocol")),
+ OPT_END(),
+ };
+
+ if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
+ BUG("too many dfs states, increase OE_DFS_STATE_BITS");
+
+ read_replace_refs = 0;
+
+ sparse = git_env_bool("GIT_TEST_PACK_SPARSE", -1);
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ if (sparse < 0)
+ sparse = the_repository->settings.pack_use_sparse;
+ }
+
+ reset_pack_idx_option(&pack_idx_opts);
+ git_config(git_pack_config, NULL);
+ if (git_env_bool(GIT_TEST_WRITE_REV_INDEX, 0))
+ pack_idx_opts.flags |= WRITE_REV;
+
+ progress = isatty(2);
+ argc = parse_options(argc, argv, prefix, pack_objects_options,
+ pack_usage, 0);
+
+ if (argc) {
+ base_name = argv[0];
+ argc--;
+ }
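+ /*
+ * Exactly one of "--stdout" or a <base-name> argument must be given,
+ * and no other arguments may remain.
+ */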
+ if (pack_to_stdout != !base_name || argc)
+ usage_with_options(pack_usage, pack_objects_options);
+
+ if (depth < 0)
+ depth = 0;
+ if (depth >= (1 << OE_DEPTH_BITS)) {
+ warning(_("delta chain depth %d is too deep, forcing %d"),
+ depth, (1 << OE_DEPTH_BITS) - 1);
+ depth = (1 << OE_DEPTH_BITS) - 1;
+ }
+ if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
+ warning(_("pack.deltaCacheLimit is too high, forcing %d"),
+ (1U << OE_Z_DELTA_BITS) - 1);
+ cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
+ }
+ if (window < 0)
+ window = 0;
+
+ strvec_push(&rp, "pack-objects");
+ if (thin) {
+ use_internal_rev_list = 1;
+ strvec_push(&rp, shallow
+ ? "--objects-edge-aggressive"
+ : "--objects-edge");
+ } else
+ strvec_push(&rp, "--objects");
+
+ if (rev_list_all) {
+ use_internal_rev_list = 1;
+ strvec_push(&rp, "--all");
+ }
+ if (rev_list_reflog) {
+ use_internal_rev_list = 1;
+ strvec_push(&rp, "--reflog");
+ }
+ if (rev_list_index) {
+ use_internal_rev_list = 1;
+ strvec_push(&rp, "--indexed-objects");
+ }
+ if (rev_list_unpacked && !stdin_packs) {
+ use_internal_rev_list = 1;
+ strvec_push(&rp, "--unpacked");
+ }
+
+ if (exclude_promisor_objects) {
+ use_internal_rev_list = 1;
+ fetch_if_missing = 0;
+ strvec_push(&rp, "--exclude-promisor-objects");
+ }
+ if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+ use_internal_rev_list = 1;
+
+ if (!reuse_object)
+ reuse_delta = 0;
+ if (pack_compression_level == -1)
+ pack_compression_level = Z_DEFAULT_COMPRESSION;
+ else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
+ die(_("bad pack compression level %d"), pack_compression_level);
+
+ if (!delta_search_threads) /* --threads=0 means autodetect */
+ delta_search_threads = online_cpus();
+
+ if (!HAVE_THREADS && delta_search_threads != 1)
+ warning(_("no threads support, ignoring --threads"));
+ if (!pack_to_stdout && !pack_size_limit && !cruft)
+ pack_size_limit = pack_size_limit_cfg;
+ if (pack_to_stdout && pack_size_limit)
+ die(_("--max-pack-size cannot be used to build a pack for transfer"));
+ if (pack_size_limit && pack_size_limit < 1024*1024) {
+ warning(_("minimum pack size limit is 1 MiB"));
+ pack_size_limit = 1024*1024;
+ }
+
+ if (!pack_to_stdout && thin)
+ die(_("--thin cannot be used to build an indexable pack"));
+
+ if (keep_unreachable && unpack_unreachable)
+ die(_("options '%s' and '%s' cannot be used together"), "--keep-unreachable", "--unpack-unreachable");
+ if (!rev_list_all || !rev_list_reflog || !rev_list_index)
+ unpack_unreachable_expiration = 0;
+
+ if (pfd.have_revs && pfd.revs.filter.choice) {
+ if (!pack_to_stdout)
+ die(_("cannot use --filter without --stdout"));
+ if (stdin_packs)
+ die(_("cannot use --filter with --stdin-packs"));
+ }
+
+ if (stdin_packs && use_internal_rev_list)
+ die(_("cannot use internal rev list with --stdin-packs"));
+
+ if (cruft) {
+ if (use_internal_rev_list)
+ die(_("cannot use internal rev list with --cruft"));
+ if (stdin_packs)
+ die(_("cannot use --stdin-packs with --cruft"));
+ if (pack_size_limit)
+ die(_("cannot use --max-pack-size with --cruft"));
+ }
+
+ /*
+ * "soft" reasons not to use bitmaps - for on-disk repack by default we want
+ *
+ * - to produce good pack (with bitmap index not-yet-packed objects are
+ * packed in suboptimal order).
+ *
+ * - to use more robust pack-generation codepath (avoiding possible
+ * bugs in bitmap code and possible bitmap index corruption).
+ */
+ if (!pack_to_stdout)
+ use_bitmap_index_default = 0;
+
+ if (use_bitmap_index < 0)
+ use_bitmap_index = use_bitmap_index_default;
+
+ /* "hard" reasons not to use bitmaps; these just won't work at all */
+ if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository))
+ use_bitmap_index = 0;
+
+ if (pack_to_stdout || !rev_list_all)
+ write_bitmap_index = 0;
+
+ if (use_delta_islands)
+ strvec_push(&rp, "--topo-order");
+
+ if (progress && all_progress_implied)
+ progress = 2;
+
+ add_extra_kept_packs(&keep_pack_list);
+ if (ignore_packed_keep_on_disk) {
+ struct packed_git *p;
+ for (p = get_all_packs(the_repository); p; p = p->next)
+ if (p->pack_local && p->pack_keep)
+ break;
+ if (!p) /* no keep-able packs found */
+ ignore_packed_keep_on_disk = 0;
+ }
+ if (local) {
+ /*
+ * unlike ignore_packed_keep_on_disk above, we do not
+ * want to unset "local" based on looking at packs, as
+ * it also covers non-local objects
+ */
+ struct packed_git *p;
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!p->pack_local) {
+ have_non_local_packs = 1;
+ break;
+ }
+ }
+ }
+
+ trace2_region_enter("pack-objects", "enumerate-objects",
+ the_repository);
+ prepare_packing_data(the_repository, &to_pack);
+
+ if (progress && !cruft)
+ progress_state = start_progress(_("Enumerating objects"), 0);
+ if (stdin_packs) {
+ /* avoids adding objects in excluded packs */
+ ignore_packed_keep_in_core = 1;
+ read_packs_list_from_stdin();
+ if (rev_list_unpacked)
+ add_unreachable_loose_objects();
+ } else if (cruft) {
+ read_cruft_objects();
+ } else if (!use_internal_rev_list) {
+ read_object_list_from_stdin();
+ } else if (pfd.have_revs) {
+ get_object_list(&pfd.revs, rp.nr, rp.v);
+ release_revisions(&pfd.revs);
+ } else {
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ get_object_list(&revs, rp.nr, rp.v);
+ release_revisions(&revs);
+ }
+ cleanup_preferred_base();
+ if (include_tag && nr_result)
+ for_each_tag_ref(add_ref_tag, NULL);
+ stop_progress(&progress_state);
+ trace2_region_leave("pack-objects", "enumerate-objects",
+ the_repository);
+
+ if (non_empty && !nr_result)
+ goto cleanup;
+ if (nr_result) {
+ trace2_region_enter("pack-objects", "prepare-pack",
+ the_repository);
+ prepare_pack(window, depth);
+ trace2_region_leave("pack-objects", "prepare-pack",
+ the_repository);
+ }
+
+ trace2_region_enter("pack-objects", "write-pack-file", the_repository);
+ write_excluded_by_configs();
+ write_pack_file();
+ trace2_region_leave("pack-objects", "write-pack-file", the_repository);
+
+ if (progress)
+ fprintf_ln(stderr,
+ _("Total %"PRIu32" (delta %"PRIu32"),"
+ " reused %"PRIu32" (delta %"PRIu32"),"
+ " pack-reused %"PRIu32),
+ written, written_delta, reused, reused_delta,
+ reuse_packfile_objects);
+
+cleanup:
+ strvec_clear(&rp);
+
+ return 0;
+}
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
new file mode 100644
index 0000000..ecd49ca
--- /dev/null
+++ b/builtin/pack-redundant.c
@@ -0,0 +1,672 @@
+/*
+*
+* Copyright 2005, Lukas Sandstrom <lukass@etek.chalmers.se>
+*
+* This file is licensed under the GPL v2.
+*
+*/
+
+#include "builtin.h"
+#include "repository.h"
+#include "packfile.h"
+#include "object-store.h"
+
+#define BLKSIZE 512
+
+static const char pack_redundant_usage[] =
+"git pack-redundant [--verbose] [--alt-odb] (--all | <pack-filename>...)";
+
+static int load_all_packs, verbose, alt_odb;
+
+struct llist_item {
+ struct llist_item *next;
+ struct object_id oid;
+};
+static struct llist {
+ struct llist_item *front;
+ struct llist_item *back;
+ size_t size;
+} *all_objects; /* all objects which must be present in local packfiles */
+
+static struct pack_list {
+ struct pack_list *next;
+ struct packed_git *pack;
+ struct llist *unique_objects;
+ struct llist *remaining_objects;
+ size_t all_objects_size;
+} *local_packs = NULL, *altodb_packs = NULL;
+
+static struct llist_item *free_nodes;
+
+static inline void llist_item_put(struct llist_item *item)
+{
+ item->next = free_nodes;
+ free_nodes = item;
+}
+
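+/*
+ * Hand out an item from the free list; when it is empty, allocate a
+ * block of BLKSIZE items, return the first and park the rest on the
+ * free list.
+ */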
+static inline struct llist_item *llist_item_get(void)
+{
+ struct llist_item *new_item;
+ if ( free_nodes ) {
+ new_item = free_nodes;
+ free_nodes = free_nodes->next;
+ } else {
+ int i = 1;
+ ALLOC_ARRAY(new_item, BLKSIZE);
+ for (; i < BLKSIZE; i++)
+ llist_item_put(&new_item[i]);
+ }
+ return new_item;
+}
+
+static inline void llist_init(struct llist **list)
+{
+ *list = xmalloc(sizeof(struct llist));
+ (*list)->front = (*list)->back = NULL;
+ (*list)->size = 0;
+}
+
+static struct llist * llist_copy(struct llist *list)
+{
+ struct llist *ret;
+ struct llist_item *new_item, *old_item, *prev;
+
+ llist_init(&ret);
+
+ if ((ret->size = list->size) == 0)
+ return ret;
+
+ new_item = ret->front = llist_item_get();
+ new_item->oid = list->front->oid;
+
+ old_item = list->front->next;
+ while (old_item) {
+ prev = new_item;
+ new_item = llist_item_get();
+ prev->next = new_item;
+ new_item->oid = old_item->oid;
+ old_item = old_item->next;
+ }
+ new_item->next = NULL;
+ ret->back = new_item;
+
+ return ret;
+}
+
+static inline struct llist_item *llist_insert(struct llist *list,
+ struct llist_item *after,
+ const unsigned char *oid)
+{
+ struct llist_item *new_item = llist_item_get();
+ oidread(&new_item->oid, oid);
+ new_item->next = NULL;
+
+ if (after) {
+ new_item->next = after->next;
+ after->next = new_item;
+ if (after == list->back)
+ list->back = new_item;
+ } else {/* insert in front */
+ if (list->size == 0)
+ list->back = new_item;
+ else
+ new_item->next = list->front;
+ list->front = new_item;
+ }
+ list->size++;
+ return new_item;
+}
+
+static inline struct llist_item *llist_insert_back(struct llist *list,
+ const unsigned char *oid)
+{
+ return llist_insert(list, list->back, oid);
+}
+
+static inline struct llist_item *llist_insert_sorted_unique(struct llist *list,
+ const struct object_id *oid, struct llist_item *hint)
+{
+ struct llist_item *prev = NULL, *l;
+
+ l = (hint == NULL) ? list->front : hint;
+ while (l) {
+ int cmp = oidcmp(&l->oid, oid);
+ if (cmp > 0) { /* we insert before this entry */
+ return llist_insert(list, prev, oid->hash);
+ }
+ if (!cmp) { /* already exists */
+ return l;
+ }
+ prev = l;
+ l = l->next;
+ }
+ /* insert at the end */
+ return llist_insert_back(list, oid->hash);
+}
+
+/* remove oid from the sorted list; return the item just in front of it (or where it would have been) */
+static inline struct llist_item * llist_sorted_remove(struct llist *list, const unsigned char *oid, struct llist_item *hint)
+{
+ struct llist_item *prev, *l;
+
+redo_from_start:
+ l = (hint == NULL) ? list->front : hint;
+ prev = NULL;
+ while (l) {
+ const int cmp = hashcmp(l->oid.hash, oid);
+ if (cmp > 0) /* not in list, since sorted */
+ return prev;
+ if (!cmp) { /* found */
+ if (!prev) {
+ if (hint != NULL && hint != list->front) {
+ /* we don't know the previous element */
+ hint = NULL;
+ goto redo_from_start;
+ }
+ list->front = l->next;
+ } else
+ prev->next = l->next;
+ if (l == list->back)
+ list->back = prev;
+ llist_item_put(l);
+ list->size--;
+ return prev;
+ }
+ prev = l;
+ l = l->next;
+ }
+ return prev;
+}
+
+/* computes A\B */
+static void llist_sorted_difference_inplace(struct llist *A,
+ struct llist *B)
+{
+ struct llist_item *hint, *b;
+
+ hint = NULL;
+ b = B->front;
+
+ while (b) {
+ hint = llist_sorted_remove(A, b->oid.hash, hint);
+ b = b->next;
+ }
+}
+
+static inline struct pack_list * pack_list_insert(struct pack_list **pl,
+ struct pack_list *entry)
+{
+ struct pack_list *p = xmalloc(sizeof(struct pack_list));
+ memcpy(p, entry, sizeof(struct pack_list));
+ p->next = *pl;
+ *pl = p;
+ return p;
+}
+
+static inline size_t pack_list_size(struct pack_list *pl)
+{
+ size_t ret = 0;
+ while (pl) {
+ ret++;
+ pl = pl->next;
+ }
+ return ret;
+}
+
+static struct pack_list * pack_list_difference(const struct pack_list *A,
+ const struct pack_list *B)
+{
+ struct pack_list *ret;
+ const struct pack_list *pl;
+
+ if (!A)
+ return NULL;
+
+ pl = B;
+ while (pl != NULL) {
+ if (A->pack == pl->pack)
+ return pack_list_difference(A->next, B);
+ pl = pl->next;
+ }
+ ret = xmalloc(sizeof(struct pack_list));
+ memcpy(ret, A, sizeof(struct pack_list));
+ ret->next = pack_list_difference(A->next, B);
+ return ret;
+}
+
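+/*
+ * Walk the two sorted pack indexes in lock-step and drop every object
+ * that appears in both packs from each pack's unique_objects list.
+ */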
+static void cmp_two_packs(struct pack_list *p1, struct pack_list *p2)
+{
+ size_t p1_off = 0, p2_off = 0, p1_step, p2_step;
+ const unsigned char *p1_base, *p2_base;
+ struct llist_item *p1_hint = NULL, *p2_hint = NULL;
+ const unsigned int hashsz = the_hash_algo->rawsz;
+
+ if (!p1->unique_objects)
+ p1->unique_objects = llist_copy(p1->remaining_objects);
+ if (!p2->unique_objects)
+ p2->unique_objects = llist_copy(p2->remaining_objects);
+
+ p1_base = p1->pack->index_data;
+ p2_base = p2->pack->index_data;
+ p1_base += 256 * 4 + ((p1->pack->index_version < 2) ? 4 : 8);
+ p2_base += 256 * 4 + ((p2->pack->index_version < 2) ? 4 : 8);
+ p1_step = hashsz + ((p1->pack->index_version < 2) ? 4 : 0);
+ p2_step = hashsz + ((p2->pack->index_version < 2) ? 4 : 0);
+
+ while (p1_off < p1->pack->num_objects * p1_step &&
+ p2_off < p2->pack->num_objects * p2_step)
+ {
+ const int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off);
+ /* cmp ~ p1 - p2 */
+ if (cmp == 0) {
+ p1_hint = llist_sorted_remove(p1->unique_objects,
+ p1_base + p1_off,
+ p1_hint);
+ p2_hint = llist_sorted_remove(p2->unique_objects,
+ p1_base + p1_off,
+ p2_hint);
+ p1_off += p1_step;
+ p2_off += p2_step;
+ continue;
+ }
+ if (cmp < 0) { /* p1 has the object, p2 doesn't */
+ p1_off += p1_step;
+ } else { /* p2 has the object, p1 doesn't */
+ p2_off += p2_step;
+ }
+ }
+}
+
+static size_t sizeof_union(struct packed_git *p1, struct packed_git *p2)
+{
+ size_t ret = 0;
+ size_t p1_off = 0, p2_off = 0, p1_step, p2_step;
+ const unsigned char *p1_base, *p2_base;
+ const unsigned int hashsz = the_hash_algo->rawsz;
+
+ p1_base = p1->index_data;
+ p2_base = p2->index_data;
+ p1_base += 256 * 4 + ((p1->index_version < 2) ? 4 : 8);
+ p2_base += 256 * 4 + ((p2->index_version < 2) ? 4 : 8);
+ p1_step = hashsz + ((p1->index_version < 2) ? 4 : 0);
+ p2_step = hashsz + ((p2->index_version < 2) ? 4 : 0);
+
+ while (p1_off < p1->num_objects * p1_step &&
+ p2_off < p2->num_objects * p2_step)
+ {
+ int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off);
+ /* cmp ~ p1 - p2 */
+ if (cmp == 0) {
+ ret++;
+ p1_off += p1_step;
+ p2_off += p2_step;
+ continue;
+ }
+ if (cmp < 0) { /* p1 has the object, p2 doesn't */
+ p1_off += p1_step;
+ } else { /* p2 has the object, p1 doesn't */
+ p2_off += p2_step;
+ }
+ }
+ return ret;
+}
+
+/* another O(n^2) function ... */
+static size_t get_pack_redundancy(struct pack_list *pl)
+{
+ struct pack_list *subset;
+ size_t ret = 0;
+
+ if (!pl)
+ return 0;
+
+ while ((subset = pl->next)) {
+ while (subset) {
+ ret += sizeof_union(pl->pack, subset->pack);
+ subset = subset->next;
+ }
+ pl = pl->next;
+ }
+ return ret;
+}
+
+static inline off_t pack_set_bytecount(struct pack_list *pl)
+{
+ off_t ret = 0;
+ while (pl) {
+ ret += pl->pack->pack_size;
+ ret += pl->pack->index_size;
+ pl = pl->next;
+ }
+ return ret;
+}
+
+static int cmp_remaining_objects(const void *a, const void *b)
+{
+ struct pack_list *pl_a = *((struct pack_list **)a);
+ struct pack_list *pl_b = *((struct pack_list **)b);
+
+ if (pl_a->remaining_objects->size == pl_b->remaining_objects->size) {
+ /* have the same remaining_objects, big pack first */
+ if (pl_a->all_objects_size == pl_b->all_objects_size)
+ return 0;
+ else if (pl_a->all_objects_size < pl_b->all_objects_size)
+ return 1;
+ else
+ return -1;
+ } else if (pl_a->remaining_objects->size < pl_b->remaining_objects->size) {
+ /* sort by remaining objects, more objects first */
+ return 1;
+ } else {
+ return -1;
+ }
+}
+
+/* Sort pack_list, greater size of remaining_objects first */
+static void sort_pack_list(struct pack_list **pl)
+{
+ struct pack_list **ary, *p;
+ int i;
+ size_t n = pack_list_size(*pl);
+
+ if (n < 2)
+ return;
+
+ /* prepare an array of pack_list pointers for easier sorting */
+ CALLOC_ARRAY(ary, n);
+ for (n = 0, p = *pl; p; p = p->next)
+ ary[n++] = p;
+
+ QSORT(ary, n, cmp_remaining_objects);
+
+ /* link them back again */
+ for (i = 0; i < n - 1; i++)
+ ary[i]->next = ary[i + 1];
+ ary[n - 1]->next = NULL;
+ *pl = ary[0];
+
+ free(ary);
+}
+
+
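+/*
+ * Compute a minimal set of packs covering all objects: packs holding
+ * unique objects must stay; any objects still uncovered are then
+ * covered greedily, repeatedly taking the remaining pack with the most
+ * still-uncovered objects.
+ */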
+static void minimize(struct pack_list **min)
+{
+ struct pack_list *pl, *unique = NULL, *non_unique = NULL;
+ struct llist *missing, *unique_pack_objects;
+
+ pl = local_packs;
+ while (pl) {
+ if (pl->unique_objects->size)
+ pack_list_insert(&unique, pl);
+ else
+ pack_list_insert(&non_unique, pl);
+ pl = pl->next;
+ }
+ /* find out which objects are missing from the set of unique packs */
+ missing = llist_copy(all_objects);
+ pl = unique;
+ while (pl) {
+ llist_sorted_difference_inplace(missing, pl->remaining_objects);
+ pl = pl->next;
+ }
+
+ *min = unique;
+
+ /* return if there are no objects missing from the unique set */
+ if (missing->size == 0) {
+ free(missing);
+ return;
+ }
+
+ unique_pack_objects = llist_copy(all_objects);
+ llist_sorted_difference_inplace(unique_pack_objects, missing);
+
+ /* remove unique pack objects from the non_unique packs */
+ pl = non_unique;
+ while (pl) {
+ llist_sorted_difference_inplace(pl->remaining_objects, unique_pack_objects);
+ pl = pl->next;
+ }
+
+ while (non_unique) {
+ /* sort the non_unique packs, greater size of remaining_objects first */
+ sort_pack_list(&non_unique);
+ if (non_unique->remaining_objects->size == 0)
+ break;
+
+ pack_list_insert(min, non_unique);
+
+ for (pl = non_unique->next; pl && pl->remaining_objects->size > 0; pl = pl->next)
+ llist_sorted_difference_inplace(pl->remaining_objects, non_unique->remaining_objects);
+
+ non_unique = non_unique->next;
+ }
+}
+
+static void load_all_objects(void)
+{
+ struct pack_list *pl = local_packs;
+ struct llist_item *hint, *l;
+
+ llist_init(&all_objects);
+
+ while (pl) {
+ hint = NULL;
+ l = pl->remaining_objects->front;
+ while (l) {
+ hint = llist_insert_sorted_unique(all_objects,
+ &l->oid, hint);
+ l = l->next;
+ }
+ pl = pl->next;
+ }
+ /* remove objects present in alternate-odb packs */
+ pl = altodb_packs;
+ while (pl) {
+ llist_sorted_difference_inplace(all_objects, pl->remaining_objects);
+ pl = pl->next;
+ }
+}
+
+/* this scales like O(n^2) */
+static void cmp_local_packs(void)
+{
+ struct pack_list *subset, *pl = local_packs;
+
+ /* only one packfile */
+ if (!pl->next) {
+ llist_init(&pl->unique_objects);
+ return;
+ }
+
+ while ((subset = pl)) {
+ while ((subset = subset->next))
+ cmp_two_packs(pl, subset);
+ pl = pl->next;
+ }
+}
+
+static void scan_alt_odb_packs(void)
+{
+ struct pack_list *local, *alt;
+
+ alt = altodb_packs;
+ while (alt) {
+ local = local_packs;
+ while (local) {
+ llist_sorted_difference_inplace(local->remaining_objects,
+ alt->remaining_objects);
+ local = local->next;
+ }
+ alt = alt->next;
+ }
+}
+
+static struct pack_list * add_pack(struct packed_git *p)
+{
+ struct pack_list l;
+ size_t off = 0, step;
+ const unsigned char *base;
+
+ if (!p->pack_local && !(alt_odb || verbose))
+ return NULL;
+
+ l.pack = p;
+ llist_init(&l.remaining_objects);
+
+ if (open_pack_index(p))
+ return NULL;
+
+ base = p->index_data;
+ base += 256 * 4 + ((p->index_version < 2) ? 4 : 8);
+ step = the_hash_algo->rawsz + ((p->index_version < 2) ? 4 : 0);
+ while (off < p->num_objects * step) {
+ llist_insert_back(l.remaining_objects, base + off);
+ off += step;
+ }
+ l.all_objects_size = l.remaining_objects->size;
+ l.unique_objects = NULL;
+ if (p->pack_local)
+ return pack_list_insert(&local_packs, &l);
+ else
+ return pack_list_insert(&altodb_packs, &l);
+}
+
+static struct pack_list * add_pack_file(const char *filename)
+{
+ struct packed_git *p = get_all_packs(the_repository);
+
+ if (strlen(filename) < 40)
+ die("Bad pack filename: %s", filename);
+
+ while (p) {
+ if (strstr(p->pack_name, filename))
+ return add_pack(p);
+ p = p->next;
+ }
+ die("Filename %s not found in packed_git", filename);
+}
+
+static void load_all(void)
+{
+ struct packed_git *p = get_all_packs(the_repository);
+
+ while (p) {
+ add_pack(p);
+ p = p->next;
+ }
+}
+
+int cmd_pack_redundant(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ int i_still_use_this = 0;
+ struct pack_list *min = NULL, *red, *pl;
+ struct llist *ignore;
+ struct object_id *oid;
+ char buf[GIT_MAX_HEXSZ + 2]; /* hex hash + \n + \0 */
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(pack_redundant_usage);
+
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(arg, "--")) {
+ i++;
+ break;
+ }
+ if (!strcmp(arg, "--all")) {
+ load_all_packs = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--verbose")) {
+ verbose = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--alt-odb")) {
+ alt_odb = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--i-still-use-this")) {
+ i_still_use_this = 1;
+ continue;
+ }
+ if (*arg == '-')
+ usage(pack_redundant_usage);
+ else
+ break;
+ }
+
+ if (!i_still_use_this) {
+ fputs(_("'git pack-redundant' is nominated for removal.\n"
+ "If you still use this command, please add an extra\n"
+ "option, '--i-still-use-this', on the command line\n"
+ "and let us know you still use it by sending an e-mail\n"
+ "to <git@vger.kernel.org>. Thanks.\n"), stderr);
+ }
+
+ if (load_all_packs)
+ load_all();
+ else
+ while (*(argv + i) != NULL)
+ add_pack_file(*(argv + i++));
+
+ if (!local_packs)
+ die("Zero packs found!");
+
+ load_all_objects();
+
+ if (alt_odb)
+ scan_alt_odb_packs();
+
+ /* ignore objects given on stdin */
+ llist_init(&ignore);
+ if (!isatty(0)) {
+ while (fgets(buf, sizeof(buf), stdin)) {
+ oid = xmalloc(sizeof(*oid));
+ if (get_oid_hex(buf, oid))
+ die("Bad object ID on stdin: %s", buf);
+ llist_insert_sorted_unique(ignore, oid, NULL);
+ }
+ }
+ llist_sorted_difference_inplace(all_objects, ignore);
+ pl = local_packs;
+ while (pl) {
+ llist_sorted_difference_inplace(pl->remaining_objects, ignore);
+ pl = pl->next;
+ }
+
+ cmp_local_packs();
+
+ minimize(&min);
+
+ if (verbose) {
+ fprintf(stderr, "There are %lu packs available in alt-odbs.\n",
+ (unsigned long)pack_list_size(altodb_packs));
+ fprintf(stderr, "The smallest (bytewise) set of packs is:\n");
+ pl = min;
+ while (pl) {
+ fprintf(stderr, "\t%s\n", pl->pack->pack_name);
+ pl = pl->next;
+ }
+ fprintf(stderr, "containing %lu duplicate objects "
+ "with a total size of %lukb.\n",
+ (unsigned long)get_pack_redundancy(min),
+ (unsigned long)pack_set_bytecount(min)/1024);
+ fprintf(stderr, "A total of %lu unique objects were considered.\n",
+ (unsigned long)all_objects->size);
+ fprintf(stderr, "Redundant packs (with indexes):\n");
+ }
+ pl = red = pack_list_difference(local_packs, min);
+ while (pl) {
+ printf("%s\n%s\n",
+ sha1_pack_index_name(pl->pack->hash),
+ pl->pack->pack_name);
+ pl = pl->next;
+ }
+ if (verbose)
+ fprintf(stderr, "%luMB of redundant packs in total.\n",
+ (unsigned long)pack_set_bytecount(red)/(1024*1024));
+
+ return 0;
+}
diff --git a/builtin/pack-refs.c b/builtin/pack-refs.c
new file mode 100644
index 0000000..27c2ca0
--- /dev/null
+++ b/builtin/pack-refs.c
@@ -0,0 +1,24 @@
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "refs.h"
+#include "repository.h"
+
+static char const * const pack_refs_usage[] = {
+ N_("git pack-refs [--all] [--no-prune]"),
+ NULL
+};
+
+int cmd_pack_refs(int argc, const char **argv, const char *prefix)
+{
+ unsigned int flags = PACK_REFS_PRUNE;
+ struct option opts[] = {
+ OPT_BIT(0, "all", &flags, N_("pack everything"), PACK_REFS_ALL),
+ OPT_BIT(0, "prune", &flags, N_("prune loose refs (default)"), PACK_REFS_PRUNE),
+ OPT_END(),
+ };
+ git_config(git_default_config, NULL);
+ if (parse_options(argc, argv, prefix, opts, pack_refs_usage, 0))
+ usage_with_options(pack_refs_usage, opts);
+ return refs_pack_refs(get_main_ref_store(the_repository), flags);
+}
diff --git a/builtin/patch-id.c b/builtin/patch-id.c
new file mode 100644
index 0000000..f840fbf
--- /dev/null
+++ b/builtin/patch-id.c
@@ -0,0 +1,240 @@
+#include "cache.h"
+#include "builtin.h"
+#include "config.h"
+#include "diff.h"
+#include "parse-options.h"
+
+static void flush_current_id(int patchlen, struct object_id *id, struct object_id *result)
+{
+ if (patchlen)
+ printf("%s %s\n", oid_to_hex(result), oid_to_hex(id));
+}
+
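+/* Strip all whitespace from "line" in place and return the new length. */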
+static int remove_space(char *line)
+{
+ char *src = line;
+ char *dst = line;
+ unsigned char c;
+
+ while ((c = *src++) != '\0') {
+ if (!isspace(c))
+ *dst++ = c;
+ }
+ return dst - line;
+}
+
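+/*
+ * Parse the old and new line counts out of a "@@ -<start>[,<cnt>] +<start>[,<cnt>]"
+ * hunk header into *p_before and *p_after (a missing count means 1).
+ * Returns 0 if the header is malformed, 1 otherwise.
+ */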
+static int scan_hunk_header(const char *p, int *p_before, int *p_after)
+{
+ static const char digits[] = "0123456789";
+ const char *q, *r;
+ int n;
+
+ q = p + 4;
+ n = strspn(q, digits);
+ if (q[n] == ',') {
+ q += n + 1;
+ *p_before = atoi(q);
+ n = strspn(q, digits);
+ } else {
+ *p_before = 1;
+ }
+
+ if (n == 0 || q[n] != ' ' || q[n+1] != '+')
+ return 0;
+
+ r = q + n + 2;
+ n = strspn(r, digits);
+ if (r[n] == ',') {
+ r += n + 1;
+ *p_after = atoi(r);
+ n = strspn(r, digits);
+ } else {
+ *p_after = 1;
+ }
+ if (n == 0)
+ return 0;
+
+ return 1;
+}
+
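+/*
+ * Read one patch from stdin, hashing its contents hunk by hunk (with
+ * whitespace stripped unless "verbatim" is set). The patch-id ends up
+ * in *result, the object name of the following "commit"/"From" line
+ * (if any) in *next_oid, and the total length of the hashed lines is
+ * returned.
+ */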
+static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
+ struct strbuf *line_buf, int stable, int verbatim)
+{
+ int patchlen = 0, found_next = 0;
+ int before = -1, after = -1;
+ int diff_is_binary = 0;
+ char pre_oid_str[GIT_MAX_HEXSZ + 1], post_oid_str[GIT_MAX_HEXSZ + 1];
+ git_hash_ctx ctx;
+
+ the_hash_algo->init_fn(&ctx);
+ oidclr(result);
+
+ while (strbuf_getwholeline(line_buf, stdin, '\n') != EOF) {
+ char *line = line_buf->buf;
+ const char *p = line;
+ int len;
+
+ /* Possibly skip over the prefix added by "log" or "format-patch" */
+ if (!skip_prefix(line, "commit ", &p) &&
+ !skip_prefix(line, "From ", &p) &&
+ starts_with(line, "\\ ") && 12 < strlen(line)) {
+ if (verbatim)
+ the_hash_algo->update_fn(&ctx, line, strlen(line));
+ continue;
+ }
+
+ if (!get_oid_hex(p, next_oid)) {
+ found_next = 1;
+ break;
+ }
+
+ /* Ignore commit comments */
+ if (!patchlen && !starts_with(line, "diff "))
+ continue;
+
+ /* Parsing diff header? */
+ if (before == -1) {
+ if (starts_with(line, "GIT binary patch") ||
+ starts_with(line, "Binary files")) {
+ diff_is_binary = 1;
+ before = 0;
+ the_hash_algo->update_fn(&ctx, pre_oid_str,
+ strlen(pre_oid_str));
+ the_hash_algo->update_fn(&ctx, post_oid_str,
+ strlen(post_oid_str));
+ if (stable)
+ flush_one_hunk(result, &ctx);
+ continue;
+ } else if (skip_prefix(line, "index ", &p)) {
+ char *oid1_end = strstr(line, "..");
+ char *oid2_end = NULL;
+ if (oid1_end)
+ oid2_end = strstr(oid1_end, " ");
+ if (!oid2_end)
+ oid2_end = line + strlen(line) - 1;
+ if (oid1_end != NULL && oid2_end != NULL) {
+ *oid1_end = *oid2_end = '\0';
+ strlcpy(pre_oid_str, p, GIT_MAX_HEXSZ + 1);
+ strlcpy(post_oid_str, oid1_end + 2, GIT_MAX_HEXSZ + 1);
+ }
+ continue;
+ } else if (starts_with(line, "--- "))
+ before = after = 1;
+ else if (!isalpha(line[0]))
+ break;
+ }
+
+ if (diff_is_binary) {
+ if (starts_with(line, "diff ")) {
+ diff_is_binary = 0;
+ before = -1;
+ }
+ continue;
+ }
+
+ /* Looking for a valid hunk header? */
+ if (before == 0 && after == 0) {
+ if (starts_with(line, "@@ -")) {
+ /* Parse next hunk, but ignore line numbers. */
+ scan_hunk_header(line, &before, &after);
+ continue;
+ }
+
+ /* Split at the end of the patch. */
+ if (!starts_with(line, "diff "))
+ break;
+
+ /* Else we're parsing another header. */
+ if (stable)
+ flush_one_hunk(result, &ctx);
+ before = after = -1;
+ }
+
+ /* If we get here, we're inside a hunk. */
+ if (line[0] == '-' || line[0] == ' ')
+ before--;
+ if (line[0] == '+' || line[0] == ' ')
+ after--;
+
+ /* Add line to hash algo (possibly removing whitespace) */
+ len = verbatim ? strlen(line) : remove_space(line);
+ patchlen += len;
+ the_hash_algo->update_fn(&ctx, line, len);
+ }
+
+ if (!found_next)
+ oidclr(next_oid);
+
+ flush_one_hunk(result, &ctx);
+
+ return patchlen;
+}
+
+static void generate_id_list(int stable, int verbatim)
+{
+ struct object_id oid, n, result;
+ int patchlen;
+ struct strbuf line_buf = STRBUF_INIT;
+
+ oidclr(&oid);
+ while (!feof(stdin)) {
+ patchlen = get_one_patchid(&n, &result, &line_buf, stable, verbatim);
+ flush_current_id(patchlen, &oid, &result);
+ oidcpy(&oid, &n);
+ }
+ strbuf_release(&line_buf);
+}
+
+static const char *const patch_id_usage[] = {
+ N_("git patch-id [--stable | --unstable | --verbatim]"), NULL
+};
+
+struct patch_id_opts {
+ int stable;
+ int verbatim;
+};
+
+static int git_patch_id_config(const char *var, const char *value, void *cb)
+{
+ struct patch_id_opts *opts = cb;
+
+ if (!strcmp(var, "patchid.stable")) {
+ opts->stable = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "patchid.verbatim")) {
+ opts->verbatim = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+int cmd_patch_id(int argc, const char **argv, const char *prefix)
+{
+ /* if nothing is set, default to unstable */
+ struct patch_id_opts config = {0, 0};
+ int opts = 0;
+ struct option builtin_patch_id_options[] = {
+ OPT_CMDMODE(0, "unstable", &opts,
+ N_("use the unstable patch-id algorithm"), 1),
+ OPT_CMDMODE(0, "stable", &opts,
+ N_("use the stable patch-id algorithm"), 2),
+ OPT_CMDMODE(0, "verbatim", &opts,
+ N_("don't strip whitespace from the patch"), 3),
+ OPT_END()
+ };
+
+ git_config(git_patch_id_config, &config);
+
+ /* verbatim implies stable */
+ if (config.verbatim)
+ config.stable = 1;
+
+ argc = parse_options(argc, argv, prefix, builtin_patch_id_options,
+ patch_id_usage, 0);
+
+ generate_id_list(opts ? opts > 1 : config.stable,
+ opts ? opts == 3 : config.verbatim);
+ return 0;
+}
diff --git a/builtin/prune-packed.c b/builtin/prune-packed.c
new file mode 100644
index 0000000..da3273a
--- /dev/null
+++ b/builtin/prune-packed.c
@@ -0,0 +1,31 @@
+#include "builtin.h"
+#include "parse-options.h"
+#include "prune-packed.h"
+
+static const char * const prune_packed_usage[] = {
+ "git prune-packed [-n | --dry-run] [-q | --quiet]",
+ NULL
+};
+
+int cmd_prune_packed(int argc, const char **argv, const char *prefix)
+{
+ int opts = isatty(2) ? PRUNE_PACKED_VERBOSE : 0;
+ const struct option prune_packed_options[] = {
+ OPT_BIT('n', "dry-run", &opts, N_("dry run"),
+ PRUNE_PACKED_DRY_RUN),
+ OPT_NEGBIT('q', "quiet", &opts, N_("be quiet"),
+ PRUNE_PACKED_VERBOSE),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, prune_packed_options,
+ prune_packed_usage, 0);
+
+ if (argc > 0)
+ usage_msg_opt(_("too many arguments"),
+ prune_packed_usage,
+ prune_packed_options);
+
+ prune_packed_objects(opts);
+ return 0;
+}
diff --git a/builtin/prune.c b/builtin/prune.c
new file mode 100644
index 0000000..2719220
--- /dev/null
+++ b/builtin/prune.c
@@ -0,0 +1,203 @@
+#include "cache.h"
+#include "commit.h"
+#include "diff.h"
+#include "revision.h"
+#include "builtin.h"
+#include "reachable.h"
+#include "parse-options.h"
+#include "progress.h"
+#include "prune-packed.h"
+#include "object-store.h"
+#include "shallow.h"
+
+static const char * const prune_usage[] = {
+ N_("git prune [-n] [-v] [--progress] [--expire <time>] [--] [<head>...]"),
+ NULL
+};
+static int show_only;
+static int verbose;
+static timestamp_t expire;
+static int show_progress = -1;
+
+static int prune_tmp_file(const char *fullpath)
+{
+ struct stat st;
+ if (lstat(fullpath, &st))
+ return error("Could not stat '%s'", fullpath);
+ if (st.st_mtime > expire)
+ return 0;
+ if (S_ISDIR(st.st_mode)) {
+ if (show_only || verbose)
+ printf("Removing stale temporary directory %s\n", fullpath);
+ if (!show_only) {
+ struct strbuf remove_dir_buf = STRBUF_INIT;
+
+ strbuf_addstr(&remove_dir_buf, fullpath);
+ remove_dir_recursively(&remove_dir_buf, 0);
+ strbuf_release(&remove_dir_buf);
+ }
+ } else {
+ if (show_only || verbose)
+ printf("Removing stale temporary file %s\n", fullpath);
+ if (!show_only)
+ unlink_or_warn(fullpath);
+ }
+ return 0;
+}
+
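+/*
+ * Run the reachability walk at most once per process, marking every
+ * reachable object with SEEN so that later checks only need to test
+ * the flag.
+ */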
+static void perform_reachability_traversal(struct rev_info *revs)
+{
+ static int initialized;
+ struct progress *progress = NULL;
+
+ if (initialized)
+ return;
+
+ if (show_progress)
+ progress = start_delayed_progress(_("Checking connectivity"), 0);
+ mark_reachable_objects(revs, 1, expire, progress);
+ stop_progress(&progress);
+ initialized = 1;
+}
+
+static int is_object_reachable(const struct object_id *oid,
+ struct rev_info *revs)
+{
+ struct object *obj;
+
+ perform_reachability_traversal(revs);
+
+ obj = lookup_object(the_repository, oid);
+ return obj && (obj->flags & SEEN);
+}
+
+static int prune_object(const struct object_id *oid, const char *fullpath,
+ void *data)
+{
+ struct rev_info *revs = data;
+ struct stat st;
+
+ if (is_object_reachable(oid, revs))
+ return 0;
+
+ if (lstat(fullpath, &st)) {
+ /* report errors, but do not stop pruning */
+ error("Could not stat '%s'", fullpath);
+ return 0;
+ }
+ if (st.st_mtime > expire)
+ return 0;
+ if (show_only || verbose) {
+ enum object_type type = oid_object_info(the_repository, oid,
+ NULL);
+ printf("%s %s\n", oid_to_hex(oid),
+ (type > 0) ? type_name(type) : "unknown");
+ }
+ if (!show_only)
+ unlink_or_warn(fullpath);
+ return 0;
+}
+
+static int prune_cruft(const char *basename, const char *path, void *data)
+{
+ if (starts_with(basename, "tmp_obj_"))
+ prune_tmp_file(path);
+ else
+ fprintf(stderr, "bad sha1 file: %s\n", path);
+ return 0;
+}
+
+static int prune_subdir(unsigned int nr, const char *path, void *data)
+{
+ if (!show_only)
+ rmdir(path);
+ return 0;
+}
+
+/*
+ * Write errors (particularly out of space) can result in
+ * failed temporary packs (and more rarely indexes and other
+ * files beginning with "tmp_") accumulating in the object
+ * and the pack directories.
+ */
+static void remove_temporary_files(const char *path)
+{
+ DIR *dir;
+ struct dirent *de;
+
+ dir = opendir(path);
+ if (!dir) {
+ if (errno != ENOENT)
+ fprintf(stderr, "Unable to open directory %s: %s\n",
+ path, strerror(errno));
+ return;
+ }
+ while ((de = readdir(dir)) != NULL)
+ if (starts_with(de->d_name, "tmp_"))
+ prune_tmp_file(mkpath("%s/%s", path, de->d_name));
+ closedir(dir);
+}
+
+int cmd_prune(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info revs;
+ int exclude_promisor_objects = 0;
+ const struct option options[] = {
+ OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
+ OPT__VERBOSE(&verbose, N_("report pruned objects")),
+ OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
+ OPT_EXPIRY_DATE(0, "expire", &expire,
+ N_("expire objects older than <time>")),
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("limit traversal to objects outside promisor packfiles")),
+ OPT_END()
+ };
+ char *s;
+
+ expire = TIME_MAX;
+ save_commit_buffer = 0;
+ read_replace_refs = 0;
+ repo_init_revisions(the_repository, &revs, prefix);
+
+ argc = parse_options(argc, argv, prefix, options, prune_usage, 0);
+
+ if (repository_format_precious_objects)
+ die(_("cannot prune in a precious-objects repo"));
+
+ while (argc--) {
+ struct object_id oid;
+ const char *name = *argv++;
+
+ if (!get_oid(name, &oid)) {
+ struct object *object = parse_object_or_die(&oid,
+ name);
+ add_pending_object(&revs, object, "");
+ }
+ else
+ die("unrecognized argument: %s", name);
+ }
+
+ if (show_progress == -1)
+ show_progress = isatty(2);
+ if (exclude_promisor_objects) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ }
+
+ for_each_loose_file_in_objdir(get_object_directory(), prune_object,
+ prune_cruft, prune_subdir, &revs);
+
+ prune_packed_objects(show_only ? PRUNE_PACKED_DRY_RUN : 0);
+ remove_temporary_files(get_object_directory());
+ s = mkpathdup("%s/pack", get_object_directory());
+ remove_temporary_files(s);
+ free(s);
+
+ if (is_repository_shallow(the_repository)) {
+ perform_reachability_traversal(&revs);
+ prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);
+ }
+
+ release_revisions(&revs);
+ return 0;
+}
diff --git a/builtin/pull.c b/builtin/pull.c
new file mode 100644
index 0000000..1ab4de0
--- /dev/null
+++ b/builtin/pull.c
@@ -0,0 +1,1159 @@
+/*
+ * Builtin "git pull"
+ *
+ * Based on git-pull.sh by Junio C Hamano
+ *
+ * Fetch one or more remote refs and merge it/them into the current HEAD.
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "exec-cmd.h"
+#include "run-command.h"
+#include "oid-array.h"
+#include "remote.h"
+#include "dir.h"
+#include "rebase.h"
+#include "refs.h"
+#include "refspec.h"
+#include "revision.h"
+#include "submodule.h"
+#include "submodule-config.h"
+#include "tempfile.h"
+#include "lockfile.h"
+#include "wt-status.h"
+#include "commit-reach.h"
+#include "sequencer.h"
+#include "packfile.h"
+
+/**
+ * Parses the value of --rebase. If value is a false value, returns
+ * REBASE_FALSE. If value is a true value, returns REBASE_TRUE. If value is
+ * "merges", returns REBASE_MERGES. If value is a invalid value, dies with
+ * a fatal error if fatal is true, otherwise returns REBASE_INVALID.
+ */
+static enum rebase_type parse_config_rebase(const char *key, const char *value,
+ int fatal)
+{
+ enum rebase_type v = rebase_parse_value(value);
+ if (v != REBASE_INVALID)
+ return v;
+
+ if (fatal)
+ die(_("invalid value for '%s': '%s'"), key, value);
+ else
+ error(_("invalid value for '%s': '%s'"), key, value);
+
+ return REBASE_INVALID;
+}
+
+/**
+ * Callback for --rebase, which parses arg with parse_config_rebase().
+ */
+static int parse_opt_rebase(const struct option *opt, const char *arg, int unset)
+{
+ enum rebase_type *value = opt->value;
+
+ if (arg)
+ *value = parse_config_rebase("--rebase", arg, 0);
+ else
+ *value = unset ? REBASE_FALSE : REBASE_TRUE;
+ return *value == REBASE_INVALID ? -1 : 0;
+}
+
+static const char * const pull_usage[] = {
+ N_("git pull [<options>] [<repository> [<refspec>...]]"),
+ NULL
+};
+
+/* Shared options */
+static int opt_verbosity;
+static char *opt_progress;
+static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
+static int recurse_submodules_cli = RECURSE_SUBMODULES_DEFAULT;
+
+/* Options passed to git-merge or git-rebase */
+static enum rebase_type opt_rebase = -1;
+static char *opt_diffstat;
+static char *opt_log;
+static char *opt_signoff;
+static char *opt_squash;
+static char *opt_commit;
+static char *opt_edit;
+static char *cleanup_arg;
+static char *opt_ff;
+static char *opt_verify_signatures;
+static char *opt_verify;
+static int opt_autostash = -1;
+static int config_autostash;
+static int check_trust_level = 1;
+static struct strvec opt_strategies = STRVEC_INIT;
+static struct strvec opt_strategy_opts = STRVEC_INIT;
+static char *opt_gpg_sign;
+static int opt_allow_unrelated_histories;
+
+/* Options passed to git-fetch */
+static char *opt_all;
+static char *opt_append;
+static char *opt_upload_pack;
+static int opt_force;
+static char *opt_tags;
+static char *opt_prune;
+static char *max_children;
+static int opt_dry_run;
+static char *opt_keep;
+static char *opt_depth;
+static char *opt_unshallow;
+static char *opt_update_shallow;
+static char *opt_refmap;
+static char *opt_ipv4;
+static char *opt_ipv6;
+static int opt_show_forced_updates = -1;
+static char *set_upstream;
+static struct strvec opt_fetch = STRVEC_INIT;
+
+static struct option pull_options[] = {
+ /* Shared options */
+ OPT__VERBOSITY(&opt_verbosity),
+ OPT_PASSTHRU(0, "progress", &opt_progress, NULL,
+ N_("force progress reporting"),
+ PARSE_OPT_NOARG),
+ OPT_CALLBACK_F(0, "recurse-submodules",
+ &recurse_submodules_cli, N_("on-demand"),
+ N_("control for recursive fetching of submodules"),
+ PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
+
+ /* Options passed to git-merge or git-rebase */
+ OPT_GROUP(N_("Options related to merging")),
+ OPT_CALLBACK_F('r', "rebase", &opt_rebase,
+ "(false|true|merges|interactive)",
+ N_("incorporate changes by rebasing rather than merging"),
+ PARSE_OPT_OPTARG, parse_opt_rebase),
+ OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
+ N_("do not show a diffstat at the end of the merge"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG),
+ OPT_PASSTHRU(0, "stat", &opt_diffstat, NULL,
+ N_("show a diffstat at the end of the merge"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "summary", &opt_diffstat, NULL,
+ N_("(synonym to --stat)"),
+ PARSE_OPT_NOARG | PARSE_OPT_HIDDEN),
+ OPT_PASSTHRU(0, "log", &opt_log, N_("n"),
+ N_("add (at most <n>) entries from shortlog to merge commit message"),
+ PARSE_OPT_OPTARG),
+ OPT_PASSTHRU(0, "signoff", &opt_signoff, NULL,
+ N_("add a Signed-off-by trailer"),
+ PARSE_OPT_OPTARG),
+ OPT_PASSTHRU(0, "squash", &opt_squash, NULL,
+ N_("create a single commit instead of doing a merge"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "commit", &opt_commit, NULL,
+ N_("perform a commit if the merge succeeds (default)"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "edit", &opt_edit, NULL,
+ N_("edit message before committing"),
+ PARSE_OPT_NOARG),
+ OPT_CLEANUP(&cleanup_arg),
+ OPT_PASSTHRU(0, "ff", &opt_ff, NULL,
+ N_("allow fast-forward"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "ff-only", &opt_ff, NULL,
+ N_("abort if fast-forward is not possible"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG),
+ OPT_PASSTHRU(0, "verify", &opt_verify, NULL,
+ N_("control use of pre-merge-commit and commit-msg hooks"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "verify-signatures", &opt_verify_signatures, NULL,
+ N_("verify that the named commit has a valid GPG signature"),
+ PARSE_OPT_NOARG),
+ OPT_BOOL(0, "autostash", &opt_autostash,
+ N_("automatically stash/stash pop before and after")),
+ OPT_PASSTHRU_ARGV('s', "strategy", &opt_strategies, N_("strategy"),
+ N_("merge strategy to use"),
+ 0),
+ OPT_PASSTHRU_ARGV('X', "strategy-option", &opt_strategy_opts,
+ N_("option=value"),
+ N_("option for selected merge strategy"),
+ 0),
+ OPT_PASSTHRU('S', "gpg-sign", &opt_gpg_sign, N_("key-id"),
+ N_("GPG sign commit"),
+ PARSE_OPT_OPTARG),
+ OPT_SET_INT(0, "allow-unrelated-histories",
+ &opt_allow_unrelated_histories,
+ N_("allow merging unrelated histories"), 1),
+
+ /* Options passed to git-fetch */
+ OPT_GROUP(N_("Options related to fetching")),
+ OPT_PASSTHRU(0, "all", &opt_all, NULL,
+ N_("fetch from all remotes"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU('a', "append", &opt_append, NULL,
+ N_("append to .git/FETCH_HEAD instead of overwriting"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "upload-pack", &opt_upload_pack, N_("path"),
+ N_("path to upload pack on remote end"),
+ 0),
+ OPT__FORCE(&opt_force, N_("force overwrite of local branch"), 0),
+ OPT_PASSTHRU('t', "tags", &opt_tags, NULL,
+ N_("fetch all tags and associated objects"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU('p', "prune", &opt_prune, NULL,
+ N_("prune remote-tracking branches no longer on remote"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU('j', "jobs", &max_children, N_("n"),
+ N_("number of submodules pulled in parallel"),
+ PARSE_OPT_OPTARG),
+ OPT_BOOL(0, "dry-run", &opt_dry_run,
+ N_("dry run")),
+ OPT_PASSTHRU('k', "keep", &opt_keep, NULL,
+ N_("keep downloaded pack"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "depth", &opt_depth, N_("depth"),
+ N_("deepen history of shallow clone"),
+ 0),
+ OPT_PASSTHRU_ARGV(0, "shallow-since", &opt_fetch, N_("time"),
+ N_("deepen history of shallow repository based on time"),
+ 0),
+ OPT_PASSTHRU_ARGV(0, "shallow-exclude", &opt_fetch, N_("revision"),
+ N_("deepen history of shallow clone, excluding rev"),
+ 0),
+ OPT_PASSTHRU_ARGV(0, "deepen", &opt_fetch, N_("n"),
+ N_("deepen history of shallow clone"),
+ 0),
+ OPT_PASSTHRU(0, "unshallow", &opt_unshallow, NULL,
+ N_("convert to a complete repository"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "update-shallow", &opt_update_shallow, NULL,
+ N_("accept refs that update .git/shallow"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU(0, "refmap", &opt_refmap, N_("refmap"),
+ N_("specify fetch refmap"),
+ PARSE_OPT_NONEG),
+ OPT_PASSTHRU_ARGV('o', "server-option", &opt_fetch,
+ N_("server-specific"),
+ N_("option to transmit"),
+ 0),
+ OPT_PASSTHRU('4', "ipv4", &opt_ipv4, NULL,
+ N_("use IPv4 addresses only"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU('6', "ipv6", &opt_ipv6, NULL,
+ N_("use IPv6 addresses only"),
+ PARSE_OPT_NOARG),
+ OPT_PASSTHRU_ARGV(0, "negotiation-tip", &opt_fetch, N_("revision"),
+ N_("report that we have only objects reachable from this object"),
+ 0),
+ OPT_BOOL(0, "show-forced-updates", &opt_show_forced_updates,
+ N_("check for forced-updates on all updated branches")),
+ OPT_PASSTHRU(0, "set-upstream", &set_upstream, NULL,
+ N_("set upstream for git pull/fetch"),
+ PARSE_OPT_NOARG),
+
+ OPT_END()
+};
+
+/**
+ * Pushes "-q" or "-v" switches into arr to match the opt_verbosity level.
+ */
+static void argv_push_verbosity(struct strvec *arr)
+{
+ int verbosity;
+
+ for (verbosity = opt_verbosity; verbosity > 0; verbosity--)
+ strvec_push(arr, "-v");
+
+ for (verbosity = opt_verbosity; verbosity < 0; verbosity++)
+ strvec_push(arr, "-q");
+}
+
+/**
+ * Pushes "-f" switches into arr to match the opt_force level.
+ */
+static void argv_push_force(struct strvec *arr)
+{
+ int force = opt_force;
+ while (force-- > 0)
+ strvec_push(arr, "-f");
+}
+
+/**
+ * Sets the GIT_REFLOG_ACTION environment variable to the space-separated concatenation of argv.
+ */
+static void set_reflog_message(int argc, const char **argv)
+{
+ int i;
+ struct strbuf msg = STRBUF_INIT;
+
+ for (i = 0; i < argc; i++) {
+ if (i)
+ strbuf_addch(&msg, ' ');
+ strbuf_addstr(&msg, argv[i]);
+ }
+
+ setenv("GIT_REFLOG_ACTION", msg.buf, 0);
+
+ strbuf_release(&msg);
+}
+
+/**
+ * If pull.ff is unset, returns NULL. If pull.ff is "true", returns "--ff". If
+ * pull.ff is "false", returns "--no-ff". If pull.ff is "only", returns
+ * "--ff-only". Otherwise, if pull.ff is set to an invalid value, die with an
+ * error.
+ */
+static const char *config_get_ff(void)
+{
+ const char *value;
+
+ if (git_config_get_value("pull.ff", &value))
+ return NULL;
+
+ switch (git_parse_maybe_bool(value)) {
+ case 0:
+ return "--no-ff";
+ case 1:
+ return "--ff";
+ }
+
+ if (!strcmp(value, "only"))
+ return "--ff-only";
+
+ die(_("invalid value for '%s': '%s'"), "pull.ff", value);
+}
+
+/**
+ * Returns the default configured value for --rebase. It first looks for the
+ * value of "branch.$curr_branch.rebase", where $curr_branch is the current
+ * branch, and if HEAD is detached or the configuration key does not exist,
+ * looks for the value of "pull.rebase". If both configuration keys do not
+ * exist, returns REBASE_FALSE.
+ */
+static enum rebase_type config_get_rebase(int *rebase_unspecified)
+{
+ struct branch *curr_branch = branch_get("HEAD");
+ const char *value;
+
+ if (curr_branch) {
+ char *key = xstrfmt("branch.%s.rebase", curr_branch->name);
+
+ if (!git_config_get_value(key, &value)) {
+ enum rebase_type ret = parse_config_rebase(key, value, 1);
+ free(key);
+ return ret;
+ }
+
+ free(key);
+ }
+
+ if (!git_config_get_value("pull.rebase", &value))
+ return parse_config_rebase("pull.rebase", value, 1);
+
+ *rebase_unspecified = 1;
+
+ return REBASE_FALSE;
+}
+
+/**
+ * Read config variables.
+ */
+static int git_pull_config(const char *var, const char *value, void *cb)
+{
+ int status;
+
+ if (!strcmp(var, "rebase.autostash")) {
+ config_autostash = git_config_bool(var, value);
+ return 0;
+ } else if (!strcmp(var, "submodule.recurse")) {
+ recurse_submodules = git_config_bool(var, value) ?
+ RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
+ return 0;
+ } else if (!strcmp(var, "gpg.mintrustlevel")) {
+ check_trust_level = 0;
+ }
+
+ status = git_gpg_config(var, value, cb);
+ if (status)
+ return status;
+
+ return git_default_config(var, value, cb);
+}
+
+/**
+ * Appends to merge_heads the merge candidates from FETCH_HEAD that are not
+ * marked not-for-merge.
+ */
+static void get_merge_heads(struct oid_array *merge_heads)
+{
+ const char *filename = git_path_fetch_head(the_repository);
+ FILE *fp;
+ struct strbuf sb = STRBUF_INIT;
+ struct object_id oid;
+
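+ /* Each FETCH_HEAD line starts with an object ID, optionally followed by a tab-separated "not-for-merge" marker; only unmarked entries are merge candidates. */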
+ fp = xfopen(filename, "r");
+ while (strbuf_getline_lf(&sb, fp) != EOF) {
+ const char *p;
+ if (parse_oid_hex(sb.buf, &oid, &p))
+ continue; /* invalid line: does not start with object ID */
+ if (starts_with(p, "\tnot-for-merge\t"))
+ continue; /* ref is not-for-merge */
+ oid_array_append(merge_heads, &oid);
+ }
+ fclose(fp);
+ strbuf_release(&sb);
+}
+
+/**
+ * Used by die_no_merge_candidates() as a for_each_remote() callback to
+ * retrieve the name of the remote if the repository only has one remote.
+ */
+static int get_only_remote(struct remote *remote, void *cb_data)
+{
+ const char **remote_name = cb_data;
+
+ if (*remote_name)
+ return -1;
+
+ *remote_name = remote->name;
+ return 0;
+}
+
+/**
+ * Dies with the appropriate reason for why there are no merge candidates:
+ *
+ * 1. We fetched from a specific remote, and a refspec was given, but it ended
+ * up not fetching anything. This is usually because the user provided a
+ * wildcard refspec which had no matches on the remote end.
+ *
+ * 2. We fetched from a non-default remote, but didn't specify a branch to
+ * merge. We can't use the configured one because it applies to the default
+ * remote, thus the user must specify the branches to merge.
+ *
+ * 3. We fetched from the branch's or repo's default remote, but:
+ *
+ * a. We are not on a branch, so there will never be a configured branch to
+ * merge with.
+ *
+ * b. We are on a branch, but there is no configured branch to merge with.
+ *
+ * 4. We fetched from the branch's or repo's default remote, but the configured
+ * branch to merge didn't get fetched. (Either it doesn't exist, or wasn't
+ * part of the configured fetch refspec.)
+ */
+static void NORETURN die_no_merge_candidates(const char *repo, const char **refspecs)
+{
+ struct branch *curr_branch = branch_get("HEAD");
+ const char *remote = curr_branch ? curr_branch->remote_name : NULL;
+
+ if (*refspecs) {
+ if (opt_rebase)
+ fprintf_ln(stderr, _("There is no candidate for rebasing against among the refs that you just fetched."));
+ else
+ fprintf_ln(stderr, _("There are no candidates for merging among the refs that you just fetched."));
+ fprintf_ln(stderr, _("Generally this means that you provided a wildcard refspec which had no\n"
+ "matches on the remote end."));
+ } else if (repo && curr_branch && (!remote || strcmp(repo, remote))) {
+ fprintf_ln(stderr, _("You asked to pull from the remote '%s', but did not specify\n"
+ "a branch. Because this is not the default configured remote\n"
+ "for your current branch, you must specify a branch on the command line."),
+ repo);
+ } else if (!curr_branch) {
+ fprintf_ln(stderr, _("You are not currently on a branch."));
+ if (opt_rebase)
+ fprintf_ln(stderr, _("Please specify which branch you want to rebase against."));
+ else
+ fprintf_ln(stderr, _("Please specify which branch you want to merge with."));
+ fprintf_ln(stderr, _("See git-pull(1) for details."));
+ fprintf(stderr, "\n");
+ fprintf_ln(stderr, " git pull %s %s", _("<remote>"), _("<branch>"));
+ fprintf(stderr, "\n");
+ } else if (!curr_branch->merge_nr) {
+ const char *remote_name = NULL;
+
+ if (for_each_remote(get_only_remote, &remote_name) || !remote_name)
+ remote_name = _("<remote>");
+
+ fprintf_ln(stderr, _("There is no tracking information for the current branch."));
+ if (opt_rebase)
+ fprintf_ln(stderr, _("Please specify which branch you want to rebase against."));
+ else
+ fprintf_ln(stderr, _("Please specify which branch you want to merge with."));
+ fprintf_ln(stderr, _("See git-pull(1) for details."));
+ fprintf(stderr, "\n");
+ fprintf_ln(stderr, " git pull %s %s", _("<remote>"), _("<branch>"));
+ fprintf(stderr, "\n");
+ fprintf_ln(stderr, _("If you wish to set tracking information for this branch you can do so with:"));
+ fprintf(stderr, "\n");
+ fprintf_ln(stderr, " git branch --set-upstream-to=%s/%s %s\n",
+ remote_name, _("<branch>"), curr_branch->name);
+ } else
+ fprintf_ln(stderr, _("Your configuration specifies to merge with the ref '%s'\n"
+ "from the remote, but no such ref was fetched."),
+ *curr_branch->merge_name);
+ exit(1);
+}
+
+/**
+ * Parses argv into [<repo> [<refspecs>...]], returning their values in `repo`
+ * as a string and `refspecs` as a null-terminated array of strings. If `repo`
+ * is not provided in argv, it is set to NULL.
+ */
+static void parse_repo_refspecs(int argc, const char **argv, const char **repo,
+ const char ***refspecs)
+{
+ if (argc > 0) {
+ *repo = *argv++;
+ argc--;
+ } else
+ *repo = NULL;
+ *refspecs = argv;
+}
+
+/**
+ * Runs git-fetch, returning its exit status. `repo` and `refspecs` are the
+ * repository and refspecs to fetch, or NULL if they are not provided.
+ */
+static int run_fetch(const char *repo, const char **refspecs)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
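+ /* --update-head-ok allows the fetch to update the ref of the current branch; cmd_pull() fast-forwards the working tree afterwards if that happened. */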
+ strvec_pushl(&cmd.args, "fetch", "--update-head-ok", NULL);
+
+ /* Shared options */
+ argv_push_verbosity(&cmd.args);
+ if (opt_progress)
+ strvec_push(&cmd.args, opt_progress);
+
+ /* Options passed to git-fetch */
+ if (opt_all)
+ strvec_push(&cmd.args, opt_all);
+ if (opt_append)
+ strvec_push(&cmd.args, opt_append);
+ if (opt_upload_pack)
+ strvec_push(&cmd.args, opt_upload_pack);
+ argv_push_force(&cmd.args);
+ if (opt_tags)
+ strvec_push(&cmd.args, opt_tags);
+ if (opt_prune)
+ strvec_push(&cmd.args, opt_prune);
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ switch (recurse_submodules_cli) {
+ case RECURSE_SUBMODULES_ON:
+ strvec_push(&cmd.args, "--recurse-submodules=on");
+ break;
+ case RECURSE_SUBMODULES_OFF:
+ strvec_push(&cmd.args, "--recurse-submodules=no");
+ break;
+ case RECURSE_SUBMODULES_ON_DEMAND:
+ strvec_push(&cmd.args, "--recurse-submodules=on-demand");
+ break;
+ default:
+ BUG("submodule recursion option not understood");
+ }
+ if (max_children)
+ strvec_push(&cmd.args, max_children);
+ if (opt_dry_run)
+ strvec_push(&cmd.args, "--dry-run");
+ if (opt_keep)
+ strvec_push(&cmd.args, opt_keep);
+ if (opt_depth)
+ strvec_push(&cmd.args, opt_depth);
+ if (opt_unshallow)
+ strvec_push(&cmd.args, opt_unshallow);
+ if (opt_update_shallow)
+ strvec_push(&cmd.args, opt_update_shallow);
+ if (opt_refmap)
+ strvec_push(&cmd.args, opt_refmap);
+ if (opt_ipv4)
+ strvec_push(&cmd.args, opt_ipv4);
+ if (opt_ipv6)
+ strvec_push(&cmd.args, opt_ipv6);
+ if (opt_show_forced_updates > 0)
+ strvec_push(&cmd.args, "--show-forced-updates");
+ else if (opt_show_forced_updates == 0)
+ strvec_push(&cmd.args, "--no-show-forced-updates");
+ if (set_upstream)
+ strvec_push(&cmd.args, set_upstream);
+ strvec_pushv(&cmd.args, opt_fetch.v);
+
+ if (repo) {
+ strvec_push(&cmd.args, repo);
+ strvec_pushv(&cmd.args, refspecs);
+ } else if (*refspecs)
+ BUG("refspecs without repo?");
+ cmd.git_cmd = 1;
+ cmd.close_object_store = 1;
+ return run_command(&cmd);
+}
+
+/**
+ * "Pulls into void" by branching off merge_head.
+ */
+static int pull_into_void(const struct object_id *merge_head,
+ const struct object_id *curr_head)
+{
+ if (opt_verify_signatures) {
+ struct commit *commit;
+
+ commit = lookup_commit(the_repository, merge_head);
+ if (!commit)
+ die(_("unable to access commit %s"),
+ oid_to_hex(merge_head));
+
+ verify_merge_signature(commit, opt_verbosity,
+ check_trust_level);
+ }
+
+ /*
+ * Two-way merge: we treat the index as based on an empty tree,
+ * and try to fast-forward to HEAD. This ensures we will not lose
+ * index/worktree changes that the user already made on the unborn
+ * branch.
+ */
+ if (checkout_fast_forward(the_repository,
+ the_hash_algo->empty_tree,
+ merge_head, 0))
+ return 1;
+
+ if (update_ref("initial pull", "HEAD", merge_head, curr_head, 0, UPDATE_REFS_DIE_ON_ERR))
+ return 1;
+
+ return 0;
+}
+
+static int rebase_submodules(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ cp.no_stdin = 1;
+ strvec_pushl(&cp.args, "submodule", "update",
+ "--recursive", "--rebase", NULL);
+ argv_push_verbosity(&cp.args);
+
+ return run_command(&cp);
+}
+
+static int update_submodules(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ cp.no_stdin = 1;
+ strvec_pushl(&cp.args, "submodule", "update",
+ "--recursive", "--checkout", NULL);
+ argv_push_verbosity(&cp.args);
+
+ return run_command(&cp);
+}
+
+/**
+ * Runs git-merge, returning its exit status.
+ */
+static int run_merge(void)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&cmd.args, "merge", NULL);
+
+ /* Shared options */
+ argv_push_verbosity(&cmd.args);
+ if (opt_progress)
+ strvec_push(&cmd.args, opt_progress);
+
+ /* Options passed to git-merge */
+ if (opt_diffstat)
+ strvec_push(&cmd.args, opt_diffstat);
+ if (opt_log)
+ strvec_push(&cmd.args, opt_log);
+ if (opt_signoff)
+ strvec_push(&cmd.args, opt_signoff);
+ if (opt_squash)
+ strvec_push(&cmd.args, opt_squash);
+ if (opt_commit)
+ strvec_push(&cmd.args, opt_commit);
+ if (opt_edit)
+ strvec_push(&cmd.args, opt_edit);
+ if (cleanup_arg)
+ strvec_pushf(&cmd.args, "--cleanup=%s", cleanup_arg);
+ if (opt_ff)
+ strvec_push(&cmd.args, opt_ff);
+ if (opt_verify)
+ strvec_push(&cmd.args, opt_verify);
+ if (opt_verify_signatures)
+ strvec_push(&cmd.args, opt_verify_signatures);
+ strvec_pushv(&cmd.args, opt_strategies.v);
+ strvec_pushv(&cmd.args, opt_strategy_opts.v);
+ if (opt_gpg_sign)
+ strvec_push(&cmd.args, opt_gpg_sign);
+ if (opt_autostash == 0)
+ strvec_push(&cmd.args, "--no-autostash");
+ else if (opt_autostash == 1)
+ strvec_push(&cmd.args, "--autostash");
+ if (opt_allow_unrelated_histories > 0)
+ strvec_push(&cmd.args, "--allow-unrelated-histories");
+
+ strvec_push(&cmd.args, "FETCH_HEAD");
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
+}
+
+/**
+ * Returns remote's upstream branch for the current branch. If remote is NULL,
+ * the current branch's configured default remote is used. Returns NULL if
+ * `remote` does not name a valid remote, HEAD does not point to a branch,
+ * remote is not the branch's configured remote or the branch does not have any
+ * configured upstream branch.
+ */
+static const char *get_upstream_branch(const char *remote)
+{
+ struct remote *rm;
+ struct branch *curr_branch;
+ const char *curr_branch_remote;
+
+ rm = remote_get(remote);
+ if (!rm)
+ return NULL;
+
+ curr_branch = branch_get("HEAD");
+ if (!curr_branch)
+ return NULL;
+
+ curr_branch_remote = remote_for_branch(curr_branch, NULL);
+ assert(curr_branch_remote);
+
+ if (strcmp(curr_branch_remote, rm->name))
+ return NULL;
+
+ return branch_get_upstream(curr_branch, NULL);
+}
+
+/**
+ * Derives the remote-tracking branch from the remote and refspec.
+ *
+ * FIXME: The current implementation assumes the default mapping of
+ * refs/heads/<branch_name> to refs/remotes/<remote_name>/<branch_name>.
+ */
+static const char *get_tracking_branch(const char *remote, const char *refspec)
+{
+ struct refspec_item spec;
+ const char *spec_src;
+ const char *merge_branch;
+
+ refspec_item_init_or_die(&spec, refspec, REFSPEC_FETCH);
+ spec_src = spec.src;
+ if (!*spec_src || !strcmp(spec_src, "HEAD"))
+ spec_src = "HEAD";
+ else if (skip_prefix(spec_src, "heads/", &spec_src))
+ ;
+ else if (skip_prefix(spec_src, "refs/heads/", &spec_src))
+ ;
+ else if (starts_with(spec_src, "refs/") ||
+ starts_with(spec_src, "tags/") ||
+ starts_with(spec_src, "remotes/"))
+ spec_src = "";
+
+ if (*spec_src) {
+ if (!strcmp(remote, "."))
+ merge_branch = mkpath("refs/heads/%s", spec_src);
+ else
+ merge_branch = mkpath("refs/remotes/%s/%s", remote, spec_src);
+ } else
+ merge_branch = NULL;
+
+ refspec_item_clear(&spec);
+ return merge_branch;
+}
+
+/**
+ * Given the repo and refspecs, sets fork_point to the point at which the
+ * current branch forked from its remote-tracking branch. Returns 0 on success,
+ * -1 on failure.
+ */
+static int get_rebase_fork_point(struct object_id *fork_point, const char *repo,
+ const char *refspec)
+{
+ int ret;
+ struct branch *curr_branch;
+ const char *remote_branch;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf sb = STRBUF_INIT;
+
+ curr_branch = branch_get("HEAD");
+ if (!curr_branch)
+ return -1;
+
+ if (refspec)
+ remote_branch = get_tracking_branch(repo, refspec);
+ else
+ remote_branch = get_upstream_branch(repo);
+
+ if (!remote_branch)
+ return -1;
+
+ strvec_pushl(&cp.args, "merge-base", "--fork-point",
+ remote_branch, curr_branch->name, NULL);
+ cp.no_stdin = 1;
+ cp.no_stderr = 1;
+ cp.git_cmd = 1;
+
+ ret = capture_command(&cp, &sb, GIT_MAX_HEXSZ);
+ if (ret)
+ goto cleanup;
+
+ ret = get_oid_hex(sb.buf, fork_point);
+ if (ret)
+ goto cleanup;
+
+cleanup:
+ strbuf_release(&sb);
+ return ret ? -1 : 0;
+}
+
+/**
+ * Sets merge_base to the octopus merge base of curr_head, merge_head and
+ * fork_point. Returns 0 if a merge base is found, 1 otherwise.
+ */
+static int get_octopus_merge_base(struct object_id *merge_base,
+ const struct object_id *curr_head,
+ const struct object_id *merge_head,
+ const struct object_id *fork_point)
+{
+ struct commit_list *revs = NULL, *result;
+
+ commit_list_insert(lookup_commit_reference(the_repository, curr_head),
+ &revs);
+ commit_list_insert(lookup_commit_reference(the_repository, merge_head),
+ &revs);
+ if (!is_null_oid(fork_point))
+ commit_list_insert(lookup_commit_reference(the_repository, fork_point),
+ &revs);
+
+ result = get_octopus_merge_bases(revs);
+ free_commit_list(revs);
+ reduce_heads_replace(&result);
+
+ if (!result)
+ return 1;
+
+ oidcpy(merge_base, &result->item->object.oid);
+ free_commit_list(result);
+ return 0;
+}
+
+/**
+ * Given the current HEAD oid, the merge head returned from git-fetch and the
+ * fork point calculated by get_rebase_fork_point(), compute the <newbase> and
+ * <upstream> arguments to use for the upcoming git-rebase invocation.
+ */
+static int get_rebase_newbase_and_upstream(struct object_id *newbase,
+ struct object_id *upstream,
+ const struct object_id *curr_head,
+ const struct object_id *merge_head,
+ const struct object_id *fork_point)
+{
+ struct object_id oct_merge_base;
+
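+ /* A fork point that is already the merge base of the heads involved adds no information; in that case fall back to using the merge head as <upstream>. */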
+ if (!get_octopus_merge_base(&oct_merge_base, curr_head, merge_head, fork_point))
+ if (!is_null_oid(fork_point) && oideq(&oct_merge_base, fork_point))
+ fork_point = NULL;
+
+ if (fork_point && !is_null_oid(fork_point))
+ oidcpy(upstream, fork_point);
+ else
+ oidcpy(upstream, merge_head);
+
+ oidcpy(newbase, merge_head);
+
+ return 0;
+}
+
+/**
+ * Given the <newbase> and <upstream> calculated by
+ * get_rebase_newbase_and_upstream(), runs git-rebase with the
+ * appropriate arguments and returns its exit status.
+ */
+static int run_rebase(const struct object_id *newbase,
+ const struct object_id *upstream)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_push(&cmd.args, "rebase");
+
+ /* Shared options */
+ argv_push_verbosity(&cmd.args);
+
+ /* Options passed to git-rebase */
+ if (opt_rebase == REBASE_MERGES)
+ strvec_push(&cmd.args, "--rebase-merges");
+ else if (opt_rebase == REBASE_INTERACTIVE)
+ strvec_push(&cmd.args, "--interactive");
+ if (opt_diffstat)
+ strvec_push(&cmd.args, opt_diffstat);
+ strvec_pushv(&cmd.args, opt_strategies.v);
+ strvec_pushv(&cmd.args, opt_strategy_opts.v);
+ if (opt_gpg_sign)
+ strvec_push(&cmd.args, opt_gpg_sign);
+ if (opt_signoff)
+ strvec_push(&cmd.args, opt_signoff);
+ if (opt_autostash == 0)
+ strvec_push(&cmd.args, "--no-autostash");
+ else if (opt_autostash == 1)
+ strvec_push(&cmd.args, "--autostash");
+ if (opt_verify_signatures &&
+ !strcmp(opt_verify_signatures, "--verify-signatures"))
+ warning(_("ignoring --verify-signatures for rebase"));
+
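+ /* Rebase onto <newbase>, replaying only the commits that are not already reachable from <upstream>. */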
+ strvec_push(&cmd.args, "--onto");
+ strvec_push(&cmd.args, oid_to_hex(newbase));
+
+ strvec_push(&cmd.args, oid_to_hex(upstream));
+
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
+}
+
+static int get_can_ff(struct object_id *orig_head,
+ struct oid_array *merge_heads)
+{
+ int ret;
+ struct commit_list *list = NULL;
+ struct commit *merge_head, *head;
+ struct object_id *orig_merge_head;
+
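+ /* Fast-forwarding is only possible with a single merge head that is a descendant of the current HEAD. */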
+ if (merge_heads->nr > 1)
+ return 0;
+
+ orig_merge_head = &merge_heads->oid[0];
+ head = lookup_commit_reference(the_repository, orig_head);
+ commit_list_insert(head, &list);
+ merge_head = lookup_commit_reference(the_repository, orig_merge_head);
+ ret = repo_is_descendant_of(the_repository, merge_head, list);
+ free_commit_list(list);
+ return ret;
+}
+
+/*
+ * Is orig_head a descendant of _all_ merge_heads?
+ * Unfortunately is_descendant_of() cannot be used here, as it only asks
+ * whether orig_head is a descendant of at least one of them.
+ */
+static int already_up_to_date(struct object_id *orig_head,
+ struct oid_array *merge_heads)
+{
+ int i;
+ struct commit *ours;
+
+ ours = lookup_commit_reference(the_repository, orig_head);
+ for (i = 0; i < merge_heads->nr; i++) {
+ struct commit_list *list = NULL;
+ struct commit *theirs;
+ int ok;
+
+ theirs = lookup_commit_reference(the_repository, &merge_heads->oid[i]);
+ commit_list_insert(theirs, &list);
+ ok = repo_is_descendant_of(the_repository, ours, list);
+ free_commit_list(list);
+ if (!ok)
+ return 0;
+ }
+ return 1;
+}
+
+static void show_advice_pull_non_ff(void)
+{
+ advise(_("You have divergent branches and need to specify how to reconcile them.\n"
+ "You can do so by running one of the following commands sometime before\n"
+ "your next pull:\n"
+ "\n"
+ " git config pull.rebase false # merge\n"
+ " git config pull.rebase true # rebase\n"
+ " git config pull.ff only # fast-forward only\n"
+ "\n"
+ "You can replace \"git config\" with \"git config --global\" to set a default\n"
+ "preference for all repositories. You can also pass --rebase, --no-rebase,\n"
+ "or --ff-only on the command line to override the configured default per\n"
+ "invocation.\n"));
+}
+
+int cmd_pull(int argc, const char **argv, const char *prefix)
+{
+ const char *repo, **refspecs;
+ struct oid_array merge_heads = OID_ARRAY_INIT;
+ struct object_id orig_head, curr_head;
+ struct object_id rebase_fork_point;
+ int rebase_unspecified = 0;
+ int can_ff;
+ int divergent;
+ int ret;
+
+ if (!getenv("GIT_REFLOG_ACTION"))
+ set_reflog_message(argc, argv);
+
+ git_config(git_pull_config, NULL);
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
+ argc = parse_options(argc, argv, prefix, pull_options, pull_usage, 0);
+
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ recurse_submodules = recurse_submodules_cli;
+
+ if (cleanup_arg)
+ /*
+ * this only checks the validity of cleanup_arg; we don't need
+ * a valid value for use_editor
+ */
+ get_cleanup_mode(cleanup_arg, 0);
+
+ parse_repo_refspecs(argc, argv, &repo, &refspecs);
+
+ if (!opt_ff) {
+ opt_ff = xstrdup_or_null(config_get_ff());
+ /*
+ * A subtle point: opt_ff was set on the line above via
+ * reading from config. opt_rebase, in contrast, is set
+ * before this point via command line options. The setting
+ * of opt_rebase via reading from config (using
+ * config_get_rebase()) does not happen until later. We
+ * are relying on the next if-condition happening before
+ * the config_get_rebase() call so that an explicit
+ * "--rebase" can override a config setting of
+ * pull.ff=only.
+ */
+ if (opt_rebase >= 0 && opt_ff && !strcmp(opt_ff, "--ff-only"))
+ opt_ff = "--ff";
+ }
+
+ if (opt_rebase < 0)
+ opt_rebase = config_get_rebase(&rebase_unspecified);
+
+ if (repo_read_index_unmerged(the_repository))
+ die_resolve_conflict("pull");
+
+ if (file_exists(git_path_merge_head(the_repository)))
+ die_conclude_merge();
+
+ if (get_oid("HEAD", &orig_head))
+ oidclr(&orig_head);
+
+ if (opt_rebase) {
+ if (opt_autostash == -1)
+ opt_autostash = config_autostash;
+
+ if (is_null_oid(&orig_head) && !is_index_unborn(&the_index))
+ die(_("Updating an unborn branch with changes added to the index."));
+
+ if (!opt_autostash)
+ require_clean_work_tree(the_repository,
+ N_("pull with rebase"),
+ _("please commit or stash them."), 1, 0);
+
+ if (get_rebase_fork_point(&rebase_fork_point, repo, *refspecs))
+ oidclr(&rebase_fork_point);
+ }
+
+ if (run_fetch(repo, refspecs))
+ return 1;
+
+ if (opt_dry_run)
+ return 0;
+
+ if (get_oid("HEAD", &curr_head))
+ oidclr(&curr_head);
+
+ if (!is_null_oid(&orig_head) && !is_null_oid(&curr_head) &&
+ !oideq(&orig_head, &curr_head)) {
+ /*
+ * The fetch involved updating the current branch.
+ *
+ * The working tree and the index file are still based on
+ * orig_head commit, but we are merging into curr_head.
+ * Update the working tree to match curr_head.
+ */
+
+ warning(_("fetch updated the current branch head.\n"
+ "fast-forwarding your working tree from\n"
+ "commit %s."), oid_to_hex(&orig_head));
+
+ if (checkout_fast_forward(the_repository, &orig_head,
+ &curr_head, 0))
+ die(_("Cannot fast-forward your working tree.\n"
+ "After making sure that you saved anything precious from\n"
+ "$ git diff %s\n"
+ "output, run\n"
+ "$ git reset --hard\n"
+ "to recover."), oid_to_hex(&orig_head));
+ }
+
+ get_merge_heads(&merge_heads);
+
+ if (!merge_heads.nr)
+ die_no_merge_candidates(repo, refspecs);
+
+ if (is_null_oid(&orig_head)) {
+ if (merge_heads.nr > 1)
+ die(_("Cannot merge multiple branches into empty head."));
+ ret = pull_into_void(merge_heads.oid, &curr_head);
+ goto cleanup;
+ }
+ if (merge_heads.nr > 1) {
+ if (opt_rebase)
+ die(_("Cannot rebase onto multiple branches."));
+ if (opt_ff && !strcmp(opt_ff, "--ff-only"))
+ die(_("Cannot fast-forward to multiple branches."));
+ }
+
+ can_ff = get_can_ff(&orig_head, &merge_heads);
+ divergent = !can_ff && !already_up_to_date(&orig_head, &merge_heads);
+
+ /* ff-only takes precedence over rebase */
+ if (opt_ff && !strcmp(opt_ff, "--ff-only")) {
+ if (divergent)
+ die_ff_impossible();
+ opt_rebase = REBASE_FALSE;
+ }
+ /* If no action specified and we can't fast forward, then warn. */
+ if (!opt_ff && rebase_unspecified && divergent) {
+ show_advice_pull_non_ff();
+ die(_("Need to specify how to reconcile divergent branches."));
+ }
+
+ if (opt_rebase) {
+ struct object_id newbase;
+ struct object_id upstream;
+ get_rebase_newbase_and_upstream(&newbase, &upstream, &curr_head,
+ merge_heads.oid, &rebase_fork_point);
+
+ if ((recurse_submodules == RECURSE_SUBMODULES_ON ||
+ recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND) &&
+ submodule_touches_in_range(the_repository, &upstream, &curr_head))
+ die(_("cannot rebase with locally recorded submodule modifications"));
+
+ if (can_ff) {
+ /* we can fast-forward this without invoking rebase */
+ opt_ff = "--ff-only";
+ ret = run_merge();
+ } else {
+ ret = run_rebase(&newbase, &upstream);
+ }
+
+ if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON ||
+ recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND))
+ ret = rebase_submodules();
+
+ goto cleanup;
+ } else {
+ ret = run_merge();
+ if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON ||
+ recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND))
+ ret = update_submodules();
+ goto cleanup;
+ }
+
+cleanup:
+ oid_array_clear(&merge_heads);
+ return ret;
+}
diff --git a/builtin/push.c b/builtin/push.c
new file mode 100644
index 0000000..60ac801
--- /dev/null
+++ b/builtin/push.c
@@ -0,0 +1,703 @@
+/*
+ * "git push"
+ */
+#include "cache.h"
+#include "branch.h"
+#include "config.h"
+#include "refs.h"
+#include "refspec.h"
+#include "run-command.h"
+#include "builtin.h"
+#include "remote.h"
+#include "transport.h"
+#include "parse-options.h"
+#include "submodule.h"
+#include "submodule-config.h"
+#include "send-pack.h"
+#include "color.h"
+
+static const char * const push_usage[] = {
+ N_("git push [<options>] [<repository> [<refspec>...]]"),
+ NULL,
+};
+
+static int push_use_color = -1;
+static char push_colors[][COLOR_MAXLEN] = {
+ GIT_COLOR_RESET,
+ GIT_COLOR_RED, /* ERROR */
+};
+
+enum color_push {
+ PUSH_COLOR_RESET = 0,
+ PUSH_COLOR_ERROR = 1
+};
+
+static int parse_push_color_slot(const char *slot)
+{
+ if (!strcasecmp(slot, "reset"))
+ return PUSH_COLOR_RESET;
+ if (!strcasecmp(slot, "error"))
+ return PUSH_COLOR_ERROR;
+ return -1;
+}
+
+static const char *push_get_color(enum color_push ix)
+{
+ if (want_color_stderr(push_use_color))
+ return push_colors[ix];
+ return "";
+}
+
+static int thin = 1;
+static int deleterefs;
+static const char *receivepack;
+static int verbosity;
+static int progress = -1;
+static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
+static enum transport_family family;
+
+static struct push_cas_option cas;
+
+static struct refspec rs = REFSPEC_INIT_PUSH;
+
+static struct string_list push_options_config = STRING_LIST_INIT_DUP;
+
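+/*
+ * Maps a bare ref given on the command line to a push refspec: first via the
+ * remote's configured push refspecs, then (for push.default=upstream) via the
+ * branch's configured upstream, and otherwise pushes it under its own name.
+ */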
+static void refspec_append_mapped(struct refspec *refspec, const char *ref,
+ struct remote *remote, struct ref *local_refs)
+{
+ const char *branch_name;
+ struct ref *matched = NULL;
+
+ /* Does "ref" uniquely name our ref? */
+ if (count_refspec_match(ref, local_refs, &matched) != 1) {
+ refspec_append(refspec, ref);
+ return;
+ }
+
+ if (remote->push.nr) {
+ struct refspec_item query;
+ memset(&query, 0, sizeof(struct refspec_item));
+ query.src = matched->name;
+ if (!query_refspecs(&remote->push, &query) && query.dst) {
+ refspec_appendf(refspec, "%s%s:%s",
+ query.force ? "+" : "",
+ query.src, query.dst);
+ return;
+ }
+ }
+
+ if (push_default == PUSH_DEFAULT_UPSTREAM &&
+ skip_prefix(matched->name, "refs/heads/", &branch_name)) {
+ struct branch *branch = branch_get(branch_name);
+ if (branch->merge_nr == 1 && branch->merge[0]->src) {
+ refspec_appendf(refspec, "%s:%s",
+ ref, branch->merge[0]->src);
+ return;
+ }
+ }
+
+ refspec_append(refspec, ref);
+}
+
+static void set_refspecs(const char **refs, int nr, const char *repo)
+{
+ struct remote *remote = NULL;
+ struct ref *local_refs = NULL;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ const char *ref = refs[i];
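+ /* "tag <name>" is shorthand for refs/tags/<name> */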
+ if (!strcmp("tag", ref)) {
+ if (nr <= ++i)
+ die(_("tag shorthand without <tag>"));
+ ref = refs[i];
+ if (deleterefs)
+ refspec_appendf(&rs, ":refs/tags/%s", ref);
+ else
+ refspec_appendf(&rs, "refs/tags/%s", ref);
+ } else if (deleterefs) {
+ if (strchr(ref, ':') || !*ref)
+ die(_("--delete only accepts plain target ref names"));
+ refspec_appendf(&rs, ":%s", ref);
+ } else if (!strchr(ref, ':')) {
+ if (!remote) {
+ /* lazily grab remote and local_refs */
+ remote = remote_get(repo);
+ local_refs = get_local_heads();
+ }
+ refspec_append_mapped(&rs, ref, remote, local_refs);
+ } else
+ refspec_append(&rs, ref);
+ }
+}
+
+static int push_url_of_remote(struct remote *remote, const char ***url_p)
+{
+ if (remote->pushurl_nr) {
+ *url_p = remote->pushurl;
+ return remote->pushurl_nr;
+ }
+ *url_p = remote->url;
+ return remote->url_nr;
+}
+
+static NORETURN void die_push_simple(struct branch *branch,
+ struct remote *remote)
+{
+ /*
+ * There's no point in using shorten_unambiguous_ref here,
+ * as the ambiguity would be on the remote side, not what
+ * we have locally. Plus, this is supposed to be the simple
+ * mode. If the user is doing something crazy like setting
+ * upstream to a non-branch, we should probably be showing
+ * them the big ugly fully qualified ref.
+ */
+ const char *advice_pushdefault_maybe = "";
+ const char *advice_automergesimple_maybe = "";
+ const char *short_upstream = branch->merge[0]->src;
+
+ skip_prefix(short_upstream, "refs/heads/", &short_upstream);
+
+ /*
+ * Don't show advice for people who explicitly set
+ * push.default.
+ */
+ if (push_default == PUSH_DEFAULT_UNSPECIFIED)
+ advice_pushdefault_maybe = _("\n"
+ "To choose either option permanently, "
+ "see push.default in 'git help config'.\n");
+ if (git_branch_track != BRANCH_TRACK_SIMPLE)
+ advice_automergesimple_maybe = _("\n"
+ "To avoid automatically configuring "
+ "an upstream branch when its name\n"
+ "won't match the local branch, see option "
+ "'simple' of branch.autoSetupMerge\n"
+ "in 'git help config'.\n");
+ die(_("The upstream branch of your current branch does not match\n"
+ "the name of your current branch. To push to the upstream branch\n"
+ "on the remote, use\n"
+ "\n"
+ " git push %s HEAD:%s\n"
+ "\n"
+ "To push to the branch of the same name on the remote, use\n"
+ "\n"
+ " git push %s HEAD\n"
+ "%s%s"),
+ remote->name, short_upstream,
+ remote->name, advice_pushdefault_maybe,
+ advice_automergesimple_maybe);
+}
+
+static const char message_detached_head_die[] =
+ N_("You are not currently on a branch.\n"
+ "To push the history leading to the current (detached HEAD)\n"
+ "state now, use\n"
+ "\n"
+ " git push %s HEAD:<name-of-remote-branch>\n");
+
+static const char *get_upstream_ref(int flags, struct branch *branch, const char *remote_name)
+{
+ if (branch->merge_nr == 0 && (flags & TRANSPORT_PUSH_AUTO_UPSTREAM)) {
+ /* if no upstream is configured, assume a branch of the same name; the upstream itself will be set later in this push */
+ return branch->refname;
+ }
+
+ if (!branch->merge_nr || !branch->merge || !branch->remote_name) {
+ const char *advice_autosetup_maybe = "";
+ if (!(flags & TRANSPORT_PUSH_AUTO_UPSTREAM)) {
+ advice_autosetup_maybe = _("\n"
+ "To have this happen automatically for "
+ "branches without a tracking\n"
+ "upstream, see 'push.autoSetupRemote' "
+ "in 'git help config'.\n");
+ }
+ die(_("The current branch %s has no upstream branch.\n"
+ "To push the current branch and set the remote as upstream, use\n"
+ "\n"
+ " git push --set-upstream %s %s\n"
+ "%s"),
+ branch->name,
+ remote_name,
+ branch->name,
+ advice_autosetup_maybe);
+ }
+ if (branch->merge_nr != 1)
+ die(_("The current branch %s has multiple upstream branches, "
+ "refusing to push."), branch->name);
+
+ return branch->merge[0]->src;
+}
+
+static void setup_default_push_refspecs(int *flags, struct remote *remote)
+{
+ struct branch *branch;
+ const char *dst;
+ int same_remote;
+
+ switch (push_default) {
+ case PUSH_DEFAULT_MATCHING:
+ refspec_append(&rs, ":");
+ return;
+
+ case PUSH_DEFAULT_NOTHING:
+ die(_("You didn't specify any refspecs to push, and "
+ "push.default is \"nothing\"."));
+ return;
+ default:
+ break;
+ }
+
+ branch = branch_get(NULL);
+ if (!branch)
+ die(_(message_detached_head_die), remote->name);
+
+ dst = branch->refname;
+ same_remote = !strcmp(remote->name, remote_for_branch(branch, NULL));
+
+ switch (push_default) {
+ default:
+ case PUSH_DEFAULT_UNSPECIFIED:
+ case PUSH_DEFAULT_SIMPLE:
+ if (!same_remote)
+ break;
+ if (strcmp(branch->refname, get_upstream_ref(*flags, branch, remote->name)))
+ die_push_simple(branch, remote);
+ break;
+
+ case PUSH_DEFAULT_UPSTREAM:
+ if (!same_remote)
+ die(_("You are pushing to remote '%s', which is not the upstream of\n"
+ "your current branch '%s', without telling me what to push\n"
+ "to update which remote branch."),
+ remote->name, branch->name);
+ dst = get_upstream_ref(*flags, branch, remote->name);
+ break;
+
+ case PUSH_DEFAULT_CURRENT:
+ break;
+ }
+
+ /*
+ * This is a default push: if auto-upstream is enabled and there is
+ * no upstream defined, then set it (applies to the push.default
+ * values 'simple', 'upstream', and 'current').
+ */
+ if ((*flags & TRANSPORT_PUSH_AUTO_UPSTREAM) && branch->merge_nr == 0)
+ *flags |= TRANSPORT_PUSH_SET_UPSTREAM;
+
+ refspec_appendf(&rs, "%s:%s", branch->refname, dst);
+}
+
+static const char message_advice_pull_before_push[] =
+ N_("Updates were rejected because the tip of your current branch is behind\n"
+ "its remote counterpart. Integrate the remote changes (e.g.\n"
+ "'git pull ...') before pushing again.\n"
+ "See the 'Note about fast-forwards' in 'git push --help' for details.");
+
+static const char message_advice_checkout_pull_push[] =
+ N_("Updates were rejected because a pushed branch tip is behind its remote\n"
+ "counterpart. Check out this branch and integrate the remote changes\n"
+ "(e.g. 'git pull ...') before pushing again.\n"
+ "See the 'Note about fast-forwards' in 'git push --help' for details.");
+
+static const char message_advice_ref_fetch_first[] =
+ N_("Updates were rejected because the remote contains work that you do\n"
+ "not have locally. This is usually caused by another repository pushing\n"
+ "to the same ref. You may want to first integrate the remote changes\n"
+ "(e.g., 'git pull ...') before pushing again.\n"
+ "See the 'Note about fast-forwards' in 'git push --help' for details.");
+
+static const char message_advice_ref_already_exists[] =
+ N_("Updates were rejected because the tag already exists in the remote.");
+
+static const char message_advice_ref_needs_force[] =
+ N_("You cannot update a remote ref that points at a non-commit object,\n"
+ "or update a remote ref to make it point at a non-commit object,\n"
+ "without using the '--force' option.\n");
+
+static const char message_advice_ref_needs_update[] =
+ N_("Updates were rejected because the tip of the remote-tracking\n"
+ "branch has been updated since the last checkout. You may want\n"
+ "to integrate those changes locally (e.g., 'git pull ...')\n"
+ "before forcing an update.\n");
+
+static void advise_pull_before_push(void)
+{
+ if (!advice_enabled(ADVICE_PUSH_NON_FF_CURRENT) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED))
+ return;
+ advise(_(message_advice_pull_before_push));
+}
+
+static void advise_checkout_pull_push(void)
+{
+ if (!advice_enabled(ADVICE_PUSH_NON_FF_MATCHING) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED))
+ return;
+ advise(_(message_advice_checkout_pull_push));
+}
+
+static void advise_ref_already_exists(void)
+{
+ if (!advice_enabled(ADVICE_PUSH_ALREADY_EXISTS) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED))
+ return;
+ advise(_(message_advice_ref_already_exists));
+}
+
+static void advise_ref_fetch_first(void)
+{
+ if (!advice_enabled(ADVICE_PUSH_FETCH_FIRST) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED))
+ return;
+ advise(_(message_advice_ref_fetch_first));
+}
+
+static void advise_ref_needs_force(void)
+{
+ if (!advice_enabled(ADVICE_PUSH_NEEDS_FORCE) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED))
+ return;
+ advise(_(message_advice_ref_needs_force));
+}
+
+static void advise_ref_needs_update(void)
+{
+ if (!advice_enabled(ADVICE_PUSH_REF_NEEDS_UPDATE) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED))
+ return;
+ advise(_(message_advice_ref_needs_update));
+}
+
+static int push_with_options(struct transport *transport, struct refspec *rs,
+ int flags)
+{
+ int err;
+ unsigned int reject_reasons;
+ char *anon_url = transport_anonymize_url(transport->url);
+
+ transport_set_verbosity(transport, verbosity, progress);
+ transport->family = family;
+
+ if (receivepack)
+ transport_set_option(transport,
+ TRANS_OPT_RECEIVEPACK, receivepack);
+ transport_set_option(transport, TRANS_OPT_THIN, thin ? "yes" : NULL);
+
+ if (!is_empty_cas(&cas)) {
+ if (!transport->smart_options)
+ die("underlying transport does not support --%s option",
+ CAS_OPT_NAME);
+ transport->smart_options->cas = &cas;
+ }
+
+ if (verbosity > 0)
+ fprintf(stderr, _("Pushing to %s\n"), anon_url);
+ trace2_region_enter("push", "transport_push", the_repository);
+ err = transport_push(the_repository, transport,
+ rs, flags, &reject_reasons);
+ trace2_region_leave("push", "transport_push", the_repository);
+ if (err != 0) {
+ fprintf(stderr, "%s", push_get_color(PUSH_COLOR_ERROR));
+ error(_("failed to push some refs to '%s'"), anon_url);
+ fprintf(stderr, "%s", push_get_color(PUSH_COLOR_RESET));
+ }
+
+ err |= transport_disconnect(transport);
+ free(anon_url);
+ if (!err)
+ return 0;
+
+ if (reject_reasons & REJECT_NON_FF_HEAD) {
+ advise_pull_before_push();
+ } else if (reject_reasons & REJECT_NON_FF_OTHER) {
+ advise_checkout_pull_push();
+ } else if (reject_reasons & REJECT_ALREADY_EXISTS) {
+ advise_ref_already_exists();
+ } else if (reject_reasons & REJECT_FETCH_FIRST) {
+ advise_ref_fetch_first();
+ } else if (reject_reasons & REJECT_NEEDS_FORCE) {
+ advise_ref_needs_force();
+ } else if (reject_reasons & REJECT_REF_NEEDS_UPDATE) {
+ advise_ref_needs_update();
+ }
+
+ return 1;
+}
+
+static int do_push(int flags,
+ const struct string_list *push_options,
+ struct remote *remote)
+{
+ int i, errs;
+ const char **url;
+ int url_nr;
+ struct refspec *push_refspec = &rs;
+
+ if (push_options->nr)
+ flags |= TRANSPORT_PUSH_OPTIONS;
+
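+ /* With no refspecs given and no --all, fall back to the remote's configured push refspecs, or (unless --mirror) to the push.default behaviour. */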
+ if (!push_refspec->nr && !(flags & TRANSPORT_PUSH_ALL)) {
+ if (remote->push.nr) {
+ push_refspec = &remote->push;
+ } else if (!(flags & TRANSPORT_PUSH_MIRROR))
+ setup_default_push_refspecs(&flags, remote);
+ }
+ errs = 0;
+ url_nr = push_url_of_remote(remote, &url);
+ if (url_nr) {
+ for (i = 0; i < url_nr; i++) {
+ struct transport *transport =
+ transport_get(remote, url[i]);
+ if (flags & TRANSPORT_PUSH_OPTIONS)
+ transport->push_options = push_options;
+ if (push_with_options(transport, push_refspec, flags))
+ errs++;
+ }
+ } else {
+ struct transport *transport =
+ transport_get(remote, NULL);
+ if (flags & TRANSPORT_PUSH_OPTIONS)
+ transport->push_options = push_options;
+ if (push_with_options(transport, push_refspec, flags))
+ errs++;
+ }
+ return !!errs;
+}
+
+static int option_parse_recurse_submodules(const struct option *opt,
+ const char *arg, int unset)
+{
+ int *recurse_submodules = opt->value;
+
+ if (unset)
+ *recurse_submodules = RECURSE_SUBMODULES_OFF;
+ else {
+ if (!strcmp(arg, "only-is-on-demand")) {
+ if (*recurse_submodules == RECURSE_SUBMODULES_ONLY) {
+ warning(_("recursing into submodule with push.recurseSubmodules=only; using on-demand instead"));
+ *recurse_submodules = RECURSE_SUBMODULES_ON_DEMAND;
+ }
+ } else {
+ *recurse_submodules = parse_push_recurse_submodules_arg(opt->long_name, arg);
+ }
+ }
+
+ return 0;
+}
+
+static void set_push_cert_flags(int *flags, int v)
+{
+ switch (v) {
+ case SEND_PACK_PUSH_CERT_NEVER:
+ *flags &= ~(TRANSPORT_PUSH_CERT_ALWAYS | TRANSPORT_PUSH_CERT_IF_ASKED);
+ break;
+ case SEND_PACK_PUSH_CERT_ALWAYS:
+ *flags |= TRANSPORT_PUSH_CERT_ALWAYS;
+ *flags &= ~TRANSPORT_PUSH_CERT_IF_ASKED;
+ break;
+ case SEND_PACK_PUSH_CERT_IF_ASKED:
+ *flags |= TRANSPORT_PUSH_CERT_IF_ASKED;
+ *flags &= ~TRANSPORT_PUSH_CERT_ALWAYS;
+ break;
+ }
+}
+
+
+static int git_push_config(const char *k, const char *v, void *cb)
+{
+ const char *slot_name;
+ int *flags = cb;
+ int status;
+
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+
+ if (!strcmp(k, "push.followtags")) {
+ if (git_config_bool(k, v))
+ *flags |= TRANSPORT_PUSH_FOLLOW_TAGS;
+ else
+ *flags &= ~TRANSPORT_PUSH_FOLLOW_TAGS;
+ return 0;
+ } else if (!strcmp(k, "push.autosetupremote")) {
+ if (git_config_bool(k, v))
+ *flags |= TRANSPORT_PUSH_AUTO_UPSTREAM;
+ return 0;
+ } else if (!strcmp(k, "push.gpgsign")) {
+ const char *value;
+ if (!git_config_get_value("push.gpgsign", &value)) {
+ switch (git_parse_maybe_bool(value)) {
+ case 0:
+ set_push_cert_flags(flags, SEND_PACK_PUSH_CERT_NEVER);
+ break;
+ case 1:
+ set_push_cert_flags(flags, SEND_PACK_PUSH_CERT_ALWAYS);
+ break;
+ default:
+ if (value && !strcasecmp(value, "if-asked"))
+ set_push_cert_flags(flags, SEND_PACK_PUSH_CERT_IF_ASKED);
+ else
+ return error(_("invalid value for '%s'"), k);
+ }
+ }
+ } else if (!strcmp(k, "push.recursesubmodules")) {
+ const char *value;
+ if (!git_config_get_value("push.recursesubmodules", &value))
+ recurse_submodules = parse_push_recurse_submodules_arg(k, value);
+ } else if (!strcmp(k, "submodule.recurse")) {
+ int val = git_config_bool(k, v) ?
+ RECURSE_SUBMODULES_ON_DEMAND : RECURSE_SUBMODULES_OFF;
+ recurse_submodules = val;
+ } else if (!strcmp(k, "push.pushoption")) {
+ if (!v)
+ return config_error_nonbool(k);
+ else
+ if (!*v)
+ string_list_clear(&push_options_config, 0);
+ else
+ string_list_append(&push_options_config, v);
+ return 0;
+ } else if (!strcmp(k, "color.push")) {
+ push_use_color = git_config_colorbool(k, v);
+ return 0;
+ } else if (skip_prefix(k, "color.push.", &slot_name)) {
+ int slot = parse_push_color_slot(slot_name);
+ if (slot < 0)
+ return 0;
+ if (!v)
+ return config_error_nonbool(k);
+ return color_parse(v, push_colors[slot]);
+ } else if (!strcmp(k, "push.useforceifincludes")) {
+ if (git_config_bool(k, v))
+ *flags |= TRANSPORT_PUSH_FORCE_IF_INCLUDES;
+ else
+ *flags &= ~TRANSPORT_PUSH_FORCE_IF_INCLUDES;
+ return 0;
+ }
+
+ return git_default_config(k, v, NULL);
+}
+
+int cmd_push(int argc, const char **argv, const char *prefix)
+{
+ int flags = 0;
+ int tags = 0;
+ int push_cert = -1;
+ int rc;
+ const char *repo = NULL; /* default repository */
+ struct string_list push_options_cmdline = STRING_LIST_INIT_DUP;
+ struct string_list *push_options;
+ const struct string_list_item *item;
+ struct remote *remote;
+
+ struct option options[] = {
+ OPT__VERBOSITY(&verbosity),
+ OPT_STRING( 0 , "repo", &repo, N_("repository"), N_("repository")),
+ OPT_BIT( 0 , "all", &flags, N_("push all refs"), TRANSPORT_PUSH_ALL),
+ OPT_BIT( 0 , "mirror", &flags, N_("mirror all refs"),
+ (TRANSPORT_PUSH_MIRROR|TRANSPORT_PUSH_FORCE)),
+ OPT_BOOL('d', "delete", &deleterefs, N_("delete refs")),
+ OPT_BOOL( 0 , "tags", &tags, N_("push tags (can't be used with --all or --mirror)")),
+ OPT_BIT('n' , "dry-run", &flags, N_("dry run"), TRANSPORT_PUSH_DRY_RUN),
+ OPT_BIT( 0, "porcelain", &flags, N_("machine-readable output"), TRANSPORT_PUSH_PORCELAIN),
+ OPT_BIT('f', "force", &flags, N_("force updates"), TRANSPORT_PUSH_FORCE),
+ OPT_CALLBACK_F(0, CAS_OPT_NAME, &cas, N_("<refname>:<expect>"),
+ N_("require old value of ref to be at this value"),
+ PARSE_OPT_OPTARG | PARSE_OPT_LITERAL_ARGHELP, parseopt_push_cas_option),
+ OPT_BIT(0, TRANS_OPT_FORCE_IF_INCLUDES, &flags,
+ N_("require remote updates to be integrated locally"),
+ TRANSPORT_PUSH_FORCE_IF_INCLUDES),
+ OPT_CALLBACK(0, "recurse-submodules", &recurse_submodules, "(check|on-demand|no)",
+ N_("control recursive pushing of submodules"), option_parse_recurse_submodules),
+ OPT_BOOL_F( 0 , "thin", &thin, N_("use thin pack"), PARSE_OPT_NOCOMPLETE),
+ OPT_STRING( 0 , "receive-pack", &receivepack, "receive-pack", N_("receive pack program")),
+ OPT_STRING( 0 , "exec", &receivepack, "receive-pack", N_("receive pack program")),
+ OPT_BIT('u', "set-upstream", &flags, N_("set upstream for git pull/status"),
+ TRANSPORT_PUSH_SET_UPSTREAM),
+ OPT_BOOL(0, "progress", &progress, N_("force progress reporting")),
+ OPT_BIT(0, "prune", &flags, N_("prune locally removed refs"),
+ TRANSPORT_PUSH_PRUNE),
+ OPT_BIT(0, "no-verify", &flags, N_("bypass pre-push hook"), TRANSPORT_PUSH_NO_HOOK),
+ OPT_BIT(0, "follow-tags", &flags, N_("push missing but relevant tags"),
+ TRANSPORT_PUSH_FOLLOW_TAGS),
+ OPT_CALLBACK_F(0, "signed", &push_cert, "(yes|no|if-asked)", N_("GPG sign the push"),
+ PARSE_OPT_OPTARG, option_parse_push_signed),
+ OPT_BIT(0, "atomic", &flags, N_("request atomic transaction on remote side"), TRANSPORT_PUSH_ATOMIC),
+ OPT_STRING_LIST('o', "push-option", &push_options_cmdline, N_("server-specific"), N_("option to transmit")),
+ OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
+ TRANSPORT_FAMILY_IPV4),
+ OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
+ TRANSPORT_FAMILY_IPV6),
+ OPT_END()
+ };
+
+ packet_trace_identity("push");
+ git_config(git_push_config, &flags);
+ argc = parse_options(argc, argv, prefix, options, push_usage, 0);
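+ /* --push-option on the command line takes precedence over push.pushOption from the config */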
+ push_options = (push_options_cmdline.nr
+ ? &push_options_cmdline
+ : &push_options_config);
+ set_push_cert_flags(&flags, push_cert);
+
+ if (deleterefs && (tags || (flags & (TRANSPORT_PUSH_ALL | TRANSPORT_PUSH_MIRROR))))
+ die(_("options '%s' and '%s' cannot be used together"), "--delete", "--all/--mirror/--tags");
+ if (deleterefs && argc < 2)
+ die(_("--delete doesn't make sense without any refs"));
+
+ if (recurse_submodules == RECURSE_SUBMODULES_CHECK)
+ flags |= TRANSPORT_RECURSE_SUBMODULES_CHECK;
+ else if (recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND)
+ flags |= TRANSPORT_RECURSE_SUBMODULES_ON_DEMAND;
+ else if (recurse_submodules == RECURSE_SUBMODULES_ONLY)
+ flags |= TRANSPORT_RECURSE_SUBMODULES_ONLY;
+
+ if (tags)
+ refspec_append(&rs, "refs/tags/*");
+
+ if (argc > 0) {
+ repo = argv[0];
+ set_refspecs(argv + 1, argc - 1, repo);
+ }
+
+ remote = pushremote_get(repo);
+ if (!remote) {
+ if (repo)
+ die(_("bad repository '%s'"), repo);
+ die(_("No configured push destination.\n"
+ "Either specify the URL from the command-line or configure a remote repository using\n"
+ "\n"
+ " git remote add <name> <url>\n"
+ "\n"
+ "and then push using the remote name\n"
+ "\n"
+ " git push <name>\n"));
+ }
+
+ if (remote->mirror)
+ flags |= (TRANSPORT_PUSH_MIRROR|TRANSPORT_PUSH_FORCE);
+
+ if (flags & TRANSPORT_PUSH_ALL) {
+ if (tags)
+ die(_("options '%s' and '%s' cannot be used together"), "--all", "--tags");
+ if (argc >= 2)
+ die(_("--all can't be combined with refspecs"));
+ }
+ if (flags & TRANSPORT_PUSH_MIRROR) {
+ if (tags)
+ die(_("options '%s' and '%s' cannot be used together"), "--mirror", "--tags");
+ if (argc >= 2)
+ die(_("--mirror can't be combined with refspecs"));
+ }
+ if ((flags & TRANSPORT_PUSH_ALL) && (flags & TRANSPORT_PUSH_MIRROR))
+ die(_("options '%s' and '%s' cannot be used together"), "--all", "--mirror");
+
+ if (!is_empty_cas(&cas) && (flags & TRANSPORT_PUSH_FORCE_IF_INCLUDES))
+ cas.use_force_if_includes = 1;
+
+ for_each_string_list_item(item, push_options)
+ if (strchr(item->string, '\n'))
+ die(_("push options must not have new line characters"));
+
+ rc = do_push(flags, push_options, remote);
+ string_list_clear(&push_options_cmdline, 0);
+ string_list_clear(&push_options_config, 0);
+ if (rc == -1)
+ usage_with_options(push_usage, options);
+ else
+ return rc;
+}
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
new file mode 100644
index 0000000..e2a74ef
--- /dev/null
+++ b/builtin/range-diff.c
@@ -0,0 +1,157 @@
+#include "cache.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "range-diff.h"
+#include "config.h"
+#include "revision.h"
+
+static const char * const builtin_range_diff_usage[] = {
+N_("git range-diff [<options>] <old-base>..<old-tip> <new-base>..<new-tip>"),
+N_("git range-diff [<options>] <old-tip>...<new-tip>"),
+N_("git range-diff [<options>] <base> <old-tip> <new-tip>"),
+NULL
+};
+
+int cmd_range_diff(int argc, const char **argv, const char *prefix)
+{
+ struct diff_options diffopt = { NULL };
+ struct strvec other_arg = STRVEC_INIT;
+ struct range_diff_options range_diff_opts = {
+ .creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT,
+ .diffopt = &diffopt,
+ .other_arg = &other_arg
+ };
+ int simple_color = -1, left_only = 0, right_only = 0;
+ struct option range_diff_options[] = {
+ OPT_INTEGER(0, "creation-factor",
+ &range_diff_opts.creation_factor,
+ N_("percentage by which creation is weighted")),
+ OPT_BOOL(0, "no-dual-color", &simple_color,
+ N_("use simple diff colors")),
+ OPT_PASSTHRU_ARGV(0, "notes", &other_arg,
+ N_("notes"), N_("passed to 'git log'"),
+ PARSE_OPT_OPTARG),
+ OPT_BOOL(0, "left-only", &left_only,
+ N_("only emit output related to the first range")),
+ OPT_BOOL(0, "right-only", &right_only,
+ N_("only emit output related to the second range")),
+ OPT_END()
+ };
+ struct option *options;
+ int i, dash_dash = -1, res = 0;
+ struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT;
+ struct object_id oid;
+ const char *three_dots = NULL;
+
+ git_config(git_diff_ui_config, NULL);
+
+ repo_diff_setup(the_repository, &diffopt);
+
+ options = parse_options_concat(range_diff_options, diffopt.parseopts);
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_range_diff_usage, PARSE_OPT_KEEP_DASHDASH);
+
+ diff_setup_done(&diffopt);
+
+ /* force color when --dual-color was used */
+ if (!simple_color)
+ diffopt.use_color = 1;
+
+ for (i = 0; i < argc; i++)
+ if (!strcmp(argv[i], "--")) {
+ dash_dash = i;
+ break;
+ }
+
+ if (dash_dash == 3 ||
+ (dash_dash < 0 && argc > 2 &&
+ !get_oid_committish(argv[0], &oid) &&
+ !get_oid_committish(argv[1], &oid) &&
+ !get_oid_committish(argv[2], &oid))) {
+ if (dash_dash < 0)
+ ; /* already validated arguments */
+ else if (get_oid_committish(argv[0], &oid))
+ usage_msg_optf(_("not a revision: '%s'"),
+ builtin_range_diff_usage, options,
+ argv[0]);
+ else if (get_oid_committish(argv[1], &oid))
+ usage_msg_optf(_("not a revision: '%s'"),
+ builtin_range_diff_usage, options,
+ argv[1]);
+ else if (get_oid_committish(argv[2], &oid))
+ usage_msg_optf(_("not a revision: '%s'"),
+ builtin_range_diff_usage, options,
+ argv[2]);
+
+ strbuf_addf(&range1, "%s..%s", argv[0], argv[1]);
+ strbuf_addf(&range2, "%s..%s", argv[0], argv[2]);
+
+ strvec_pushv(&other_arg, argv +
+ (dash_dash < 0 ? 3 : dash_dash));
+ } else if (dash_dash == 2 ||
+ (dash_dash < 0 && argc > 1 &&
+ is_range_diff_range(argv[0]) &&
+ is_range_diff_range(argv[1]))) {
+ if (dash_dash < 0)
+ ; /* already validated arguments */
+ else if (!is_range_diff_range(argv[0]))
+ usage_msg_optf(_("not a commit range: '%s'"),
+ builtin_range_diff_usage, options,
+ argv[0]);
+ else if (!is_range_diff_range(argv[1]))
+ usage_msg_optf(_("not a commit range: '%s'"),
+ builtin_range_diff_usage, options,
+ argv[1]);
+
+ strbuf_addstr(&range1, argv[0]);
+ strbuf_addstr(&range2, argv[1]);
+
+ strvec_pushv(&other_arg, argv +
+ (dash_dash < 0 ? 2 : dash_dash));
+ } else if (dash_dash == 1 ||
+ (dash_dash < 0 && argc > 0 &&
+ (three_dots = strstr(argv[0], "...")))) {
+ const char *a, *b;
+ int a_len;
+
+ if (dash_dash < 0)
+ ; /* already validated arguments */
+ else if (!(three_dots = strstr(argv[0], "...")))
+ usage_msg_optf(_("not a symmetric range: '%s'"),
+ builtin_range_diff_usage, options,
+ argv[0]);
+
+ if (three_dots == argv[0]) {
+ a = "HEAD";
+ a_len = strlen(a);
+ } else {
+ a = argv[0];
+ a_len = (int)(three_dots - a);
+ }
+
+ if (three_dots[3])
+ b = three_dots + 3;
+ else
+ b = "HEAD";
+
+ strbuf_addf(&range1, "%s..%.*s", b, a_len, a);
+ strbuf_addf(&range2, "%.*s..%s", a_len, a, b);
+
+ strvec_pushv(&other_arg, argv +
+ (dash_dash < 0 ? 1 : dash_dash));
+ } else
+ usage_msg_opt(_("need two commit ranges"),
+ builtin_range_diff_usage, options);
+ FREE_AND_NULL(options);
+
+ range_diff_opts.dual_color = simple_color < 1;
+ range_diff_opts.left_only = left_only;
+ range_diff_opts.right_only = right_only;
+ res = show_range_diff(range1.buf, range2.buf, &range_diff_opts);
+
+ strvec_clear(&other_arg);
+ strbuf_release(&range1);
+ strbuf_release(&range2);
+
+ return res;
+}
diff --git a/builtin/read-tree.c b/builtin/read-tree.c
new file mode 100644
index 0000000..f702f9d
--- /dev/null
+++ b/builtin/read-tree.c
@@ -0,0 +1,282 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "lockfile.h"
+#include "object.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "cache-tree.h"
+#include "unpack-trees.h"
+#include "dir.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "resolve-undo.h"
+#include "submodule.h"
+#include "submodule-config.h"
+
+static int nr_trees;
+static int read_empty;
+static struct tree *trees[MAX_UNPACK_TREES];
+
+static int list_tree(struct object_id *oid)
+{
+ struct tree *tree;
+
+ if (nr_trees >= MAX_UNPACK_TREES)
+ die("I cannot read more than %d trees", MAX_UNPACK_TREES);
+ tree = parse_tree_indirect(oid);
+ if (!tree)
+ return -1;
+ trees[nr_trees++] = tree;
+ return 0;
+}
+
+static const char * const read_tree_usage[] = {
+ N_("git read-tree [(-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>)\n"
+ " [-u | -i]] [--index-output=<file>] [--no-sparse-checkout]\n"
+ " (--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])"),
+ NULL
+};
+
+static int index_output_cb(const struct option *opt, const char *arg,
+ int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ set_alternate_index_output(arg);
+ return 0;
+}
+
+static int exclude_per_directory_cb(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct unpack_trees_options *opts;
+
+ BUG_ON_OPT_NEG(unset);
+
+ opts = (struct unpack_trees_options *)opt->value;
+
+ if (!opts->update)
+ die("--exclude-per-directory is meaningless unless -u");
+ if (strcmp(arg, ".gitignore"))
+ die("--exclude-per-directory argument must be .gitignore");
+ return 0;
+}
+
+static void debug_stage(const char *label, const struct cache_entry *ce,
+ struct unpack_trees_options *o)
+{
+ printf("%s ", label);
+ if (!ce)
+ printf("(missing)\n");
+ else if (ce == o->df_conflict_entry)
+ printf("(conflict)\n");
+ else
+ printf("%06o #%d %s %.8s\n",
+ ce->ce_mode, ce_stage(ce), ce->name,
+ oid_to_hex(&ce->oid));
+}
+
+static int debug_merge(const struct cache_entry * const *stages,
+ struct unpack_trees_options *o)
+{
+ int i;
+
+ printf("* %d-way merge\n", o->merge_size);
+ debug_stage("index", stages[0], o);
+ for (i = 1; i <= o->merge_size; i++) {
+ char buf[24];
+ xsnprintf(buf, sizeof(buf), "ent#%d", i);
+ debug_stage(buf, stages[i], o);
+ }
+ return 0;
+}
+
+static int git_read_tree_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "submodule.recurse"))
+ return git_default_submodule_config(var, value, cb);
+
+ return git_default_config(var, value, cb);
+}
+
+int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
+{
+ int i, stage = 0;
+ struct object_id oid;
+ struct tree_desc t[MAX_UNPACK_TREES];
+ struct unpack_trees_options opts;
+ int prefix_set = 0;
+ struct lock_file lock_file = LOCK_INIT;
+ const struct option read_tree_options[] = {
+ OPT_CALLBACK_F(0, "index-output", NULL, N_("file"),
+ N_("write resulting index to <file>"),
+ PARSE_OPT_NONEG, index_output_cb),
+ OPT_BOOL(0, "empty", &read_empty,
+ N_("only empty the index")),
+ OPT__VERBOSE(&opts.verbose_update, N_("be verbose")),
+ OPT_GROUP(N_("Merging")),
+ OPT_BOOL('m', NULL, &opts.merge,
+ N_("perform a merge in addition to a read")),
+ OPT_BOOL(0, "trivial", &opts.trivial_merges_only,
+ N_("3-way merge if no file level merging required")),
+ OPT_BOOL(0, "aggressive", &opts.aggressive,
+ N_("3-way merge in presence of adds and removes")),
+ OPT_BOOL(0, "reset", &opts.reset,
+ N_("same as -m, but discard unmerged entries")),
+ { OPTION_STRING, 0, "prefix", &opts.prefix, N_("<subdirectory>/"),
+ N_("read the tree into the index under <subdirectory>/"),
+ PARSE_OPT_NONEG },
+ OPT_BOOL('u', NULL, &opts.update,
+ N_("update working tree with merge result")),
+ OPT_CALLBACK_F(0, "exclude-per-directory", &opts,
+ N_("gitignore"),
+ N_("allow explicitly ignored files to be overwritten"),
+ PARSE_OPT_NONEG, exclude_per_directory_cb),
+ OPT_BOOL('i', NULL, &opts.index_only,
+ N_("don't check the working tree after merging")),
+ OPT__DRY_RUN(&opts.dry_run, N_("don't update the index or the work tree")),
+ OPT_BOOL(0, "no-sparse-checkout", &opts.skip_sparse_checkout,
+ N_("skip applying sparse checkout filter")),
+ OPT_BOOL(0, "debug-unpack", &opts.debug_unpack,
+ N_("debug unpack-trees")),
+ OPT_CALLBACK_F(0, "recurse-submodules", NULL,
+ "checkout", "control recursive updating of submodules",
+ PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater),
+ OPT__QUIET(&opts.quiet, N_("suppress feedback messages")),
+ OPT_END()
+ };
+
+ memset(&opts, 0, sizeof(opts));
+ opts.head_idx = -1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+
+ git_config(git_read_tree_config, NULL);
+
+ argc = parse_options(argc, argv, cmd_prefix, read_tree_options,
+ read_tree_usage, 0);
+
+ prefix_set = opts.prefix ? 1 : 0;
+ if (1 < opts.merge + opts.reset + prefix_set)
+ die("Which one? -m, --reset, or --prefix?");
+
+ /* Prefix should not start with a directory separator */
+ if (opts.prefix && opts.prefix[0] == '/')
+ die("Invalid prefix, prefix cannot start with '/'");
+
+ if (opts.reset)
+ opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED;
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+
+ /*
+ * NEEDSWORK
+ *
+ * The old index should be read anyway even if we're going to
+ * destroy all index entries because we still need to preserve
+ * certain information such as index version or split-index
+ * mode.
+ */
+
+ if (opts.reset || opts.merge || opts.prefix) {
+ if (repo_read_index_unmerged(the_repository) && (opts.prefix || opts.merge))
+ die(_("You need to resolve your current index first"));
+ stage = opts.merge = 1;
+ }
+ resolve_undo_clear_index(&the_index);
+
+ for (i = 0; i < argc; i++) {
+ const char *arg = argv[i];
+
+ if (get_oid(arg, &oid))
+ die("Not a valid object name %s", arg);
+ if (list_tree(&oid) < 0)
+ die("failed to unpack tree object %s", arg);
+ stage++;
+ }
+ if (!nr_trees && !read_empty && !opts.merge)
+ warning("read-tree: emptying the index with no arguments is deprecated; use --empty");
+ else if (nr_trees > 0 && read_empty)
+ die("passing trees as arguments contradicts --empty");
+
+ if (1 < opts.index_only + opts.update)
+ die("-u and -i at the same time makes no sense");
+ if ((opts.update || opts.index_only) && !opts.merge)
+ die("%s is meaningless without -m, --reset, or --prefix",
+ opts.update ? "-u" : "-i");
+ if (opts.update && !opts.reset)
+ opts.preserve_ignored = 0;
+ /* otherwise, opts.preserve_ignored is irrelevant */
+ if (opts.merge && !opts.index_only)
+ setup_work_tree();
+
+ if (opts.skip_sparse_checkout)
+ ensure_full_index(&the_index);
+
+ if (opts.merge) {
+ switch (stage - 1) {
+ case 0:
+ die("you must specify at least one tree to merge");
+ break;
+ case 1:
+ opts.fn = opts.prefix ? bind_merge : oneway_merge;
+ break;
+ case 2:
+ opts.fn = twoway_merge;
+ opts.initial_checkout = is_index_unborn(&the_index);
+ break;
+ case 3:
+ default:
+ opts.fn = threeway_merge;
+ break;
+ }
+
+ if (stage - 1 >= 3)
+ opts.head_idx = stage - 2;
+ else
+ opts.head_idx = 1;
+ }
+
+ if (opts.debug_unpack)
+ opts.fn = debug_merge;
+
+ /* If we're going to prime_cache_tree later, skip cache tree update */
+ if (nr_trees == 1 && !opts.prefix)
+ opts.skip_cache_tree_update = 1;
+
+ cache_tree_free(&the_index.cache_tree);
+ for (i = 0; i < nr_trees; i++) {
+ struct tree *tree = trees[i];
+ parse_tree(tree);
+ init_tree_desc(t+i, tree->buffer, tree->size);
+ }
+ if (unpack_trees(nr_trees, t, &opts))
+ return 128;
+
+ if (opts.debug_unpack || opts.dry_run)
+ return 0; /* do not write the index out */
+
+ /*
+ * When reading only one tree (either the most basic form,
+ * "-m ent" or "--reset ent" form), we can obtain a fully
+ * valid cache-tree because the index must match exactly
+ * what came from the tree.
+ */
+ if (nr_trees == 1 && !opts.prefix)
+ prime_cache_tree(the_repository,
+ the_repository->index,
+ trees[0]);
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die("unable to write new index file");
+ return 0;
+}
diff --git a/builtin/rebase.c b/builtin/rebase.c
new file mode 100644
index 0000000..b22768c
--- /dev/null
+++ b/builtin/rebase.c
@@ -0,0 +1,1837 @@
+/*
+ * "git rebase" builtin command
+ *
+ * Copyright (c) 2018 Pratik Karki
+ */
+
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "run-command.h"
+#include "exec-cmd.h"
+#include "strvec.h"
+#include "dir.h"
+#include "packfile.h"
+#include "refs.h"
+#include "quote.h"
+#include "config.h"
+#include "cache-tree.h"
+#include "unpack-trees.h"
+#include "lockfile.h"
+#include "parse-options.h"
+#include "commit.h"
+#include "diff.h"
+#include "wt-status.h"
+#include "revision.h"
+#include "commit-reach.h"
+#include "rerere.h"
+#include "branch.h"
+#include "sequencer.h"
+#include "rebase-interactive.h"
+#include "reset.h"
+#include "hook.h"
+
+static char const * const builtin_rebase_usage[] = {
+ N_("git rebase [-i] [options] [--exec <cmd>] "
+ "[--onto <newbase> | --keep-base] [<upstream> [<branch>]]"),
+ N_("git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] "
+ "--root [<branch>]"),
+ "git rebase --continue | --abort | --skip | --edit-todo",
+ NULL
+};
+
+static GIT_PATH_FUNC(path_squash_onto, "rebase-merge/squash-onto")
+static GIT_PATH_FUNC(path_interactive, "rebase-merge/interactive")
+static GIT_PATH_FUNC(apply_dir, "rebase-apply")
+static GIT_PATH_FUNC(merge_dir, "rebase-merge")
+
+enum rebase_type {
+ REBASE_UNSPECIFIED = -1,
+ REBASE_APPLY,
+ REBASE_MERGE
+};
+
+enum empty_type {
+ EMPTY_UNSPECIFIED = -1,
+ EMPTY_DROP,
+ EMPTY_KEEP,
+ EMPTY_ASK
+};
+
+enum action {
+ ACTION_NONE = 0,
+ ACTION_CONTINUE,
+ ACTION_SKIP,
+ ACTION_ABORT,
+ ACTION_QUIT,
+ ACTION_EDIT_TODO,
+ ACTION_SHOW_CURRENT_PATCH
+};
+
+static const char *action_names[] = {
+ "undefined",
+ "continue",
+ "skip",
+ "abort",
+ "quit",
+ "edit_todo",
+ "show_current_patch"
+};
+
+struct rebase_options {
+ enum rebase_type type;
+ enum empty_type empty;
+ const char *default_backend;
+ const char *state_dir;
+ struct commit *upstream;
+ const char *upstream_name;
+ const char *upstream_arg;
+ char *head_name;
+ struct commit *orig_head;
+ struct commit *onto;
+ const char *onto_name;
+ const char *revisions;
+ const char *switch_to;
+ int root, root_with_onto;
+ struct object_id *squash_onto;
+ struct commit *restrict_revision;
+ int dont_finish_rebase;
+ enum {
+ REBASE_NO_QUIET = 1<<0,
+ REBASE_VERBOSE = 1<<1,
+ REBASE_DIFFSTAT = 1<<2,
+ REBASE_FORCE = 1<<3,
+ REBASE_INTERACTIVE_EXPLICIT = 1<<4,
+ } flags;
+ struct strvec git_am_opts;
+ enum action action;
+ char *reflog_action;
+ int signoff;
+ int allow_rerere_autoupdate;
+ int keep_empty;
+ int autosquash;
+ char *gpg_sign_opt;
+ int autostash;
+ int committer_date_is_author_date;
+ int ignore_date;
+ char *cmd;
+ int allow_empty_message;
+ int rebase_merges, rebase_cousins;
+ char *strategy, *strategy_opts;
+ struct strbuf git_format_patch_opt;
+ int reschedule_failed_exec;
+ int reapply_cherry_picks;
+ int fork_point;
+ int update_refs;
+};
+
+#define REBASE_OPTIONS_INIT { \
+ .type = REBASE_UNSPECIFIED, \
+ .empty = EMPTY_UNSPECIFIED, \
+ .keep_empty = 1, \
+ .default_backend = "merge", \
+ .flags = REBASE_NO_QUIET, \
+ .git_am_opts = STRVEC_INIT, \
+ .git_format_patch_opt = STRBUF_INIT, \
+ .fork_point = -1, \
+ }
+
+static struct replay_opts get_replay_opts(const struct rebase_options *opts)
+{
+ struct replay_opts replay = REPLAY_OPTS_INIT;
+
+ replay.action = REPLAY_INTERACTIVE_REBASE;
+ replay.strategy = NULL;
+ sequencer_init_config(&replay);
+
+ replay.signoff = opts->signoff;
+ replay.allow_ff = !(opts->flags & REBASE_FORCE);
+ if (opts->allow_rerere_autoupdate)
+ replay.allow_rerere_auto = opts->allow_rerere_autoupdate;
+ replay.allow_empty = 1;
+ replay.allow_empty_message = opts->allow_empty_message;
+ replay.drop_redundant_commits = (opts->empty == EMPTY_DROP);
+ replay.keep_redundant_commits = (opts->empty == EMPTY_KEEP);
+ replay.quiet = !(opts->flags & REBASE_NO_QUIET);
+ replay.verbose = opts->flags & REBASE_VERBOSE;
+ replay.reschedule_failed_exec = opts->reschedule_failed_exec;
+ replay.committer_date_is_author_date =
+ opts->committer_date_is_author_date;
+ replay.ignore_date = opts->ignore_date;
+ replay.gpg_sign = xstrdup_or_null(opts->gpg_sign_opt);
+ replay.reflog_action = xstrdup(opts->reflog_action);
+ if (opts->strategy)
+ replay.strategy = xstrdup_or_null(opts->strategy);
+ else if (!replay.strategy && replay.default_strategy) {
+ replay.strategy = replay.default_strategy;
+ replay.default_strategy = NULL;
+ }
+
+ if (opts->strategy_opts)
+ parse_strategy_opts(&replay, opts->strategy_opts);
+
+ if (opts->squash_onto) {
+ oidcpy(&replay.squash_onto, opts->squash_onto);
+ replay.have_squash_onto = 1;
+ }
+
+ return replay;
+}
+
+static int edit_todo_file(unsigned flags)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT,
+ new_todo = TODO_LIST_INIT;
+ int res = 0;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+
+ strbuf_stripspace(&todo_list.buf, 1);
+ res = edit_todo_list(the_repository, &todo_list, &new_todo, NULL, NULL, flags);
+ if (!res && todo_list_write_to_file(the_repository, &new_todo, todo_file,
+ NULL, NULL, -1, flags & ~(TODO_LIST_SHORTEN_IDS)))
+ res = error_errno(_("could not write '%s'"), todo_file);
+
+ todo_list_release(&todo_list);
+ todo_list_release(&new_todo);
+
+ return res;
+}
+
+static int get_revision_ranges(struct commit *upstream, struct commit *onto,
+ struct object_id *orig_head, char **revisions,
+ char **shortrevisions)
+{
+ struct commit *base_rev = upstream ? upstream : onto;
+ const char *shorthead;
+
+ *revisions = xstrfmt("%s...%s", oid_to_hex(&base_rev->object.oid),
+ oid_to_hex(orig_head));
+
+ shorthead = find_unique_abbrev(orig_head, DEFAULT_ABBREV);
+
+ if (upstream) {
+ const char *shortrev;
+
+ shortrev = find_unique_abbrev(&base_rev->object.oid,
+ DEFAULT_ABBREV);
+
+ *shortrevisions = xstrfmt("%s..%s", shortrev, shorthead);
+ } else
+ *shortrevisions = xstrdup(shorthead);
+
+ return 0;
+}
+
+static int init_basic_state(struct replay_opts *opts, const char *head_name,
+ struct commit *onto,
+ const struct object_id *orig_head)
+{
+ FILE *interactive;
+
+ if (!is_directory(merge_dir()) && mkdir_in_gitdir(merge_dir()))
+ return error_errno(_("could not create temporary %s"), merge_dir());
+
+ delete_reflog("REBASE_HEAD");
+
+ interactive = fopen(path_interactive(), "w");
+ if (!interactive)
+ return error_errno(_("could not mark as interactive"));
+ fclose(interactive);
+
+ return write_basic_state(opts, head_name, onto, orig_head);
+}
+
+static void split_exec_commands(const char *cmd, struct string_list *commands)
+{
+ if (cmd && *cmd) {
+ string_list_split(commands, cmd, '\n', -1);
+
+ /* rebase.c adds a new line to cmd after every command,
+ * so here the last command is always empty */
+ string_list_remove_empty_items(commands, 0);
+ }
+}
+
+static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
+{
+ int ret;
+ char *revisions = NULL, *shortrevisions = NULL;
+ struct strvec make_script_args = STRVEC_INIT;
+ struct todo_list todo_list = TODO_LIST_INIT;
+ struct replay_opts replay = get_replay_opts(opts);
+ struct string_list commands = STRING_LIST_INIT_DUP;
+
+ if (get_revision_ranges(opts->upstream, opts->onto, &opts->orig_head->object.oid,
+ &revisions, &shortrevisions))
+ return -1;
+
+ if (init_basic_state(&replay,
+ opts->head_name ? opts->head_name : "detached HEAD",
+ opts->onto, &opts->orig_head->object.oid)) {
+ free(revisions);
+ free(shortrevisions);
+
+ return -1;
+ }
+
+ if (!opts->upstream && opts->squash_onto)
+ write_file(path_squash_onto(), "%s\n",
+ oid_to_hex(opts->squash_onto));
+
+ strvec_pushl(&make_script_args, "", revisions, NULL);
+ if (opts->restrict_revision)
+ strvec_pushf(&make_script_args, "^%s",
+ oid_to_hex(&opts->restrict_revision->object.oid));
+
+ ret = sequencer_make_script(the_repository, &todo_list.buf,
+ make_script_args.nr, make_script_args.v,
+ flags);
+
+ if (ret)
+ error(_("could not generate todo list"));
+ else {
+ discard_index(&the_index);
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list))
+ BUG("unusable todo list");
+
+ split_exec_commands(opts->cmd, &commands);
+ ret = complete_action(the_repository, &replay, flags,
+ shortrevisions, opts->onto_name, opts->onto,
+ &opts->orig_head->object.oid, &commands,
+ opts->autosquash, opts->update_refs, &todo_list);
+ }
+
+ string_list_clear(&commands, 0);
+ free(revisions);
+ free(shortrevisions);
+ todo_list_release(&todo_list);
+ strvec_clear(&make_script_args);
+
+ return ret;
+}
+
+static int run_sequencer_rebase(struct rebase_options *opts)
+{
+ unsigned flags = 0;
+ int abbreviate_commands = 0, ret = 0;
+
+ git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands);
+
+ flags |= opts->keep_empty ? TODO_LIST_KEEP_EMPTY : 0;
+ flags |= abbreviate_commands ? TODO_LIST_ABBREVIATE_CMDS : 0;
+ flags |= opts->rebase_merges ? TODO_LIST_REBASE_MERGES : 0;
+ flags |= opts->rebase_cousins > 0 ? TODO_LIST_REBASE_COUSINS : 0;
+ flags |= opts->root_with_onto ? TODO_LIST_ROOT_WITH_ONTO : 0;
+ flags |= opts->reapply_cherry_picks ? TODO_LIST_REAPPLY_CHERRY_PICKS : 0;
+ flags |= opts->flags & REBASE_NO_QUIET ? TODO_LIST_WARN_SKIPPED_CHERRY_PICKS : 0;
+
+ switch (opts->action) {
+ case ACTION_NONE: {
+ if (!opts->onto && !opts->upstream)
+ die(_("a base commit must be provided with --upstream or --onto"));
+
+ ret = do_interactive_rebase(opts, flags);
+ break;
+ }
+ case ACTION_SKIP: {
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+
+ rerere_clear(the_repository, &merge_rr);
+ }
+ /* fallthrough */
+ case ACTION_CONTINUE: {
+ struct replay_opts replay_opts = get_replay_opts(opts);
+
+ ret = sequencer_continue(the_repository, &replay_opts);
+ break;
+ }
+ case ACTION_EDIT_TODO:
+ ret = edit_todo_file(flags);
+ break;
+ case ACTION_SHOW_CURRENT_PATCH: {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "show", "REBASE_HEAD", "--", NULL);
+ ret = run_command(&cmd);
+
+ break;
+ }
+ default:
+ BUG("invalid command '%d'", opts->action);
+ }
+
+ return ret;
+}
+
+static void imply_merge(struct rebase_options *opts, const char *option);
+static int parse_opt_keep_empty(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct rebase_options *opts = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+
+ imply_merge(opts, unset ? "--no-keep-empty" : "--keep-empty");
+ opts->keep_empty = !unset;
+ opts->type = REBASE_MERGE;
+ return 0;
+}
+
+static int is_merge(struct rebase_options *opts)
+{
+ return opts->type == REBASE_MERGE;
+}
+
+static void imply_merge(struct rebase_options *opts, const char *option)
+{
+ switch (opts->type) {
+ case REBASE_APPLY:
+ die(_("%s requires the merge backend"), option);
+ break;
+ case REBASE_MERGE:
+ break;
+ default:
+ opts->type = REBASE_MERGE; /* implied */
+ break;
+ }
+}
+
+/* Returns the filename prefixed by the state_dir */
+static const char *state_dir_path(const char *filename, struct rebase_options *opts)
+{
+ static struct strbuf path = STRBUF_INIT;
+ static size_t prefix_len;
+
+ if (!prefix_len) {
+ strbuf_addf(&path, "%s/", opts->state_dir);
+ prefix_len = path.len;
+ }
+
+ strbuf_setlen(&path, prefix_len);
+ strbuf_addstr(&path, filename);
+ return path.buf;
+}
+
+/* Initialize the rebase options from the state directory. */
+static int read_basic_state(struct rebase_options *opts)
+{
+ struct strbuf head_name = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct object_id oid;
+
+ if (!read_oneliner(&head_name, state_dir_path("head-name", opts),
+ READ_ONELINER_WARN_MISSING) ||
+ !read_oneliner(&buf, state_dir_path("onto", opts),
+ READ_ONELINER_WARN_MISSING))
+ return -1;
+ opts->head_name = starts_with(head_name.buf, "refs/") ?
+ xstrdup(head_name.buf) : NULL;
+ strbuf_release(&head_name);
+ if (get_oid_hex(buf.buf, &oid) ||
+ !(opts->onto = lookup_commit_object(the_repository, &oid)))
+ return error(_("invalid onto: '%s'"), buf.buf);
+
+ /*
+ * We always write to orig-head, but interactive rebase used to write to
+ * head. Fall back to reading from head to cover for the case that the
+ * user upgraded git with an ongoing interactive rebase.
+ */
+ strbuf_reset(&buf);
+ if (file_exists(state_dir_path("orig-head", opts))) {
+ if (!read_oneliner(&buf, state_dir_path("orig-head", opts),
+ READ_ONELINER_WARN_MISSING))
+ return -1;
+ } else if (!read_oneliner(&buf, state_dir_path("head", opts),
+ READ_ONELINER_WARN_MISSING))
+ return -1;
+ if (get_oid_hex(buf.buf, &oid) ||
+ !(opts->orig_head = lookup_commit_object(the_repository, &oid)))
+ return error(_("invalid orig-head: '%s'"), buf.buf);
+
+ if (file_exists(state_dir_path("quiet", opts)))
+ opts->flags &= ~REBASE_NO_QUIET;
+ else
+ opts->flags |= REBASE_NO_QUIET;
+
+ if (file_exists(state_dir_path("verbose", opts)))
+ opts->flags |= REBASE_VERBOSE;
+
+ if (file_exists(state_dir_path("signoff", opts))) {
+ opts->signoff = 1;
+ opts->flags |= REBASE_FORCE;
+ }
+
+ if (file_exists(state_dir_path("allow_rerere_autoupdate", opts))) {
+ strbuf_reset(&buf);
+ if (!read_oneliner(&buf, state_dir_path("allow_rerere_autoupdate", opts),
+ READ_ONELINER_WARN_MISSING))
+ return -1;
+ if (!strcmp(buf.buf, "--rerere-autoupdate"))
+ opts->allow_rerere_autoupdate = RERERE_AUTOUPDATE;
+ else if (!strcmp(buf.buf, "--no-rerere-autoupdate"))
+ opts->allow_rerere_autoupdate = RERERE_NOAUTOUPDATE;
+ else
+ warning(_("ignoring invalid allow_rerere_autoupdate: "
+ "'%s'"), buf.buf);
+ }
+
+ if (file_exists(state_dir_path("gpg_sign_opt", opts))) {
+ strbuf_reset(&buf);
+ if (!read_oneliner(&buf, state_dir_path("gpg_sign_opt", opts),
+ READ_ONELINER_WARN_MISSING))
+ return -1;
+ free(opts->gpg_sign_opt);
+ opts->gpg_sign_opt = xstrdup(buf.buf);
+ }
+
+ if (file_exists(state_dir_path("strategy", opts))) {
+ strbuf_reset(&buf);
+ if (!read_oneliner(&buf, state_dir_path("strategy", opts),
+ READ_ONELINER_WARN_MISSING))
+ return -1;
+ free(opts->strategy);
+ opts->strategy = xstrdup(buf.buf);
+ }
+
+ if (file_exists(state_dir_path("strategy_opts", opts))) {
+ strbuf_reset(&buf);
+ if (!read_oneliner(&buf, state_dir_path("strategy_opts", opts),
+ READ_ONELINER_WARN_MISSING))
+ return -1;
+ free(opts->strategy_opts);
+ opts->strategy_opts = xstrdup(buf.buf);
+ }
+
+ strbuf_release(&buf);
+
+ return 0;
+}
+
+static int rebase_write_basic_state(struct rebase_options *opts)
+{
+ write_file(state_dir_path("head-name", opts), "%s",
+ opts->head_name ? opts->head_name : "detached HEAD");
+ write_file(state_dir_path("onto", opts), "%s",
+ opts->onto ? oid_to_hex(&opts->onto->object.oid) : "");
+ write_file(state_dir_path("orig-head", opts), "%s",
+ oid_to_hex(&opts->orig_head->object.oid));
+ if (!(opts->flags & REBASE_NO_QUIET))
+ write_file(state_dir_path("quiet", opts), "%s", "");
+ if (opts->flags & REBASE_VERBOSE)
+ write_file(state_dir_path("verbose", opts), "%s", "");
+ if (opts->strategy)
+ write_file(state_dir_path("strategy", opts), "%s",
+ opts->strategy);
+ if (opts->strategy_opts)
+ write_file(state_dir_path("strategy_opts", opts), "%s",
+ opts->strategy_opts);
+ if (opts->allow_rerere_autoupdate > 0)
+ write_file(state_dir_path("allow_rerere_autoupdate", opts),
+ "-%s-rerere-autoupdate",
+ opts->allow_rerere_autoupdate == RERERE_AUTOUPDATE ?
+ "" : "-no");
+ if (opts->gpg_sign_opt)
+ write_file(state_dir_path("gpg_sign_opt", opts), "%s",
+ opts->gpg_sign_opt);
+ if (opts->signoff)
+ write_file(state_dir_path("signoff", opts), "--signoff");
+
+ return 0;
+}
+
+static int finish_rebase(struct rebase_options *opts)
+{
+ struct strbuf dir = STRBUF_INIT;
+ int ret = 0;
+
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
+ unlink(git_path_auto_merge(the_repository));
+ apply_autostash(state_dir_path("autostash", opts));
+ /*
+ * We ignore errors in 'git maintenance run --auto', since the
+ * user should see them.
+ */
+ run_auto_maintenance(!(opts->flags & (REBASE_NO_QUIET|REBASE_VERBOSE)));
+ if (opts->type == REBASE_MERGE) {
+ struct replay_opts replay = REPLAY_OPTS_INIT;
+
+ replay.action = REPLAY_INTERACTIVE_REBASE;
+ ret = sequencer_remove_state(&replay);
+ } else {
+ strbuf_addstr(&dir, opts->state_dir);
+ if (remove_dir_recursively(&dir, 0))
+ ret = error(_("could not remove '%s'"),
+ opts->state_dir);
+ strbuf_release(&dir);
+ }
+
+ return ret;
+}
+
+static int move_to_original_branch(struct rebase_options *opts)
+{
+ struct strbuf branch_reflog = STRBUF_INIT, head_reflog = STRBUF_INIT;
+ struct reset_head_opts ropts = { 0 };
+ int ret;
+
+ if (!opts->head_name)
+ return 0; /* nothing to move back to */
+
+ if (!opts->onto)
+ BUG("move_to_original_branch without onto");
+
+ strbuf_addf(&branch_reflog, "%s (finish): %s onto %s",
+ opts->reflog_action,
+ opts->head_name, oid_to_hex(&opts->onto->object.oid));
+ strbuf_addf(&head_reflog, "%s (finish): returning to %s",
+ opts->reflog_action, opts->head_name);
+ ropts.branch = opts->head_name;
+ ropts.flags = RESET_HEAD_REFS_ONLY;
+ ropts.branch_msg = branch_reflog.buf;
+ ropts.head_msg = head_reflog.buf;
+ ret = reset_head(the_repository, &ropts);
+
+ strbuf_release(&branch_reflog);
+ strbuf_release(&head_reflog);
+ return ret;
+}
+
+static const char *resolvemsg =
+N_("Resolve all conflicts manually, mark them as resolved with\n"
+"\"git add/rm <conflicted_files>\", then run \"git rebase --continue\".\n"
+"You can instead skip this commit: run \"git rebase --skip\".\n"
+"To abort and get back to the state before \"git rebase\", run "
+"\"git rebase --abort\".");
+
+static int run_am(struct rebase_options *opts)
+{
+ struct child_process am = CHILD_PROCESS_INIT;
+ struct child_process format_patch = CHILD_PROCESS_INIT;
+ struct strbuf revisions = STRBUF_INIT;
+ int status;
+ char *rebased_patches;
+
+ am.git_cmd = 1;
+ strvec_push(&am.args, "am");
+ strvec_pushf(&am.env, GIT_REFLOG_ACTION_ENVIRONMENT "=%s (pick)",
+ opts->reflog_action);
+ if (opts->action == ACTION_CONTINUE) {
+ strvec_push(&am.args, "--resolved");
+ strvec_pushf(&am.args, "--resolvemsg=%s", resolvemsg);
+ if (opts->gpg_sign_opt)
+ strvec_push(&am.args, opts->gpg_sign_opt);
+ status = run_command(&am);
+ if (status)
+ return status;
+
+ return move_to_original_branch(opts);
+ }
+ if (opts->action == ACTION_SKIP) {
+ strvec_push(&am.args, "--skip");
+ strvec_pushf(&am.args, "--resolvemsg=%s", resolvemsg);
+ status = run_command(&am);
+ if (status)
+ return status;
+
+ return move_to_original_branch(opts);
+ }
+ if (opts->action == ACTION_SHOW_CURRENT_PATCH) {
+ strvec_push(&am.args, "--show-current-patch");
+ return run_command(&am);
+ }
+
+ strbuf_addf(&revisions, "%s...%s",
+ oid_to_hex(opts->root ?
+ /* this is now equivalent to !opts->upstream */
+ &opts->onto->object.oid :
+ &opts->upstream->object.oid),
+ oid_to_hex(&opts->orig_head->object.oid));
+
+ rebased_patches = xstrdup(git_path("rebased-patches"));
+ format_patch.out = open(rebased_patches,
+ O_WRONLY | O_CREAT | O_TRUNC, 0666);
+ if (format_patch.out < 0) {
+ status = error_errno(_("could not open '%s' for writing"),
+ rebased_patches);
+ free(rebased_patches);
+ strvec_clear(&am.args);
+ return status;
+ }
+
+ format_patch.git_cmd = 1;
+ strvec_pushl(&format_patch.args, "format-patch", "-k", "--stdout",
+ "--full-index", "--cherry-pick", "--right-only",
+ "--src-prefix=a/", "--dst-prefix=b/", "--no-renames",
+ "--no-cover-letter", "--pretty=mboxrd", "--topo-order",
+ "--no-base", NULL);
+ if (opts->git_format_patch_opt.len)
+ strvec_split(&format_patch.args,
+ opts->git_format_patch_opt.buf);
+ strvec_push(&format_patch.args, revisions.buf);
+ if (opts->restrict_revision)
+ strvec_pushf(&format_patch.args, "^%s",
+ oid_to_hex(&opts->restrict_revision->object.oid));
+
+ status = run_command(&format_patch);
+ if (status) {
+ struct reset_head_opts ropts = { 0 };
+ unlink(rebased_patches);
+ free(rebased_patches);
+ strvec_clear(&am.args);
+
+ ropts.oid = &opts->orig_head->object.oid;
+ ropts.branch = opts->head_name;
+ ropts.default_reflog_action = opts->reflog_action;
+ reset_head(the_repository, &ropts);
+ error(_("\ngit encountered an error while preparing the "
+ "patches to replay\n"
+ "these revisions:\n"
+ "\n %s\n\n"
+ "As a result, git cannot rebase them."),
+ opts->revisions);
+
+ strbuf_release(&revisions);
+ return status;
+ }
+ strbuf_release(&revisions);
+
+ am.in = open(rebased_patches, O_RDONLY);
+ if (am.in < 0) {
+ status = error_errno(_("could not open '%s' for reading"),
+ rebased_patches);
+ free(rebased_patches);
+ strvec_clear(&am.args);
+ return status;
+ }
+
+ strvec_pushv(&am.args, opts->git_am_opts.v);
+ strvec_push(&am.args, "--rebasing");
+ strvec_pushf(&am.args, "--resolvemsg=%s", resolvemsg);
+ strvec_push(&am.args, "--patch-format=mboxrd");
+ if (opts->allow_rerere_autoupdate == RERERE_AUTOUPDATE)
+ strvec_push(&am.args, "--rerere-autoupdate");
+ else if (opts->allow_rerere_autoupdate == RERERE_NOAUTOUPDATE)
+ strvec_push(&am.args, "--no-rerere-autoupdate");
+ if (opts->gpg_sign_opt)
+ strvec_push(&am.args, opts->gpg_sign_opt);
+ status = run_command(&am);
+ unlink(rebased_patches);
+ free(rebased_patches);
+
+ if (!status) {
+ return move_to_original_branch(opts);
+ }
+
+ if (is_directory(opts->state_dir))
+ rebase_write_basic_state(opts);
+
+ return status;
+}
+
+static int run_specific_rebase(struct rebase_options *opts)
+{
+ int status;
+
+ if (opts->type == REBASE_MERGE) {
+ /* Run sequencer-based rebase */
+ setenv("GIT_CHERRY_PICK_HELP", resolvemsg, 1);
+ if (!(opts->flags & REBASE_INTERACTIVE_EXPLICIT)) {
+ setenv("GIT_SEQUENCE_EDITOR", ":", 1);
+ opts->autosquash = 0;
+ }
+ if (opts->gpg_sign_opt) {
+ /* remove the leading "-S" */
+ char *tmp = xstrdup(opts->gpg_sign_opt + 2);
+ free(opts->gpg_sign_opt);
+ opts->gpg_sign_opt = tmp;
+ }
+
+ status = run_sequencer_rebase(opts);
+ } else if (opts->type == REBASE_APPLY)
+ status = run_am(opts);
+ else
+ BUG("Unhandled rebase type %d", opts->type);
+
+ if (opts->dont_finish_rebase)
+ ; /* do nothing */
+ else if (opts->type == REBASE_MERGE)
+ ; /* merge backend cleans up after itself */
+ else if (status == 0) {
+ if (!file_exists(state_dir_path("stopped-sha", opts)))
+ finish_rebase(opts);
+ } else if (status == 2) {
+ struct strbuf dir = STRBUF_INIT;
+
+ apply_autostash(state_dir_path("autostash", opts));
+ strbuf_addstr(&dir, opts->state_dir);
+ remove_dir_recursively(&dir, 0);
+ strbuf_release(&dir);
+ die("Nothing to do");
+ }
+
+ return status ? -1 : 0;
+}
+
+static int rebase_config(const char *var, const char *value, void *data)
+{
+ struct rebase_options *opts = data;
+
+ if (!strcmp(var, "rebase.stat")) {
+ if (git_config_bool(var, value))
+ opts->flags |= REBASE_DIFFSTAT;
+ else
+ opts->flags &= ~REBASE_DIFFSTAT;
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.autosquash")) {
+ opts->autosquash = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "commit.gpgsign")) {
+ free(opts->gpg_sign_opt);
+ opts->gpg_sign_opt = git_config_bool(var, value) ?
+ xstrdup("-S") : NULL;
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.autostash")) {
+ opts->autostash = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.updaterefs")) {
+ opts->update_refs = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.reschedulefailedexec")) {
+ opts->reschedule_failed_exec = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.forkpoint")) {
+ opts->fork_point = git_config_bool(var, value) ? -1 : 0;
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.backend")) {
+ return git_config_string(&opts->default_backend, var, value);
+ }
+
+ return git_default_config(var, value, data);
+}
+
+static int checkout_up_to_date(struct rebase_options *options)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct reset_head_opts ropts = { 0 };
+ int ret = 0;
+
+ strbuf_addf(&buf, "%s: checkout %s",
+ options->reflog_action, options->switch_to);
+ ropts.oid = &options->orig_head->object.oid;
+ ropts.branch = options->head_name;
+ ropts.flags = RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ if (!ropts.branch)
+ ropts.flags |= RESET_HEAD_DETACH;
+ ropts.head_msg = buf.buf;
+ if (reset_head(the_repository, &ropts) < 0)
+ ret = error(_("could not switch to %s"), options->switch_to);
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * Determines whether the commits in from..to are linear, i.e. contain
+ * no merge commits. This function *expects* `from` to be an ancestor of
+ * `to`.
+ */
+static int is_linear_history(struct commit *from, struct commit *to)
+{
+ while (to && to != from) {
+ parse_commit(to);
+ if (!to->parents)
+ return 1;
+ if (to->parents->next)
+ return 0;
+ to = to->parents->item;
+ }
+ return 1;
+}
+
+static int can_fast_forward(struct commit *onto, struct commit *upstream,
+ struct commit *restrict_revision,
+ struct commit *head, struct object_id *branch_base)
+{
+ struct commit_list *merge_bases = NULL;
+ int res = 0;
+
+ if (is_null_oid(branch_base))
+ goto done; /* fill_branch_base() found multiple merge bases */
+
+ if (!oideq(branch_base, &onto->object.oid))
+ goto done;
+
+ if (restrict_revision && !oideq(&restrict_revision->object.oid, branch_base))
+ goto done;
+
+ if (!upstream)
+ goto done;
+
+ merge_bases = get_merge_bases(upstream, head);
+ if (!merge_bases || merge_bases->next)
+ goto done;
+
+ if (!oideq(&onto->object.oid, &merge_bases->item->object.oid))
+ goto done;
+
+ res = 1;
+
+done:
+ free_commit_list(merge_bases);
+ return res && is_linear_history(onto, head);
+}
+
+static void fill_branch_base(struct rebase_options *options,
+ struct object_id *branch_base)
+{
+ struct commit_list *merge_bases = NULL;
+
+ merge_bases = get_merge_bases(options->onto, options->orig_head);
+ if (!merge_bases || merge_bases->next)
+ oidcpy(branch_base, null_oid());
+ else
+ oidcpy(branch_base, &merge_bases->item->object.oid);
+
+ free_commit_list(merge_bases);
+}
+
+static int parse_opt_am(const struct option *opt, const char *arg, int unset)
+{
+ struct rebase_options *opts = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ opts->type = REBASE_APPLY;
+
+ return 0;
+}
+
+/* -i followed by -m is still -i */
+static int parse_opt_merge(const struct option *opt, const char *arg, int unset)
+{
+ struct rebase_options *opts = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ if (!is_merge(opts))
+ opts->type = REBASE_MERGE;
+
+ return 0;
+}
+
+/* -i followed by -r is still explicitly interactive, but -r alone is not */
+static int parse_opt_interactive(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct rebase_options *opts = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ opts->type = REBASE_MERGE;
+ opts->flags |= REBASE_INTERACTIVE_EXPLICIT;
+
+ return 0;
+}
+
+static enum empty_type parse_empty_value(const char *value)
+{
+ if (!strcasecmp(value, "drop"))
+ return EMPTY_DROP;
+ else if (!strcasecmp(value, "keep"))
+ return EMPTY_KEEP;
+ else if (!strcasecmp(value, "ask"))
+ return EMPTY_ASK;
+
+ die(_("unrecognized empty type '%s'; valid values are \"drop\", \"keep\", and \"ask\"."), value);
+}
+
+static int parse_opt_empty(const struct option *opt, const char *arg, int unset)
+{
+ struct rebase_options *options = opt->value;
+ enum empty_type value = parse_empty_value(arg);
+
+ BUG_ON_OPT_NEG(unset);
+
+ options->empty = value;
+ return 0;
+}
+
+static void NORETURN error_on_missing_default_upstream(void)
+{
+ struct branch *current_branch = branch_get(NULL);
+
+ printf(_("%s\n"
+ "Please specify which branch you want to rebase against.\n"
+ "See git-rebase(1) for details.\n"
+ "\n"
+ " git rebase '<branch>'\n"
+ "\n"),
+ current_branch ? _("There is no tracking information for "
+ "the current branch.") :
+ _("You are not currently on a branch."));
+
+ if (current_branch) {
+ const char *remote = current_branch->remote_name;
+
+ if (!remote)
+ remote = _("<remote>");
+
+ printf(_("If you wish to set tracking information for this "
+ "branch you can do so with:\n"
+ "\n"
+ " git branch --set-upstream-to=%s/<branch> %s\n"
+ "\n"),
+ remote, current_branch->name);
+ }
+ exit(1);
+}
+
+static int check_exec_cmd(const char *cmd)
+{
+ if (strchr(cmd, '\n'))
+ return error(_("exec commands cannot contain newlines"));
+
+ /* Does the command consist purely of whitespace? */
+ if (!cmd[strspn(cmd, " \t\r\f\v")])
+ return error(_("empty exec command"));
+
+ return 0;
+}
+
+int cmd_rebase(int argc, const char **argv, const char *prefix)
+{
+ struct rebase_options options = REBASE_OPTIONS_INIT;
+ const char *branch_name;
+ int ret, flags, total_argc, in_progress = 0;
+ int keep_base = 0;
+ int ok_to_skip_pre_rebase = 0;
+ struct strbuf msg = STRBUF_INIT;
+ struct strbuf revisions = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct object_id branch_base;
+ int ignore_whitespace = 0;
+ const char *gpg_sign = NULL;
+ struct string_list exec = STRING_LIST_INIT_NODUP;
+ const char *rebase_merges = NULL;
+ struct string_list strategy_options = STRING_LIST_INIT_NODUP;
+ struct object_id squash_onto;
+ char *squash_onto_name = NULL;
+ int reschedule_failed_exec = -1;
+ int allow_preemptive_ff = 1;
+ int preserve_merges_selected = 0;
+ struct reset_head_opts ropts = { 0 };
+ struct option builtin_rebase_options[] = {
+ OPT_STRING(0, "onto", &options.onto_name,
+ N_("revision"),
+ N_("rebase onto given branch instead of upstream")),
+ OPT_BOOL(0, "keep-base", &keep_base,
+ N_("use the merge-base of upstream and branch as the current base")),
+ OPT_BOOL(0, "no-verify", &ok_to_skip_pre_rebase,
+ N_("allow pre-rebase hook to run")),
+ OPT_NEGBIT('q', "quiet", &options.flags,
+ N_("be quiet. implies --no-stat"),
+ REBASE_NO_QUIET | REBASE_VERBOSE | REBASE_DIFFSTAT),
+ OPT_BIT('v', "verbose", &options.flags,
+ N_("display a diffstat of what changed upstream"),
+ REBASE_NO_QUIET | REBASE_VERBOSE | REBASE_DIFFSTAT),
+ {OPTION_NEGBIT, 'n', "no-stat", &options.flags, NULL,
+ N_("do not show diffstat of what changed upstream"),
+ PARSE_OPT_NOARG, NULL, REBASE_DIFFSTAT },
+ OPT_BOOL(0, "signoff", &options.signoff,
+ N_("add a Signed-off-by trailer to each commit")),
+ OPT_BOOL(0, "committer-date-is-author-date",
+ &options.committer_date_is_author_date,
+ N_("make committer date match author date")),
+ OPT_BOOL(0, "reset-author-date", &options.ignore_date,
+ N_("ignore author date and use current date")),
+ OPT_HIDDEN_BOOL(0, "ignore-date", &options.ignore_date,
+ N_("synonym of --reset-author-date")),
+ OPT_PASSTHRU_ARGV('C', NULL, &options.git_am_opts, N_("n"),
+ N_("passed to 'git apply'"), 0),
+ OPT_BOOL(0, "ignore-whitespace", &ignore_whitespace,
+ N_("ignore changes in whitespace")),
+ OPT_PASSTHRU_ARGV(0, "whitespace", &options.git_am_opts,
+ N_("action"), N_("passed to 'git apply'"), 0),
+ OPT_BIT('f', "force-rebase", &options.flags,
+ N_("cherry-pick all commits, even if unchanged"),
+ REBASE_FORCE),
+ OPT_BIT(0, "no-ff", &options.flags,
+ N_("cherry-pick all commits, even if unchanged"),
+ REBASE_FORCE),
+ OPT_CMDMODE(0, "continue", &options.action, N_("continue"),
+ ACTION_CONTINUE),
+ OPT_CMDMODE(0, "skip", &options.action,
+ N_("skip current patch and continue"), ACTION_SKIP),
+ OPT_CMDMODE(0, "abort", &options.action,
+ N_("abort and check out the original branch"),
+ ACTION_ABORT),
+ OPT_CMDMODE(0, "quit", &options.action,
+ N_("abort but keep HEAD where it is"), ACTION_QUIT),
+ OPT_CMDMODE(0, "edit-todo", &options.action, N_("edit the todo list "
+ "during an interactive rebase"), ACTION_EDIT_TODO),
+ OPT_CMDMODE(0, "show-current-patch", &options.action,
+ N_("show the patch file being applied or merged"),
+ ACTION_SHOW_CURRENT_PATCH),
+ OPT_CALLBACK_F(0, "apply", &options, NULL,
+ N_("use apply strategies to rebase"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ parse_opt_am),
+ OPT_CALLBACK_F('m', "merge", &options, NULL,
+ N_("use merging strategies to rebase"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ parse_opt_merge),
+ OPT_CALLBACK_F('i', "interactive", &options, NULL,
+ N_("let the user edit the list of commits to rebase"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ parse_opt_interactive),
+ OPT_SET_INT_F('p', "preserve-merges", &preserve_merges_selected,
+ N_("(REMOVED) was: try to recreate merges "
+ "instead of ignoring them"),
+ 1, PARSE_OPT_HIDDEN),
+ OPT_RERERE_AUTOUPDATE(&options.allow_rerere_autoupdate),
+ OPT_CALLBACK_F(0, "empty", &options, "{drop,keep,ask}",
+ N_("how to handle commits that become empty"),
+ PARSE_OPT_NONEG, parse_opt_empty),
+ OPT_CALLBACK_F('k', "keep-empty", &options, NULL,
+ N_("keep commits which start empty"),
+ PARSE_OPT_NOARG | PARSE_OPT_HIDDEN,
+ parse_opt_keep_empty),
+ OPT_BOOL(0, "autosquash", &options.autosquash,
+ N_("move commits that begin with "
+ "squash!/fixup! under -i")),
+ OPT_BOOL(0, "update-refs", &options.update_refs,
+ N_("update branches that point to commits "
+ "that are being rebased")),
+ { OPTION_STRING, 'S', "gpg-sign", &gpg_sign, N_("key-id"),
+ N_("GPG-sign commits"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_AUTOSTASH(&options.autostash),
+ OPT_STRING_LIST('x', "exec", &exec, N_("exec"),
+ N_("add exec lines after each commit of the "
+ "editable list")),
+ OPT_BOOL_F(0, "allow-empty-message",
+ &options.allow_empty_message,
+ N_("allow rebasing commits with empty messages"),
+ PARSE_OPT_HIDDEN),
+ {OPTION_STRING, 'r', "rebase-merges", &rebase_merges,
+ N_("mode"),
+ N_("try to rebase merges instead of skipping them"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t)""},
+ OPT_BOOL(0, "fork-point", &options.fork_point,
+ N_("use 'merge-base --fork-point' to refine upstream")),
+ OPT_STRING('s', "strategy", &options.strategy,
+ N_("strategy"), N_("use the given merge strategy")),
+ OPT_STRING_LIST('X', "strategy-option", &strategy_options,
+ N_("option"),
+ N_("pass the argument through to the merge "
+ "strategy")),
+ OPT_BOOL(0, "root", &options.root,
+ N_("rebase all reachable commits up to the root(s)")),
+ OPT_BOOL(0, "reschedule-failed-exec",
+ &reschedule_failed_exec,
+ N_("automatically re-schedule any `exec` that fails")),
+ OPT_BOOL(0, "reapply-cherry-picks", &options.reapply_cherry_picks,
+ N_("apply all changes, even those already present upstream")),
+ OPT_END(),
+ };
+ int i;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ options.reapply_cherry_picks = -1;
+ options.allow_empty_message = 1;
+ git_config(rebase_config, &options);
+ /* options.gpg_sign_opt will be either "-S" or NULL */
+ gpg_sign = options.gpg_sign_opt ? "" : NULL;
+ FREE_AND_NULL(options.gpg_sign_opt);
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/applying", apply_dir());
+ if (file_exists(buf.buf))
+ die(_("It looks like 'git am' is in progress. Cannot rebase."));
+
+ if (is_directory(apply_dir())) {
+ options.type = REBASE_APPLY;
+ options.state_dir = apply_dir();
+ } else if (is_directory(merge_dir())) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/rewritten", merge_dir());
+ if (!(options.action == ACTION_ABORT) && is_directory(buf.buf)) {
+ die(_("`rebase --preserve-merges` (-p) is no longer supported.\n"
+ "Use `git rebase --abort` to terminate current rebase.\n"
+ "Or downgrade to v2.33, or earlier, to complete the rebase."));
+ } else {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/interactive", merge_dir());
+ options.type = REBASE_MERGE;
+ if (file_exists(buf.buf))
+ options.flags |= REBASE_INTERACTIVE_EXPLICIT;
+ }
+ options.state_dir = merge_dir();
+ }
+
+ if (options.type != REBASE_UNSPECIFIED)
+ in_progress = 1;
+
+ total_argc = argc;
+ argc = parse_options(argc, argv, prefix,
+ builtin_rebase_options,
+ builtin_rebase_usage, 0);
+
+ if (preserve_merges_selected)
+ die(_("--preserve-merges was replaced by --rebase-merges\n"
+ "Note: Your `pull.rebase` configuration may also be set to 'preserve',\n"
+ "which is no longer supported; use 'merges' instead"));
+
+ if (options.action != ACTION_NONE && total_argc != 2) {
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+ }
+
+ if (argc > 2)
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+
+ if (keep_base) {
+ if (options.onto_name)
+ die(_("options '%s' and '%s' cannot be used together"), "--keep-base", "--onto");
+ if (options.root)
+ die(_("options '%s' and '%s' cannot be used together"), "--keep-base", "--root");
+ /*
+ * --keep-base defaults to --no-fork-point to keep the
+ * base the same.
+ */
+ if (options.fork_point < 0)
+ options.fork_point = 0;
+ }
+ /*
+ * --keep-base defaults to --reapply-cherry-picks to avoid losing
+ * commits when using this option.
+ */
+ if (options.reapply_cherry_picks < 0)
+ options.reapply_cherry_picks = keep_base;
+
+ if (options.root && options.fork_point > 0)
+ die(_("options '%s' and '%s' cannot be used together"), "--root", "--fork-point");
+
+ if (options.action != ACTION_NONE && !in_progress)
+ die(_("No rebase in progress?"));
+
+ if (options.action == ACTION_EDIT_TODO && !is_merge(&options))
+ die(_("The --edit-todo action can only be used during "
+ "interactive rebase."));
+
+ if (trace2_is_enabled()) {
+ if (is_merge(&options))
+ trace2_cmd_mode("interactive");
+ else if (exec.nr)
+ trace2_cmd_mode("interactive-exec");
+ else
+ trace2_cmd_mode(action_names[options.action]);
+ }
+
+ options.reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
+ options.reflog_action =
+ xstrdup(options.reflog_action ? options.reflog_action : "rebase");
+
+ switch (options.action) {
+ case ACTION_CONTINUE: {
+ struct object_id head;
+ struct lock_file lock_file = LOCK_INIT;
+ int fd;
+
+ /* Sanity check */
+ if (get_oid("HEAD", &head))
+ die(_("Cannot read HEAD"));
+
+ fd = repo_hold_locked_index(the_repository, &lock_file, 0);
+ if (repo_read_index(the_repository) < 0)
+ die(_("could not read index"));
+ refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL,
+ NULL);
+ if (0 <= fd)
+ repo_update_index_if_able(the_repository, &lock_file);
+ rollback_lock_file(&lock_file);
+
+ if (has_unstaged_changes(the_repository, 1)) {
+ puts(_("You must edit all merge conflicts and then\n"
+ "mark them as resolved using git add"));
+ exit(1);
+ }
+ if (read_basic_state(&options))
+ exit(1);
+ goto run_rebase;
+ }
+ case ACTION_SKIP: {
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+
+ rerere_clear(the_repository, &merge_rr);
+ string_list_clear(&merge_rr, 1);
+ ropts.flags = RESET_HEAD_HARD;
+ if (reset_head(the_repository, &ropts) < 0)
+ die(_("could not discard worktree changes"));
+ remove_branch_state(the_repository, 0);
+ if (read_basic_state(&options))
+ exit(1);
+ goto run_rebase;
+ }
+ case ACTION_ABORT: {
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+ struct strbuf head_msg = STRBUF_INIT;
+
+ rerere_clear(the_repository, &merge_rr);
+ string_list_clear(&merge_rr, 1);
+
+ if (read_basic_state(&options))
+ exit(1);
+
+ strbuf_addf(&head_msg, "%s (abort): returning to %s",
+ options.reflog_action,
+ options.head_name ? options.head_name
+ : oid_to_hex(&options.orig_head->object.oid));
+ ropts.oid = &options.orig_head->object.oid;
+ ropts.head_msg = head_msg.buf;
+ ropts.branch = options.head_name;
+ ropts.flags = RESET_HEAD_HARD;
+ if (reset_head(the_repository, &ropts) < 0)
+ die(_("could not move back to %s"),
+ oid_to_hex(&options.orig_head->object.oid));
+ remove_branch_state(the_repository, 0);
+ ret = finish_rebase(&options);
+ goto cleanup;
+ }
+ case ACTION_QUIT: {
+ save_autostash(state_dir_path("autostash", &options));
+ if (options.type == REBASE_MERGE) {
+ struct replay_opts replay = REPLAY_OPTS_INIT;
+
+ replay.action = REPLAY_INTERACTIVE_REBASE;
+ ret = sequencer_remove_state(&replay);
+ } else {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, options.state_dir);
+ ret = remove_dir_recursively(&buf, 0);
+ if (ret)
+ error(_("could not remove '%s'"),
+ options.state_dir);
+ }
+ goto cleanup;
+ }
+ case ACTION_EDIT_TODO:
+ options.dont_finish_rebase = 1;
+ goto run_rebase;
+ case ACTION_SHOW_CURRENT_PATCH:
+ options.dont_finish_rebase = 1;
+ goto run_rebase;
+ case ACTION_NONE:
+ break;
+ default:
+ BUG("action: %d", options.action);
+ }
+
+ /* Make sure no rebase is in progress */
+ if (in_progress) {
+ const char *last_slash = strrchr(options.state_dir, '/');
+ const char *state_dir_base =
+ last_slash ? last_slash + 1 : options.state_dir;
+ const char *cmd_live_rebase =
+ "git rebase (--continue | --abort | --skip)";
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "rm -fr \"%s\"", options.state_dir);
+ die(_("It seems that there is already a %s directory, and\n"
+ "I wonder if you are in the middle of another rebase. "
+ "If that is the\n"
+ "case, please try\n\t%s\n"
+ "If that is not the case, please\n\t%s\n"
+ "and run me again. I am stopping in case you still "
+ "have something\n"
+ "valuable there.\n"),
+ state_dir_base, cmd_live_rebase, buf.buf);
+ }
+
+ if ((options.flags & REBASE_INTERACTIVE_EXPLICIT) ||
+ (options.action != ACTION_NONE) ||
+ (exec.nr > 0) ||
+ options.autosquash) {
+ allow_preemptive_ff = 0;
+ }
+ if (options.committer_date_is_author_date || options.ignore_date)
+ options.flags |= REBASE_FORCE;
+
+ for (i = 0; i < options.git_am_opts.nr; i++) {
+ const char *option = options.git_am_opts.v[i], *p;
+ if (!strcmp(option, "--whitespace=fix") ||
+ !strcmp(option, "--whitespace=strip"))
+ allow_preemptive_ff = 0;
+ else if (skip_prefix(option, "-C", &p)) {
+ while (*p)
+ if (!isdigit(*(p++)))
+ die(_("switch `C' expects a "
+ "numerical value"));
+ } else if (skip_prefix(option, "--whitespace=", &p)) {
+ if (*p && strcmp(p, "warn") && strcmp(p, "nowarn") &&
+ strcmp(p, "error") && strcmp(p, "error-all"))
+ die("Invalid whitespace option: '%s'", p);
+ }
+ }
+
+ for (i = 0; i < exec.nr; i++)
+ if (check_exec_cmd(exec.items[i].string))
+ exit(1);
+
+ if (!(options.flags & REBASE_NO_QUIET))
+ strvec_push(&options.git_am_opts, "-q");
+
+ if (options.empty != EMPTY_UNSPECIFIED)
+ imply_merge(&options, "--empty");
+
+ /*
+ * --keep-base implements --reapply-cherry-picks by altering upstream so
+ * it works with both backends.
+ */
+ if (options.reapply_cherry_picks && !keep_base)
+ imply_merge(&options, "--reapply-cherry-picks");
+
+ if (gpg_sign)
+ options.gpg_sign_opt = xstrfmt("-S%s", gpg_sign);
+
+ if (exec.nr) {
+ int i;
+
+ imply_merge(&options, "--exec");
+
+ strbuf_reset(&buf);
+ for (i = 0; i < exec.nr; i++)
+ strbuf_addf(&buf, "exec %s\n", exec.items[i].string);
+ options.cmd = xstrdup(buf.buf);
+ }
+
+ if (rebase_merges) {
+ if (!*rebase_merges)
+ ; /* default mode; do nothing */
+ else if (!strcmp("rebase-cousins", rebase_merges))
+ options.rebase_cousins = 1;
+ else if (strcmp("no-rebase-cousins", rebase_merges))
+ die(_("Unknown mode: %s"), rebase_merges);
+ options.rebase_merges = 1;
+ imply_merge(&options, "--rebase-merges");
+ }
+
+ if (options.type == REBASE_APPLY) {
+ if (ignore_whitespace)
+ strvec_push(&options.git_am_opts,
+ "--ignore-whitespace");
+ if (options.committer_date_is_author_date)
+ strvec_push(&options.git_am_opts,
+ "--committer-date-is-author-date");
+ if (options.ignore_date)
+ strvec_push(&options.git_am_opts, "--ignore-date");
+ } else {
+ /* REBASE_MERGE */
+ if (ignore_whitespace) {
+ string_list_append(&strategy_options,
+ "ignore-space-change");
+ }
+ }
+
+ if (strategy_options.nr) {
+ int i;
+
+ if (!options.strategy)
+ options.strategy = "ort";
+
+ strbuf_reset(&buf);
+ for (i = 0; i < strategy_options.nr; i++)
+ strbuf_addf(&buf, " --%s",
+ strategy_options.items[i].string);
+ options.strategy_opts = xstrdup(buf.buf);
+ }
+
+ if (options.strategy) {
+ options.strategy = xstrdup(options.strategy);
+ switch (options.type) {
+ case REBASE_APPLY:
+ die(_("--strategy requires --merge or --interactive"));
+ case REBASE_MERGE:
+ /* compatible */
+ break;
+ case REBASE_UNSPECIFIED:
+ options.type = REBASE_MERGE;
+ break;
+ default:
+ BUG("unhandled rebase type (%d)", options.type);
+ }
+ }
+
+ if (options.type == REBASE_MERGE)
+ imply_merge(&options, "--merge");
+
+ if (options.root && !options.onto_name)
+ imply_merge(&options, "--root without --onto");
+
+ if (isatty(2) && options.flags & REBASE_NO_QUIET)
+ strbuf_addstr(&options.git_format_patch_opt, " --progress");
+
+ if (options.git_am_opts.nr || options.type == REBASE_APPLY) {
+ /* all am options except -q are compatible only with --apply */
+ for (i = options.git_am_opts.nr - 1; i >= 0; i--)
+ if (strcmp(options.git_am_opts.v[i], "-q"))
+ break;
+
+ if (i >= 0) {
+ if (is_merge(&options))
+ die(_("apply options and merge options "
+ "cannot be used together"));
+ else
+ options.type = REBASE_APPLY;
+ }
+ }
+
+ if (options.type == REBASE_UNSPECIFIED) {
+ if (!strcmp(options.default_backend, "merge"))
+ imply_merge(&options, "--merge");
+ else if (!strcmp(options.default_backend, "apply"))
+ options.type = REBASE_APPLY;
+ else
+ die(_("Unknown rebase backend: %s"),
+ options.default_backend);
+ }
+
+ if (options.type == REBASE_MERGE &&
+ !options.strategy &&
+ getenv("GIT_TEST_MERGE_ALGORITHM"))
+ options.strategy = xstrdup(getenv("GIT_TEST_MERGE_ALGORITHM"));
+
+ switch (options.type) {
+ case REBASE_MERGE:
+ options.state_dir = merge_dir();
+ break;
+ case REBASE_APPLY:
+ options.state_dir = apply_dir();
+ break;
+ default:
+ BUG("options.type was just set above; should be unreachable.");
+ }
+
+ if (options.empty == EMPTY_UNSPECIFIED) {
+ if (options.flags & REBASE_INTERACTIVE_EXPLICIT)
+ options.empty = EMPTY_ASK;
+ else if (exec.nr > 0)
+ options.empty = EMPTY_KEEP;
+ else
+ options.empty = EMPTY_DROP;
+ }
+ if (reschedule_failed_exec > 0 && !is_merge(&options))
+ die(_("--reschedule-failed-exec requires "
+ "--exec or --interactive"));
+ if (reschedule_failed_exec >= 0)
+ options.reschedule_failed_exec = reschedule_failed_exec;
+
+ if (options.signoff) {
+ strvec_push(&options.git_am_opts, "--signoff");
+ options.flags |= REBASE_FORCE;
+ }
+
+ if (!options.root) {
+ if (argc < 1) {
+ struct branch *branch;
+
+ branch = branch_get(NULL);
+ options.upstream_name = branch_get_upstream(branch,
+ NULL);
+ if (!options.upstream_name)
+ error_on_missing_default_upstream();
+ if (options.fork_point < 0)
+ options.fork_point = 1;
+ } else {
+ options.upstream_name = argv[0];
+ argc--;
+ argv++;
+ if (!strcmp(options.upstream_name, "-"))
+ options.upstream_name = "@{-1}";
+ }
+ options.upstream =
+ lookup_commit_reference_by_name(options.upstream_name);
+ if (!options.upstream)
+ die(_("invalid upstream '%s'"), options.upstream_name);
+ options.upstream_arg = options.upstream_name;
+ } else {
+ if (!options.onto_name) {
+ if (commit_tree("", 0, the_hash_algo->empty_tree, NULL,
+ &squash_onto, NULL, NULL) < 0)
+ die(_("Could not create new root commit"));
+ options.squash_onto = &squash_onto;
+ options.onto_name = squash_onto_name =
+ xstrdup(oid_to_hex(&squash_onto));
+ } else
+ options.root_with_onto = 1;
+
+ options.upstream_name = NULL;
+ options.upstream = NULL;
+ if (argc > 1)
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+ options.upstream_arg = "--root";
+ }
+
+ /*
+	 * If the branch to rebase is given, that is the branch we will rebase.
+ * branch_name -- branch/commit being rebased, or
+ * HEAD (already detached)
+ * orig_head -- commit object name of tip of the branch before rebasing
+ * head_name -- refs/heads/<that-branch> or NULL (detached HEAD)
+ */
+ if (argc == 1) {
+ /* Is it "rebase other branchname" or "rebase other commit"? */
+ struct object_id branch_oid;
+ branch_name = argv[0];
+ options.switch_to = argv[0];
+
+ /* Is it a local branch? */
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "refs/heads/%s", branch_name);
+ if (!read_ref(buf.buf, &branch_oid)) {
+ die_if_checked_out(buf.buf, 1);
+ options.head_name = xstrdup(buf.buf);
+ options.orig_head =
+ lookup_commit_object(the_repository,
+ &branch_oid);
+		/* If not, is it a valid ref (branch or commit)? */
+ } else {
+ options.orig_head =
+ lookup_commit_reference_by_name(branch_name);
+ options.head_name = NULL;
+ }
+ if (!options.orig_head)
+ die(_("no such branch/commit '%s'"), branch_name);
+ } else if (argc == 0) {
+		/* No need to switch branches; we are already on it. */
+ options.head_name =
+ xstrdup_or_null(resolve_ref_unsafe("HEAD", 0, NULL,
+ &flags));
+ if (!options.head_name)
+ die(_("No such ref: %s"), "HEAD");
+ if (flags & REF_ISSYMREF) {
+ if (!skip_prefix(options.head_name,
+ "refs/heads/", &branch_name))
+ branch_name = options.head_name;
+
+ } else {
+ FREE_AND_NULL(options.head_name);
+ branch_name = "HEAD";
+ }
+ options.orig_head = lookup_commit_reference_by_name("HEAD");
+ if (!options.orig_head)
+ die(_("Could not resolve HEAD to a commit"));
+ } else
+ BUG("unexpected number of arguments left to parse");
+
+ /* Make sure the branch to rebase onto is valid. */
+ if (keep_base) {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, options.upstream_name);
+ strbuf_addstr(&buf, "...");
+ strbuf_addstr(&buf, branch_name);
+ options.onto_name = xstrdup(buf.buf);
+ } else if (!options.onto_name)
+ options.onto_name = options.upstream_name;
+ if (strstr(options.onto_name, "...")) {
+ if (get_oid_mb(options.onto_name, &branch_base) < 0) {
+ if (keep_base)
+ die(_("'%s': need exactly one merge base with branch"),
+ options.upstream_name);
+ else
+ die(_("'%s': need exactly one merge base"),
+ options.onto_name);
+ }
+ options.onto = lookup_commit_or_die(&branch_base,
+ options.onto_name);
+ } else {
+ options.onto =
+ lookup_commit_reference_by_name(options.onto_name);
+ if (!options.onto)
+ die(_("Does not point to a valid commit '%s'"),
+ options.onto_name);
+ fill_branch_base(&options, &branch_base);
+ }
+
+ if (keep_base && options.reapply_cherry_picks)
+ options.upstream = options.onto;
+
+ if (options.fork_point > 0)
+ options.restrict_revision =
+ get_fork_point(options.upstream_name, options.orig_head);
+
+ if (repo_read_index(the_repository) < 0)
+ die(_("could not read index"));
+
+ if (options.autostash)
+ create_autostash(the_repository,
+ state_dir_path("autostash", &options));
+
+ if (require_clean_work_tree(the_repository, "rebase",
+ _("Please commit or stash them."), 1, 1)) {
+ ret = -1;
+ goto cleanup;
+ }
+
+ /*
+ * Now we are rebasing commits upstream..orig_head (or with --root,
+ * everything leading up to orig_head) on top of onto.
+ */
+
+ /*
+ * Check if we are already based on onto with linear history,
+ * in which case we could fast-forward without replacing the commits
+ * with new commits recreated by replaying their changes.
+ */
+ if (allow_preemptive_ff &&
+ can_fast_forward(options.onto, options.upstream, options.restrict_revision,
+ options.orig_head, &branch_base)) {
+ int flag;
+
+ if (!(options.flags & REBASE_FORCE)) {
+ /* Lazily switch to the target branch if needed... */
+ if (options.switch_to) {
+ ret = checkout_up_to_date(&options);
+ if (ret)
+ goto cleanup;
+ }
+
+ if (!(options.flags & REBASE_NO_QUIET))
+ ; /* be quiet */
+ else if (!strcmp(branch_name, "HEAD") &&
+ resolve_ref_unsafe("HEAD", 0, NULL, &flag))
+ puts(_("HEAD is up to date."));
+ else
+ printf(_("Current branch %s is up to date.\n"),
+ branch_name);
+ ret = finish_rebase(&options);
+ goto cleanup;
+ } else if (!(options.flags & REBASE_NO_QUIET))
+ ; /* be quiet */
+ else if (!strcmp(branch_name, "HEAD") &&
+ resolve_ref_unsafe("HEAD", 0, NULL, &flag))
+ puts(_("HEAD is up to date, rebase forced."));
+ else
+ printf(_("Current branch %s is up to date, rebase "
+ "forced.\n"), branch_name);
+ }
+
+	/* If a hook exists, give it a chance to interrupt. */
+ if (!ok_to_skip_pre_rebase &&
+ run_hooks_l("pre-rebase", options.upstream_arg,
+ argc ? argv[0] : NULL, NULL))
+ die(_("The pre-rebase hook refused to rebase."));
+
+ if (options.flags & REBASE_DIFFSTAT) {
+ struct diff_options opts;
+
+ if (options.flags & REBASE_VERBOSE) {
+ if (is_null_oid(&branch_base))
+ printf(_("Changes to %s:\n"),
+ oid_to_hex(&options.onto->object.oid));
+ else
+ printf(_("Changes from %s to %s:\n"),
+ oid_to_hex(&branch_base),
+ oid_to_hex(&options.onto->object.oid));
+ }
+
+ /* We want color (if set), but no pager */
+ diff_setup(&opts);
+ opts.stat_width = -1; /* use full terminal width */
+ opts.stat_graph_width = -1; /* respect statGraphWidth config */
+ opts.output_format |=
+ DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ opts.detect_rename = DIFF_DETECT_RENAME;
+ diff_setup_done(&opts);
+ diff_tree_oid(is_null_oid(&branch_base) ?
+ the_hash_algo->empty_tree : &branch_base,
+ &options.onto->object.oid, "", &opts);
+ diffcore_std(&opts);
+ diff_flush(&opts);
+ }
+
+ if (is_merge(&options))
+ goto run_rebase;
+
+ /* Detach HEAD and reset the tree */
+ if (options.flags & REBASE_NO_QUIET)
+ printf(_("First, rewinding head to replay your work on top of "
+ "it...\n"));
+
+ strbuf_addf(&msg, "%s (start): checkout %s",
+ options.reflog_action, options.onto_name);
+ ropts.oid = &options.onto->object.oid;
+	ropts.orig_head = &options.orig_head->object.oid;
+ ropts.flags = RESET_HEAD_DETACH | RESET_ORIG_HEAD |
+ RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ ropts.head_msg = msg.buf;
+ ropts.default_reflog_action = options.reflog_action;
+ if (reset_head(the_repository, &ropts))
+ die(_("Could not detach HEAD"));
+ strbuf_release(&msg);
+
+ /*
+ * If the onto is a proper descendant of the tip of the branch, then
+ * we just fast-forwarded.
+ */
+ if (oideq(&branch_base, &options.orig_head->object.oid)) {
+ printf(_("Fast-forwarded %s to %s.\n"),
+ branch_name, options.onto_name);
+ move_to_original_branch(&options);
+ ret = finish_rebase(&options);
+ goto cleanup;
+ }
+
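+	/*
+	 * Editor's note (illustrative): for a plain "git rebase <upstream>"
+	 * the range built below is "<upstream-oid>..<orig-head-oid>", i.e.
+	 * exactly the commits the apply backend will format and replay.
+	 */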
+ strbuf_addf(&revisions, "%s..%s",
+ options.root ? oid_to_hex(&options.onto->object.oid) :
+ (options.restrict_revision ?
+ oid_to_hex(&options.restrict_revision->object.oid) :
+ oid_to_hex(&options.upstream->object.oid)),
+ oid_to_hex(&options.orig_head->object.oid));
+
+ options.revisions = revisions.buf;
+
+run_rebase:
+ ret = run_specific_rebase(&options);
+
+cleanup:
+ strbuf_release(&buf);
+ strbuf_release(&revisions);
+ free(options.reflog_action);
+ free(options.head_name);
+ free(options.gpg_sign_opt);
+ free(options.cmd);
+ free(options.strategy);
+ strbuf_release(&options.git_format_patch_opt);
+ free(squash_onto_name);
+ return !!ret;
+}
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
new file mode 100644
index 0000000..a90af30
--- /dev/null
+++ b/builtin/receive-pack.c
@@ -0,0 +1,2600 @@
+#include "builtin.h"
+#include "repository.h"
+#include "config.h"
+#include "lockfile.h"
+#include "pack.h"
+#include "refs.h"
+#include "pkt-line.h"
+#include "sideband.h"
+#include "run-command.h"
+#include "hook.h"
+#include "exec-cmd.h"
+#include "commit.h"
+#include "object.h"
+#include "remote.h"
+#include "connect.h"
+#include "string-list.h"
+#include "oid-array.h"
+#include "connected.h"
+#include "strvec.h"
+#include "version.h"
+#include "tag.h"
+#include "gpg-interface.h"
+#include "sigchain.h"
+#include "fsck.h"
+#include "tmp-objdir.h"
+#include "oidset.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "protocol.h"
+#include "commit-reach.h"
+#include "worktree.h"
+#include "shallow.h"
+
+static const char * const receive_pack_usage[] = {
+ N_("git receive-pack <git-dir>"),
+ NULL
+};
+
+enum deny_action {
+ DENY_UNCONFIGURED,
+ DENY_IGNORE,
+ DENY_WARN,
+ DENY_REFUSE,
+ DENY_UPDATE_INSTEAD
+};
+
+static int deny_deletes;
+static int deny_non_fast_forwards;
+static enum deny_action deny_current_branch = DENY_UNCONFIGURED;
+static enum deny_action deny_delete_current = DENY_UNCONFIGURED;
+static int receive_fsck_objects = -1;
+static int transfer_fsck_objects = -1;
+static struct strbuf fsck_msg_types = STRBUF_INIT;
+static int receive_unpack_limit = -1;
+static int transfer_unpack_limit = -1;
+static int advertise_atomic_push = 1;
+static int advertise_push_options;
+static int advertise_sid;
+static int unpack_limit = 100;
+static off_t max_input_size;
+static int report_status;
+static int report_status_v2;
+static int use_sideband;
+static int use_atomic;
+static int use_push_options;
+static int quiet;
+static int prefer_ofs_delta = 1;
+static int auto_update_server_info;
+static int auto_gc = 1;
+static int reject_thin;
+static int stateless_rpc;
+static const char *service_dir;
+static const char *head_name;
+static void *head_name_to_free;
+static int sent_capabilities;
+static int shallow_update;
+static const char *alt_shallow_file;
+static struct strbuf push_cert = STRBUF_INIT;
+static struct object_id push_cert_oid;
+static struct signature_check sigcheck;
+static const char *push_cert_nonce;
+static const char *cert_nonce_seed;
+static struct string_list hidden_refs = STRING_LIST_INIT_DUP;
+
+static const char *NONCE_UNSOLICITED = "UNSOLICITED";
+static const char *NONCE_BAD = "BAD";
+static const char *NONCE_MISSING = "MISSING";
+static const char *NONCE_OK = "OK";
+static const char *NONCE_SLOP = "SLOP";
+static const char *nonce_status;
+static long nonce_stamp_slop;
+static timestamp_t nonce_stamp_slop_limit;
+static struct ref_transaction *transaction;
+
+static enum {
+ KEEPALIVE_NEVER = 0,
+ KEEPALIVE_AFTER_NUL,
+ KEEPALIVE_ALWAYS
+} use_keepalive;
+static int keepalive_in_sec = 5;
+
+static struct tmp_objdir *tmp_objdir;
+
+static struct proc_receive_ref {
+ unsigned int want_add:1,
+ want_delete:1,
+ want_modify:1,
+ negative_ref:1;
+ char *ref_prefix;
+ struct proc_receive_ref *next;
+} *proc_receive_ref;
+
+static void proc_receive_ref_append(const char *prefix);
+
+static enum deny_action parse_deny_action(const char *var, const char *value)
+{
+ if (value) {
+ if (!strcasecmp(value, "ignore"))
+ return DENY_IGNORE;
+ if (!strcasecmp(value, "warn"))
+ return DENY_WARN;
+ if (!strcasecmp(value, "refuse"))
+ return DENY_REFUSE;
+ if (!strcasecmp(value, "updateinstead"))
+ return DENY_UPDATE_INSTEAD;
+ }
+ if (git_config_bool(var, value))
+ return DENY_REFUSE;
+ return DENY_IGNORE;
+}
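+
+/*
+ * Illustrative example (editor's note, not from the original patch): the
+ * actions above correspond to configuration such as
+ *
+ *	[receive]
+ *		denyCurrentBranch = updateInstead
+ *		denyDeleteCurrent = warn
+ *
+ * Any other value is taken as a boolean and mapped to "refuse" (true) or
+ * "ignore" (false).
+ */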
+
+static int receive_pack_config(const char *var, const char *value, void *cb)
+{
+ int status = parse_hide_refs_config(var, value, "receive", &hidden_refs);
+
+ if (status)
+ return status;
+
+ status = git_gpg_config(var, value, NULL);
+ if (status)
+ return status;
+
+ if (strcmp(var, "receive.denydeletes") == 0) {
+ deny_deletes = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.denynonfastforwards") == 0) {
+ deny_non_fast_forwards = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.unpacklimit") == 0) {
+ receive_unpack_limit = git_config_int(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "transfer.unpacklimit") == 0) {
+ transfer_unpack_limit = git_config_int(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.fsck.skiplist") == 0) {
+ const char *path;
+
+ if (git_config_pathname(&path, var, value))
+ return 1;
+ strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
+ fsck_msg_types.len ? ',' : '=', path);
+ free((char *)path);
+ return 0;
+ }
+
+ if (skip_prefix(var, "receive.fsck.", &var)) {
+ if (is_valid_msg_type(var, value))
+ strbuf_addf(&fsck_msg_types, "%c%s=%s",
+ fsck_msg_types.len ? ',' : '=', var, value);
+ else
+ warning("skipping unknown msg id '%s'", var);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.fsckobjects") == 0) {
+ receive_fsck_objects = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "transfer.fsckobjects") == 0) {
+ transfer_fsck_objects = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "receive.denycurrentbranch")) {
+ deny_current_branch = parse_deny_action(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.denydeletecurrent") == 0) {
+ deny_delete_current = parse_deny_action(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "repack.usedeltabaseoffset") == 0) {
+ prefer_ofs_delta = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.updateserverinfo") == 0) {
+ auto_update_server_info = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.autogc") == 0) {
+ auto_gc = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.shallowupdate") == 0) {
+ shallow_update = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.certnonceseed") == 0)
+ return git_config_string(&cert_nonce_seed, var, value);
+
+ if (strcmp(var, "receive.certnonceslop") == 0) {
+ nonce_stamp_slop_limit = git_config_ulong(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.advertiseatomic") == 0) {
+ advertise_atomic_push = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.advertisepushoptions") == 0) {
+ advertise_push_options = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.keepalive") == 0) {
+ keepalive_in_sec = git_config_int(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.maxinputsize") == 0) {
+ max_input_size = git_config_int64(var, value);
+ return 0;
+ }
+
+ if (strcmp(var, "receive.procreceiverefs") == 0) {
+ if (!value)
+ return config_error_nonbool(var);
+ proc_receive_ref_append(value);
+ return 0;
+ }
+
+ if (strcmp(var, "transfer.advertisesid") == 0) {
+ advertise_sid = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+static void show_ref(const char *path, const struct object_id *oid)
+{
+ if (sent_capabilities) {
+ packet_write_fmt(1, "%s %s\n", oid_to_hex(oid), path);
+ } else {
+ struct strbuf cap = STRBUF_INIT;
+
+ strbuf_addstr(&cap,
+ "report-status report-status-v2 delete-refs side-band-64k quiet");
+ if (advertise_atomic_push)
+ strbuf_addstr(&cap, " atomic");
+ if (prefer_ofs_delta)
+ strbuf_addstr(&cap, " ofs-delta");
+ if (push_cert_nonce)
+ strbuf_addf(&cap, " push-cert=%s", push_cert_nonce);
+ if (advertise_push_options)
+ strbuf_addstr(&cap, " push-options");
+ if (advertise_sid)
+ strbuf_addf(&cap, " session-id=%s", trace2_session_id());
+ strbuf_addf(&cap, " object-format=%s", the_hash_algo->name);
+ strbuf_addf(&cap, " agent=%s", git_user_agent_sanitized());
+ packet_write_fmt(1, "%s %s%c%s\n",
+ oid_to_hex(oid), path, 0, cap.buf);
+ strbuf_release(&cap);
+ sent_capabilities = 1;
+ }
+}
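+
+/*
+ * Editor's note (illustrative): the first advertised ref carries the
+ * capability list after a NUL byte, roughly
+ *
+ *	<oid> refs/heads/main\0report-status report-status-v2 ... agent=git/2.39.2
+ *
+ * while every later ref is a plain "<oid> <refname>" packet line; the ref
+ * name shown here is only an example.
+ */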
+
+static int show_ref_cb(const char *path_full, const struct object_id *oid,
+ int flag UNUSED, void *data)
+{
+ struct oidset *seen = data;
+ const char *path = strip_namespace(path_full);
+
+ if (ref_is_hidden(path, path_full, &hidden_refs))
+ return 0;
+
+ /*
+ * Advertise refs outside our current namespace as ".have"
+ * refs, so that the client can use them to minimize data
+ * transfer but will otherwise ignore them.
+ */
+ if (!path) {
+ if (oidset_insert(seen, oid))
+ return 0;
+ path = ".have";
+ } else {
+ oidset_insert(seen, oid);
+ }
+ show_ref(path, oid);
+ return 0;
+}
+
+static void show_one_alternate_ref(const struct object_id *oid,
+ void *data)
+{
+ struct oidset *seen = data;
+
+ if (oidset_insert(seen, oid))
+ return;
+
+ show_ref(".have", oid);
+}
+
+static void write_head_info(void)
+{
+ static struct oidset seen = OIDSET_INIT;
+
+ for_each_ref(show_ref_cb, &seen);
+ for_each_alternate_ref(show_one_alternate_ref, &seen);
+ oidset_clear(&seen);
+ if (!sent_capabilities)
+ show_ref("capabilities^{}", null_oid());
+
+ advertise_shallow_grafts(1);
+
+ /* EOF */
+ packet_flush(1);
+}
+
+#define RUN_PROC_RECEIVE_SCHEDULED 1
+#define RUN_PROC_RECEIVE_RETURNED 2
+struct command {
+ struct command *next;
+ const char *error_string;
+ struct ref_push_report *report;
+ unsigned int skip_update:1,
+ did_not_exist:1,
+ run_proc_receive:2;
+ int index;
+ struct object_id old_oid;
+ struct object_id new_oid;
+ char ref_name[FLEX_ARRAY]; /* more */
+};
+
+static void proc_receive_ref_append(const char *prefix)
+{
+ struct proc_receive_ref *ref_pattern;
+ char *p;
+ int len;
+
+ CALLOC_ARRAY(ref_pattern, 1);
+ p = strchr(prefix, ':');
+ if (p) {
+ while (prefix < p) {
+ if (*prefix == 'a')
+ ref_pattern->want_add = 1;
+ else if (*prefix == 'd')
+ ref_pattern->want_delete = 1;
+ else if (*prefix == 'm')
+ ref_pattern->want_modify = 1;
+ else if (*prefix == '!')
+ ref_pattern->negative_ref = 1;
+ prefix++;
+ }
+ prefix++;
+ } else {
+ ref_pattern->want_add = 1;
+ ref_pattern->want_delete = 1;
+ ref_pattern->want_modify = 1;
+ }
+ len = strlen(prefix);
+ while (len && prefix[len - 1] == '/')
+ len--;
+ ref_pattern->ref_prefix = xmemdupz(prefix, len);
+ if (!proc_receive_ref) {
+ proc_receive_ref = ref_pattern;
+ } else {
+ struct proc_receive_ref *end;
+
+ end = proc_receive_ref;
+ while (end->next)
+ end = end->next;
+ end->next = ref_pattern;
+ }
+}
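+
+/*
+ * Illustrative example (editor's note): with configuration such as
+ * "receive.procReceiveRefs = ad:refs/for", only creations and deletions
+ * under refs/for/ are routed to the proc-receive hook, while a bare
+ * "refs/for" (no modifier prefix) covers add, delete and modify alike.
+ */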
+
+static int proc_receive_ref_matches(struct command *cmd)
+{
+ struct proc_receive_ref *p;
+
+ if (!proc_receive_ref)
+ return 0;
+
+ for (p = proc_receive_ref; p; p = p->next) {
+ const char *match = p->ref_prefix;
+ const char *remains;
+
+ if (!p->want_add && is_null_oid(&cmd->old_oid))
+ continue;
+ else if (!p->want_delete && is_null_oid(&cmd->new_oid))
+ continue;
+ else if (!p->want_modify &&
+ !is_null_oid(&cmd->old_oid) &&
+ !is_null_oid(&cmd->new_oid))
+ continue;
+
+ if (skip_prefix(cmd->ref_name, match, &remains) &&
+ (!*remains || *remains == '/')) {
+ if (!p->negative_ref)
+ return 1;
+ } else if (p->negative_ref) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void report_message(const char *prefix, const char *err, va_list params)
+{
+ int sz;
+ char msg[4096];
+
+ sz = xsnprintf(msg, sizeof(msg), "%s", prefix);
+ sz += vsnprintf(msg + sz, sizeof(msg) - sz, err, params);
+ if (sz > (sizeof(msg) - 1))
+ sz = sizeof(msg) - 1;
+ msg[sz++] = '\n';
+
+ if (use_sideband)
+ send_sideband(1, 2, msg, sz, use_sideband);
+ else
+ xwrite(2, msg, sz);
+}
+
+__attribute__((format (printf, 1, 2)))
+static void rp_warning(const char *err, ...)
+{
+ va_list params;
+ va_start(params, err);
+ report_message("warning: ", err, params);
+ va_end(params);
+}
+
+__attribute__((format (printf, 1, 2)))
+static void rp_error(const char *err, ...)
+{
+ va_list params;
+ va_start(params, err);
+ report_message("error: ", err, params);
+ va_end(params);
+}
+
+static int copy_to_sideband(int in, int out UNUSED, void *arg UNUSED)
+{
+ char data[128];
+ int keepalive_active = 0;
+
+ if (keepalive_in_sec <= 0)
+ use_keepalive = KEEPALIVE_NEVER;
+ if (use_keepalive == KEEPALIVE_ALWAYS)
+ keepalive_active = 1;
+
+ while (1) {
+ ssize_t sz;
+
+ if (keepalive_active) {
+ struct pollfd pfd;
+ int ret;
+
+ pfd.fd = in;
+ pfd.events = POLLIN;
+ ret = poll(&pfd, 1, 1000 * keepalive_in_sec);
+
+ if (ret < 0) {
+ if (errno == EINTR)
+ continue;
+ else
+ break;
+ } else if (ret == 0) {
+ /* no data; send a keepalive packet */
+ static const char buf[] = "0005\1";
+ write_or_die(1, buf, sizeof(buf) - 1);
+ continue;
+ } /* else there is actual data to read */
+ }
+
+ sz = xread(in, data, sizeof(data));
+ if (sz <= 0)
+ break;
+
+ if (use_keepalive == KEEPALIVE_AFTER_NUL && !keepalive_active) {
+ const char *p = memchr(data, '\0', sz);
+ if (p) {
+ /*
+ * The NUL tells us to start sending keepalives. Make
+ * sure we send any other data we read along
+ * with it.
+ */
+ keepalive_active = 1;
+ send_sideband(1, 2, data, p - data, use_sideband);
+ send_sideband(1, 2, p + 1, sz - (p - data + 1), use_sideband);
+ continue;
+ }
+ }
+
+ /*
+ * Either we're not looking for a NUL signal, or we didn't see
+ * it yet; just pass along the data.
+ */
+ send_sideband(1, 2, data, sz, use_sideband);
+ }
+ close(in);
+ return 0;
+}
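+
+/*
+ * Editor's note: the "0005\1" keepalive sent above is a pkt-line of total
+ * length 5 whose payload is just the band-#1 byte, i.e. an empty data
+ * packet that keeps the connection alive without disturbing the stream.
+ */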
+
+static void hmac_hash(unsigned char *out,
+ const char *key_in, size_t key_len,
+ const char *text, size_t text_len)
+{
+ unsigned char key[GIT_MAX_BLKSZ];
+ unsigned char k_ipad[GIT_MAX_BLKSZ];
+ unsigned char k_opad[GIT_MAX_BLKSZ];
+ int i;
+ git_hash_ctx ctx;
+
+ /* RFC 2104 2. (1) */
+ memset(key, '\0', GIT_MAX_BLKSZ);
+ if (the_hash_algo->blksz < key_len) {
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, key_in, key_len);
+ the_hash_algo->final_fn(key, &ctx);
+ } else {
+ memcpy(key, key_in, key_len);
+ }
+
+ /* RFC 2104 2. (2) & (5) */
+ for (i = 0; i < sizeof(key); i++) {
+ k_ipad[i] = key[i] ^ 0x36;
+ k_opad[i] = key[i] ^ 0x5c;
+ }
+
+ /* RFC 2104 2. (3) & (4) */
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, k_ipad, sizeof(k_ipad));
+ the_hash_algo->update_fn(&ctx, text, text_len);
+ the_hash_algo->final_fn(out, &ctx);
+
+ /* RFC 2104 2. (6) & (7) */
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, k_opad, sizeof(k_opad));
+ the_hash_algo->update_fn(&ctx, out, the_hash_algo->rawsz);
+ the_hash_algo->final_fn(out, &ctx);
+}
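+
+/*
+ * Editor's note: the above is the standard RFC 2104 construction,
+ * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with the key padded
+ * (or hashed) to the block size and H being the repository's hash
+ * algorithm (SHA-1 or SHA-256).
+ */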
+
+static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
+{
+ struct strbuf buf = STRBUF_INIT;
+ unsigned char hash[GIT_MAX_RAWSZ];
+
+ strbuf_addf(&buf, "%s:%"PRItime, path, stamp);
+ hmac_hash(hash, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));
+ strbuf_release(&buf);
+
+ /* RFC 2104 5. HMAC-SHA1 or HMAC-SHA256 */
+ strbuf_addf(&buf, "%"PRItime"-%.*s", stamp, (int)the_hash_algo->hexsz, hash_to_hex(hash));
+ return strbuf_detach(&buf, NULL);
+}
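+
+/*
+ * Illustrative example (editor's note): the generated nonce has the shape
+ * "<stamp>-<hex hmac>", e.g. "1679000000-0123...ef", where the HMAC is
+ * keyed with "<path>:<stamp>" over cert_nonce_seed; check_nonce() below
+ * splits it back apart at the '-'.
+ */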
+
+static char *find_header(const char *msg, size_t len, const char *key,
+ const char **next_line)
+{
+ size_t out_len;
+ const char *val = find_header_mem(msg, len, key, &out_len);
+
+ if (!val)
+ return NULL;
+
+ if (next_line)
+ *next_line = val + out_len + 1;
+
+ return xmemdupz(val, out_len);
+}
+
+/*
+ * Return zero if a and b are equal up to n bytes and nonzero if they are not.
+ * This operation is guaranteed to run in constant time to avoid leaking data.
+ */
+static int constant_memequal(const char *a, const char *b, size_t n)
+{
+ int res = 0;
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ res |= a[i] ^ b[i];
+ return res;
+}
+
+static const char *check_nonce(const char *buf, size_t len)
+{
+ char *nonce = find_header(buf, len, "nonce", NULL);
+ timestamp_t stamp, ostamp;
+ char *bohmac, *expect = NULL;
+ const char *retval = NONCE_BAD;
+ size_t noncelen;
+
+ if (!nonce) {
+ retval = NONCE_MISSING;
+ goto leave;
+ } else if (!push_cert_nonce) {
+ retval = NONCE_UNSOLICITED;
+ goto leave;
+ } else if (!strcmp(push_cert_nonce, nonce)) {
+ retval = NONCE_OK;
+ goto leave;
+ }
+
+ if (!stateless_rpc) {
+ /* returned nonce MUST match what we gave out earlier */
+ retval = NONCE_BAD;
+ goto leave;
+ }
+
+ /*
+ * In stateless mode, we may be receiving a nonce issued by
+	 * another instance of the server that is serving the same
+ * repository, and the timestamps may not match, but the
+ * nonce-seed and dir should match, so we can recompute and
+ * report the time slop.
+ *
+ * In addition, when a nonce issued by another instance has
+ * timestamp within receive.certnonceslop seconds, we pretend
+ * as if we issued that nonce when reporting to the hook.
+ */
+
+ /* nonce is concat(<seconds-since-epoch>, "-", <hmac>) */
+ if (*nonce <= '0' || '9' < *nonce) {
+ retval = NONCE_BAD;
+ goto leave;
+ }
+ stamp = parse_timestamp(nonce, &bohmac, 10);
+ if (bohmac == nonce || bohmac[0] != '-') {
+ retval = NONCE_BAD;
+ goto leave;
+ }
+
+ noncelen = strlen(nonce);
+ expect = prepare_push_cert_nonce(service_dir, stamp);
+ if (noncelen != strlen(expect)) {
+ /* This is not even the right size. */
+ retval = NONCE_BAD;
+ goto leave;
+ }
+ if (constant_memequal(expect, nonce, noncelen)) {
+ /* Not what we would have signed earlier */
+ retval = NONCE_BAD;
+ goto leave;
+ }
+
+ /*
+ * By how many seconds is this nonce stale? Negative value
+ * would mean it was issued by another server with its clock
+ * skewed in the future.
+ */
+ ostamp = parse_timestamp(push_cert_nonce, NULL, 10);
+ nonce_stamp_slop = (long)ostamp - (long)stamp;
+
+ if (nonce_stamp_slop_limit &&
+ labs(nonce_stamp_slop) <= nonce_stamp_slop_limit) {
+ /*
+ * Pretend as if the received nonce (which passes the
+		 * HMAC check, so it is not a forgery by a third party)
+ * is what we issued.
+ */
+ free((void *)push_cert_nonce);
+ push_cert_nonce = xstrdup(nonce);
+ retval = NONCE_OK;
+ } else {
+ retval = NONCE_SLOP;
+ }
+
+leave:
+ free(nonce);
+ free(expect);
+ return retval;
+}
+
+/*
+ * Return 1 if there is no push_cert or if the push options in push_cert are
+ * the same as those in the argument; 0 otherwise.
+ */
+static int check_cert_push_options(const struct string_list *push_options)
+{
+ const char *buf = push_cert.buf;
+ int len = push_cert.len;
+
+ char *option;
+ const char *next_line;
+ int options_seen = 0;
+
+ int retval = 1;
+
+ if (!len)
+ return 1;
+
+ while ((option = find_header(buf, len, "push-option", &next_line))) {
+ len -= (next_line - buf);
+ buf = next_line;
+ options_seen++;
+ if (options_seen > push_options->nr
+ || strcmp(option,
+ push_options->items[options_seen - 1].string)) {
+ retval = 0;
+ goto leave;
+ }
+ free(option);
+ }
+
+ if (options_seen != push_options->nr)
+ retval = 0;
+
+leave:
+ free(option);
+ return retval;
+}
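+
+/*
+ * Illustrative example (editor's note): a signed push cert carries one
+ * "push-option <value>" header per option the client sent, e.g.
+ *
+ *	push-option ci.skip
+ *	push-option reviewer=alice
+ *
+ * and the function above requires them to match the options received over
+ * the protocol in number and order; the values shown are made up.
+ */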
+
+static void prepare_push_cert_sha1(struct child_process *proc)
+{
+ static int already_done;
+
+ if (!push_cert.len)
+ return;
+
+ if (!already_done) {
+ int bogs /* beginning_of_gpg_sig */;
+
+ already_done = 1;
+ if (write_object_file(push_cert.buf, push_cert.len, OBJ_BLOB,
+ &push_cert_oid))
+ oidclr(&push_cert_oid);
+
+ memset(&sigcheck, '\0', sizeof(sigcheck));
+
+ bogs = parse_signed_buffer(push_cert.buf, push_cert.len);
+ sigcheck.payload = xmemdupz(push_cert.buf, bogs);
+ sigcheck.payload_len = bogs;
+ check_signature(&sigcheck, push_cert.buf + bogs,
+ push_cert.len - bogs);
+
+ nonce_status = check_nonce(push_cert.buf, bogs);
+ }
+ if (!is_null_oid(&push_cert_oid)) {
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT=%s",
+ oid_to_hex(&push_cert_oid));
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_SIGNER=%s",
+ sigcheck.signer ? sigcheck.signer : "");
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_KEY=%s",
+ sigcheck.key ? sigcheck.key : "");
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_STATUS=%c",
+ sigcheck.result);
+ if (push_cert_nonce) {
+ strvec_pushf(&proc->env,
+ "GIT_PUSH_CERT_NONCE=%s",
+ push_cert_nonce);
+ strvec_pushf(&proc->env,
+ "GIT_PUSH_CERT_NONCE_STATUS=%s",
+ nonce_status);
+ if (nonce_status == NONCE_SLOP)
+ strvec_pushf(&proc->env,
+ "GIT_PUSH_CERT_NONCE_SLOP=%ld",
+ nonce_stamp_slop);
+ }
+ }
+}
+
+struct receive_hook_feed_state {
+ struct command *cmd;
+ struct ref_push_report *report;
+ int skip_broken;
+ struct strbuf buf;
+ const struct string_list *push_options;
+};
+
+typedef int (*feed_fn)(void *, const char **, size_t *);
+static int run_and_feed_hook(const char *hook_name, feed_fn feed,
+ struct receive_hook_feed_state *feed_state)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ struct async muxer;
+ int code;
+ const char *hook_path = find_hook(hook_name);
+
+ if (!hook_path)
+ return 0;
+
+ strvec_push(&proc.args, hook_path);
+ proc.in = -1;
+ proc.stdout_to_stderr = 1;
+ proc.trace2_hook_name = hook_name;
+
+ if (feed_state->push_options) {
+ size_t i;
+ for (i = 0; i < feed_state->push_options->nr; i++)
+ strvec_pushf(&proc.env,
+ "GIT_PUSH_OPTION_%"PRIuMAX"=%s",
+ (uintmax_t)i,
+ feed_state->push_options->items[i].string);
+ strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
+ (uintmax_t)feed_state->push_options->nr);
+ } else
+ strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT");
+
+ if (tmp_objdir)
+ strvec_pushv(&proc.env, tmp_objdir_env(tmp_objdir));
+
+ if (use_sideband) {
+ memset(&muxer, 0, sizeof(muxer));
+ muxer.proc = copy_to_sideband;
+ muxer.in = -1;
+ code = start_async(&muxer);
+ if (code)
+ return code;
+ proc.err = muxer.in;
+ }
+
+ prepare_push_cert_sha1(&proc);
+
+ code = start_command(&proc);
+ if (code) {
+ if (use_sideband)
+ finish_async(&muxer);
+ return code;
+ }
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ while (1) {
+ const char *buf;
+ size_t n;
+ if (feed(feed_state, &buf, &n))
+ break;
+ if (write_in_full(proc.in, buf, n) < 0)
+ break;
+ }
+ close(proc.in);
+ if (use_sideband)
+ finish_async(&muxer);
+
+ sigchain_pop(SIGPIPE);
+
+ return finish_command(&proc);
+}
+
+static int feed_receive_hook(void *state_, const char **bufp, size_t *sizep)
+{
+ struct receive_hook_feed_state *state = state_;
+ struct command *cmd = state->cmd;
+
+ while (cmd &&
+ state->skip_broken && (cmd->error_string || cmd->did_not_exist))
+ cmd = cmd->next;
+ if (!cmd)
+ return -1; /* EOF */
+ if (!bufp)
+ return 0; /* OK, can feed something. */
+ strbuf_reset(&state->buf);
+ if (!state->report)
+ state->report = cmd->report;
+ if (state->report) {
+ struct object_id *old_oid;
+ struct object_id *new_oid;
+ const char *ref_name;
+
+ old_oid = state->report->old_oid ? state->report->old_oid : &cmd->old_oid;
+ new_oid = state->report->new_oid ? state->report->new_oid : &cmd->new_oid;
+ ref_name = state->report->ref_name ? state->report->ref_name : cmd->ref_name;
+ strbuf_addf(&state->buf, "%s %s %s\n",
+ oid_to_hex(old_oid), oid_to_hex(new_oid),
+ ref_name);
+ state->report = state->report->next;
+ if (!state->report)
+ state->cmd = cmd->next;
+ } else {
+ strbuf_addf(&state->buf, "%s %s %s\n",
+ oid_to_hex(&cmd->old_oid), oid_to_hex(&cmd->new_oid),
+ cmd->ref_name);
+ state->cmd = cmd->next;
+ }
+ if (bufp) {
+ *bufp = state->buf.buf;
+ *sizep = state->buf.len;
+ }
+ return 0;
+}
+
+static int run_receive_hook(struct command *commands,
+ const char *hook_name,
+ int skip_broken,
+ const struct string_list *push_options)
+{
+ struct receive_hook_feed_state state;
+ int status;
+
+ strbuf_init(&state.buf, 0);
+ state.cmd = commands;
+ state.skip_broken = skip_broken;
+ state.report = NULL;
+ if (feed_receive_hook(&state, NULL, NULL))
+ return 0;
+ state.cmd = commands;
+ state.push_options = push_options;
+ status = run_and_feed_hook(hook_name, feed_receive_hook, &state);
+ strbuf_release(&state.buf);
+ return status;
+}
+
+static int run_update_hook(struct command *cmd)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ int code;
+ const char *hook_path = find_hook("update");
+
+ if (!hook_path)
+ return 0;
+
+ strvec_push(&proc.args, hook_path);
+ strvec_push(&proc.args, cmd->ref_name);
+ strvec_push(&proc.args, oid_to_hex(&cmd->old_oid));
+ strvec_push(&proc.args, oid_to_hex(&cmd->new_oid));
+
+ proc.no_stdin = 1;
+ proc.stdout_to_stderr = 1;
+ proc.err = use_sideband ? -1 : 0;
+ proc.trace2_hook_name = "update";
+
+ code = start_command(&proc);
+ if (code)
+ return code;
+ if (use_sideband)
+ copy_to_sideband(proc.err, -1, NULL);
+ return finish_command(&proc);
+}
+
+static struct command *find_command_by_refname(struct command *list,
+ const char *refname)
+{
+ for (; list; list = list->next)
+ if (!strcmp(list->ref_name, refname))
+ return list;
+ return NULL;
+}
+
+static int read_proc_receive_report(struct packet_reader *reader,
+ struct command *commands,
+ struct strbuf *errmsg)
+{
+ struct command *cmd;
+ struct command *hint = NULL;
+ struct ref_push_report *report = NULL;
+ int new_report = 0;
+ int code = 0;
+ int once = 0;
+ int response = 0;
+
+ for (;;) {
+ struct object_id old_oid, new_oid;
+ const char *head;
+ const char *refname;
+ char *p;
+ enum packet_read_status status;
+
+ status = packet_reader_read(reader);
+ if (status != PACKET_READ_NORMAL) {
+ /* Check whether proc-receive exited abnormally */
+ if (status == PACKET_READ_EOF && !response) {
+ strbuf_addstr(errmsg, "proc-receive exited abnormally");
+ return -1;
+ }
+ break;
+ }
+ response++;
+
+ head = reader->line;
+ p = strchr(head, ' ');
+ if (!p) {
+ strbuf_addf(errmsg, "proc-receive reported incomplete status line: '%s'\n", head);
+ code = -1;
+ continue;
+ }
+ *p++ = '\0';
+ if (!strcmp(head, "option")) {
+ const char *key, *val;
+
+ if (!hint || !(report || new_report)) {
+ if (!once++)
+ strbuf_addstr(errmsg, "proc-receive reported 'option' without a matching 'ok/ng' directive\n");
+ code = -1;
+ continue;
+ }
+ if (new_report) {
+ if (!hint->report) {
+ CALLOC_ARRAY(hint->report, 1);
+ report = hint->report;
+ } else {
+ report = hint->report;
+ while (report->next)
+ report = report->next;
+ report->next = xcalloc(1, sizeof(struct ref_push_report));
+ report = report->next;
+ }
+ new_report = 0;
+ }
+ key = p;
+ p = strchr(key, ' ');
+ if (p)
+ *p++ = '\0';
+ val = p;
+ if (!strcmp(key, "refname"))
+ report->ref_name = xstrdup_or_null(val);
+ else if (!strcmp(key, "old-oid") && val &&
+ !parse_oid_hex(val, &old_oid, &val))
+ report->old_oid = oiddup(&old_oid);
+ else if (!strcmp(key, "new-oid") && val &&
+ !parse_oid_hex(val, &new_oid, &val))
+ report->new_oid = oiddup(&new_oid);
+ else if (!strcmp(key, "forced-update"))
+ report->forced_update = 1;
+ else if (!strcmp(key, "fall-through"))
+				/* Fall through, let 'receive-pack' execute it. */
+ hint->run_proc_receive = 0;
+ continue;
+ }
+
+ report = NULL;
+ new_report = 0;
+ refname = p;
+ p = strchr(refname, ' ');
+ if (p)
+ *p++ = '\0';
+ if (strcmp(head, "ok") && strcmp(head, "ng")) {
+ strbuf_addf(errmsg, "proc-receive reported bad status '%s' on ref '%s'\n",
+ head, refname);
+ code = -1;
+ continue;
+ }
+
+ /* first try searching at our hint, falling back to all refs */
+ if (hint)
+ hint = find_command_by_refname(hint, refname);
+ if (!hint)
+ hint = find_command_by_refname(commands, refname);
+ if (!hint) {
+ strbuf_addf(errmsg, "proc-receive reported status on unknown ref: %s\n",
+ refname);
+ code = -1;
+ continue;
+ }
+ if (!hint->run_proc_receive) {
+ strbuf_addf(errmsg, "proc-receive reported status on unexpected ref: %s\n",
+ refname);
+ code = -1;
+ continue;
+ }
+ hint->run_proc_receive |= RUN_PROC_RECEIVE_RETURNED;
+ if (!strcmp(head, "ng")) {
+ if (p)
+ hint->error_string = xstrdup(p);
+ else
+ hint->error_string = "failed";
+ code = -1;
+ continue;
+ }
+ new_report = 1;
+ }
+
+ for (cmd = commands; cmd; cmd = cmd->next)
+ if (cmd->run_proc_receive && !cmd->error_string &&
+ !(cmd->run_proc_receive & RUN_PROC_RECEIVE_RETURNED)) {
+ cmd->error_string = "proc-receive failed to report status";
+ code = -1;
+ }
+ return code;
+}
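+
+/*
+ * Illustrative example (editor's note): a report parsed by the function
+ * above might consist of packet lines such as
+ *
+ *	ok refs/for/main/topic
+ *	option refname refs/pull/123/head
+ *	option new-oid <oid>
+ *	ng refs/for/next/topic commit rejected
+ *
+ * i.e. one "ok <ref>" or "ng <ref> <reason>" per command, with optional
+ * "option" lines refining the preceding "ok"; the ref names shown are
+ * hypothetical.
+ */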
+
+static int run_proc_receive_hook(struct command *commands,
+ const struct string_list *push_options)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ struct async muxer;
+ struct command *cmd;
+ struct packet_reader reader;
+ struct strbuf cap = STRBUF_INIT;
+ struct strbuf errmsg = STRBUF_INIT;
+ int hook_use_push_options = 0;
+ int version = 0;
+ int code;
+ const char *hook_path = find_hook("proc-receive");
+
+ if (!hook_path) {
+ rp_error("cannot find hook 'proc-receive'");
+ return -1;
+ }
+
+ strvec_push(&proc.args, hook_path);
+ proc.in = -1;
+ proc.out = -1;
+ proc.trace2_hook_name = "proc-receive";
+
+ if (use_sideband) {
+ memset(&muxer, 0, sizeof(muxer));
+ muxer.proc = copy_to_sideband;
+ muxer.in = -1;
+ code = start_async(&muxer);
+ if (code)
+ return code;
+ proc.err = muxer.in;
+ } else {
+ proc.err = 0;
+ }
+
+ code = start_command(&proc);
+ if (code) {
+ if (use_sideband)
+ finish_async(&muxer);
+ return code;
+ }
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+	/* Version negotiation */
+ packet_reader_init(&reader, proc.out, NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF);
+ if (use_atomic)
+ strbuf_addstr(&cap, " atomic");
+ if (use_push_options)
+ strbuf_addstr(&cap, " push-options");
+ if (cap.len) {
+ code = packet_write_fmt_gently(proc.in, "version=1%c%s\n", '\0', cap.buf + 1);
+ strbuf_release(&cap);
+ } else {
+ code = packet_write_fmt_gently(proc.in, "version=1\n");
+ }
+ if (!code)
+ code = packet_flush_gently(proc.in);
+
+ if (!code)
+ for (;;) {
+ int linelen;
+ enum packet_read_status status;
+
+ status = packet_reader_read(&reader);
+ if (status != PACKET_READ_NORMAL) {
+ /* Check whether proc-receive exited abnormally */
+ if (status == PACKET_READ_EOF)
+ code = -1;
+ break;
+ }
+
+ if (reader.pktlen > 8 && starts_with(reader.line, "version=")) {
+ version = atoi(reader.line + 8);
+ linelen = strlen(reader.line);
+ if (linelen < reader.pktlen) {
+ const char *feature_list = reader.line + linelen + 1;
+ if (parse_feature_request(feature_list, "push-options"))
+ hook_use_push_options = 1;
+ }
+ }
+ }
+
+ if (code) {
+ strbuf_addstr(&errmsg, "fail to negotiate version with proc-receive hook");
+ goto cleanup;
+ }
+
+ switch (version) {
+ case 0:
+ /* fallthrough */
+ case 1:
+ break;
+ default:
+ strbuf_addf(&errmsg, "proc-receive version '%d' is not supported",
+ version);
+ code = -1;
+ goto cleanup;
+ }
+
+ /* Send commands */
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!cmd->run_proc_receive || cmd->skip_update || cmd->error_string)
+ continue;
+ code = packet_write_fmt_gently(proc.in, "%s %s %s",
+ oid_to_hex(&cmd->old_oid),
+ oid_to_hex(&cmd->new_oid),
+ cmd->ref_name);
+ if (code)
+ break;
+ }
+ if (!code)
+ code = packet_flush_gently(proc.in);
+ if (code) {
+ strbuf_addstr(&errmsg, "fail to write commands to proc-receive hook");
+ goto cleanup;
+ }
+
+ /* Send push options */
+ if (hook_use_push_options) {
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, push_options) {
+ code = packet_write_fmt_gently(proc.in, "%s", item->string);
+ if (code)
+ break;
+ }
+ if (!code)
+ code = packet_flush_gently(proc.in);
+ if (code) {
+ strbuf_addstr(&errmsg,
+ "fail to write push-options to proc-receive hook");
+ goto cleanup;
+ }
+ }
+
+ /* Read result from proc-receive */
+ code = read_proc_receive_report(&reader, commands, &errmsg);
+
+cleanup:
+ close(proc.in);
+ close(proc.out);
+ if (use_sideband)
+ finish_async(&muxer);
+ if (finish_command(&proc))
+ code = -1;
+	if (errmsg.len > 0) {
+ char *p = errmsg.buf;
+
+ p += errmsg.len - 1;
+ if (*p == '\n')
+ *p = '\0';
+ rp_error("%s", errmsg.buf);
+ strbuf_release(&errmsg);
+ }
+ sigchain_pop(SIGPIPE);
+
+ return code;
+}
+
+static char *refuse_unconfigured_deny_msg =
+ N_("By default, updating the current branch in a non-bare repository\n"
+ "is denied, because it will make the index and work tree inconsistent\n"
+ "with what you pushed, and will require 'git reset --hard' to match\n"
+ "the work tree to HEAD.\n"
+ "\n"
+ "You can set the 'receive.denyCurrentBranch' configuration variable\n"
+ "to 'ignore' or 'warn' in the remote repository to allow pushing into\n"
+ "its current branch; however, this is not recommended unless you\n"
+ "arranged to update its work tree to match what you pushed in some\n"
+ "other way.\n"
+ "\n"
+ "To squelch this message and still keep the default behaviour, set\n"
+ "'receive.denyCurrentBranch' configuration variable to 'refuse'.");
+
+static void refuse_unconfigured_deny(void)
+{
+ rp_error("%s", _(refuse_unconfigured_deny_msg));
+}
+
+static char *refuse_unconfigured_deny_delete_current_msg =
+ N_("By default, deleting the current branch is denied, because the next\n"
+ "'git clone' won't result in any file checked out, causing confusion.\n"
+ "\n"
+ "You can set 'receive.denyDeleteCurrent' configuration variable to\n"
+ "'warn' or 'ignore' in the remote repository to allow deleting the\n"
+ "current branch, with or without a warning message.\n"
+ "\n"
+ "To squelch this message, you can set it to 'refuse'.");
+
+static void refuse_unconfigured_deny_delete_current(void)
+{
+ rp_error("%s", _(refuse_unconfigured_deny_delete_current_msg));
+}
+
+static const struct object_id *command_singleton_iterator(void *cb_data);
+static int update_shallow_ref(struct command *cmd, struct shallow_info *si)
+{
+ struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT;
+ struct oid_array extra = OID_ARRAY_INIT;
+ struct check_connected_options opt = CHECK_CONNECTED_INIT;
+ uint32_t mask = 1 << (cmd->index % 32);
+ int i;
+
+ trace_printf_key(&trace_shallow,
+ "shallow: update_shallow_ref %s\n", cmd->ref_name);
+ for (i = 0; i < si->shallow->nr; i++)
+ if (si->used_shallow[i] &&
+ (si->used_shallow[i][cmd->index / 32] & mask) &&
+ !delayed_reachability_test(si, i))
+ oid_array_append(&extra, &si->shallow->oid[i]);
+
+ opt.env = tmp_objdir_env(tmp_objdir);
+ setup_alternate_shallow(&shallow_lock, &opt.shallow_file, &extra);
+ if (check_connected(command_singleton_iterator, cmd, &opt)) {
+ rollback_shallow_file(the_repository, &shallow_lock);
+ oid_array_clear(&extra);
+ return -1;
+ }
+
+ commit_shallow_file(the_repository, &shallow_lock);
+
+ /*
+ * Make sure setup_alternate_shallow() for the next ref does
+	 * not lose these new roots.
+ */
+ for (i = 0; i < extra.nr; i++)
+ register_shallow(the_repository, &extra.oid[i]);
+
+ si->shallow_ref[cmd->index] = 0;
+ oid_array_clear(&extra);
+ return 0;
+}
+
+/*
+ * NEEDSWORK: we should consolidate various implementations of "are we
+ * on an unborn branch?" test into one, and make the unified one more
+ * robust. !get_sha1() based check used here and elsewhere would not
+ * allow us to tell an unborn branch from a corrupt ref, for example.
+ * For the purpose of fixing "deploy-to-update does not work when
+ * pushing into an empty repository" issue, this should suffice for
+ * now.
+ */
+static int head_has_history(void)
+{
+ struct object_id oid;
+
+ return !get_oid("HEAD", &oid);
+}
+
+static const char *push_to_deploy(unsigned char *sha1,
+ struct strvec *env,
+ const char *work_tree)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&child.args, "update-index", "-q", "--ignore-submodules",
+ "--refresh", NULL);
+ strvec_pushv(&child.env, env->v);
+ child.dir = work_tree;
+ child.no_stdin = 1;
+ child.stdout_to_stderr = 1;
+ child.git_cmd = 1;
+ if (run_command(&child))
+ return "Up-to-date check failed";
+
+ /* run_command() does not clean up completely; reinitialize */
+ child_process_init(&child);
+ strvec_pushl(&child.args, "diff-files", "--quiet",
+ "--ignore-submodules", "--", NULL);
+ strvec_pushv(&child.env, env->v);
+ child.dir = work_tree;
+ child.no_stdin = 1;
+ child.stdout_to_stderr = 1;
+ child.git_cmd = 1;
+ if (run_command(&child))
+ return "Working directory has unstaged changes";
+
+ child_process_init(&child);
+ strvec_pushl(&child.args, "diff-index", "--quiet", "--cached",
+ "--ignore-submodules",
+ /* diff-index with either HEAD or an empty tree */
+ head_has_history() ? "HEAD" : empty_tree_oid_hex(),
+ "--", NULL);
+ strvec_pushv(&child.env, env->v);
+ child.no_stdin = 1;
+ child.no_stdout = 1;
+ child.stdout_to_stderr = 0;
+ child.git_cmd = 1;
+ if (run_command(&child))
+ return "Working directory has staged changes";
+
+ child_process_init(&child);
+ strvec_pushl(&child.args, "read-tree", "-u", "-m", hash_to_hex(sha1),
+ NULL);
+ strvec_pushv(&child.env, env->v);
+ child.dir = work_tree;
+ child.no_stdin = 1;
+ child.no_stdout = 1;
+ child.stdout_to_stderr = 0;
+ child.git_cmd = 1;
+ if (run_command(&child))
+ return "Could not update working tree to new HEAD";
+
+ return NULL;
+}
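+
+/*
+ * Editor's note: the sequence above is roughly equivalent to running, in
+ * the target worktree,
+ *
+ *	git update-index -q --ignore-submodules --refresh &&
+ *	git diff-files --quiet --ignore-submodules -- &&
+ *	git diff-index --quiet --cached --ignore-submodules HEAD -- &&
+ *	git read-tree -u -m <pushed-commit>
+ *
+ * (using the empty tree instead of HEAD on an unborn branch), refusing the
+ * push-to-deploy if the index or worktree has local changes.
+ */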
+
+static const char *push_to_checkout_hook = "push-to-checkout";
+
+static const char *push_to_checkout(unsigned char *hash,
+ int *invoked_hook,
+ struct strvec *env,
+ const char *work_tree)
+{
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+ opt.invoked_hook = invoked_hook;
+
+ strvec_pushf(env, "GIT_WORK_TREE=%s", absolute_path(work_tree));
+ strvec_pushv(&opt.env, env->v);
+ strvec_push(&opt.args, hash_to_hex(hash));
+ if (run_hooks_opt(push_to_checkout_hook, &opt))
+ return "push-to-checkout hook declined";
+ else
+ return NULL;
+}
+
+static const char *update_worktree(unsigned char *sha1, const struct worktree *worktree)
+{
+ const char *retval, *git_dir;
+ struct strvec env = STRVEC_INIT;
+ int invoked_hook;
+
+ if (!worktree || !worktree->path)
+ BUG("worktree->path must be non-NULL");
+
+ if (worktree->is_bare)
+ return "denyCurrentBranch = updateInstead needs a worktree";
+ git_dir = get_worktree_git_dir(worktree);
+
+ strvec_pushf(&env, "GIT_DIR=%s", absolute_path(git_dir));
+
+ retval = push_to_checkout(sha1, &invoked_hook, &env, worktree->path);
+ if (!invoked_hook)
+ retval = push_to_deploy(sha1, &env, worktree->path);
+
+ strvec_clear(&env);
+ return retval;
+}
+
+static const char *update(struct command *cmd, struct shallow_info *si)
+{
+ const char *name = cmd->ref_name;
+ struct strbuf namespaced_name_buf = STRBUF_INIT;
+ static char *namespaced_name;
+ const char *ret;
+ struct object_id *old_oid = &cmd->old_oid;
+ struct object_id *new_oid = &cmd->new_oid;
+ int do_update_worktree = 0;
+ struct worktree **worktrees = get_worktrees();
+ const struct worktree *worktree =
+ find_shared_symref(worktrees, "HEAD", name);
+
+ /* only refs/... are allowed */
+ if (!starts_with(name, "refs/") || check_refname_format(name + 5, 0)) {
+ rp_error("refusing to create funny ref '%s' remotely", name);
+ ret = "funny refname";
+ goto out;
+ }
+
+ strbuf_addf(&namespaced_name_buf, "%s%s", get_git_namespace(), name);
+ free(namespaced_name);
+ namespaced_name = strbuf_detach(&namespaced_name_buf, NULL);
+
+ if (worktree && !worktree->is_bare) {
+ switch (deny_current_branch) {
+ case DENY_IGNORE:
+ break;
+ case DENY_WARN:
+ rp_warning("updating the current branch");
+ break;
+ case DENY_REFUSE:
+ case DENY_UNCONFIGURED:
+ rp_error("refusing to update checked out branch: %s", name);
+ if (deny_current_branch == DENY_UNCONFIGURED)
+ refuse_unconfigured_deny();
+ ret = "branch is currently checked out";
+ goto out;
+ case DENY_UPDATE_INSTEAD:
+ /* pass -- let other checks intervene first */
+ do_update_worktree = 1;
+ break;
+ }
+ }
+
+ if (!is_null_oid(new_oid) && !has_object_file(new_oid)) {
+ error("unpack should have generated %s, "
+ "but I can't find it!", oid_to_hex(new_oid));
+ ret = "bad pack";
+ goto out;
+ }
+
+ if (!is_null_oid(old_oid) && is_null_oid(new_oid)) {
+ if (deny_deletes && starts_with(name, "refs/heads/")) {
+ rp_error("denying ref deletion for %s", name);
+ ret = "deletion prohibited";
+ goto out;
+ }
+
+ if (worktree || (head_name && !strcmp(namespaced_name, head_name))) {
+ switch (deny_delete_current) {
+ case DENY_IGNORE:
+ break;
+ case DENY_WARN:
+ rp_warning("deleting the current branch");
+ break;
+ case DENY_REFUSE:
+ case DENY_UNCONFIGURED:
+ case DENY_UPDATE_INSTEAD:
+ if (deny_delete_current == DENY_UNCONFIGURED)
+ refuse_unconfigured_deny_delete_current();
+ rp_error("refusing to delete the current branch: %s", name);
+ ret = "deletion of the current branch prohibited";
+ goto out;
+ default:
+ ret = "Invalid denyDeleteCurrent setting";
+ goto out;
+ }
+ }
+ }
+
+ if (deny_non_fast_forwards && !is_null_oid(new_oid) &&
+ !is_null_oid(old_oid) &&
+ starts_with(name, "refs/heads/")) {
+ struct object *old_object, *new_object;
+ struct commit *old_commit, *new_commit;
+
+ old_object = parse_object(the_repository, old_oid);
+ new_object = parse_object(the_repository, new_oid);
+
+ if (!old_object || !new_object ||
+ old_object->type != OBJ_COMMIT ||
+ new_object->type != OBJ_COMMIT) {
+ error("bad sha1 objects for %s", name);
+ ret = "bad ref";
+ goto out;
+ }
+ old_commit = (struct commit *)old_object;
+ new_commit = (struct commit *)new_object;
+ if (!in_merge_bases(old_commit, new_commit)) {
+ rp_error("denying non-fast-forward %s"
+ " (you should pull first)", name);
+ ret = "non-fast-forward";
+ goto out;
+ }
+ }
+ if (run_update_hook(cmd)) {
+ rp_error("hook declined to update %s", name);
+ ret = "hook declined";
+ goto out;
+ }
+
+ if (do_update_worktree) {
+ ret = update_worktree(new_oid->hash, worktree);
+ if (ret)
+ goto out;
+ }
+
+ if (is_null_oid(new_oid)) {
+ struct strbuf err = STRBUF_INIT;
+ if (!parse_object(the_repository, old_oid)) {
+ old_oid = NULL;
+ if (ref_exists(name)) {
+ rp_warning("allowing deletion of corrupt ref");
+ } else {
+ rp_warning("deleting a non-existent ref");
+ cmd->did_not_exist = 1;
+ }
+ }
+ if (ref_transaction_delete(transaction,
+ namespaced_name,
+ old_oid,
+ 0, "push", &err)) {
+ rp_error("%s", err.buf);
+ ret = "failed to delete";
+ } else {
+ ret = NULL; /* good */
+ }
+ strbuf_release(&err);
+ }
+ else {
+ struct strbuf err = STRBUF_INIT;
+ if (shallow_update && si->shallow_ref[cmd->index] &&
+ update_shallow_ref(cmd, si)) {
+ ret = "shallow error";
+ goto out;
+ }
+
+ if (ref_transaction_update(transaction,
+ namespaced_name,
+ new_oid, old_oid,
+ 0, "push",
+ &err)) {
+ rp_error("%s", err.buf);
+ ret = "failed to update ref";
+ } else {
+ ret = NULL; /* good */
+ }
+ strbuf_release(&err);
+ }
+
+out:
+ free_worktrees(worktrees);
+ return ret;
+}
+
+static void run_update_post_hook(struct command *commands)
+{
+ struct command *cmd;
+ struct child_process proc = CHILD_PROCESS_INIT;
+ const char *hook;
+
+ hook = find_hook("post-update");
+ if (!hook)
+ return;
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (cmd->error_string || cmd->did_not_exist)
+ continue;
+ if (!proc.args.nr)
+ strvec_push(&proc.args, hook);
+ strvec_push(&proc.args, cmd->ref_name);
+ }
+ if (!proc.args.nr)
+ return;
+
+ proc.no_stdin = 1;
+ proc.stdout_to_stderr = 1;
+ proc.err = use_sideband ? -1 : 0;
+ proc.trace2_hook_name = "post-update";
+
+ if (!start_command(&proc)) {
+ if (use_sideband)
+ copy_to_sideband(proc.err, -1, NULL);
+ finish_command(&proc);
+ }
+}
+
+static void check_aliased_update_internal(struct command *cmd,
+ struct string_list *list,
+ const char *dst_name, int flag)
+{
+ struct string_list_item *item;
+ struct command *dst_cmd;
+
+ if (!(flag & REF_ISSYMREF))
+ return;
+
+ if (!dst_name) {
+ rp_error("refusing update to broken symref '%s'", cmd->ref_name);
+ cmd->skip_update = 1;
+ cmd->error_string = "broken symref";
+ return;
+ }
+ dst_name = strip_namespace(dst_name);
+
+ if (!(item = string_list_lookup(list, dst_name)))
+ return;
+
+ cmd->skip_update = 1;
+
+ dst_cmd = (struct command *) item->util;
+
+ if (oideq(&cmd->old_oid, &dst_cmd->old_oid) &&
+ oideq(&cmd->new_oid, &dst_cmd->new_oid))
+ return;
+
+ dst_cmd->skip_update = 1;
+
+ rp_error("refusing inconsistent update between symref '%s' (%s..%s) and"
+ " its target '%s' (%s..%s)",
+ cmd->ref_name,
+ find_unique_abbrev(&cmd->old_oid, DEFAULT_ABBREV),
+ find_unique_abbrev(&cmd->new_oid, DEFAULT_ABBREV),
+ dst_cmd->ref_name,
+ find_unique_abbrev(&dst_cmd->old_oid, DEFAULT_ABBREV),
+ find_unique_abbrev(&dst_cmd->new_oid, DEFAULT_ABBREV));
+
+ cmd->error_string = dst_cmd->error_string =
+ "inconsistent aliased update";
+}
+
+static void check_aliased_update(struct command *cmd, struct string_list *list)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *dst_name;
+ int flag;
+
+ strbuf_addf(&buf, "%s%s", get_git_namespace(), cmd->ref_name);
+ dst_name = resolve_ref_unsafe(buf.buf, 0, NULL, &flag);
+ check_aliased_update_internal(cmd, list, dst_name, flag);
+ strbuf_release(&buf);
+}
+
+static void check_aliased_updates(struct command *commands)
+{
+ struct command *cmd;
+ struct string_list ref_list = STRING_LIST_INIT_NODUP;
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ struct string_list_item *item =
+ string_list_append(&ref_list, cmd->ref_name);
+ item->util = (void *)cmd;
+ }
+ string_list_sort(&ref_list);
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!cmd->error_string)
+ check_aliased_update(cmd, &ref_list);
+ }
+
+ string_list_clear(&ref_list, 0);
+}
+
+static const struct object_id *command_singleton_iterator(void *cb_data)
+{
+ struct command **cmd_list = cb_data;
+ struct command *cmd = *cmd_list;
+
+ if (!cmd || is_null_oid(&cmd->new_oid))
+ return NULL;
+ *cmd_list = NULL; /* this returns only one */
+ return &cmd->new_oid;
+}
+
+static void set_connectivity_errors(struct command *commands,
+ struct shallow_info *si)
+{
+ struct command *cmd;
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ struct command *singleton = cmd;
+ struct check_connected_options opt = CHECK_CONNECTED_INIT;
+
+ if (shallow_update && si->shallow_ref[cmd->index])
+ /* to be checked in update_shallow_ref() */
+ continue;
+
+ opt.env = tmp_objdir_env(tmp_objdir);
+ if (!check_connected(command_singleton_iterator, &singleton,
+ &opt))
+ continue;
+
+ cmd->error_string = "missing necessary objects";
+ }
+}
+
+struct iterate_data {
+ struct command *cmds;
+ struct shallow_info *si;
+};
+
+static const struct object_id *iterate_receive_command_list(void *cb_data)
+{
+ struct iterate_data *data = cb_data;
+ struct command **cmd_list = &data->cmds;
+ struct command *cmd = *cmd_list;
+
+ for (; cmd; cmd = cmd->next) {
+ if (shallow_update && data->si->shallow_ref[cmd->index])
+ /* to be checked in update_shallow_ref() */
+ continue;
+ if (!is_null_oid(&cmd->new_oid) && !cmd->skip_update) {
+ *cmd_list = cmd->next;
+ return &cmd->new_oid;
+ }
+ }
+ return NULL;
+}
+
+static void reject_updates_to_hidden(struct command *commands)
+{
+ struct strbuf refname_full = STRBUF_INIT;
+ size_t prefix_len;
+ struct command *cmd;
+
+ strbuf_addstr(&refname_full, get_git_namespace());
+ prefix_len = refname_full.len;
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (cmd->error_string)
+ continue;
+
+ strbuf_setlen(&refname_full, prefix_len);
+ strbuf_addstr(&refname_full, cmd->ref_name);
+
+ if (!ref_is_hidden(cmd->ref_name, refname_full.buf, &hidden_refs))
+ continue;
+ if (is_null_oid(&cmd->new_oid))
+ cmd->error_string = "deny deleting a hidden ref";
+ else
+ cmd->error_string = "deny updating a hidden ref";
+ }
+
+ strbuf_release(&refname_full);
+}
+
+static int should_process_cmd(struct command *cmd)
+{
+ return !cmd->error_string && !cmd->skip_update;
+}
+
+static void BUG_if_skipped_connectivity_check(struct command *commands,
+ struct shallow_info *si)
+{
+ struct command *cmd;
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (should_process_cmd(cmd) && si->shallow_ref[cmd->index])
+ bug("connectivity check has not been run on ref %s",
+ cmd->ref_name);
+ }
+ BUG_if_bug("connectivity check skipped???");
+}
+
+static void execute_commands_non_atomic(struct command *commands,
+ struct shallow_info *si)
+{
+ struct command *cmd;
+ struct strbuf err = STRBUF_INIT;
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!should_process_cmd(cmd) || cmd->run_proc_receive)
+ continue;
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction) {
+ rp_error("%s", err.buf);
+ strbuf_reset(&err);
+ cmd->error_string = "transaction failed to start";
+ continue;
+ }
+
+ cmd->error_string = update(cmd, si);
+
+ if (!cmd->error_string
+ && ref_transaction_commit(transaction, &err)) {
+ rp_error("%s", err.buf);
+ strbuf_reset(&err);
+ cmd->error_string = "failed to update ref";
+ }
+ ref_transaction_free(transaction);
+ }
+ strbuf_release(&err);
+}
+
+static void execute_commands_atomic(struct command *commands,
+ struct shallow_info *si)
+{
+ struct command *cmd;
+ struct strbuf err = STRBUF_INIT;
+ const char *reported_error = "atomic push failure";
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction) {
+ rp_error("%s", err.buf);
+ strbuf_reset(&err);
+ reported_error = "transaction failed to start";
+ goto failure;
+ }
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!should_process_cmd(cmd) || cmd->run_proc_receive)
+ continue;
+
+ cmd->error_string = update(cmd, si);
+
+ if (cmd->error_string)
+ goto failure;
+ }
+
+ if (ref_transaction_commit(transaction, &err)) {
+ rp_error("%s", err.buf);
+ reported_error = "atomic transaction failed";
+ goto failure;
+ }
+ goto cleanup;
+
+failure:
+ for (cmd = commands; cmd; cmd = cmd->next)
+ if (!cmd->error_string)
+ cmd->error_string = reported_error;
+
+cleanup:
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+}
+
+static void execute_commands(struct command *commands,
+ const char *unpacker_error,
+ struct shallow_info *si,
+ const struct string_list *push_options)
+{
+ struct check_connected_options opt = CHECK_CONNECTED_INIT;
+ struct command *cmd;
+ struct iterate_data data;
+ struct async muxer;
+ int err_fd = 0;
+ int run_proc_receive = 0;
+
+ if (unpacker_error) {
+ for (cmd = commands; cmd; cmd = cmd->next)
+ cmd->error_string = "unpacker error";
+ return;
+ }
+
+ if (use_sideband) {
+ memset(&muxer, 0, sizeof(muxer));
+ muxer.proc = copy_to_sideband;
+ muxer.in = -1;
+ if (!start_async(&muxer))
+ err_fd = muxer.in;
+ /* ...else, continue without relaying sideband */
+ }
+
+ data.cmds = commands;
+ data.si = si;
+ opt.err_fd = err_fd;
+ opt.progress = err_fd && !quiet;
+ opt.env = tmp_objdir_env(tmp_objdir);
+ opt.exclude_hidden_refs_section = "receive";
+
+ if (check_connected(iterate_receive_command_list, &data, &opt))
+ set_connectivity_errors(commands, si);
+
+ if (use_sideband)
+ finish_async(&muxer);
+
+ reject_updates_to_hidden(commands);
+
+ /*
+ * Try to find commands whose reference names have a special prefix, and
+ * mark them so that the external "proc-receive" hook is run for them later.
+ */
+ if (proc_receive_ref) {
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!should_process_cmd(cmd))
+ continue;
+
+ if (proc_receive_ref_matches(cmd)) {
+ cmd->run_proc_receive = RUN_PROC_RECEIVE_SCHEDULED;
+ run_proc_receive = 1;
+ }
+ }
+ }
+
+ if (run_receive_hook(commands, "pre-receive", 0, push_options)) {
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!cmd->error_string)
+ cmd->error_string = "pre-receive hook declined";
+ }
+ return;
+ }
+
+ /*
+ * If there is no command ready to run, return directly so that the
+ * temporary data in the quarantine area is destroyed.
+ */
+ for (cmd = commands; cmd && cmd->error_string; cmd = cmd->next)
+ ; /* nothing */
+ if (!cmd)
+ return;
+
+ /*
+ * Now we'll start writing out refs, which means the objects need
+ * to be in their final positions so that other processes can see them.
+ */
+ if (tmp_objdir_migrate(tmp_objdir) < 0) {
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!cmd->error_string)
+ cmd->error_string = "unable to migrate objects to permanent storage";
+ }
+ return;
+ }
+ tmp_objdir = NULL;
+
+ check_aliased_updates(commands);
+
+ free(head_name_to_free);
+ head_name = head_name_to_free = resolve_refdup("HEAD", 0, NULL, NULL);
+
+ if (run_proc_receive &&
+ run_proc_receive_hook(commands, push_options))
+ for (cmd = commands; cmd; cmd = cmd->next)
+ if (!cmd->error_string &&
+ !(cmd->run_proc_receive & RUN_PROC_RECEIVE_RETURNED) &&
+ (cmd->run_proc_receive || use_atomic))
+ cmd->error_string = "fail to run proc-receive hook";
+
+ if (use_atomic)
+ execute_commands_atomic(commands, si);
+ else
+ execute_commands_non_atomic(commands, si);
+
+ if (shallow_update)
+ BUG_if_skipped_connectivity_check(commands, si);
+}
+
+static struct command **queue_command(struct command **tail,
+ const char *line,
+ int linelen)
+{
+ struct object_id old_oid, new_oid;
+ struct command *cmd;
+ const char *refname;
+ int reflen;
+ const char *p;
+
+ if (parse_oid_hex(line, &old_oid, &p) ||
+ *p++ != ' ' ||
+ parse_oid_hex(p, &new_oid, &p) ||
+ *p++ != ' ')
+ die("protocol error: expected old/new/ref, got '%s'", line);
+
+ refname = p;
+ reflen = linelen - (p - line);
+ FLEX_ALLOC_MEM(cmd, ref_name, refname, reflen);
+ oidcpy(&cmd->old_oid, &old_oid);
+ oidcpy(&cmd->new_oid, &new_oid);
+ *tail = cmd;
+ return &cmd->next;
+}
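+
+/*
+ * Shape of the wire data parsed by queue_command() above (a sketch, not a
+ * capture): each command pkt-line is
+ *
+ *     <old-oid> SP <new-oid> SP <ref-name>
+ *
+ * where an all-zero old-oid asks for a ref creation and an all-zero
+ * new-oid for a deletion.
+ */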
+
+static void queue_commands_from_cert(struct command **tail,
+ struct strbuf *push_cert)
+{
+ const char *boc, *eoc;
+
+ if (*tail)
+ die("protocol error: got both push certificate and unsigned commands");
+
+ boc = strstr(push_cert->buf, "\n\n");
+ if (!boc)
+ die("malformed push certificate %.*s", 100, push_cert->buf);
+ else
+ boc += 2;
+ eoc = push_cert->buf + parse_signed_buffer(push_cert->buf, push_cert->len);
+
+ while (boc < eoc) {
+ const char *eol = memchr(boc, '\n', eoc - boc);
+ tail = queue_command(tail, boc, eol ? eol - boc : eoc - boc);
+ boc = eol ? eol + 1 : eoc;
+ }
+}
+
+static struct command *read_head_info(struct packet_reader *reader,
+ struct oid_array *shallow)
+{
+ struct command *commands = NULL;
+ struct command **p = &commands;
+ for (;;) {
+ int linelen;
+
+ if (packet_reader_read(reader) != PACKET_READ_NORMAL)
+ break;
+
+ if (reader->pktlen > 8 && starts_with(reader->line, "shallow ")) {
+ struct object_id oid;
+ if (get_oid_hex(reader->line + 8, &oid))
+ die("protocol error: expected shallow sha, got '%s'",
+ reader->line + 8);
+ oid_array_append(shallow, &oid);
+ continue;
+ }
+
+ linelen = strlen(reader->line);
+ if (linelen < reader->pktlen) {
+ const char *feature_list = reader->line + linelen + 1;
+ const char *hash = NULL;
+ const char *client_sid;
+ int len = 0;
+ if (parse_feature_request(feature_list, "report-status"))
+ report_status = 1;
+ if (parse_feature_request(feature_list, "report-status-v2"))
+ report_status_v2 = 1;
+ if (parse_feature_request(feature_list, "side-band-64k"))
+ use_sideband = LARGE_PACKET_MAX;
+ if (parse_feature_request(feature_list, "quiet"))
+ quiet = 1;
+ if (advertise_atomic_push
+ && parse_feature_request(feature_list, "atomic"))
+ use_atomic = 1;
+ if (advertise_push_options
+ && parse_feature_request(feature_list, "push-options"))
+ use_push_options = 1;
+ hash = parse_feature_value(feature_list, "object-format", &len, NULL);
+ if (!hash) {
+ hash = hash_algos[GIT_HASH_SHA1].name;
+ len = strlen(hash);
+ }
+ if (xstrncmpz(the_hash_algo->name, hash, len))
+ die("error: unsupported object format '%s'", hash);
+ client_sid = parse_feature_value(feature_list, "session-id", &len, NULL);
+ if (client_sid) {
+ char *sid = xstrndup(client_sid, len);
+ trace2_data_string("transfer", NULL, "client-sid", client_sid);
+ free(sid);
+ }
+ }
+
+ if (!strcmp(reader->line, "push-cert")) {
+ int true_flush = 0;
+ int saved_options = reader->options;
+ reader->options &= ~PACKET_READ_CHOMP_NEWLINE;
+
+ for (;;) {
+ packet_reader_read(reader);
+ if (reader->status == PACKET_READ_FLUSH) {
+ true_flush = 1;
+ break;
+ }
+ if (reader->status != PACKET_READ_NORMAL) {
+ die("protocol error: got an unexpected packet");
+ }
+ if (!strcmp(reader->line, "push-cert-end\n"))
+ break; /* end of cert */
+ strbuf_addstr(&push_cert, reader->line);
+ }
+ reader->options = saved_options;
+
+ if (true_flush)
+ break;
+ continue;
+ }
+
+ p = queue_command(p, reader->line, linelen);
+ }
+
+ if (push_cert.len)
+ queue_commands_from_cert(p, &push_cert);
+
+ return commands;
+}
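+
+/*
+ * Rough shape of the request consumed above (a sketch, not a captured
+ * session):
+ *
+ *     shallow <oid>                                   (zero or more)
+ *     <old-oid> <new-oid> refs/heads/main\0report-status side-band-64k object-format=sha1
+ *     <old-oid> <new-oid> refs/heads/topic            (capabilities only on the first line)
+ *     flush-pkt
+ *
+ * A "push-cert" line instead introduces a signed certificate whose payload
+ * carries the same old/new/ref lines and is queued via
+ * queue_commands_from_cert().
+ */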
+
+static void read_push_options(struct packet_reader *reader,
+ struct string_list *options)
+{
+ while (1) {
+ if (packet_reader_read(reader) != PACKET_READ_NORMAL)
+ break;
+
+ string_list_append(options, reader->line);
+ }
+}
+
+static const char *parse_pack_header(struct pack_header *hdr)
+{
+ switch (read_pack_header(0, hdr)) {
+ case PH_ERROR_EOF:
+ return "eof before pack header was fully read";
+
+ case PH_ERROR_PACK_SIGNATURE:
+ return "protocol error (pack signature mismatch detected)";
+
+ case PH_ERROR_PROTOCOL:
+ return "protocol error (pack version unsupported)";
+
+ default:
+ return "unknown error in parse_pack_header";
+
+ case 0:
+ return NULL;
+ }
+}
+
+static const char *pack_lockfile;
+
+static void push_header_arg(struct strvec *args, struct pack_header *hdr)
+{
+ strvec_pushf(args, "--pack_header=%"PRIu32",%"PRIu32,
+ ntohl(hdr->hdr_version), ntohl(hdr->hdr_entries));
+}
+
+static const char *unpack(int err_fd, struct shallow_info *si)
+{
+ struct pack_header hdr;
+ const char *hdr_err;
+ int status;
+ struct child_process child = CHILD_PROCESS_INIT;
+ int fsck_objects = (receive_fsck_objects >= 0
+ ? receive_fsck_objects
+ : transfer_fsck_objects >= 0
+ ? transfer_fsck_objects
+ : 0);
+
+ hdr_err = parse_pack_header(&hdr);
+ if (hdr_err) {
+ if (err_fd > 0)
+ close(err_fd);
+ return hdr_err;
+ }
+
+ if (si->nr_ours || si->nr_theirs) {
+ alt_shallow_file = setup_temporary_shallow(si->shallow);
+ strvec_push(&child.args, "--shallow-file");
+ strvec_push(&child.args, alt_shallow_file);
+ }
+
+ tmp_objdir = tmp_objdir_create("incoming");
+ if (!tmp_objdir) {
+ if (err_fd > 0)
+ close(err_fd);
+ return "unable to create temporary object directory";
+ }
+ strvec_pushv(&child.env, tmp_objdir_env(tmp_objdir));
+
+ /*
+ * Normally we just pass the tmp_objdir environment to the child
+ * processes that do the heavy lifting, but we may need to see these
+ * objects ourselves to set up shallow information.
+ */
+ tmp_objdir_add_as_alternate(tmp_objdir);
+
+ if (ntohl(hdr.hdr_entries) < unpack_limit) {
+ strvec_push(&child.args, "unpack-objects");
+ push_header_arg(&child.args, &hdr);
+ if (quiet)
+ strvec_push(&child.args, "-q");
+ if (fsck_objects)
+ strvec_pushf(&child.args, "--strict%s",
+ fsck_msg_types.buf);
+ if (max_input_size)
+ strvec_pushf(&child.args, "--max-input-size=%"PRIuMAX,
+ (uintmax_t)max_input_size);
+ child.no_stdout = 1;
+ child.err = err_fd;
+ child.git_cmd = 1;
+ status = run_command(&child);
+ if (status)
+ return "unpack-objects abnormal exit";
+ } else {
+ char hostname[HOST_NAME_MAX + 1];
+
+ strvec_pushl(&child.args, "index-pack", "--stdin", NULL);
+ push_header_arg(&child.args, &hdr);
+
+ if (xgethostname(hostname, sizeof(hostname)))
+ xsnprintf(hostname, sizeof(hostname), "localhost");
+ strvec_pushf(&child.args,
+ "--keep=receive-pack %"PRIuMAX" on %s",
+ (uintmax_t)getpid(),
+ hostname);
+
+ if (!quiet && err_fd)
+ strvec_push(&child.args, "--show-resolving-progress");
+ if (use_sideband)
+ strvec_push(&child.args, "--report-end-of-input");
+ if (fsck_objects)
+ strvec_pushf(&child.args, "--strict%s",
+ fsck_msg_types.buf);
+ if (!reject_thin)
+ strvec_push(&child.args, "--fix-thin");
+ if (max_input_size)
+ strvec_pushf(&child.args, "--max-input-size=%"PRIuMAX,
+ (uintmax_t)max_input_size);
+ child.out = -1;
+ child.err = err_fd;
+ child.git_cmd = 1;
+ status = start_command(&child);
+ if (status)
+ return "index-pack fork failed";
+ pack_lockfile = index_pack_lockfile(child.out, NULL);
+ close(child.out);
+ status = finish_command(&child);
+ if (status)
+ return "index-pack abnormal exit";
+ reprepare_packed_git(the_repository);
+ }
+ return NULL;
+}
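+
+/*
+ * Illustrative summary of the choice above: with, say,
+ *
+ *     [receive]
+ *         unpackLimit = 100
+ *
+ * a pack whose header advertises fewer than 100 objects is exploded into
+ * loose objects with "git unpack-objects", while a bigger pack is kept
+ * packed and indexed with "git index-pack --stdin" (plus --fix-thin by
+ * default).  Either child writes into the quarantine directory set up by
+ * tmp_objdir_create("incoming").
+ */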
+
+static const char *unpack_with_sideband(struct shallow_info *si)
+{
+ struct async muxer;
+ const char *ret;
+
+ if (!use_sideband)
+ return unpack(0, si);
+
+ use_keepalive = KEEPALIVE_AFTER_NUL;
+ memset(&muxer, 0, sizeof(muxer));
+ muxer.proc = copy_to_sideband;
+ muxer.in = -1;
+ if (start_async(&muxer))
+ return NULL;
+
+ ret = unpack(muxer.in, si);
+
+ finish_async(&muxer);
+ return ret;
+}
+
+static void prepare_shallow_update(struct shallow_info *si)
+{
+ int i, j, k, bitmap_size = DIV_ROUND_UP(si->ref->nr, 32);
+
+ ALLOC_ARRAY(si->used_shallow, si->shallow->nr);
+ assign_shallow_commits_to_refs(si, si->used_shallow, NULL);
+
+ CALLOC_ARRAY(si->need_reachability_test, si->shallow->nr);
+ CALLOC_ARRAY(si->reachable, si->shallow->nr);
+ CALLOC_ARRAY(si->shallow_ref, si->ref->nr);
+
+ for (i = 0; i < si->nr_ours; i++)
+ si->need_reachability_test[si->ours[i]] = 1;
+
+ for (i = 0; i < si->shallow->nr; i++) {
+ if (!si->used_shallow[i])
+ continue;
+ for (j = 0; j < bitmap_size; j++) {
+ if (!si->used_shallow[i][j])
+ continue;
+ si->need_reachability_test[i]++;
+ for (k = 0; k < 32; k++)
+ if (si->used_shallow[i][j] & (1U << k))
+ si->shallow_ref[j * 32 + k]++;
+ }
+
+ /*
+ * true for those that are associated with some refs and belong
+ * in the "ours" list, aka "step 7 not done yet"
+ */
+ si->need_reachability_test[i] =
+ si->need_reachability_test[i] > 1;
+ }
+
+ /*
+ * Keep hooks happy by forcing a temporary shallow file via an
+ * environment variable, because we can't add --shallow-file to
+ * every command. check_connected() will still be done against the
+ * true .git/shallow, though.
+ */
+ setenv(GIT_SHALLOW_FILE_ENVIRONMENT, alt_shallow_file, 1);
+}
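+
+/*
+ * An aside on the bitmaps above (not upstream text): used_shallow[i] holds
+ * bitmap_size 32-bit words with one bit per pushed ref; bit k of word j
+ * being set means shallow commit i is used by ref number j * 32 + k, so
+ * that ref's shallow_ref[] counter is incremented.  The final "> 1"
+ * comparison is meant to keep the reachability test only for commits that
+ * are both used by some ref and still on the "ours" list.
+ */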
+
+static void update_shallow_info(struct command *commands,
+ struct shallow_info *si,
+ struct oid_array *ref)
+{
+ struct command *cmd;
+ int *ref_status;
+ remove_nonexistent_theirs_shallow(si);
+ if (!si->nr_ours && !si->nr_theirs) {
+ shallow_update = 0;
+ return;
+ }
+
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (is_null_oid(&cmd->new_oid))
+ continue;
+ oid_array_append(ref, &cmd->new_oid);
+ cmd->index = ref->nr - 1;
+ }
+ si->ref = ref;
+
+ if (shallow_update) {
+ prepare_shallow_update(si);
+ return;
+ }
+
+ ALLOC_ARRAY(ref_status, ref->nr);
+ assign_shallow_commits_to_refs(si, NULL, ref_status);
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (is_null_oid(&cmd->new_oid))
+ continue;
+ if (ref_status[cmd->index]) {
+ cmd->error_string = "shallow update not allowed";
+ cmd->skip_update = 1;
+ }
+ }
+ free(ref_status);
+}
+
+static void report(struct command *commands, const char *unpack_status)
+{
+ struct command *cmd;
+ struct strbuf buf = STRBUF_INIT;
+
+ packet_buf_write(&buf, "unpack %s\n",
+ unpack_status ? unpack_status : "ok");
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!cmd->error_string)
+ packet_buf_write(&buf, "ok %s\n",
+ cmd->ref_name);
+ else
+ packet_buf_write(&buf, "ng %s %s\n",
+ cmd->ref_name, cmd->error_string);
+ }
+ packet_buf_flush(&buf);
+
+ if (use_sideband)
+ send_sideband(1, 1, buf.buf, buf.len, use_sideband);
+ else
+ write_or_die(1, buf.buf, buf.len);
+ strbuf_release(&buf);
+}
+
+static void report_v2(struct command *commands, const char *unpack_status)
+{
+ struct command *cmd;
+ struct strbuf buf = STRBUF_INIT;
+ struct ref_push_report *report;
+
+ packet_buf_write(&buf, "unpack %s\n",
+ unpack_status ? unpack_status : "ok");
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ int count = 0;
+
+ if (cmd->error_string) {
+ packet_buf_write(&buf, "ng %s %s\n",
+ cmd->ref_name,
+ cmd->error_string);
+ continue;
+ }
+ packet_buf_write(&buf, "ok %s\n",
+ cmd->ref_name);
+ for (report = cmd->report; report; report = report->next) {
+ if (count++ > 0)
+ packet_buf_write(&buf, "ok %s\n",
+ cmd->ref_name);
+ if (report->ref_name)
+ packet_buf_write(&buf, "option refname %s\n",
+ report->ref_name);
+ if (report->old_oid)
+ packet_buf_write(&buf, "option old-oid %s\n",
+ oid_to_hex(report->old_oid));
+ if (report->new_oid)
+ packet_buf_write(&buf, "option new-oid %s\n",
+ oid_to_hex(report->new_oid));
+ if (report->forced_update)
+ packet_buf_write(&buf, "option forced-update\n");
+ }
+ }
+ packet_buf_flush(&buf);
+
+ if (use_sideband)
+ send_sideband(1, 1, buf.buf, buf.len, use_sideband);
+ else
+ write_or_die(1, buf.buf, buf.len);
+ strbuf_release(&buf);
+}
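+
+/*
+ * A mocked-up report-status-v2 reply, for orientation only (ref names and
+ * placeholders are invented):
+ *
+ *     unpack ok
+ *     ok refs/for/main
+ *     option refname refs/pull/1/head
+ *     option new-oid <oid>
+ *     ng refs/heads/frozen deny updating a hidden ref
+ *     flush-pkt
+ *
+ * A plain report-status reply consists of the "unpack", "ok" and "ng"
+ * lines only.
+ */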
+
+static int delete_only(struct command *commands)
+{
+ struct command *cmd;
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!is_null_oid(&cmd->new_oid))
+ return 0;
+ }
+ return 1;
+}
+
+int cmd_receive_pack(int argc, const char **argv, const char *prefix)
+{
+ int advertise_refs = 0;
+ struct command *commands;
+ struct oid_array shallow = OID_ARRAY_INIT;
+ struct oid_array ref = OID_ARRAY_INIT;
+ struct shallow_info si;
+ struct packet_reader reader;
+
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("quiet")),
+ OPT_HIDDEN_BOOL(0, "stateless-rpc", &stateless_rpc, NULL),
+ OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs, NULL),
+ OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"),
+ OPT_HIDDEN_BOOL(0, "reject-thin-pack-for-testing", &reject_thin, NULL),
+ OPT_END()
+ };
+
+ packet_trace_identity("receive-pack");
+
+ argc = parse_options(argc, argv, prefix, options, receive_pack_usage, 0);
+
+ if (argc > 1)
+ usage_msg_opt(_("too many arguments"), receive_pack_usage, options);
+ if (argc == 0)
+ usage_msg_opt(_("you must specify a directory"), receive_pack_usage, options);
+
+ service_dir = argv[0];
+
+ setup_path();
+
+ if (!enter_repo(service_dir, 0))
+ die("'%s' does not appear to be a git repository", service_dir);
+
+ git_config(receive_pack_config, NULL);
+ if (cert_nonce_seed)
+ push_cert_nonce = prepare_push_cert_nonce(service_dir, time(NULL));
+
+ if (0 <= transfer_unpack_limit)
+ unpack_limit = transfer_unpack_limit;
+ else if (0 <= receive_unpack_limit)
+ unpack_limit = receive_unpack_limit;
+
+ switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ /*
+ * push support for protocol v2 has not been implemented yet,
+ * so ignore the request to use v2 and fall back to using v0.
+ */
+ break;
+ case protocol_v1:
+ /*
+ * v1 is just the original protocol with a version string,
+ * so just fall through after writing the version string.
+ */
+ if (advertise_refs || !stateless_rpc)
+ packet_write_fmt(1, "version 1\n");
+
+ /* fallthrough */
+ case protocol_v0:
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
+
+ if (advertise_refs || !stateless_rpc) {
+ write_head_info();
+ }
+ if (advertise_refs)
+ return 0;
+
+ packet_reader_init(&reader, 0, NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_DIE_ON_ERR_PACKET);
+
+ if ((commands = read_head_info(&reader, &shallow))) {
+ const char *unpack_status = NULL;
+ struct string_list push_options = STRING_LIST_INIT_DUP;
+
+ if (use_push_options)
+ read_push_options(&reader, &push_options);
+ if (!check_cert_push_options(&push_options)) {
+ struct command *cmd;
+ for (cmd = commands; cmd; cmd = cmd->next)
+ cmd->error_string = "inconsistent push options";
+ }
+
+ prepare_shallow_info(&si, &shallow);
+ if (!si.nr_ours && !si.nr_theirs)
+ shallow_update = 0;
+ if (!delete_only(commands)) {
+ unpack_status = unpack_with_sideband(&si);
+ update_shallow_info(commands, &si, &ref);
+ }
+ use_keepalive = KEEPALIVE_ALWAYS;
+ execute_commands(commands, unpack_status, &si,
+ &push_options);
+ if (pack_lockfile)
+ unlink_or_warn(pack_lockfile);
+ sigchain_push(SIGPIPE, SIG_IGN);
+ if (report_status_v2)
+ report_v2(commands, unpack_status);
+ else if (report_status)
+ report(commands, unpack_status);
+ sigchain_pop(SIGPIPE);
+ run_receive_hook(commands, "post-receive", 1,
+ &push_options);
+ run_update_post_hook(commands);
+ string_list_clear(&push_options, 0);
+ if (auto_gc) {
+ struct child_process proc = CHILD_PROCESS_INIT;
+
+ proc.no_stdin = 1;
+ proc.stdout_to_stderr = 1;
+ proc.err = use_sideband ? -1 : 0;
+ proc.git_cmd = proc.close_object_store = 1;
+ strvec_pushl(&proc.args, "gc", "--auto", "--quiet",
+ NULL);
+
+ if (!start_command(&proc)) {
+ if (use_sideband)
+ copy_to_sideband(proc.err, -1, NULL);
+ finish_command(&proc);
+ }
+ }
+ if (auto_update_server_info)
+ update_server_info(0);
+ clear_shallow_info(&si);
+ }
+ if (use_sideband)
+ packet_flush(1);
+ oid_array_clear(&shallow);
+ oid_array_clear(&ref);
+ string_list_clear(&hidden_refs, 0);
+ free((void *)push_cert_nonce);
+ return 0;
+}
diff --git a/builtin/reflog.c b/builtin/reflog.c
new file mode 100644
index 0000000..270681d
--- /dev/null
+++ b/builtin/reflog.c
@@ -0,0 +1,430 @@
+#include "builtin.h"
+#include "config.h"
+#include "revision.h"
+#include "reachable.h"
+#include "worktree.h"
+#include "reflog.h"
+
+#define BUILTIN_REFLOG_SHOW_USAGE \
+ N_("git reflog [show] [<log-options>] [<ref>]")
+
+#define BUILTIN_REFLOG_EXPIRE_USAGE \
+ N_("git reflog expire [--expire=<time>] [--expire-unreachable=<time>]\n" \
+ " [--rewrite] [--updateref] [--stale-fix]\n" \
+ " [--dry-run | -n] [--verbose] [--all [--single-worktree] | <refs>...]")
+
+#define BUILTIN_REFLOG_DELETE_USAGE \
+ N_("git reflog delete [--rewrite] [--updateref]\n" \
+ " [--dry-run | -n] [--verbose] <ref>@{<specifier>}...")
+
+#define BUILTIN_REFLOG_EXISTS_USAGE \
+ N_("git reflog exists <ref>")
+
+static const char *const reflog_show_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ NULL,
+};
+
+static const char *const reflog_expire_usage[] = {
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ NULL
+};
+
+static const char *const reflog_delete_usage[] = {
+ BUILTIN_REFLOG_DELETE_USAGE,
+ NULL
+};
+
+static const char *const reflog_exists_usage[] = {
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL,
+};
+
+static const char *const reflog_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ BUILTIN_REFLOG_DELETE_USAGE,
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL
+};
+
+static timestamp_t default_reflog_expire;
+static timestamp_t default_reflog_expire_unreachable;
+
+struct worktree_reflogs {
+ struct worktree *worktree;
+ struct string_list reflogs;
+};
+
+static int collect_reflog(const char *ref, const struct object_id *oid UNUSED,
+ int flags UNUSED, void *cb_data)
+{
+ struct worktree_reflogs *cb = cb_data;
+ struct worktree *worktree = cb->worktree;
+ struct strbuf newref = STRBUF_INIT;
+
+ /*
+ * Avoid collecting the same shared ref multiple times, because
+ * it is available via all worktrees.
+ */
+ if (!worktree->is_current &&
+ parse_worktree_ref(ref, NULL, NULL, NULL) == REF_WORKTREE_SHARED)
+ return 0;
+
+ strbuf_worktree_ref(worktree, &newref, ref);
+ string_list_append_nodup(&cb->reflogs, strbuf_detach(&newref, NULL));
+
+ return 0;
+}
+
+static struct reflog_expire_cfg {
+ struct reflog_expire_cfg *next;
+ timestamp_t expire_total;
+ timestamp_t expire_unreachable;
+ char pattern[FLEX_ARRAY];
+} *reflog_expire_cfg, **reflog_expire_cfg_tail;
+
+static struct reflog_expire_cfg *find_cfg_ent(const char *pattern, size_t len)
+{
+ struct reflog_expire_cfg *ent;
+
+ if (!reflog_expire_cfg_tail)
+ reflog_expire_cfg_tail = &reflog_expire_cfg;
+
+ for (ent = reflog_expire_cfg; ent; ent = ent->next)
+ if (!strncmp(ent->pattern, pattern, len) &&
+ ent->pattern[len] == '\0')
+ return ent;
+
+ FLEX_ALLOC_MEM(ent, pattern, pattern, len);
+ *reflog_expire_cfg_tail = ent;
+ reflog_expire_cfg_tail = &(ent->next);
+ return ent;
+}
+
+/* expiry timer slot */
+#define EXPIRE_TOTAL 01
+#define EXPIRE_UNREACH 02
+
+static int reflog_expire_config(const char *var, const char *value, void *cb)
+{
+ const char *pattern, *key;
+ size_t pattern_len;
+ timestamp_t expire;
+ int slot;
+ struct reflog_expire_cfg *ent;
+
+ if (parse_config_key(var, "gc", &pattern, &pattern_len, &key) < 0)
+ return git_default_config(var, value, cb);
+
+ if (!strcmp(key, "reflogexpire")) {
+ slot = EXPIRE_TOTAL;
+ if (git_config_expiry_date(&expire, var, value))
+ return -1;
+ } else if (!strcmp(key, "reflogexpireunreachable")) {
+ slot = EXPIRE_UNREACH;
+ if (git_config_expiry_date(&expire, var, value))
+ return -1;
+ } else
+ return git_default_config(var, value, cb);
+
+ if (!pattern) {
+ switch (slot) {
+ case EXPIRE_TOTAL:
+ default_reflog_expire = expire;
+ break;
+ case EXPIRE_UNREACH:
+ default_reflog_expire_unreachable = expire;
+ break;
+ }
+ return 0;
+ }
+
+ ent = find_cfg_ent(pattern, pattern_len);
+ if (!ent)
+ return -1;
+ switch (slot) {
+ case EXPIRE_TOTAL:
+ ent->expire_total = expire;
+ break;
+ case EXPIRE_UNREACH:
+ ent->expire_unreachable = expire;
+ break;
+ }
+ return 0;
+}
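+
+/*
+ * For orientation (not upstream text), the configuration parsed above has
+ * the shape
+ *
+ *     [gc]
+ *         reflogExpire = 90 days
+ *         reflogExpireUnreachable = 30 days
+ *     [gc "refs/stash"]
+ *         reflogExpire = never
+ *
+ * Keys without a <pattern> set the defaults; keys with a pattern are kept
+ * in reflog_expire_cfg entries and matched against each ref with
+ * wildmatch() in set_reflog_expiry_param() below.
+ */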
+
+static void set_reflog_expiry_param(struct cmd_reflog_expire_cb *cb, const char *ref)
+{
+ struct reflog_expire_cfg *ent;
+
+ if (cb->explicit_expiry == (EXPIRE_TOTAL|EXPIRE_UNREACH))
+ return; /* both given explicitly -- nothing to tweak */
+
+ for (ent = reflog_expire_cfg; ent; ent = ent->next) {
+ if (!wildmatch(ent->pattern, ref, 0)) {
+ if (!(cb->explicit_expiry & EXPIRE_TOTAL))
+ cb->expire_total = ent->expire_total;
+ if (!(cb->explicit_expiry & EXPIRE_UNREACH))
+ cb->expire_unreachable = ent->expire_unreachable;
+ return;
+ }
+ }
+
+ /*
+ * If unconfigured, make stash never expire
+ */
+ if (!strcmp(ref, "refs/stash")) {
+ if (!(cb->explicit_expiry & EXPIRE_TOTAL))
+ cb->expire_total = 0;
+ if (!(cb->explicit_expiry & EXPIRE_UNREACH))
+ cb->expire_unreachable = 0;
+ return;
+ }
+
+ /* Nothing matched -- use the default value */
+ if (!(cb->explicit_expiry & EXPIRE_TOTAL))
+ cb->expire_total = default_reflog_expire;
+ if (!(cb->explicit_expiry & EXPIRE_UNREACH))
+ cb->expire_unreachable = default_reflog_expire_unreachable;
+}
+
+static int expire_unreachable_callback(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ struct cmd_reflog_expire_cb *cmd = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (parse_expiry_date(arg, &cmd->expire_unreachable))
+ die(_("invalid timestamp '%s' given to '--%s'"),
+ arg, opt->long_name);
+
+ cmd->explicit_expiry |= EXPIRE_UNREACH;
+ return 0;
+}
+
+static int expire_total_callback(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ struct cmd_reflog_expire_cb *cmd = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (parse_expiry_date(arg, &cmd->expire_total))
+ die(_("invalid timestamp '%s' given to '--%s'"),
+ arg, opt->long_name);
+
+ cmd->explicit_expiry |= EXPIRE_TOTAL;
+ return 0;
+}
+
+static int cmd_reflog_show(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ parse_options(argc, argv, prefix, options, reflog_show_usage,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+
+ return cmd_log_reflog(argc, argv, prefix);
+}
+
+static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
+{
+ struct cmd_reflog_expire_cb cmd = { 0 };
+ timestamp_t now = time(NULL);
+ int i, status, do_all, all_worktrees = 1;
+ unsigned int flags = 0;
+ int verbose = 0;
+ reflog_expiry_should_prune_fn *should_prune_fn = should_expire_reflog_ent;
+ const struct option options[] = {
+ OPT_BIT(0, "dry-run", &flags, N_("do not actually prune any entries"),
+ EXPIRE_REFLOGS_DRY_RUN),
+ OPT_BIT(0, "rewrite", &flags,
+ N_("rewrite the old SHA1 with the new SHA1 of the entry that now precedes it"),
+ EXPIRE_REFLOGS_REWRITE),
+ OPT_BIT(0, "updateref", &flags,
+ N_("update the reference to the value of the top reflog entry"),
+ EXPIRE_REFLOGS_UPDATE_REF),
+ OPT_BOOL(0, "verbose", &verbose, N_("print extra information on screen")),
+ OPT_CALLBACK_F(0, "expire", &cmd, N_("timestamp"),
+ N_("prune entries older than the specified time"),
+ PARSE_OPT_NONEG,
+ expire_total_callback),
+ OPT_CALLBACK_F(0, "expire-unreachable", &cmd, N_("timestamp"),
+ N_("prune entries older than <time> that are not reachable from the current tip of the branch"),
+ PARSE_OPT_NONEG,
+ expire_unreachable_callback),
+ OPT_BOOL(0, "stale-fix", &cmd.stalefix,
+ N_("prune any reflog entries that point to broken commits")),
+ OPT_BOOL(0, "all", &do_all, N_("process the reflogs of all references")),
+ OPT_BOOL(1, "single-worktree", &all_worktrees,
+ N_("limits processing to reflogs from the current worktree only")),
+ OPT_END()
+ };
+
+ default_reflog_expire_unreachable = now - 30 * 24 * 3600;
+ default_reflog_expire = now - 90 * 24 * 3600;
+ git_config(reflog_expire_config, NULL);
+
+ save_commit_buffer = 0;
+ do_all = status = 0;
+
+ cmd.explicit_expiry = 0;
+ cmd.expire_total = default_reflog_expire;
+ cmd.expire_unreachable = default_reflog_expire_unreachable;
+
+ argc = parse_options(argc, argv, prefix, options, reflog_expire_usage, 0);
+
+ if (verbose)
+ should_prune_fn = should_expire_reflog_ent_verbose;
+
+ /*
+ * We can trust the commits and objects reachable from refs
+ * even in an older repository. We cannot trust what's reachable
+ * from the reflog if the repository was pruned with an older git.
+ */
+ if (cmd.stalefix) {
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, prefix);
+ revs.do_not_die_on_missing_tree = 1;
+ revs.ignore_missing = 1;
+ revs.ignore_missing_links = 1;
+ if (verbose)
+ printf(_("Marking reachable objects..."));
+ mark_reachable_objects(&revs, 0, 0, NULL);
+ release_revisions(&revs);
+ if (verbose)
+ putchar('\n');
+ }
+
+ if (do_all) {
+ struct worktree_reflogs collected = {
+ .reflogs = STRING_LIST_INIT_DUP,
+ };
+ struct string_list_item *item;
+ struct worktree **worktrees, **p;
+
+ worktrees = get_worktrees();
+ for (p = worktrees; *p; p++) {
+ if (!all_worktrees && !(*p)->is_current)
+ continue;
+ collected.worktree = *p;
+ refs_for_each_reflog(get_worktree_ref_store(*p),
+ collect_reflog, &collected);
+ }
+ free_worktrees(worktrees);
+
+ for_each_string_list_item(item, &collected.reflogs) {
+ struct expire_reflog_policy_cb cb = {
+ .cmd = cmd,
+ .dry_run = !!(flags & EXPIRE_REFLOGS_DRY_RUN),
+ };
+
+ set_reflog_expiry_param(&cb.cmd, item->string);
+ status |= reflog_expire(item->string, flags,
+ reflog_expiry_prepare,
+ should_prune_fn,
+ reflog_expiry_cleanup,
+ &cb);
+ }
+ string_list_clear(&collected.reflogs, 0);
+ }
+
+ for (i = 0; i < argc; i++) {
+ char *ref;
+ struct expire_reflog_policy_cb cb = { .cmd = cmd };
+
+ if (!dwim_log(argv[i], strlen(argv[i]), NULL, &ref)) {
+ status |= error(_("%s points nowhere!"), argv[i]);
+ continue;
+ }
+ set_reflog_expiry_param(&cb.cmd, ref);
+ status |= reflog_expire(ref, flags,
+ reflog_expiry_prepare,
+ should_prune_fn,
+ reflog_expiry_cleanup,
+ &cb);
+ free(ref);
+ }
+ return status;
+}
+
+static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
+{
+ int i, status = 0;
+ unsigned int flags = 0;
+ int verbose = 0;
+
+ const struct option options[] = {
+ OPT_BIT(0, "dry-run", &flags, N_("do not actually prune any entries"),
+ EXPIRE_REFLOGS_DRY_RUN),
+ OPT_BIT(0, "rewrite", &flags,
+ N_("rewrite the old SHA1 with the new SHA1 of the entry that now precedes it"),
+ EXPIRE_REFLOGS_REWRITE),
+ OPT_BIT(0, "updateref", &flags,
+ N_("update the reference to the value of the top reflog entry"),
+ EXPIRE_REFLOGS_UPDATE_REF),
+ OPT_BOOL(0, "verbose", &verbose, N_("print extra information on screen")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, reflog_delete_usage, 0);
+
+ if (argc < 1)
+ return error(_("no reflog specified to delete"));
+
+ for (i = 0; i < argc; i++)
+ status |= reflog_delete(argv[i], flags, verbose);
+
+ return status;
+}
+
+static int cmd_reflog_exists(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *refname;
+
+ argc = parse_options(argc, argv, prefix, options, reflog_exists_usage,
+ 0);
+ if (!argc)
+ usage_with_options(reflog_exists_usage, options);
+
+ refname = argv[0];
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
+ die(_("invalid ref format: %s"), refname);
+ return !reflog_exists(refname);
+}
+
+/*
+ * main "reflog"
+ */
+
+int cmd_reflog(int argc, const char **argv, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option options[] = {
+ OPT_SUBCOMMAND("show", &fn, cmd_reflog_show),
+ OPT_SUBCOMMAND("expire", &fn, cmd_reflog_expire),
+ OPT_SUBCOMMAND("delete", &fn, cmd_reflog_delete),
+ OPT_SUBCOMMAND("exists", &fn, cmd_reflog_exists),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, reflog_usage,
+ PARSE_OPT_SUBCOMMAND_OPTIONAL |
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+ if (fn)
+ return fn(argc - 1, argv + 1, prefix);
+ else
+ return cmd_log_reflog(argc, argv, prefix);
+}
diff --git a/builtin/remote-ext.c b/builtin/remote-ext.c
new file mode 100644
index 0000000..fd3538d
--- /dev/null
+++ b/builtin/remote-ext.c
@@ -0,0 +1,202 @@
+#include "builtin.h"
+#include "transport.h"
+#include "run-command.h"
+#include "pkt-line.h"
+
+static const char usage_msg[] =
+ "git remote-ext <remote> <url>";
+
+/*
+ * URL syntax:
+ * 'command [arg1 [arg2 [...]]]' Invoke command with given arguments.
+ * Special characters:
+ * '% ': Literal space in argument.
+ * '%%': Literal percent sign.
+ * '%S': Name of service (git-upload-pack/git-upload-archive/
+ * git-receive-pack).
+ * '%s': Same as '%S', but with the possible git- prefix stripped.
+ * '%G': Only allowed as first 'character' of argument. Do not pass this
+ * argument to the command; instead send this as the name of the repository
+ * in in-line git://-style request (also activates sending this
+ * style of request).
+ * '%V': Only allowed as first 'character' of argument. Used in
+ * conjunction with '%G': Do not pass this argument to command,
+ * instead send this as vhost in git://-style request (note: does
+ * not activate sending git:// style request).
+ */
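+
+/*
+ * A made-up example of the rules above: the URL
+ *
+ *     ext::ssh user@host.example.com %S /srv/git/repo.git
+ *
+ * makes a fetch run the command
+ *
+ *     ssh user@host.example.com git-upload-pack /srv/git/repo.git
+ *
+ * because %S expands to the requested service name.  An argument starting
+ * with %G is instead sent as the repository name of an in-line git://
+ * style request on the command's stdin (see send_git_request() below).
+ */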
+
+static char *git_req;
+static char *git_req_vhost;
+
+static char *strip_escapes(const char *str, const char *service,
+ const char **next)
+{
+ size_t rpos = 0;
+ int escape = 0;
+ char special = 0;
+ const char *service_noprefix = service;
+ struct strbuf ret = STRBUF_INIT;
+
+ skip_prefix(service_noprefix, "git-", &service_noprefix);
+
+ /* Pass the service to command. */
+ setenv("GIT_EXT_SERVICE", service, 1);
+ setenv("GIT_EXT_SERVICE_NOPREFIX", service_noprefix, 1);
+
+ /* Scan the length of argument. */
+ while (str[rpos] && (escape || str[rpos] != ' ')) {
+ if (escape) {
+ switch (str[rpos]) {
+ case ' ':
+ case '%':
+ case 's':
+ case 'S':
+ break;
+ case 'G':
+ case 'V':
+ special = str[rpos];
+ if (rpos == 1)
+ break;
+ /* fallthrough */
+ default:
+ die("Bad remote-ext placeholder '%%%c'.",
+ str[rpos]);
+ }
+ escape = 0;
+ } else
+ escape = (str[rpos] == '%');
+ rpos++;
+ }
+ if (escape && !str[rpos])
+ die("remote-ext command has incomplete placeholder");
+ *next = str + rpos;
+ if (**next == ' ')
+ ++*next; /* Skip over space */
+
+ /*
+ * Do the actual placeholder substitution. The string will be short
+ * enough not to overflow integers.
+ */
+ rpos = special ? 2 : 0; /* Skip first 2 bytes in specials. */
+ escape = 0;
+ while (str[rpos] && (escape || str[rpos] != ' ')) {
+ if (escape) {
+ switch (str[rpos]) {
+ case ' ':
+ case '%':
+ strbuf_addch(&ret, str[rpos]);
+ break;
+ case 's':
+ strbuf_addstr(&ret, service_noprefix);
+ break;
+ case 'S':
+ strbuf_addstr(&ret, service);
+ break;
+ }
+ escape = 0;
+ } else
+ switch (str[rpos]) {
+ case '%':
+ escape = 1;
+ break;
+ default:
+ strbuf_addch(&ret, str[rpos]);
+ break;
+ }
+ rpos++;
+ }
+ switch (special) {
+ case 'G':
+ git_req = strbuf_detach(&ret, NULL);
+ return NULL;
+ case 'V':
+ git_req_vhost = strbuf_detach(&ret, NULL);
+ return NULL;
+ default:
+ return strbuf_detach(&ret, NULL);
+ }
+}
+
+static void parse_argv(struct strvec *out, const char *arg, const char *service)
+{
+ while (*arg) {
+ char *expanded = strip_escapes(arg, service, &arg);
+ if (expanded)
+ strvec_push(out, expanded);
+ free(expanded);
+ }
+}
+
+static void send_git_request(int stdin_fd, const char *serv, const char *repo,
+ const char *vhost)
+{
+ if (!vhost)
+ packet_write_fmt(stdin_fd, "%s %s%c", serv, repo, 0);
+ else
+ packet_write_fmt(stdin_fd, "%s %s%chost=%s%c", serv, repo, 0,
+ vhost, 0);
+}
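+
+/*
+ * Illustrative only: with a vhost the request written above is a single
+ * pkt-line of the form
+ *
+ *     git-upload-pack /repo.git\0host=example.com\0
+ *
+ * matching what a native git:// client sends to git-daemon.
+ */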
+
+static int run_child(const char *arg, const char *service)
+{
+ int r;
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ child.in = -1;
+ child.out = -1;
+ child.err = 0;
+ parse_argv(&child.args, arg, service);
+
+ if (start_command(&child) < 0)
+ die("Can't run specified command");
+
+ if (git_req)
+ send_git_request(child.in, service, git_req, git_req_vhost);
+
+ r = bidirectional_transfer_loop(child.out, child.in);
+ if (!r)
+ r = finish_command(&child);
+ else
+ finish_command(&child);
+ return r;
+}
+
+#define MAXCOMMAND 4096
+
+static int command_loop(const char *child)
+{
+ char buffer[MAXCOMMAND];
+
+ while (1) {
+ size_t i;
+ if (!fgets(buffer, MAXCOMMAND - 1, stdin)) {
+ if (ferror(stdin))
+ die("Command input error");
+ exit(0);
+ }
+ /* Strip end of line characters. */
+ i = strlen(buffer);
+ while (i > 0 && isspace(buffer[i - 1]))
+ buffer[--i] = 0;
+
+ if (!strcmp(buffer, "capabilities")) {
+ printf("*connect\n\n");
+ fflush(stdout);
+ } else if (!strncmp(buffer, "connect ", 8)) {
+ printf("\n");
+ fflush(stdout);
+ return run_child(child, buffer + 8);
+ } else {
+ fprintf(stderr, "Bad command");
+ return 1;
+ }
+ }
+}
+
+int cmd_remote_ext(int argc, const char **argv, const char *prefix)
+{
+ if (argc != 3)
+ usage(usage_msg);
+
+ return command_loop(argv[2]);
+}
diff --git a/builtin/remote-fd.c b/builtin/remote-fd.c
new file mode 100644
index 0000000..91dfe07
--- /dev/null
+++ b/builtin/remote-fd.c
@@ -0,0 +1,82 @@
+#include "builtin.h"
+#include "transport.h"
+
+static const char usage_msg[] =
+ "git remote-fd <remote> <url>";
+
+/*
+ * URL syntax:
+ * 'fd::<inoutfd>[/<anything>]' Read/write socket pair
+ * <inoutfd>.
+ * 'fd::<infd>,<outfd>[/<anything>]' Read pipe <infd> and write
+ * pipe <outfd>.
+ * [foo] indicates 'foo' is optional. <anything> is any string.
+ *
+ * The data output to <outfd>/<inoutfd> should be passed unmolested to
+ * git-receive-pack/git-upload-pack/git-upload-archive and output of
+ * git-receive-pack/git-upload-pack/git-upload-archive should be passed
+ * unmolested to <infd>/<inoutfd>.
+ *
+ */
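+
+/*
+ * Hypothetical usage, for illustration: a wrapper that already holds a
+ * connected socket on file descriptor 17 of the git process can run
+ *
+ *     git fetch "fd::17" <refspec>
+ *
+ * while separate read and write pipes on descriptors 7 and 8 would be
+ * used as
+ *
+ *     git push "fd::7,8" <refspec>
+ */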
+
+#define MAXCOMMAND 4096
+
+static void command_loop(int input_fd, int output_fd)
+{
+ char buffer[MAXCOMMAND];
+
+ while (1) {
+ size_t i;
+ if (!fgets(buffer, MAXCOMMAND - 1, stdin)) {
+ if (ferror(stdin))
+ die("Input error");
+ return;
+ }
+ /* Strip end of line characters. */
+ i = strlen(buffer);
+ while (i > 0 && isspace(buffer[i - 1]))
+ buffer[--i] = 0;
+
+ if (!strcmp(buffer, "capabilities")) {
+ printf("*connect\n\n");
+ fflush(stdout);
+ } else if (!strncmp(buffer, "connect ", 8)) {
+ printf("\n");
+ fflush(stdout);
+ if (bidirectional_transfer_loop(input_fd,
+ output_fd))
+ die("Copying data between file descriptors failed");
+ return;
+ } else {
+ die("Bad command: %s", buffer);
+ }
+ }
+}
+
+int cmd_remote_fd(int argc, const char **argv, const char *prefix)
+{
+ int input_fd = -1;
+ int output_fd = -1;
+ char *end;
+
+ if (argc != 3)
+ usage(usage_msg);
+
+ input_fd = (int)strtoul(argv[2], &end, 10);
+
+ if ((end == argv[2]) || (*end != ',' && *end != '/' && *end))
+ die("Bad URL syntax");
+
+ if (*end == '/' || !*end) {
+ output_fd = input_fd;
+ } else {
+ char *end2;
+ output_fd = (int)strtoul(end + 1, &end2, 10);
+
+ if ((end2 == end + 1) || (*end2 != '/' && *end2))
+ die("Bad URL syntax");
+ }
+
+ command_loop(input_fd, output_fd);
+ return 0;
+}
diff --git a/builtin/remote.c b/builtin/remote.c
new file mode 100644
index 0000000..729f6f3
--- /dev/null
+++ b/builtin/remote.c
@@ -0,0 +1,1782 @@
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "transport.h"
+#include "remote.h"
+#include "string-list.h"
+#include "strbuf.h"
+#include "run-command.h"
+#include "rebase.h"
+#include "refs.h"
+#include "refspec.h"
+#include "object-store.h"
+#include "strvec.h"
+#include "commit-reach.h"
+#include "progress.h"
+
+static const char * const builtin_remote_usage[] = {
+ "git remote [-v | --verbose]",
+ N_("git remote add [-t <branch>] [-m <master>] [-f] [--tags | --no-tags] [--mirror=<fetch|push>] <name> <url>"),
+ N_("git remote rename [--[no-]progress] <old> <new>"),
+ N_("git remote remove <name>"),
+ N_("git remote set-head <name> (-a | --auto | -d | --delete | <branch>)"),
+ N_("git remote [-v | --verbose] show [-n] <name>"),
+ N_("git remote prune [-n | --dry-run] <name>"),
+ N_("git remote [-v | --verbose] update [-p | --prune] [(<group> | <remote>)...]"),
+ N_("git remote set-branches [--add] <name> <branch>..."),
+ N_("git remote get-url [--push] [--all] <name>"),
+ N_("git remote set-url [--push] <name> <newurl> [<oldurl>]"),
+ N_("git remote set-url --add <name> <newurl>"),
+ N_("git remote set-url --delete <name> <url>"),
+ NULL
+};
+
+static const char * const builtin_remote_add_usage[] = {
+ N_("git remote add [<options>] <name> <url>"),
+ NULL
+};
+
+static const char * const builtin_remote_rename_usage[] = {
+ N_("git remote rename [--[no-]progress] <old> <new>"),
+ NULL
+};
+
+static const char * const builtin_remote_rm_usage[] = {
+ N_("git remote remove <name>"),
+ NULL
+};
+
+static const char * const builtin_remote_sethead_usage[] = {
+ N_("git remote set-head <name> (-a | --auto | -d | --delete | <branch>)"),
+ NULL
+};
+
+static const char * const builtin_remote_setbranches_usage[] = {
+ N_("git remote set-branches <name> <branch>..."),
+ N_("git remote set-branches --add <name> <branch>..."),
+ NULL
+};
+
+static const char * const builtin_remote_show_usage[] = {
+ N_("git remote show [<options>] <name>"),
+ NULL
+};
+
+static const char * const builtin_remote_prune_usage[] = {
+ N_("git remote prune [<options>] <name>"),
+ NULL
+};
+
+static const char * const builtin_remote_update_usage[] = {
+ N_("git remote update [<options>] [<group> | <remote>]..."),
+ NULL
+};
+
+static const char * const builtin_remote_geturl_usage[] = {
+ N_("git remote get-url [--push] [--all] <name>"),
+ NULL
+};
+
+static const char * const builtin_remote_seturl_usage[] = {
+ N_("git remote set-url [--push] <name> <newurl> [<oldurl>]"),
+ N_("git remote set-url --add <name> <newurl>"),
+ N_("git remote set-url --delete <name> <url>"),
+ NULL
+};
+
+#define GET_REF_STATES (1<<0)
+#define GET_HEAD_NAMES (1<<1)
+#define GET_PUSH_REF_STATES (1<<2)
+
+static int verbose;
+
+static int fetch_remote(const char *name)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_push(&cmd.args, "fetch");
+ if (verbose)
+ strvec_push(&cmd.args, "-v");
+ strvec_push(&cmd.args, name);
+ cmd.git_cmd = 1;
+ printf_ln(_("Updating %s"), name);
+ if (run_command(&cmd))
+ return error(_("Could not fetch %s"), name);
+ return 0;
+}
+
+enum {
+ TAGS_UNSET = 0,
+ TAGS_DEFAULT = 1,
+ TAGS_SET = 2
+};
+
+#define MIRROR_NONE 0
+#define MIRROR_FETCH 1
+#define MIRROR_PUSH 2
+#define MIRROR_BOTH (MIRROR_FETCH|MIRROR_PUSH)
+
+static void add_branch(const char *key, const char *branchname,
+ const char *remotename, int mirror, struct strbuf *tmp)
+{
+ strbuf_reset(tmp);
+ strbuf_addch(tmp, '+');
+ if (mirror)
+ strbuf_addf(tmp, "refs/%s:refs/%s",
+ branchname, branchname);
+ else
+ strbuf_addf(tmp, "refs/heads/%s:refs/remotes/%s/%s",
+ branchname, remotename, branchname);
+ git_config_set_multivar(key, tmp->buf, "^$", 0);
+}
+
+static const char mirror_advice[] =
+N_("--mirror is dangerous and deprecated; please\n"
+ "\t use --mirror=fetch or --mirror=push instead");
+
+static int parse_mirror_opt(const struct option *opt, const char *arg, int not)
+{
+ unsigned *mirror = opt->value;
+ if (not)
+ *mirror = MIRROR_NONE;
+ else if (!arg) {
+ warning("%s", _(mirror_advice));
+ *mirror = MIRROR_BOTH;
+ }
+ else if (!strcmp(arg, "fetch"))
+ *mirror = MIRROR_FETCH;
+ else if (!strcmp(arg, "push"))
+ *mirror = MIRROR_PUSH;
+ else
+ return error(_("unknown mirror argument: %s"), arg);
+ return 0;
+}
+
+static int add(int argc, const char **argv, const char *prefix)
+{
+ int fetch = 0, fetch_tags = TAGS_DEFAULT;
+ unsigned mirror = MIRROR_NONE;
+ struct string_list track = STRING_LIST_INIT_NODUP;
+ const char *master = NULL;
+ struct remote *remote;
+ struct strbuf buf = STRBUF_INIT, buf2 = STRBUF_INIT;
+ const char *name, *url;
+ int i;
+
+ struct option options[] = {
+ OPT_BOOL('f', "fetch", &fetch, N_("fetch the remote branches")),
+ OPT_SET_INT(0, "tags", &fetch_tags,
+ N_("import all tags and associated objects when fetching"),
+ TAGS_SET),
+ OPT_SET_INT(0, NULL, &fetch_tags,
+ N_("or do not fetch any tag at all (--no-tags)"), TAGS_UNSET),
+ OPT_STRING_LIST('t', "track", &track, N_("branch"),
+ N_("branch(es) to track")),
+ OPT_STRING('m', "master", &master, N_("branch"), N_("master branch")),
+ OPT_CALLBACK_F(0, "mirror", &mirror, "(push|fetch)",
+ N_("set up remote as a mirror to push to or fetch from"),
+ PARSE_OPT_OPTARG | PARSE_OPT_COMP_ARG, parse_mirror_opt),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_add_usage, 0);
+
+ if (argc != 2)
+ usage_with_options(builtin_remote_add_usage, options);
+
+ if (mirror && master)
+ die(_("specifying a master branch makes no sense with --mirror"));
+ if (mirror && !(mirror & MIRROR_FETCH) && track.nr)
+ die(_("specifying branches to track makes sense only with fetch mirrors"));
+
+ name = argv[0];
+ url = argv[1];
+
+ remote = remote_get(name);
+ if (remote_is_configured(remote, 1)) {
+ error(_("remote %s already exists."), name);
+ exit(3);
+ }
+
+ if (!valid_remote_name(name))
+ die(_("'%s' is not a valid remote name"), name);
+
+ strbuf_addf(&buf, "remote.%s.url", name);
+ git_config_set(buf.buf, url);
+
+ if (!mirror || mirror & MIRROR_FETCH) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "remote.%s.fetch", name);
+ if (track.nr == 0)
+ string_list_append(&track, "*");
+ for (i = 0; i < track.nr; i++) {
+ add_branch(buf.buf, track.items[i].string,
+ name, mirror, &buf2);
+ }
+ }
+
+ if (mirror & MIRROR_PUSH) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "remote.%s.mirror", name);
+ git_config_set(buf.buf, "true");
+ }
+
+ if (fetch_tags != TAGS_DEFAULT) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "remote.%s.tagOpt", name);
+ git_config_set(buf.buf,
+ fetch_tags == TAGS_SET ? "--tags" : "--no-tags");
+ }
+
+ if (fetch && fetch_remote(name))
+ return 1;
+
+ if (master) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "refs/remotes/%s/HEAD", name);
+
+ strbuf_reset(&buf2);
+ strbuf_addf(&buf2, "refs/remotes/%s/%s", name, master);
+
+ if (create_symref(buf.buf, buf2.buf, "remote add"))
+ return error(_("Could not setup master '%s'"), master);
+ }
+
+ strbuf_release(&buf);
+ strbuf_release(&buf2);
+ string_list_clear(&track, 0);
+
+ return 0;
+}
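+
+/*
+ * Hypothetical end result of the function above: "git remote add -t topic
+ * origin <url>" writes roughly
+ *
+ *     [remote "origin"]
+ *         url = <url>
+ *         fetch = +refs/heads/topic:refs/remotes/origin/topic
+ *
+ * while "--mirror=fetch" writes fetch = +refs/*:refs/* instead, and
+ * "--mirror=push" sets remote.origin.mirror = true.
+ */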
+
+struct branch_info {
+ char *remote_name;
+ struct string_list merge;
+ enum rebase_type rebase;
+ char *push_remote_name;
+};
+
+static struct string_list branch_list = STRING_LIST_INIT_NODUP;
+
+static const char *abbrev_ref(const char *name, const char *prefix)
+{
+ skip_prefix(name, prefix, &name);
+ return name;
+}
+#define abbrev_branch(name) abbrev_ref((name), "refs/heads/")
+
+static int config_read_branches(const char *key, const char *value,
+ void *data UNUSED)
+{
+ const char *orig_key = key;
+ char *name;
+ struct string_list_item *item;
+ struct branch_info *info;
+ enum { REMOTE, MERGE, REBASE, PUSH_REMOTE } type;
+ size_t key_len;
+
+ if (!starts_with(key, "branch."))
+ return 0;
+
+ key += strlen("branch.");
+ if (strip_suffix(key, ".remote", &key_len))
+ type = REMOTE;
+ else if (strip_suffix(key, ".merge", &key_len))
+ type = MERGE;
+ else if (strip_suffix(key, ".rebase", &key_len))
+ type = REBASE;
+ else if (strip_suffix(key, ".pushremote", &key_len))
+ type = PUSH_REMOTE;
+ else
+ return 0;
+ name = xmemdupz(key, key_len);
+
+ item = string_list_insert(&branch_list, name);
+
+ if (!item->util)
+ item->util = xcalloc(1, sizeof(struct branch_info));
+ info = item->util;
+ switch (type) {
+ case REMOTE:
+ if (info->remote_name)
+ warning(_("more than one %s"), orig_key);
+ info->remote_name = xstrdup(value);
+ break;
+ case MERGE: {
+ char *space = strchr(value, ' ');
+ value = abbrev_branch(value);
+ while (space) {
+ char *merge;
+ merge = xstrndup(value, space - value);
+ string_list_append(&info->merge, merge);
+ value = abbrev_branch(space + 1);
+ space = strchr(value, ' ');
+ }
+ string_list_append(&info->merge, xstrdup(value));
+ break;
+ }
+ case REBASE:
+ /*
+ * Consider invalid values as false and check the
+ * truth value with >= REBASE_TRUE.
+ */
+ info->rebase = rebase_parse_value(value);
+ if (info->rebase == REBASE_INVALID)
+ warning(_("unhandled branch.%s.rebase=%s; assuming "
+ "'true'"), name, value);
+ break;
+ case PUSH_REMOTE:
+ if (info->push_remote_name)
+ warning(_("more than one %s"), orig_key);
+ info->push_remote_name = xstrdup(value);
+ break;
+ default:
+ BUG("unexpected type=%d", type);
+ }
+
+ return 0;
+}
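+
+/*
+ * The configuration walked above has the shape (names invented):
+ *
+ *     [branch "topic"]
+ *         remote = origin
+ *         merge = refs/heads/main
+ *         rebase = true
+ *
+ * Every branch.<name>.{remote,merge,rebase,pushremote} key is folded into
+ * a single branch_info entry keyed by <name> in branch_list.
+ */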
+
+static void read_branches(void)
+{
+ if (branch_list.nr)
+ return;
+ git_config(config_read_branches, NULL);
+}
+
+struct ref_states {
+ struct remote *remote;
+ struct string_list new_refs, skipped, stale, tracked, heads, push;
+ int queried;
+};
+
+#define REF_STATES_INIT { \
+ .new_refs = STRING_LIST_INIT_DUP, \
+ .skipped = STRING_LIST_INIT_DUP, \
+ .stale = STRING_LIST_INIT_DUP, \
+ .tracked = STRING_LIST_INIT_DUP, \
+ .heads = STRING_LIST_INIT_DUP, \
+ .push = STRING_LIST_INIT_DUP, \
+}
+
+static int get_ref_states(const struct ref *remote_refs, struct ref_states *states)
+{
+ struct ref *fetch_map = NULL, **tail = &fetch_map;
+ struct ref *ref, *stale_refs;
+ int i;
+
+ for (i = 0; i < states->remote->fetch.nr; i++)
+ if (get_fetch_map(remote_refs, &states->remote->fetch.items[i], &tail, 1))
+ die(_("Could not get fetch map for refspec %s"),
+ states->remote->fetch.raw[i]);
+
+ for (ref = fetch_map; ref; ref = ref->next) {
+ if (omit_name_by_refspec(ref->name, &states->remote->fetch))
+ string_list_append(&states->skipped, abbrev_branch(ref->name));
+ else if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
+ string_list_append(&states->new_refs, abbrev_branch(ref->name));
+ else
+ string_list_append(&states->tracked, abbrev_branch(ref->name));
+ }
+ stale_refs = get_stale_heads(&states->remote->fetch, fetch_map);
+ for (ref = stale_refs; ref; ref = ref->next) {
+ struct string_list_item *item =
+ string_list_append(&states->stale, abbrev_branch(ref->name));
+ item->util = xstrdup(ref->name);
+ }
+ free_refs(stale_refs);
+ free_refs(fetch_map);
+
+ string_list_sort(&states->new_refs);
+ string_list_sort(&states->skipped);
+ string_list_sort(&states->tracked);
+ string_list_sort(&states->stale);
+
+ return 0;
+}
+
+struct push_info {
+ char *dest;
+ int forced;
+ enum {
+ PUSH_STATUS_CREATE = 0,
+ PUSH_STATUS_DELETE,
+ PUSH_STATUS_UPTODATE,
+ PUSH_STATUS_FASTFORWARD,
+ PUSH_STATUS_OUTOFDATE,
+ PUSH_STATUS_NOTQUERIED
+ } status;
+};
+
+static int get_push_ref_states(const struct ref *remote_refs,
+ struct ref_states *states)
+{
+ struct remote *remote = states->remote;
+ struct ref *ref, *local_refs, *push_map;
+ if (remote->mirror)
+ return 0;
+
+ local_refs = get_local_heads();
+ push_map = copy_ref_list(remote_refs);
+
+ match_push_refs(local_refs, &push_map, &remote->push, MATCH_REFS_NONE);
+
+ for (ref = push_map; ref; ref = ref->next) {
+ struct string_list_item *item;
+ struct push_info *info;
+
+ if (!ref->peer_ref)
+ continue;
+ oidcpy(&ref->new_oid, &ref->peer_ref->new_oid);
+
+ item = string_list_append(&states->push,
+ abbrev_branch(ref->peer_ref->name));
+ item->util = xcalloc(1, sizeof(struct push_info));
+ info = item->util;
+ info->forced = ref->force;
+ info->dest = xstrdup(abbrev_branch(ref->name));
+
+ if (is_null_oid(&ref->new_oid)) {
+ info->status = PUSH_STATUS_DELETE;
+ } else if (oideq(&ref->old_oid, &ref->new_oid))
+ info->status = PUSH_STATUS_UPTODATE;
+ else if (is_null_oid(&ref->old_oid))
+ info->status = PUSH_STATUS_CREATE;
+ else if (has_object_file(&ref->old_oid) &&
+ ref_newer(&ref->new_oid, &ref->old_oid))
+ info->status = PUSH_STATUS_FASTFORWARD;
+ else
+ info->status = PUSH_STATUS_OUTOFDATE;
+ }
+ free_refs(local_refs);
+ free_refs(push_map);
+ return 0;
+}
+
+static int get_push_ref_states_noquery(struct ref_states *states)
+{
+ int i;
+ struct remote *remote = states->remote;
+ struct string_list_item *item;
+ struct push_info *info;
+
+ if (remote->mirror)
+ return 0;
+
+ if (!remote->push.nr) {
+ item = string_list_append(&states->push, _("(matching)"));
+ info = item->util = xcalloc(1, sizeof(struct push_info));
+ info->status = PUSH_STATUS_NOTQUERIED;
+ info->dest = xstrdup(item->string);
+ }
+ for (i = 0; i < remote->push.nr; i++) {
+ const struct refspec_item *spec = &remote->push.items[i];
+ if (spec->matching)
+ item = string_list_append(&states->push, _("(matching)"));
+ else if (strlen(spec->src))
+ item = string_list_append(&states->push, spec->src);
+ else
+ item = string_list_append(&states->push, _("(delete)"));
+
+ info = item->util = xcalloc(1, sizeof(struct push_info));
+ info->forced = spec->force;
+ info->status = PUSH_STATUS_NOTQUERIED;
+ info->dest = xstrdup(spec->dst ? spec->dst : item->string);
+ }
+ return 0;
+}
+
+static int get_head_names(const struct ref *remote_refs, struct ref_states *states)
+{
+ struct ref *ref, *matches;
+ struct ref *fetch_map = NULL, **fetch_map_tail = &fetch_map;
+ struct refspec_item refspec;
+
+ memset(&refspec, 0, sizeof(refspec));
+ refspec.force = 0;
+ refspec.pattern = 1;
+ refspec.src = refspec.dst = "refs/heads/*";
+ get_fetch_map(remote_refs, &refspec, &fetch_map_tail, 0);
+ matches = guess_remote_head(find_ref_by_name(remote_refs, "HEAD"),
+ fetch_map, 1);
+ for (ref = matches; ref; ref = ref->next)
+ string_list_append(&states->heads, abbrev_branch(ref->name));
+
+ free_refs(fetch_map);
+ free_refs(matches);
+
+ return 0;
+}
+
+struct known_remote {
+ struct known_remote *next;
+ struct remote *remote;
+};
+
+struct known_remotes {
+ struct remote *to_delete;
+ struct known_remote *list;
+};
+
+static int add_known_remote(struct remote *remote, void *cb_data)
+{
+ struct known_remotes *all = cb_data;
+ struct known_remote *r;
+
+ if (!strcmp(all->to_delete->name, remote->name))
+ return 0;
+
+ r = xmalloc(sizeof(*r));
+ r->remote = remote;
+ r->next = all->list;
+ all->list = r;
+ return 0;
+}
+
+struct branches_for_remote {
+ struct remote *remote;
+ struct string_list *branches, *skipped;
+ struct known_remotes *keep;
+};
+
+static int add_branch_for_removal(const char *refname,
+ const struct object_id *oid UNUSED,
+ int flags UNUSED, void *cb_data)
+{
+ struct branches_for_remote *branches = cb_data;
+ struct refspec_item refspec;
+ struct known_remote *kr;
+
+ memset(&refspec, 0, sizeof(refspec));
+ refspec.dst = (char *)refname;
+ if (remote_find_tracking(branches->remote, &refspec))
+ return 0;
+
+ /* don't delete a branch if another remote also uses it */
+ for (kr = branches->keep->list; kr; kr = kr->next) {
+ memset(&refspec, 0, sizeof(refspec));
+ refspec.dst = (char *)refname;
+ if (!remote_find_tracking(kr->remote, &refspec))
+ return 0;
+ }
+
+ /* don't delete non-remote-tracking refs */
+ if (!starts_with(refname, "refs/remotes/")) {
+ /* advise user how to delete local branches */
+ if (starts_with(refname, "refs/heads/"))
+ string_list_append(branches->skipped,
+ abbrev_branch(refname));
+ /* silently skip over other non-remote refs */
+ return 0;
+ }
+
+ string_list_append(branches->branches, refname);
+
+ return 0;
+}
+
+struct rename_info {
+ const char *old_name;
+ const char *new_name;
+ struct string_list *remote_branches;
+ uint32_t symrefs_nr;
+};
+
+static int read_remote_branches(const char *refname,
+ const struct object_id *oid UNUSED,
+ int flags UNUSED, void *cb_data)
+{
+ struct rename_info *rename = cb_data;
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list_item *item;
+ int flag;
+ const char *symref;
+
+ strbuf_addf(&buf, "refs/remotes/%s/", rename->old_name);
+ if (starts_with(refname, buf.buf)) {
+ item = string_list_append(rename->remote_branches, refname);
+ symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING,
+ NULL, &flag);
+ if (symref && (flag & REF_ISSYMREF)) {
+ item->util = xstrdup(symref);
+ rename->symrefs_nr++;
+ } else {
+ item->util = NULL;
+ }
+ }
+ strbuf_release(&buf);
+
+ return 0;
+}
+
+static int migrate_file(struct remote *remote)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int i;
+
+ strbuf_addf(&buf, "remote.%s.url", remote->name);
+ for (i = 0; i < remote->url_nr; i++)
+ git_config_set_multivar(buf.buf, remote->url[i], "^$", 0);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "remote.%s.push", remote->name);
+ for (i = 0; i < remote->push.raw_nr; i++)
+ git_config_set_multivar(buf.buf, remote->push.raw[i], "^$", 0);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "remote.%s.fetch", remote->name);
+ for (i = 0; i < remote->fetch.raw_nr; i++)
+ git_config_set_multivar(buf.buf, remote->fetch.raw[i], "^$", 0);
+ if (remote->origin == REMOTE_REMOTES)
+ unlink_or_warn(git_path("remotes/%s", remote->name));
+ else if (remote->origin == REMOTE_BRANCHES)
+ unlink_or_warn(git_path("branches/%s", remote->name));
+ strbuf_release(&buf);
+
+ return 0;
+}
+
+struct push_default_info
+{
+ const char *old_name;
+ enum config_scope scope;
+ struct strbuf origin;
+ int linenr;
+};
+
+static int config_read_push_default(const char *key, const char *value,
+ void *cb)
+{
+ struct push_default_info* info = cb;
+ if (strcmp(key, "remote.pushdefault") ||
+ !value || strcmp(value, info->old_name))
+ return 0;
+
+ info->scope = current_config_scope();
+ strbuf_reset(&info->origin);
+ strbuf_addstr(&info->origin, current_config_name());
+ info->linenr = current_config_line();
+
+ return 0;
+}
+
+static void handle_push_default(const char* old_name, const char* new_name)
+{
+ struct push_default_info push_default = {
+ old_name, CONFIG_SCOPE_UNKNOWN, STRBUF_INIT, -1 };
+ git_config(config_read_push_default, &push_default);
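+ /*
+ * Only adjust remote.pushDefault where it is safe to do so: a
+ * command-line override is left alone, a repository-local or
+ * worktree setting is rewritten (or unset when the remote goes
+ * away), and a global or system-wide setting merely triggers the
+ * warning below.
+ */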
+ if (push_default.scope >= CONFIG_SCOPE_COMMAND)
+ ; /* pass */
+ else if (push_default.scope >= CONFIG_SCOPE_LOCAL) {
+ int result = git_config_set_gently("remote.pushDefault",
+ new_name);
+ if (new_name && result && result != CONFIG_NOTHING_SET)
+ die(_("could not set '%s'"), "remote.pushDefault");
+ else if (!new_name && result && result != CONFIG_NOTHING_SET)
+ die(_("could not unset '%s'"), "remote.pushDefault");
+ } else if (push_default.scope >= CONFIG_SCOPE_SYSTEM) {
+ /* warn */
+ warning(_("The %s configuration remote.pushDefault in:\n"
+ "\t%s:%d\n"
+ "now names the non-existent remote '%s'"),
+ config_scope_name(push_default.scope),
+ push_default.origin.buf, push_default.linenr,
+ old_name);
+ }
+}
+
+
+static int mv(int argc, const char **argv, const char *prefix)
+{
+ int show_progress = isatty(2);
+ struct option options[] = {
+ OPT_BOOL(0, "progress", &show_progress, N_("force progress reporting")),
+ OPT_END()
+ };
+ struct remote *oldremote, *newremote;
+ struct strbuf buf = STRBUF_INIT, buf2 = STRBUF_INIT, buf3 = STRBUF_INIT,
+ old_remote_context = STRBUF_INIT;
+ struct string_list remote_branches = STRING_LIST_INIT_DUP;
+ struct rename_info rename;
+ int i, refs_renamed_nr = 0, refspec_updated = 0;
+ struct progress *progress = NULL;
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_rename_usage, 0);
+
+ if (argc != 2)
+ usage_with_options(builtin_remote_rename_usage, options);
+
+ rename.old_name = argv[0];
+ rename.new_name = argv[1];
+ rename.remote_branches = &remote_branches;
+ rename.symrefs_nr = 0;
+
+ oldremote = remote_get(rename.old_name);
+ if (!remote_is_configured(oldremote, 1)) {
+ error(_("No such remote: '%s'"), rename.old_name);
+ exit(2);
+ }
+
+ if (!strcmp(rename.old_name, rename.new_name) && oldremote->origin != REMOTE_CONFIG)
+ return migrate_file(oldremote);
+
+ newremote = remote_get(rename.new_name);
+ if (remote_is_configured(newremote, 1)) {
+ error(_("remote %s already exists."), rename.new_name);
+ exit(3);
+ }
+
+ if (!valid_remote_name(rename.new_name))
+ die(_("'%s' is not a valid remote name"), rename.new_name);
+
+ strbuf_addf(&buf, "remote.%s", rename.old_name);
+ strbuf_addf(&buf2, "remote.%s", rename.new_name);
+ if (git_config_rename_section(buf.buf, buf2.buf) < 1)
+ return error(_("Could not rename config section '%s' to '%s'"),
+ buf.buf, buf2.buf);
+
+ if (oldremote->fetch.raw_nr) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "remote.%s.fetch", rename.new_name);
+ git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE);
+ strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name);
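+ /*
+ * A default-looking refspec has ":refs/remotes/<old>/" as its
+ * destination; splice the new remote name over the old one there
+ * and leave any other refspec for the user to fix up by hand (see
+ * the warning below).
+ */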
+ for (i = 0; i < oldremote->fetch.raw_nr; i++) {
+ char *ptr;
+
+ strbuf_reset(&buf2);
+ strbuf_addstr(&buf2, oldremote->fetch.raw[i]);
+ ptr = strstr(buf2.buf, old_remote_context.buf);
+ if (ptr) {
+ refspec_updated = 1;
+ strbuf_splice(&buf2,
+ ptr-buf2.buf + strlen(":refs/remotes/"),
+ strlen(rename.old_name), rename.new_name,
+ strlen(rename.new_name));
+ } else
+ warning(_("Not updating non-default fetch refspec\n"
+ "\t%s\n"
+ "\tPlease update the configuration manually if necessary."),
+ buf2.buf);
+
+ git_config_set_multivar(buf.buf, buf2.buf, "^$", 0);
+ }
+ }
+
+ read_branches();
+ for (i = 0; i < branch_list.nr; i++) {
+ struct string_list_item *item = branch_list.items + i;
+ struct branch_info *info = item->util;
+ if (info->remote_name && !strcmp(info->remote_name, rename.old_name)) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "branch.%s.remote", item->string);
+ git_config_set(buf.buf, rename.new_name);
+ }
+ if (info->push_remote_name && !strcmp(info->push_remote_name, rename.old_name)) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "branch.%s.pushRemote", item->string);
+ git_config_set(buf.buf, rename.new_name);
+ }
+ }
+
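+ /*
+ * No fetch refspec pointed into refs/remotes/<old>/, so there are
+ * no remote-tracking refs of ours to rename either.
+ */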
+ if (!refspec_updated)
+ return 0;
+
+ /*
+ * First remove symrefs, then rename the rest, finally create
+ * the new symrefs.
+ */
+ for_each_ref(read_remote_branches, &rename);
+ if (show_progress) {
+ /*
+ * Count symrefs twice, since "renaming" them is done by
+ * deleting and recreating them in two separate passes.
+ */
+ progress = start_progress(_("Renaming remote references"),
+ rename.remote_branches->nr + rename.symrefs_nr);
+ }
+ for (i = 0; i < remote_branches.nr; i++) {
+ struct string_list_item *item = remote_branches.items + i;
+ struct strbuf referent = STRBUF_INIT;
+
+ if (refs_read_symbolic_ref(get_main_ref_store(the_repository), item->string,
+ &referent))
+ continue;
+ if (delete_ref(NULL, item->string, NULL, REF_NO_DEREF))
+ die(_("deleting '%s' failed"), item->string);
+
+ strbuf_release(&referent);
+ display_progress(progress, ++refs_renamed_nr);
+ }
+ for (i = 0; i < remote_branches.nr; i++) {
+ struct string_list_item *item = remote_branches.items + i;
+
+ if (item->util)
+ continue;
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, item->string);
+ strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+ rename.new_name, strlen(rename.new_name));
+ strbuf_reset(&buf2);
+ strbuf_addf(&buf2, "remote: renamed %s to %s",
+ item->string, buf.buf);
+ if (rename_ref(item->string, buf.buf, buf2.buf))
+ die(_("renaming '%s' failed"), item->string);
+ display_progress(progress, ++refs_renamed_nr);
+ }
+ for (i = 0; i < remote_branches.nr; i++) {
+ struct string_list_item *item = remote_branches.items + i;
+
+ if (!item->util)
+ continue;
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, item->string);
+ strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+ rename.new_name, strlen(rename.new_name));
+ strbuf_reset(&buf2);
+ strbuf_addstr(&buf2, item->util);
+ strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old_name),
+ rename.new_name, strlen(rename.new_name));
+ strbuf_reset(&buf3);
+ strbuf_addf(&buf3, "remote: renamed %s to %s",
+ item->string, buf.buf);
+ if (create_symref(buf.buf, buf2.buf, buf3.buf))
+ die(_("creating '%s' failed"), buf.buf);
+ display_progress(progress, ++refs_renamed_nr);
+ }
+ stop_progress(&progress);
+ string_list_clear(&remote_branches, 1);
+
+ handle_push_default(rename.old_name, rename.new_name);
+
+ return 0;
+}
+
+static int rm(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ struct remote *remote;
+ struct strbuf buf = STRBUF_INIT;
+ struct known_remotes known_remotes = { NULL, NULL };
+ struct string_list branches = STRING_LIST_INIT_DUP;
+ struct string_list skipped = STRING_LIST_INIT_DUP;
+ struct branches_for_remote cb_data;
+ int i, result;
+
+ memset(&cb_data, 0, sizeof(cb_data));
+ cb_data.branches = &branches;
+ cb_data.skipped = &skipped;
+ cb_data.keep = &known_remotes;
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_rm_usage, 0);
+ if (argc != 1)
+ usage_with_options(builtin_remote_rm_usage, options);
+
+ remote = remote_get(argv[0]);
+ if (!remote_is_configured(remote, 1)) {
+ error(_("No such remote: '%s'"), argv[0]);
+ exit(2);
+ }
+
+ known_remotes.to_delete = remote;
+ for_each_remote(add_known_remote, &known_remotes);
+
+ read_branches();
+ for (i = 0; i < branch_list.nr; i++) {
+ struct string_list_item *item = branch_list.items + i;
+ struct branch_info *info = item->util;
+ if (info->remote_name && !strcmp(info->remote_name, remote->name)) {
+ const char *keys[] = { "remote", "merge", NULL }, **k;
+ for (k = keys; *k; k++) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "branch.%s.%s",
+ item->string, *k);
+ result = git_config_set_gently(buf.buf, NULL);
+ if (result && result != CONFIG_NOTHING_SET)
+ die(_("could not unset '%s'"), buf.buf);
+ }
+ }
+ if (info->push_remote_name && !strcmp(info->push_remote_name, remote->name)) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "branch.%s.pushremote", item->string);
+ result = git_config_set_gently(buf.buf, NULL);
+ if (result && result != CONFIG_NOTHING_SET)
+ die(_("could not unset '%s'"), buf.buf);
+ }
+ }
+
+ /*
+ * We cannot just pass a function to for_each_ref() which deletes
+ * the branches one by one, since for_each_ref() relies on cached
+ * refs, which are invalidated when deleting a branch.
+ */
+ cb_data.remote = remote;
+ result = for_each_ref(add_branch_for_removal, &cb_data);
+ strbuf_release(&buf);
+
+ if (!result)
+ result = delete_refs("remote: remove", &branches, REF_NO_DEREF);
+ string_list_clear(&branches, 0);
+
+ if (skipped.nr) {
+ fprintf_ln(stderr,
+ Q_("Note: A branch outside the refs/remotes/ hierarchy was not removed;\n"
+ "to delete it, use:",
+ "Note: Some branches outside the refs/remotes/ hierarchy were not removed;\n"
+ "to delete them, use:",
+ skipped.nr));
+ for (i = 0; i < skipped.nr; i++)
+ fprintf(stderr, " git branch -d %s\n",
+ skipped.items[i].string);
+ }
+ string_list_clear(&skipped, 0);
+
+ if (!result) {
+ strbuf_addf(&buf, "remote.%s", remote->name);
+ if (git_config_rename_section(buf.buf, NULL) < 1)
+ return error(_("Could not remove config section '%s'"), buf.buf);
+
+ handle_push_default(remote->name, NULL);
+ }
+
+ return result;
+}
+
+static void clear_push_info(void *util, const char *string UNUSED)
+{
+ struct push_info *info = util;
+ free(info->dest);
+ free(info);
+}
+
+static void free_remote_ref_states(struct ref_states *states)
+{
+ string_list_clear(&states->new_refs, 0);
+ string_list_clear(&states->skipped, 0);
+ string_list_clear(&states->stale, 1);
+ string_list_clear(&states->tracked, 0);
+ string_list_clear(&states->heads, 0);
+ string_list_clear_func(&states->push, clear_push_info);
+}
+
+static int append_ref_to_tracked_list(const char *refname,
+ const struct object_id *oid UNUSED,
+ int flags, void *cb_data)
+{
+ struct ref_states *states = cb_data;
+ struct refspec_item refspec;
+
+ if (flags & REF_ISSYMREF)
+ return 0;
+
+ memset(&refspec, 0, sizeof(refspec));
+ refspec.dst = (char *)refname;
+ if (!remote_find_tracking(states->remote, &refspec))
+ string_list_append(&states->tracked, abbrev_branch(refspec.src));
+
+ return 0;
+}
+
+static int get_remote_ref_states(const char *name,
+ struct ref_states *states,
+ int query)
+{
+ states->remote = remote_get(name);
+ if (!states->remote)
+ return error(_("No such remote: '%s'"), name);
+
+ read_branches();
+
+ if (query) {
+ struct transport *transport;
+ const struct ref *remote_refs;
+
+ transport = transport_get(states->remote, states->remote->url_nr > 0 ?
+ states->remote->url[0] : NULL);
+ remote_refs = transport_get_remote_refs(transport, NULL);
+
+ states->queried = 1;
+ if (query & GET_REF_STATES)
+ get_ref_states(remote_refs, states);
+ if (query & GET_HEAD_NAMES)
+ get_head_names(remote_refs, states);
+ if (query & GET_PUSH_REF_STATES)
+ get_push_ref_states(remote_refs, states);
+ transport_disconnect(transport);
+ } else {
+ for_each_ref(append_ref_to_tracked_list, states);
+ string_list_sort(&states->tracked);
+ get_push_ref_states_noquery(states);
+ }
+
+ return 0;
+}
+
+struct show_info {
+ struct string_list list;
+ struct ref_states states;
+ int width, width2;
+ int any_rebase;
+};
+
+#define SHOW_INFO_INIT { \
+ .list = STRING_LIST_INIT_DUP, \
+ .states = REF_STATES_INIT, \
+}
+
+static int add_remote_to_show_info(struct string_list_item *item, void *cb_data)
+{
+ struct show_info *info = cb_data;
+ int n = strlen(item->string);
+ if (n > info->width)
+ info->width = n;
+ string_list_insert(&info->list, item->string);
+ return 0;
+}
+
+static int show_remote_info_item(struct string_list_item *item, void *cb_data)
+{
+ struct show_info *info = cb_data;
+ struct ref_states *states = &info->states;
+ const char *name = item->string;
+
+ if (states->queried) {
+ const char *fmt = "%s";
+ const char *arg = "";
+ if (string_list_has_string(&states->new_refs, name)) {
+ fmt = _(" new (next fetch will store in remotes/%s)");
+ arg = states->remote->name;
+ } else if (string_list_has_string(&states->tracked, name))
+ arg = _(" tracked");
+ else if (string_list_has_string(&states->skipped, name))
+ arg = _(" skipped");
+ else if (string_list_has_string(&states->stale, name))
+ arg = _(" stale (use 'git remote prune' to remove)");
+ else
+ arg = _(" ???");
+ printf(" %-*s", info->width, name);
+ printf(fmt, arg);
+ printf("\n");
+ } else
+ printf(" %s\n", name);
+
+ return 0;
+}
+
+static int add_local_to_show_info(struct string_list_item *branch_item, void *cb_data)
+{
+ struct show_info *show_info = cb_data;
+ struct ref_states *states = &show_info->states;
+ struct branch_info *branch_info = branch_item->util;
+ struct string_list_item *item;
+ int n;
+
+ if (!branch_info->merge.nr || !branch_info->remote_name ||
+ strcmp(states->remote->name, branch_info->remote_name))
+ return 0;
+ if ((n = strlen(branch_item->string)) > show_info->width)
+ show_info->width = n;
+ if (branch_info->rebase >= REBASE_TRUE)
+ show_info->any_rebase = 1;
+
+ item = string_list_insert(&show_info->list, branch_item->string);
+ item->util = branch_info;
+
+ return 0;
+}
+
+static int show_local_info_item(struct string_list_item *item, void *cb_data)
+{
+ struct show_info *show_info = cb_data;
+ struct branch_info *branch_info = item->util;
+ struct string_list *merge = &branch_info->merge;
+ int width = show_info->width + 4;
+ int i;
+
+ if (branch_info->rebase >= REBASE_TRUE && branch_info->merge.nr > 1) {
+ error(_("invalid branch.%s.merge; cannot rebase onto > 1 branch"),
+ item->string);
+ return 0;
+ }
+
+ printf(" %-*s ", show_info->width, item->string);
+ if (branch_info->rebase >= REBASE_TRUE) {
+ const char *msg;
+ if (branch_info->rebase == REBASE_INTERACTIVE)
+ msg = _("rebases interactively onto remote %s");
+ else if (branch_info->rebase == REBASE_MERGES)
+ msg = _("rebases interactively (with merges) onto "
+ "remote %s");
+ else
+ msg = _("rebases onto remote %s");
+ printf_ln(msg, merge->items[0].string);
+ return 0;
+ } else if (show_info->any_rebase) {
+ printf_ln(_(" merges with remote %s"), merge->items[0].string);
+ width++;
+ } else {
+ printf_ln(_("merges with remote %s"), merge->items[0].string);
+ }
+ for (i = 1; i < merge->nr; i++)
+ printf(_("%-*s and with remote %s\n"), width, "",
+ merge->items[i].string);
+
+ return 0;
+}
+
+static int add_push_to_show_info(struct string_list_item *push_item, void *cb_data)
+{
+ struct show_info *show_info = cb_data;
+ struct push_info *push_info = push_item->util;
+ struct string_list_item *item;
+ int n;
+ if ((n = strlen(push_item->string)) > show_info->width)
+ show_info->width = n;
+ if ((n = strlen(push_info->dest)) > show_info->width2)
+ show_info->width2 = n;
+ item = string_list_append(&show_info->list, push_item->string);
+ item->util = push_item->util;
+ return 0;
+}
+
+/*
+ * Sorting comparison for a string list that has push_info
+ * structs in its util field
+ */
+static int cmp_string_with_push(const void *va, const void *vb)
+{
+ const struct string_list_item *a = va;
+ const struct string_list_item *b = vb;
+ const struct push_info *a_push = a->util;
+ const struct push_info *b_push = b->util;
+ int cmp = strcmp(a->string, b->string);
+ return cmp ? cmp : strcmp(a_push->dest, b_push->dest);
+}
+
+static int show_push_info_item(struct string_list_item *item, void *cb_data)
+{
+ struct show_info *show_info = cb_data;
+ struct push_info *push_info = item->util;
+ const char *src = item->string, *status = NULL;
+
+ switch (push_info->status) {
+ case PUSH_STATUS_CREATE:
+ status = _("create");
+ break;
+ case PUSH_STATUS_DELETE:
+ status = _("delete");
+ src = _("(none)");
+ break;
+ case PUSH_STATUS_UPTODATE:
+ status = _("up to date");
+ break;
+ case PUSH_STATUS_FASTFORWARD:
+ status = _("fast-forwardable");
+ break;
+ case PUSH_STATUS_OUTOFDATE:
+ status = _("local out of date");
+ break;
+ case PUSH_STATUS_NOTQUERIED:
+ break;
+ }
+ if (status) {
+ if (push_info->forced)
+ printf_ln(_(" %-*s forces to %-*s (%s)"), show_info->width, src,
+ show_info->width2, push_info->dest, status);
+ else
+ printf_ln(_(" %-*s pushes to %-*s (%s)"), show_info->width, src,
+ show_info->width2, push_info->dest, status);
+ } else {
+ if (push_info->forced)
+ printf_ln(_(" %-*s forces to %s"), show_info->width, src,
+ push_info->dest);
+ else
+ printf_ln(_(" %-*s pushes to %s"), show_info->width, src,
+ push_info->dest);
+ }
+ return 0;
+}
+
+static int get_one_entry(struct remote *remote, void *priv)
+{
+ struct string_list *list = priv;
+ struct strbuf remote_info_buf = STRBUF_INIT;
+ const char **url;
+ int i, url_nr;
+
+ if (remote->url_nr > 0) {
+ struct strbuf promisor_config = STRBUF_INIT;
+ const char *partial_clone_filter = NULL;
+
+ strbuf_addf(&promisor_config, "remote.%s.partialclonefilter", remote->name);
+ strbuf_addf(&remote_info_buf, "%s (fetch)", remote->url[0]);
+ if (!git_config_get_string_tmp(promisor_config.buf, &partial_clone_filter))
+ strbuf_addf(&remote_info_buf, " [%s]", partial_clone_filter);
+
+ strbuf_release(&promisor_config);
+ string_list_append(list, remote->name)->util =
+ strbuf_detach(&remote_info_buf, NULL);
+ } else
+ string_list_append(list, remote->name)->util = NULL;
+ if (remote->pushurl_nr) {
+ url = remote->pushurl;
+ url_nr = remote->pushurl_nr;
+ } else {
+ url = remote->url;
+ url_nr = remote->url_nr;
+ }
+ for (i = 0; i < url_nr; i++) {
+ strbuf_addf(&remote_info_buf, "%s (push)", url[i]);
+ string_list_append(list, remote->name)->util =
+ strbuf_detach(&remote_info_buf, NULL);
+ }
+
+ return 0;
+}
+
+static int show_all(void)
+{
+ struct string_list list = STRING_LIST_INIT_DUP;
+ int result;
+
+ result = for_each_remote(get_one_entry, &list);
+
+ if (!result) {
+ int i;
+
+ string_list_sort(&list);
+ for (i = 0; i < list.nr; i++) {
+ struct string_list_item *item = list.items + i;
+ if (verbose)
+ printf("%s\t%s\n", item->string,
+ item->util ? (const char *)item->util : "");
+ else {
+ if (i && !strcmp((item - 1)->string, item->string))
+ continue;
+ printf("%s\n", item->string);
+ }
+ }
+ }
+ string_list_clear(&list, 1);
+ return result;
+}
+
+static int show(int argc, const char **argv, const char *prefix)
+{
+ int no_query = 0, result = 0, query_flag = 0;
+ struct option options[] = {
+ OPT_BOOL('n', NULL, &no_query, N_("do not query remotes")),
+ OPT_END()
+ };
+ struct show_info info = SHOW_INFO_INIT;
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_show_usage,
+ 0);
+
+ if (argc < 1)
+ return show_all();
+
+ if (!no_query)
+ query_flag = (GET_REF_STATES | GET_HEAD_NAMES | GET_PUSH_REF_STATES);
+
+ for (; argc; argc--, argv++) {
+ int i;
+ const char **url;
+ int url_nr;
+
+ get_remote_ref_states(*argv, &info.states, query_flag);
+
+ printf_ln(_("* remote %s"), *argv);
+ printf_ln(_(" Fetch URL: %s"), info.states.remote->url_nr > 0 ?
+ info.states.remote->url[0] : _("(no URL)"));
+ if (info.states.remote->pushurl_nr) {
+ url = info.states.remote->pushurl;
+ url_nr = info.states.remote->pushurl_nr;
+ } else {
+ url = info.states.remote->url;
+ url_nr = info.states.remote->url_nr;
+ }
+ for (i = 0; i < url_nr; i++)
+ /*
+ * TRANSLATORS: the colon ':' should align
+ * with the one in " Fetch URL: %s"
+ * translation.
+ */
+ printf_ln(_(" Push URL: %s"), url[i]);
+ if (!i)
+ printf_ln(_(" Push URL: %s"), _("(no URL)"));
+ if (no_query)
+ printf_ln(_(" HEAD branch: %s"), _("(not queried)"));
+ else if (!info.states.heads.nr)
+ printf_ln(_(" HEAD branch: %s"), _("(unknown)"));
+ else if (info.states.heads.nr == 1)
+ printf_ln(_(" HEAD branch: %s"), info.states.heads.items[0].string);
+ else {
+ printf(_(" HEAD branch (remote HEAD is ambiguous,"
+ " may be one of the following):\n"));
+ for (i = 0; i < info.states.heads.nr; i++)
+ printf(" %s\n", info.states.heads.items[i].string);
+ }
+
+ /* remote branch info */
+ info.width = 0;
+ for_each_string_list(&info.states.new_refs, add_remote_to_show_info, &info);
+ for_each_string_list(&info.states.skipped, add_remote_to_show_info, &info);
+ for_each_string_list(&info.states.tracked, add_remote_to_show_info, &info);
+ for_each_string_list(&info.states.stale, add_remote_to_show_info, &info);
+ if (info.list.nr)
+ printf_ln(Q_(" Remote branch:%s",
+ " Remote branches:%s",
+ info.list.nr),
+ no_query ? _(" (status not queried)") : "");
+ for_each_string_list(&info.list, show_remote_info_item, &info);
+ string_list_clear(&info.list, 0);
+
+ /* git pull info */
+ info.width = 0;
+ info.any_rebase = 0;
+ for_each_string_list(&branch_list, add_local_to_show_info, &info);
+ if (info.list.nr)
+ printf_ln(Q_(" Local branch configured for 'git pull':",
+ " Local branches configured for 'git pull':",
+ info.list.nr));
+ for_each_string_list(&info.list, show_local_info_item, &info);
+ string_list_clear(&info.list, 0);
+
+ /* git push info */
+ if (info.states.remote->mirror)
+ printf_ln(_(" Local refs will be mirrored by 'git push'"));
+
+ info.width = info.width2 = 0;
+ for_each_string_list(&info.states.push, add_push_to_show_info, &info);
+ QSORT(info.list.items, info.list.nr, cmp_string_with_push);
+ if (info.list.nr)
+ printf_ln(Q_(" Local ref configured for 'git push'%s:",
+ " Local refs configured for 'git push'%s:",
+ info.list.nr),
+ no_query ? _(" (status not queried)") : "");
+ for_each_string_list(&info.list, show_push_info_item, &info);
+ string_list_clear(&info.list, 0);
+
+ free_remote_ref_states(&info.states);
+ }
+
+ return result;
+}
+
+static int set_head(int argc, const char **argv, const char *prefix)
+{
+ int i, opt_a = 0, opt_d = 0, result = 0;
+ struct strbuf buf = STRBUF_INIT, buf2 = STRBUF_INIT;
+ char *head_name = NULL;
+
+ struct option options[] = {
+ OPT_BOOL('a', "auto", &opt_a,
+ N_("set refs/remotes/<name>/HEAD according to remote")),
+ OPT_BOOL('d', "delete", &opt_d,
+ N_("delete refs/remotes/<name>/HEAD")),
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_sethead_usage, 0);
+ if (argc)
+ strbuf_addf(&buf, "refs/remotes/%s/HEAD", argv[0]);
+
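+ /*
+ * Three modes: "set-head <remote> <branch>" points the symref at
+ * the named branch, "--auto" asks the remote which branch its HEAD
+ * refers to, and "--delete" removes refs/remotes/<remote>/HEAD.
+ */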
+ if (!opt_a && !opt_d && argc == 2) {
+ head_name = xstrdup(argv[1]);
+ } else if (opt_a && !opt_d && argc == 1) {
+ struct ref_states states = REF_STATES_INIT;
+ get_remote_ref_states(argv[0], &states, GET_HEAD_NAMES);
+ if (!states.heads.nr)
+ result |= error(_("Cannot determine remote HEAD"));
+ else if (states.heads.nr > 1) {
+ result |= error(_("Multiple remote HEAD branches. "
+ "Please choose one explicitly with:"));
+ for (i = 0; i < states.heads.nr; i++)
+ fprintf(stderr, " git remote set-head %s %s\n",
+ argv[0], states.heads.items[i].string);
+ } else
+ head_name = xstrdup(states.heads.items[0].string);
+ free_remote_ref_states(&states);
+ } else if (opt_d && !opt_a && argc == 1) {
+ if (delete_ref(NULL, buf.buf, NULL, REF_NO_DEREF))
+ result |= error(_("Could not delete %s"), buf.buf);
+ } else
+ usage_with_options(builtin_remote_sethead_usage, options);
+
+ if (head_name) {
+ strbuf_addf(&buf2, "refs/remotes/%s/%s", argv[0], head_name);
+ /* make sure it's valid */
+ if (!ref_exists(buf2.buf))
+ result |= error(_("Not a valid ref: %s"), buf2.buf);
+ else if (create_symref(buf.buf, buf2.buf, "remote set-head"))
+ result |= error(_("Could not setup %s"), buf.buf);
+ else if (opt_a)
+ printf("%s/HEAD set to %s\n", argv[0], head_name);
+ free(head_name);
+ }
+
+ strbuf_release(&buf);
+ strbuf_release(&buf2);
+ return result;
+}
+
+static int prune_remote(const char *remote, int dry_run)
+{
+ int result = 0;
+ struct ref_states states = REF_STATES_INIT;
+ struct string_list refs_to_prune = STRING_LIST_INIT_NODUP;
+ struct string_list_item *item;
+ const char *dangling_msg = dry_run
+ ? _(" %s will become dangling!")
+ : _(" %s has become dangling!");
+
+ get_remote_ref_states(remote, &states, GET_REF_STATES);
+
+ if (!states.stale.nr) {
+ free_remote_ref_states(&states);
+ return 0;
+ }
+
+ printf_ln(_("Pruning %s"), remote);
+ printf_ln(_("URL: %s"),
+ states.remote->url_nr
+ ? states.remote->url[0]
+ : _("(no URL)"));
+
+ for_each_string_list_item(item, &states.stale)
+ string_list_append(&refs_to_prune, item->util);
+ string_list_sort(&refs_to_prune);
+
+ if (!dry_run)
+ result |= delete_refs("remote: prune", &refs_to_prune, 0);
+
+ for_each_string_list_item(item, &states.stale) {
+ const char *refname = item->util;
+
+ if (dry_run)
+ printf_ln(_(" * [would prune] %s"),
+ abbrev_ref(refname, "refs/remotes/"));
+ else
+ printf_ln(_(" * [pruned] %s"),
+ abbrev_ref(refname, "refs/remotes/"));
+ }
+
+ warn_dangling_symrefs(stdout, dangling_msg, &refs_to_prune);
+
+ string_list_clear(&refs_to_prune, 0);
+ free_remote_ref_states(&states);
+ return result;
+}
+
+static int prune(int argc, const char **argv, const char *prefix)
+{
+ int dry_run = 0, result = 0;
+ struct option options[] = {
+ OPT__DRY_RUN(&dry_run, N_("dry run")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_prune_usage, 0);
+
+ if (argc < 1)
+ usage_with_options(builtin_remote_prune_usage, options);
+
+ for (; argc; argc--, argv++)
+ result |= prune_remote(*argv, dry_run);
+
+ return result;
+}
+
+static int get_remote_default(const char *key, const char *value UNUSED, void *priv)
+{
+ if (strcmp(key, "remotes.default") == 0) {
+ int *found = priv;
+ *found = 1;
+ }
+ return 0;
+}
+
+static int update(int argc, const char **argv, const char *prefix)
+{
+ int i, prune = -1;
+ struct option options[] = {
+ OPT_BOOL('p', "prune", &prune,
+ N_("prune remotes after fetching")),
+ OPT_END()
+ };
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ int default_defined = 0;
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_update_usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ strvec_push(&cmd.args, "fetch");
+
+ if (prune != -1)
+ strvec_push(&cmd.args, prune ? "--prune" : "--no-prune");
+ if (verbose)
+ strvec_push(&cmd.args, "-v");
+ strvec_push(&cmd.args, "--multiple");
+ if (argc < 2)
+ strvec_push(&cmd.args, "default");
+ for (i = 1; i < argc; i++)
+ strvec_push(&cmd.args, argv[i]);
+
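+ /*
+ * If the effective group is "default" (either implicitly or named
+ * on the command line) and no remotes.default group is configured,
+ * fall back to "git fetch --all".
+ */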
+ if (strcmp(cmd.args.v[cmd.args.nr-1], "default") == 0) {
+ git_config(get_remote_default, &default_defined);
+ if (!default_defined) {
+ strvec_pop(&cmd.args);
+ strvec_push(&cmd.args, "--all");
+ }
+ }
+
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
+}
+
+static int remove_all_fetch_refspecs(const char *key)
+{
+ return git_config_set_multivar_gently(key, NULL, NULL,
+ CONFIG_FLAGS_MULTI_REPLACE);
+}
+
+static void add_branches(struct remote *remote, const char **branches,
+ const char *key)
+{
+ const char *remotename = remote->name;
+ int mirror = remote->mirror;
+ struct strbuf refspec = STRBUF_INIT;
+
+ for (; *branches; branches++)
+ add_branch(key, *branches, remotename, mirror, &refspec);
+
+ strbuf_release(&refspec);
+}
+
+static int set_remote_branches(const char *remotename, const char **branches,
+ int add_mode)
+{
+ struct strbuf key = STRBUF_INIT;
+ struct remote *remote;
+
+ strbuf_addf(&key, "remote.%s.fetch", remotename);
+
+ remote = remote_get(remotename);
+ if (!remote_is_configured(remote, 1)) {
+ error(_("No such remote '%s'"), remotename);
+ exit(2);
+ }
+
+ if (!add_mode && remove_all_fetch_refspecs(key.buf)) {
+ strbuf_release(&key);
+ return 1;
+ }
+ add_branches(remote, branches, key.buf);
+
+ strbuf_release(&key);
+ return 0;
+}
+
+static int set_branches(int argc, const char **argv, const char *prefix)
+{
+ int add_mode = 0;
+ struct option options[] = {
+ OPT_BOOL('\0', "add", &add_mode, N_("add branch")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_setbranches_usage, 0);
+ if (argc == 0) {
+ error(_("no remote specified"));
+ usage_with_options(builtin_remote_setbranches_usage, options);
+ }
+ argv[argc] = NULL;
+
+ return set_remote_branches(argv[0], argv + 1, add_mode);
+}
+
+static int get_url(int argc, const char **argv, const char *prefix)
+{
+ int i, push_mode = 0, all_mode = 0;
+ const char *remotename = NULL;
+ struct remote *remote;
+ const char **url;
+ int url_nr;
+ struct option options[] = {
+ OPT_BOOL('\0', "push", &push_mode,
+ N_("query push URLs rather than fetch URLs")),
+ OPT_BOOL('\0', "all", &all_mode,
+ N_("return all URLs")),
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_geturl_usage, 0);
+
+ if (argc != 1)
+ usage_with_options(builtin_remote_geturl_usage, options);
+
+ remotename = argv[0];
+
+ remote = remote_get(remotename);
+ if (!remote_is_configured(remote, 1)) {
+ error(_("No such remote '%s'"), remotename);
+ exit(2);
+ }
+
+ url_nr = 0;
+ if (push_mode) {
+ url = remote->pushurl;
+ url_nr = remote->pushurl_nr;
+ }
+ /* else fetch mode */
+
+ /* Use the fetch URL when no push URLs were found or requested. */
+ if (!url_nr) {
+ url = remote->url;
+ url_nr = remote->url_nr;
+ }
+
+ if (!url_nr)
+ die(_("no URLs configured for remote '%s'"), remotename);
+
+ if (all_mode) {
+ for (i = 0; i < url_nr; i++)
+ printf_ln("%s", url[i]);
+ } else {
+ printf_ln("%s", *url);
+ }
+
+ return 0;
+}
+
+static int set_url(int argc, const char **argv, const char *prefix)
+{
+ int i, push_mode = 0, add_mode = 0, delete_mode = 0;
+ int matches = 0, negative_matches = 0;
+ const char *remotename = NULL;
+ const char *newurl = NULL;
+ const char *oldurl = NULL;
+ struct remote *remote;
+ regex_t old_regex;
+ const char **urlset;
+ int urlset_nr;
+ struct strbuf name_buf = STRBUF_INIT;
+ struct option options[] = {
+ OPT_BOOL('\0', "push", &push_mode,
+ N_("manipulate push URLs")),
+ OPT_BOOL('\0', "add", &add_mode,
+ N_("add URL")),
+ OPT_BOOL('\0', "delete", &delete_mode,
+ N_("delete URLs")),
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_remote_seturl_usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ if (add_mode && delete_mode)
+ die(_("--add --delete doesn't make sense"));
+
+ if (argc < 3 || argc > 4 || ((add_mode || delete_mode) && argc != 3))
+ usage_with_options(builtin_remote_seturl_usage, options);
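+ /*
+ * Because of PARSE_OPT_KEEP_ARGV0, argv[0] is still the subcommand
+ * name here; e.g. "git remote set-url origin <newurl> [<oldurl>]"
+ * arrives as argc == 3 or 4, while --add and --delete take exactly
+ * one URL.
+ */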
+
+ remotename = argv[1];
+ newurl = argv[2];
+ if (argc > 3)
+ oldurl = argv[3];
+
+ if (delete_mode)
+ oldurl = newurl;
+
+ remote = remote_get(remotename);
+ if (!remote_is_configured(remote, 1)) {
+ error(_("No such remote '%s'"), remotename);
+ exit(2);
+ }
+
+ if (push_mode) {
+ strbuf_addf(&name_buf, "remote.%s.pushurl", remotename);
+ urlset = remote->pushurl;
+ urlset_nr = remote->pushurl_nr;
+ } else {
+ strbuf_addf(&name_buf, "remote.%s.url", remotename);
+ urlset = remote->url;
+ urlset_nr = remote->url_nr;
+ }
+
+ /* Special cases that add a new entry. */
+ if ((!oldurl && !delete_mode) || add_mode) {
+ if (add_mode)
+ git_config_set_multivar(name_buf.buf, newurl,
+ "^$", 0);
+ else
+ git_config_set(name_buf.buf, newurl);
+ goto out;
+ }
+
+ /* Old URL specified. Demand that one matches. */
+ if (regcomp(&old_regex, oldurl, REG_EXTENDED))
+ die(_("Invalid old URL pattern: %s"), oldurl);
+
+ for (i = 0; i < urlset_nr; i++)
+ if (!regexec(&old_regex, urlset[i], 0, NULL, 0))
+ matches++;
+ else
+ negative_matches++;
+ if (!delete_mode && !matches)
+ die(_("No such URL found: %s"), oldurl);
+ if (delete_mode && !negative_matches && !push_mode)
+ die(_("Will not delete all non-push URLs"));
+
+ regfree(&old_regex);
+
+ if (!delete_mode)
+ git_config_set_multivar(name_buf.buf, newurl, oldurl, 0);
+ else
+ git_config_set_multivar(name_buf.buf, NULL, oldurl,
+ CONFIG_FLAGS_MULTI_REPLACE);
+out:
+ strbuf_release(&name_buf);
+ return 0;
+}
+
+int cmd_remote(int argc, const char **argv, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option options[] = {
+ OPT__VERBOSE(&verbose, N_("be verbose; must be placed before a subcommand")),
+ OPT_SUBCOMMAND("add", &fn, add),
+ OPT_SUBCOMMAND("rename", &fn, mv),
+ OPT_SUBCOMMAND_F("rm", &fn, rm, PARSE_OPT_NOCOMPLETE),
+ OPT_SUBCOMMAND("remove", &fn, rm),
+ OPT_SUBCOMMAND("set-head", &fn, set_head),
+ OPT_SUBCOMMAND("set-branches", &fn, set_branches),
+ OPT_SUBCOMMAND("get-url", &fn, get_url),
+ OPT_SUBCOMMAND("set-url", &fn, set_url),
+ OPT_SUBCOMMAND("show", &fn, show),
+ OPT_SUBCOMMAND("prune", &fn, prune),
+ OPT_SUBCOMMAND("update", &fn, update),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, builtin_remote_usage,
+ PARSE_OPT_SUBCOMMAND_OPTIONAL);
+
+ if (fn) {
+ return !!fn(argc, argv, prefix);
+ } else {
+ if (argc) {
+ error(_("unknown subcommand: `%s'"), argv[0]);
+ usage_with_options(builtin_remote_usage, options);
+ }
+ return !!show_all();
+ }
+}
diff --git a/builtin/repack.c b/builtin/repack.c
new file mode 100644
index 0000000..65eb1b8
--- /dev/null
+++ b/builtin/repack.c
@@ -0,0 +1,1181 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "sigchain.h"
+#include "strbuf.h"
+#include "string-list.h"
+#include "strvec.h"
+#include "midx.h"
+#include "packfile.h"
+#include "prune-packed.h"
+#include "object-store.h"
+#include "promisor-remote.h"
+#include "shallow.h"
+#include "pack.h"
+#include "pack-bitmap.h"
+#include "refs.h"
+
+#define ALL_INTO_ONE 1
+#define LOOSEN_UNREACHABLE 2
+#define PACK_CRUFT 4
+
+#define DELETE_PACK 1
+#define CRUFT_PACK 2
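+/*
+ * ALL_INTO_ONE, LOOSEN_UNREACHABLE and PACK_CRUFT are bits in
+ * pack_everything; DELETE_PACK and CRUFT_PACK are flags kept in the
+ * util field of the existing (non-kept) pack lists below.
+ */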
+
+static int pack_everything;
+static int delta_base_offset = 1;
+static int pack_kept_objects = -1;
+static int write_bitmaps = -1;
+static int use_delta_islands;
+static int run_update_server_info = 1;
+static char *packdir, *packtmp_name, *packtmp;
+
+static const char *const git_repack_usage[] = {
+ N_("git repack [<options>]"),
+ NULL
+};
+
+static const char incremental_bitmap_conflict_error[] = N_(
+"Incremental repacks are incompatible with bitmap indexes. Use\n"
+"--no-write-bitmap-index or disable the pack.writeBitmaps configuration."
+);
+
+struct pack_objects_args {
+ const char *window;
+ const char *window_memory;
+ const char *depth;
+ const char *threads;
+ const char *max_pack_size;
+ int no_reuse_delta;
+ int no_reuse_object;
+ int quiet;
+ int local;
+};
+
+static int repack_config(const char *var, const char *value, void *cb)
+{
+ struct pack_objects_args *cruft_po_args = cb;
+ if (!strcmp(var, "repack.usedeltabaseoffset")) {
+ delta_base_offset = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "repack.packkeptobjects")) {
+ pack_kept_objects = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "repack.writebitmaps") ||
+ !strcmp(var, "pack.writebitmaps")) {
+ write_bitmaps = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "repack.usedeltaislands")) {
+ use_delta_islands = git_config_bool(var, value);
+ return 0;
+ }
+ if (strcmp(var, "repack.updateserverinfo") == 0) {
+ run_update_server_info = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "repack.cruftwindow"))
+ return git_config_string(&cruft_po_args->window, var, value);
+ if (!strcmp(var, "repack.cruftwindowmemory"))
+ return git_config_string(&cruft_po_args->window_memory, var, value);
+ if (!strcmp(var, "repack.cruftdepth"))
+ return git_config_string(&cruft_po_args->depth, var, value);
+ if (!strcmp(var, "repack.cruftthreads"))
+ return git_config_string(&cruft_po_args->threads, var, value);
+ return git_default_config(var, value, cb);
+}
+
+/*
+ * Adds the basename of each pack (without the ".pack" suffix) to
+ * either fname_nonkept_list or fname_kept_list, depending on whether
+ * the pack has a corresponding .keep file or not.  Packs without a
+ * .keep file are not to be kept if we are going to pack everything
+ * into one file.
+ */
+static void collect_pack_filenames(struct string_list *fname_nonkept_list,
+ struct string_list *fname_kept_list,
+ const struct string_list *extra_keep)
+{
+ DIR *dir;
+ struct dirent *e;
+ char *fname;
+
+ if (!(dir = opendir(packdir)))
+ return;
+
+ while ((e = readdir(dir)) != NULL) {
+ size_t len;
+ int i;
+
+ if (!strip_suffix(e->d_name, ".pack", &len))
+ continue;
+
+ for (i = 0; i < extra_keep->nr; i++)
+ if (!fspathcmp(e->d_name, extra_keep->items[i].string))
+ break;
+
+ fname = xmemdupz(e->d_name, len);
+
+ if ((extra_keep->nr > 0 && i < extra_keep->nr) ||
+ (file_exists(mkpath("%s/%s.keep", packdir, fname)))) {
+ string_list_append_nodup(fname_kept_list, fname);
+ } else {
+ struct string_list_item *item;
+ item = string_list_append_nodup(fname_nonkept_list,
+ fname);
+ if (file_exists(mkpath("%s/%s.mtimes", packdir, fname)))
+ item->util = (void*)(uintptr_t)CRUFT_PACK;
+ }
+ }
+ closedir(dir);
+
+ string_list_sort(fname_kept_list);
+}
+
+static void remove_redundant_pack(const char *dir_name, const char *base_name)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct multi_pack_index *m = get_local_multi_pack_index(the_repository);
+ strbuf_addf(&buf, "%s.pack", base_name);
+ if (m && midx_contains_pack(m, buf.buf))
+ clear_midx_file(the_repository);
+ strbuf_insertf(&buf, 0, "%s/", dir_name);
+ unlink_pack_path(buf.buf, 1);
+ strbuf_release(&buf);
+}
+
+static void prepare_pack_objects(struct child_process *cmd,
+ const struct pack_objects_args *args,
+ const char *out)
+{
+ strvec_push(&cmd->args, "pack-objects");
+ if (args->window)
+ strvec_pushf(&cmd->args, "--window=%s", args->window);
+ if (args->window_memory)
+ strvec_pushf(&cmd->args, "--window-memory=%s", args->window_memory);
+ if (args->depth)
+ strvec_pushf(&cmd->args, "--depth=%s", args->depth);
+ if (args->threads)
+ strvec_pushf(&cmd->args, "--threads=%s", args->threads);
+ if (args->max_pack_size)
+ strvec_pushf(&cmd->args, "--max-pack-size=%s", args->max_pack_size);
+ if (args->no_reuse_delta)
+ strvec_pushf(&cmd->args, "--no-reuse-delta");
+ if (args->no_reuse_object)
+ strvec_pushf(&cmd->args, "--no-reuse-object");
+ if (args->local)
+ strvec_push(&cmd->args, "--local");
+ if (args->quiet)
+ strvec_push(&cmd->args, "--quiet");
+ if (delta_base_offset)
+ strvec_push(&cmd->args, "--delta-base-offset");
+ strvec_push(&cmd->args, out);
+ cmd->git_cmd = 1;
+ cmd->out = -1;
+}
+
+/*
+ * Write oid to the given struct child_process's stdin, starting it first if
+ * necessary.
+ */
+static int write_oid(const struct object_id *oid, struct packed_git *pack,
+ uint32_t pos, void *data)
+{
+ struct child_process *cmd = data;
+
+ if (cmd->in == -1) {
+ if (start_command(cmd))
+ die(_("could not start pack-objects to repack promisor objects"));
+ }
+
+ xwrite(cmd->in, oid_to_hex(oid), the_hash_algo->hexsz);
+ xwrite(cmd->in, "\n", 1);
+ return 0;
+}
+
+static struct {
+ const char *name;
+ unsigned optional:1;
+} exts[] = {
+ {".pack"},
+ {".rev", 1},
+ {".mtimes", 1},
+ {".bitmap", 1},
+ {".promisor", 1},
+ {".idx"},
+};
+
+struct generated_pack_data {
+ struct tempfile *tempfiles[ARRAY_SIZE(exts)];
+};
+
+static struct generated_pack_data *populate_pack_exts(const char *name)
+{
+ struct stat statbuf;
+ struct strbuf path = STRBUF_INIT;
+ struct generated_pack_data *data = xcalloc(1, sizeof(*data));
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(exts); i++) {
+ strbuf_reset(&path);
+ strbuf_addf(&path, "%s-%s%s", packtmp, name, exts[i].name);
+
+ if (stat(path.buf, &statbuf))
+ continue;
+
+ data->tempfiles[i] = register_tempfile(path.buf);
+ }
+
+ strbuf_release(&path);
+ return data;
+}
+
+static void repack_promisor_objects(const struct pack_objects_args *args,
+ struct string_list *names)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ FILE *out;
+ struct strbuf line = STRBUF_INIT;
+
+ prepare_pack_objects(&cmd, args, packtmp);
+ cmd.in = -1;
+
+ /*
+ * NEEDSWORK: Giving pack-objects only the OIDs without any ordering
+ * hints may result in suboptimal deltas in the resulting pack. See if
+ * the OIDs can be sent with fake paths such that pack-objects can use a
+ * {type -> existing pack order} ordering when computing deltas instead
+ * of a {type -> size} ordering, which may produce better deltas.
+ */
+ for_each_packed_object(write_oid, &cmd,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+
+ if (cmd.in == -1) {
+ /* No packed objects; cmd was never started */
+ child_process_clear(&cmd);
+ return;
+ }
+
+ close(cmd.in);
+
+ out = xfdopen(cmd.out, "r");
+ while (strbuf_getline_lf(&line, out) != EOF) {
+ struct string_list_item *item;
+ char *promisor_name;
+
+ if (line.len != the_hash_algo->hexsz)
+ die(_("repack: Expecting full hex object ID lines only from pack-objects."));
+ item = string_list_append(names, line.buf);
+
+ /*
+ * pack-objects creates the .pack and .idx files, but not the
+ * .promisor file. Create the .promisor file, which is empty.
+ *
+ * NEEDSWORK: fetch-pack sometimes generates non-empty
+ * .promisor files containing the ref names and associated
+ * hashes at the point of generation of the corresponding
+ * packfile, but this would not preserve their contents. Maybe
+ * concatenate the contents of all .promisor files instead of
+ * just creating a new empty file.
+ */
+ promisor_name = mkpathdup("%s-%s.promisor", packtmp,
+ line.buf);
+ write_promisor_file(promisor_name, NULL, 0);
+
+ item->util = populate_pack_exts(item->string);
+
+ free(promisor_name);
+ }
+ fclose(out);
+ if (finish_command(&cmd))
+ die(_("could not finish pack-objects to repack promisor objects"));
+}
+
+struct pack_geometry {
+ struct packed_git **pack;
+ uint32_t pack_nr, pack_alloc;
+ uint32_t split;
+};
+
+static uint32_t geometry_pack_weight(struct packed_git *p)
+{
+ if (open_pack_index(p))
+ die(_("cannot open index for %s"), p->pack_name);
+ return p->num_objects;
+}
+
+static int geometry_cmp(const void *va, const void *vb)
+{
+ uint32_t aw = geometry_pack_weight(*(struct packed_git **)va),
+ bw = geometry_pack_weight(*(struct packed_git **)vb);
+
+ if (aw < bw)
+ return -1;
+ if (aw > bw)
+ return 1;
+ return 0;
+}
+
+static void init_pack_geometry(struct pack_geometry **geometry_p,
+ struct string_list *existing_kept_packs)
+{
+ struct packed_git *p;
+ struct pack_geometry *geometry;
+ struct strbuf buf = STRBUF_INIT;
+
+ *geometry_p = xcalloc(1, sizeof(struct pack_geometry));
+ geometry = *geometry_p;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!pack_kept_objects) {
+ /*
+ * Any pack that has its pack_keep bit set will appear
+ * in existing_kept_packs below, but this saves us from
+ * doing a more expensive check.
+ */
+ if (p->pack_keep)
+ continue;
+
+ /*
+ * The pack may be kept via the --keep-pack option;
+ * check 'existing_kept_packs' to determine whether to
+ * ignore it.
+ */
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, pack_basename(p));
+ strbuf_strip_suffix(&buf, ".pack");
+
+ if (string_list_has_string(existing_kept_packs, buf.buf))
+ continue;
+ }
+ if (p->is_cruft)
+ continue;
+
+ ALLOC_GROW(geometry->pack,
+ geometry->pack_nr + 1,
+ geometry->pack_alloc);
+
+ geometry->pack[geometry->pack_nr] = p;
+ geometry->pack_nr++;
+ }
+
+ QSORT(geometry->pack, geometry->pack_nr, geometry_cmp);
+ strbuf_release(&buf);
+}
+
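+/*
+ * A small worked example with --geometric=2 and packs holding 1, 1, 3
+ * and 40 objects: the descending scan below stops at the (1, 1) pair,
+ * so the two smallest packs must be rolled up; their combined weight
+ * of 2 then pulls in the 3-object pack as well (3 < 2*2), while the
+ * 40-object pack stays untouched (40 >= 2*5), leaving
+ * geometry->split == 3.
+ */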
+static void split_pack_geometry(struct pack_geometry *geometry, int factor)
+{
+ uint32_t i;
+ uint32_t split;
+ off_t total_size = 0;
+
+ if (!geometry->pack_nr) {
+ geometry->split = geometry->pack_nr;
+ return;
+ }
+
+ /*
+ * First, count the number of packs (in descending order of size) which
+ * already form a geometric progression.
+ */
+ for (i = geometry->pack_nr - 1; i > 0; i--) {
+ struct packed_git *ours = geometry->pack[i];
+ struct packed_git *prev = geometry->pack[i - 1];
+
+ if (unsigned_mult_overflows(factor, geometry_pack_weight(prev)))
+ die(_("pack %s too large to consider in geometric "
+ "progression"),
+ prev->pack_name);
+
+ if (geometry_pack_weight(ours) < factor * geometry_pack_weight(prev))
+ break;
+ }
+
+ split = i;
+
+ if (split) {
+ /*
+ * Move the split one to the right, since the top element in the
+ * last-compared pair can't be in the progression. Only do this
+ * when we split in the middle of the array (otherwise if we got
+ * to the end, then the split is in the right place).
+ */
+ split++;
+ }
+
+ /*
+ * Then, anything to the left of 'split' must be in a new pack. But,
+ * creating that new pack may cause packs in the heavy half to no longer
+ * form a geometric progression.
+ *
+ * Compute an expected size of the new pack, and then determine how many
+ * packs in the heavy half need to be joined into it (if any) to restore
+ * the geometric progression.
+ */
+ for (i = 0; i < split; i++) {
+ struct packed_git *p = geometry->pack[i];
+
+ if (unsigned_add_overflows(total_size, geometry_pack_weight(p)))
+ die(_("pack %s too large to roll up"), p->pack_name);
+ total_size += geometry_pack_weight(p);
+ }
+ for (i = split; i < geometry->pack_nr; i++) {
+ struct packed_git *ours = geometry->pack[i];
+
+ if (unsigned_mult_overflows(factor, total_size))
+ die(_("pack %s too large to roll up"), ours->pack_name);
+
+ if (geometry_pack_weight(ours) < factor * total_size) {
+ if (unsigned_add_overflows(total_size,
+ geometry_pack_weight(ours)))
+ die(_("pack %s too large to roll up"),
+ ours->pack_name);
+
+ split++;
+ total_size += geometry_pack_weight(ours);
+ } else
+ break;
+ }
+
+ geometry->split = split;
+}
+
+static struct packed_git *get_largest_active_pack(struct pack_geometry *geometry)
+{
+ if (!geometry) {
+ /*
+ * No geometry means either an all-into-one repack (in which
+ * case there is only one pack left and it is the largest) or an
+ * incremental one.
+ *
+ * If repacking incrementally, then we could check the size of
+ * all packs to determine which should be preferred, but leave
+ * this for later.
+ */
+ return NULL;
+ }
+ if (geometry->split == geometry->pack_nr)
+ return NULL;
+ return geometry->pack[geometry->pack_nr - 1];
+}
+
+static void clear_pack_geometry(struct pack_geometry *geometry)
+{
+ if (!geometry)
+ return;
+
+ free(geometry->pack);
+ geometry->pack_nr = 0;
+ geometry->pack_alloc = 0;
+ geometry->split = 0;
+}
+
+struct midx_snapshot_ref_data {
+ struct tempfile *f;
+ struct oidset seen;
+ int preferred;
+};
+
+static int midx_snapshot_ref_one(const char *refname UNUSED,
+ const struct object_id *oid,
+ int flag UNUSED, void *_data)
+{
+ struct midx_snapshot_ref_data *data = _data;
+ struct object_id peeled;
+
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+
+ if (oidset_insert(&data->seen, oid))
+ return 0; /* already seen */
+
+ if (oid_object_info(the_repository, oid, NULL) != OBJ_COMMIT)
+ return 0;
+
+ fprintf(data->f->fp, "%s%s\n", data->preferred ? "+" : "",
+ oid_to_hex(oid));
+
+ return 0;
+}
+
+static void midx_snapshot_refs(struct tempfile *f)
+{
+ struct midx_snapshot_ref_data data;
+ const struct string_list *preferred = bitmap_preferred_tips(the_repository);
+
+ data.f = f;
+ data.preferred = 0;
+ oidset_init(&data.seen, 0);
+
+ if (!fdopen_tempfile(f, "w"))
+ die(_("could not open tempfile %s for writing"),
+ get_tempfile_path(f));
+
+ if (preferred) {
+ struct string_list_item *item;
+
+ data.preferred = 1;
+ for_each_string_list_item(item, preferred)
+ for_each_ref_in(item->string, midx_snapshot_ref_one, &data);
+ data.preferred = 0;
+ }
+
+ for_each_ref(midx_snapshot_ref_one, &data);
+
+ if (close_tempfile_gently(f)) {
+ int save_errno = errno;
+ delete_tempfile(&f);
+ errno = save_errno;
+ die_errno(_("could not close refs snapshot tempfile"));
+ }
+
+ oidset_clear(&data.seen);
+}
+
+static void midx_included_packs(struct string_list *include,
+ struct string_list *existing_nonkept_packs,
+ struct string_list *existing_kept_packs,
+ struct string_list *names,
+ struct pack_geometry *geometry)
+{
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, existing_kept_packs)
+ string_list_insert(include, xstrfmt("%s.idx", item->string));
+ for_each_string_list_item(item, names)
+ string_list_insert(include, xstrfmt("pack-%s.idx", item->string));
+ if (geometry) {
+ struct strbuf buf = STRBUF_INIT;
+ uint32_t i;
+ for (i = geometry->split; i < geometry->pack_nr; i++) {
+ struct packed_git *p = geometry->pack[i];
+
+ strbuf_addstr(&buf, pack_basename(p));
+ strbuf_strip_suffix(&buf, ".pack");
+ strbuf_addstr(&buf, ".idx");
+
+ string_list_insert(include, strbuf_detach(&buf, NULL));
+ }
+
+ for_each_string_list_item(item, existing_nonkept_packs) {
+ if (!((uintptr_t)item->util & CRUFT_PACK)) {
+ /*
+ * no need to check DELETE_PACK, since we're not
+ * doing an ALL_INTO_ONE repack
+ */
+ continue;
+ }
+ string_list_insert(include, xstrfmt("%s.idx", item->string));
+ }
+ } else {
+ for_each_string_list_item(item, existing_nonkept_packs) {
+ if ((uintptr_t)item->util & DELETE_PACK)
+ continue;
+ string_list_insert(include, xstrfmt("%s.idx", item->string));
+ }
+ }
+}
+
+static int write_midx_included_packs(struct string_list *include,
+ struct pack_geometry *geometry,
+ const char *refs_snapshot,
+ int show_progress, int write_bitmaps)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct string_list_item *item;
+ struct packed_git *largest = get_largest_active_pack(geometry);
+ FILE *in;
+ int ret;
+
+ if (!include->nr)
+ return 0;
+
+ cmd.in = -1;
+ cmd.git_cmd = 1;
+
+ strvec_push(&cmd.args, "multi-pack-index");
+ strvec_pushl(&cmd.args, "write", "--stdin-packs", NULL);
+
+ if (show_progress)
+ strvec_push(&cmd.args, "--progress");
+ else
+ strvec_push(&cmd.args, "--no-progress");
+
+ if (write_bitmaps)
+ strvec_push(&cmd.args, "--bitmap");
+
+ if (largest)
+ strvec_pushf(&cmd.args, "--preferred-pack=%s",
+ pack_basename(largest));
+
+ if (refs_snapshot)
+ strvec_pushf(&cmd.args, "--refs-snapshot=%s", refs_snapshot);
+
+ ret = start_command(&cmd);
+ if (ret)
+ return ret;
+
+ in = xfdopen(cmd.in, "w");
+ for_each_string_list_item(item, include)
+ fprintf(in, "%s\n", item->string);
+ fclose(in);
+
+ return finish_command(&cmd);
+}
+
+static void remove_redundant_bitmaps(struct string_list *include,
+ const char *packdir)
+{
+ struct strbuf path = STRBUF_INIT;
+ struct string_list_item *item;
+ size_t packdir_len;
+
+ strbuf_addstr(&path, packdir);
+ strbuf_addch(&path, '/');
+ packdir_len = path.len;
+
+ /*
+ * Remove any pack bitmaps corresponding to packs which are now
+ * included in the MIDX.
+ */
+ for_each_string_list_item(item, include) {
+ strbuf_addstr(&path, item->string);
+ strbuf_strip_suffix(&path, ".idx");
+ strbuf_addstr(&path, ".bitmap");
+
+ if (unlink(path.buf) && errno != ENOENT)
+ warning_errno(_("could not remove stale bitmap: %s"),
+ path.buf);
+
+ strbuf_setlen(&path, packdir_len);
+ }
+ strbuf_release(&path);
+}
+
+static int write_cruft_pack(const struct pack_objects_args *args,
+ const char *destination,
+ const char *pack_prefix,
+ const char *cruft_expiration,
+ struct string_list *names,
+ struct string_list *existing_packs,
+ struct string_list *existing_kept_packs)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct strbuf line = STRBUF_INIT;
+ struct string_list_item *item;
+ FILE *in, *out;
+ int ret;
+ const char *scratch;
+ int local = skip_prefix(destination, packdir, &scratch);
+
+ prepare_pack_objects(&cmd, args, destination);
+
+ strvec_push(&cmd.args, "--cruft");
+ if (cruft_expiration)
+ strvec_pushf(&cmd.args, "--cruft-expiration=%s",
+ cruft_expiration);
+
+ strvec_push(&cmd.args, "--honor-pack-keep");
+ strvec_push(&cmd.args, "--non-empty");
+ strvec_push(&cmd.args, "--max-pack-size=0");
+
+ cmd.in = -1;
+
+ ret = start_command(&cmd);
+ if (ret)
+ return ret;
+
+ /*
+ * names has a confusing double use: it both provides the list
+ * of just-written new packs, and accepts the name of the cruft
+ * pack we are writing.
+ *
+ * By the time it is read here, it contains only the pack(s)
+ * that were just written, which is exactly the set of packs we
+ * want to consider kept.
+ *
+ * If `--expire-to` is given, the double-use served by `names`
+ * ensures that the pack written to `--expire-to` excludes any
+ * objects contained in the cruft pack.
+ */
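+ /*
+ * Lines prefixed with "-" name packs that are about to be removed;
+ * pack-objects --cruft gathers their unreachable objects (subject
+ * to --cruft-expiration) into the cruft pack, while plain lines
+ * name packs whose objects are treated as kept.
+ */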
+ in = xfdopen(cmd.in, "w");
+ for_each_string_list_item(item, names)
+ fprintf(in, "%s-%s.pack\n", pack_prefix, item->string);
+ for_each_string_list_item(item, existing_packs)
+ fprintf(in, "-%s.pack\n", item->string);
+ for_each_string_list_item(item, existing_kept_packs)
+ fprintf(in, "%s.pack\n", item->string);
+ fclose(in);
+
+ out = xfdopen(cmd.out, "r");
+ while (strbuf_getline_lf(&line, out) != EOF) {
+ struct string_list_item *item;
+
+ if (line.len != the_hash_algo->hexsz)
+ die(_("repack: Expecting full hex object ID lines only "
+ "from pack-objects."));
+ /*
+ * avoid putting packs written outside of the repository in the
+ * list of names
+ */
+ if (local) {
+ item = string_list_append(names, line.buf);
+ item->util = populate_pack_exts(line.buf);
+ }
+ }
+ fclose(out);
+
+ strbuf_release(&line);
+
+ return finish_command(&cmd);
+}
+
+int cmd_repack(int argc, const char **argv, const char *prefix)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct string_list_item *item;
+ struct string_list names = STRING_LIST_INIT_DUP;
+ struct string_list existing_nonkept_packs = STRING_LIST_INIT_DUP;
+ struct string_list existing_kept_packs = STRING_LIST_INIT_DUP;
+ struct pack_geometry *geometry = NULL;
+ struct strbuf line = STRBUF_INIT;
+ struct tempfile *refs_snapshot = NULL;
+ int i, ext, ret;
+ FILE *out;
+ int show_progress;
+
+ /* variables to be filled by option parsing */
+ int delete_redundant = 0;
+ const char *unpack_unreachable = NULL;
+ int keep_unreachable = 0;
+ struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
+ struct pack_objects_args po_args = {NULL};
+ struct pack_objects_args cruft_po_args = {NULL};
+ int geometric_factor = 0;
+ int write_midx = 0;
+ const char *cruft_expiration = NULL;
+ const char *expire_to = NULL;
+
+ struct option builtin_repack_options[] = {
+ OPT_BIT('a', NULL, &pack_everything,
+ N_("pack everything in a single pack"), ALL_INTO_ONE),
+ OPT_BIT('A', NULL, &pack_everything,
+ N_("same as -a, and turn unreachable objects loose"),
+ LOOSEN_UNREACHABLE | ALL_INTO_ONE),
+ OPT_BIT(0, "cruft", &pack_everything,
+ N_("same as -a, pack unreachable cruft objects separately"),
+ PACK_CRUFT),
+ OPT_STRING(0, "cruft-expiration", &cruft_expiration, N_("approxidate"),
+ N_("with -C, expire objects older than this")),
+ OPT_BOOL('d', NULL, &delete_redundant,
+ N_("remove redundant packs, and run git-prune-packed")),
+ OPT_BOOL('f', NULL, &po_args.no_reuse_delta,
+ N_("pass --no-reuse-delta to git-pack-objects")),
+ OPT_BOOL('F', NULL, &po_args.no_reuse_object,
+ N_("pass --no-reuse-object to git-pack-objects")),
+ OPT_NEGBIT('n', NULL, &run_update_server_info,
+ N_("do not run git-update-server-info"), 1),
+ OPT__QUIET(&po_args.quiet, N_("be quiet")),
+ OPT_BOOL('l', "local", &po_args.local,
+ N_("pass --local to git-pack-objects")),
+ OPT_BOOL('b', "write-bitmap-index", &write_bitmaps,
+ N_("write bitmap index")),
+ OPT_BOOL('i', "delta-islands", &use_delta_islands,
+ N_("pass --delta-islands to git-pack-objects")),
+ OPT_STRING(0, "unpack-unreachable", &unpack_unreachable, N_("approxidate"),
+ N_("with -A, do not loosen objects older than this")),
+ OPT_BOOL('k', "keep-unreachable", &keep_unreachable,
+ N_("with -a, repack unreachable objects")),
+ OPT_STRING(0, "window", &po_args.window, N_("n"),
+ N_("size of the window used for delta compression")),
+ OPT_STRING(0, "window-memory", &po_args.window_memory, N_("bytes"),
+ N_("same as the above, but limit memory size instead of entries count")),
+ OPT_STRING(0, "depth", &po_args.depth, N_("n"),
+ N_("limits the maximum delta depth")),
+ OPT_STRING(0, "threads", &po_args.threads, N_("n"),
+ N_("limits the maximum number of threads")),
+ OPT_STRING(0, "max-pack-size", &po_args.max_pack_size, N_("bytes"),
+ N_("maximum size of each packfile")),
+ OPT_BOOL(0, "pack-kept-objects", &pack_kept_objects,
+ N_("repack objects in packs marked with .keep")),
+ OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+ N_("do not repack this pack")),
+ OPT_INTEGER('g', "geometric", &geometric_factor,
+ N_("find a geometric progression with factor <N>")),
+ OPT_BOOL('m', "write-midx", &write_midx,
+ N_("write a multi-pack index of the resulting packs")),
+ OPT_STRING(0, "expire-to", &expire_to, N_("dir"),
+ N_("pack prefix to store a pack containing pruned objects")),
+ OPT_END()
+ };
+
+ git_config(repack_config, &cruft_po_args);
+
+ argc = parse_options(argc, argv, prefix, builtin_repack_options,
+ git_repack_usage, 0);
+
+ if (delete_redundant && repository_format_precious_objects)
+ die(_("cannot delete packs in a precious-objects repo"));
+
+ if (keep_unreachable &&
+ (unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE)))
+ die(_("options '%s' and '%s' cannot be used together"), "--keep-unreachable", "-A");
+
+ if (pack_everything & PACK_CRUFT) {
+ pack_everything |= ALL_INTO_ONE;
+
+ if (unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE))
+ die(_("options '%s' and '%s' cannot be used together"), "--cruft", "-A");
+ if (keep_unreachable)
+ die(_("options '%s' and '%s' cannot be used together"), "--cruft", "-k");
+ }
+
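+ /*
+ * When bitmaps were not explicitly requested, disable them unless
+ * this is an all-into-one repack of a bare repository or a MIDX is
+ * being written. An explicit request is dropped when the
+ * GIT_TEST_MULTI_PACK_INDEX* variables ask for MIDX bitmaps instead.
+ */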
+ if (write_bitmaps < 0) {
+ if (!write_midx &&
+ (!(pack_everything & ALL_INTO_ONE) || !is_bare_repository()))
+ write_bitmaps = 0;
+ } else if (write_bitmaps &&
+ git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0) &&
+ git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 0)) {
+ write_bitmaps = 0;
+ }
+ if (pack_kept_objects < 0)
+ pack_kept_objects = write_bitmaps > 0 && !write_midx;
+
+ if (write_bitmaps && !(pack_everything & ALL_INTO_ONE) && !write_midx)
+ die(_(incremental_bitmap_conflict_error));
+
+ if (write_midx && write_bitmaps) {
+ struct strbuf path = STRBUF_INIT;
+
+ strbuf_addf(&path, "%s/%s_XXXXXX", get_object_directory(),
+ "bitmap-ref-tips");
+
+ refs_snapshot = xmks_tempfile(path.buf);
+ midx_snapshot_refs(refs_snapshot);
+
+ strbuf_release(&path);
+ }
+
+ packdir = mkpathdup("%s/pack", get_object_directory());
+ packtmp_name = xstrfmt(".tmp-%d-pack", (int)getpid());
+ packtmp = mkpathdup("%s/%s", packdir, packtmp_name);
+
+ collect_pack_filenames(&existing_nonkept_packs, &existing_kept_packs,
+ &keep_pack_list);
+
+ if (geometric_factor) {
+ if (pack_everything)
+ die(_("options '%s' and '%s' cannot be used together"), "--geometric", "-A/-a");
+ init_pack_geometry(&geometry, &existing_kept_packs);
+ split_pack_geometry(geometry, geometric_factor);
+ }
+
+ prepare_pack_objects(&cmd, &po_args, packtmp);
+
+ show_progress = !po_args.quiet && isatty(2);
+
+ strvec_push(&cmd.args, "--keep-true-parents");
+ if (!pack_kept_objects)
+ strvec_push(&cmd.args, "--honor-pack-keep");
+ for (i = 0; i < keep_pack_list.nr; i++)
+ strvec_pushf(&cmd.args, "--keep-pack=%s",
+ keep_pack_list.items[i].string);
+ strvec_push(&cmd.args, "--non-empty");
+ if (!geometry) {
+ /*
+ * We need to grab all reachable objects, including those that
+ * are reachable from reflogs and the index.
+ *
+ * When repacking into a geometric progression of packs,
+ * however, we use 'git pack-objects --stdin-packs', which is
+ * not about packing objects based on reachability but about
+ * repacking all the objects in the specified packs plus the
+ * loose ones (indeed, --stdin-packs is incompatible with these
+ * options).
+ */
+ strvec_push(&cmd.args, "--all");
+ strvec_push(&cmd.args, "--reflog");
+ strvec_push(&cmd.args, "--indexed-objects");
+ }
+ if (has_promisor_remote())
+ strvec_push(&cmd.args, "--exclude-promisor-objects");
+ if (!write_midx) {
+ if (write_bitmaps > 0)
+ strvec_push(&cmd.args, "--write-bitmap-index");
+ else if (write_bitmaps < 0)
+ strvec_push(&cmd.args, "--write-bitmap-index-quiet");
+ }
+ if (use_delta_islands)
+ strvec_push(&cmd.args, "--delta-islands");
+
+ if (pack_everything & ALL_INTO_ONE) {
+ repack_promisor_objects(&po_args, &names);
+
+ if (existing_nonkept_packs.nr && delete_redundant &&
+ !(pack_everything & PACK_CRUFT)) {
+ for_each_string_list_item(item, &names) {
+ strvec_pushf(&cmd.args, "--keep-pack=%s-%s.pack",
+ packtmp_name, item->string);
+ }
+ if (unpack_unreachable) {
+ strvec_pushf(&cmd.args,
+ "--unpack-unreachable=%s",
+ unpack_unreachable);
+ } else if (pack_everything & LOOSEN_UNREACHABLE) {
+ strvec_push(&cmd.args,
+ "--unpack-unreachable");
+ } else if (keep_unreachable) {
+ strvec_push(&cmd.args, "--keep-unreachable");
+ strvec_push(&cmd.args, "--pack-loose-unreachable");
+ }
+ }
+ } else if (geometry) {
+ strvec_push(&cmd.args, "--stdin-packs");
+ strvec_push(&cmd.args, "--unpacked");
+ } else {
+ strvec_push(&cmd.args, "--unpacked");
+ strvec_push(&cmd.args, "--incremental");
+ }
+
+ if (geometry)
+ cmd.in = -1;
+ else
+ cmd.no_stdin = 1;
+
+ ret = start_command(&cmd);
+ if (ret)
+ return ret;
+
+ if (geometry) {
+ FILE *in = xfdopen(cmd.in, "w");
+ /*
+ * The resulting pack should contain all objects in packs that
+ * are going to be rolled up, but exclude objects in packs which
+ * are being left alone.
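+ *
+ * Each line fed to pack-objects names a pack to roll up by its
+ * basename; a leading '^' marks a pack whose objects must be
+ * excluded instead.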
+ */
+ for (i = 0; i < geometry->split; i++)
+ fprintf(in, "%s\n", pack_basename(geometry->pack[i]));
+ for (i = geometry->split; i < geometry->pack_nr; i++)
+ fprintf(in, "^%s\n", pack_basename(geometry->pack[i]));
+ fclose(in);
+ }
+
+ out = xfdopen(cmd.out, "r");
+ while (strbuf_getline_lf(&line, out) != EOF) {
+ struct string_list_item *item;
+
+ if (line.len != the_hash_algo->hexsz)
+ die(_("repack: Expecting full hex object ID lines only from pack-objects."));
+ item = string_list_append(&names, line.buf);
+ item->util = populate_pack_exts(item->string);
+ }
+ fclose(out);
+ ret = finish_command(&cmd);
+ if (ret)
+ return ret;
+
+ if (!names.nr && !po_args.quiet)
+ printf_ln(_("Nothing new to pack."));
+
+ if (pack_everything & PACK_CRUFT) {
+ const char *pack_prefix;
+ if (!skip_prefix(packtmp, packdir, &pack_prefix))
+ die(_("pack prefix %s does not begin with objdir %s"),
+ packtmp, packdir);
+ if (*pack_prefix == '/')
+ pack_prefix++;
+
+ if (!cruft_po_args.window)
+ cruft_po_args.window = po_args.window;
+ if (!cruft_po_args.window_memory)
+ cruft_po_args.window_memory = po_args.window_memory;
+ if (!cruft_po_args.depth)
+ cruft_po_args.depth = po_args.depth;
+ if (!cruft_po_args.threads)
+ cruft_po_args.threads = po_args.threads;
+
+ cruft_po_args.local = po_args.local;
+ cruft_po_args.quiet = po_args.quiet;
+
+ ret = write_cruft_pack(&cruft_po_args, packtmp, pack_prefix,
+ cruft_expiration, &names,
+ &existing_nonkept_packs,
+ &existing_kept_packs);
+ if (ret)
+ return ret;
+
+ if (delete_redundant && expire_to) {
+ /*
+ * If `--expire-to` is given with `-d`, it's possible
+ * that we're about to prune some objects. With cruft
+ * packs, pruning is implicit: any objects from existing
+ * packs that weren't picked up by new packs are removed
+ * when their packs are deleted.
+ *
+ * Generate an additional cruft pack, with one twist:
+ * `names` now includes the name of the cruft pack
+ * written in the previous step. So the contents of
+ * _this_ cruft pack exclude everything contained in the
+ * existing cruft pack (that is, all of the unreachable
+ * objects which are no older than
+ * `--cruft-expiration`).
+ *
+ * To make this work, cruft_expiration must become NULL
+ * so that this cruft pack doesn't actually prune any
+ * objects. If it were non-NULL, this call would always
+ * generate an empty pack (since every object not in the
+ * cruft pack generated above will have an mtime older
+ * than the expiration).
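+ *
+ * The pack written to `expire_to` thus ends up holding the
+ * objects that would otherwise be pruned when the old packs are
+ * deleted.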
+ */
+ ret = write_cruft_pack(&cruft_po_args, expire_to,
+ pack_prefix,
+ NULL,
+ &names,
+ &existing_nonkept_packs,
+ &existing_kept_packs);
+ if (ret)
+ return ret;
+ }
+ }
+
+ string_list_sort(&names);
+
+ close_object_store(the_repository->objects);
+
+ /*
+ * OK, we have prepared all the new packfiles.
+ */
+ for_each_string_list_item(item, &names) {
+ struct generated_pack_data *data = item->util;
+
+ for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
+ char *fname;
+
+ fname = mkpathdup("%s/pack-%s%s",
+ packdir, item->string, exts[ext].name);
+
+ if (data->tempfiles[ext]) {
+ const char *fname_old = get_tempfile_path(data->tempfiles[ext]);
+ struct stat statbuffer;
+
+ if (!stat(fname_old, &statbuffer)) {
+ statbuffer.st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
+ chmod(fname_old, statbuffer.st_mode);
+ }
+
+ if (rename_tempfile(&data->tempfiles[ext], fname))
+ die_errno(_("renaming pack to '%s' failed"), fname);
+ } else if (!exts[ext].optional)
+ die(_("pack-objects did not write a '%s' file for pack %s-%s"),
+ exts[ext].name, packtmp, item->string);
+ else if (unlink(fname) < 0 && errno != ENOENT)
+ die_errno(_("could not unlink: %s"), fname);
+
+ free(fname);
+ }
+ }
+ /* End of pack replacement. */
+
+ if (delete_redundant && pack_everything & ALL_INTO_ONE) {
+ const int hexsz = the_hash_algo->hexsz;
+ for_each_string_list_item(item, &existing_nonkept_packs) {
+ char *sha1;
+ size_t len = strlen(item->string);
+ if (len < hexsz)
+ continue;
+ sha1 = item->string + len - hexsz;
+ /*
+ * Mark this pack for deletion, which ensures that this
+ * pack won't be included in a MIDX (if `--write-midx`
+ * was given) and that we will actually delete this pack
+ * (if `-d` was given).
+ */
+ if (!string_list_has_string(&names, sha1))
+ item->util = (void*)(uintptr_t)((size_t)item->util | DELETE_PACK);
+ }
+ }
+
+ if (write_midx) {
+ struct string_list include = STRING_LIST_INIT_NODUP;
+ midx_included_packs(&include, &existing_nonkept_packs,
+ &existing_kept_packs, &names, geometry);
+
+ ret = write_midx_included_packs(&include, geometry,
+ refs_snapshot ? get_tempfile_path(refs_snapshot) : NULL,
+ show_progress, write_bitmaps > 0);
+
+ if (!ret && write_bitmaps)
+ remove_redundant_bitmaps(&include, packdir);
+
+ string_list_clear(&include, 0);
+
+ if (ret)
+ return ret;
+ }
+
+ reprepare_packed_git(the_repository);
+
+ if (delete_redundant) {
+ int opts = 0;
+ for_each_string_list_item(item, &existing_nonkept_packs) {
+ if (!((uintptr_t)item->util & DELETE_PACK))
+ continue;
+ remove_redundant_pack(packdir, item->string);
+ }
+
+ if (geometry) {
+ struct strbuf buf = STRBUF_INIT;
+
+ uint32_t i;
+ for (i = 0; i < geometry->split; i++) {
+ struct packed_git *p = geometry->pack[i];
+ if (string_list_has_string(&names,
+ hash_to_hex(p->hash)))
+ continue;
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, pack_basename(p));
+ strbuf_strip_suffix(&buf, ".pack");
+
+ if ((p->pack_keep) ||
+ (string_list_has_string(&existing_kept_packs,
+ buf.buf)))
+ continue;
+
+ remove_redundant_pack(packdir, buf.buf);
+ }
+ strbuf_release(&buf);
+ }
+ if (show_progress)
+ opts |= PRUNE_PACKED_VERBOSE;
+ prune_packed_objects(opts);
+
+ if (!keep_unreachable &&
+ (!(pack_everything & LOOSEN_UNREACHABLE) ||
+ unpack_unreachable) &&
+ is_repository_shallow(the_repository))
+ prune_shallow(PRUNE_QUICK);
+ }
+
+ if (run_update_server_info)
+ update_server_info(0);
+
+ if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) {
+ unsigned flags = 0;
+ if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 0))
+ flags |= MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX;
+ write_midx_file(get_object_directory(), NULL, NULL, flags);
+ }
+
+ string_list_clear(&names, 1);
+ string_list_clear(&existing_nonkept_packs, 0);
+ string_list_clear(&existing_kept_packs, 0);
+ clear_pack_geometry(geometry);
+ strbuf_release(&line);
+
+ return 0;
+}
diff --git a/builtin/replace.c b/builtin/replace.c
new file mode 100644
index 0000000..a29e911
--- /dev/null
+++ b/builtin/replace.c
@@ -0,0 +1,626 @@
+/*
+ * Builtin "git replace"
+ *
+ * Copyright (c) 2008 Christian Couder <chriscool@tuxfamily.org>
+ *
+ * Based on builtin/tag.c by Kristian Høgsberg <krh@redhat.com>
+ * and Carlos Rica <jasampler@gmail.com> that was itself based on
+ * git-tag.sh and mktag.c by Linus Torvalds.
+ */
+
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "refs.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "object-store.h"
+#include "repository.h"
+#include "tag.h"
+
+static const char * const git_replace_usage[] = {
+ N_("git replace [-f] <object> <replacement>"),
+ N_("git replace [-f] --edit <object>"),
+ N_("git replace [-f] --graft <commit> [<parent>...]"),
+ "git replace [-f] --convert-graft-file",
+ N_("git replace -d <object>..."),
+ N_("git replace [--format=<format>] [-l [<pattern>]]"),
+ NULL
+};
+
+enum replace_format {
+ REPLACE_FORMAT_SHORT,
+ REPLACE_FORMAT_MEDIUM,
+ REPLACE_FORMAT_LONG
+};
+
+struct show_data {
+ const char *pattern;
+ enum replace_format format;
+};
+
+static int show_reference(struct repository *r, const char *refname,
+ const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct show_data *data = cb_data;
+
+ if (!wildmatch(data->pattern, refname, 0)) {
+ if (data->format == REPLACE_FORMAT_SHORT)
+ printf("%s\n", refname);
+ else if (data->format == REPLACE_FORMAT_MEDIUM)
+ printf("%s -> %s\n", refname, oid_to_hex(oid));
+ else { /* data->format == REPLACE_FORMAT_LONG */
+ struct object_id object;
+ enum object_type obj_type, repl_type;
+
+ if (get_oid(refname, &object))
+ return error(_("failed to resolve '%s' as a valid ref"), refname);
+
+ obj_type = oid_object_info(r, &object, NULL);
+ repl_type = oid_object_info(r, oid, NULL);
+
+ printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
+ oid_to_hex(oid), type_name(repl_type));
+ }
+ }
+
+ return 0;
+}
+
+static int list_replace_refs(const char *pattern, const char *format)
+{
+ struct show_data data;
+
+ if (!pattern)
+ pattern = "*";
+ data.pattern = pattern;
+
+ if (format == NULL || *format == '\0' || !strcmp(format, "short"))
+ data.format = REPLACE_FORMAT_SHORT;
+ else if (!strcmp(format, "medium"))
+ data.format = REPLACE_FORMAT_MEDIUM;
+ else if (!strcmp(format, "long"))
+ data.format = REPLACE_FORMAT_LONG;
+ /*
+ * Please update _git_replace() in git-completion.bash when
+ * you add a new format.
+ */
+ else
+ return error(_("invalid replace format '%s'\n"
+ "valid formats are 'short', 'medium' and 'long'"),
+ format);
+
+ for_each_replace_ref(the_repository, show_reference, (void *)&data);
+
+ return 0;
+}
+
+typedef int (*each_replace_name_fn)(const char *name, const char *ref,
+ const struct object_id *oid);
+
+static int for_each_replace_name(const char **argv, each_replace_name_fn fn)
+{
+ const char **p, *full_hex;
+ struct strbuf ref = STRBUF_INIT;
+ size_t base_len;
+ int had_error = 0;
+ struct object_id oid;
+ const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
+
+ strbuf_addstr(&ref, git_replace_ref_base);
+ base_len = ref.len;
+
+ for (p = argv; *p; p++) {
+ if (get_oid(*p, &oid)) {
+ error("failed to resolve '%s' as a valid ref", *p);
+ had_error = 1;
+ continue;
+ }
+
+ strbuf_setlen(&ref, base_len);
+ strbuf_addstr(&ref, oid_to_hex(&oid));
+ full_hex = ref.buf + base_len;
+
+ if (read_ref(ref.buf, &oid)) {
+ error(_("replace ref '%s' not found"), full_hex);
+ had_error = 1;
+ continue;
+ }
+ if (fn(full_hex, ref.buf, &oid))
+ had_error = 1;
+ }
+ strbuf_release(&ref);
+ return had_error;
+}
+
+static int delete_replace_ref(const char *name, const char *ref,
+ const struct object_id *oid)
+{
+ if (delete_ref(NULL, ref, oid, 0))
+ return 1;
+ printf_ln(_("Deleted replace ref '%s'"), name);
+ return 0;
+}
+
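+/*
+ * Build the replace ref name for "object" ("refs/replace/<oid>"), verify
+ * that it is a well-formed refname, and load any existing value into
+ * "prev"; unless "force" is given, refuse to go on if the ref already
+ * exists.
+ */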
+static int check_ref_valid(struct object_id *object,
+ struct object_id *prev,
+ struct strbuf *ref,
+ int force)
+{
+ const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref;
+
+ strbuf_reset(ref);
+ strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object));
+ if (check_refname_format(ref->buf, 0))
+ return error(_("'%s' is not a valid ref name"), ref->buf);
+
+ if (read_ref(ref->buf, prev))
+ oidclr(prev);
+ else if (!force)
+ return error(_("replace ref '%s' already exists"), ref->buf);
+ return 0;
+}
+
+static int replace_object_oid(const char *object_ref,
+ struct object_id *object,
+ const char *replace_ref,
+ struct object_id *repl,
+ int force)
+{
+ struct object_id prev;
+ enum object_type obj_type, repl_type;
+ struct strbuf ref = STRBUF_INIT;
+ struct ref_transaction *transaction;
+ struct strbuf err = STRBUF_INIT;
+ int res = 0;
+
+ obj_type = oid_object_info(the_repository, object, NULL);
+ repl_type = oid_object_info(the_repository, repl, NULL);
+ if (!force && obj_type != repl_type)
+ return error(_("Objects must be of the same type.\n"
+ "'%s' points to a replaced object of type '%s'\n"
+ "while '%s' points to a replacement object of "
+ "type '%s'."),
+ object_ref, type_name(obj_type),
+ replace_ref, type_name(repl_type));
+
+ if (check_ref_valid(object, &prev, &ref, force)) {
+ strbuf_release(&ref);
+ return -1;
+ }
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction ||
+ ref_transaction_update(transaction, ref.buf, repl, &prev,
+ 0, NULL, &err) ||
+ ref_transaction_commit(transaction, &err))
+ res = error("%s", err.buf);
+
+ ref_transaction_free(transaction);
+ strbuf_release(&ref);
+ return res;
+}
+
+static int replace_object(const char *object_ref, const char *replace_ref, int force)
+{
+ struct object_id object, repl;
+
+ if (get_oid(object_ref, &object))
+ return error(_("failed to resolve '%s' as a valid ref"),
+ object_ref);
+ if (get_oid(replace_ref, &repl))
+ return error(_("failed to resolve '%s' as a valid ref"),
+ replace_ref);
+
+ return replace_object_oid(object_ref, &object, replace_ref, &repl, force);
+}
+
+/*
+ * Write the contents of the object named by "oid" to the file "filename".
+ * If "raw" is true, then the object's raw contents are printed according to
+ * "type". Otherwise, we pretty-print the contents for human editing.
+ */
+static int export_object(const struct object_id *oid, enum object_type type,
+ int raw, const char *filename)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ int fd;
+
+ fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+ if (fd < 0)
+ return error_errno(_("unable to open %s for writing"), filename);
+
+ strvec_push(&cmd.args, "--no-replace-objects");
+ strvec_push(&cmd.args, "cat-file");
+ if (raw)
+ strvec_push(&cmd.args, type_name(type));
+ else
+ strvec_push(&cmd.args, "-p");
+ strvec_push(&cmd.args, oid_to_hex(oid));
+ cmd.git_cmd = 1;
+ cmd.out = fd;
+
+ if (run_command(&cmd))
+ return error(_("cat-file reported failure"));
+ return 0;
+}
+
+/*
+ * Read a previously-exported (and possibly edited) object back from "filename",
+ * interpreting it as "type", and writing the result to the object database.
+ * The object ID of the written object is returned via "oid".
+ */
+static int import_object(struct object_id *oid, enum object_type type,
+ int raw, const char *filename)
+{
+ int fd;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return error_errno(_("unable to open %s for reading"), filename);
+
+ if (!raw && type == OBJ_TREE) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct strbuf result = STRBUF_INIT;
+
+ strvec_push(&cmd.args, "mktree");
+ cmd.git_cmd = 1;
+ cmd.in = fd;
+ cmd.out = -1;
+
+ if (start_command(&cmd)) {
+ close(fd);
+ return error(_("unable to spawn mktree"));
+ }
+
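+ /*
+ * mktree prints the object name of the resulting tree followed
+ * by a newline; read its output and parse the leading object
+ * name below.
+ */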
+ if (strbuf_read(&result, cmd.out, the_hash_algo->hexsz + 1) < 0) {
+ error_errno(_("unable to read from mktree"));
+ close(fd);
+ close(cmd.out);
+ return -1;
+ }
+ close(cmd.out);
+
+ if (finish_command(&cmd)) {
+ strbuf_release(&result);
+ return error(_("mktree reported failure"));
+ }
+ if (get_oid_hex(result.buf, oid) < 0) {
+ strbuf_release(&result);
+ return error(_("mktree did not return an object name"));
+ }
+
+ strbuf_release(&result);
+ } else {
+ struct stat st;
+ int flags = HASH_FORMAT_CHECK | HASH_WRITE_OBJECT;
+
+ if (fstat(fd, &st) < 0) {
+ error_errno(_("unable to fstat %s"), filename);
+ close(fd);
+ return -1;
+ }
+ if (index_fd(the_repository->index, oid, fd, &st, type, NULL, flags) < 0)
+ return error(_("unable to write object to database"));
+ /* index_fd close()s fd for us */
+ }
+
+ /*
+ * No need to close(fd) here; both run-command and index-fd
+ * will have done it for us.
+ */
+ return 0;
+}
+
+static int edit_and_replace(const char *object_ref, int force, int raw)
+{
+ char *tmpfile;
+ enum object_type type;
+ struct object_id old_oid, new_oid, prev;
+ struct strbuf ref = STRBUF_INIT;
+
+ if (get_oid(object_ref, &old_oid) < 0)
+ return error(_("not a valid object name: '%s'"), object_ref);
+
+ type = oid_object_info(the_repository, &old_oid, NULL);
+ if (type < 0)
+ return error(_("unable to get object type for %s"),
+ oid_to_hex(&old_oid));
+
+ if (check_ref_valid(&old_oid, &prev, &ref, force)) {
+ strbuf_release(&ref);
+ return -1;
+ }
+ strbuf_release(&ref);
+
+ tmpfile = git_pathdup("REPLACE_EDITOBJ");
+ if (export_object(&old_oid, type, raw, tmpfile)) {
+ free(tmpfile);
+ return -1;
+ }
+ if (launch_editor(tmpfile, NULL, NULL) < 0) {
+ free(tmpfile);
+ return error(_("editing object file failed"));
+ }
+ if (import_object(&new_oid, type, raw, tmpfile)) {
+ free(tmpfile);
+ return -1;
+ }
+ free(tmpfile);
+
+ if (oideq(&old_oid, &new_oid))
+ return error(_("new object is the same as the old one: '%s'"), oid_to_hex(&old_oid));
+
+ return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
+}
+
+static int replace_parents(struct strbuf *buf, int argc, const char **argv)
+{
+ struct strbuf new_parents = STRBUF_INIT;
+ const char *parent_start, *parent_end;
+ int i;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ /* find existing parents */
+ parent_start = buf->buf;
+ parent_start += hexsz + 6; /* "tree " + "hex sha1" + "\n" */
+ parent_end = parent_start;
+
+ while (starts_with(parent_end, "parent "))
+ parent_end += hexsz + 8; /* "parent " + "hex sha1" + "\n" */
+
+ /* prepare new parents */
+ for (i = 0; i < argc; i++) {
+ struct object_id oid;
+ struct commit *commit;
+
+ if (get_oid(argv[i], &oid) < 0) {
+ strbuf_release(&new_parents);
+ return error(_("not a valid object name: '%s'"),
+ argv[i]);
+ }
+ commit = lookup_commit_reference(the_repository, &oid);
+ if (!commit) {
+ strbuf_release(&new_parents);
+ return error(_("could not parse %s as a commit"), argv[i]);
+ }
+ strbuf_addf(&new_parents, "parent %s\n", oid_to_hex(&commit->object.oid));
+ }
+
+ /* replace existing parents with new ones */
+ strbuf_splice(buf, parent_start - buf->buf, parent_end - parent_start,
+ new_parents.buf, new_parents.len);
+
+ strbuf_release(&new_parents);
+ return 0;
+}
+
+struct check_mergetag_data {
+ int argc;
+ const char **argv;
+};
+
+static int check_one_mergetag(struct commit *commit,
+ struct commit_extra_header *extra,
+ void *data)
+{
+ struct check_mergetag_data *mergetag_data = (struct check_mergetag_data *)data;
+ const char *ref = mergetag_data->argv[0];
+ struct object_id tag_oid;
+ struct tag *tag;
+ int i;
+
+ hash_object_file(the_hash_algo, extra->value, extra->len,
+ OBJ_TAG, &tag_oid);
+ tag = lookup_tag(the_repository, &tag_oid);
+ if (!tag)
+ return error(_("bad mergetag in commit '%s'"), ref);
+ if (parse_tag_buffer(the_repository, tag, extra->value, extra->len))
+ return error(_("malformed mergetag in commit '%s'"), ref);
+
+ /* iterate over new parents */
+ for (i = 1; i < mergetag_data->argc; i++) {
+ struct object_id oid;
+ if (get_oid(mergetag_data->argv[i], &oid) < 0)
+ return error(_("not a valid object name: '%s'"),
+ mergetag_data->argv[i]);
+ if (oideq(get_tagged_oid(tag), &oid))
+ return 0; /* found */
+ }
+
+ return error(_("original commit '%s' contains mergetag '%s' that is "
+ "discarded; use --edit instead of --graft"), ref,
+ oid_to_hex(&tag_oid));
+}
+
+static int check_mergetags(struct commit *commit, int argc, const char **argv)
+{
+ struct check_mergetag_data mergetag_data;
+
+ mergetag_data.argc = argc;
+ mergetag_data.argv = argv;
+ return for_each_mergetag(check_one_mergetag, commit, &mergetag_data);
+}
+
+static int create_graft(int argc, const char **argv, int force, int gentle)
+{
+ struct object_id old_oid, new_oid;
+ const char *old_ref = argv[0];
+ struct commit *commit;
+ struct strbuf buf = STRBUF_INIT;
+ const char *buffer;
+ unsigned long size;
+
+ if (get_oid(old_ref, &old_oid) < 0)
+ return error(_("not a valid object name: '%s'"), old_ref);
+ commit = lookup_commit_reference(the_repository, &old_oid);
+ if (!commit)
+ return error(_("could not parse %s"), old_ref);
+
+ buffer = get_commit_buffer(commit, &size);
+ strbuf_add(&buf, buffer, size);
+ unuse_commit_buffer(commit, buffer);
+
+ if (replace_parents(&buf, argc - 1, &argv[1]) < 0) {
+ strbuf_release(&buf);
+ return -1;
+ }
+
+ if (remove_signature(&buf)) {
+ warning(_("the original commit '%s' has a gpg signature"), old_ref);
+ warning(_("the signature will be removed in the replacement commit!"));
+ }
+
+ if (check_mergetags(commit, argc, argv)) {
+ strbuf_release(&buf);
+ return -1;
+ }
+
+ if (write_object_file(buf.buf, buf.len, OBJ_COMMIT, &new_oid)) {
+ strbuf_release(&buf);
+ return error(_("could not write replacement commit for: '%s'"),
+ old_ref);
+ }
+
+ strbuf_release(&buf);
+
+ if (oideq(&commit->object.oid, &new_oid)) {
+ if (gentle) {
+ warning(_("graft for '%s' unnecessary"),
+ oid_to_hex(&commit->object.oid));
+ return 0;
+ }
+ return error(_("new commit is the same as the old one: '%s'"),
+ oid_to_hex(&commit->object.oid));
+ }
+
+ return replace_object_oid(old_ref, &commit->object.oid,
+ "replacement", &new_oid, force);
+}
+
+static int convert_graft_file(int force)
+{
+ const char *graft_file = get_graft_file(the_repository);
+ FILE *fp = fopen_or_warn(graft_file, "r");
+ struct strbuf buf = STRBUF_INIT, err = STRBUF_INIT;
+ struct strvec args = STRVEC_INIT;
+
+ if (!fp)
+ return -1;
+
+ no_graft_file_deprecated_advice = 1;
+ while (strbuf_getline(&buf, fp) != EOF) {
+ if (*buf.buf == '#')
+ continue;
+
+ strvec_split(&args, buf.buf);
+ if (args.nr && create_graft(args.nr, args.v, force, 1))
+ strbuf_addf(&err, "\n\t%s", buf.buf);
+ strvec_clear(&args);
+ }
+ fclose(fp);
+
+ strbuf_release(&buf);
+
+ if (!err.len)
+ return unlink_or_warn(graft_file);
+
+ warning(_("could not convert the following graft(s):\n%s"), err.buf);
+ strbuf_release(&err);
+
+ return -1;
+}
+
+int cmd_replace(int argc, const char **argv, const char *prefix)
+{
+ int force = 0;
+ int raw = 0;
+ const char *format = NULL;
+ enum {
+ MODE_UNSPECIFIED = 0,
+ MODE_LIST,
+ MODE_DELETE,
+ MODE_EDIT,
+ MODE_GRAFT,
+ MODE_CONVERT_GRAFT_FILE,
+ MODE_REPLACE
+ } cmdmode = MODE_UNSPECIFIED;
+ struct option options[] = {
+ OPT_CMDMODE('l', "list", &cmdmode, N_("list replace refs"), MODE_LIST),
+ OPT_CMDMODE('d', "delete", &cmdmode, N_("delete replace refs"), MODE_DELETE),
+ OPT_CMDMODE('e', "edit", &cmdmode, N_("edit existing object"), MODE_EDIT),
+ OPT_CMDMODE('g', "graft", &cmdmode, N_("change a commit's parents"), MODE_GRAFT),
+ OPT_CMDMODE(0, "convert-graft-file", &cmdmode, N_("convert existing graft file"), MODE_CONVERT_GRAFT_FILE),
+ OPT_BOOL_F('f', "force", &force, N_("replace the ref if it exists"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL(0, "raw", &raw, N_("do not pretty-print contents for --edit")),
+ OPT_STRING(0, "format", &format, N_("format"), N_("use this format")),
+ OPT_END()
+ };
+
+ read_replace_refs = 0;
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options, git_replace_usage, 0);
+
+ if (!cmdmode)
+ cmdmode = argc ? MODE_REPLACE : MODE_LIST;
+
+ if (format && cmdmode != MODE_LIST)
+ usage_msg_opt(_("--format cannot be used when not listing"),
+ git_replace_usage, options);
+
+ if (force &&
+ cmdmode != MODE_REPLACE &&
+ cmdmode != MODE_EDIT &&
+ cmdmode != MODE_GRAFT &&
+ cmdmode != MODE_CONVERT_GRAFT_FILE)
+ usage_msg_opt(_("-f only makes sense when writing a replacement"),
+ git_replace_usage, options);
+
+ if (raw && cmdmode != MODE_EDIT)
+ usage_msg_opt(_("--raw only makes sense with --edit"),
+ git_replace_usage, options);
+
+ switch (cmdmode) {
+ case MODE_DELETE:
+ if (argc < 1)
+ usage_msg_opt(_("-d needs at least one argument"),
+ git_replace_usage, options);
+ return for_each_replace_name(argv, delete_replace_ref);
+
+ case MODE_REPLACE:
+ if (argc != 2)
+ usage_msg_opt(_("bad number of arguments"),
+ git_replace_usage, options);
+ return replace_object(argv[0], argv[1], force);
+
+ case MODE_EDIT:
+ if (argc != 1)
+ usage_msg_opt(_("-e needs exactly one argument"),
+ git_replace_usage, options);
+ return edit_and_replace(argv[0], force, raw);
+
+ case MODE_GRAFT:
+ if (argc < 1)
+ usage_msg_opt(_("-g needs at least one argument"),
+ git_replace_usage, options);
+ return create_graft(argc, argv, force, 0);
+
+ case MODE_CONVERT_GRAFT_FILE:
+ if (argc != 0)
+ usage_msg_opt(_("--convert-graft-file takes no argument"),
+ git_replace_usage, options);
+ return !!convert_graft_file(force);
+
+ case MODE_LIST:
+ if (argc > 1)
+ usage_msg_opt(_("only one pattern can be given with -l"),
+ git_replace_usage, options);
+ return list_replace_refs(argv[0], format);
+
+ default:
+ BUG("invalid cmdmode %d", (int)cmdmode);
+ }
+}
diff --git a/builtin/rerere.c b/builtin/rerere.c
new file mode 100644
index 0000000..8b7392d
--- /dev/null
+++ b/builtin/rerere.c
@@ -0,0 +1,118 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "rerere.h"
+#include "xdiff/xdiff.h"
+#include "xdiff-interface.h"
+#include "pathspec.h"
+
+static const char * const rerere_usage[] = {
+ N_("git rerere [clear | forget <pathspec>... | diff | status | remaining | gc]"),
+ NULL,
+};
+
+static int outf(void *dummy, mmbuffer_t *ptr, int nbuf)
+{
+ int i;
+ for (i = 0; i < nbuf; i++)
+ if (write_in_full(1, ptr[i].ptr, ptr[i].size) < 0)
+ return -1;
+ return 0;
+}
+
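+/*
+ * Print a unified diff (three lines of context) between file1 and file2
+ * to stdout, labelling the two sides "a/<label1>" and "b/<label2>".
+ */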
+static int diff_two(const char *file1, const char *label1,
+ const char *file2, const char *label2)
+{
+ xpparam_t xpp;
+ xdemitconf_t xecfg;
+ xdemitcb_t ecb = { .out_line = outf };
+ mmfile_t minus, plus;
+ int ret;
+
+ if (read_mmfile(&minus, file1) || read_mmfile(&plus, file2))
+ return -1;
+
+ printf("--- a/%s\n+++ b/%s\n", label1, label2);
+ fflush(stdout);
+ memset(&xpp, 0, sizeof(xpp));
+ xpp.flags = 0;
+ memset(&xecfg, 0, sizeof(xecfg));
+ xecfg.ctxlen = 3;
+ ret = xdi_diff(&minus, &plus, &xpp, &xecfg, &ecb);
+
+ free(minus.ptr);
+ free(plus.ptr);
+ return ret;
+}
+
+int cmd_rerere(int argc, const char **argv, const char *prefix)
+{
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+ int i, autoupdate = -1, flags = 0;
+
+ struct option options[] = {
+ OPT_SET_INT(0, "rerere-autoupdate", &autoupdate,
+ N_("register clean resolutions in index"), 1),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, prefix, options, rerere_usage, 0);
+
+ git_config(git_xmerge_config, NULL);
+
+ if (autoupdate == 1)
+ flags = RERERE_AUTOUPDATE;
+ if (autoupdate == 0)
+ flags = RERERE_NOAUTOUPDATE;
+
+ if (argc < 1)
+ return repo_rerere(the_repository, flags);
+
+ if (!strcmp(argv[0], "forget")) {
+ struct pathspec pathspec;
+ if (argc < 2)
+ warning(_("'git rerere forget' without paths is deprecated"));
+ parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_CWD,
+ prefix, argv + 1);
+ return rerere_forget(the_repository, &pathspec);
+ }
+
+ if (!strcmp(argv[0], "clear")) {
+ rerere_clear(the_repository, &merge_rr);
+ } else if (!strcmp(argv[0], "gc"))
+ rerere_gc(the_repository, &merge_rr);
+ else if (!strcmp(argv[0], "status")) {
+ if (setup_rerere(the_repository, &merge_rr,
+ flags | RERERE_READONLY) < 0)
+ return 0;
+ for (i = 0; i < merge_rr.nr; i++)
+ printf("%s\n", merge_rr.items[i].string);
+ } else if (!strcmp(argv[0], "remaining")) {
+ rerere_remaining(the_repository, &merge_rr);
+ for (i = 0; i < merge_rr.nr; i++) {
+ if (merge_rr.items[i].util != RERERE_RESOLVED)
+ printf("%s\n", merge_rr.items[i].string);
+ else
+ /*
+ * Prepare for the later call to
+ * string_list_clear().
+ */
+ merge_rr.items[i].util = NULL;
+ }
+ } else if (!strcmp(argv[0], "diff")) {
+ if (setup_rerere(the_repository, &merge_rr,
+ flags | RERERE_READONLY) < 0)
+ return 0;
+ for (i = 0; i < merge_rr.nr; i++) {
+ const char *path = merge_rr.items[i].string;
+ const struct rerere_id *id = merge_rr.items[i].util;
+ if (diff_two(rerere_path(id, "preimage"), path, path, path))
+ die(_("unable to generate diff for '%s'"), rerere_path(id, NULL));
+ }
+ } else
+ usage_with_options(rerere_usage, options);
+
+ string_list_clear(&merge_rr, 1);
+ return 0;
+}
diff --git a/builtin/reset.c b/builtin/reset.c
new file mode 100644
index 0000000..1fa86ed
--- /dev/null
+++ b/builtin/reset.c
@@ -0,0 +1,489 @@
+/*
+ * "git reset" builtin command
+ *
+ * Copyright (c) 2007 Carlos Rica
+ *
+ * Based on git-reset.sh, which is
+ *
+ * Copyright (c) 2005, 2006 Linus Torvalds and Junio C Hamano
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "config.h"
+#include "lockfile.h"
+#include "tag.h"
+#include "object.h"
+#include "pretty.h"
+#include "run-command.h"
+#include "refs.h"
+#include "diff.h"
+#include "diffcore.h"
+#include "tree.h"
+#include "branch.h"
+#include "parse-options.h"
+#include "unpack-trees.h"
+#include "cache-tree.h"
+#include "submodule.h"
+#include "submodule-config.h"
+#include "dir.h"
+
+#define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000)
+
+static const char * const git_reset_usage[] = {
+ N_("git reset [--mixed | --soft | --hard | --merge | --keep] [-q] [<commit>]"),
+ N_("git reset [-q] [<tree-ish>] [--] <pathspec>..."),
+ N_("git reset [-q] [--pathspec-from-file [--pathspec-file-nul]] [<tree-ish>]"),
+ N_("git reset --patch [<tree-ish>] [--] [<pathspec>...]"),
+ NULL
+};
+
+enum reset_type { MIXED, SOFT, HARD, MERGE, KEEP, NONE };
+static const char *reset_type_names[] = {
+ N_("mixed"), N_("soft"), N_("hard"), N_("merge"), N_("keep"), NULL
+};
+
+static inline int is_merge(void)
+{
+ return !access(git_path_merge_head(the_repository), F_OK);
+}
+
+static int reset_index(const char *ref, const struct object_id *oid, int reset_type, int quiet)
+{
+ int i, nr = 0;
+ struct tree_desc desc[2];
+ struct tree *tree;
+ struct unpack_trees_options opts;
+ int ret = -1;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.head_idx = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ opts.fn = oneway_merge;
+ opts.merge = 1;
+ init_checkout_metadata(&opts.meta, ref, oid, NULL);
+ if (!quiet)
+ opts.verbose_update = 1;
+ switch (reset_type) {
+ case KEEP:
+ case MERGE:
+ opts.update = 1;
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ break;
+ case HARD:
+ opts.update = 1;
+ opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED;
+ opts.skip_cache_tree_update = 1;
+ break;
+ case MIXED:
+ opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
+ opts.skip_cache_tree_update = 1;
+ /* but opts.update=0, so working tree not updated */
+ break;
+ default:
+ BUG("invalid reset_type passed to reset_index");
+ }
+
+ repo_read_index_unmerged(the_repository);
+
+ if (reset_type == KEEP) {
+ struct object_id head_oid;
+ if (get_oid("HEAD", &head_oid))
+ return error(_("You do not have a valid HEAD."));
+ if (!fill_tree_descriptor(the_repository, desc + nr, &head_oid))
+ return error(_("Failed to find tree of HEAD."));
+ nr++;
+ opts.fn = twoway_merge;
+ }
+
+ if (!fill_tree_descriptor(the_repository, desc + nr, oid)) {
+ error(_("Failed to find tree of %s."), oid_to_hex(oid));
+ goto out;
+ }
+ nr++;
+
+ if (unpack_trees(nr, desc, &opts))
+ goto out;
+
+ if (reset_type == MIXED || reset_type == HARD) {
+ tree = parse_tree_indirect(oid);
+ prime_cache_tree(the_repository, the_repository->index, tree);
+ }
+
+ ret = 0;
+
+out:
+ for (i = 0; i < nr; i++)
+ free((void *)desc[i].buffer);
+ return ret;
+}
+
+static void print_new_head_line(struct commit *commit)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ printf(_("HEAD is now at %s"),
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
+ if (buf.len > 0)
+ printf(" %s", buf.buf);
+ putchar('\n');
+ strbuf_release(&buf);
+}
+
+static void update_index_from_diff(struct diff_queue_struct *q,
+ struct diff_options *opt, void *data)
+{
+ int i;
+ int intent_to_add = *(int *)data;
+
+ for (i = 0; i < q->nr; i++) {
+ int pos;
+ struct diff_filespec *one = q->queue[i]->one;
+ int is_in_reset_tree = one->mode && !is_null_oid(&one->oid);
+ struct cache_entry *ce;
+
+ if (!is_in_reset_tree && !intent_to_add) {
+ remove_file_from_index(&the_index, one->path);
+ continue;
+ }
+
+ ce = make_cache_entry(&the_index, one->mode, &one->oid, one->path,
+ 0, 0);
+ if (!ce)
+ die(_("make_cache_entry failed for path '%s'"),
+ one->path);
+
+ /*
+ * If the file 1) corresponds to an existing index entry with
+ * skip-worktree set, or 2) does not exist in the index but is
+ * outside the sparse checkout definition, add a skip-worktree bit
+ * to the new index entry. Note that a sparse index will be expanded
+ * if this entry is outside the sparse cone - this is necessary
+ * to properly construct the reset sparse directory.
+ */
+ pos = index_name_pos(&the_index, one->path, strlen(one->path));
+ if ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) ||
+ (pos < 0 && !path_in_sparse_checkout(one->path, &the_index)))
+ ce->ce_flags |= CE_SKIP_WORKTREE;
+ if (!is_in_reset_tree) {
+ ce->ce_flags |= CE_INTENT_TO_ADD;
+ set_object_name_for_intent_to_add_entry(ce);
+ }
+ add_index_entry(&the_index, ce,
+ ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
+ }
+}
+
+static int read_from_tree(const struct pathspec *pathspec,
+ struct object_id *tree_oid,
+ int intent_to_add)
+{
+ struct diff_options opt;
+
+ memset(&opt, 0, sizeof(opt));
+ copy_pathspec(&opt.pathspec, pathspec);
+ opt.output_format = DIFF_FORMAT_CALLBACK;
+ opt.format_callback = update_index_from_diff;
+ opt.format_callback_data = &intent_to_add;
+ opt.flags.override_submodule_config = 1;
+ opt.flags.recursive = 1;
+ opt.repo = the_repository;
+ opt.change = diff_change;
+ opt.add_remove = diff_addremove;
+
+ if (pathspec->nr && pathspec_needs_expanded_index(&the_index, pathspec))
+ ensure_full_index(&the_index);
+
+ if (do_diff_cache(tree_oid, &opt))
+ return 1;
+ diffcore_std(&opt);
+ diff_flush(&opt);
+
+ return 0;
+}
+
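+/*
+ * Compose the reflog message: "$GIT_REFLOG_ACTION: <action>" when that
+ * environment variable is set, otherwise "reset: moving to <rev>" when a
+ * revision is given, or "reset: <action>" as a fallback.
+ */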
+static void set_reflog_message(struct strbuf *sb, const char *action,
+ const char *rev)
+{
+ const char *rla = getenv("GIT_REFLOG_ACTION");
+
+ strbuf_reset(sb);
+ if (rla)
+ strbuf_addf(sb, "%s: %s", rla, action);
+ else if (rev)
+ strbuf_addf(sb, "reset: moving to %s", rev);
+ else
+ strbuf_addf(sb, "reset: %s", action);
+}
+
+static void die_if_unmerged_cache(int reset_type)
+{
+ if (is_merge() || unmerged_index(&the_index))
+ die(_("Cannot do a %s reset in the middle of a merge."),
+ _(reset_type_names[reset_type]));
+}
+
+static void parse_args(struct pathspec *pathspec,
+ const char **argv, const char *prefix,
+ int patch_mode,
+ const char **rev_ret)
+{
+ const char *rev = "HEAD";
+ struct object_id unused;
+ /*
+ * Possible arguments are:
+ *
+ * git reset [-opts] [<rev>]
+ * git reset [-opts] <tree> [<paths>...]
+ * git reset [-opts] <tree> -- [<paths>...]
+ * git reset [-opts] -- [<paths>...]
+ * git reset [-opts] <paths>...
+ *
+ * At this point, argv points immediately after [-opts].
+ */
+
+ if (argv[0]) {
+ if (!strcmp(argv[0], "--")) {
+ argv++; /* reset to HEAD, possibly with paths */
+ } else if (argv[1] && !strcmp(argv[1], "--")) {
+ rev = argv[0];
+ argv += 2;
+ }
+ /*
+ * Otherwise, argv[0] could be either <rev> or <paths> and
+ * has to be unambiguous. If there is a single argument, it
+ * cannot be a tree.
+ */
+ else if ((!argv[1] && !get_oid_committish(argv[0], &unused)) ||
+ (argv[1] && !get_oid_treeish(argv[0], &unused))) {
+ /*
+ * Ok, argv[0] looks like a commit/tree; it should not
+ * be a filename.
+ */
+ verify_non_filename(prefix, argv[0]);
+ rev = *argv++;
+ } else {
+ /* Otherwise we treat this as a filename */
+ verify_filename(prefix, argv[0], 1);
+ }
+ }
+ *rev_ret = rev;
+
+ parse_pathspec(pathspec, 0,
+ PATHSPEC_PREFER_FULL |
+ (patch_mode ? PATHSPEC_PREFIX_ORIGIN : 0),
+ prefix, argv);
+}
+
+static int reset_refs(const char *rev, const struct object_id *oid)
+{
+ int update_ref_status;
+ struct strbuf msg = STRBUF_INIT;
+ struct object_id *orig = NULL, oid_orig,
+ *old_orig = NULL, oid_old_orig;
+
+ if (!get_oid("ORIG_HEAD", &oid_old_orig))
+ old_orig = &oid_old_orig;
+ if (!get_oid("HEAD", &oid_orig)) {
+ orig = &oid_orig;
+ set_reflog_message(&msg, "updating ORIG_HEAD", NULL);
+ update_ref(msg.buf, "ORIG_HEAD", orig, old_orig, 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ } else if (old_orig)
+ delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ set_reflog_message(&msg, "updating HEAD", rev);
+ update_ref_status = update_ref(msg.buf, "HEAD", oid, orig, 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ strbuf_release(&msg);
+ return update_ref_status;
+}
+
+static int git_reset_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "submodule.recurse"))
+ return git_default_submodule_config(var, value, cb);
+
+ return git_default_config(var, value, cb);
+}
+
+int cmd_reset(int argc, const char **argv, const char *prefix)
+{
+ int reset_type = NONE, update_ref_status = 0, quiet = 0;
+ int no_refresh = 0;
+ int patch_mode = 0, pathspec_file_nul = 0, unborn;
+ const char *rev, *pathspec_from_file = NULL;
+ struct object_id oid;
+ struct pathspec pathspec;
+ int intent_to_add = 0;
+ const struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "no-refresh", &no_refresh,
+ N_("skip refreshing the index after reset")),
+ OPT_SET_INT(0, "mixed", &reset_type,
+ N_("reset HEAD and index"), MIXED),
+ OPT_SET_INT(0, "soft", &reset_type, N_("reset only HEAD"), SOFT),
+ OPT_SET_INT(0, "hard", &reset_type,
+ N_("reset HEAD, index and working tree"), HARD),
+ OPT_SET_INT(0, "merge", &reset_type,
+ N_("reset HEAD, index and working tree"), MERGE),
+ OPT_SET_INT(0, "keep", &reset_type,
+ N_("reset HEAD but keep local changes"), KEEP),
+ OPT_CALLBACK_F(0, "recurse-submodules", NULL,
+ "reset", "control recursive updating of submodules",
+ PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater),
+ OPT_BOOL('p', "patch", &patch_mode, N_("select hunks interactively")),
+ OPT_BOOL('N', "intent-to-add", &intent_to_add,
+ N_("record only the fact that removed paths will be added later")),
+ OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
+ OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
+ OPT_END()
+ };
+
+ git_config(git_reset_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options, git_reset_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+ parse_args(&pathspec, argv, prefix, patch_mode, &rev);
+
+ if (pathspec_from_file) {
+ if (patch_mode)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--patch");
+
+ if (pathspec.nr)
+ die(_("'%s' and pathspec arguments cannot be used together"), "--pathspec-from-file");
+
+ parse_pathspec_file(&pathspec, 0,
+ PATHSPEC_PREFER_FULL,
+ prefix, pathspec_from_file, pathspec_file_nul);
+ } else if (pathspec_file_nul) {
+ die(_("the option '%s' requires '%s'"), "--pathspec-file-nul", "--pathspec-from-file");
+ }
+
+ unborn = !strcmp(rev, "HEAD") && get_oid("HEAD", &oid);
+ if (unborn) {
+ /* reset on unborn branch: treat as reset to empty tree */
+ oidcpy(&oid, the_hash_algo->empty_tree);
+ } else if (!pathspec.nr && !patch_mode) {
+ struct commit *commit;
+ if (get_oid_committish(rev, &oid))
+ die(_("Failed to resolve '%s' as a valid revision."), rev);
+ commit = lookup_commit_reference(the_repository, &oid);
+ if (!commit)
+ die(_("Could not parse object '%s'."), rev);
+ oidcpy(&oid, &commit->object.oid);
+ } else {
+ struct tree *tree;
+ if (get_oid_treeish(rev, &oid))
+ die(_("Failed to resolve '%s' as a valid tree."), rev);
+ tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die(_("Could not parse object '%s'."), rev);
+ oidcpy(&oid, &tree->object.oid);
+ }
+
+ if (patch_mode) {
+ if (reset_type != NONE)
+ die(_("options '%s' and '%s' cannot be used together"), "--patch", "--{hard,mixed,soft}");
+ trace2_cmd_mode("patch-interactive");
+ return run_add_interactive(rev, "--patch=reset", &pathspec);
+ }
+
+ /*
+ * "git reset <tree> [--] <paths>..." can be used to load chosen
+ * paths from the tree into the index without affecting the working
+ * tree or HEAD.
+ */
+ if (pathspec.nr) {
+ if (reset_type == MIXED)
+ warning(_("--mixed with paths is deprecated; use 'git reset -- <paths>' instead."));
+ else if (reset_type != NONE)
+ die(_("Cannot do %s reset with paths."),
+ _(reset_type_names[reset_type]));
+ }
+ if (reset_type == NONE)
+ reset_type = MIXED; /* by default */
+
+ if (pathspec.nr)
+ trace2_cmd_mode("path");
+ else
+ trace2_cmd_mode(reset_type_names[reset_type]);
+
+ if (reset_type != SOFT && (reset_type != MIXED || get_git_work_tree()))
+ setup_work_tree();
+
+ if (reset_type == MIXED && is_bare_repository())
+ die(_("%s reset is not allowed in a bare repository"),
+ _(reset_type_names[reset_type]));
+
+ if (intent_to_add && reset_type != MIXED)
+ die(_("the option '%s' requires '%s'"), "-N", "--mixed");
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ if (repo_read_index(the_repository) < 0)
+ die(_("index file corrupt"));
+
+ /*
+ * A soft reset does not touch the index file or the working tree
+ * at all, but requires them to be in a good order. Other resets
+ * reset the index file to the tree object we are switching to.
+ */
+ if (reset_type == SOFT || reset_type == KEEP)
+ die_if_unmerged_cache(reset_type);
+
+ if (reset_type != SOFT) {
+ struct lock_file lock = LOCK_INIT;
+ repo_hold_locked_index(the_repository, &lock,
+ LOCK_DIE_ON_ERROR);
+ if (reset_type == MIXED) {
+ int flags = quiet ? REFRESH_QUIET : REFRESH_IN_PORCELAIN;
+ if (read_from_tree(&pathspec, &oid, intent_to_add))
+ return 1;
+ the_index.updated_skipworktree = 1;
+ if (!no_refresh && get_git_work_tree()) {
+ uint64_t t_begin, t_delta_in_ms;
+
+ t_begin = getnanotime();
+ refresh_index(&the_index, flags, NULL, NULL,
+ _("Unstaged changes after reset:"));
+ t_delta_in_ms = (getnanotime() - t_begin) / 1000000;
+ if (!quiet && advice_enabled(ADVICE_RESET_NO_REFRESH_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
+ advise(_("It took %.2f seconds to refresh the index after reset. You can use\n"
+ "'--no-refresh' to avoid this."), t_delta_in_ms / 1000.0);
+ }
+ }
+ } else {
+ struct object_id dummy;
+ char *ref = NULL;
+ int err;
+
+ dwim_ref(rev, strlen(rev), &dummy, &ref, 0);
+ if (ref && !starts_with(ref, "refs/"))
+ FREE_AND_NULL(ref);
+
+ err = reset_index(ref, &oid, reset_type, quiet);
+ if (reset_type == KEEP && !err)
+ err = reset_index(ref, &oid, MIXED, quiet);
+ if (err)
+ die(_("Could not reset index file to revision '%s'."), rev);
+ free(ref);
+ }
+
+ if (write_locked_index(&the_index, &lock, COMMIT_LOCK))
+ die(_("Could not write new index file."));
+ }
+
+ if (!pathspec.nr && !unborn) {
+ /*
+ * Any resets without paths update HEAD to the head being
+ * switched to, saving the previous head in ORIG_HEAD beforehand.
+ */
+ update_ref_status = reset_refs(rev, &oid);
+
+ if (reset_type == HARD && !update_ref_status && !quiet)
+ print_new_head_line(lookup_commit_reference(the_repository, &oid));
+ }
+ if (!pathspec.nr)
+ remove_branch_state(the_repository, 0);
+
+ return update_ref_status;
+}
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
new file mode 100644
index 0000000..d42db0b
--- /dev/null
+++ b/builtin/rev-list.c
@@ -0,0 +1,791 @@
+#include "cache.h"
+#include "config.h"
+#include "commit.h"
+#include "diff.h"
+#include "revision.h"
+#include "list-objects.h"
+#include "list-objects-filter.h"
+#include "list-objects-filter-options.h"
+#include "object.h"
+#include "object-store.h"
+#include "pack.h"
+#include "pack-bitmap.h"
+#include "builtin.h"
+#include "log-tree.h"
+#include "graph.h"
+#include "bisect.h"
+#include "progress.h"
+#include "reflog-walk.h"
+#include "oidset.h"
+#include "packfile.h"
+
+static const char rev_list_usage[] =
+"git rev-list [<options>] <commit>... [--] [<path>...]\n"
+"\n"
+" limiting output:\n"
+" --max-count=<n>\n"
+" --max-age=<epoch>\n"
+" --min-age=<epoch>\n"
+" --sparse\n"
+" --no-merges\n"
+" --min-parents=<n>\n"
+" --no-min-parents\n"
+" --max-parents=<n>\n"
+" --no-max-parents\n"
+" --remove-empty\n"
+" --all\n"
+" --branches\n"
+" --tags\n"
+" --remotes\n"
+" --stdin\n"
+" --exclude-hidden=[receive|uploadpack]\n"
+" --quiet\n"
+" ordering output:\n"
+" --topo-order\n"
+" --date-order\n"
+" --reverse\n"
+" formatting output:\n"
+" --parents\n"
+" --children\n"
+" --objects | --objects-edge\n"
+" --disk-usage[=human]\n"
+" --unpacked\n"
+" --header | --pretty\n"
+" --[no-]object-names\n"
+" --abbrev=<n> | --no-abbrev\n"
+" --abbrev-commit\n"
+" --left-right\n"
+" --count\n"
+" special purpose:\n"
+" --bisect\n"
+" --bisect-vars\n"
+" --bisect-all"
+;
+
+static struct progress *progress;
+static unsigned progress_counter;
+
+static struct oidset omitted_objects;
+static int arg_print_omitted; /* print objects omitted by filter */
+
+static struct oidset missing_objects;
+enum missing_action {
+ MA_ERROR = 0, /* fail if any missing objects are encountered */
+ MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_PRINT, /* print ALL missing objects in special section */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
+};
+static enum missing_action arg_missing_action;
+
+/* display only the oid of each object encountered */
+static int arg_show_object_names = 1;
+
+#define DEFAULT_OIDSET_SIZE (16*1024)
+
+static int show_disk_usage;
+static off_t total_disk_usage;
+static int human_readable;
+
+static off_t get_object_disk_usage(struct object *obj)
+{
+ off_t size;
+ struct object_info oi = OBJECT_INFO_INIT;
+ oi.disk_sizep = &size;
+ if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)
+ die(_("unable to get disk usage of %s"), oid_to_hex(&obj->oid));
+ return size;
+}
+
+static void finish_commit(struct commit *commit);
+static void show_commit(struct commit *commit, void *data)
+{
+ struct rev_list_info *info = data;
+ struct rev_info *revs = info->revs;
+
+ display_progress(progress, ++progress_counter);
+
+ if (show_disk_usage)
+ total_disk_usage += get_object_disk_usage(&commit->object);
+
+ if (info->flags & REV_LIST_QUIET) {
+ finish_commit(commit);
+ return;
+ }
+
+ graph_show_commit(revs->graph);
+
+ if (revs->count) {
+ if (commit->object.flags & PATCHSAME)
+ revs->count_same++;
+ else if (commit->object.flags & SYMMETRIC_LEFT)
+ revs->count_left++;
+ else
+ revs->count_right++;
+ finish_commit(commit);
+ return;
+ }
+
+ if (info->show_timestamp)
+ printf("%"PRItime" ", commit->date);
+ if (info->header_prefix)
+ fputs(info->header_prefix, stdout);
+
+ if (revs->include_header) {
+ if (!revs->graph)
+ fputs(get_revision_mark(revs, commit), stdout);
+ if (revs->abbrev_commit && revs->abbrev)
+ fputs(find_unique_abbrev(&commit->object.oid, revs->abbrev),
+ stdout);
+ else
+ fputs(oid_to_hex(&commit->object.oid), stdout);
+ }
+ if (revs->print_parents) {
+ struct commit_list *parents = commit->parents;
+ while (parents) {
+ printf(" %s", oid_to_hex(&parents->item->object.oid));
+ parents = parents->next;
+ }
+ }
+ if (revs->children.name) {
+ struct commit_list *children;
+
+ children = lookup_decoration(&revs->children, &commit->object);
+ while (children) {
+ printf(" %s", oid_to_hex(&children->item->object.oid));
+ children = children->next;
+ }
+ }
+ show_decorations(revs, commit);
+ if (revs->commit_format == CMIT_FMT_ONELINE)
+ putchar(' ');
+ else if (revs->include_header)
+ putchar('\n');
+
+ if (revs->verbose_header) {
+ struct strbuf buf = STRBUF_INIT;
+ struct pretty_print_context ctx = {0};
+ ctx.abbrev = revs->abbrev;
+ ctx.date_mode = revs->date_mode;
+ ctx.date_mode_explicit = revs->date_mode_explicit;
+ ctx.fmt = revs->commit_format;
+ ctx.output_encoding = get_log_output_encoding();
+ ctx.color = revs->diffopt.use_color;
+ pretty_print_commit(&ctx, commit, &buf);
+ if (buf.len) {
+ if (revs->commit_format != CMIT_FMT_ONELINE)
+ graph_show_oneline(revs->graph);
+
+ graph_show_commit_msg(revs->graph, stdout, &buf);
+
+ /*
+ * Add a newline after the commit message.
+ *
+ * Usually, this newline produces a blank
+ * padding line between entries, in which case
+ * we need to add graph padding on this line.
+ *
+ * However, the commit message may not end in a
+ * newline. In this case the newline simply
+ * ends the last line of the commit message,
+ * and we don't need any graph output. (This
+ * always happens with CMIT_FMT_ONELINE, and it
+ * happens with CMIT_FMT_USERFORMAT when the
+ * format doesn't explicitly end in a newline.)
+ */
+ if (buf.len && buf.buf[buf.len - 1] == '\n')
+ graph_show_padding(revs->graph);
+ putchar(info->hdr_termination);
+ } else {
+ /*
+ * If the message buffer is empty, just show
+ * the rest of the graph output for this
+ * commit.
+ */
+ if (graph_show_remainder(revs->graph))
+ putchar('\n');
+ if (revs->commit_format == CMIT_FMT_ONELINE)
+ putchar('\n');
+ }
+ strbuf_release(&buf);
+ } else {
+ if (graph_show_remainder(revs->graph))
+ putchar('\n');
+ }
+ maybe_flush_or_die(stdout, "stdout");
+ finish_commit(commit);
+}
+
+static void finish_commit(struct commit *commit)
+{
+ free_commit_list(commit->parents);
+ commit->parents = NULL;
+ free_commit_buffer(the_repository->parsed_objects,
+ commit);
+}
+
+static inline void finish_object__ma(struct object *obj)
+{
+ /*
+ * Whether or not we try to dynamically fetch missing objects
+ * from the server, we currently DO NOT have the object. We
+ * can either print, allow (ignore), or conditionally allow
+ * (ignore) them.
+ */
+ switch (arg_missing_action) {
+ case MA_ERROR:
+ die("missing %s object '%s'",
+ type_name(obj->type), oid_to_hex(&obj->oid));
+ return;
+
+ case MA_ALLOW_ANY:
+ return;
+
+ case MA_PRINT:
+ oidset_insert(&missing_objects, &obj->oid);
+ return;
+
+ case MA_ALLOW_PROMISOR:
+ if (is_promisor_object(&obj->oid))
+ return;
+ die("unexpected missing %s object '%s'",
+ type_name(obj->type), oid_to_hex(&obj->oid));
+ return;
+
+ default:
+ BUG("unhandled missing_action");
+ return;
+ }
+}
+
+static int finish_object(struct object *obj, const char *name, void *cb_data)
+{
+ struct rev_list_info *info = cb_data;
+ if (oid_object_info_extended(the_repository, &obj->oid, NULL, 0) < 0) {
+ finish_object__ma(obj);
+ return 1;
+ }
+ if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
+ parse_object(the_repository, &obj->oid);
+ return 0;
+}
+
+static void show_object(struct object *obj, const char *name, void *cb_data)
+{
+ struct rev_list_info *info = cb_data;
+ struct rev_info *revs = info->revs;
+
+ if (finish_object(obj, name, cb_data))
+ return;
+ display_progress(progress, ++progress_counter);
+ if (show_disk_usage)
+ total_disk_usage += get_object_disk_usage(obj);
+ if (info->flags & REV_LIST_QUIET)
+ return;
+
+ if (revs->count) {
+ /*
+ * The object count is always accumulated in the .count_right
+ * field for traversal that is not a left-right traversal,
+ * and cmd_rev_list() made sure that a .count request that
+ * wants to count non-commit objects, which is handled by
+ * the show_object() callback, does not ask for .left_right.
+ */
+ revs->count_right++;
+ return;
+ }
+
+ if (arg_show_object_names)
+ show_object_with_name(stdout, obj, name);
+ else
+ printf("%s\n", oid_to_hex(&obj->oid));
+}
+
+static void show_edge(struct commit *commit)
+{
+ printf("-%s\n", oid_to_hex(&commit->object.oid));
+}
+
+static void print_var_str(const char *var, const char *val)
+{
+ printf("%s='%s'\n", var, val);
+}
+
+static void print_var_int(const char *var, int val)
+{
+ printf("%s=%d\n", var, val);
+}
+
+static int show_bisect_vars(struct rev_list_info *info, int reaches, int all)
+{
+ int cnt, flags = info->flags;
+ char hex[GIT_MAX_HEXSZ + 1] = "";
+ struct commit_list *tried;
+ struct rev_info *revs = info->revs;
+
+ if (!revs->commits)
+ return 1;
+
+ revs->commits = filter_skipped(revs->commits, &tried,
+ flags & BISECT_SHOW_ALL,
+ NULL, NULL);
+
+ /*
+ * revs->commits can reach "reaches" commits among
+ * "all" commits. If it is good, then there are
+ * (all-reaches) commits left to be bisected.
+ * On the other hand, if it is bad, then the set
+ * to bisect is "reaches".
+ * A bisect set of size N has (N-1) commits further
+ * to test, as we already know one bad one.
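+ *
+ * For example, with all=10 and reaches=6, cnt below becomes 6 and
+ * we report bisect_nr=5, bisect_good=3 and bisect_bad=5.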
+ */
+ cnt = all - reaches;
+ if (cnt < reaches)
+ cnt = reaches;
+
+ if (revs->commits)
+ oid_to_hex_r(hex, &revs->commits->item->object.oid);
+
+ if (flags & BISECT_SHOW_ALL) {
+ traverse_commit_list(revs, show_commit, show_object, info);
+ printf("------\n");
+ }
+
+ print_var_str("bisect_rev", hex);
+ print_var_int("bisect_nr", cnt - 1);
+ print_var_int("bisect_good", all - reaches - 1);
+ print_var_int("bisect_bad", reaches - 1);
+ print_var_int("bisect_all", all);
+ print_var_int("bisect_steps", estimate_bisect_steps(all));
+
+ return 0;
+}
+
+static int show_object_fast(
+ const struct object_id *oid,
+ enum object_type type,
+ int exclude,
+ uint32_t name_hash,
+ struct packed_git *found_pack,
+ off_t found_offset)
+{
+ fprintf(stdout, "%s\n", oid_to_hex(oid));
+ return 1;
+}
+
+static void print_disk_usage(off_t size)
+{
+ struct strbuf sb = STRBUF_INIT;
+ if (human_readable)
+ strbuf_humanise_bytes(&sb, size);
+ else
+ strbuf_addf(&sb, "%"PRIuMAX, (uintmax_t)size);
+ puts(sb.buf);
+ strbuf_release(&sb);
+}
+
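+/*
+ * Parse a --missing=<action> value. Every action other than "error" also
+ * turns off fetch_if_missing, so that missing objects are not fetched on
+ * demand while traversing.
+ */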
+static inline int parse_missing_action_value(const char *value)
+{
+ if (!strcmp(value, "error")) {
+ arg_missing_action = MA_ERROR;
+ return 1;
+ }
+
+ if (!strcmp(value, "allow-any")) {
+ arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
+ return 1;
+ }
+
+ if (!strcmp(value, "print")) {
+ arg_missing_action = MA_PRINT;
+ fetch_if_missing = 0;
+ return 1;
+ }
+
+ if (!strcmp(value, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int try_bitmap_count(struct rev_info *revs,
+ int filter_provided_objects)
+{
+ uint32_t commit_count = 0,
+ tag_count = 0,
+ tree_count = 0,
+ blob_count = 0;
+ int max_count;
+ struct bitmap_index *bitmap_git;
+
+ /* This function only handles counting, not general traversal. */
+ if (!revs->count)
+ return -1;
+
+ /*
+ * A bitmap result can't know left/right, etc, because we don't
+ * actually traverse.
+ */
+ if (revs->left_right || revs->cherry_mark)
+ return -1;
+
+ /*
+ * If we're counting reachable objects, we can't handle a max count of
+ * commits to traverse, since we don't know which objects go with which
+ * commit.
+ */
+ if (revs->max_count >= 0 &&
+ (revs->tag_objects || revs->tree_objects || revs->blob_objects))
+ return -1;
+
+ /*
+ * This must be saved before doing any walking, since the revision
+ * machinery will count it down to zero while traversing.
+ */
+ max_count = revs->max_count;
+
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
+ if (!bitmap_git)
+ return -1;
+
+ count_bitmap_commit_list(bitmap_git, &commit_count,
+ revs->tree_objects ? &tree_count : NULL,
+ revs->blob_objects ? &blob_count : NULL,
+ revs->tag_objects ? &tag_count : NULL);
+ if (max_count >= 0 && max_count < commit_count)
+ commit_count = max_count;
+
+ printf("%d\n", commit_count + tree_count + blob_count + tag_count);
+ free_bitmap_index(bitmap_git);
+ return 0;
+}
+
+static int try_bitmap_traversal(struct rev_info *revs,
+ int filter_provided_objects)
+{
+ struct bitmap_index *bitmap_git;
+
+ /*
+ * We can't use a bitmap result with a traversal limit, since the set
+ * of commits we'd get would be essentially random.
+ */
+ if (revs->max_count >= 0)
+ return -1;
+
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
+ if (!bitmap_git)
+ return -1;
+
+ traverse_bitmap_commit_list(bitmap_git, revs, &show_object_fast);
+ free_bitmap_index(bitmap_git);
+ return 0;
+}
+
+static int try_bitmap_disk_usage(struct rev_info *revs,
+ int filter_provided_objects)
+{
+ struct bitmap_index *bitmap_git;
+ off_t size_from_bitmap;
+
+ if (!show_disk_usage)
+ return -1;
+
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
+ if (!bitmap_git)
+ return -1;
+
+ size_from_bitmap = get_disk_usage_from_bitmap(bitmap_git, revs);
+ print_disk_usage(size_from_bitmap);
+ return 0;
+}
+
+int cmd_rev_list(int argc, const char **argv, const char *prefix)
+{
+ struct rev_info revs;
+ struct rev_list_info info;
+ struct setup_revision_opt s_r_opt = {
+ .allow_exclude_promisor_objects = 1,
+ };
+ int i;
+ int bisect_list = 0;
+ int bisect_show_vars = 0;
+ int bisect_find_all = 0;
+ int use_bitmap_index = 0;
+ int filter_provided_objects = 0;
+ const char *show_progress = NULL;
+ int ret = 0;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(rev_list_usage);
+
+ git_config(git_default_config, NULL);
+ repo_init_revisions(the_repository, &revs, prefix);
+ revs.abbrev = DEFAULT_ABBREV;
+ revs.commit_format = CMIT_FMT_UNSPECIFIED;
+ revs.include_header = 1;
+
+ /*
+ * Scan the argument list before invoking setup_revisions(), so that we
+ * know if fetch_if_missing needs to be set to 0.
+ *
+ * "--exclude-promisor-objects" acts as a pre-filter on missing objects
+ * by not crossing the boundary from realized objects to promisor
+ * objects.
+ *
+	 * Let "--missing" conditionally set fetch_if_missing.
+ */
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(arg, "--exclude-promisor-objects")) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ break;
+ }
+ }
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (skip_prefix(arg, "--missing=", &arg)) {
+ if (revs.exclude_promisor_objects)
+ die(_("options '%s' and '%s' cannot be used together"), "--exclude-promisor-objects", "--missing");
+ if (parse_missing_action_value(arg))
+ break;
+ }
+ }
+
+ if (arg_missing_action)
+ revs.do_not_die_on_missing_tree = 1;
+
+ argc = setup_revisions(argc, argv, &revs, &s_r_opt);
+
+ memset(&info, 0, sizeof(info));
+ info.revs = &revs;
+ if (revs.bisect)
+ bisect_list = 1;
+
+ if (revs.diffopt.flags.quick)
+ info.flags |= REV_LIST_QUIET;
+ for (i = 1 ; i < argc; i++) {
+ const char *arg = argv[i];
+
+ if (!strcmp(arg, "--header")) {
+ revs.verbose_header = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--timestamp")) {
+ info.show_timestamp = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--bisect")) {
+ bisect_list = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--bisect-all")) {
+ bisect_list = 1;
+ bisect_find_all = 1;
+ info.flags |= BISECT_SHOW_ALL;
+ revs.show_decorations = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--bisect-vars")) {
+ bisect_list = 1;
+ bisect_show_vars = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--use-bitmap-index")) {
+ use_bitmap_index = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--test-bitmap")) {
+ test_bitmap_walk(&revs);
+ goto cleanup;
+ }
+ if (skip_prefix(arg, "--progress=", &arg)) {
+ show_progress = arg;
+ continue;
+ }
+ if (!strcmp(arg, "--filter-provided-objects")) {
+ filter_provided_objects = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--filter-print-omitted")) {
+ arg_print_omitted = 1;
+ continue;
+ }
+
+ if (!strcmp(arg, "--exclude-promisor-objects"))
+ continue; /* already handled above */
+ if (skip_prefix(arg, "--missing=", &arg))
+ continue; /* already handled above */
+
+ if (!strcmp(arg, ("--no-object-names"))) {
+ arg_show_object_names = 0;
+ continue;
+ }
+
+ if (!strcmp(arg, ("--object-names"))) {
+ arg_show_object_names = 1;
+ continue;
+ }
+
+ if (!strcmp(arg, ("--commit-header"))) {
+ revs.include_header = 1;
+ continue;
+ }
+
+ if (!strcmp(arg, ("--no-commit-header"))) {
+ revs.include_header = 0;
+ continue;
+ }
+
+ if (skip_prefix(arg, "--disk-usage", &arg)) {
+ if (*arg == '=') {
+ if (!strcmp(++arg, "human")) {
+ human_readable = 1;
+ } else
+ die(_("invalid value for '%s': '%s', the only allowed format is '%s'"),
+ "--disk-usage=<format>", arg, "human");
+ } else if (*arg) {
+			/*
+			 * Arguably this should goto a label to continue the
+			 * chain of ifs, but it does not matter unless we try
+			 * to add a --disk-usage-foo option afterwards.
+			 */
+ usage(rev_list_usage);
+ }
+ show_disk_usage = 1;
+ info.flags |= REV_LIST_QUIET;
+ continue;
+ }
+
+ usage(rev_list_usage);
+
+ }
+ if (revs.commit_format != CMIT_FMT_USERFORMAT)
+ revs.include_header = 1;
+ if (revs.commit_format != CMIT_FMT_UNSPECIFIED) {
+ /* The command line has a --pretty */
+ info.hdr_termination = '\n';
+ if (revs.commit_format == CMIT_FMT_ONELINE || !revs.include_header)
+ info.header_prefix = "";
+ else
+ info.header_prefix = "commit ";
+ }
+ else if (revs.verbose_header)
+ /* Only --header was specified */
+ revs.commit_format = CMIT_FMT_RAW;
+
+ if ((!revs.commits && reflog_walk_empty(revs.reflog_info) &&
+ (!(revs.tag_objects || revs.tree_objects || revs.blob_objects) &&
+ !revs.pending.nr) &&
+ !revs.rev_input_given && !revs.read_from_stdin) ||
+ revs.diff)
+ usage(rev_list_usage);
+
+ if (revs.show_notes)
+ die(_("rev-list does not support display of notes"));
+
+ if (revs.count &&
+ (revs.tag_objects || revs.tree_objects || revs.blob_objects) &&
+ (revs.left_right || revs.cherry_mark))
+ die(_("marked counting and '%s' cannot be used together"), "--objects");
+
+ save_commit_buffer = (revs.verbose_header ||
+ revs.grep_filter.pattern_list ||
+ revs.grep_filter.header_list);
+ if (bisect_list)
+ revs.limited = 1;
+
+ if (show_progress)
+ progress = start_delayed_progress(show_progress, 0);
+
+ if (use_bitmap_index) {
+ if (!try_bitmap_count(&revs, filter_provided_objects))
+ goto cleanup;
+ if (!try_bitmap_disk_usage(&revs, filter_provided_objects))
+ goto cleanup;
+ if (!try_bitmap_traversal(&revs, filter_provided_objects))
+ goto cleanup;
+ }
+
+ if (prepare_revision_walk(&revs))
+ die("revision walk setup failed");
+ if (revs.tree_objects)
+ mark_edges_uninteresting(&revs, show_edge, 0);
+
+ if (bisect_list) {
+ int reaches, all;
+ unsigned bisect_flags = 0;
+
+ if (bisect_find_all)
+ bisect_flags |= FIND_BISECTION_ALL;
+
+ if (revs.first_parent_only)
+ bisect_flags |= FIND_BISECTION_FIRST_PARENT_ONLY;
+
+ find_bisection(&revs.commits, &reaches, &all, bisect_flags);
+
+ if (bisect_show_vars) {
+ ret = show_bisect_vars(&info, reaches, all);
+ goto cleanup;
+ }
+ }
+
+ if (filter_provided_objects) {
+ struct commit_list *c;
+ for (i = 0; i < revs.pending.nr; i++) {
+ struct object_array_entry *pending = revs.pending.objects + i;
+ pending->item->flags |= NOT_USER_GIVEN;
+ }
+ for (c = revs.commits; c; c = c->next)
+ c->item->object.flags |= NOT_USER_GIVEN;
+ }
+
+ if (arg_print_omitted)
+ oidset_init(&omitted_objects, DEFAULT_OIDSET_SIZE);
+ if (arg_missing_action == MA_PRINT)
+ oidset_init(&missing_objects, DEFAULT_OIDSET_SIZE);
+
+ traverse_commit_list_filtered(
+ &revs, show_commit, show_object, &info,
+ (arg_print_omitted ? &omitted_objects : NULL));
+
+ if (arg_print_omitted) {
+ struct oidset_iter iter;
+ struct object_id *oid;
+ oidset_iter_init(&omitted_objects, &iter);
+ while ((oid = oidset_iter_next(&iter)))
+ printf("~%s\n", oid_to_hex(oid));
+ oidset_clear(&omitted_objects);
+ }
+ if (arg_missing_action == MA_PRINT) {
+ struct oidset_iter iter;
+ struct object_id *oid;
+ oidset_iter_init(&missing_objects, &iter);
+ while ((oid = oidset_iter_next(&iter)))
+ printf("?%s\n", oid_to_hex(oid));
+ oidset_clear(&missing_objects);
+ }
+
+ stop_progress(&progress);
+
+ if (revs.count) {
+ if (revs.left_right && revs.cherry_mark)
+ printf("%d\t%d\t%d\n", revs.count_left, revs.count_right, revs.count_same);
+ else if (revs.left_right)
+ printf("%d\t%d\n", revs.count_left, revs.count_right);
+ else if (revs.cherry_mark)
+ printf("%d\t%d\n", revs.count_left + revs.count_right, revs.count_same);
+ else
+ printf("%d\n", revs.count_left + revs.count_right);
+ }
+
+ if (show_disk_usage)
+ print_disk_usage(total_disk_usage);
+
+cleanup:
+ release_revisions(&revs);
+ return ret;
+}
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
new file mode 100644
index 0000000..1c344d7
--- /dev/null
+++ b/builtin/rev-parse.c
@@ -0,0 +1,1096 @@
+/*
+ * rev-parse.c
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#define USE_THE_INDEX_VARIABLE
+#include "cache.h"
+#include "config.h"
+#include "commit.h"
+#include "refs.h"
+#include "quote.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "diff.h"
+#include "revision.h"
+#include "split-index.h"
+#include "submodule.h"
+#include "commit-reach.h"
+#include "shallow.h"
+
+#define DO_REVS 1
+#define DO_NOREV 2
+#define DO_FLAGS 4
+#define DO_NONFLAGS 8
+static int filter = ~0;
+
+static const char *def;
+
+#define NORMAL 0
+#define REVERSED 1
+static int show_type = NORMAL;
+
+#define SHOW_SYMBOLIC_ASIS 1
+#define SHOW_SYMBOLIC_FULL 2
+static int symbolic;
+static int abbrev;
+static int abbrev_ref;
+static int abbrev_ref_strict;
+static int output_sq;
+
+static int stuck_long;
+static struct ref_exclusions ref_excludes = REF_EXCLUSIONS_INIT;
+
+/*
+ * Some arguments are relevant "revision" arguments,
+ * others are about output format or other details.
+ * This sorts it all out.
+ */
+static int is_rev_argument(const char *arg)
+{
+ static const char *rev_args[] = {
+ "--all",
+ "--bisect",
+ "--dense",
+ "--branches=",
+ "--branches",
+ "--header",
+ "--ignore-missing",
+ "--max-age=",
+ "--max-count=",
+ "--min-age=",
+ "--no-merges",
+ "--min-parents=",
+ "--no-min-parents",
+ "--max-parents=",
+ "--no-max-parents",
+ "--objects",
+ "--objects-edge",
+ "--parents",
+ "--pretty",
+ "--remotes=",
+ "--remotes",
+ "--glob=",
+ "--sparse",
+ "--tags=",
+ "--tags",
+ "--topo-order",
+ "--date-order",
+ "--unpacked",
+ NULL
+ };
+ const char **p = rev_args;
+
+ /* accept -<digit>, like traditional "head" */
+ if ((*arg == '-') && isdigit(arg[1]))
+ return 1;
+
+ for (;;) {
+ const char *str = *p++;
+ int len;
+ if (!str)
+ return 0;
+ len = strlen(str);
+ if (!strcmp(arg, str) ||
+ (str[len-1] == '=' && !strncmp(arg, str, len)))
+ return 1;
+ }
+}
+
+/* Output argument as a string, either SQ or normal */
+static void show(const char *arg)
+{
+ if (output_sq) {
+ int sq = '\'', ch;
+
+ putchar(sq);
+ while ((ch = *arg++)) {
+ if (ch == sq)
+ fputs("'\\'", stdout);
+ putchar(ch);
+ }
+ putchar(sq);
+ putchar(' ');
+ }
+ else
+ puts(arg);
+}
+
+/* Like show(), but with a negation prefix according to type */
+static void show_with_type(int type, const char *arg)
+{
+ if (type != show_type)
+ putchar('^');
+ show(arg);
+}
+
+/* Output a revision, only if filter allows it */
+static void show_rev(int type, const struct object_id *oid, const char *name)
+{
+ if (!(filter & DO_REVS))
+ return;
+ def = NULL;
+
+ if ((symbolic || abbrev_ref) && name) {
+ if (symbolic == SHOW_SYMBOLIC_FULL || abbrev_ref) {
+ struct object_id discard;
+ char *full;
+
+ switch (dwim_ref(name, strlen(name), &discard, &full, 0)) {
+ case 0:
+ /*
+ * Not found -- not a ref. We could
+ * emit "name" here, but symbolic-full
+ * users are interested in finding the
+ * refs spelled in full, and they would
+ * need to filter non-refs if we did so.
+ */
+ break;
+ case 1: /* happy */
+ if (abbrev_ref)
+ full = shorten_unambiguous_ref(full,
+ abbrev_ref_strict);
+ show_with_type(type, full);
+ break;
+ default: /* ambiguous */
+ error("refname '%s' is ambiguous", name);
+ break;
+ }
+ free(full);
+ } else {
+ show_with_type(type, name);
+ }
+ }
+ else if (abbrev)
+ show_with_type(type, find_unique_abbrev(oid, abbrev));
+ else
+ show_with_type(type, oid_to_hex(oid));
+}
+
+/* Output a flag, only if filter allows it. */
+static int show_flag(const char *arg)
+{
+ if (!(filter & DO_FLAGS))
+ return 0;
+ if (filter & (is_rev_argument(arg) ? DO_REVS : DO_NOREV)) {
+ show(arg);
+ return 1;
+ }
+ return 0;
+}
+
+static int show_default(void)
+{
+ const char *s = def;
+
+ if (s) {
+ struct object_id oid;
+
+ def = NULL;
+ if (!get_oid(s, &oid)) {
+ show_rev(NORMAL, &oid, s);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int show_reference(const char *refname, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ if (ref_excluded(&ref_excludes, refname))
+ return 0;
+ show_rev(NORMAL, oid, refname);
+ return 0;
+}
+
+static int anti_reference(const char *refname, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ show_rev(REVERSED, oid, refname);
+ return 0;
+}
+
+static int show_abbrev(const struct object_id *oid, void *cb_data)
+{
+ show_rev(NORMAL, oid, NULL);
+ return 0;
+}
+
+static void show_datestring(const char *flag, const char *datestr)
+{
+ char *buffer;
+
+ /* date handling requires both flags and revs */
+ if ((filter & (DO_FLAGS | DO_REVS)) != (DO_FLAGS | DO_REVS))
+ return;
+ buffer = xstrfmt("%s%"PRItime, flag, approxidate(datestr));
+ show(buffer);
+ free(buffer);
+}
+
+static int show_file(const char *arg, int output_prefix)
+{
+ show_default();
+ if ((filter & (DO_NONFLAGS|DO_NOREV)) == (DO_NONFLAGS|DO_NOREV)) {
+ if (output_prefix) {
+ const char *prefix = startup_info->prefix;
+ char *fname = prefix_filename(prefix, arg);
+ show(fname);
+ free(fname);
+ } else
+ show(arg);
+ return 1;
+ }
+ return 0;
+}
+
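+/*
+ * Handle "A..B" and "A...B" range notation: print B and ^A for a
+ * plain range, or B, A and the negated merge bases for a symmetric
+ * difference.  Returns 1 if the argument was consumed as a range,
+ * 0 otherwise (a bare ".." is left alone so it can be treated as a
+ * pathspec).
+ */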
+static int try_difference(const char *arg)
+{
+ char *dotdot;
+ struct object_id start_oid;
+ struct object_id end_oid;
+ const char *end;
+ const char *start;
+ int symmetric;
+ static const char head_by_default[] = "HEAD";
+
+ if (!(dotdot = strstr(arg, "..")))
+ return 0;
+ end = dotdot + 2;
+ start = arg;
+ symmetric = (*end == '.');
+
+ *dotdot = 0;
+ end += symmetric;
+
+ if (!*end)
+ end = head_by_default;
+ if (dotdot == arg)
+ start = head_by_default;
+
+ if (start == head_by_default && end == head_by_default &&
+ !symmetric) {
+ /*
+ * Just ".."? That is not a range but the
+ * pathspec for the parent directory.
+ */
+ *dotdot = '.';
+ return 0;
+ }
+
+ if (!get_oid_committish(start, &start_oid) && !get_oid_committish(end, &end_oid)) {
+ show_rev(NORMAL, &end_oid, end);
+ show_rev(symmetric ? NORMAL : REVERSED, &start_oid, start);
+ if (symmetric) {
+ struct commit_list *exclude;
+ struct commit *a, *b;
+ a = lookup_commit_reference(the_repository, &start_oid);
+ b = lookup_commit_reference(the_repository, &end_oid);
+ if (!a || !b) {
+ *dotdot = '.';
+ return 0;
+ }
+ exclude = get_merge_bases(a, b);
+ while (exclude) {
+ struct commit *commit = pop_commit(&exclude);
+ show_rev(REVERSED, &commit->object.oid, NULL);
+ }
+ }
+ *dotdot = '.';
+ return 1;
+ }
+ *dotdot = '.';
+ return 0;
+}
+
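+/*
+ * Handle the "<rev>^!", "<rev>^@" and "<rev>^-<n>" shorthands:
+ * "^!" prints the commit and negates all of its parents, "^@"
+ * prints only the parents, and "^-<n>" prints the commit and
+ * negates its <n>-th parent (the first parent by default).
+ * Returns 1 if the argument was handled here.
+ */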
+static int try_parent_shorthands(const char *arg)
+{
+ char *dotdot;
+ struct object_id oid;
+ struct commit *commit;
+ struct commit_list *parents;
+ int parent_number;
+ int include_rev = 0;
+ int include_parents = 0;
+ int exclude_parent = 0;
+
+ if ((dotdot = strstr(arg, "^!"))) {
+ include_rev = 1;
+ if (dotdot[2])
+ return 0;
+ } else if ((dotdot = strstr(arg, "^@"))) {
+ include_parents = 1;
+ if (dotdot[2])
+ return 0;
+ } else if ((dotdot = strstr(arg, "^-"))) {
+ include_rev = 1;
+ exclude_parent = 1;
+
+ if (dotdot[2]) {
+ char *end;
+ exclude_parent = strtoul(dotdot + 2, &end, 10);
+ if (*end != '\0' || !exclude_parent)
+ return 0;
+ }
+ } else
+ return 0;
+
+ *dotdot = 0;
+ if (get_oid_committish(arg, &oid) ||
+ !(commit = lookup_commit_reference(the_repository, &oid))) {
+ *dotdot = '^';
+ return 0;
+ }
+
+ if (exclude_parent &&
+ exclude_parent > commit_list_count(commit->parents)) {
+ *dotdot = '^';
+ return 0;
+ }
+
+ if (include_rev)
+ show_rev(NORMAL, &oid, arg);
+ for (parents = commit->parents, parent_number = 1;
+ parents;
+ parents = parents->next, parent_number++) {
+ char *name = NULL;
+
+ if (exclude_parent && parent_number != exclude_parent)
+ continue;
+
+ if (symbolic)
+ name = xstrfmt("%s^%d", arg, parent_number);
+ show_rev(include_parents ? NORMAL : REVERSED,
+ &parents->item->object.oid, name);
+ free(name);
+ }
+
+ *dotdot = '^';
+ return 1;
+}
+
+static int parseopt_dump(const struct option *o, const char *arg, int unset)
+{
+ struct strbuf *parsed = o->value;
+ if (unset)
+ strbuf_addf(parsed, " --no-%s", o->long_name);
+ else if (o->short_name && (o->long_name == NULL || !stuck_long))
+ strbuf_addf(parsed, " -%c", o->short_name);
+ else
+ strbuf_addf(parsed, " --%s", o->long_name);
+ if (arg) {
+ if (!stuck_long)
+ strbuf_addch(parsed, ' ');
+ else if (o->long_name)
+ strbuf_addch(parsed, '=');
+ sq_quote_buf(parsed, arg);
+ }
+ return 0;
+}
+
+static const char *skipspaces(const char *s)
+{
+ while (isspace(*s))
+ s++;
+ return s;
+}
+
+static char *findspace(const char *s)
+{
+ for (; *s; s++)
+ if (isspace(*s))
+ return (char*)s;
+ return NULL;
+}
+
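+/*
+ * Implement "git rev-parse --parseopt": read usage lines, a "--"
+ * separator and then one option spec per line (e.g. "q,quiet    be
+ * more quiet") from stdin, parse the remaining command line against
+ * those options, and print a sq-quoted "set -- ..." line for the
+ * calling shell script to eval.
+ */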
+static int cmd_parseopt(int argc, const char **argv, const char *prefix)
+{
+ static int keep_dashdash = 0, stop_at_non_option = 0;
+ static char const * const parseopt_usage[] = {
+ N_("git rev-parse --parseopt [<options>] -- [<args>...]"),
+ NULL
+ };
+ static struct option parseopt_opts[] = {
+ OPT_BOOL(0, "keep-dashdash", &keep_dashdash,
+ N_("keep the `--` passed as an arg")),
+ OPT_BOOL(0, "stop-at-non-option", &stop_at_non_option,
+ N_("stop parsing after the "
+ "first non-option argument")),
+ OPT_BOOL(0, "stuck-long", &stuck_long,
+ N_("output in stuck long form")),
+ OPT_END(),
+ };
+ static const char * const flag_chars = "*=?!";
+
+ struct strbuf sb = STRBUF_INIT, parsed = STRBUF_INIT;
+ const char **usage = NULL;
+ struct option *opts = NULL;
+ int onb = 0, osz = 0, unb = 0, usz = 0;
+
+ strbuf_addstr(&parsed, "set --");
+ argc = parse_options(argc, argv, prefix, parseopt_opts, parseopt_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+ if (argc < 1 || strcmp(argv[0], "--"))
+ usage_with_options(parseopt_usage, parseopt_opts);
+
+ /* get the usage up to the first line with a -- on it */
+ for (;;) {
+ if (strbuf_getline(&sb, stdin) == EOF)
+ die(_("premature end of input"));
+ ALLOC_GROW(usage, unb + 1, usz);
+ if (!strcmp("--", sb.buf)) {
+ if (unb < 1)
+ die(_("no usage string given before the `--' separator"));
+ usage[unb] = NULL;
+ break;
+ }
+ usage[unb++] = strbuf_detach(&sb, NULL);
+ }
+
+ /* parse: (<short>|<short>,<long>|<long>)[*=?!]*<arghint>? SP+ <help> */
+ while (strbuf_getline(&sb, stdin) != EOF) {
+ const char *s;
+ char *help;
+ struct option *o;
+
+ if (!sb.len)
+ continue;
+
+ ALLOC_GROW(opts, onb + 1, osz);
+ memset(opts + onb, 0, sizeof(opts[onb]));
+
+ o = &opts[onb++];
+ help = findspace(sb.buf);
+ if (!help || sb.buf == help) {
+ o->type = OPTION_GROUP;
+ o->help = xstrdup(skipspaces(sb.buf));
+ continue;
+ }
+
+ *help = '\0';
+
+ o->type = OPTION_CALLBACK;
+ o->help = xstrdup(skipspaces(help+1));
+ o->value = &parsed;
+ o->flags = PARSE_OPT_NOARG;
+ o->callback = &parseopt_dump;
+
+ /* name(s) */
+ s = strpbrk(sb.buf, flag_chars);
+ if (!s)
+ s = help;
+
+ if (s == sb.buf)
+ die(_("missing opt-spec before option flags"));
+
+ if (s - sb.buf == 1) /* short option only */
+ o->short_name = *sb.buf;
+ else if (sb.buf[1] != ',') /* long option only */
+ o->long_name = xmemdupz(sb.buf, s - sb.buf);
+ else {
+ o->short_name = *sb.buf;
+ o->long_name = xmemdupz(sb.buf + 2, s - sb.buf - 2);
+ }
+
+ /* flags */
+ while (s < help) {
+ switch (*s++) {
+ case '=':
+ o->flags &= ~PARSE_OPT_NOARG;
+ continue;
+ case '?':
+ o->flags &= ~PARSE_OPT_NOARG;
+ o->flags |= PARSE_OPT_OPTARG;
+ continue;
+ case '!':
+ o->flags |= PARSE_OPT_NONEG;
+ continue;
+ case '*':
+ o->flags |= PARSE_OPT_HIDDEN;
+ continue;
+ }
+ s--;
+ break;
+ }
+
+ if (s < help)
+ o->argh = xmemdupz(s, help - s);
+ }
+ strbuf_release(&sb);
+
+ /* put an OPT_END() */
+ ALLOC_GROW(opts, onb + 1, osz);
+ memset(opts + onb, 0, sizeof(opts[onb]));
+ argc = parse_options(argc, argv, prefix, opts, usage,
+ (keep_dashdash ? PARSE_OPT_KEEP_DASHDASH : 0) |
+ (stop_at_non_option ? PARSE_OPT_STOP_AT_NON_OPTION : 0) |
+ PARSE_OPT_SHELL_EVAL);
+
+ strbuf_addstr(&parsed, " --");
+ sq_quote_argv(&parsed, argv);
+ puts(parsed.buf);
+ return 0;
+}
+
+static int cmd_sq_quote(int argc, const char **argv)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ if (argc)
+ sq_quote_argv(&buf, argv);
+ printf("%s\n", buf.buf);
+ strbuf_release(&buf);
+
+ return 0;
+}
+
+static void die_no_single_rev(int quiet)
+{
+ if (quiet)
+ exit(1);
+ else
+ die(_("Needed a single revision"));
+}
+
+static const char builtin_rev_parse_usage[] =
+N_("git rev-parse --parseopt [<options>] -- [<args>...]\n"
+ " or: git rev-parse --sq-quote [<arg>...]\n"
+ " or: git rev-parse [<options>] [<arg>...]\n"
+ "\n"
+ "Run \"git rev-parse --parseopt -h\" for more information on the first usage.");
+
+/*
+ * Parse "opt" or "opt=<value>", setting value respectively to either
+ * NULL or the string after "=".
+ */
+static int opt_with_value(const char *arg, const char *opt, const char **value)
+{
+ if (skip_prefix(arg, opt, &arg)) {
+ if (!*arg) {
+ *value = NULL;
+ return 1;
+ }
+ if (*arg++ == '=') {
+ *value = arg;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void handle_ref_opt(const char *pattern, const char *prefix)
+{
+ if (pattern)
+ for_each_glob_ref_in(show_reference, pattern, prefix, NULL);
+ else
+ for_each_ref_in(prefix, show_reference, NULL);
+ clear_ref_exclusions(&ref_excludes);
+}
+
+enum format_type {
+ /* We would like a relative path. */
+ FORMAT_RELATIVE,
+ /* We would like a canonical absolute path. */
+ FORMAT_CANONICAL,
+ /* We would like the default behavior. */
+ FORMAT_DEFAULT,
+};
+
+enum default_type {
+ /* Our default is a relative path. */
+ DEFAULT_RELATIVE,
+ /* Our default is a relative path if there's a shared root. */
+ DEFAULT_RELATIVE_IF_SHARED,
+ /* Our default is a canonical absolute path. */
+ DEFAULT_CANONICAL,
+ /* Our default is not to modify the item. */
+ DEFAULT_UNMODIFIED,
+};
+
+static void print_path(const char *path, const char *prefix, enum format_type format, enum default_type def)
+{
+ char *cwd = NULL;
+ /*
+ * We don't ever produce a relative path if prefix is NULL, so set the
+ * prefix to the current directory so that we can produce a relative
+ * path whenever possible. If we're using RELATIVE_IF_SHARED mode, then
+ * we want an absolute path unless the two share a common prefix, so don't
+ * set it in that case, since doing so causes a relative path to always
+ * be produced if possible.
+ */
+ if (!prefix && (format != FORMAT_DEFAULT || def != DEFAULT_RELATIVE_IF_SHARED))
+ prefix = cwd = xgetcwd();
+ if (format == FORMAT_DEFAULT && def == DEFAULT_UNMODIFIED) {
+ puts(path);
+ } else if (format == FORMAT_RELATIVE ||
+ (format == FORMAT_DEFAULT && def == DEFAULT_RELATIVE)) {
+ /*
+ * In order for relative_path to work as expected, we need to
+ * make sure that both paths are absolute paths. If we don't,
+ * we can end up with an unexpected absolute path that the user
+ * didn't want.
+ */
+ struct strbuf buf = STRBUF_INIT, realbuf = STRBUF_INIT, prefixbuf = STRBUF_INIT;
+ if (!is_absolute_path(path)) {
+ strbuf_realpath_forgiving(&realbuf, path, 1);
+ path = realbuf.buf;
+ }
+ if (!is_absolute_path(prefix)) {
+ strbuf_realpath_forgiving(&prefixbuf, prefix, 1);
+ prefix = prefixbuf.buf;
+ }
+ puts(relative_path(path, prefix, &buf));
+ strbuf_release(&buf);
+ strbuf_release(&realbuf);
+ strbuf_release(&prefixbuf);
+ } else if (format == FORMAT_DEFAULT && def == DEFAULT_RELATIVE_IF_SHARED) {
+ struct strbuf buf = STRBUF_INIT;
+ puts(relative_path(path, prefix, &buf));
+ strbuf_release(&buf);
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_realpath_forgiving(&buf, path, 1);
+ puts(buf.buf);
+ strbuf_release(&buf);
+ }
+ free(cwd);
+}
+
+int cmd_rev_parse(int argc, const char **argv, const char *prefix)
+{
+ int i, as_is = 0, verify = 0, quiet = 0, revs_count = 0, type = 0;
+ int did_repo_setup = 0;
+ int has_dashdash = 0;
+ int output_prefix = 0;
+ struct object_id oid;
+ unsigned int flags = 0;
+ const char *name = NULL;
+ struct object_context unused;
+ struct strbuf buf = STRBUF_INIT;
+ const int hexsz = the_hash_algo->hexsz;
+ int seen_end_of_options = 0;
+ enum format_type format = FORMAT_DEFAULT;
+
+ if (argc > 1 && !strcmp("--parseopt", argv[1]))
+ return cmd_parseopt(argc - 1, argv + 1, prefix);
+
+ if (argc > 1 && !strcmp("--sq-quote", argv[1]))
+ return cmd_sq_quote(argc - 2, argv + 2);
+
+ if (argc > 1 && !strcmp("-h", argv[1]))
+ usage(builtin_rev_parse_usage);
+
+ for (i = 1; i < argc; i++) {
+ if (!strcmp(argv[i], "--")) {
+ has_dashdash = 1;
+ break;
+ }
+ }
+
+ /* No options; just report on whether we're in a git repo or not. */
+ if (argc == 1) {
+ setup_git_directory();
+ git_config(git_default_config, NULL);
+ return 0;
+ }
+
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+
+ if (as_is) {
+ if (show_file(arg, output_prefix) && as_is < 2)
+ verify_filename(prefix, arg, 0);
+ continue;
+ }
+
+ if (!seen_end_of_options) {
+ if (!strcmp(arg, "--local-env-vars")) {
+ int i;
+ for (i = 0; local_repo_env[i]; i++)
+ printf("%s\n", local_repo_env[i]);
+ continue;
+ }
+ if (!strcmp(arg, "--resolve-git-dir")) {
+ const char *gitdir = argv[++i];
+ if (!gitdir)
+ die(_("--resolve-git-dir requires an argument"));
+ gitdir = resolve_gitdir(gitdir);
+ if (!gitdir)
+ die(_("not a gitdir '%s'"), argv[i]);
+ puts(gitdir);
+ continue;
+ }
+ }
+
+ /* The rest of the options require a git repository. */
+ if (!did_repo_setup) {
+ prefix = setup_git_directory();
+ git_config(git_default_config, NULL);
+ did_repo_setup = 1;
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
+ if (!strcmp(arg, "--")) {
+ as_is = 2;
+			/* Pass on the "--" if we show anything but files. */
+ if (filter & (DO_FLAGS | DO_REVS))
+ show_file(arg, 0);
+ continue;
+ }
+
+ if (!seen_end_of_options && *arg == '-') {
+ if (!strcmp(arg, "--git-path")) {
+ if (!argv[i + 1])
+ die(_("--git-path requires an argument"));
+ strbuf_reset(&buf);
+ print_path(git_path("%s", argv[i + 1]), prefix,
+ format,
+ DEFAULT_RELATIVE_IF_SHARED);
+ i++;
+ continue;
+ }
+ if (!strcmp(arg,"-n")) {
+ if (++i >= argc)
+ die(_("-n requires an argument"));
+ if ((filter & DO_FLAGS) && (filter & DO_REVS)) {
+ show(arg);
+ show(argv[i]);
+ }
+ continue;
+ }
+ if (starts_with(arg, "-n")) {
+ if ((filter & DO_FLAGS) && (filter & DO_REVS))
+ show(arg);
+ continue;
+ }
+ if (opt_with_value(arg, "--path-format", &arg)) {
+ if (!arg)
+ die(_("--path-format requires an argument"));
+ if (!strcmp(arg, "absolute")) {
+ format = FORMAT_CANONICAL;
+ } else if (!strcmp(arg, "relative")) {
+ format = FORMAT_RELATIVE;
+ } else {
+ die(_("unknown argument to --path-format: %s"), arg);
+ }
+ continue;
+ }
+ if (!strcmp(arg, "--default")) {
+ def = argv[++i];
+ if (!def)
+ die(_("--default requires an argument"));
+ continue;
+ }
+ if (!strcmp(arg, "--prefix")) {
+ prefix = argv[++i];
+ if (!prefix)
+ die(_("--prefix requires an argument"));
+ startup_info->prefix = prefix;
+ output_prefix = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--revs-only")) {
+ filter &= ~DO_NOREV;
+ continue;
+ }
+ if (!strcmp(arg, "--no-revs")) {
+ filter &= ~DO_REVS;
+ continue;
+ }
+ if (!strcmp(arg, "--flags")) {
+ filter &= ~DO_NONFLAGS;
+ continue;
+ }
+ if (!strcmp(arg, "--no-flags")) {
+ filter &= ~DO_FLAGS;
+ continue;
+ }
+ if (!strcmp(arg, "--verify")) {
+ filter &= ~(DO_FLAGS|DO_NOREV);
+ verify = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--quiet") || !strcmp(arg, "-q")) {
+ quiet = 1;
+ flags |= GET_OID_QUIETLY;
+ continue;
+ }
+ if (opt_with_value(arg, "--short", &arg)) {
+ filter &= ~(DO_FLAGS|DO_NOREV);
+ verify = 1;
+ abbrev = DEFAULT_ABBREV;
+ if (!arg)
+ continue;
+ abbrev = strtoul(arg, NULL, 10);
+ if (abbrev < MINIMUM_ABBREV)
+ abbrev = MINIMUM_ABBREV;
+ else if (hexsz <= abbrev)
+ abbrev = hexsz;
+ continue;
+ }
+ if (!strcmp(arg, "--sq")) {
+ output_sq = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--not")) {
+ show_type ^= REVERSED;
+ continue;
+ }
+ if (!strcmp(arg, "--symbolic")) {
+ symbolic = SHOW_SYMBOLIC_ASIS;
+ continue;
+ }
+ if (!strcmp(arg, "--symbolic-full-name")) {
+ symbolic = SHOW_SYMBOLIC_FULL;
+ continue;
+ }
+ if (opt_with_value(arg, "--abbrev-ref", &arg)) {
+ abbrev_ref = 1;
+ abbrev_ref_strict = warn_ambiguous_refs;
+ if (arg) {
+ if (!strcmp(arg, "strict"))
+ abbrev_ref_strict = 1;
+ else if (!strcmp(arg, "loose"))
+ abbrev_ref_strict = 0;
+ else
+ die(_("unknown mode for --abbrev-ref: %s"),
+ arg);
+ }
+ continue;
+ }
+ if (!strcmp(arg, "--all")) {
+ for_each_ref(show_reference, NULL);
+ clear_ref_exclusions(&ref_excludes);
+ continue;
+ }
+ if (skip_prefix(arg, "--disambiguate=", &arg)) {
+ for_each_abbrev(arg, show_abbrev, NULL);
+ continue;
+ }
+ if (!strcmp(arg, "--bisect")) {
+ for_each_fullref_in("refs/bisect/bad", show_reference, NULL);
+ for_each_fullref_in("refs/bisect/good", anti_reference, NULL);
+ continue;
+ }
+ if (opt_with_value(arg, "--branches", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --branches"));
+ handle_ref_opt(arg, "refs/heads/");
+ continue;
+ }
+ if (opt_with_value(arg, "--tags", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --tags"));
+ handle_ref_opt(arg, "refs/tags/");
+ continue;
+ }
+ if (skip_prefix(arg, "--glob=", &arg)) {
+ handle_ref_opt(arg, NULL);
+ continue;
+ }
+ if (opt_with_value(arg, "--remotes", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --remotes"));
+ handle_ref_opt(arg, "refs/remotes/");
+ continue;
+ }
+ if (skip_prefix(arg, "--exclude=", &arg)) {
+ add_ref_exclusion(&ref_excludes, arg);
+ continue;
+ }
+ if (skip_prefix(arg, "--exclude-hidden=", &arg)) {
+ exclude_hidden_refs(&ref_excludes, arg);
+ continue;
+ }
+ if (!strcmp(arg, "--show-toplevel")) {
+ const char *work_tree = get_git_work_tree();
+ if (work_tree)
+ print_path(work_tree, prefix, format, DEFAULT_UNMODIFIED);
+ else
+ die(_("this operation must be run in a work tree"));
+ continue;
+ }
+ if (!strcmp(arg, "--show-superproject-working-tree")) {
+ struct strbuf superproject = STRBUF_INIT;
+ if (get_superproject_working_tree(&superproject))
+ print_path(superproject.buf, prefix, format, DEFAULT_UNMODIFIED);
+ strbuf_release(&superproject);
+ continue;
+ }
+ if (!strcmp(arg, "--show-prefix")) {
+ if (prefix)
+ puts(prefix);
+ else
+ putchar('\n');
+ continue;
+ }
+ if (!strcmp(arg, "--show-cdup")) {
+ const char *pfx = prefix;
+ if (!is_inside_work_tree()) {
+ const char *work_tree =
+ get_git_work_tree();
+ if (work_tree)
+ printf("%s\n", work_tree);
+ continue;
+ }
+ while (pfx) {
+ pfx = strchr(pfx, '/');
+ if (pfx) {
+ pfx++;
+ printf("../");
+ }
+ }
+ putchar('\n');
+ continue;
+ }
+ if (!strcmp(arg, "--git-dir") ||
+ !strcmp(arg, "--absolute-git-dir")) {
+ const char *gitdir = getenv(GIT_DIR_ENVIRONMENT);
+ char *cwd;
+ int len;
+ enum format_type wanted = format;
+ if (arg[2] == 'g') { /* --git-dir */
+ if (gitdir) {
+ print_path(gitdir, prefix, format, DEFAULT_UNMODIFIED);
+ continue;
+ }
+ if (!prefix) {
+ print_path(".git", prefix, format, DEFAULT_UNMODIFIED);
+ continue;
+ }
+ } else { /* --absolute-git-dir */
+ wanted = FORMAT_CANONICAL;
+ if (!gitdir && !prefix)
+ gitdir = ".git";
+ if (gitdir) {
+ struct strbuf realpath = STRBUF_INIT;
+ strbuf_realpath(&realpath, gitdir, 1);
+ puts(realpath.buf);
+ strbuf_release(&realpath);
+ continue;
+ }
+ }
+ cwd = xgetcwd();
+ len = strlen(cwd);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s%s.git", cwd, len && cwd[len-1] != '/' ? "/" : "");
+ free(cwd);
+ print_path(buf.buf, prefix, wanted, DEFAULT_CANONICAL);
+ continue;
+ }
+ if (!strcmp(arg, "--git-common-dir")) {
+ print_path(get_git_common_dir(), prefix, format, DEFAULT_RELATIVE_IF_SHARED);
+ continue;
+ }
+ if (!strcmp(arg, "--is-inside-git-dir")) {
+ printf("%s\n", is_inside_git_dir() ? "true"
+ : "false");
+ continue;
+ }
+ if (!strcmp(arg, "--is-inside-work-tree")) {
+ printf("%s\n", is_inside_work_tree() ? "true"
+ : "false");
+ continue;
+ }
+ if (!strcmp(arg, "--is-bare-repository")) {
+ printf("%s\n", is_bare_repository() ? "true"
+ : "false");
+ continue;
+ }
+ if (!strcmp(arg, "--is-shallow-repository")) {
+ printf("%s\n",
+ is_repository_shallow(the_repository) ? "true"
+ : "false");
+ continue;
+ }
+ if (!strcmp(arg, "--shared-index-path")) {
+ if (repo_read_index(the_repository) < 0)
+ die(_("Could not read the index"));
+ if (the_index.split_index) {
+ const struct object_id *oid = &the_index.split_index->base_oid;
+ const char *path = git_path("sharedindex.%s", oid_to_hex(oid));
+ print_path(path, prefix, format, DEFAULT_RELATIVE);
+ }
+ continue;
+ }
+ if (skip_prefix(arg, "--since=", &arg)) {
+ show_datestring("--max-age=", arg);
+ continue;
+ }
+ if (skip_prefix(arg, "--after=", &arg)) {
+ show_datestring("--max-age=", arg);
+ continue;
+ }
+ if (skip_prefix(arg, "--before=", &arg)) {
+ show_datestring("--min-age=", arg);
+ continue;
+ }
+ if (skip_prefix(arg, "--until=", &arg)) {
+ show_datestring("--min-age=", arg);
+ continue;
+ }
+ if (opt_with_value(arg, "--show-object-format", &arg)) {
+ const char *val = arg ? arg : "storage";
+
+ if (strcmp(val, "storage") &&
+ strcmp(val, "input") &&
+ strcmp(val, "output"))
+ die(_("unknown mode for --show-object-format: %s"),
+ arg);
+ puts(the_hash_algo->name);
+ continue;
+ }
+ if (!strcmp(arg, "--end-of-options")) {
+ seen_end_of_options = 1;
+ if (filter & (DO_FLAGS | DO_REVS))
+ show_file(arg, 0);
+ continue;
+ }
+ if (show_flag(arg) && verify)
+ die_no_single_rev(quiet);
+ continue;
+ }
+
+ /* Not a flag argument */
+ if (try_difference(arg))
+ continue;
+ if (try_parent_shorthands(arg))
+ continue;
+ name = arg;
+ type = NORMAL;
+ if (*arg == '^') {
+ name++;
+ type = REVERSED;
+ }
+ if (!get_oid_with_context(the_repository, name,
+ flags, &oid, &unused)) {
+ if (verify)
+ revs_count++;
+ else
+ show_rev(type, &oid, name);
+ continue;
+ }
+ if (verify)
+ die_no_single_rev(quiet);
+ if (has_dashdash)
+ die(_("bad revision '%s'"), arg);
+ as_is = 1;
+ if (!show_file(arg, output_prefix))
+ continue;
+ verify_filename(prefix, arg, 1);
+ }
+ strbuf_release(&buf);
+ if (verify) {
+ if (revs_count == 1) {
+ show_rev(type, &oid, name);
+ return 0;
+ } else if (revs_count == 0 && show_default())
+ return 0;
+ die_no_single_rev(quiet);
+ } else
+ show_default();
+ return 0;
+}
diff --git a/builtin/revert.c b/builtin/revert.c
new file mode 100644
index 0000000..6a9b550
--- /dev/null
+++ b/builtin/revert.c
@@ -0,0 +1,267 @@
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "diff.h"
+#include "revision.h"
+#include "rerere.h"
+#include "dir.h"
+#include "sequencer.h"
+#include "branch.h"
+
+/*
+ * This implements the builtins revert and cherry-pick.
+ *
+ * Copyright (c) 2007 Johannes E. Schindelin
+ *
+ * Based on git-revert.sh, which is
+ *
+ * Copyright (c) 2005 Linus Torvalds
+ * Copyright (c) 2005 Junio C Hamano
+ */
+
+static const char * const revert_usage[] = {
+ N_("git revert [--[no-]edit] [-n] [-m <parent-number>] [-s] [-S[<keyid>]] <commit>..."),
+ N_("git revert (--continue | --skip | --abort | --quit)"),
+ NULL
+};
+
+static const char * const cherry_pick_usage[] = {
+ N_("git cherry-pick [--edit] [-n] [-m <parent-number>] [-s] [-x] [--ff]\n"
+ " [-S[<keyid>]] <commit>..."),
+ N_("git cherry-pick (--continue | --skip | --abort | --quit)"),
+ NULL
+};
+
+static const char *action_name(const struct replay_opts *opts)
+{
+ return opts->action == REPLAY_REVERT ? "revert" : "cherry-pick";
+}
+
+static const char * const *revert_or_cherry_pick_usage(struct replay_opts *opts)
+{
+ return opts->action == REPLAY_REVERT ? revert_usage : cherry_pick_usage;
+}
+
+static int option_parse_x(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct replay_opts **opts_ptr = opt->value;
+ struct replay_opts *opts = *opts_ptr;
+
+ if (unset)
+ return 0;
+
+ ALLOC_GROW(opts->xopts, opts->xopts_nr + 1, opts->xopts_alloc);
+ opts->xopts[opts->xopts_nr++] = xstrdup(arg);
+ return 0;
+}
+
+static int option_parse_m(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct replay_opts *replay = opt->value;
+ char *end;
+
+ if (unset) {
+ replay->mainline = 0;
+ return 0;
+ }
+
+ replay->mainline = strtol(arg, &end, 10);
+ if (*end || replay->mainline <= 0)
+ return error(_("option `%s' expects a number greater than zero"),
+ opt->long_name);
+
+ return 0;
+}
+
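+/*
+ * base_opt is followed by a NULL-terminated list of (option name,
+ * was-it-given) pairs; die as soon as one of the listed options was
+ * given together with base_opt.
+ */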
+LAST_ARG_MUST_BE_NULL
+static void verify_opt_compatible(const char *me, const char *base_opt, ...)
+{
+ const char *this_opt;
+ va_list ap;
+
+ va_start(ap, base_opt);
+ while ((this_opt = va_arg(ap, const char *))) {
+ if (va_arg(ap, int))
+ break;
+ }
+ va_end(ap);
+
+ if (this_opt)
+ die(_("%s: %s cannot be used with %s"), me, this_opt, base_opt);
+}
+
+static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
+{
+ const char * const * usage_str = revert_or_cherry_pick_usage(opts);
+ const char *me = action_name(opts);
+ const char *cleanup_arg = NULL;
+ int cmd = 0;
+ struct option base_options[] = {
+ OPT_CMDMODE(0, "quit", &cmd, N_("end revert or cherry-pick sequence"), 'q'),
+ OPT_CMDMODE(0, "continue", &cmd, N_("resume revert or cherry-pick sequence"), 'c'),
+ OPT_CMDMODE(0, "abort", &cmd, N_("cancel revert or cherry-pick sequence"), 'a'),
+ OPT_CMDMODE(0, "skip", &cmd, N_("skip current commit and continue"), 's'),
+ OPT_CLEANUP(&cleanup_arg),
+ OPT_BOOL('n', "no-commit", &opts->no_commit, N_("don't automatically commit")),
+ OPT_BOOL('e', "edit", &opts->edit, N_("edit the commit message")),
+ OPT_NOOP_NOARG('r', NULL),
+ OPT_BOOL('s', "signoff", &opts->signoff, N_("add a Signed-off-by trailer")),
+ OPT_CALLBACK('m', "mainline", opts, N_("parent-number"),
+ N_("select mainline parent"), option_parse_m),
+ OPT_RERERE_AUTOUPDATE(&opts->allow_rerere_auto),
+ OPT_STRING(0, "strategy", &opts->strategy, N_("strategy"), N_("merge strategy")),
+ OPT_CALLBACK('X', "strategy-option", &opts, N_("option"),
+ N_("option for merge strategy"), option_parse_x),
+ { OPTION_STRING, 'S', "gpg-sign", &opts->gpg_sign, N_("key-id"),
+ N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_END()
+ };
+ struct option *options = base_options;
+
+ if (opts->action == REPLAY_PICK) {
+ struct option cp_extra[] = {
+ OPT_BOOL('x', NULL, &opts->record_origin, N_("append commit name")),
+ OPT_BOOL(0, "ff", &opts->allow_ff, N_("allow fast-forward")),
+ OPT_BOOL(0, "allow-empty", &opts->allow_empty, N_("preserve initially empty commits")),
+ OPT_BOOL(0, "allow-empty-message", &opts->allow_empty_message, N_("allow commits with empty messages")),
+ OPT_BOOL(0, "keep-redundant-commits", &opts->keep_redundant_commits, N_("keep redundant, empty commits")),
+ OPT_END(),
+ };
+ options = parse_options_concat(options, cp_extra);
+ } else if (opts->action == REPLAY_REVERT) {
+ struct option cp_extra[] = {
+ OPT_BOOL(0, "reference", &opts->commit_use_reference,
+ N_("use the 'reference' format to refer to commits")),
+ OPT_END(),
+ };
+ options = parse_options_concat(options, cp_extra);
+ }
+
+ argc = parse_options(argc, argv, NULL, options, usage_str,
+ PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ /* implies allow_empty */
+ if (opts->keep_redundant_commits)
+ opts->allow_empty = 1;
+
+ if (cleanup_arg) {
+ opts->default_msg_cleanup = get_cleanup_mode(cleanup_arg, 1);
+ opts->explicit_cleanup = 1;
+ }
+
+ /* Check for incompatible command line arguments */
+ if (cmd) {
+ char *this_operation;
+ if (cmd == 'q')
+ this_operation = "--quit";
+ else if (cmd == 'c')
+ this_operation = "--continue";
+ else if (cmd == 's')
+ this_operation = "--skip";
+ else {
+ assert(cmd == 'a');
+ this_operation = "--abort";
+ }
+
+ verify_opt_compatible(me, this_operation,
+ "--no-commit", opts->no_commit,
+ "--signoff", opts->signoff,
+ "--mainline", opts->mainline,
+ "--strategy", opts->strategy ? 1 : 0,
+ "--strategy-option", opts->xopts ? 1 : 0,
+ "-x", opts->record_origin,
+ "--ff", opts->allow_ff,
+ "--rerere-autoupdate", opts->allow_rerere_auto == RERERE_AUTOUPDATE,
+ "--no-rerere-autoupdate", opts->allow_rerere_auto == RERERE_NOAUTOUPDATE,
+ NULL);
+ }
+
+ if (!opts->strategy && opts->default_strategy) {
+ opts->strategy = opts->default_strategy;
+ opts->default_strategy = NULL;
+ }
+
+ if (opts->allow_ff)
+ verify_opt_compatible(me, "--ff",
+ "--signoff", opts->signoff,
+ "--no-commit", opts->no_commit,
+ "-x", opts->record_origin,
+ "--edit", opts->edit > 0,
+ NULL);
+
+ if (cmd) {
+ opts->revs = NULL;
+ } else {
+ struct setup_revision_opt s_r_opt;
+ opts->revs = xmalloc(sizeof(*opts->revs));
+ repo_init_revisions(the_repository, opts->revs, NULL);
+ opts->revs->no_walk = 1;
+ opts->revs->unsorted_input = 1;
+ if (argc < 2)
+ usage_with_options(usage_str, options);
+ if (!strcmp(argv[1], "-"))
+ argv[1] = "@{-1}";
+ memset(&s_r_opt, 0, sizeof(s_r_opt));
+ s_r_opt.assume_dashdash = 1;
+ argc = setup_revisions(argc, argv, opts->revs, &s_r_opt);
+ }
+
+ if (argc > 1)
+ usage_with_options(usage_str, options);
+
+ /* These option values will be free()d */
+ opts->gpg_sign = xstrdup_or_null(opts->gpg_sign);
+ opts->strategy = xstrdup_or_null(opts->strategy);
+ if (!opts->strategy && getenv("GIT_TEST_MERGE_ALGORITHM"))
+ opts->strategy = xstrdup(getenv("GIT_TEST_MERGE_ALGORITHM"));
+
+ if (cmd == 'q') {
+ int ret = sequencer_remove_state(opts);
+ if (!ret)
+ remove_branch_state(the_repository, 0);
+ return ret;
+ }
+ if (cmd == 'c')
+ return sequencer_continue(the_repository, opts);
+ if (cmd == 'a')
+ return sequencer_rollback(the_repository, opts);
+ if (cmd == 's')
+ return sequencer_skip(the_repository, opts);
+ return sequencer_pick_revisions(the_repository, opts);
+}
+
+int cmd_revert(int argc, const char **argv, const char *prefix)
+{
+ struct replay_opts opts = REPLAY_OPTS_INIT;
+ int res;
+
+ opts.action = REPLAY_REVERT;
+ sequencer_init_config(&opts);
+ res = run_sequencer(argc, argv, &opts);
+ if (res < 0)
+ die(_("revert failed"));
+ if (opts.revs)
+ release_revisions(opts.revs);
+ free(opts.revs);
+ return res;
+}
+
+int cmd_cherry_pick(int argc, const char **argv, const char *prefix)
+{
+ struct replay_opts opts = REPLAY_OPTS_INIT;
+ int res;
+
+ opts.action = REPLAY_PICK;
+ sequencer_init_config(&opts);
+ res = run_sequencer(argc, argv, &opts);
+ if (res < 0)
+ die(_("cherry-pick failed"));
+ return res;
+}
diff --git a/builtin/rm.c b/builtin/rm.c
new file mode 100644
index 0000000..d4989d4
--- /dev/null
+++ b/builtin/rm.c
@@ -0,0 +1,437 @@
+/*
+ * "git rm" builtin command
+ *
+ * Copyright (C) Linus Torvalds 2006
+ */
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "builtin.h"
+#include "advice.h"
+#include "config.h"
+#include "lockfile.h"
+#include "dir.h"
+#include "cache-tree.h"
+#include "tree-walk.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "submodule.h"
+#include "pathspec.h"
+
+static const char * const builtin_rm_usage[] = {
+ N_("git rm [-f | --force] [-n] [-r] [--cached] [--ignore-unmatch]\n"
+ " [--quiet] [--pathspec-from-file=<file> [--pathspec-file-nul]]\n"
+ " [--] [<pathspec>...]"),
+ NULL
+};
+
+static struct {
+ int nr, alloc;
+ struct {
+ const char *name;
+ char is_submodule;
+ } *entry;
+} list;
+
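+/*
+ * Given the negative return value of index_name_pos() for an
+ * unmerged path, scan forward from the insertion point and return
+ * the cache index of the stage-2 ("ours") entry, or -1 if there is
+ * no such entry.
+ */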
+static int get_ours_cache_pos(const char *path, int pos)
+{
+ int i = -pos - 1;
+
+ while ((i < the_index.cache_nr) && !strcmp(the_index.cache[i]->name, path)) {
+ if (ce_stage(the_index.cache[i]) == 2)
+ return i;
+ i++;
+ }
+ return -1;
+}
+
+static void print_error_files(struct string_list *files_list,
+ const char *main_msg,
+ const char *hints_msg,
+ int *errs)
+{
+ if (files_list->nr) {
+ int i;
+ struct strbuf err_msg = STRBUF_INIT;
+
+ strbuf_addstr(&err_msg, main_msg);
+ for (i = 0; i < files_list->nr; i++)
+ strbuf_addf(&err_msg,
+ "\n %s",
+ files_list->items[i].string);
+ if (advice_enabled(ADVICE_RM_HINTS))
+ strbuf_addstr(&err_msg, hints_msg);
+ *errs = error("%s", err_msg.buf);
+ strbuf_release(&err_msg);
+ }
+}
+
+static void submodules_absorb_gitdir_if_needed(void)
+{
+ int i;
+ for (i = 0; i < list.nr; i++) {
+ const char *name = list.entry[i].name;
+ int pos;
+ const struct cache_entry *ce;
+
+ pos = index_name_pos(&the_index, name, strlen(name));
+ if (pos < 0) {
+ pos = get_ours_cache_pos(name, pos);
+ if (pos < 0)
+ continue;
+ }
+ ce = the_index.cache[pos];
+
+ if (!S_ISGITLINK(ce->ce_mode) ||
+ !file_exists(ce->name) ||
+ is_empty_dir(name))
+ continue;
+
+ if (!submodule_uses_gitfile(name))
+ absorb_git_dir_into_superproject(name);
+ }
+}
+
+static int check_local_mod(struct object_id *head, int index_only)
+{
+ /*
+ * Items in list are already sorted in the cache order,
+ * so we could do this a lot more efficiently by using
+ * tree_desc based traversal if we wanted to, but I am
+ * lazy, and who cares if removal of files is a tad
+ * slower than the theoretical maximum speed?
+ */
+ int i, no_head;
+ int errs = 0;
+ struct string_list files_staged = STRING_LIST_INIT_NODUP;
+ struct string_list files_cached = STRING_LIST_INIT_NODUP;
+ struct string_list files_local = STRING_LIST_INIT_NODUP;
+
+ no_head = is_null_oid(head);
+ for (i = 0; i < list.nr; i++) {
+ struct stat st;
+ int pos;
+ const struct cache_entry *ce;
+ const char *name = list.entry[i].name;
+ struct object_id oid;
+ unsigned short mode;
+ int local_changes = 0;
+ int staged_changes = 0;
+
+ pos = index_name_pos(&the_index, name, strlen(name));
+ if (pos < 0) {
+ /*
+ * Skip unmerged entries except for populated submodules
+ * that could lose history when removed.
+ */
+ pos = get_ours_cache_pos(name, pos);
+ if (pos < 0)
+ continue;
+
+ if (!S_ISGITLINK(the_index.cache[pos]->ce_mode) ||
+ is_empty_dir(name))
+ continue;
+ }
+ ce = the_index.cache[pos];
+
+ if (lstat(ce->name, &st) < 0) {
+ if (!is_missing_file_error(errno))
+ warning_errno(_("failed to stat '%s'"), ce->name);
+ /* It already vanished from the working tree */
+ continue;
+ }
+ else if (S_ISDIR(st.st_mode)) {
+ /* if a file was removed and it is now a
+ * directory, that is the same as ENOENT as
+ * far as git is concerned; we do not track
+ * directories unless they are submodules.
+ */
+ if (!S_ISGITLINK(ce->ce_mode))
+ continue;
+ }
+
+ /*
+	 * "rm" of a path that has changes needs to be treated
+	 * carefully so as not to lose local changes accidentally.
+	 * A local change could be (1) the file in the work tree
+	 * differs from the index; and/or (2) the user staged
+	 * content that differs from the current commit in the
+	 * index.
+ *
+ * In such a case, you would need to --force the
+ * removal. However, "rm --cached" (remove only from
+ * the index) is safe if the index matches the file in
+ * the work tree or the HEAD commit, as it means that
+ * the content being removed is available elsewhere.
+ */
+
+ /*
+ * Is the index different from the file in the work tree?
+ * If it's a submodule, is its work tree modified?
+ */
+ if (ie_match_stat(&the_index, ce, &st, 0) ||
+ (S_ISGITLINK(ce->ce_mode) &&
+ bad_to_remove_submodule(ce->name,
+ SUBMODULE_REMOVAL_DIE_ON_ERROR |
+ SUBMODULE_REMOVAL_IGNORE_IGNORED_UNTRACKED)))
+ local_changes = 1;
+
+ /*
+ * Is the index different from the HEAD commit? By
+ * definition, before the very initial commit,
+	 * anything staged in the index is treated the same way
+	 * as a change from the HEAD.
+ */
+ if (no_head
+ || get_tree_entry(the_repository, head, name, &oid, &mode)
+ || ce->ce_mode != create_ce_mode(mode)
+ || !oideq(&ce->oid, &oid))
+ staged_changes = 1;
+
+ /*
+ * If the index does not match the file in the work
+ * tree and if it does not match the HEAD commit
+ * either, (1) "git rm" without --cached definitely
+ * will lose information; (2) "git rm --cached" will
+ * lose information unless it is about removing an
+ * "intent to add" entry.
+ */
+ if (local_changes && staged_changes) {
+ if (!index_only || !ce_intent_to_add(ce))
+ string_list_append(&files_staged, name);
+ }
+ else if (!index_only) {
+ if (staged_changes)
+ string_list_append(&files_cached, name);
+ if (local_changes)
+ string_list_append(&files_local, name);
+ }
+ }
+ print_error_files(&files_staged,
+ Q_("the following file has staged content different "
+ "from both the\nfile and the HEAD:",
+ "the following files have staged content different"
+ " from both the\nfile and the HEAD:",
+ files_staged.nr),
+ _("\n(use -f to force removal)"),
+ &errs);
+ string_list_clear(&files_staged, 0);
+ print_error_files(&files_cached,
+ Q_("the following file has changes "
+ "staged in the index:",
+ "the following files have changes "
+ "staged in the index:", files_cached.nr),
+ _("\n(use --cached to keep the file,"
+ " or -f to force removal)"),
+ &errs);
+ string_list_clear(&files_cached, 0);
+
+ print_error_files(&files_local,
+ Q_("the following file has local modifications:",
+ "the following files have local modifications:",
+ files_local.nr),
+ _("\n(use --cached to keep the file,"
+ " or -f to force removal)"),
+ &errs);
+ string_list_clear(&files_local, 0);
+
+ return errs;
+}
+
+static int show_only = 0, force = 0, index_only = 0, recursive = 0, quiet = 0;
+static int ignore_unmatch = 0, pathspec_file_nul;
+static int include_sparse;
+static char *pathspec_from_file;
+
+static struct option builtin_rm_options[] = {
+ OPT__DRY_RUN(&show_only, N_("dry run")),
+ OPT__QUIET(&quiet, N_("do not list removed files")),
+ OPT_BOOL( 0 , "cached", &index_only, N_("only remove from the index")),
+ OPT__FORCE(&force, N_("override the up-to-date check"), PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL('r', NULL, &recursive, N_("allow recursive removal")),
+ OPT_BOOL( 0 , "ignore-unmatch", &ignore_unmatch,
+ N_("exit with a zero status even if nothing matched")),
+ OPT_BOOL(0, "sparse", &include_sparse, N_("allow updating entries outside of the sparse-checkout cone")),
+ OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
+ OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
+ OPT_END(),
+};
+
+int cmd_rm(int argc, const char **argv, const char *prefix)
+{
+ struct lock_file lock_file = LOCK_INIT;
+ int i, ret = 0;
+ struct pathspec pathspec;
+ char *seen;
+
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, builtin_rm_options,
+ builtin_rm_usage, 0);
+
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_CWD,
+ prefix, argv);
+
+ if (pathspec_from_file) {
+ if (pathspec.nr)
+ die(_("'%s' and pathspec arguments cannot be used together"), "--pathspec-from-file");
+
+ parse_pathspec_file(&pathspec, 0,
+ PATHSPEC_PREFER_CWD,
+ prefix, pathspec_from_file, pathspec_file_nul);
+ } else if (pathspec_file_nul) {
+ die(_("the option '%s' requires '%s'"), "--pathspec-file-nul", "--pathspec-from-file");
+ }
+
+ if (!pathspec.nr)
+ die(_("No pathspec was given. Which files should I remove?"));
+
+ if (!index_only)
+ setup_work_tree();
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+
+ if (repo_read_index(the_repository) < 0)
+ die(_("index file corrupt"));
+
+ refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &pathspec, NULL, NULL);
+
+ seen = xcalloc(pathspec.nr, 1);
+
+ if (pathspec_needs_expanded_index(&the_index, &pathspec))
+ ensure_full_index(&the_index);
+
+ for (i = 0; i < the_index.cache_nr; i++) {
+ const struct cache_entry *ce = the_index.cache[i];
+
+ if (!include_sparse &&
+ (ce_skip_worktree(ce) ||
+ !path_in_sparse_checkout(ce->name, &the_index)))
+ continue;
+ if (!ce_path_match(&the_index, ce, &pathspec, seen))
+ continue;
+ ALLOC_GROW(list.entry, list.nr + 1, list.alloc);
+ list.entry[list.nr].name = xstrdup(ce->name);
+ list.entry[list.nr].is_submodule = S_ISGITLINK(ce->ce_mode);
+ if (list.entry[list.nr++].is_submodule &&
+ !is_staging_gitmodules_ok(&the_index))
+ die(_("please stage your changes to .gitmodules or stash them to proceed"));
+ }
+
+ if (pathspec.nr) {
+ const char *original;
+ int seen_any = 0;
+ char *skip_worktree_seen = NULL;
+ struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP;
+
+ for (i = 0; i < pathspec.nr; i++) {
+ original = pathspec.items[i].original;
+ if (seen[i])
+ seen_any = 1;
+ else if (ignore_unmatch)
+ continue;
+ else if (!include_sparse &&
+ matches_skip_worktree(&pathspec, i, &skip_worktree_seen))
+ string_list_append(&only_match_skip_worktree, original);
+ else
+ die(_("pathspec '%s' did not match any files"), original);
+
+ if (!recursive && seen[i] == MATCHED_RECURSIVELY)
+ die(_("not removing '%s' recursively without -r"),
+ *original ? original : ".");
+ }
+
+ if (only_match_skip_worktree.nr) {
+ advise_on_updating_sparse_paths(&only_match_skip_worktree);
+ ret = 1;
+ }
+ free(skip_worktree_seen);
+ string_list_clear(&only_match_skip_worktree, 0);
+
+ if (!seen_any)
+ exit(ret);
+ }
+ clear_pathspec(&pathspec);
+ free(seen);
+
+ if (!index_only)
+ submodules_absorb_gitdir_if_needed();
+
+ /*
+	 * If not forced, the file, the index and the HEAD (if it
+	 * exists) must match; but the file may already have been
+	 * removed, since this sequence is a natural "novice" way:
+ *
+ * rm F; git rm F
+ *
+ * Further, if HEAD commit exists, "diff-index --cached" must
+ * report no changes unless forced.
+ */
+ if (!force) {
+ struct object_id oid;
+ if (get_oid("HEAD", &oid))
+ oidclr(&oid);
+ if (check_local_mod(&oid, index_only))
+ exit(1);
+ }
+
+ /*
+ * First remove the names from the index: we won't commit
+ * the index unless all of them succeed.
+ */
+ for (i = 0; i < list.nr; i++) {
+ const char *path = list.entry[i].name;
+ if (!quiet)
+ printf("rm '%s'\n", path);
+
+ if (remove_file_from_index(&the_index, path))
+ die(_("git rm: unable to remove %s"), path);
+ }
+
+ if (show_only)
+ return 0;
+
+ /*
+ * Then, unless we used "--cached", remove the filenames from
+ * the workspace. If we fail to remove the first one, we
+ * abort the "git rm" (but once we've successfully removed
+ * any file at all, we'll go ahead and commit to it all:
+ * by then we've already committed ourselves and can't fail
+ * in the middle)
+ */
+ if (!index_only) {
+ int removed = 0, gitmodules_modified = 0;
+ struct strbuf buf = STRBUF_INIT;
+ int flag = force ? REMOVE_DIR_PURGE_ORIGINAL_CWD : 0;
+ for (i = 0; i < list.nr; i++) {
+ const char *path = list.entry[i].name;
+ if (list.entry[i].is_submodule) {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, path);
+ if (remove_dir_recursively(&buf, flag))
+ die(_("could not remove '%s'"), path);
+
+ removed = 1;
+ if (!remove_path_from_gitmodules(path))
+ gitmodules_modified = 1;
+ continue;
+ }
+ if (!remove_path(path)) {
+ removed = 1;
+ continue;
+ }
+ if (!removed)
+ die_errno("git rm: '%s'", path);
+ }
+ strbuf_release(&buf);
+ if (gitmodules_modified)
+ stage_updated_gitmodules(&the_index);
+ }
+
+ if (write_locked_index(&the_index, &lock_file,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("Unable to write new index file"));
+
+ return ret;
+}
diff --git a/builtin/send-pack.c b/builtin/send-pack.c
new file mode 100644
index 0000000..4c5d125
--- /dev/null
+++ b/builtin/send-pack.c
@@ -0,0 +1,345 @@
+#include "builtin.h"
+#include "config.h"
+#include "commit.h"
+#include "refs.h"
+#include "pkt-line.h"
+#include "sideband.h"
+#include "run-command.h"
+#include "remote.h"
+#include "connect.h"
+#include "send-pack.h"
+#include "quote.h"
+#include "transport.h"
+#include "version.h"
+#include "oid-array.h"
+#include "gpg-interface.h"
+#include "gettext.h"
+#include "protocol.h"
+
+static const char * const send_pack_usage[] = {
+ N_("git send-pack [--mirror] [--dry-run] [--force]\n"
+ " [--receive-pack=<git-receive-pack>]\n"
+ " [--verbose] [--thin] [--atomic]\n"
+ " [--[no-]signed | --signed=(true|false|if-asked)]\n"
+ " [<host>:]<directory> (--all | <ref>...)"),
+ NULL,
+};
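+
+/*
+ * Illustrative invocation (a plumbing command; "git push" is the usual
+ * porcelain entry point). Host and path below are placeholders only:
+ *
+ *   git send-pack --force host.example.com:/srv/repo.git refs/heads/main
+ */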
+
+static struct send_pack_args args;
+
+static void print_helper_status(struct ref *ref)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct ref_push_report *report;
+
+ for (; ref; ref = ref->next) {
+ const char *msg = NULL;
+ const char *res;
+ int count = 0;
+
+ switch(ref->status) {
+ case REF_STATUS_NONE:
+ res = "error";
+ msg = "no match";
+ break;
+
+ case REF_STATUS_OK:
+ res = "ok";
+ break;
+
+ case REF_STATUS_UPTODATE:
+ res = "ok";
+ msg = "up to date";
+ break;
+
+ case REF_STATUS_REJECT_NONFASTFORWARD:
+ res = "error";
+ msg = "non-fast forward";
+ break;
+
+ case REF_STATUS_REJECT_FETCH_FIRST:
+ res = "error";
+ msg = "fetch first";
+ break;
+
+ case REF_STATUS_REJECT_NEEDS_FORCE:
+ res = "error";
+ msg = "needs force";
+ break;
+
+ case REF_STATUS_REJECT_STALE:
+ res = "error";
+ msg = "stale info";
+ break;
+
+ case REF_STATUS_REJECT_REMOTE_UPDATED:
+ res = "error";
+ msg = "remote ref updated since checkout";
+ break;
+
+ case REF_STATUS_REJECT_ALREADY_EXISTS:
+ res = "error";
+ msg = "already exists";
+ break;
+
+ case REF_STATUS_REJECT_NODELETE:
+ case REF_STATUS_REMOTE_REJECT:
+ res = "error";
+ break;
+
+ case REF_STATUS_EXPECTING_REPORT:
+ res = "error";
+ msg = "expecting report";
+ break;
+
+ default:
+ continue;
+ }
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s %s", res, ref->name);
+ if (ref->remote_status)
+ msg = ref->remote_status;
+ if (msg) {
+ strbuf_addch(&buf, ' ');
+ quote_two_c_style(&buf, "", msg, 0);
+ }
+ strbuf_addch(&buf, '\n');
+
+ if (ref->status == REF_STATUS_OK) {
+ for (report = ref->report; report; report = report->next) {
+ if (count++ > 0)
+ strbuf_addf(&buf, "ok %s\n", ref->name);
+ if (report->ref_name)
+ strbuf_addf(&buf, "option refname %s\n",
+ report->ref_name);
+ if (report->old_oid)
+ strbuf_addf(&buf, "option old-oid %s\n",
+ oid_to_hex(report->old_oid));
+ if (report->new_oid)
+ strbuf_addf(&buf, "option new-oid %s\n",
+ oid_to_hex(report->new_oid));
+ if (report->forced_update)
+ strbuf_addstr(&buf, "option forced-update\n");
+ }
+ }
+ write_or_die(1, buf.buf, buf.len);
+ }
+ strbuf_release(&buf);
+}
+
+static int send_pack_config(const char *k, const char *v, void *cb)
+{
+ git_gpg_config(k, v, NULL);
+
+ if (!strcmp(k, "push.gpgsign")) {
+ const char *value;
+ if (!git_config_get_value("push.gpgsign", &value)) {
+ switch (git_parse_maybe_bool(value)) {
+ case 0:
+ args.push_cert = SEND_PACK_PUSH_CERT_NEVER;
+ break;
+ case 1:
+ args.push_cert = SEND_PACK_PUSH_CERT_ALWAYS;
+ break;
+ default:
+ if (value && !strcasecmp(value, "if-asked"))
+ args.push_cert = SEND_PACK_PUSH_CERT_IF_ASKED;
+ else
+ return error(_("invalid value for '%s'"), k);
+ }
+ }
+ }
+ return git_default_config(k, v, cb);
+}
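+
+/*
+ * For example, a repository with "push.gpgsign" set to "if-asked" makes the
+ * handler above select SEND_PACK_PUSH_CERT_IF_ASKED, while plain booleans
+ * map to ALWAYS or NEVER. (Example configuration value only.)
+ */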
+
+int cmd_send_pack(int argc, const char **argv, const char *prefix)
+{
+ struct refspec rs = REFSPEC_INIT_PUSH;
+ const char *remote_name = NULL;
+ struct remote *remote = NULL;
+ const char *dest = NULL;
+ int fd[2];
+ struct child_process *conn;
+ struct oid_array extra_have = OID_ARRAY_INIT;
+ struct oid_array shallow = OID_ARRAY_INIT;
+ struct ref *remote_refs, *local_refs;
+ int ret;
+ int helper_status = 0;
+ int send_all = 0;
+ int verbose = 0;
+ const char *receivepack = "git-receive-pack";
+ unsigned dry_run = 0;
+ unsigned send_mirror = 0;
+ unsigned force_update = 0;
+ unsigned quiet = 0;
+ int push_cert = 0;
+ struct string_list push_options = STRING_LIST_INIT_NODUP;
+ unsigned use_thin_pack = 0;
+ unsigned atomic = 0;
+ unsigned stateless_rpc = 0;
+ int flags;
+ unsigned int reject_reasons;
+ int progress = -1;
+ int from_stdin = 0;
+ struct push_cas_option cas = {0};
+ int force_if_includes = 0;
+ struct packet_reader reader;
+
+ struct option options[] = {
+ OPT__VERBOSITY(&verbose),
+ OPT_STRING(0, "receive-pack", &receivepack, "receive-pack", N_("receive pack program")),
+ OPT_STRING(0, "exec", &receivepack, "receive-pack", N_("receive pack program")),
+ OPT_STRING(0, "remote", &remote_name, "remote", N_("remote name")),
+ OPT_BOOL(0, "all", &send_all, N_("push all refs")),
+ OPT_BOOL('n', "dry-run", &dry_run, N_("dry run")),
+ OPT_BOOL(0, "mirror", &send_mirror, N_("mirror all refs")),
+ OPT_BOOL('f', "force", &force_update, N_("force updates")),
+ OPT_CALLBACK_F(0, "signed", &push_cert, "(yes|no|if-asked)", N_("GPG sign the push"),
+ PARSE_OPT_OPTARG, option_parse_push_signed),
+ OPT_STRING_LIST(0, "push-option", &push_options,
+ N_("server-specific"),
+ N_("option to transmit")),
+ OPT_BOOL(0, "progress", &progress, N_("force progress reporting")),
+ OPT_BOOL(0, "thin", &use_thin_pack, N_("use thin pack")),
+ OPT_BOOL(0, "atomic", &atomic, N_("request atomic transaction on remote side")),
+ OPT_BOOL(0, "stateless-rpc", &stateless_rpc, N_("use stateless RPC protocol")),
+ OPT_BOOL(0, "stdin", &from_stdin, N_("read refs from stdin")),
+ OPT_BOOL(0, "helper-status", &helper_status, N_("print status from remote helper")),
+ OPT_CALLBACK_F(0, CAS_OPT_NAME, &cas, N_("<refname>:<expect>"),
+ N_("require old value of ref to be at this value"),
+ PARSE_OPT_OPTARG, parseopt_push_cas_option),
+ OPT_BOOL(0, TRANS_OPT_FORCE_IF_INCLUDES, &force_if_includes,
+ N_("require remote updates to be integrated locally")),
+ OPT_END()
+ };
+
+ git_config(send_pack_config, NULL);
+ argc = parse_options(argc, argv, prefix, options, send_pack_usage, 0);
+ if (argc > 0) {
+ dest = argv[0];
+ refspec_appendn(&rs, argv + 1, argc - 1);
+ }
+
+ if (!dest)
+ usage_with_options(send_pack_usage, options);
+
+ args.verbose = verbose;
+ args.dry_run = dry_run;
+ args.send_mirror = send_mirror;
+ args.force_update = force_update;
+ args.quiet = quiet;
+ args.push_cert = push_cert;
+ args.progress = progress;
+ args.use_thin_pack = use_thin_pack;
+ args.atomic = atomic;
+ args.stateless_rpc = stateless_rpc;
+ args.push_options = push_options.nr ? &push_options : NULL;
+ args.url = dest;
+
+ if (from_stdin) {
+ if (args.stateless_rpc) {
+ const char *buf;
+ while ((buf = packet_read_line(0, NULL)))
+ refspec_append(&rs, buf);
+ } else {
+ struct strbuf line = STRBUF_INIT;
+ while (strbuf_getline(&line, stdin) != EOF)
+ refspec_append(&rs, line.buf);
+ strbuf_release(&line);
+ }
+ }
+
+ /*
+ * --all and --mirror are incompatible; neither makes sense
+ * with any refspecs.
+ */
+ if ((rs.nr > 0 && (send_all || args.send_mirror)) ||
+ (send_all && args.send_mirror))
+ usage_with_options(send_pack_usage, options);
+
+ if (remote_name) {
+ remote = remote_get(remote_name);
+ if (!remote_has_url(remote, dest)) {
+ die("Destination %s is not a uri for %s",
+ dest, remote_name);
+ }
+ }
+
+ if (progress == -1)
+ progress = !args.quiet && isatty(2);
+ args.progress = progress;
+
+ if (args.stateless_rpc) {
+ conn = NULL;
+ fd[0] = 0;
+ fd[1] = 1;
+ } else {
+ conn = git_connect(fd, dest, receivepack,
+ args.verbose ? CONNECT_VERBOSE : 0);
+ }
+
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF |
+ PACKET_READ_DIE_ON_ERR_PACKET);
+
+ switch (discover_version(&reader)) {
+ case protocol_v2:
+ die("support for protocol v2 not implemented yet");
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ get_remote_heads(&reader, &remote_refs, REF_NORMAL,
+ &extra_have, &shallow);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
+
+ local_refs = get_local_heads();
+
+ flags = MATCH_REFS_NONE;
+
+ if (send_all)
+ flags |= MATCH_REFS_ALL;
+ if (args.send_mirror)
+ flags |= MATCH_REFS_MIRROR;
+
+ /* match them up */
+ if (match_push_refs(local_refs, &remote_refs, &rs, flags))
+ return -1;
+
+ if (!is_empty_cas(&cas))
+ apply_push_cas(&cas, remote, remote_refs);
+
+ if (!is_empty_cas(&cas) && force_if_includes)
+ cas.use_force_if_includes = 1;
+
+ set_ref_status_for_push(remote_refs, args.send_mirror,
+ args.force_update);
+
+ ret = send_pack(&args, fd, conn, remote_refs, &extra_have);
+
+ if (helper_status)
+ print_helper_status(remote_refs);
+
+ close(fd[1]);
+ close(fd[0]);
+
+ ret |= finish_connect(conn);
+
+ if (!helper_status)
+ transport_print_push_status(dest, remote_refs, args.verbose, 0, &reject_reasons);
+
+ if (!args.dry_run && remote) {
+ struct ref *ref;
+ for (ref = remote_refs; ref; ref = ref->next)
+ transport_update_tracking_ref(remote, ref, args.verbose);
+ }
+
+ if (!ret && !transport_refs_pushed(remote_refs))
+ fprintf(stderr, "Everything up-to-date\n");
+
+ return ret;
+}
diff --git a/builtin/shortlog.c b/builtin/shortlog.c
new file mode 100644
index 0000000..27a8716
--- /dev/null
+++ b/builtin/shortlog.c
@@ -0,0 +1,515 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "commit.h"
+#include "diff.h"
+#include "string-list.h"
+#include "revision.h"
+#include "utf8.h"
+#include "mailmap.h"
+#include "shortlog.h"
+#include "parse-options.h"
+#include "trailer.h"
+#include "strmap.h"
+
+static char const * const shortlog_usage[] = {
+ N_("git shortlog [<options>] [<revision-range>] [[--] <path>...]"),
+ N_("git log --pretty=short | git shortlog [<options>]"),
+ NULL
+};
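+
+/*
+ * Illustrative invocation (the range is a placeholder):
+ *
+ *   git shortlog -s -n -e v2.38.0..HEAD
+ */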
+
+/*
+ * The util field of our string_list_items will contain one of two things:
+ *
+ * - if --summary is not in use, it will point to a string list of the
+ * oneline subjects assigned to this author
+ *
+ * - if --summary is in use, we don't need that list; we only need to know
+ * its size. So we abuse the pointer slot to store our integer counter.
+ *
+ * This macro accesses the latter.
+ */
+#define UTIL_TO_INT(x) ((intptr_t)(x)->util)
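+
+/*
+ * For example (numbers illustrative): with --summary an author's item may
+ * carry util == (void *)42, meaning 42 commits; without --summary, util
+ * instead points to a string_list holding those 42 subject lines.
+ */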
+
+static int compare_by_counter(const void *a1, const void *a2)
+{
+ const struct string_list_item *i1 = a1, *i2 = a2;
+ return UTIL_TO_INT(i2) - UTIL_TO_INT(i1);
+}
+
+static int compare_by_list(const void *a1, const void *a2)
+{
+ const struct string_list_item *i1 = a1, *i2 = a2;
+ const struct string_list *l1 = i1->util, *l2 = i2->util;
+
+ if (l1->nr < l2->nr)
+ return 1;
+ else if (l1->nr == l2->nr)
+ return 0;
+ else
+ return -1;
+}
+
+static void insert_one_record(struct shortlog *log,
+ const char *ident,
+ const char *oneline)
+{
+ struct string_list_item *item;
+
+ item = string_list_insert(&log->list, ident);
+
+ if (log->summary)
+ item->util = (void *)(UTIL_TO_INT(item) + 1);
+ else {
+ char *buffer;
+ struct strbuf subject = STRBUF_INIT;
+ const char *eol;
+
+ /* Skip any leading whitespace, including any blank lines. */
+ while (*oneline && isspace(*oneline))
+ oneline++;
+ eol = strchr(oneline, '\n');
+ if (!eol)
+ eol = oneline + strlen(oneline);
+ if (starts_with(oneline, "[PATCH")) {
+ char *eob = strchr(oneline, ']');
+ if (eob && (!eol || eob < eol))
+ oneline = eob + 1;
+ }
+ while (*oneline && isspace(*oneline) && *oneline != '\n')
+ oneline++;
+ format_subject(&subject, oneline, " ");
+ buffer = strbuf_detach(&subject, NULL);
+
+ if (!item->util) {
+ item->util = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(item->util);
+ }
+ string_list_append(item->util, buffer);
+ }
+}
+
+static int parse_ident(struct shortlog *log,
+ struct strbuf *out, const char *in)
+{
+ const char *mailbuf, *namebuf;
+ size_t namelen, maillen;
+ struct ident_split ident;
+
+ if (split_ident_line(&ident, in, strlen(in)))
+ return -1;
+
+ namebuf = ident.name_begin;
+ mailbuf = ident.mail_begin;
+ namelen = ident.name_end - ident.name_begin;
+ maillen = ident.mail_end - ident.mail_begin;
+
+ map_user(&log->mailmap, &mailbuf, &maillen, &namebuf, &namelen);
+ strbuf_add(out, namebuf, namelen);
+ if (log->email)
+ strbuf_addf(out, " <%.*s>", (int)maillen, mailbuf);
+
+ return 0;
+}
+
+static void read_from_stdin(struct shortlog *log)
+{
+ struct strbuf ident = STRBUF_INIT;
+ struct strbuf mapped_ident = STRBUF_INIT;
+ struct strbuf oneline = STRBUF_INIT;
+ static const char *author_match[2] = { "Author: ", "author " };
+ static const char *committer_match[2] = { "Commit: ", "committer " };
+ const char **match;
+
+ if (HAS_MULTI_BITS(log->groups))
+ die(_("using multiple --group options with stdin is not supported"));
+
+ switch (log->groups) {
+ case SHORTLOG_GROUP_AUTHOR:
+ match = author_match;
+ break;
+ case SHORTLOG_GROUP_COMMITTER:
+ match = committer_match;
+ break;
+ case SHORTLOG_GROUP_TRAILER:
+ die(_("using %s with stdin is not supported"), "--group=trailer");
+ case SHORTLOG_GROUP_FORMAT:
+ die(_("using %s with stdin is not supported"), "--group=format");
+ default:
+ BUG("unhandled shortlog group");
+ }
+
+ while (strbuf_getline_lf(&ident, stdin) != EOF) {
+ const char *v;
+ if (!skip_prefix(ident.buf, match[0], &v) &&
+ !skip_prefix(ident.buf, match[1], &v))
+ continue;
+ while (strbuf_getline_lf(&oneline, stdin) != EOF &&
+ oneline.len)
+ ; /* discard headers */
+ while (strbuf_getline_lf(&oneline, stdin) != EOF &&
+ !oneline.len)
+ ; /* discard blanks */
+
+ strbuf_reset(&mapped_ident);
+ if (parse_ident(log, &mapped_ident, v) < 0)
+ continue;
+
+ insert_one_record(log, mapped_ident.buf, oneline.buf);
+ }
+ strbuf_release(&ident);
+ strbuf_release(&mapped_ident);
+ strbuf_release(&oneline);
+}
+
+static void insert_records_from_trailers(struct shortlog *log,
+ struct strset *dups,
+ struct commit *commit,
+ struct pretty_print_context *ctx,
+ const char *oneline)
+{
+ struct trailer_iterator iter;
+ const char *commit_buffer, *body;
+ struct strbuf ident = STRBUF_INIT;
+
+ if (!log->trailers.nr)
+ return;
+
+ /*
+ * Using format_commit_message("%B") would be simpler here, but
+ * this saves us copying the message.
+ */
+ commit_buffer = logmsg_reencode(commit, NULL, ctx->output_encoding);
+ body = strstr(commit_buffer, "\n\n");
+ if (!body)
+ return;
+
+ trailer_iterator_init(&iter, body);
+ while (trailer_iterator_advance(&iter)) {
+ const char *value = iter.val.buf;
+
+ if (!string_list_has_string(&log->trailers, iter.key.buf))
+ continue;
+
+ strbuf_reset(&ident);
+ if (!parse_ident(log, &ident, value))
+ value = ident.buf;
+
+ if (!strset_add(dups, value))
+ continue;
+ insert_one_record(log, value, oneline);
+ }
+ trailer_iterator_release(&iter);
+
+ strbuf_release(&ident);
+ unuse_commit_buffer(commit, commit_buffer);
+}
+
+static int shortlog_needs_dedup(const struct shortlog *log)
+{
+ return HAS_MULTI_BITS(log->groups) || log->format.nr > 1 || log->trailers.nr;
+}
+
+static void insert_records_from_format(struct shortlog *log,
+ struct strset *dups,
+ struct commit *commit,
+ struct pretty_print_context *ctx,
+ const char *oneline)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &log->format) {
+ strbuf_reset(&buf);
+
+ format_commit_message(commit, item->string, &buf, ctx);
+
+ if (!shortlog_needs_dedup(log) || strset_add(dups, buf.buf))
+ insert_one_record(log, buf.buf, oneline);
+ }
+
+ strbuf_release(&buf);
+}
+
+void shortlog_add_commit(struct shortlog *log, struct commit *commit)
+{
+ struct strbuf oneline = STRBUF_INIT;
+ struct strset dups = STRSET_INIT;
+ struct pretty_print_context ctx = {0};
+ const char *oneline_str;
+
+ ctx.fmt = CMIT_FMT_USERFORMAT;
+ ctx.abbrev = log->abbrev;
+ ctx.print_email_subject = 1;
+ ctx.date_mode = log->date_mode;
+ ctx.output_encoding = get_log_output_encoding();
+
+ if (!log->summary) {
+ if (log->user_format)
+ pretty_print_commit(&ctx, commit, &oneline);
+ else
+ format_commit_message(commit, "%s", &oneline, &ctx);
+ }
+ oneline_str = oneline.len ? oneline.buf : "<none>";
+
+ insert_records_from_trailers(log, &dups, commit, &ctx, oneline_str);
+ insert_records_from_format(log, &dups, commit, &ctx, oneline_str);
+
+ strset_clear(&dups);
+ strbuf_release(&oneline);
+}
+
+static void get_from_rev(struct rev_info *rev, struct shortlog *log)
+{
+ struct commit *commit;
+
+ if (prepare_revision_walk(rev))
+ die(_("revision walk setup failed"));
+ while ((commit = get_revision(rev)) != NULL)
+ shortlog_add_commit(log, commit);
+}
+
+static int parse_uint(char const **arg, int comma, int defval)
+{
+ unsigned long ul;
+ int ret;
+ char *endp;
+
+ ul = strtoul(*arg, &endp, 10);
+ if (*endp && *endp != comma)
+ return -1;
+ if (ul > INT_MAX)
+ return -1;
+ ret = *arg == endp ? defval : (int)ul;
+ *arg = *endp ? endp + 1 : endp;
+ return ret;
+}
+
+static const char wrap_arg_usage[] = "-w[<width>[,<indent1>[,<indent2>]]]";
+#define DEFAULT_WRAPLEN 76
+#define DEFAULT_INDENT1 6
+#define DEFAULT_INDENT2 9
+
+static int parse_wrap_args(const struct option *opt, const char *arg, int unset)
+{
+ struct shortlog *log = opt->value;
+
+ log->wrap_lines = !unset;
+ if (unset)
+ return 0;
+ if (!arg) {
+ log->wrap = DEFAULT_WRAPLEN;
+ log->in1 = DEFAULT_INDENT1;
+ log->in2 = DEFAULT_INDENT2;
+ return 0;
+ }
+
+ log->wrap = parse_uint(&arg, ',', DEFAULT_WRAPLEN);
+ log->in1 = parse_uint(&arg, ',', DEFAULT_INDENT1);
+ log->in2 = parse_uint(&arg, '\0', DEFAULT_INDENT2);
+ if (log->wrap < 0 || log->in1 < 0 || log->in2 < 0)
+ return error(wrap_arg_usage);
+ if (log->wrap &&
+ ((log->in1 && log->wrap <= log->in1) ||
+ (log->in2 && log->wrap <= log->in2)))
+ return error(wrap_arg_usage);
+ return 0;
+}
+
+static int parse_group_option(const struct option *opt, const char *arg, int unset)
+{
+ struct shortlog *log = opt->value;
+ const char *field;
+
+ if (unset) {
+ log->groups = 0;
+ string_list_clear(&log->trailers, 0);
+ string_list_clear(&log->format, 0);
+ } else if (!strcasecmp(arg, "author"))
+ log->groups |= SHORTLOG_GROUP_AUTHOR;
+ else if (!strcasecmp(arg, "committer"))
+ log->groups |= SHORTLOG_GROUP_COMMITTER;
+ else if (skip_prefix(arg, "trailer:", &field)) {
+ log->groups |= SHORTLOG_GROUP_TRAILER;
+ string_list_append(&log->trailers, field);
+ } else if (skip_prefix(arg, "format:", &field)) {
+ log->groups |= SHORTLOG_GROUP_FORMAT;
+ string_list_append(&log->format, field);
+ } else if (strchr(arg, '%')) {
+ log->groups |= SHORTLOG_GROUP_FORMAT;
+ string_list_append(&log->format, arg);
+ } else {
+ return error(_("unknown group type: %s"), arg);
+ }
+
+ return 0;
+}
+
+
+void shortlog_init(struct shortlog *log)
+{
+ memset(log, 0, sizeof(*log));
+
+ read_mailmap(&log->mailmap);
+
+ log->list.strdup_strings = 1;
+ log->wrap = DEFAULT_WRAPLEN;
+ log->in1 = DEFAULT_INDENT1;
+ log->in2 = DEFAULT_INDENT2;
+ log->trailers.strdup_strings = 1;
+ log->trailers.cmp = strcasecmp;
+ log->format.strdup_strings = 1;
+}
+
+void shortlog_finish_setup(struct shortlog *log)
+{
+ if (log->groups & SHORTLOG_GROUP_AUTHOR)
+ string_list_append(&log->format,
+ log->email ? "%aN <%aE>" : "%aN");
+ if (log->groups & SHORTLOG_GROUP_COMMITTER)
+ string_list_append(&log->format,
+ log->email ? "%cN <%cE>" : "%cN");
+
+ string_list_sort(&log->trailers);
+}
+
+int cmd_shortlog(int argc, const char **argv, const char *prefix)
+{
+ struct shortlog log = { STRING_LIST_INIT_NODUP };
+ struct rev_info rev;
+ int nongit = !startup_info->have_repository;
+
+ const struct option options[] = {
+ OPT_BIT('c', "committer", &log.groups,
+ N_("group by committer rather than author"),
+ SHORTLOG_GROUP_COMMITTER),
+ OPT_BOOL('n', "numbered", &log.sort_by_number,
+ N_("sort output according to the number of commits per author")),
+ OPT_BOOL('s', "summary", &log.summary,
+ N_("suppress commit descriptions, only provides commit count")),
+ OPT_BOOL('e', "email", &log.email,
+ N_("show the email address of each author")),
+ OPT_CALLBACK_F('w', NULL, &log, N_("<w>[,<i1>[,<i2>]]"),
+ N_("linewrap output"), PARSE_OPT_OPTARG,
+ &parse_wrap_args),
+ OPT_CALLBACK(0, "group", &log, N_("field"),
+ N_("group by field"), parse_group_option),
+ OPT_END(),
+ };
+
+ struct parse_opt_ctx_t ctx;
+
+ git_config(git_default_config, NULL);
+ shortlog_init(&log);
+ repo_init_revisions(the_repository, &rev, prefix);
+ parse_options_start(&ctx, argc, argv, prefix, options,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0);
+
+ for (;;) {
+ switch (parse_options_step(&ctx, options, shortlog_usage)) {
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_UNKNOWN:
+ break;
+ case PARSE_OPT_HELP:
+ case PARSE_OPT_ERROR:
+ case PARSE_OPT_SUBCOMMAND:
+ exit(129);
+ case PARSE_OPT_COMPLETE:
+ exit(0);
+ case PARSE_OPT_DONE:
+ goto parse_done;
+ }
+ parse_revision_opt(&rev, &ctx, options, shortlog_usage);
+ }
+parse_done:
+ revision_opts_finish(&rev);
+ argc = parse_options_end(&ctx);
+
+ if (nongit && argc > 1) {
+ error(_("too many arguments given outside repository"));
+ usage_with_options(shortlog_usage, options);
+ }
+
+ if (setup_revisions(argc, argv, &rev, NULL) != 1) {
+ error(_("unrecognized argument: %s"), argv[1]);
+ usage_with_options(shortlog_usage, options);
+ }
+
+ log.user_format = rev.commit_format == CMIT_FMT_USERFORMAT;
+ log.abbrev = rev.abbrev;
+ log.file = rev.diffopt.file;
+ log.date_mode = rev.date_mode;
+
+ if (!log.groups)
+ log.groups = SHORTLOG_GROUP_AUTHOR;
+ shortlog_finish_setup(&log);
+
+ /* assume HEAD if from a tty */
+ if (!nongit && !rev.pending.nr && isatty(0))
+ add_head_to_pending(&rev);
+ if (rev.pending.nr == 0) {
+ if (isatty(0))
+ fprintf(stderr, _("(reading log message from standard input)\n"));
+ read_from_stdin(&log);
+ }
+ else
+ get_from_rev(&rev, &log);
+
+ release_revisions(&rev);
+
+ shortlog_output(&log);
+ if (log.file != stdout)
+ fclose(log.file);
+ return 0;
+}
+
+static void add_wrapped_shortlog_msg(struct strbuf *sb, const char *s,
+ const struct shortlog *log)
+{
+ strbuf_add_wrapped_text(sb, s, log->in1, log->in2, log->wrap);
+ strbuf_addch(sb, '\n');
+}
+
+void shortlog_output(struct shortlog *log)
+{
+ size_t i, j;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (log->sort_by_number)
+ STABLE_QSORT(log->list.items, log->list.nr,
+ log->summary ? compare_by_counter : compare_by_list);
+ for (i = 0; i < log->list.nr; i++) {
+ const struct string_list_item *item = &log->list.items[i];
+ if (log->summary) {
+ fprintf(log->file, "%6d\t%s\n",
+ (int)UTIL_TO_INT(item), item->string);
+ } else {
+ struct string_list *onelines = item->util;
+ fprintf(log->file, "%s (%"PRIuMAX"):\n",
+ item->string, (uintmax_t)onelines->nr);
+ for (j = onelines->nr; j >= 1; j--) {
+ const char *msg = onelines->items[j - 1].string;
+
+ if (log->wrap_lines) {
+ strbuf_reset(&sb);
+ add_wrapped_shortlog_msg(&sb, msg, log);
+ fwrite(sb.buf, sb.len, 1, log->file);
+ }
+ else
+ fprintf(log->file, " %s\n", msg);
+ }
+ putc('\n', log->file);
+ onelines->strdup_strings = 1;
+ string_list_clear(onelines, 0);
+ free(onelines);
+ }
+
+ log->list.items[i].util = NULL;
+ }
+
+ strbuf_release(&sb);
+ log->list.strdup_strings = 1;
+ string_list_clear(&log->list, 1);
+ clear_mailmap(&log->mailmap);
+ string_list_clear(&log->format, 0);
+}
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
new file mode 100644
index 0000000..c013aba
--- /dev/null
+++ b/builtin/show-branch.c
@@ -0,0 +1,960 @@
+#include "cache.h"
+#include "config.h"
+#include "pretty.h"
+#include "refs.h"
+#include "builtin.h"
+#include "color.h"
+#include "strvec.h"
+#include "parse-options.h"
+#include "dir.h"
+#include "commit-slab.h"
+#include "date.h"
+
+static const char* show_branch_usage[] = {
+ N_("git show-branch [-a | --all] [-r | --remotes] [--topo-order | --date-order]\n"
+ " [--current] [--color[=<when>] | --no-color] [--sparse]\n"
+ " [--more=<n> | --list | --independent | --merge-base]\n"
+ " [--no-name | --sha1-name] [--topics]\n"
+ " [(<rev> | <glob>)...]"),
+ N_("git show-branch (-g | --reflog)[=<n>[,<base>]] [--list] [<ref>]"),
+ NULL
+};
+
+static int showbranch_use_color = -1;
+
+static struct strvec default_args = STRVEC_INIT;
+
+/*
+ * TODO: convert this use of commit->object.flags to commit-slab
+ * instead to store a pointer to ref name directly. Then use the same
+ * UNINTERESTING definition from revision.h here.
+ */
+#define UNINTERESTING 01
+
+#define REV_SHIFT 2
+#define MAX_REVS (FLAG_BITS - REV_SHIFT) /* should not exceed bits_per_int - REV_SHIFT */
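+
+/*
+ * Sketch of the flag layout: bits 0..REV_SHIFT-1 are internal bookkeeping
+ * (bit 0 is UNINTERESTING) and rev #i owns bit (REV_SHIFT + i), so a commit
+ * reachable from rev #0 and rev #2 would carry flags 0x14 (illustrative).
+ */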
+
+#define DEFAULT_REFLOG 4
+
+static const char *get_color_code(int idx)
+{
+ if (want_color(showbranch_use_color))
+ return column_colors_ansi[idx % column_colors_ansi_max];
+ return "";
+}
+
+static const char *get_color_reset_code(void)
+{
+ if (want_color(showbranch_use_color))
+ return GIT_COLOR_RESET;
+ return "";
+}
+
+static struct commit *interesting(struct commit_list *list)
+{
+ while (list) {
+ struct commit *commit = list->item;
+ list = list->next;
+ if (commit->object.flags & UNINTERESTING)
+ continue;
+ return commit;
+ }
+ return NULL;
+}
+
+struct commit_name {
+ const char *head_name; /* which head's ancestor? */
+ int generation; /* how many parents away from head_name */
+};
+
+define_commit_slab(commit_name_slab, struct commit_name *);
+static struct commit_name_slab name_slab;
+
+static struct commit_name *commit_to_name(struct commit *commit)
+{
+ return *commit_name_slab_at(&name_slab, commit);
+}
+
+
+/* Name the commit as nth generation ancestor of head_name;
+ * we count only the first-parent relationship for naming purposes.
+ */
+static void name_commit(struct commit *commit, const char *head_name, int nth)
+{
+ struct commit_name *name;
+
+ name = *commit_name_slab_at(&name_slab, commit);
+ if (!name) {
+ name = xmalloc(sizeof(*name));
+ *commit_name_slab_at(&name_slab, commit) = name;
+ }
+ name->head_name = head_name;
+ name->generation = nth;
+}
+
+/* Parent is the first parent of the commit. If the commit is
+ * the nth generation ancestor of a head_name, we may name the
+ * parent as the (n+1)th generation ancestor of that same
+ * head_name, if that generation number is better (smaller)
+ * than the name it already has.
+ */
+static void name_parent(struct commit *commit, struct commit *parent)
+{
+ struct commit_name *commit_name = commit_to_name(commit);
+ struct commit_name *parent_name = commit_to_name(parent);
+ if (!commit_name)
+ return;
+ if (!parent_name ||
+ commit_name->generation + 1 < parent_name->generation)
+ name_commit(parent, commit_name->head_name,
+ commit_name->generation + 1);
+}
+
+static int name_first_parent_chain(struct commit *c)
+{
+ int i = 0;
+ while (c) {
+ struct commit *p;
+ if (!commit_to_name(c))
+ break;
+ if (!c->parents)
+ break;
+ p = c->parents->item;
+ if (!commit_to_name(p)) {
+ name_parent(c, p);
+ i++;
+ }
+ else
+ break;
+ c = p;
+ }
+ return i;
+}
+
+static void name_commits(struct commit_list *list,
+ struct commit **rev,
+ char **ref_name,
+ int num_rev)
+{
+ struct commit_list *cl;
+ struct commit *c;
+ int i;
+
+ /* First give names to the given heads */
+ for (cl = list; cl; cl = cl->next) {
+ c = cl->item;
+ if (commit_to_name(c))
+ continue;
+ for (i = 0; i < num_rev; i++) {
+ if (rev[i] == c) {
+ name_commit(c, ref_name[i], 0);
+ break;
+ }
+ }
+ }
+
+ /* Then commits on the first parent ancestry chain */
+ do {
+ i = 0;
+ for (cl = list; cl; cl = cl->next) {
+ i += name_first_parent_chain(cl->item);
+ }
+ } while (i);
+
+ /* Finally, any unnamed commits */
+ do {
+ i = 0;
+ for (cl = list; cl; cl = cl->next) {
+ struct commit_list *parents;
+ struct commit_name *n;
+ int nth;
+ c = cl->item;
+ if (!commit_to_name(c))
+ continue;
+ n = commit_to_name(c);
+ parents = c->parents;
+ nth = 0;
+ while (parents) {
+ struct commit *p = parents->item;
+ struct strbuf newname = STRBUF_INIT;
+ parents = parents->next;
+ nth++;
+ if (commit_to_name(p))
+ continue;
+ switch (n->generation) {
+ case 0:
+ strbuf_addstr(&newname, n->head_name);
+ break;
+ case 1:
+ strbuf_addf(&newname, "%s^", n->head_name);
+ break;
+ default:
+ strbuf_addf(&newname, "%s~%d",
+ n->head_name, n->generation);
+ break;
+ }
+ if (nth == 1)
+ strbuf_addch(&newname, '^');
+ else
+ strbuf_addf(&newname, "^%d", nth);
+ name_commit(p, strbuf_detach(&newname, NULL), 0);
+ i++;
+ name_first_parent_chain(p);
+ }
+ }
+ } while (i);
+}
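+
+/*
+ * Example of the resulting names (illustrative): the second parent of the
+ * commit three first-parent steps below "topic" ends up labelled
+ * "topic~3^2", matching the strbuf_addf() formats above.
+ */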
+
+static int mark_seen(struct commit *commit, struct commit_list **seen_p)
+{
+ if (!commit->object.flags) {
+ commit_list_insert(commit, seen_p);
+ return 1;
+ }
+ return 0;
+}
+
+static void join_revs(struct commit_list **list_p,
+ struct commit_list **seen_p,
+ int num_rev, int extra)
+{
+ int all_mask = ((1u << (REV_SHIFT + num_rev)) - 1);
+ int all_revs = all_mask & ~((1u << REV_SHIFT) - 1);
+
+ while (*list_p) {
+ struct commit_list *parents;
+ int still_interesting = !!interesting(*list_p);
+ struct commit *commit = pop_commit(list_p);
+ int flags = commit->object.flags & all_mask;
+
+ if (!still_interesting && extra <= 0)
+ break;
+
+ mark_seen(commit, seen_p);
+ if ((flags & all_revs) == all_revs)
+ flags |= UNINTERESTING;
+ parents = commit->parents;
+
+ while (parents) {
+ struct commit *p = parents->item;
+ int this_flag = p->object.flags;
+ parents = parents->next;
+ if ((this_flag & flags) == flags)
+ continue;
+ parse_commit(p);
+ if (mark_seen(p, seen_p) && !still_interesting)
+ extra--;
+ p->object.flags |= flags;
+ commit_list_insert_by_date(p, list_p);
+ }
+ }
+
+ /*
+ * Postprocess to complete well-poisoning.
+ *
+ * At this point we have all the commits we have seen in
+ * seen_p list. Mark anything that can be reached from
+ * uninteresting commits not interesting.
+ */
+ for (;;) {
+ int changed = 0;
+ struct commit_list *s;
+ for (s = *seen_p; s; s = s->next) {
+ struct commit *c = s->item;
+ struct commit_list *parents;
+
+ if (((c->object.flags & all_revs) != all_revs) &&
+ !(c->object.flags & UNINTERESTING))
+ continue;
+
+ /* The current commit is either a merge base or
+ * an already uninteresting one. Mark its parents
+ * as uninteresting commits _only_ if they are
+ * already parsed. No reason to find new ones
+ * here.
+ */
+ parents = c->parents;
+ while (parents) {
+ struct commit *p = parents->item;
+ parents = parents->next;
+ if (!(p->object.flags & UNINTERESTING)) {
+ p->object.flags |= UNINTERESTING;
+ changed = 1;
+ }
+ }
+ }
+ if (!changed)
+ break;
+ }
+}
+
+static void show_one_commit(struct commit *commit, int no_name)
+{
+ struct strbuf pretty = STRBUF_INIT;
+ const char *pretty_str = "(unavailable)";
+ struct commit_name *name = commit_to_name(commit);
+
+ if (commit->object.parsed) {
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, &pretty);
+ pretty_str = pretty.buf;
+ }
+ skip_prefix(pretty_str, "[PATCH] ", &pretty_str);
+
+ if (!no_name) {
+ if (name && name->head_name) {
+ printf("[%s", name->head_name);
+ if (name->generation) {
+ if (name->generation == 1)
+ printf("^");
+ else
+ printf("~%d", name->generation);
+ }
+ printf("] ");
+ }
+ else
+ printf("[%s] ",
+ find_unique_abbrev(&commit->object.oid,
+ DEFAULT_ABBREV));
+ }
+ puts(pretty_str);
+ strbuf_release(&pretty);
+}
+
+static char *ref_name[MAX_REVS + 1];
+static int ref_name_cnt;
+
+static const char *find_digit_prefix(const char *s, int *v)
+{
+ const char *p;
+ int ver;
+ char ch;
+
+ for (p = s, ver = 0;
+ '0' <= (ch = *p) && ch <= '9';
+ p++)
+ ver = ver * 10 + ch - '0';
+ *v = ver;
+ return p;
+}
+
+
+static int version_cmp(const char *a, const char *b)
+{
+ while (1) {
+ int va, vb;
+
+ a = find_digit_prefix(a, &va);
+ b = find_digit_prefix(b, &vb);
+ if (va != vb)
+ return va - vb;
+
+ while (1) {
+ int ca = *a;
+ int cb = *b;
+ if ('0' <= ca && ca <= '9')
+ ca = 0;
+ if ('0' <= cb && cb <= '9')
+ cb = 0;
+ if (ca != cb)
+ return ca - cb;
+ if (!ca)
+ break;
+ a++;
+ b++;
+ }
+ if (!*a && !*b)
+ return 0;
+ }
+}
+
+static int compare_ref_name(const void *a_, const void *b_)
+{
+ const char * const*a = a_, * const*b = b_;
+ return version_cmp(*a, *b);
+}
+
+static void sort_ref_range(int bottom, int top)
+{
+ QSORT(ref_name + bottom, top - bottom, compare_ref_name);
+}
+
+static int append_ref(const char *refname, const struct object_id *oid,
+ int allow_dups)
+{
+ struct commit *commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
+ int i;
+
+ if (!commit)
+ return 0;
+
+ if (!allow_dups) {
+ /* Avoid adding the same thing twice */
+ for (i = 0; i < ref_name_cnt; i++)
+ if (!strcmp(refname, ref_name[i]))
+ return 0;
+ }
+ if (MAX_REVS <= ref_name_cnt) {
+ warning(Q_("ignoring %s; cannot handle more than %d ref",
+ "ignoring %s; cannot handle more than %d refs",
+ MAX_REVS), refname, MAX_REVS);
+ return 0;
+ }
+ ref_name[ref_name_cnt++] = xstrdup(refname);
+ ref_name[ref_name_cnt] = NULL;
+ return 0;
+}
+
+static int append_head_ref(const char *refname, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ struct object_id tmp;
+ int ofs = 11;
+ if (!starts_with(refname, "refs/heads/"))
+ return 0;
+ /* If both heads/foo and tags/foo exist, get_sha1 would
+ * get confused.
+ */
+ if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid))
+ ofs = 5;
+ return append_ref(refname + ofs, oid, 0);
+}
+
+static int append_remote_ref(const char *refname, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ struct object_id tmp;
+ int ofs = 13;
+ if (!starts_with(refname, "refs/remotes/"))
+ return 0;
+ /* If both heads/foo and tags/foo exist, get_sha1 would
+ * get confused.
+ */
+ if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid))
+ ofs = 5;
+ return append_ref(refname + ofs, oid, 0);
+}
+
+static int append_tag_ref(const char *refname, const struct object_id *oid,
+ int flag UNUSED, void *cb_data UNUSED)
+{
+ if (!starts_with(refname, "refs/tags/"))
+ return 0;
+ return append_ref(refname + 5, oid, 0);
+}
+
+static const char *match_ref_pattern = NULL;
+static int match_ref_slash = 0;
+
+static int append_matching_ref(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ /* we want to allow pattern hold/<asterisk> to show all
+ * branches under refs/heads/hold/, and v0.99.9? to show
+ * refs/tags/v0.99.9a and friends.
+ */
+ const char *tail;
+ int slash = count_slashes(refname);
+ for (tail = refname; *tail && match_ref_slash < slash; )
+ if (*tail++ == '/')
+ slash--;
+ if (!*tail)
+ return 0;
+ if (wildmatch(match_ref_pattern, tail, 0))
+ return 0;
+ if (starts_with(refname, "refs/heads/"))
+ return append_head_ref(refname, oid, flag, cb_data);
+ if (starts_with(refname, "refs/tags/"))
+ return append_tag_ref(refname, oid, flag, cb_data);
+ return append_ref(refname, oid, 0);
+}
+
+static void snarf_refs(int head, int remotes)
+{
+ if (head) {
+ int orig_cnt = ref_name_cnt;
+
+ for_each_ref(append_head_ref, NULL);
+ sort_ref_range(orig_cnt, ref_name_cnt);
+ }
+ if (remotes) {
+ int orig_cnt = ref_name_cnt;
+
+ for_each_ref(append_remote_ref, NULL);
+ sort_ref_range(orig_cnt, ref_name_cnt);
+ }
+}
+
+static int rev_is_head(const char *head, const char *name)
+{
+ if (!head)
+ return 0;
+ skip_prefix(head, "refs/heads/", &head);
+ if (!skip_prefix(name, "refs/heads/", &name))
+ skip_prefix(name, "heads/", &name);
+ return !strcmp(head, name);
+}
+
+static int show_merge_base(struct commit_list *seen, int num_rev)
+{
+ int all_mask = ((1u << (REV_SHIFT + num_rev)) - 1);
+ int all_revs = all_mask & ~((1u << REV_SHIFT) - 1);
+ int exit_status = 1;
+
+ while (seen) {
+ struct commit *commit = pop_commit(&seen);
+ int flags = commit->object.flags & all_mask;
+ if (!(flags & UNINTERESTING) &&
+ ((flags & all_revs) == all_revs)) {
+ puts(oid_to_hex(&commit->object.oid));
+ exit_status = 0;
+ commit->object.flags |= UNINTERESTING;
+ }
+ }
+ return exit_status;
+}
+
+static int show_independent(struct commit **rev,
+ int num_rev,
+ unsigned int *rev_mask)
+{
+ int i;
+
+ for (i = 0; i < num_rev; i++) {
+ struct commit *commit = rev[i];
+ unsigned int flag = rev_mask[i];
+
+ if (commit->object.flags == flag)
+ puts(oid_to_hex(&commit->object.oid));
+ commit->object.flags |= UNINTERESTING;
+ }
+ return 0;
+}
+
+static void append_one_rev(const char *av)
+{
+ struct object_id revkey;
+ if (!get_oid(av, &revkey)) {
+ append_ref(av, &revkey, 0);
+ return;
+ }
+ if (strpbrk(av, "*?[")) {
+ /* glob style match */
+ int saved_matches = ref_name_cnt;
+
+ match_ref_pattern = av;
+ match_ref_slash = count_slashes(av);
+ for_each_ref(append_matching_ref, NULL);
+ if (saved_matches == ref_name_cnt &&
+ ref_name_cnt < MAX_REVS)
+ error(_("no matching refs with %s"), av);
+ sort_ref_range(saved_matches, ref_name_cnt);
+ return;
+ }
+ die("bad sha1 reference %s", av);
+}
+
+static int git_show_branch_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "showbranch.default")) {
+ if (!value)
+ return config_error_nonbool(var);
+ /*
+ * default_args is now passed to parse_options(), so we need to
+ * mimic the real argv a bit better.
+ */
+ if (!default_args.nr)
+ strvec_push(&default_args, "show-branch");
+ strvec_push(&default_args, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "color.showbranch")) {
+ showbranch_use_color = git_config_colorbool(var, value);
+ return 0;
+ }
+
+ return git_color_default_config(var, value, cb);
+}
+
+static int omit_in_dense(struct commit *commit, struct commit **rev, int n)
+{
+ /* If the commit is the tip of one of the named branches, do
+ * not omit it.
+ * Otherwise, if it is a merge that is reachable from only one
+ * tip, it is not that interesting.
+ */
+ int i, flag, count;
+ for (i = 0; i < n; i++)
+ if (rev[i] == commit)
+ return 0;
+ flag = commit->object.flags;
+ for (i = count = 0; i < n; i++) {
+ if (flag & (1u << (i + REV_SHIFT)))
+ count++;
+ }
+ if (count == 1)
+ return 1;
+ return 0;
+}
+
+static int reflog = 0;
+
+static int parse_reflog_param(const struct option *opt, const char *arg,
+ int unset)
+{
+ char *ep;
+ const char **base = (const char **)opt->value;
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "";
+ reflog = strtoul(arg, &ep, 10);
+ if (*ep == ',')
+ *base = ep + 1;
+ else if (*ep)
+ return error("unrecognized reflog param '%s'", arg);
+ else
+ *base = NULL;
+ if (reflog <= 0)
+ reflog = DEFAULT_REFLOG;
+ return 0;
+}
+
+int cmd_show_branch(int ac, const char **av, const char *prefix)
+{
+ struct commit *rev[MAX_REVS], *commit;
+ char *reflog_msg[MAX_REVS];
+ struct commit_list *list = NULL, *seen = NULL;
+ unsigned int rev_mask[MAX_REVS];
+ int num_rev, i, extra = 0;
+ int all_heads = 0, all_remotes = 0;
+ int all_mask, all_revs;
+ enum rev_sort_order sort_order = REV_SORT_IN_GRAPH_ORDER;
+ char *head;
+ struct object_id head_oid;
+ int merge_base = 0;
+ int independent = 0;
+ int no_name = 0;
+ int sha1_name = 0;
+ int shown_merge_point = 0;
+ int with_current_branch = 0;
+ int head_at = -1;
+ int topics = 0;
+ int dense = 1;
+ const char *reflog_base = NULL;
+ struct option builtin_show_branch_options[] = {
+ OPT_BOOL('a', "all", &all_heads,
+ N_("show remote-tracking and local branches")),
+ OPT_BOOL('r', "remotes", &all_remotes,
+ N_("show remote-tracking branches")),
+ OPT__COLOR(&showbranch_use_color,
+ N_("color '*!+-' corresponding to the branch")),
+ { OPTION_INTEGER, 0, "more", &extra, N_("n"),
+ N_("show <n> more commits after the common ancestor"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t)1 },
+ OPT_SET_INT(0, "list", &extra, N_("synonym to more=-1"), -1),
+ OPT_BOOL(0, "no-name", &no_name, N_("suppress naming strings")),
+ OPT_BOOL(0, "current", &with_current_branch,
+ N_("include the current branch")),
+ OPT_BOOL(0, "sha1-name", &sha1_name,
+ N_("name commits with their object names")),
+ OPT_BOOL(0, "merge-base", &merge_base,
+ N_("show possible merge bases")),
+ OPT_BOOL(0, "independent", &independent,
+ N_("show refs unreachable from any other ref")),
+ OPT_SET_INT(0, "topo-order", &sort_order,
+ N_("show commits in topological order"),
+ REV_SORT_IN_GRAPH_ORDER),
+ OPT_BOOL(0, "topics", &topics,
+ N_("show only commits not on the first branch")),
+ OPT_SET_INT(0, "sparse", &dense,
+ N_("show merges reachable from only one tip"), 0),
+ OPT_SET_INT(0, "date-order", &sort_order,
+ N_("topologically sort, maintaining date order "
+ "where possible"),
+ REV_SORT_BY_COMMIT_DATE),
+ OPT_CALLBACK_F('g', "reflog", &reflog_base, N_("<n>[,<base>]"),
+ N_("show <n> most recent ref-log entries starting at "
+ "base"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ parse_reflog_param),
+ OPT_END()
+ };
+
+ init_commit_name_slab(&name_slab);
+
+ git_config(git_show_branch_config, NULL);
+
+ /* If nothing is specified, try the default first */
+ if (ac == 1 && default_args.nr) {
+ ac = default_args.nr;
+ av = default_args.v;
+ }
+
+ ac = parse_options(ac, av, prefix, builtin_show_branch_options,
+ show_branch_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+ if (all_heads)
+ all_remotes = 1;
+
+ if (extra || reflog) {
+ /* "listing" mode is incompatible with
+ * independent nor merge-base modes.
+ */
+ if (independent || merge_base)
+ usage_with_options(show_branch_usage,
+ builtin_show_branch_options);
+ if (reflog && ((0 < extra) || all_heads || all_remotes))
+ /*
+ * Asking for --more in reflog mode does not
+ * make sense. --list is OK.
+ *
+ * Neither do --all and --remotes.
+ */
+ die(_("options '%s' and '%s' cannot be used together"), "--reflog",
+ "--all/--remotes/--independent/--merge-base");
+ }
+
+ if (with_current_branch && reflog)
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--reflog", "--current");
+
+ /* If nothing is specified, show all branches by default */
+ if (ac <= topics && all_heads + all_remotes == 0)
+ all_heads = 1;
+
+ if (reflog) {
+ struct object_id oid;
+ char *ref;
+ int base = 0;
+ unsigned int flags = 0;
+
+ if (ac == 0) {
+ static const char *fake_av[2];
+
+ fake_av[0] = resolve_refdup("HEAD",
+ RESOLVE_REF_READING, &oid,
+ NULL);
+ fake_av[1] = NULL;
+ av = fake_av;
+ ac = 1;
+ if (!*av)
+ die(_("no branches given, and HEAD is not valid"));
+ }
+ if (ac != 1)
+ die(_("--reflog option needs one branch name"));
+
+ if (MAX_REVS < reflog)
+ die(Q_("only %d entry can be shown at one time.",
+ "only %d entries can be shown at one time.",
+ MAX_REVS), MAX_REVS);
+ if (!dwim_ref(*av, strlen(*av), &oid, &ref, 0))
+ die(_("no such ref %s"), *av);
+
+ /* Has the base been specified? */
+ if (reflog_base) {
+ char *ep;
+ base = strtoul(reflog_base, &ep, 10);
+ if (*ep) {
+ /* Ah, that is a date spec... */
+ timestamp_t at;
+ at = approxidate(reflog_base);
+ read_ref_at(get_main_ref_store(the_repository),
+ ref, flags, at, -1, &oid, NULL,
+ NULL, NULL, &base);
+ }
+ }
+
+ for (i = 0; i < reflog; i++) {
+ char *logmsg;
+ char *nth_desc;
+ const char *msg;
+ char *end;
+ timestamp_t timestamp;
+ int tz;
+
+ if (read_ref_at(get_main_ref_store(the_repository),
+ ref, flags, 0, base + i, &oid, &logmsg,
+ &timestamp, &tz, NULL)) {
+ reflog = i;
+ break;
+ }
+
+ end = strchr(logmsg, '\n');
+ if (end)
+ *end = '\0';
+
+ msg = (*logmsg == '\0') ? "(none)" : logmsg;
+ reflog_msg[i] = xstrfmt("(%s) %s",
+ show_date(timestamp, tz,
+ DATE_MODE(RELATIVE)),
+ msg);
+ free(logmsg);
+
+ nth_desc = xstrfmt("%s@{%d}", *av, base+i);
+ append_ref(nth_desc, &oid, 1);
+ free(nth_desc);
+ }
+ free(ref);
+ }
+ else {
+ while (0 < ac) {
+ append_one_rev(*av);
+ ac--; av++;
+ }
+ if (all_heads + all_remotes)
+ snarf_refs(all_heads, all_remotes);
+ }
+
+ head = resolve_refdup("HEAD", RESOLVE_REF_READING,
+ &head_oid, NULL);
+
+ if (with_current_branch && head) {
+ int has_head = 0;
+ for (i = 0; !has_head && i < ref_name_cnt; i++) {
+ /* We are only interested in adding the branch
+ * HEAD points at.
+ */
+ if (rev_is_head(head, ref_name[i]))
+ has_head++;
+ }
+ if (!has_head) {
+ const char *name = head;
+ skip_prefix(name, "refs/heads/", &name);
+ append_one_rev(name);
+ }
+ }
+
+ if (!ref_name_cnt) {
+ fprintf(stderr, "No revs to be shown.\n");
+ exit(0);
+ }
+
+ for (num_rev = 0; ref_name[num_rev]; num_rev++) {
+ struct object_id revkey;
+ unsigned int flag = 1u << (num_rev + REV_SHIFT);
+
+ if (MAX_REVS <= num_rev)
+ die(Q_("cannot handle more than %d rev.",
+ "cannot handle more than %d revs.",
+ MAX_REVS), MAX_REVS);
+ if (get_oid(ref_name[num_rev], &revkey))
+ die(_("'%s' is not a valid ref."), ref_name[num_rev]);
+ commit = lookup_commit_reference(the_repository, &revkey);
+ if (!commit)
+ die(_("cannot find commit %s (%s)"),
+ ref_name[num_rev], oid_to_hex(&revkey));
+ parse_commit(commit);
+ mark_seen(commit, &seen);
+
+ /* rev#0 uses bit REV_SHIFT, rev#1 uses bit REV_SHIFT+1,
+ * and so on. REV_SHIFT bits from bit 0 are used for
+ * internal bookkeeping.
+ */
+ commit->object.flags |= flag;
+ if (commit->object.flags == flag)
+ commit_list_insert_by_date(commit, &list);
+ rev[num_rev] = commit;
+ }
+ for (i = 0; i < num_rev; i++)
+ rev_mask[i] = rev[i]->object.flags;
+
+ if (0 <= extra)
+ join_revs(&list, &seen, num_rev, extra);
+
+ commit_list_sort_by_date(&seen);
+
+ if (merge_base)
+ return show_merge_base(seen, num_rev);
+
+ if (independent)
+ return show_independent(rev, num_rev, rev_mask);
+
+ /* Show list; --more=-1 means list-only */
+ if (1 < num_rev || extra < 0) {
+ for (i = 0; i < num_rev; i++) {
+ int j;
+ int is_head = rev_is_head(head, ref_name[i]) &&
+ oideq(&head_oid, &rev[i]->object.oid);
+ if (extra < 0)
+ printf("%c [%s] ",
+ is_head ? '*' : ' ', ref_name[i]);
+ else {
+ for (j = 0; j < i; j++)
+ putchar(' ');
+ printf("%s%c%s [%s] ",
+ get_color_code(i),
+ is_head ? '*' : '!',
+ get_color_reset_code(), ref_name[i]);
+ }
+
+ if (!reflog) {
+ /* header lines never need name */
+ show_one_commit(rev[i], 1);
+ }
+ else
+ puts(reflog_msg[i]);
+
+ if (is_head)
+ head_at = i;
+ }
+ if (0 <= extra) {
+ for (i = 0; i < num_rev; i++)
+ putchar('-');
+ putchar('\n');
+ }
+ }
+ if (extra < 0)
+ exit(0);
+
+ /* Sort topologically */
+ sort_in_topological_order(&seen, sort_order);
+
+ /* Give names to commits */
+ if (!sha1_name && !no_name)
+ name_commits(seen, rev, ref_name, num_rev);
+
+ all_mask = ((1u << (REV_SHIFT + num_rev)) - 1);
+ all_revs = all_mask & ~((1u << REV_SHIFT) - 1);
+
+ while (seen) {
+ struct commit *commit = pop_commit(&seen);
+ int this_flag = commit->object.flags;
+ int is_merge_point = ((this_flag & all_revs) == all_revs);
+
+ shown_merge_point |= is_merge_point;
+
+ if (1 < num_rev) {
+ int is_merge = !!(commit->parents &&
+ commit->parents->next);
+ if (topics &&
+ !is_merge_point &&
+ (this_flag & (1u << REV_SHIFT)))
+ continue;
+ if (dense && is_merge &&
+ omit_in_dense(commit, rev, num_rev))
+ continue;
+ for (i = 0; i < num_rev; i++) {
+ int mark;
+ if (!(this_flag & (1u << (i + REV_SHIFT))))
+ mark = ' ';
+ else if (is_merge)
+ mark = '-';
+ else if (i == head_at)
+ mark = '*';
+ else
+ mark = '+';
+ if (mark == ' ')
+ putchar(mark);
+ else
+ printf("%s%c%s",
+ get_color_code(i),
+ mark, get_color_reset_code());
+ }
+ putchar(' ');
+ }
+ show_one_commit(commit, no_name);
+
+ if (shown_merge_point && --extra < 0)
+ break;
+ }
+ return 0;
+}
diff --git a/builtin/show-index.c b/builtin/show-index.c
new file mode 100644
index 0000000..0e0b9fb
--- /dev/null
+++ b/builtin/show-index.c
@@ -0,0 +1,108 @@
+#include "builtin.h"
+#include "cache.h"
+#include "pack.h"
+#include "parse-options.h"
+
+static const char *const show_index_usage[] = {
+ "git show-index [--object-format=<hash-algorithm>]",
+ NULL
+};
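+
+/*
+ * Typical use (illustrative; the pack name is a placeholder): the index is
+ * read from standard input, e.g.
+ *
+ *   git show-index <.git/objects/pack/pack-1234abcd.idx
+ */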
+
+int cmd_show_index(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ unsigned nr;
+ unsigned int version;
+ static unsigned int top_index[256];
+ unsigned hashsz;
+ const char *hash_name = NULL;
+ int hash_algo;
+ const struct option show_index_options[] = {
+ OPT_STRING(0, "object-format", &hash_name, N_("hash-algorithm"),
+ N_("specify the hash algorithm to use")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, show_index_options, show_index_usage, 0);
+
+ if (hash_name) {
+ hash_algo = hash_algo_by_name(hash_name);
+ if (hash_algo == GIT_HASH_UNKNOWN)
+ die(_("Unknown hash algorithm"));
+ repo_set_hash_algo(the_repository, hash_algo);
+ }
+
+ hashsz = the_hash_algo->rawsz;
+
+ if (fread(top_index, 2 * 4, 1, stdin) != 1)
+ die("unable to read header");
+ if (top_index[0] == htonl(PACK_IDX_SIGNATURE)) {
+ version = ntohl(top_index[1]);
+ if (version != 2)
+ die("unknown index version");
+ if (fread(top_index, 256 * 4, 1, stdin) != 1)
+ die("unable to read index");
+ } else {
+ version = 1;
+ if (fread(&top_index[2], 254 * 4, 1, stdin) != 1)
+ die("unable to read index");
+ }
+ nr = 0;
+ for (i = 0; i < 256; i++) {
+ unsigned n = ntohl(top_index[i]);
+ if (n < nr)
+ die("corrupt index file");
+ nr = n;
+ }
+ if (version == 1) {
+ for (i = 0; i < nr; i++) {
+ unsigned int offset, entry[(GIT_MAX_RAWSZ + 4) / sizeof(unsigned int)];
+
+ if (fread(entry, 4 + hashsz, 1, stdin) != 1)
+ die("unable to read entry %u/%u", i, nr);
+ offset = ntohl(entry[0]);
+ printf("%u %s\n", offset, hash_to_hex((void *)(entry+1)));
+ }
+ } else {
+ unsigned off64_nr = 0;
+ struct {
+ struct object_id oid;
+ uint32_t crc;
+ uint32_t off;
+ } *entries;
+ ALLOC_ARRAY(entries, nr);
+ for (i = 0; i < nr; i++) {
+ if (fread(entries[i].oid.hash, hashsz, 1, stdin) != 1)
+ die("unable to read sha1 %u/%u", i, nr);
+ entries[i].oid.algo = hash_algo_by_ptr(the_hash_algo);
+ }
+ for (i = 0; i < nr; i++)
+ if (fread(&entries[i].crc, 4, 1, stdin) != 1)
+ die("unable to read crc %u/%u", i, nr);
+ for (i = 0; i < nr; i++)
+ if (fread(&entries[i].off, 4, 1, stdin) != 1)
+ die("unable to read 32b offset %u/%u", i, nr);
+ for (i = 0; i < nr; i++) {
+ uint64_t offset;
+ uint32_t off = ntohl(entries[i].off);
+ if (!(off & 0x80000000)) {
+ offset = off;
+ } else {
+ uint32_t off64[2];
+ if ((off & 0x7fffffff) != off64_nr)
+ die("inconsistent 64b offset index");
+ if (fread(off64, 8, 1, stdin) != 1)
+ die("unable to read 64b offset %u", off64_nr);
+ offset = (((uint64_t)ntohl(off64[0])) << 32) |
+ ntohl(off64[1]);
+ off64_nr++;
+ }
+ printf("%" PRIuMAX " %s (%08"PRIx32")\n",
+ (uintmax_t) offset,
+ oid_to_hex(&entries[i].oid),
+ ntohl(entries[i].crc));
+ }
+ free(entries);
+ }
+ return 0;
+}
diff --git a/builtin/show-ref.c b/builtin/show-ref.c
new file mode 100644
index 0000000..3af6a53
--- /dev/null
+++ b/builtin/show-ref.c
@@ -0,0 +1,228 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "refs.h"
+#include "object-store.h"
+#include "object.h"
+#include "tag.h"
+#include "string-list.h"
+#include "parse-options.h"
+
+static const char * const show_ref_usage[] = {
+ N_("git show-ref [-q | --quiet] [--verify] [--head] [-d | --dereference]\n"
+ " [-s | --hash[=<n>]] [--abbrev[=<n>]] [--tags]\n"
+ " [--heads] [--] [<pattern>...]"),
+ N_("git show-ref --exclude-existing[=<pattern>]"),
+ NULL
+};
+
+static int deref_tags, show_head, tags_only, heads_only, found_match, verify,
+ quiet, hash_only, abbrev, exclude_arg;
+static const char **pattern;
+static const char *exclude_existing_arg;
+
+static void show_one(const char *refname, const struct object_id *oid)
+{
+ const char *hex;
+ struct object_id peeled;
+
+ if (!has_object_file(oid))
+ die("git show-ref: bad ref %s (%s)", refname,
+ oid_to_hex(oid));
+
+ if (quiet)
+ return;
+
+ hex = find_unique_abbrev(oid, abbrev);
+ if (hash_only)
+ printf("%s\n", hex);
+ else
+ printf("%s %s\n", hex, refname);
+
+ if (!deref_tags)
+ return;
+
+ if (!peel_iterated_oid(oid, &peeled)) {
+ hex = find_unique_abbrev(&peeled, abbrev);
+ printf("%s %s^{}\n", hex, refname);
+ }
+}
+
+static int show_ref(const char *refname, const struct object_id *oid,
+ int flag UNUSED, void *cbdata UNUSED)
+{
+ if (show_head && !strcmp(refname, "HEAD"))
+ goto match;
+
+ if (pattern) {
+ int reflen = strlen(refname);
+ const char **p = pattern, *m;
+ while ((m = *p++) != NULL) {
+ int len = strlen(m);
+ if (len > reflen)
+ continue;
+ if (memcmp(m, refname + reflen - len, len))
+ continue;
+ if (len == reflen)
+ goto match;
+ if (refname[reflen - len - 1] == '/')
+ goto match;
+ }
+ return 0;
+ }
+
+match:
+ found_match++;
+
+ show_one(refname, oid);
+
+ return 0;
+}
+
+static int add_existing(const char *refname,
+ const struct object_id *oid UNUSED,
+ int flag UNUSED, void *cbdata)
+{
+ struct string_list *list = (struct string_list *)cbdata;
+ string_list_insert(list, refname);
+ return 0;
+}
+
+/*
+ * read "^(?:<anything>\s)?<refname>(?:\^\{\})?$" from the standard input,
+ * and
+ * (1) strip "^{}" at the end of line if any;
+ * (2) ignore if match is provided and does not head-match refname;
+ * (3) warn if refname is not a well-formed refname and skip;
+ * (4) ignore if refname is a ref that exists in the local repository;
+ * (5) otherwise output the line.
+ */
+static int exclude_existing(const char *match)
+{
+ static struct string_list existing_refs = STRING_LIST_INIT_DUP;
+ char buf[1024];
+ int matchlen = match ? strlen(match) : 0;
+
+ for_each_ref(add_existing, &existing_refs);
+ while (fgets(buf, sizeof(buf), stdin)) {
+ char *ref;
+ int len = strlen(buf);
+
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[--len] = '\0';
+ if (3 <= len && !strcmp(buf + len - 3, "^{}")) {
+ len -= 3;
+ buf[len] = '\0';
+ }
+ for (ref = buf + len; buf < ref; ref--)
+ if (isspace(ref[-1]))
+ break;
+ if (match) {
+ int reflen = buf + len - ref;
+ if (reflen < matchlen)
+ continue;
+ if (strncmp(ref, match, matchlen))
+ continue;
+ }
+ if (check_refname_format(ref, 0)) {
+ warning("ref '%s' ignored", ref);
+ continue;
+ }
+ if (!string_list_has_string(&existing_refs, ref)) {
+ printf("%s\n", buf);
+ }
+ }
+ return 0;
+}
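+
+/*
+ * Illustrative --exclude-existing exchange: an input line such as
+ * "<sha1> refs/heads/topic^{}" has the trailing "^{}" stripped; if
+ * refs/heads/topic already exists locally the line is dropped, otherwise
+ * the (stripped) line is printed back out.
+ */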
+
+static int hash_callback(const struct option *opt, const char *arg, int unset)
+{
+ hash_only = 1;
+ /* Use full length SHA1 if no argument */
+ if (!arg)
+ return 0;
+ return parse_opt_abbrev_cb(opt, arg, unset);
+}
+
+static int exclude_existing_callback(const struct option *opt, const char *arg,
+ int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ exclude_arg = 1;
+ *(const char **)opt->value = arg;
+ return 0;
+}
+
+static const struct option show_ref_options[] = {
+ OPT_BOOL(0, "tags", &tags_only, N_("only show tags (can be combined with heads)")),
+ OPT_BOOL(0, "heads", &heads_only, N_("only show heads (can be combined with tags)")),
+ OPT_BOOL(0, "verify", &verify, N_("stricter reference checking, "
+ "requires exact ref path")),
+ OPT_HIDDEN_BOOL('h', NULL, &show_head,
+ N_("show the HEAD reference, even if it would be filtered out")),
+ OPT_BOOL(0, "head", &show_head,
+ N_("show the HEAD reference, even if it would be filtered out")),
+ OPT_BOOL('d', "dereference", &deref_tags,
+ N_("dereference tags into object IDs")),
+ OPT_CALLBACK_F('s', "hash", &abbrev, N_("n"),
+ N_("only show SHA1 hash using <n> digits"),
+ PARSE_OPT_OPTARG, &hash_callback),
+ OPT__ABBREV(&abbrev),
+ OPT__QUIET(&quiet,
+ N_("do not print results to stdout (useful with --verify)")),
+ OPT_CALLBACK_F(0, "exclude-existing", &exclude_existing_arg,
+ N_("pattern"), N_("show refs from stdin that aren't in local repository"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG, exclude_existing_callback),
+ OPT_END()
+};
+
+int cmd_show_ref(int argc, const char **argv, const char *prefix)
+{
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, show_ref_options,
+ show_ref_usage, 0);
+
+ if (exclude_arg)
+ return exclude_existing(exclude_existing_arg);
+
+ pattern = argv;
+ if (!*pattern)
+ pattern = NULL;
+
+ if (verify) {
+ if (!pattern)
+ die("--verify requires a reference");
+ while (*pattern) {
+ struct object_id oid;
+
+ if ((starts_with(*pattern, "refs/") || !strcmp(*pattern, "HEAD")) &&
+ !read_ref(*pattern, &oid)) {
+ show_one(*pattern, &oid);
+ }
+ else if (!quiet)
+ die("'%s' - not a valid ref", *pattern);
+ else
+ return 1;
+ pattern++;
+ }
+ return 0;
+ }
+
+ if (show_head)
+ head_ref(show_ref, NULL);
+ if (heads_only || tags_only) {
+ if (heads_only)
+ for_each_fullref_in("refs/heads/", show_ref, NULL);
+ if (tags_only)
+ for_each_fullref_in("refs/tags/", show_ref, NULL);
+ } else {
+ for_each_ref(show_ref, NULL);
+ }
+ if (!found_match) {
+ if (verify && !quiet)
+ die("No match");
+ return 1;
+ }
+ return 0;
+}
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
new file mode 100644
index 0000000..58a2250
--- /dev/null
+++ b/builtin/sparse-checkout.c
@@ -0,0 +1,948 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "parse-options.h"
+#include "pathspec.h"
+#include "repository.h"
+#include "run-command.h"
+#include "strbuf.h"
+#include "string-list.h"
+#include "cache-tree.h"
+#include "lockfile.h"
+#include "resolve-undo.h"
+#include "unpack-trees.h"
+#include "wt-status.h"
+#include "quote.h"
+#include "sparse-index.h"
+#include "worktree.h"
+
+static const char *empty_base = "";
+
+static char const * const builtin_sparse_checkout_usage[] = {
+ N_("git sparse-checkout (init | list | set | add | reapply | disable) [<options>]"),
+ NULL
+};
+
+static void write_patterns_to_file(FILE *fp, struct pattern_list *pl)
+{
+ int i;
+
+ for (i = 0; i < pl->nr; i++) {
+ struct path_pattern *p = pl->patterns[i];
+
+ if (p->flags & PATTERN_FLAG_NEGATIVE)
+ fprintf(fp, "!");
+
+ fprintf(fp, "%s", p->pattern);
+
+ if (p->flags & PATTERN_FLAG_MUSTBEDIR)
+ fprintf(fp, "/");
+
+ fprintf(fp, "\n");
+ }
+}
+
+static char const * const builtin_sparse_checkout_list_usage[] = {
+ "git sparse-checkout list",
+ NULL
+};
+
+static int sparse_checkout_list(int argc, const char **argv, const char *prefix)
+{
+ static struct option builtin_sparse_checkout_list_options[] = {
+ OPT_END(),
+ };
+ struct pattern_list pl;
+ char *sparse_filename;
+ int res;
+
+ if (!core_apply_sparse_checkout)
+ die(_("this worktree is not sparse"));
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_list_options,
+ builtin_sparse_checkout_list_usage, 0);
+
+ memset(&pl, 0, sizeof(pl));
+
+ pl.use_cone_patterns = core_sparse_checkout_cone;
+
+ sparse_filename = get_sparse_checkout_filename();
+ res = add_patterns_from_file_to_list(sparse_filename, "", 0, &pl, NULL, 0);
+ free(sparse_filename);
+
+ if (res < 0) {
+ warning(_("this worktree is not sparse (sparse-checkout file may not exist)"));
+ return 0;
+ }
+
+ if (pl.use_cone_patterns) {
+ int i;
+ struct pattern_entry *pe;
+ struct hashmap_iter iter;
+ struct string_list sl = STRING_LIST_INIT_DUP;
+
+ hashmap_for_each_entry(&pl.recursive_hashmap, &iter, pe, ent) {
+ /* pe->pattern starts with "/", skip it */
+ string_list_insert(&sl, pe->pattern + 1);
+ }
+
+ string_list_sort(&sl);
+
+ for (i = 0; i < sl.nr; i++) {
+ quote_c_style(sl.items[i].string, NULL, stdout, 0);
+ printf("\n");
+ }
+
+ return 0;
+ }
+
+ write_patterns_to_file(stdout, &pl);
+ clear_pattern_list(&pl);
+
+ return 0;
+}
+
+static void clean_tracked_sparse_directories(struct repository *r)
+{
+ int i, was_full = 0;
+ struct strbuf path = STRBUF_INIT;
+ size_t pathlen;
+ struct string_list_item *item;
+ struct string_list sparse_dirs = STRING_LIST_INIT_DUP;
+
+ /*
+ * If we are not using cone mode patterns, then we cannot
+ * delete directories outside of the sparse cone.
+ */
+ if (!r || !r->index || !r->worktree)
+ return;
+ if (init_sparse_checkout_patterns(r->index) ||
+ !r->index->sparse_checkout_patterns->use_cone_patterns)
+ return;
+
+ /*
+ * Use the sparse index as a data structure to assist finding
+ * directories that are safe to delete. This conversion to a
+ * sparse index will not delete directories that contain
+ * conflicted entries or submodules.
+ */
+ if (r->index->sparse_index == INDEX_EXPANDED) {
+ /*
+ * If something, such as a merge conflict or other concern,
+ * prevents us from converting to a sparse index, then do
+ * not try deleting files.
+ */
+ if (convert_to_sparse(r->index, SPARSE_INDEX_MEMORY_ONLY))
+ return;
+ was_full = 1;
+ }
+
+ strbuf_addstr(&path, r->worktree);
+ strbuf_complete(&path, '/');
+ pathlen = path.len;
+
+ /*
+ * Collect directories that have gone out of scope but also
+ * exist on disk, so there is some work to be done. We need to
+ * store the entries in a list before exploring, since that might
+ * expand the sparse-index again.
+ */
+ for (i = 0; i < r->index->cache_nr; i++) {
+ struct cache_entry *ce = r->index->cache[i];
+
+ if (S_ISSPARSEDIR(ce->ce_mode) &&
+ repo_file_exists(r, ce->name))
+ string_list_append(&sparse_dirs, ce->name);
+ }
+
+ for_each_string_list_item(item, &sparse_dirs) {
+ struct dir_struct dir = DIR_INIT;
+ struct pathspec p = { 0 };
+ struct strvec s = STRVEC_INIT;
+
+ strbuf_setlen(&path, pathlen);
+ strbuf_addstr(&path, item->string);
+
+ dir.flags |= DIR_SHOW_IGNORED_TOO;
+
+ setup_standard_excludes(&dir);
+ strvec_push(&s, path.buf);
+
+ parse_pathspec(&p, PATHSPEC_GLOB, 0, NULL, s.v);
+ fill_directory(&dir, r->index, &p);
+
+ if (dir.nr) {
+ warning(_("directory '%s' contains untracked files,"
+ " but is not in the sparse-checkout cone"),
+ item->string);
+ } else if (remove_dir_recursively(&path, 0)) {
+ /*
+ * Removal is "best effort". If something blocks
+ * the deletion, then continue with a warning.
+ */
+ warning(_("failed to remove directory '%s'"),
+ item->string);
+ }
+
+ strvec_clear(&s);
+ clear_pathspec(&p);
+ dir_clear(&dir);
+ }
+
+ string_list_clear(&sparse_dirs, 0);
+ strbuf_release(&path);
+
+ if (was_full)
+ ensure_full_index(r->index);
+}
+
+static int update_working_directory(struct pattern_list *pl)
+{
+ enum update_sparsity_result result;
+ struct unpack_trees_options o;
+ struct lock_file lock_file = LOCK_INIT;
+ struct repository *r = the_repository;
+
+ /* If no branch has been checked out, there are no updates to make. */
+ if (is_index_unborn(r->index))
+ return UPDATE_SPARSITY_SUCCESS;
+
+ r->index->sparse_checkout_patterns = pl;
+
+ memset(&o, 0, sizeof(o));
+ o.verbose_update = isatty(2);
+ o.update = 1;
+ o.head_idx = -1;
+ o.src_index = r->index;
+ o.dst_index = r->index;
+ o.skip_sparse_checkout = 0;
+ o.pl = pl;
+
+ setup_work_tree();
+
+ repo_hold_locked_index(r, &lock_file, LOCK_DIE_ON_ERROR);
+
+ setup_unpack_trees_porcelain(&o, "sparse-checkout");
+ result = update_sparsity(&o);
+ clear_unpack_trees_porcelain(&o);
+
+ if (result == UPDATE_SPARSITY_WARNINGS)
+ /*
+ * We don't do any special handling of warnings from untracked
+ * files in the way or dirty entries that can't be removed.
+ */
+ result = UPDATE_SPARSITY_SUCCESS;
+ if (result == UPDATE_SPARSITY_SUCCESS)
+ write_locked_index(r->index, &lock_file, COMMIT_LOCK);
+ else
+ rollback_lock_file(&lock_file);
+
+ clean_tracked_sparse_directories(r);
+
+ r->index->sparse_checkout_patterns = NULL;
+ return result;
+}
+
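+/*
+ * Return a copy of "pattern" with a backslash placed before each
+ * glob-special character, so the path is matched literally when it is
+ * written to the sparse-checkout file.
+ */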
+static char *escaped_pattern(char *pattern)
+{
+ char *p = pattern;
+ struct strbuf final = STRBUF_INIT;
+
+ while (*p) {
+ if (is_glob_special(*p))
+ strbuf_addch(&final, '\\');
+
+ strbuf_addch(&final, *p);
+ p++;
+ }
+
+ return strbuf_detach(&final, NULL);
+}
+
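+/*
+ * Emit the cone-mode pattern set: match all toplevel files, exclude
+ * every subdirectory, then re-include the immediate contents of each
+ * parent directory and all contents of each selected directory.
+ */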
+static void write_cone_to_file(FILE *fp, struct pattern_list *pl)
+{
+ int i;
+ struct pattern_entry *pe;
+ struct hashmap_iter iter;
+ struct string_list sl = STRING_LIST_INIT_DUP;
+ struct strbuf parent_pattern = STRBUF_INIT;
+
+ hashmap_for_each_entry(&pl->parent_hashmap, &iter, pe, ent) {
+ if (hashmap_get_entry(&pl->recursive_hashmap, pe, ent, NULL))
+ continue;
+
+ if (!hashmap_contains_parent(&pl->recursive_hashmap,
+ pe->pattern,
+ &parent_pattern))
+ string_list_insert(&sl, pe->pattern);
+ }
+
+ string_list_sort(&sl);
+ string_list_remove_duplicates(&sl, 0);
+
+ fprintf(fp, "/*\n!/*/\n");
+
+ for (i = 0; i < sl.nr; i++) {
+ char *pattern = escaped_pattern(sl.items[i].string);
+
+ if (strlen(pattern))
+ fprintf(fp, "%s/\n!%s/*/\n", pattern, pattern);
+ free(pattern);
+ }
+
+ string_list_clear(&sl, 0);
+
+ hashmap_for_each_entry(&pl->recursive_hashmap, &iter, pe, ent) {
+ if (!hashmap_contains_parent(&pl->recursive_hashmap,
+ pe->pattern,
+ &parent_pattern))
+ string_list_insert(&sl, pe->pattern);
+ }
+
+ strbuf_release(&parent_pattern);
+
+ string_list_sort(&sl);
+ string_list_remove_duplicates(&sl, 0);
+
+ for (i = 0; i < sl.nr; i++) {
+ char *pattern = escaped_pattern(sl.items[i].string);
+ fprintf(fp, "%s/\n", pattern);
+ free(pattern);
+ }
+}
+
+static int write_patterns_and_update(struct pattern_list *pl)
+{
+ char *sparse_filename;
+ FILE *fp;
+ int fd;
+ struct lock_file lk = LOCK_INIT;
+ int result;
+
+ sparse_filename = get_sparse_checkout_filename();
+
+ if (safe_create_leading_directories(sparse_filename))
+ die(_("failed to create directory for sparse-checkout file"));
+
+ fd = hold_lock_file_for_update(&lk, sparse_filename,
+ LOCK_DIE_ON_ERROR);
+ free(sparse_filename);
+
+ result = update_working_directory(pl);
+ if (result) {
+ rollback_lock_file(&lk);
+ clear_pattern_list(pl);
+ update_working_directory(NULL);
+ return result;
+ }
+
+ fp = xfdopen(fd, "w");
+
+ if (core_sparse_checkout_cone)
+ write_cone_to_file(fp, pl);
+ else
+ write_patterns_to_file(fp, pl);
+
+ fflush(fp);
+ commit_lock_file(&lk);
+
+ clear_pattern_list(pl);
+
+ return 0;
+}
+
+enum sparse_checkout_mode {
+ MODE_NO_PATTERNS = 0,
+ MODE_ALL_PATTERNS = 1,
+ MODE_CONE_PATTERNS = 2,
+};
+
+static int set_config(enum sparse_checkout_mode mode)
+{
+ /* Update to use worktree config, if not already. */
+ if (init_worktree_config(the_repository)) {
+ error(_("failed to initialize worktree config"));
+ return 1;
+ }
+
+ if (repo_config_set_worktree_gently(the_repository,
+ "core.sparseCheckout",
+ mode ? "true" : "false") ||
+ repo_config_set_worktree_gently(the_repository,
+ "core.sparseCheckoutCone",
+ mode == MODE_CONE_PATTERNS ?
+ "true" : "false"))
+ return 1;
+
+ if (mode == MODE_NO_PATTERNS)
+ return set_sparse_index_config(the_repository, 0);
+
+ return 0;
+}
+
+static int update_modes(int *cone_mode, int *sparse_index)
+{
+ int mode, record_mode;
+
+ /* Determine if we need to record the mode; ensure sparse checkout on */
+ record_mode = (*cone_mode != -1) || !core_apply_sparse_checkout;
+
+ /* If not specified, use previous definition of cone mode */
+ if (*cone_mode == -1 && core_apply_sparse_checkout)
+ *cone_mode = core_sparse_checkout_cone;
+
+ /* Set cone/non-cone mode appropriately */
+ core_apply_sparse_checkout = 1;
+ if (*cone_mode == 1 || *cone_mode == -1) {
+ mode = MODE_CONE_PATTERNS;
+ core_sparse_checkout_cone = 1;
+ } else {
+ mode = MODE_ALL_PATTERNS;
+ core_sparse_checkout_cone = 0;
+ }
+ if (record_mode && set_config(mode))
+ return 1;
+
+ /* Set sparse-index/non-sparse-index mode if specified */
+ if (*sparse_index >= 0) {
+ if (set_sparse_index_config(the_repository, *sparse_index) < 0)
+ die(_("failed to modify sparse-index config"));
+
+ /* force an index rewrite */
+ repo_read_index(the_repository);
+ the_repository->index->updated_workdir = 1;
+
+ if (!*sparse_index)
+ ensure_full_index(the_repository->index);
+ }
+
+ return 0;
+}
+
+static char const * const builtin_sparse_checkout_init_usage[] = {
+ "git sparse-checkout init [--cone] [--[no-]sparse-index]",
+ NULL
+};
+
+static struct sparse_checkout_init_opts {
+ int cone_mode;
+ int sparse_index;
+} init_opts;
+
+static int sparse_checkout_init(int argc, const char **argv, const char *prefix)
+{
+ struct pattern_list pl;
+ char *sparse_filename;
+ int res;
+ struct object_id oid;
+ struct strbuf pattern = STRBUF_INIT;
+
+ static struct option builtin_sparse_checkout_init_options[] = {
+ OPT_BOOL(0, "cone", &init_opts.cone_mode,
+ N_("initialize the sparse-checkout in cone mode")),
+ OPT_BOOL(0, "sparse-index", &init_opts.sparse_index,
+ N_("toggle the use of a sparse index")),
+ OPT_END(),
+ };
+
+ repo_read_index(the_repository);
+
+ init_opts.cone_mode = -1;
+ init_opts.sparse_index = -1;
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_init_options,
+ builtin_sparse_checkout_init_usage, 0);
+
+ if (update_modes(&init_opts.cone_mode, &init_opts.sparse_index))
+ return 1;
+
+ memset(&pl, 0, sizeof(pl));
+
+ sparse_filename = get_sparse_checkout_filename();
+ res = add_patterns_from_file_to_list(sparse_filename, "", 0, &pl, NULL, 0);
+
+ /* If we already have a sparse-checkout file, use it. */
+ if (res >= 0) {
+ free(sparse_filename);
+ return update_working_directory(NULL);
+ }
+
+ if (get_oid("HEAD", &oid)) {
+ FILE *fp;
+
+ /* assume we are in a fresh repo, but update the sparse-checkout file */
+ if (safe_create_leading_directories(sparse_filename))
+ die(_("unable to create leading directories of %s"),
+ sparse_filename);
+ fp = xfopen(sparse_filename, "w");
+ if (!fp)
+ die(_("failed to open '%s'"), sparse_filename);
+
+ free(sparse_filename);
+ fprintf(fp, "/*\n!/*/\n");
+ fclose(fp);
+ return 0;
+ }
+
+ strbuf_addstr(&pattern, "/*");
+ add_pattern(strbuf_detach(&pattern, NULL), empty_base, 0, &pl, 0);
+ strbuf_addstr(&pattern, "!/*/");
+ add_pattern(strbuf_detach(&pattern, NULL), empty_base, 0, &pl, 0);
+ pl.use_cone_patterns = init_opts.cone_mode;
+
+ return write_patterns_and_update(&pl);
+}
+
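+/*
+ * Record "path" as a recursively included directory and add each of
+ * its leading directories to the parent hashmap.
+ */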
+static void insert_recursive_pattern(struct pattern_list *pl, struct strbuf *path)
+{
+ struct pattern_entry *e = xmalloc(sizeof(*e));
+ e->patternlen = path->len;
+ e->pattern = strbuf_detach(path, NULL);
+ hashmap_entry_init(&e->ent, fspathhash(e->pattern));
+
+ hashmap_add(&pl->recursive_hashmap, &e->ent);
+
+ while (e->patternlen) {
+ char *slash = strrchr(e->pattern, '/');
+ char *oldpattern = e->pattern;
+ size_t newlen;
+
+ if (!slash || slash == e->pattern)
+ break;
+
+ newlen = slash - e->pattern;
+ e = xmalloc(sizeof(struct pattern_entry));
+ e->patternlen = newlen;
+ e->pattern = xstrndup(oldpattern, newlen);
+ hashmap_entry_init(&e->ent, fspathhash(e->pattern));
+
+ if (!hashmap_get_entry(&pl->parent_hashmap, e, ent, NULL))
+ hashmap_add(&pl->parent_hashmap, &e->ent);
+ }
+}
+
+static void strbuf_to_cone_pattern(struct strbuf *line, struct pattern_list *pl)
+{
+ strbuf_trim(line);
+
+ strbuf_trim_trailing_dir_sep(line);
+
+ if (strbuf_normalize_path(line))
+ die(_("could not normalize path %s"), line->buf);
+
+ if (!line->len)
+ return;
+
+ if (line->buf[0] != '/')
+ strbuf_insertstr(line, 0, "/");
+
+ insert_recursive_pattern(pl, line);
+}
+
+static void add_patterns_from_input(struct pattern_list *pl,
+ int argc, const char **argv,
+ int use_stdin)
+{
+ int i;
+ if (core_sparse_checkout_cone) {
+ struct strbuf line = STRBUF_INIT;
+
+ hashmap_init(&pl->recursive_hashmap, pl_hashmap_cmp, NULL, 0);
+ hashmap_init(&pl->parent_hashmap, pl_hashmap_cmp, NULL, 0);
+ pl->use_cone_patterns = 1;
+
+ if (use_stdin) {
+ struct strbuf unquoted = STRBUF_INIT;
+ while (!strbuf_getline(&line, stdin)) {
+ if (line.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, line.buf, NULL))
+ die(_("unable to unquote C-style string '%s'"),
+ line.buf);
+
+ strbuf_swap(&unquoted, &line);
+ }
+
+ strbuf_to_cone_pattern(&line, pl);
+ }
+
+ strbuf_release(&unquoted);
+ } else {
+ for (i = 0; i < argc; i++) {
+ strbuf_setlen(&line, 0);
+ strbuf_addstr(&line, argv[i]);
+ strbuf_to_cone_pattern(&line, pl);
+ }
+ }
+ } else {
+ if (use_stdin) {
+ struct strbuf line = STRBUF_INIT;
+
+ while (!strbuf_getline(&line, stdin)) {
+ size_t len;
+ char *buf = strbuf_detach(&line, &len);
+ add_pattern(buf, empty_base, 0, pl, 0);
+ }
+ } else {
+ for (i = 0; i < argc; i++)
+ add_pattern(argv[i], empty_base, 0, pl, 0);
+ }
+ }
+}
+
+enum modify_type {
+ REPLACE,
+ ADD,
+};
+
+static void add_patterns_cone_mode(int argc, const char **argv,
+ struct pattern_list *pl,
+ int use_stdin)
+{
+ struct strbuf buffer = STRBUF_INIT;
+ struct pattern_entry *pe;
+ struct hashmap_iter iter;
+ struct pattern_list existing;
+ char *sparse_filename = get_sparse_checkout_filename();
+
+ add_patterns_from_input(pl, argc, argv, use_stdin);
+
+ memset(&existing, 0, sizeof(existing));
+ existing.use_cone_patterns = core_sparse_checkout_cone;
+
+ if (add_patterns_from_file_to_list(sparse_filename, "", 0,
+ &existing, NULL, 0))
+ die(_("unable to load existing sparse-checkout patterns"));
+ free(sparse_filename);
+
+ if (!existing.use_cone_patterns)
+ die(_("existing sparse-checkout patterns do not use cone mode"));
+
+ hashmap_for_each_entry(&existing.recursive_hashmap, &iter, pe, ent) {
+ if (!hashmap_contains_parent(&pl->recursive_hashmap,
+ pe->pattern, &buffer) ||
+ !hashmap_contains_parent(&pl->parent_hashmap,
+ pe->pattern, &buffer)) {
+ strbuf_reset(&buffer);
+ strbuf_addstr(&buffer, pe->pattern);
+ insert_recursive_pattern(pl, &buffer);
+ }
+ }
+
+ clear_pattern_list(&existing);
+ strbuf_release(&buffer);
+}
+
+static void add_patterns_literal(int argc, const char **argv,
+ struct pattern_list *pl,
+ int use_stdin)
+{
+ char *sparse_filename = get_sparse_checkout_filename();
+ if (add_patterns_from_file_to_list(sparse_filename, "", 0,
+ pl, NULL, 0))
+ die(_("unable to load existing sparse-checkout patterns"));
+ free(sparse_filename);
+ add_patterns_from_input(pl, argc, argv, use_stdin);
+}
+
+static int modify_pattern_list(int argc, const char **argv, int use_stdin,
+ enum modify_type m)
+{
+ int result;
+ int changed_config = 0;
+ struct pattern_list *pl = xcalloc(1, sizeof(*pl));
+
+ switch (m) {
+ case ADD:
+ if (core_sparse_checkout_cone)
+ add_patterns_cone_mode(argc, argv, pl, use_stdin);
+ else
+ add_patterns_literal(argc, argv, pl, use_stdin);
+ break;
+
+ case REPLACE:
+ add_patterns_from_input(pl, argc, argv, use_stdin);
+ break;
+ }
+
+ if (!core_apply_sparse_checkout) {
+ set_config(MODE_ALL_PATTERNS);
+ core_apply_sparse_checkout = 1;
+ changed_config = 1;
+ }
+
+ result = write_patterns_and_update(pl);
+
+ if (result && changed_config)
+ set_config(MODE_NO_PATTERNS);
+
+ clear_pattern_list(pl);
+ free(pl);
+ return result;
+}
+
+static void sanitize_paths(int argc, const char **argv,
+ const char *prefix, int skip_checks)
+{
+ int i;
+
+ if (!argc)
+ return;
+
+ if (prefix && *prefix && core_sparse_checkout_cone) {
+ /*
+ * The args are not pathspecs, so unfortunately we
+ * cannot imitate how cmd_add() uses parse_pathspec().
+ */
+ int prefix_len = strlen(prefix);
+
+ for (i = 0; i < argc; i++)
+ argv[i] = prefix_path(prefix, prefix_len, argv[i]);
+ }
+
+ if (skip_checks)
+ return;
+
+ if (prefix && *prefix && !core_sparse_checkout_cone)
+ die(_("please run from the toplevel directory in non-cone mode"));
+
+ if (core_sparse_checkout_cone) {
+ for (i = 0; i < argc; i++) {
+ if (argv[i][0] == '/')
+ die(_("specify directories rather than patterns (no leading slash)"));
+ if (argv[i][0] == '!')
+ die(_("specify directories rather than patterns. If your directory starts with a '!', pass --skip-checks"));
+ if (strpbrk(argv[i], "*?[]"))
+ die(_("specify directories rather than patterns. If your directory really has any of '*?[]\\' in it, pass --skip-checks"));
+ }
+ }
+
+ for (i = 0; i < argc; i++) {
+ struct cache_entry *ce;
+ struct index_state *index = the_repository->index;
+ int pos = index_name_pos(index, argv[i], strlen(argv[i]));
+
+ if (pos < 0)
+ continue;
+ ce = index->cache[pos];
+ if (S_ISSPARSEDIR(ce->ce_mode))
+ continue;
+
+ if (core_sparse_checkout_cone)
+ die(_("'%s' is not a directory; to treat it as a directory anyway, rerun with --skip-checks"), argv[i]);
+ else
+ warning(_("pass a leading slash before paths such as '%s' if you want a single file (see NON-CONE PROBLEMS in the git-sparse-checkout manual)."), argv[i]);
+ }
+}
+
+static char const * const builtin_sparse_checkout_add_usage[] = {
+ N_("git sparse-checkout add [--skip-checks] (--stdin | <patterns>)"),
+ NULL
+};
+
+static struct sparse_checkout_add_opts {
+ int skip_checks;
+ int use_stdin;
+} add_opts;
+
+static int sparse_checkout_add(int argc, const char **argv, const char *prefix)
+{
+ static struct option builtin_sparse_checkout_add_options[] = {
+ OPT_BOOL_F(0, "skip-checks", &add_opts.skip_checks,
+ N_("skip some sanity checks on the given paths that might give false positives"),
+ PARSE_OPT_NONEG),
+ OPT_BOOL(0, "stdin", &add_opts.use_stdin,
+ N_("read patterns from standard in")),
+ OPT_END(),
+ };
+
+ if (!core_apply_sparse_checkout)
+ die(_("no sparse-checkout to add to"));
+
+ repo_read_index(the_repository);
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_add_options,
+ builtin_sparse_checkout_add_usage,
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+
+ sanitize_paths(argc, argv, prefix, add_opts.skip_checks);
+
+ return modify_pattern_list(argc, argv, add_opts.use_stdin, ADD);
+}
+
+static char const * const builtin_sparse_checkout_set_usage[] = {
+ N_("git sparse-checkout set [--[no-]cone] [--[no-]sparse-index] [--skip-checks] (--stdin | <patterns>)"),
+ NULL
+};
+
+static struct sparse_checkout_set_opts {
+ int cone_mode;
+ int sparse_index;
+ int skip_checks;
+ int use_stdin;
+} set_opts;
+
+static int sparse_checkout_set(int argc, const char **argv, const char *prefix)
+{
+ int default_patterns_nr = 2;
+ const char *default_patterns[] = {"/*", "!/*/", NULL};
+
+ static struct option builtin_sparse_checkout_set_options[] = {
+ OPT_BOOL(0, "cone", &set_opts.cone_mode,
+ N_("initialize the sparse-checkout in cone mode")),
+ OPT_BOOL(0, "sparse-index", &set_opts.sparse_index,
+ N_("toggle the use of a sparse index")),
+ OPT_BOOL_F(0, "skip-checks", &set_opts.skip_checks,
+ N_("skip some sanity checks on the given paths that might give false positives"),
+ PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "stdin", &set_opts.use_stdin,
+ N_("read patterns from standard in"),
+ PARSE_OPT_NONEG),
+ OPT_END(),
+ };
+
+ repo_read_index(the_repository);
+
+ set_opts.cone_mode = -1;
+ set_opts.sparse_index = -1;
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_set_options,
+ builtin_sparse_checkout_set_usage,
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+
+ if (update_modes(&set_opts.cone_mode, &set_opts.sparse_index))
+ return 1;
+
+ /*
+ * Cone mode automatically specifies the toplevel directory. For
+ * non-cone mode, if nothing is specified, manually select just the
+ * top-level directory (much as 'init' would do).
+ */
+ if (!core_sparse_checkout_cone && argc == 0) {
+ argv = default_patterns;
+ argc = default_patterns_nr;
+ } else {
+ sanitize_paths(argc, argv, prefix, set_opts.skip_checks);
+ }
+
+ return modify_pattern_list(argc, argv, set_opts.use_stdin, REPLACE);
+}
+
+static char const * const builtin_sparse_checkout_reapply_usage[] = {
+ "git sparse-checkout reapply [--[no-]cone] [--[no-]sparse-index]",
+ NULL
+};
+
+static struct sparse_checkout_reapply_opts {
+ int cone_mode;
+ int sparse_index;
+} reapply_opts;
+
+static int sparse_checkout_reapply(int argc, const char **argv,
+ const char *prefix)
+{
+ static struct option builtin_sparse_checkout_reapply_options[] = {
+ OPT_BOOL(0, "cone", &reapply_opts.cone_mode,
+ N_("initialize the sparse-checkout in cone mode")),
+ OPT_BOOL(0, "sparse-index", &reapply_opts.sparse_index,
+ N_("toggle the use of a sparse index")),
+ OPT_END(),
+ };
+
+ if (!core_apply_sparse_checkout)
+ die(_("must be in a sparse-checkout to reapply sparsity patterns"));
+
+ reapply_opts.cone_mode = -1;
+ reapply_opts.sparse_index = -1;
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_reapply_options,
+ builtin_sparse_checkout_reapply_usage, 0);
+
+ repo_read_index(the_repository);
+
+ if (update_modes(&reapply_opts.cone_mode, &reapply_opts.sparse_index))
+ return 1;
+
+ return update_working_directory(NULL);
+}
+
+static char const * const builtin_sparse_checkout_disable_usage[] = {
+ "git sparse-checkout disable",
+ NULL
+};
+
+static int sparse_checkout_disable(int argc, const char **argv,
+ const char *prefix)
+{
+ static struct option builtin_sparse_checkout_disable_options[] = {
+ OPT_END(),
+ };
+ struct pattern_list pl;
+ struct strbuf match_all = STRBUF_INIT;
+
+ /*
+ * We do not exit early if !core_apply_sparse_checkout; due to the
+ * ability for users to manually muck things up between
+ *   - direct editing of .git/info/sparse-checkout
+ *   - running read-tree -m u HEAD or update-index --skip-worktree
+ *   - direct toggling of config options
+ * users might end up with an index with SKIP_WORKTREE bit set on
+ * some files and not know how to undo it. So, here we just
+ * forcibly return to a dense checkout regardless of initial state.
+ */
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_disable_options,
+ builtin_sparse_checkout_disable_usage, 0);
+
+ repo_read_index(the_repository);
+
+ memset(&pl, 0, sizeof(pl));
+ hashmap_init(&pl.recursive_hashmap, pl_hashmap_cmp, NULL, 0);
+ hashmap_init(&pl.parent_hashmap, pl_hashmap_cmp, NULL, 0);
+ pl.use_cone_patterns = 0;
+ core_apply_sparse_checkout = 1;
+
+ strbuf_addstr(&match_all, "/*");
+ add_pattern(strbuf_detach(&match_all, NULL), empty_base, 0, &pl, 0);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.sparse_index = 0;
+
+ if (update_working_directory(&pl))
+ die(_("error while refreshing working directory"));
+
+ clear_pattern_list(&pl);
+ return set_config(MODE_NO_PATTERNS);
+}
+
+int cmd_sparse_checkout(int argc, const char **argv, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option builtin_sparse_checkout_options[] = {
+ OPT_SUBCOMMAND("list", &fn, sparse_checkout_list),
+ OPT_SUBCOMMAND("init", &fn, sparse_checkout_init),
+ OPT_SUBCOMMAND("set", &fn, sparse_checkout_set),
+ OPT_SUBCOMMAND("add", &fn, sparse_checkout_add),
+ OPT_SUBCOMMAND("reapply", &fn, sparse_checkout_reapply),
+ OPT_SUBCOMMAND("disable", &fn, sparse_checkout_disable),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_options,
+ builtin_sparse_checkout_usage, 0);
+
+ git_config(git_default_config, NULL);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ return fn(argc, argv, prefix);
+}
diff --git a/builtin/stash.c b/builtin/stash.c
new file mode 100644
index 0000000..62e3671
--- /dev/null
+++ b/builtin/stash.c
@@ -0,0 +1,1863 @@
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "refs.h"
+#include "lockfile.h"
+#include "cache-tree.h"
+#include "unpack-trees.h"
+#include "merge-recursive.h"
+#include "merge-ort-wrappers.h"
+#include "strvec.h"
+#include "run-command.h"
+#include "dir.h"
+#include "entry.h"
+#include "rerere.h"
+#include "revision.h"
+#include "log-tree.h"
+#include "diffcore.h"
+#include "exec-cmd.h"
+#include "reflog.h"
+
+#define INCLUDE_ALL_FILES 2
+
+#define BUILTIN_STASH_LIST_USAGE \
+ N_("git stash list [<log-options>]")
+#define BUILTIN_STASH_SHOW_USAGE \
+ N_("git stash show [-u | --include-untracked | --only-untracked] [<diff-options>] [<stash>]")
+#define BUILTIN_STASH_DROP_USAGE \
+ N_("git stash drop [-q | --quiet] [<stash>]")
+#define BUILTIN_STASH_POP_USAGE \
+ N_("git stash pop [--index] [-q | --quiet] [<stash>]")
+#define BUILTIN_STASH_APPLY_USAGE \
+ N_("git stash apply [--index] [-q | --quiet] [<stash>]")
+#define BUILTIN_STASH_BRANCH_USAGE \
+ N_("git stash branch <branchname> [<stash>]")
+#define BUILTIN_STASH_STORE_USAGE \
+ N_("git stash store [(-m | --message) <message>] [-q | --quiet] <commit>")
+#define BUILTIN_STASH_PUSH_USAGE \
+ N_("git stash [push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]\n" \
+ " [-u | --include-untracked] [-a | --all] [(-m | --message) <message>]\n" \
+ " [--pathspec-from-file=<file> [--pathspec-file-nul]]\n" \
+ " [--] [<pathspec>...]]")
+#define BUILTIN_STASH_SAVE_USAGE \
+ N_("git stash save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]\n" \
+ " [-u | --include-untracked] [-a | --all] [<message>]")
+#define BUILTIN_STASH_CREATE_USAGE \
+ N_("git stash create [<message>]")
+#define BUILTIN_STASH_CLEAR_USAGE \
+ "git stash clear"
+
+static const char * const git_stash_usage[] = {
+ BUILTIN_STASH_LIST_USAGE,
+ BUILTIN_STASH_SHOW_USAGE,
+ BUILTIN_STASH_DROP_USAGE,
+ BUILTIN_STASH_POP_USAGE,
+ BUILTIN_STASH_APPLY_USAGE,
+ BUILTIN_STASH_BRANCH_USAGE,
+ BUILTIN_STASH_PUSH_USAGE,
+ BUILTIN_STASH_SAVE_USAGE,
+ BUILTIN_STASH_CLEAR_USAGE,
+ BUILTIN_STASH_CREATE_USAGE,
+ BUILTIN_STASH_STORE_USAGE,
+ NULL
+};
+
+static const char * const git_stash_list_usage[] = {
+ BUILTIN_STASH_LIST_USAGE,
+ NULL
+};
+
+static const char * const git_stash_show_usage[] = {
+ BUILTIN_STASH_SHOW_USAGE,
+ NULL
+};
+
+static const char * const git_stash_drop_usage[] = {
+ BUILTIN_STASH_DROP_USAGE,
+ NULL
+};
+
+static const char * const git_stash_pop_usage[] = {
+ BUILTIN_STASH_POP_USAGE,
+ NULL
+};
+
+static const char * const git_stash_apply_usage[] = {
+ BUILTIN_STASH_APPLY_USAGE,
+ NULL
+};
+
+static const char * const git_stash_branch_usage[] = {
+ BUILTIN_STASH_BRANCH_USAGE,
+ NULL
+};
+
+static const char * const git_stash_clear_usage[] = {
+ BUILTIN_STASH_CLEAR_USAGE,
+ NULL
+};
+
+static const char * const git_stash_store_usage[] = {
+ BUILTIN_STASH_STORE_USAGE,
+ NULL
+};
+
+static const char * const git_stash_push_usage[] = {
+ BUILTIN_STASH_PUSH_USAGE,
+ NULL
+};
+
+static const char * const git_stash_save_usage[] = {
+ BUILTIN_STASH_SAVE_USAGE,
+ NULL
+};
+
+static const char ref_stash[] = "refs/stash";
+static struct strbuf stash_index_path = STRBUF_INIT;
+
+/*
+ * w_commit is set to the commit containing the working tree
+ * b_commit is set to the base commit
+ * i_commit is set to the commit containing the index tree
+ * u_commit is set to the commit containing the untracked files tree
+ * w_tree is set to the working tree
+ * b_tree is set to the base tree
+ * i_tree is set to the index tree
+ * u_tree is set to the untracked files tree
+ */
+struct stash_info {
+ struct object_id w_commit;
+ struct object_id b_commit;
+ struct object_id i_commit;
+ struct object_id u_commit;
+ struct object_id w_tree;
+ struct object_id b_tree;
+ struct object_id i_tree;
+ struct object_id u_tree;
+ struct strbuf revision;
+ int is_stash_ref;
+ int has_u;
+};
+
+#define STASH_INFO_INIT { \
+ .revision = STRBUF_INIT, \
+}
+
+static void free_stash_info(struct stash_info *info)
+{
+ strbuf_release(&info->revision);
+}
+
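+/*
+ * A stash-like commit must have a base commit as its first parent and
+ * valid working-tree, base, and index trees; die otherwise.
+ */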
+static void assert_stash_like(struct stash_info *info, const char *revision)
+{
+ if (get_oidf(&info->b_commit, "%s^1", revision) ||
+ get_oidf(&info->w_tree, "%s:", revision) ||
+ get_oidf(&info->b_tree, "%s^1:", revision) ||
+ get_oidf(&info->i_tree, "%s^2:", revision))
+ die(_("'%s' is not a stash-like commit"), revision);
+}
+
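+/*
+ * Parse the optional <stash> argument into "info": default to
+ * stash@{0}, treat a bare number <n> as stash@{<n>}, and record
+ * whether the revision refers to the stash ref itself.
+ */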
+static int get_stash_info(struct stash_info *info, int argc, const char **argv)
+{
+ int ret;
+ char *end_of_rev;
+ char *expanded_ref;
+ const char *revision;
+ const char *commit = NULL;
+ struct object_id dummy;
+ struct strbuf symbolic = STRBUF_INIT;
+
+ if (argc > 1) {
+ int i;
+ struct strbuf refs_msg = STRBUF_INIT;
+
+ for (i = 0; i < argc; i++)
+ strbuf_addf(&refs_msg, " '%s'", argv[i]);
+
+ fprintf_ln(stderr, _("Too many revisions specified:%s"),
+ refs_msg.buf);
+ strbuf_release(&refs_msg);
+
+ return -1;
+ }
+
+ if (argc == 1)
+ commit = argv[0];
+
+ if (!commit) {
+ if (!ref_exists(ref_stash)) {
+ fprintf_ln(stderr, _("No stash entries found."));
+ return -1;
+ }
+
+ strbuf_addf(&info->revision, "%s@{0}", ref_stash);
+ } else if (strspn(commit, "0123456789") == strlen(commit)) {
+ strbuf_addf(&info->revision, "%s@{%s}", ref_stash, commit);
+ } else {
+ strbuf_addstr(&info->revision, commit);
+ }
+
+ revision = info->revision.buf;
+
+ if (get_oid(revision, &info->w_commit))
+ return error(_("%s is not a valid reference"), revision);
+
+ assert_stash_like(info, revision);
+
+ info->has_u = !get_oidf(&info->u_tree, "%s^3:", revision);
+
+ end_of_rev = strchrnul(revision, '@');
+ strbuf_add(&symbolic, revision, end_of_rev - revision);
+
+ ret = dwim_ref(symbolic.buf, symbolic.len, &dummy, &expanded_ref, 0);
+ strbuf_release(&symbolic);
+ switch (ret) {
+ case 0: /* Not found, but valid ref */
+ info->is_stash_ref = 0;
+ break;
+ case 1:
+ info->is_stash_ref = !strcmp(expanded_ref, ref_stash);
+ break;
+ default: /* Invalid or ambiguous */
+ break;
+ }
+
+ free(expanded_ref);
+ return !(ret == 0 || ret == 1);
+}
+
+static int do_clear_stash(void)
+{
+ struct object_id obj;
+ if (get_oid(ref_stash, &obj))
+ return 0;
+
+ return delete_ref(NULL, ref_stash, &obj, 0);
+}
+
+static int clear_stash(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_clear_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (argc)
+ return error(_("git stash clear with arguments is "
+ "unimplemented"));
+
+ return do_clear_stash();
+}
+
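+/*
+ * Read the tree "i_tree" into the index with a one-way merge,
+ * optionally updating the working tree, and write the resulting index
+ * back out.
+ */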
+static int reset_tree(struct object_id *i_tree, int update, int reset)
+{
+ int nr_trees = 1;
+ struct unpack_trees_options opts;
+ struct tree_desc t[MAX_UNPACK_TREES];
+ struct tree *tree;
+ struct lock_file lock_file = LOCK_INIT;
+
+ repo_read_index_preload(the_repository, NULL, 0);
+ if (refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL))
+ return -1;
+
+ repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
+
+ memset(&opts, 0, sizeof(opts));
+
+ tree = parse_tree_indirect(i_tree);
+ if (parse_tree(tree))
+ return -1;
+
+ init_tree_desc(t, tree->buffer, tree->size);
+
+ opts.head_idx = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ opts.merge = 1;
+ opts.reset = reset ? UNPACK_RESET_PROTECT_UNTRACKED : 0;
+ opts.update = update;
+ if (update)
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ opts.fn = oneway_merge;
+
+ if (unpack_trees(nr_trees, t, &opts))
+ return -1;
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ return error(_("unable to write new index file"));
+
+ return 0;
+}
+
+static int diff_tree_binary(struct strbuf *out, struct object_id *w_commit)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ const char *w_commit_hex = oid_to_hex(w_commit);
+
+ /*
+ * Diff-tree would not be very hard to replace with a native function,
+ * however it should be done together with apply_cached.
+ */
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "diff-tree", "--binary", NULL);
+ strvec_pushf(&cp.args, "%s^2^..%s^2", w_commit_hex, w_commit_hex);
+
+ return pipe_command(&cp, NULL, 0, out, 0, NULL, 0);
+}
+
+static int apply_cached(struct strbuf *out)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Apply currently only reads either from stdin or a file, thus
+ * apply_all_patches would have to be updated to optionally take a
+ * buffer.
+ */
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "apply", "--cached", NULL);
+ return pipe_command(&cp, out->buf, out->len, NULL, 0, NULL, 0);
+}
+
+static int reset_head(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Reset is overall quite simple, however there is no current public
+ * API for resetting.
+ */
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "reset", "--quiet", "--refresh", NULL);
+
+ return run_command(&cp);
+}
+
+static int is_path_a_directory(const char *path)
+{
+ /*
+ * This function differs from abspath.c:is_directory() in that
+ * here we use lstat() instead of stat(); we do not want to
+ * follow symbolic links here.
+ */
+ struct stat st;
+ return (!lstat(path, &st) && S_ISDIR(st.st_mode));
+}
+
+static void add_diff_to_buf(struct diff_queue_struct *q,
+ struct diff_options *options,
+ void *data)
+{
+ int i;
+
+ for (i = 0; i < q->nr; i++) {
+ if (is_path_a_directory(q->queue[i]->one->path))
+ continue;
+
+ strbuf_addstr(data, q->queue[i]->one->path);
+
+ /* NUL-terminate: will be fed to update-index -z */
+ strbuf_addch(data, '\0');
+ }
+}
+
+static int restore_untracked(struct object_id *u_tree)
+{
+ int res;
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * We need to restore files from a given index, but without
+ * affecting the current index, so we use GIT_INDEX_FILE with
+ * run_command to fork processes that will not interfere.
+ */
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "read-tree");
+ strvec_push(&cp.args, oid_to_hex(u_tree));
+ strvec_pushf(&cp.env, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp)) {
+ remove_path(stash_index_path.buf);
+ return -1;
+ }
+
+ child_process_init(&cp);
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "checkout-index", "--all", NULL);
+ strvec_pushf(&cp.env, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ res = run_command(&cp);
+ remove_path(stash_index_path.buf);
+ return res;
+}
+
+static void unstage_changes_unless_new(struct object_id *orig_tree)
+{
+ /*
+ * When we enter this function, there has been a clean merge of
+ * relevant trees, and the merge logic always stages whatever merges
+ * cleanly. We want to unstage those changes, unless they correspond
+ * to files that didn't exist as of orig_tree.
+ *
+ * However, if any SKIP_WORKTREE path is modified relative to
+ * orig_tree, then we want to clear the SKIP_WORKTREE bit and write
+ * it to the worktree before unstaging.
+ */
+
+ struct checkout state = CHECKOUT_INIT;
+ struct diff_options diff_opts;
+ struct lock_file lock = LOCK_INIT;
+ int i;
+
+ /* If any entries have skip_worktree set, we'll have to check 'em out */
+ state.force = 1;
+ state.quiet = 1;
+ state.refresh_cache = 1;
+ state.istate = &the_index;
+
+ /*
+ * Step 1: get a difference between orig_tree (which corresponds
+ * to the index before a merge was run) and the current index
+ * (reflecting the changes brought in by the merge).
+ */
+ diff_setup(&diff_opts);
+ diff_opts.flags.recursive = 1;
+ diff_opts.detect_rename = 0;
+ diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_setup_done(&diff_opts);
+
+ do_diff_cache(orig_tree, &diff_opts);
+ diffcore_std(&diff_opts);
+
+ /* Iterate over the paths that changed due to the merge... */
+ for (i = 0; i < diff_queued_diff.nr; i++) {
+ struct diff_filepair *p;
+ struct cache_entry *ce;
+ int pos;
+
+ /* Look up the path's position in the current index. */
+ p = diff_queued_diff.queue[i];
+ pos = index_name_pos(&the_index, p->two->path,
+ strlen(p->two->path));
+
+ /*
+ * Step 2: Place changes in the working tree
+ *
+ * Stash is about restoring changes *to the working tree*.
+ * So if the merge successfully got a new version of some
+ * path, but left it out of the working tree, then clear the
+ * SKIP_WORKTREE bit and write it to the working tree.
+ */
+ if (pos >= 0 && ce_skip_worktree(the_index.cache[pos])) {
+ struct stat st;
+
+ ce = the_index.cache[pos];
+ if (!lstat(ce->name, &st)) {
+ /* Conflicting path present; relocate it */
+ struct strbuf new_path = STRBUF_INIT;
+ int fd;
+
+ strbuf_addf(&new_path,
+ "%s.stash.XXXXXX", ce->name);
+ fd = xmkstemp(new_path.buf);
+ close(fd);
+ printf(_("WARNING: Untracked file in way of "
+ "tracked file! Renaming\n "
+ " %s -> %s\n"
+ " to make room.\n"),
+ ce->name, new_path.buf);
+ if (rename(ce->name, new_path.buf))
+ die("Failed to move %s to %s\n",
+ ce->name, new_path.buf);
+ strbuf_release(&new_path);
+ }
+ checkout_entry(ce, &state, NULL, NULL);
+ ce->ce_flags &= ~CE_SKIP_WORKTREE;
+ }
+
+ /*
+ * Step 3: "unstage" changes, as long as they are still tracked
+ */
+ if (p->one->oid_valid) {
+ /*
+ * Path existed in orig_tree; restore index entry
+ * from that tree in order to "unstage" the changes.
+ */
+ int option = ADD_CACHE_OK_TO_REPLACE;
+ if (pos < 0)
+ option = ADD_CACHE_OK_TO_ADD;
+
+ ce = make_cache_entry(&the_index,
+ p->one->mode,
+ &p->one->oid,
+ p->one->path,
+ 0, 0);
+ add_index_entry(&the_index, ce, option);
+ }
+ }
+ diff_flush(&diff_opts);
+
+ /*
+ * Step 4: write the new index to disk
+ */
+ repo_hold_locked_index(the_repository, &lock, LOCK_DIE_ON_ERROR);
+ if (write_locked_index(&the_index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("Unable to write index."));
+}
+
+static int do_apply_stash(const char *prefix, struct stash_info *info,
+ int index, int quiet)
+{
+ int clean, ret;
+ int has_index = index;
+ struct merge_options o;
+ struct object_id c_tree;
+ struct object_id index_tree;
+ struct tree *head, *merge, *merge_base;
+ struct lock_file lock = LOCK_INIT;
+
+ repo_read_index_preload(the_repository, NULL, 0);
+ if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0,
+ NULL, NULL, NULL))
+ return -1;
+
+ if (write_cache_as_tree(&c_tree, 0, NULL))
+ return error(_("cannot apply a stash in the middle of a merge"));
+
+ if (index) {
+ if (oideq(&info->b_tree, &info->i_tree) ||
+ oideq(&c_tree, &info->i_tree)) {
+ has_index = 0;
+ } else {
+ struct strbuf out = STRBUF_INIT;
+
+ if (diff_tree_binary(&out, &info->w_commit)) {
+ strbuf_release(&out);
+ return error(_("could not generate diff %s^!."),
+ oid_to_hex(&info->w_commit));
+ }
+
+ ret = apply_cached(&out);
+ strbuf_release(&out);
+ if (ret)
+ return error(_("conflicts in index. "
+ "Try without --index."));
+
+ discard_index(&the_index);
+ repo_read_index(the_repository);
+ if (write_cache_as_tree(&index_tree, 0, NULL))
+ return error(_("could not save index tree"));
+
+ reset_head();
+ discard_index(&the_index);
+ repo_read_index(the_repository);
+ }
+ }
+
+ init_merge_options(&o, the_repository);
+
+ o.branch1 = "Updated upstream";
+ o.branch2 = "Stashed changes";
+ o.ancestor = "Stash base";
+
+ if (oideq(&info->b_tree, &c_tree))
+ o.branch1 = "Version stash was based on";
+
+ if (quiet)
+ o.verbosity = 0;
+
+ if (o.verbosity >= 3)
+ printf_ln(_("Merging %s with %s"), o.branch1, o.branch2);
+
+ head = lookup_tree(o.repo, &c_tree);
+ merge = lookup_tree(o.repo, &info->w_tree);
+ merge_base = lookup_tree(o.repo, &info->b_tree);
+
+ repo_hold_locked_index(o.repo, &lock, LOCK_DIE_ON_ERROR);
+ clean = merge_ort_nonrecursive(&o, head, merge, merge_base);
+
+ /*
+ * If 'clean' >= 0, reverse the value for 'ret' so 'ret' is 0 when the
+ * merge was clean, and nonzero if the merge was unclean or encountered
+ * an error.
+ */
+ ret = clean >= 0 ? !clean : clean;
+
+ if (ret < 0)
+ rollback_lock_file(&lock);
+ else if (write_locked_index(o.repo->index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ ret = error(_("could not write index"));
+
+ if (ret) {
+ rerere(0);
+
+ if (index)
+ fprintf_ln(stderr, _("Index was not unstashed."));
+
+ goto restore_untracked;
+ }
+
+ if (has_index) {
+ if (reset_tree(&index_tree, 0, 0))
+ ret = -1;
+ } else {
+ unstage_changes_unless_new(&c_tree);
+ }
+
+restore_untracked:
+ if (info->has_u && restore_untracked(&info->u_tree))
+ ret = error(_("could not restore untracked files from stash"));
+
+ if (!quiet) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Status is quite simple and could be replaced with calls to
+ * wt_status in the future, but it adds complexities which may
+ * require more tests.
+ */
+ cp.git_cmd = 1;
+ cp.dir = prefix;
+ strvec_pushf(&cp.env, GIT_WORK_TREE_ENVIRONMENT"=%s",
+ absolute_path(get_git_work_tree()));
+ strvec_pushf(&cp.env, GIT_DIR_ENVIRONMENT"=%s",
+ absolute_path(get_git_dir()));
+ strvec_push(&cp.args, "status");
+ run_command(&cp);
+ }
+
+ return ret;
+}
+
+static int apply_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret = -1;
+ int quiet = 0;
+ int index = 0;
+ struct stash_info info = STASH_INFO_INIT;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "index", &index,
+ N_("attempt to recreate the index")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_apply_usage, 0);
+
+ if (get_stash_info(&info, argc, argv))
+ goto cleanup;
+
+ ret = do_apply_stash(prefix, &info, index, quiet);
+cleanup:
+ free_stash_info(&info);
+ return ret;
+}
+
+static int reject_reflog_ent(struct object_id *ooid UNUSED,
+ struct object_id *noid UNUSED,
+ const char *email UNUSED,
+ timestamp_t timestamp UNUSED,
+ int tz UNUSED, const char *message UNUSED,
+ void *cb_data UNUSED)
+{
+ return 1;
+}
+
+static int reflog_is_empty(const char *refname)
+{
+ return !for_each_reflog_ent(refname, reject_reflog_ent, NULL);
+}
+
+static int do_drop_stash(struct stash_info *info, int quiet)
+{
+ if (!reflog_delete(info->revision.buf,
+ EXPIRE_REFLOGS_REWRITE | EXPIRE_REFLOGS_UPDATE_REF,
+ 0)) {
+ if (!quiet)
+ printf_ln(_("Dropped %s (%s)"), info->revision.buf,
+ oid_to_hex(&info->w_commit));
+ } else {
+ return error(_("%s: Could not drop stash entry"),
+ info->revision.buf);
+ }
+
+ if (reflog_is_empty(ref_stash))
+ do_clear_stash();
+
+ return 0;
+}
+
+static int get_stash_info_assert(struct stash_info *info, int argc,
+ const char **argv)
+{
+ int ret = get_stash_info(info, argc, argv);
+
+ if (ret < 0)
+ return ret;
+
+ if (!info->is_stash_ref)
+ return error(_("'%s' is not a stash reference"), info->revision.buf);
+
+ return 0;
+}
+
+static int drop_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret = -1;
+ int quiet = 0;
+ struct stash_info info = STASH_INFO_INIT;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_drop_usage, 0);
+
+ if (get_stash_info_assert(&info, argc, argv))
+ goto cleanup;
+
+ ret = do_drop_stash(&info, quiet);
+cleanup:
+ free_stash_info(&info);
+ return ret;
+}
+
+static int pop_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret = -1;
+ int index = 0;
+ int quiet = 0;
+ struct stash_info info = STASH_INFO_INIT;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "index", &index,
+ N_("attempt to recreate the index")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_pop_usage, 0);
+
+ if (get_stash_info_assert(&info, argc, argv))
+ goto cleanup;
+
+ if ((ret = do_apply_stash(prefix, &info, index, quiet)))
+ printf_ln(_("The stash entry is kept in case "
+ "you need it again."));
+ else
+ ret = do_drop_stash(&info, quiet);
+
+cleanup:
+ free_stash_info(&info);
+ return ret;
+}
+
+static int branch_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret = -1;
+ const char *branch = NULL;
+ struct stash_info info = STASH_INFO_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_branch_usage, 0);
+
+ if (!argc) {
+ fprintf_ln(stderr, _("No branch name specified"));
+ return -1;
+ }
+
+ branch = argv[0];
+
+ if (get_stash_info(&info, argc - 1, argv + 1))
+ goto cleanup;
+
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "checkout", "-b", NULL);
+ strvec_push(&cp.args, branch);
+ strvec_push(&cp.args, oid_to_hex(&info.b_commit));
+ ret = run_command(&cp);
+ if (!ret)
+ ret = do_apply_stash(prefix, &info, 1, 0);
+ if (!ret && info.is_stash_ref)
+ ret = do_drop_stash(&info, 0);
+
+cleanup:
+ free_stash_info(&info);
+ return ret;
+}
+
+static int list_stash(int argc, const char **argv, const char *prefix)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_list_usage,
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+
+ if (!ref_exists(ref_stash))
+ return 0;
+
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "log", "--format=%gd: %gs", "-g",
+ "--first-parent", NULL);
+ strvec_pushv(&cp.args, argv);
+ strvec_push(&cp.args, ref_stash);
+ strvec_push(&cp.args, "--");
+ return run_command(&cp);
+}
+
+static int show_stat = 1;
+static int show_patch;
+static int show_include_untracked;
+
+static int git_stash_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "stash.showstat")) {
+ show_stat = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "stash.showpatch")) {
+ show_patch = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "stash.showincludeuntracked")) {
+ show_include_untracked = git_config_bool(var, value);
+ return 0;
+ }
+ return git_diff_basic_config(var, value, cb);
+}
+
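+/*
+ * Overlay the stash's untracked-files tree onto the in-memory index so
+ * that a single diff against the stash base shows tracked and
+ * untracked changes together.
+ */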
+static void diff_include_untracked(const struct stash_info *info, struct diff_options *diff_opt)
+{
+ const struct object_id *oid[] = { &info->w_commit, &info->u_tree };
+ struct tree *tree[ARRAY_SIZE(oid)];
+ struct tree_desc tree_desc[ARRAY_SIZE(oid)];
+ struct unpack_trees_options unpack_tree_opt = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(oid); i++) {
+ tree[i] = parse_tree_indirect(oid[i]);
+ if (parse_tree(tree[i]) < 0)
+ die(_("failed to parse tree"));
+ init_tree_desc(&tree_desc[i], tree[i]->buffer, tree[i]->size);
+ }
+
+ unpack_tree_opt.head_idx = -1;
+ unpack_tree_opt.src_index = &the_index;
+ unpack_tree_opt.dst_index = &the_index;
+ unpack_tree_opt.merge = 1;
+ unpack_tree_opt.fn = stash_worktree_untracked_merge;
+
+ if (unpack_trees(ARRAY_SIZE(tree_desc), tree_desc, &unpack_tree_opt))
+ die(_("failed to unpack trees"));
+
+ do_diff_cache(&info->b_commit, diff_opt);
+}
+
+static int show_stash(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ int ret = -1;
+ struct stash_info info = STASH_INFO_INIT;
+ struct rev_info rev;
+ struct strvec stash_args = STRVEC_INIT;
+ struct strvec revision_args = STRVEC_INIT;
+ enum {
+ UNTRACKED_NONE,
+ UNTRACKED_INCLUDE,
+ UNTRACKED_ONLY
+ } show_untracked = show_include_untracked ? UNTRACKED_INCLUDE : UNTRACKED_NONE;
+ struct option options[] = {
+ OPT_SET_INT('u', "include-untracked", &show_untracked,
+ N_("include untracked files in the stash"),
+ UNTRACKED_INCLUDE),
+ OPT_SET_INT_F(0, "only-untracked", &show_untracked,
+ N_("only show untracked files in the stash"),
+ UNTRACKED_ONLY, PARSE_OPT_NONEG),
+ OPT_END()
+ };
+ int do_usage = 0;
+
+ init_diff_ui_defaults();
+ git_config(git_diff_ui_config, NULL);
+ init_revisions(&rev, prefix);
+
+ argc = parse_options(argc, argv, prefix, options, git_stash_show_usage,
+ PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT |
+ PARSE_OPT_KEEP_DASHDASH);
+
+ strvec_push(&revision_args, argv[0]);
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-')
+ strvec_push(&stash_args, argv[i]);
+ else
+ strvec_push(&revision_args, argv[i]);
+ }
+
+ if (get_stash_info(&info, stash_args.nr, stash_args.v))
+ goto cleanup;
+
+ /*
+ * The config settings are applied only if no additional options
+ * were passed.
+ */
+ if (revision_args.nr == 1) {
+ if (show_stat)
+ rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT;
+
+ if (show_patch)
+ rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
+
+ if (!show_stat && !show_patch) {
+ ret = 0;
+ goto cleanup;
+ }
+ }
+
+ argc = setup_revisions(revision_args.nr, revision_args.v, &rev, NULL);
+ if (argc > 1)
+ goto usage;
+ if (!rev.diffopt.output_format) {
+ rev.diffopt.output_format = DIFF_FORMAT_PATCH;
+ diff_setup_done(&rev.diffopt);
+ }
+
+ rev.diffopt.flags.recursive = 1;
+ setup_diff_pager(&rev.diffopt);
+ switch (show_untracked) {
+ case UNTRACKED_NONE:
+ diff_tree_oid(&info.b_commit, &info.w_commit, "", &rev.diffopt);
+ break;
+ case UNTRACKED_ONLY:
+ if (info.has_u)
+ diff_root_tree_oid(&info.u_tree, "", &rev.diffopt);
+ break;
+ case UNTRACKED_INCLUDE:
+ if (info.has_u)
+ diff_include_untracked(&info, &rev.diffopt);
+ else
+ diff_tree_oid(&info.b_commit, &info.w_commit, "", &rev.diffopt);
+ break;
+ }
+ log_tree_diff_flush(&rev);
+
+ ret = diff_result_code(&rev.diffopt, 0);
+cleanup:
+ strvec_clear(&stash_args);
+ free_stash_info(&info);
+ release_revisions(&rev);
+ if (do_usage)
+ usage_with_options(git_stash_show_usage, options);
+ return ret;
+usage:
+ do_usage = 1;
+ goto cleanup;
+}
+
+static int do_store_stash(const struct object_id *w_commit, const char *stash_msg,
+ int quiet)
+{
+ if (!stash_msg)
+ stash_msg = "Created via \"git stash store\".";
+
+ if (update_ref(stash_msg, ref_stash, w_commit, NULL,
+ REF_FORCE_CREATE_REFLOG,
+ quiet ? UPDATE_REFS_QUIET_ON_ERR :
+ UPDATE_REFS_MSG_ON_ERR)) {
+ if (!quiet) {
+ fprintf_ln(stderr, _("Cannot update %s with %s"),
+ ref_stash, oid_to_hex(w_commit));
+ }
+ return -1;
+ }
+
+ return 0;
+}
+
+static int store_stash(int argc, const char **argv, const char *prefix)
+{
+ int quiet = 0;
+ const char *stash_msg = NULL;
+ struct object_id obj;
+ struct object_context dummy;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet")),
+ OPT_STRING('m', "message", &stash_msg, "message",
+ N_("stash message")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_store_usage,
+ PARSE_OPT_KEEP_UNKNOWN_OPT);
+
+ if (argc != 1) {
+ if (!quiet)
+ fprintf_ln(stderr, _("\"git stash store\" requires one "
+ "<commit> argument"));
+ return -1;
+ }
+
+ if (get_oid_with_context(the_repository,
+ argv[0], quiet ? GET_OID_QUIETLY : 0, &obj,
+ &dummy)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot update %s with %s"),
+ ref_stash, argv[0]);
+ return -1;
+ }
+
+ return do_store_stash(&obj, stash_msg, quiet);
+}
+
+static void add_pathspecs(struct strvec *args,
+ const struct pathspec *ps) {
+ int i;
+
+ for (i = 0; i < ps->nr; i++)
+ strvec_push(args, ps->items[i].original);
+}
+
+/*
+ * `untracked_files` will be filled with the names of untracked files.
+ * The return value is:
+ *
+ * = 0 if there are no untracked files
+ * > 0 if there are untracked files
+ */
+static int get_untracked_files(const struct pathspec *ps, int include_untracked,
+ struct strbuf *untracked_files)
+{
+ int i;
+ int found = 0;
+ struct dir_struct dir = DIR_INIT;
+
+ if (include_untracked != INCLUDE_ALL_FILES)
+ setup_standard_excludes(&dir);
+
+ fill_directory(&dir, the_repository->index, ps);
+ for (i = 0; i < dir.nr; i++) {
+ struct dir_entry *ent = dir.entries[i];
+ found++;
+ strbuf_addstr(untracked_files, ent->name);
+ /* NUL-terminate: will be fed to update-index -z */
+ strbuf_addch(untracked_files, '\0');
+ }
+
+ dir_clear(&dir);
+ return found;
+}
+
+/*
+ * The return value of `check_changes_tracked_files()` can be:
+ *
+ * < 0 if there was an error
+ * = 0 if there are no changes.
+ * > 0 if there are changes.
+ */
+static int check_changes_tracked_files(const struct pathspec *ps)
+{
+ int result;
+ struct rev_info rev;
+ struct object_id dummy;
+ int ret = 0;
+
+ /* No initial commit. */
+ if (get_oid("HEAD", &dummy))
+ return -1;
+
+ if (repo_read_index(the_repository) < 0)
+ return -1;
+
+ init_revisions(&rev, NULL);
+ copy_pathspec(&rev.prune_data, ps);
+
+ rev.diffopt.flags.quick = 1;
+ rev.diffopt.flags.ignore_submodules = 1;
+ rev.abbrev = 0;
+
+ add_head_to_pending(&rev);
+ diff_setup_done(&rev.diffopt);
+
+ result = run_diff_index(&rev, 1);
+ if (diff_result_code(&rev.diffopt, result)) {
+ ret = 1;
+ goto done;
+ }
+
+ result = run_diff_files(&rev, 0);
+ if (diff_result_code(&rev.diffopt, result)) {
+ ret = 1;
+ goto done;
+ }
+
+done:
+ release_revisions(&rev);
+ return ret;
+}
+
+/*
+ * The function will fill `untracked_files` with the names of untracked files.
+ * It will return 1 if there were any changes and 0 if there were not.
+ */
+static int check_changes(const struct pathspec *ps, int include_untracked,
+ struct strbuf *untracked_files)
+{
+ int ret = 0;
+ if (check_changes_tracked_files(ps))
+ ret = 1;
+
+ if (include_untracked && get_untracked_files(ps, include_untracked,
+ untracked_files))
+ ret = 1;
+
+ return ret;
+}
+
+static int save_untracked_files(struct stash_info *info, struct strbuf *msg,
+ struct strbuf files)
+{
+ int ret = 0;
+ struct strbuf untracked_msg = STRBUF_INIT;
+ struct child_process cp_upd_index = CHILD_PROCESS_INIT;
+ struct index_state istate = { NULL };
+
+ cp_upd_index.git_cmd = 1;
+ strvec_pushl(&cp_upd_index.args, "update-index", "-z", "--add",
+ "--remove", "--stdin", NULL);
+ strvec_pushf(&cp_upd_index.env, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ strbuf_addf(&untracked_msg, "untracked files on %s\n", msg->buf);
+ if (pipe_command(&cp_upd_index, files.buf, files.len, NULL, 0,
+ NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (write_index_as_tree(&info->u_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (commit_tree(untracked_msg.buf, untracked_msg.len,
+ &info->u_tree, NULL, &info->u_commit, NULL, NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+done:
+ discard_index(&istate);
+ strbuf_release(&untracked_msg);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
+static int stash_staged(struct stash_info *info, struct strbuf *out_patch,
+ int quiet)
+{
+ int ret = 0;
+ struct child_process cp_diff_tree = CHILD_PROCESS_INIT;
+ struct index_state istate = { NULL };
+
+ if (write_index_as_tree(&info->w_tree, &istate, the_repository->index_file,
+ 0, NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_diff_tree.git_cmd = 1;
+ strvec_pushl(&cp_diff_tree.args, "diff-tree", "-p", "-U1", "HEAD",
+ oid_to_hex(&info->w_tree), "--", NULL);
+ if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (!out_patch->len) {
+ if (!quiet)
+ fprintf_ln(stderr, _("No staged changes"));
+ ret = 1;
+ }
+
+done:
+ discard_index(&istate);
+ return ret;
+}
+
+static int stash_patch(struct stash_info *info, const struct pathspec *ps,
+ struct strbuf *out_patch, int quiet)
+{
+ int ret = 0;
+ struct child_process cp_read_tree = CHILD_PROCESS_INIT;
+ struct child_process cp_diff_tree = CHILD_PROCESS_INIT;
+ struct index_state istate = { NULL };
+ char *old_index_env = NULL, *old_repo_index_file;
+
+ remove_path(stash_index_path.buf);
+
+ cp_read_tree.git_cmd = 1;
+ strvec_pushl(&cp_read_tree.args, "read-tree", "HEAD", NULL);
+ strvec_pushf(&cp_read_tree.env, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp_read_tree)) {
+ ret = -1;
+ goto done;
+ }
+
+ /* Find out what the user wants. */
+ old_repo_index_file = the_repository->index_file;
+ the_repository->index_file = stash_index_path.buf;
+ old_index_env = xstrdup_or_null(getenv(INDEX_ENVIRONMENT));
+ setenv(INDEX_ENVIRONMENT, the_repository->index_file, 1);
+
+ ret = run_add_interactive(NULL, "--patch=stash", ps);
+
+ the_repository->index_file = old_repo_index_file;
+ if (old_index_env && *old_index_env)
+ setenv(INDEX_ENVIRONMENT, old_index_env, 1);
+ else
+ unsetenv(INDEX_ENVIRONMENT);
+ FREE_AND_NULL(old_index_env);
+
+ /* State of the working tree. */
+ if (write_index_as_tree(&info->w_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_diff_tree.git_cmd = 1;
+ strvec_pushl(&cp_diff_tree.args, "diff-tree", "-p", "-U1", "HEAD",
+ oid_to_hex(&info->w_tree), "--", NULL);
+ if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (!out_patch->len) {
+ if (!quiet)
+ fprintf_ln(stderr, _("No changes selected"));
+ ret = 1;
+ }
+
+done:
+ discard_index(&istate);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
+static int stash_working_tree(struct stash_info *info, const struct pathspec *ps)
+{
+ int ret = 0;
+ struct rev_info rev;
+ struct child_process cp_upd_index = CHILD_PROCESS_INIT;
+ struct strbuf diff_output = STRBUF_INIT;
+ struct index_state istate = { NULL };
+
+ init_revisions(&rev, NULL);
+ copy_pathspec(&rev.prune_data, ps);
+
+ set_alternate_index_output(stash_index_path.buf);
+ if (reset_tree(&info->i_tree, 0, 0)) {
+ ret = -1;
+ goto done;
+ }
+ set_alternate_index_output(NULL);
+
+ rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = add_diff_to_buf;
+ rev.diffopt.format_callback_data = &diff_output;
+
+ if (repo_read_index_preload(the_repository, &rev.diffopt.pathspec, 0) < 0) {
+ ret = -1;
+ goto done;
+ }
+
+ add_pending_object(&rev, parse_object(the_repository, &info->b_commit),
+ "");
+ if (run_diff_index(&rev, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_upd_index.git_cmd = 1;
+ strvec_pushl(&cp_upd_index.args, "update-index",
+ "--ignore-skip-worktree-entries",
+ "-z", "--add", "--remove", "--stdin", NULL);
+ strvec_pushf(&cp_upd_index.env, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ if (pipe_command(&cp_upd_index, diff_output.buf, diff_output.len,
+ NULL, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (write_index_as_tree(&info->w_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+done:
+ discard_index(&istate);
+ release_revisions(&rev);
+ strbuf_release(&diff_output);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
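+/*
+ * Build the stash commits without storing them anywhere: i_commit
+ * records the index, u_commit (optionally) records untracked files,
+ * and w_commit records the working tree with HEAD, i_commit and
+ * u_commit as parents.  Returns 0 on success, 1 when there is nothing
+ * to stash, and -1 on error.
+ */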
+static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_buf,
+ int include_untracked, int patch_mode, int only_staged,
+ struct stash_info *info, struct strbuf *patch,
+ int quiet)
+{
+ int ret = 0;
+ int flags = 0;
+ int untracked_commit_option = 0;
+ const char *head_short_sha1 = NULL;
+ const char *branch_ref = NULL;
+ const char *branch_name = "(no branch)";
+ struct commit *head_commit = NULL;
+ struct commit_list *parents = NULL;
+ struct strbuf msg = STRBUF_INIT;
+ struct strbuf commit_tree_label = STRBUF_INIT;
+ struct strbuf untracked_files = STRBUF_INIT;
+
+ prepare_fallback_ident("git stash", "git@stash");
+
+ repo_read_index_preload(the_repository, NULL, 0);
+ if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0,
+ NULL, NULL, NULL) < 0) {
+ ret = -1;
+ goto done;
+ }
+
+ if (get_oid("HEAD", &info->b_commit)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("You do not have "
+ "the initial commit yet"));
+ ret = -1;
+ goto done;
+ } else {
+ head_commit = lookup_commit(the_repository, &info->b_commit);
+ }
+
+ if (!check_changes(ps, include_untracked, &untracked_files)) {
+ ret = 1;
+ goto done;
+ }
+
+ branch_ref = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
+ if (flags & REF_ISSYMREF)
+ skip_prefix(branch_ref, "refs/heads/", &branch_name);
+ head_short_sha1 = find_unique_abbrev(&head_commit->object.oid,
+ DEFAULT_ABBREV);
+ strbuf_addf(&msg, "%s: %s ", branch_name, head_short_sha1);
+ pp_commit_easy(CMIT_FMT_ONELINE, head_commit, &msg);
+
+ strbuf_addf(&commit_tree_label, "index on %s\n", msg.buf);
+ commit_list_insert(head_commit, &parents);
+ if (write_cache_as_tree(&info->i_tree, 0, NULL) ||
+ commit_tree(commit_tree_label.buf, commit_tree_label.len,
+ &info->i_tree, parents, &info->i_commit, NULL, NULL)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "index state"));
+ ret = -1;
+ goto done;
+ }
+
+ if (include_untracked) {
+ if (save_untracked_files(info, &msg, untracked_files)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save "
+ "the untracked files"));
+ ret = -1;
+ goto done;
+ }
+ untracked_commit_option = 1;
+ }
+ if (patch_mode) {
+ ret = stash_patch(info, ps, patch, quiet);
+ if (ret < 0) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "worktree state"));
+ goto done;
+ } else if (ret > 0) {
+ goto done;
+ }
+ } else if (only_staged) {
+ ret = stash_staged(info, patch, quiet);
+ if (ret < 0) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "staged state"));
+ goto done;
+ } else if (ret > 0) {
+ goto done;
+ }
+ } else {
+ if (stash_working_tree(info, ps)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "worktree state"));
+ ret = -1;
+ goto done;
+ }
+ }
+
+ if (!stash_msg_buf->len)
+ strbuf_addf(stash_msg_buf, "WIP on %s", msg.buf);
+ else
+ strbuf_insertf(stash_msg_buf, 0, "On %s: ", branch_name);
+
+ /*
+ * `parents` will be empty after calling `commit_tree()`, so there is
+ * no need to call `free_commit_list()`
+ */
+ parents = NULL;
+ if (untracked_commit_option)
+ commit_list_insert(lookup_commit(the_repository,
+ &info->u_commit),
+ &parents);
+ commit_list_insert(lookup_commit(the_repository, &info->i_commit),
+ &parents);
+ commit_list_insert(head_commit, &parents);
+
+ if (commit_tree(stash_msg_buf->buf, stash_msg_buf->len, &info->w_tree,
+ parents, &info->w_commit, NULL, NULL)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot record "
+ "working tree state"));
+ ret = -1;
+ goto done;
+ }
+
+done:
+ strbuf_release(&commit_tree_label);
+ strbuf_release(&msg);
+ strbuf_release(&untracked_files);
+ return ret;
+}
+
+static int create_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct stash_info info = STASH_INFO_INIT;
+ struct pathspec ps;
+
+ /* Starting with argv[1], since argv[0] is "create" */
+ strbuf_join_argv(&stash_msg_buf, argc - 1, ++argv, ' ');
+
+ memset(&ps, 0, sizeof(ps));
+ if (!check_changes_tracked_files(&ps))
+ return 0;
+
+ ret = do_create_stash(&ps, &stash_msg_buf, 0, 0, 0, &info,
+ NULL, 0);
+ if (!ret)
+ printf_ln("%s", oid_to_hex(&info.w_commit));
+
+ free_stash_info(&info);
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
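+/*
+ * Create the stash, store it in the stash reflog, and then reset the
+ * working tree and index according to the requested mode: the default
+ * mode resets hard to HEAD (optionally cleaning untracked files first),
+ * --keep-index additionally restores the stashed index tree, and
+ * --patch/--staged reverse-apply the captured patch instead.
+ */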
+static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int quiet,
+ int keep_index, int patch_mode, int include_untracked, int only_staged)
+{
+ int ret = 0;
+ struct stash_info info = STASH_INFO_INIT;
+ struct strbuf patch = STRBUF_INIT;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct strbuf untracked_files = STRBUF_INIT;
+
+ if (patch_mode && keep_index == -1)
+ keep_index = 1;
+
+ if (patch_mode && include_untracked) {
+ fprintf_ln(stderr, _("Can't use --patch and --include-untracked"
+ " or --all at the same time"));
+ ret = -1;
+ goto done;
+ }
+
+ /* --patch overrides --staged */
+ if (patch_mode)
+ only_staged = 0;
+
+ if (only_staged && include_untracked) {
+ fprintf_ln(stderr, _("Can't use --staged and --include-untracked"
+ " or --all at the same time"));
+ ret = -1;
+ goto done;
+ }
+
+ repo_read_index_preload(the_repository, NULL, 0);
+ if (!include_untracked && ps->nr) {
+ int i;
+ char *ps_matched = xcalloc(ps->nr, 1);
+
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+ for (i = 0; i < the_index.cache_nr; i++)
+ ce_path_match(&the_index, the_index.cache[i], ps,
+ ps_matched);
+
+ if (report_path_error(ps_matched, ps)) {
+ fprintf_ln(stderr, _("Did you forget to 'git add'?"));
+ ret = -1;
+ free(ps_matched);
+ goto done;
+ }
+ free(ps_matched);
+ }
+
+ if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0,
+ NULL, NULL, NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (!check_changes(ps, include_untracked, &untracked_files)) {
+ if (!quiet)
+ printf_ln(_("No local changes to save"));
+ goto done;
+ }
+
+ if (!reflog_exists(ref_stash) && do_clear_stash()) {
+ ret = -1;
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot initialize stash"));
+ goto done;
+ }
+
+ if (stash_msg)
+ strbuf_addstr(&stash_msg_buf, stash_msg);
+ if (do_create_stash(ps, &stash_msg_buf, include_untracked, patch_mode, only_staged,
+ &info, &patch, quiet)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (do_store_stash(&info.w_commit, stash_msg_buf.buf, 1)) {
+ ret = -1;
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current status"));
+ goto done;
+ }
+
+ if (!quiet)
+ printf_ln(_("Saved working directory and index state %s"),
+ stash_msg_buf.buf);
+
+ if (!(patch_mode || only_staged)) {
+ if (include_untracked && !ps->nr) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ if (startup_info->original_cwd) {
+ cp.dir = startup_info->original_cwd;
+ strvec_pushf(&cp.env, "%s=%s",
+ GIT_WORK_TREE_ENVIRONMENT,
+ the_repository->worktree);
+ }
+ strvec_pushl(&cp.args, "clean", "--force",
+ "--quiet", "-d", ":/", NULL);
+ if (include_untracked == INCLUDE_ALL_FILES)
+ strvec_push(&cp.args, "-x");
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ discard_index(&the_index);
+ if (ps->nr) {
+ struct child_process cp_add = CHILD_PROCESS_INIT;
+ struct child_process cp_diff = CHILD_PROCESS_INIT;
+ struct child_process cp_apply = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+
+ cp_add.git_cmd = 1;
+ strvec_push(&cp_add.args, "add");
+ if (!include_untracked)
+ strvec_push(&cp_add.args, "-u");
+ if (include_untracked == INCLUDE_ALL_FILES)
+ strvec_push(&cp_add.args, "--force");
+ strvec_push(&cp_add.args, "--");
+ add_pathspecs(&cp_add.args, ps);
+ if (run_command(&cp_add)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_diff.git_cmd = 1;
+ strvec_pushl(&cp_diff.args, "diff-index", "-p",
+ "--cached", "--binary", "HEAD", "--",
+ NULL);
+ add_pathspecs(&cp_diff.args, ps);
+ if (pipe_command(&cp_diff, NULL, 0, &out, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_apply.git_cmd = 1;
+ strvec_pushl(&cp_apply.args, "apply", "--index",
+ "-R", NULL);
+ if (pipe_command(&cp_apply, out.buf, out.len, NULL, 0,
+ NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+ /* BUG: this nukes untracked files in the way */
+ strvec_pushl(&cp.args, "reset", "--hard", "-q",
+ "--no-recurse-submodules", NULL);
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+
+ if (keep_index == 1 && !is_null_oid(&info.i_tree)) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "checkout", "--no-overlay",
+ oid_to_hex(&info.i_tree), "--", NULL);
+ if (!ps->nr)
+ strvec_push(&cp.args, ":/");
+ else
+ add_pathspecs(&cp.args, ps);
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ goto done;
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "apply", "-R", NULL);
+
+ if (pipe_command(&cp, patch.buf, patch.len, NULL, 0, NULL, 0)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot remove "
+ "worktree changes"));
+ ret = -1;
+ goto done;
+ }
+
+ if (keep_index < 1) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "reset", "-q", "--refresh", "--",
+ NULL);
+ add_pathspecs(&cp.args, ps);
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+done:
+ free_stash_info(&info);
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
+static int push_stash(int argc, const char **argv, const char *prefix,
+ int push_assumed)
+{
+ int force_assume = 0;
+ int keep_index = -1;
+ int only_staged = 0;
+ int patch_mode = 0;
+ int include_untracked = 0;
+ int quiet = 0;
+ int pathspec_file_nul = 0;
+ const char *stash_msg = NULL;
+ const char *pathspec_from_file = NULL;
+ struct pathspec ps;
+ struct option options[] = {
+ OPT_BOOL('k', "keep-index", &keep_index,
+ N_("keep index")),
+ OPT_BOOL('S', "staged", &only_staged,
+ N_("stash staged changes only")),
+ OPT_BOOL('p', "patch", &patch_mode,
+ N_("stash in patch mode")),
+ OPT__QUIET(&quiet, N_("quiet mode")),
+ OPT_BOOL('u', "include-untracked", &include_untracked,
+ N_("include untracked files in stash")),
+ OPT_SET_INT('a', "all", &include_untracked,
+ N_("include ignore files"), 2),
+ OPT_STRING('m', "message", &stash_msg, N_("message"),
+ N_("stash message")),
+ OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
+ OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
+ OPT_END()
+ };
+
+ if (argc) {
+ force_assume = !strcmp(argv[0], "-p");
+ argc = parse_options(argc, argv, prefix, options,
+ push_assumed ? git_stash_usage :
+ git_stash_push_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+ }
+
+ if (argc) {
+ if (!strcmp(argv[0], "--")) {
+ argc--;
+ argv++;
+ } else if (push_assumed && !force_assume) {
+ die("subcommand wasn't specified; 'push' can't be assumed due to unexpected token '%s'",
+ argv[0]);
+ }
+ }
+
+ parse_pathspec(&ps, 0, PATHSPEC_PREFER_FULL | PATHSPEC_PREFIX_ORIGIN,
+ prefix, argv);
+
+ if (pathspec_from_file) {
+ if (patch_mode)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--patch");
+
+ if (only_staged)
+ die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--staged");
+
+ if (ps.nr)
+ die(_("'%s' and pathspec arguments cannot be used together"), "--pathspec-from-file");
+
+ parse_pathspec_file(&ps, 0,
+ PATHSPEC_PREFER_FULL | PATHSPEC_PREFIX_ORIGIN,
+ prefix, pathspec_from_file, pathspec_file_nul);
+ } else if (pathspec_file_nul) {
+ die(_("the option '%s' requires '%s'"), "--pathspec-file-nul", "--pathspec-from-file");
+ }
+
+ return do_push_stash(&ps, stash_msg, quiet, keep_index, patch_mode,
+ include_untracked, only_staged);
+}
+
+static int push_stash_unassumed(int argc, const char **argv, const char *prefix)
+{
+ return push_stash(argc, argv, prefix, 0);
+}
+
+static int save_stash(int argc, const char **argv, const char *prefix)
+{
+ int keep_index = -1;
+ int only_staged = 0;
+ int patch_mode = 0;
+ int include_untracked = 0;
+ int quiet = 0;
+ int ret = 0;
+ const char *stash_msg = NULL;
+ struct pathspec ps;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct option options[] = {
+ OPT_BOOL('k', "keep-index", &keep_index,
+ N_("keep index")),
+ OPT_BOOL('S', "staged", &only_staged,
+ N_("stash staged changes only")),
+ OPT_BOOL('p', "patch", &patch_mode,
+ N_("stash in patch mode")),
+ OPT__QUIET(&quiet, N_("quiet mode")),
+ OPT_BOOL('u', "include-untracked", &include_untracked,
+ N_("include untracked files in stash")),
+ OPT_SET_INT('a', "all", &include_untracked,
+ N_("include ignore files"), 2),
+ OPT_STRING('m', "message", &stash_msg, "message",
+ N_("stash message")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_save_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (argc)
+ stash_msg = strbuf_join_argv(&stash_msg_buf, argc, argv, ' ');
+
+ memset(&ps, 0, sizeof(ps));
+ ret = do_push_stash(&ps, stash_msg, quiet, keep_index,
+ patch_mode, include_untracked, only_staged);
+
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
+int cmd_stash(int argc, const char **argv, const char *prefix)
+{
+ pid_t pid = getpid();
+ const char *index_file;
+ struct strvec args = STRVEC_INIT;
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option options[] = {
+ OPT_SUBCOMMAND("apply", &fn, apply_stash),
+ OPT_SUBCOMMAND("clear", &fn, clear_stash),
+ OPT_SUBCOMMAND("drop", &fn, drop_stash),
+ OPT_SUBCOMMAND("pop", &fn, pop_stash),
+ OPT_SUBCOMMAND("branch", &fn, branch_stash),
+ OPT_SUBCOMMAND("list", &fn, list_stash),
+ OPT_SUBCOMMAND("show", &fn, show_stash),
+ OPT_SUBCOMMAND("store", &fn, store_stash),
+ OPT_SUBCOMMAND("create", &fn, create_stash),
+ OPT_SUBCOMMAND("push", &fn, push_stash_unassumed),
+ OPT_SUBCOMMAND_F("save", &fn, save_stash, PARSE_OPT_NOCOMPLETE),
+ OPT_END()
+ };
+
+ git_config(git_stash_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options, git_stash_usage,
+ PARSE_OPT_SUBCOMMAND_OPTIONAL |
+ PARSE_OPT_KEEP_UNKNOWN_OPT |
+ PARSE_OPT_KEEP_DASHDASH);
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ index_file = get_index_file();
+ strbuf_addf(&stash_index_path, "%s.stash.%" PRIuMAX, index_file,
+ (uintmax_t)pid);
+
+ if (fn)
+ return !!fn(argc, argv, prefix);
+ else if (!argc)
+ return !!push_stash_unassumed(0, NULL, prefix);
+
+ /* Assume 'stash push' */
+ strvec_push(&args, "push");
+ strvec_pushv(&args, argv);
+ return !!push_stash(args.nr, args.v, prefix, 1);
+}
diff --git a/builtin/stripspace.c b/builtin/stripspace.c
new file mode 100644
index 0000000..1e34cf2
--- /dev/null
+++ b/builtin/stripspace.c
@@ -0,0 +1,65 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "parse-options.h"
+#include "strbuf.h"
+
+static void comment_lines(struct strbuf *buf)
+{
+ char *msg;
+ size_t len;
+
+ msg = strbuf_detach(buf, &len);
+ strbuf_add_commented_lines(buf, msg, len);
+ free(msg);
+}
+
+static const char * const stripspace_usage[] = {
+ "git stripspace [-s | --strip-comments]",
+ "git stripspace [-c | --comment-lines]",
+ NULL
+};
+
+enum stripspace_mode {
+ STRIP_DEFAULT = 0,
+ STRIP_COMMENTS,
+ COMMENT_LINES
+};
+
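+/*
+ * Read the whole input from stdin and write the cleaned-up result to
+ * stdout.  The default mode trims trailing whitespace, collapses runs
+ * of blank lines and drops leading/trailing blank lines; -s also drops
+ * comment lines, while -c instead prefixes every line with the comment
+ * character.
+ */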
+int cmd_stripspace(int argc, const char **argv, const char *prefix)
+{
+ struct strbuf buf = STRBUF_INIT;
+ enum stripspace_mode mode = STRIP_DEFAULT;
+ int nongit;
+
+ const struct option options[] = {
+ OPT_CMDMODE('s', "strip-comments", &mode,
+ N_("skip and remove all lines starting with comment character"),
+ STRIP_COMMENTS),
+ OPT_CMDMODE('c', "comment-lines", &mode,
+ N_("prepend comment character and space to each line"),
+ COMMENT_LINES),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, stripspace_usage, 0);
+ if (argc)
+ usage_with_options(stripspace_usage, options);
+
+ if (mode == STRIP_COMMENTS || mode == COMMENT_LINES) {
+ setup_git_directory_gently(&nongit);
+ git_config(git_default_config, NULL);
+ }
+
+ if (strbuf_read(&buf, 0, 1024) < 0)
+ die_errno("could not read the input");
+
+ if (mode == STRIP_DEFAULT || mode == STRIP_COMMENTS)
+ strbuf_stripspace(&buf, mode == STRIP_COMMENTS);
+ else
+ comment_lines(&buf);
+
+ write_or_die(1, buf.buf, buf.len);
+ strbuf_release(&buf);
+ return 0;
+}
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
new file mode 100644
index 0000000..05f2c9b
--- /dev/null
+++ b/builtin/submodule--helper.c
@@ -0,0 +1,3395 @@
+#define USE_THE_INDEX_VARIABLE
+#include "builtin.h"
+#include "repository.h"
+#include "cache.h"
+#include "config.h"
+#include "parse-options.h"
+#include "quote.h"
+#include "pathspec.h"
+#include "dir.h"
+#include "submodule.h"
+#include "submodule-config.h"
+#include "string-list.h"
+#include "run-command.h"
+#include "remote.h"
+#include "refs.h"
+#include "refspec.h"
+#include "connect.h"
+#include "revision.h"
+#include "diffcore.h"
+#include "diff.h"
+#include "object-store.h"
+#include "advice.h"
+#include "branch.h"
+#include "list-objects-filter-options.h"
+
+#define OPT_QUIET (1 << 0)
+#define OPT_CACHED (1 << 1)
+#define OPT_RECURSIVE (1 << 2)
+#define OPT_FORCE (1 << 3)
+
+typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
+ void *cb_data);
+
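+/*
+ * Find the remote to use for "repo": the branch.<name>.remote of the
+ * current branch, falling back to "origin" when HEAD is detached or no
+ * remote is configured.
+ */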
+static int repo_get_default_remote(struct repository *repo, char **default_remote)
+{
+ char *dest = NULL;
+ struct strbuf sb = STRBUF_INIT;
+ struct ref_store *store = get_main_ref_store(repo);
+ const char *refname = refs_resolve_ref_unsafe(store, "HEAD", 0, NULL,
+ NULL);
+
+ if (!refname)
+ return die_message(_("No such ref: %s"), "HEAD");
+
+ /* detached HEAD */
+ if (!strcmp(refname, "HEAD")) {
+ *default_remote = xstrdup("origin");
+ return 0;
+ }
+
+ if (!skip_prefix(refname, "refs/heads/", &refname))
+ return die_message(_("Expecting a full ref name, got %s"),
+ refname);
+
+ strbuf_addf(&sb, "branch.%s.remote", refname);
+ if (repo_config_get_string(repo, sb.buf, &dest))
+ *default_remote = xstrdup("origin");
+ else
+ *default_remote = dest;
+
+ strbuf_release(&sb);
+ return 0;
+}
+
+static int get_default_remote_submodule(const char *module_path, char **default_remote)
+{
+ struct repository subrepo;
+ int ret;
+
+ if (repo_submodule_init(&subrepo, the_repository, module_path,
+ null_oid()) < 0)
+ return die_message(_("could not get a repository handle for submodule '%s'"),
+ module_path);
+ ret = repo_get_default_remote(&subrepo, default_remote);
+ repo_clear(&subrepo);
+
+ return ret;
+}
+
+static char *get_default_remote(void)
+{
+ char *default_remote;
+ int code = repo_get_default_remote(the_repository, &default_remote);
+
+ if (code)
+ exit(code);
+
+ return default_remote;
+}
+
+static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet)
+{
+ char *remoteurl, *resolved_url;
+ char *remote = get_default_remote();
+ struct strbuf remotesb = STRBUF_INIT;
+
+ strbuf_addf(&remotesb, "remote.%s.url", remote);
+ if (git_config_get_string(remotesb.buf, &remoteurl)) {
+ if (!quiet)
+ warning(_("could not look up configuration '%s'. "
+ "Assuming this repository is its own "
+ "authoritative upstream."),
+ remotesb.buf);
+ remoteurl = xgetcwd();
+ }
+ resolved_url = relative_url(remoteurl, rel_url, up_path);
+
+ free(remote);
+ free(remoteurl);
+ strbuf_release(&remotesb);
+
+ return resolved_url;
+}
+
+/* the result should be freed by the caller. */
+static char *get_submodule_displaypath(const char *path, const char *prefix)
+{
+ const char *super_prefix = get_super_prefix();
+
+ if (prefix && super_prefix) {
+ BUG("cannot have prefix '%s' and superprefix '%s'",
+ prefix, super_prefix);
+ } else if (prefix) {
+ struct strbuf sb = STRBUF_INIT;
+ char *displaypath = xstrdup(relative_path(path, prefix, &sb));
+ strbuf_release(&sb);
+ return displaypath;
+ } else if (super_prefix) {
+ return xstrfmt("%s%s", super_prefix, path);
+ } else {
+ return xstrdup(path);
+ }
+}
+
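+/*
+ * Describe the given object inside the submodule, trying plain
+ * "describe" first and then progressively looser variants (--tags,
+ * --contains, --all --always).  Returns the first successful result or
+ * NULL.
+ */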
+static char *compute_rev_name(const char *sub_path, const char* object_id)
+{
+ struct strbuf sb = STRBUF_INIT;
+ const char ***d;
+
+ static const char *describe_bare[] = { NULL };
+
+ static const char *describe_tags[] = { "--tags", NULL };
+
+ static const char *describe_contains[] = { "--contains", NULL };
+
+ static const char *describe_all_always[] = { "--all", "--always", NULL };
+
+ static const char **describe_argv[] = { describe_bare, describe_tags,
+ describe_contains,
+ describe_all_always, NULL };
+
+ for (d = describe_argv; *d; d++) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ prepare_submodule_repo_env(&cp.env);
+ cp.dir = sub_path;
+ cp.git_cmd = 1;
+ cp.no_stderr = 1;
+
+ strvec_push(&cp.args, "describe");
+ strvec_pushv(&cp.args, *d);
+ strvec_push(&cp.args, object_id);
+
+ if (!capture_command(&cp, &sb, 0)) {
+ strbuf_strip_suffix(&sb, "\n");
+ return strbuf_detach(&sb, NULL);
+ }
+ }
+
+ strbuf_release(&sb);
+ return NULL;
+}
+
+struct module_list {
+ const struct cache_entry **entries;
+ int alloc, nr;
+};
+#define MODULE_LIST_INIT { 0 }
+
+static void module_list_release(struct module_list *ml)
+{
+ free(ml->entries);
+}
+
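+/*
+ * Collect the gitlink entries from the index that match the given
+ * pathspec; an entry present in several stages is listed only once.
+ * Returns -1 if some pathspec element did not match anything.
+ */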
+static int module_list_compute(const char **argv,
+ const char *prefix,
+ struct pathspec *pathspec,
+ struct module_list *list)
+{
+ int i, result = 0;
+ char *ps_matched = NULL;
+
+ parse_pathspec(pathspec, 0,
+ PATHSPEC_PREFER_FULL,
+ prefix, argv);
+
+ if (pathspec->nr)
+ ps_matched = xcalloc(pathspec->nr, 1);
+
+ if (repo_read_index(the_repository) < 0)
+ die(_("index file corrupt"));
+
+ for (i = 0; i < the_index.cache_nr; i++) {
+ const struct cache_entry *ce = the_index.cache[i];
+
+ if (!match_pathspec(&the_index, pathspec, ce->name, ce_namelen(ce),
+ 0, ps_matched, 1) ||
+ !S_ISGITLINK(ce->ce_mode))
+ continue;
+
+ ALLOC_GROW(list->entries, list->nr + 1, list->alloc);
+ list->entries[list->nr++] = ce;
+ while (i + 1 < the_index.cache_nr &&
+ !strcmp(ce->name, the_index.cache[i + 1]->name))
+ /*
+ * Skip entries with the same name in different stages
+ * to make sure an entry is returned only once.
+ */
+ i++;
+ }
+
+ if (ps_matched && report_path_error(ps_matched, pathspec))
+ result = -1;
+
+ free(ps_matched);
+
+ return result;
+}
+
+static void module_list_active(struct module_list *list)
+{
+ int i;
+ struct module_list active_modules = MODULE_LIST_INIT;
+
+ for (i = 0; i < list->nr; i++) {
+ const struct cache_entry *ce = list->entries[i];
+
+ if (!is_submodule_active(the_repository, ce->name))
+ continue;
+
+ ALLOC_GROW(active_modules.entries,
+ active_modules.nr + 1,
+ active_modules.alloc);
+ active_modules.entries[active_modules.nr++] = ce;
+ }
+
+ module_list_release(list);
+ *list = active_modules;
+}
+
+static char *get_up_path(const char *path)
+{
+ int i;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (i = count_slashes(path); i; i--)
+ strbuf_addstr(&sb, "../");
+
+ /*
+ * If 'path' does not end with a slash, add one more "../" so that
+ * dir/sub_dir and dir/sub_dir/ produce the same output.
+ */
+ if (!is_dir_sep(path[strlen(path) - 1]))
+ strbuf_addstr(&sb, "../");
+
+ return strbuf_detach(&sb, NULL);
+}
+
+static void for_each_listed_submodule(const struct module_list *list,
+ each_submodule_fn fn, void *cb_data)
+{
+ int i;
+
+ for (i = 0; i < list->nr; i++)
+ fn(list->entries[i], cb_data);
+}
+
+struct foreach_cb {
+ int argc;
+ const char **argv;
+ const char *prefix;
+ int quiet;
+ int recursive;
+};
+#define FOREACH_CB_INIT { 0 }
+
+static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
+ void *cb_data)
+{
+ struct foreach_cb *info = cb_data;
+ const char *path = list_item->name;
+ const struct object_id *ce_oid = &list_item->oid;
+ const struct submodule *sub;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ char *displaypath;
+
+ displaypath = get_submodule_displaypath(path, info->prefix);
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+
+ if (!sub)
+ die(_("No url found for submodule path '%s' in .gitmodules"),
+ displaypath);
+
+ if (!is_submodule_populated_gently(path, NULL))
+ goto cleanup;
+
+ prepare_submodule_repo_env(&cp.env);
+
+ /*
+ * Execute <command> in the submodule through a separate shell,
+ * since the command may be an arbitrary shell snippet.
+ */
+ cp.use_shell = 1;
+ cp.dir = path;
+
+ /*
+ * NEEDSWORK: the command currently has access to the variables $name,
+ * $sm_path, $displaypath, $sha1 and $toplevel only when the command
+ * contains a single argument. This is done for maintaining a faithful
+ * translation from shell script.
+ */
+ if (info->argc == 1) {
+ char *toplevel = xgetcwd();
+ struct strbuf sb = STRBUF_INIT;
+
+ strvec_pushf(&cp.env, "name=%s", sub->name);
+ strvec_pushf(&cp.env, "sm_path=%s", path);
+ strvec_pushf(&cp.env, "displaypath=%s", displaypath);
+ strvec_pushf(&cp.env, "sha1=%s",
+ oid_to_hex(ce_oid));
+ strvec_pushf(&cp.env, "toplevel=%s", toplevel);
+
+ /*
+ * Since the path variable was accessible from the script
+ * before porting, it is also made available after porting.
+ * The environment variable "PATH" has a special meaning on
+ * Windows, and since environment variables are case-insensitive
+ * there, setting "path" would interfere with the existing PATH
+ * variable. Hence, to avoid that, we expose
+ * path via the args strvec and not via env.
+ */
+ sq_quote_buf(&sb, path);
+ strvec_pushf(&cp.args, "path=%s; %s",
+ sb.buf, info->argv[0]);
+ strbuf_release(&sb);
+ free(toplevel);
+ } else {
+ strvec_pushv(&cp.args, info->argv);
+ }
+
+ if (!info->quiet)
+ printf(_("Entering '%s'\n"), displaypath);
+
+ if (info->argv[0] && run_command(&cp))
+ die(_("run_command returned non-zero status for %s\n."),
+ displaypath);
+
+ if (info->recursive) {
+ struct child_process cpr = CHILD_PROCESS_INIT;
+
+ cpr.git_cmd = 1;
+ cpr.dir = path;
+ prepare_submodule_repo_env(&cpr.env);
+
+ strvec_pushl(&cpr.args, "--super-prefix", NULL);
+ strvec_pushf(&cpr.args, "%s/", displaypath);
+ strvec_pushl(&cpr.args, "submodule--helper", "foreach", "--recursive",
+ NULL);
+
+ if (info->quiet)
+ strvec_push(&cpr.args, "--quiet");
+
+ strvec_push(&cpr.args, "--");
+ strvec_pushv(&cpr.args, info->argv);
+
+ if (run_command(&cpr))
+ die(_("run_command returned non-zero status while "
+ "recursing in the nested submodules of %s\n."),
+ displaypath);
+ }
+
+cleanup:
+ free(displaypath);
+}
+
+static int module_foreach(int argc, const char **argv, const char *prefix)
+{
+ struct foreach_cb info = FOREACH_CB_INIT;
+ struct pathspec pathspec = { 0 };
+ struct module_list list = MODULE_LIST_INIT;
+ struct option module_foreach_options[] = {
+ OPT__QUIET(&info.quiet, N_("suppress output of entering each submodule command")),
+ OPT_BOOL(0, "recursive", &info.recursive,
+ N_("recurse into nested submodules")),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule foreach [--quiet] [--recursive] [--] <command>"),
+ NULL
+ };
+ int ret = 1;
+
+ argc = parse_options(argc, argv, prefix, module_foreach_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(NULL, prefix, &pathspec, &list) < 0)
+ goto cleanup;
+
+ info.argc = argc;
+ info.argv = argv;
+ info.prefix = prefix;
+
+ for_each_listed_submodule(&list, runcommand_in_submodule_cb, &info);
+
+ ret = 0;
+cleanup:
+ module_list_release(&list);
+ clear_pathspec(&pathspec);
+ return ret;
+}
+
+static int starts_with_dot_slash(const char *const path)
+{
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
+}
+
+static int starts_with_dot_dot_slash(const char *const path)
+{
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
+}
+
+struct init_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+#define INIT_CB_INIT { 0 }
+
+static void init_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ struct strbuf sb = STRBUF_INIT;
+ const char *upd;
+ char *url = NULL, *displaypath;
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+
+ if (!sub)
+ die(_("No url found for submodule path '%s' in .gitmodules"),
+ displaypath);
+
+ /*
+ * NEEDSWORK: In a multi-working-tree world, this needs to be
+ * set in the per-worktree config.
+ *
+ * Set active flag for the submodule being initialized
+ */
+ if (!is_submodule_active(the_repository, path)) {
+ strbuf_addf(&sb, "submodule.%s.active", sub->name);
+ git_config_set_gently(sb.buf, "true");
+ strbuf_reset(&sb);
+ }
+
+ /*
+ * Copy url setting when it is not set yet.
+ * To look up the url in .git/config, we must not fall back to
+ * .gitmodules, so look it up directly.
+ */
+ strbuf_addf(&sb, "submodule.%s.url", sub->name);
+ if (git_config_get_string(sb.buf, &url)) {
+ if (!sub->url)
+ die(_("No url found for submodule path '%s' in .gitmodules"),
+ displaypath);
+
+ url = xstrdup(sub->url);
+
+ /* Possibly a url relative to parent */
+ if (starts_with_dot_dot_slash(url) ||
+ starts_with_dot_slash(url)) {
+ char *oldurl = url;
+
+ url = resolve_relative_url(oldurl, NULL, 0);
+ free(oldurl);
+ }
+
+ if (git_config_set_gently(sb.buf, url))
+ die(_("Failed to register url for submodule path '%s'"),
+ displaypath);
+ if (!(flags & OPT_QUIET))
+ fprintf(stderr,
+ _("Submodule '%s' (%s) registered for path '%s'\n"),
+ sub->name, url, displaypath);
+ }
+ strbuf_reset(&sb);
+
+ /* Copy "update" setting when it is not set yet */
+ strbuf_addf(&sb, "submodule.%s.update", sub->name);
+ if (git_config_get_string_tmp(sb.buf, &upd) &&
+ sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) {
+ if (sub->update_strategy.type == SM_UPDATE_COMMAND) {
+ fprintf(stderr, _("warning: command update mode suggested for submodule '%s'\n"),
+ sub->name);
+ upd = "none";
+ } else {
+ upd = submodule_update_type_to_string(sub->update_strategy.type);
+ }
+
+ if (git_config_set_gently(sb.buf, upd))
+ die(_("Failed to register update mode for submodule path '%s'"), displaypath);
+ }
+ strbuf_release(&sb);
+ free(displaypath);
+ free(url);
+}
+
+static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data)
+{
+ struct init_cb *info = cb_data;
+
+ init_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_init(int argc, const char **argv, const char *prefix)
+{
+ struct init_cb info = INIT_CB_INIT;
+ struct pathspec pathspec = { 0 };
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ struct option module_init_options[] = {
+ OPT__QUIET(&quiet, N_("suppress output for initializing a submodule")),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule init [<options>] [<path>]"),
+ NULL
+ };
+ int ret = 1;
+
+ argc = parse_options(argc, argv, prefix, module_init_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
+ goto cleanup;
+
+ /*
+ * If there are no path args and submodule.active is set then,
+ * by default, only initialize 'active' modules.
+ */
+ if (!argc && git_config_get_value_multi("submodule.active"))
+ module_list_active(&list);
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+
+ for_each_listed_submodule(&list, init_submodule_cb, &info);
+
+ ret = 0;
+cleanup:
+ module_list_release(&list);
+ clear_pathspec(&pathspec);
+ return ret;
+}
+
+struct status_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+#define STATUS_CB_INIT { 0 }
+
+static void print_status(unsigned int flags, char state, const char *path,
+ const struct object_id *oid, const char *displaypath)
+{
+ if (flags & OPT_QUIET)
+ return;
+
+ printf("%c%s %s", state, oid_to_hex(oid), displaypath);
+
+ if (state == ' ' || state == '+') {
+ char *name = compute_rev_name(path, oid_to_hex(oid));
+
+ if (name)
+ printf(" (%s)", name);
+ free(name);
+ }
+
+ printf("\n");
+}
+
+static int handle_submodule_head_ref(const char *refname UNUSED,
+ const struct object_id *oid,
+ int flags UNUSED,
+ void *cb_data)
+{
+ struct object_id *output = cb_data;
+
+ if (oid)
+ oidcpy(output, oid);
+
+ return 0;
+}
+
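+/*
+ * Print one status line for the submodule: 'U' for unmerged entries,
+ * '-' when the submodule is not initialized or not a git repository,
+ * '+' when the checked-out commit differs from the one recorded in the
+ * superproject's index, and ' ' when the two match.
+ */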
+static void status_submodule(const char *path, const struct object_id *ce_oid,
+ unsigned int ce_flags, const char *prefix,
+ unsigned int flags)
+{
+ char *displaypath;
+ struct strvec diff_files_args = STRVEC_INIT;
+ struct rev_info rev = REV_INFO_INIT;
+ int diff_files_result;
+ struct strbuf buf = STRBUF_INIT;
+ const char *git_dir;
+ struct setup_revision_opt opt = {
+ .free_removed_argv_elements = 1,
+ };
+
+ if (!submodule_from_path(the_repository, null_oid(), path))
+ die(_("no submodule mapping found in .gitmodules for path '%s'"),
+ path);
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ if ((CE_STAGEMASK & ce_flags) >> CE_STAGESHIFT) {
+ print_status(flags, 'U', path, null_oid(), displaypath);
+ goto cleanup;
+ }
+
+ strbuf_addf(&buf, "%s/.git", path);
+ git_dir = read_gitfile(buf.buf);
+ if (!git_dir)
+ git_dir = buf.buf;
+
+ if (!is_submodule_active(the_repository, path) ||
+ !is_git_directory(git_dir)) {
+ print_status(flags, '-', path, ce_oid, displaypath);
+ strbuf_release(&buf);
+ goto cleanup;
+ }
+ strbuf_release(&buf);
+
+ strvec_pushl(&diff_files_args, "diff-files",
+ "--ignore-submodules=dirty", "--quiet", "--",
+ path, NULL);
+
+ git_config(git_diff_basic_config, NULL);
+
+ repo_init_revisions(the_repository, &rev, NULL);
+ rev.abbrev = 0;
+ setup_revisions(diff_files_args.nr, diff_files_args.v, &rev, &opt);
+ diff_files_result = run_diff_files(&rev, 0);
+
+ if (!diff_result_code(&rev.diffopt, diff_files_result)) {
+ print_status(flags, ' ', path, ce_oid,
+ displaypath);
+ } else if (!(flags & OPT_CACHED)) {
+ struct object_id oid;
+ struct ref_store *refs = get_submodule_ref_store(path);
+
+ if (!refs) {
+ print_status(flags, '-', path, ce_oid, displaypath);
+ goto cleanup;
+ }
+ if (refs_head_ref(refs, handle_submodule_head_ref, &oid))
+ die(_("could not resolve HEAD ref inside the "
+ "submodule '%s'"), path);
+
+ print_status(flags, '+', path, &oid, displaypath);
+ } else {
+ print_status(flags, '+', path, ce_oid, displaypath);
+ }
+
+ if (flags & OPT_RECURSIVE) {
+ struct child_process cpr = CHILD_PROCESS_INIT;
+
+ cpr.git_cmd = 1;
+ cpr.dir = path;
+ prepare_submodule_repo_env(&cpr.env);
+
+ strvec_push(&cpr.args, "--super-prefix");
+ strvec_pushf(&cpr.args, "%s/", displaypath);
+ strvec_pushl(&cpr.args, "submodule--helper", "status",
+ "--recursive", NULL);
+
+ if (flags & OPT_CACHED)
+ strvec_push(&cpr.args, "--cached");
+
+ if (flags & OPT_QUIET)
+ strvec_push(&cpr.args, "--quiet");
+
+ if (run_command(&cpr))
+ die(_("failed to recurse into submodule '%s'"), path);
+ }
+
+cleanup:
+ strvec_clear(&diff_files_args);
+ free(displaypath);
+ release_revisions(&rev);
+}
+
+static void status_submodule_cb(const struct cache_entry *list_item,
+ void *cb_data)
+{
+ struct status_cb *info = cb_data;
+
+ status_submodule(list_item->name, &list_item->oid, list_item->ce_flags,
+ info->prefix, info->flags);
+}
+
+static int module_status(int argc, const char **argv, const char *prefix)
+{
+ struct status_cb info = STATUS_CB_INIT;
+ struct pathspec pathspec = { 0 };
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ struct option module_status_options[] = {
+ OPT__QUIET(&quiet, N_("suppress submodule status output")),
+ OPT_BIT(0, "cached", &info.flags, N_("use commit stored in the index instead of the one stored in the submodule HEAD"), OPT_CACHED),
+ OPT_BIT(0, "recursive", &info.flags, N_("recurse into nested submodules"), OPT_RECURSIVE),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule status [--quiet] [--cached] [--recursive] [<path>...]"),
+ NULL
+ };
+ int ret = 1;
+
+ argc = parse_options(argc, argv, prefix, module_status_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
+ goto cleanup;
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+
+ for_each_listed_submodule(&list, status_submodule_cb, &info);
+
+ ret = 0;
+cleanup:
+ module_list_release(&list);
+ clear_pathspec(&pathspec);
+ return ret;
+}
+
+struct module_cb {
+ unsigned int mod_src;
+ unsigned int mod_dst;
+ struct object_id oid_src;
+ struct object_id oid_dst;
+ char status;
+ char *sm_path;
+};
+#define MODULE_CB_INIT { 0 }
+
+static void module_cb_release(struct module_cb *mcb)
+{
+ free(mcb->sm_path);
+}
+
+struct module_cb_list {
+ struct module_cb **entries;
+ int alloc, nr;
+};
+#define MODULE_CB_LIST_INIT { 0 }
+
+static void module_cb_list_release(struct module_cb_list *mcbl)
+{
+ int i;
+
+ for (i = 0; i < mcbl->nr; i++) {
+ struct module_cb *mcb = mcbl->entries[i];
+
+ module_cb_release(mcb);
+ free(mcb);
+ }
+ free(mcbl->entries);
+}
+
+struct summary_cb {
+ int argc;
+ const char **argv;
+ const char *prefix;
+ unsigned int cached: 1;
+ unsigned int for_status: 1;
+ unsigned int files: 1;
+ int summary_limit;
+};
+#define SUMMARY_CB_INIT { 0 }
+
+enum diff_cmd {
+ DIFF_INDEX,
+ DIFF_FILES
+};
+
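+/*
+ * Run "rev-parse -q --short <committish>^0" inside the submodule and
+ * return the abbreviated hash, or NULL when the commit does not exist
+ * there.
+ */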
+static char *verify_submodule_committish(const char *sm_path,
+ const char *committish)
+{
+ struct child_process cp_rev_parse = CHILD_PROCESS_INIT;
+ struct strbuf result = STRBUF_INIT;
+
+ cp_rev_parse.git_cmd = 1;
+ cp_rev_parse.dir = sm_path;
+ prepare_submodule_repo_env(&cp_rev_parse.env);
+ strvec_pushl(&cp_rev_parse.args, "rev-parse", "-q", "--short", NULL);
+ strvec_pushf(&cp_rev_parse.args, "%s^0", committish);
+ strvec_push(&cp_rev_parse.args, "--");
+
+ if (capture_command(&cp_rev_parse, &result, 0))
+ return NULL;
+
+ strbuf_trim_trailing_newline(&result);
+ return strbuf_detach(&result, NULL);
+}
+
+static void print_submodule_summary(struct summary_cb *info, const char *errmsg,
+ int total_commits, const char *displaypath,
+ const char *src_abbrev, const char *dst_abbrev,
+ struct module_cb *p)
+{
+ if (p->status == 'T') {
+ if (S_ISGITLINK(p->mod_dst))
+ printf(_("* %s %s(blob)->%s(submodule)"),
+ displaypath, src_abbrev, dst_abbrev);
+ else
+ printf(_("* %s %s(submodule)->%s(blob)"),
+ displaypath, src_abbrev, dst_abbrev);
+ } else {
+ printf("* %s %s...%s",
+ displaypath, src_abbrev, dst_abbrev);
+ }
+
+ if (total_commits < 0)
+ printf(":\n");
+ else
+ printf(" (%d):\n", total_commits);
+
+ if (errmsg) {
+ printf(_("%s"), errmsg);
+ } else if (total_commits > 0) {
+ struct child_process cp_log = CHILD_PROCESS_INIT;
+
+ cp_log.git_cmd = 1;
+ cp_log.dir = p->sm_path;
+ prepare_submodule_repo_env(&cp_log.env);
+ strvec_pushl(&cp_log.args, "log", NULL);
+
+ if (S_ISGITLINK(p->mod_src) && S_ISGITLINK(p->mod_dst)) {
+ if (info->summary_limit > 0)
+ strvec_pushf(&cp_log.args, "-%d",
+ info->summary_limit);
+
+ strvec_pushl(&cp_log.args, "--pretty= %m %s",
+ "--first-parent", NULL);
+ strvec_pushf(&cp_log.args, "%s...%s",
+ src_abbrev, dst_abbrev);
+ } else if (S_ISGITLINK(p->mod_dst)) {
+ strvec_pushl(&cp_log.args, "--pretty= > %s",
+ "-1", dst_abbrev, NULL);
+ } else {
+ strvec_pushl(&cp_log.args, "--pretty= < %s",
+ "-1", src_abbrev, NULL);
+ }
+ run_command(&cp_log);
+ }
+ printf("\n");
+}
+
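+/*
+ * Summarize one submodule change: resolve abbreviated source and
+ * destination commits (falling back to a truncated object id when a
+ * commit is missing from the submodule), count the commits in between
+ * with rev-list when both ends are present, and print the result.
+ */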
+static void generate_submodule_summary(struct summary_cb *info,
+ struct module_cb *p)
+{
+ char *displaypath, *src_abbrev = NULL, *dst_abbrev;
+ int missing_src = 0, missing_dst = 0;
+ struct strbuf errmsg = STRBUF_INIT;
+ int total_commits = -1;
+
+ if (!info->cached && oideq(&p->oid_dst, null_oid())) {
+ if (S_ISGITLINK(p->mod_dst)) {
+ struct ref_store *refs = get_submodule_ref_store(p->sm_path);
+
+ if (refs)
+ refs_head_ref(refs, handle_submodule_head_ref, &p->oid_dst);
+ } else if (S_ISLNK(p->mod_dst) || S_ISREG(p->mod_dst)) {
+ struct stat st;
+ int fd = open(p->sm_path, O_RDONLY);
+
+ if (fd < 0 || fstat(fd, &st) < 0 ||
+ index_fd(&the_index, &p->oid_dst, fd, &st, OBJ_BLOB,
+ p->sm_path, 0))
+ error(_("couldn't hash object from '%s'"), p->sm_path);
+ } else {
+ /* for a submodule removal (mode:0000000), don't warn */
+ if (p->mod_dst)
+ warning(_("unexpected mode %o\n"), p->mod_dst);
+ }
+ }
+
+ if (S_ISGITLINK(p->mod_src)) {
+ if (p->status != 'D')
+ src_abbrev = verify_submodule_committish(p->sm_path,
+ oid_to_hex(&p->oid_src));
+ if (!src_abbrev) {
+ missing_src = 1;
+ /*
+ * As `rev-parse` failed, we fallback to getting
+ * the abbreviated hash using oid_src. We do
+ * this as we might still need the abbreviated
+ * hash in cases like a submodule type change, etc.
+ */
+ src_abbrev = xstrndup(oid_to_hex(&p->oid_src), 7);
+ }
+ } else {
+ /*
+ * The source does not point to a submodule.
+ * So, we fallback to getting the abbreviation using
+ * oid_src as we might still need the abbreviated
+ * hash in cases like submodule add, etc.
+ */
+ src_abbrev = xstrndup(oid_to_hex(&p->oid_src), 7);
+ }
+
+ if (S_ISGITLINK(p->mod_dst)) {
+ dst_abbrev = verify_submodule_committish(p->sm_path,
+ oid_to_hex(&p->oid_dst));
+ if (!dst_abbrev) {
+ missing_dst = 1;
+ /*
+ * As `rev-parse` failed, we fallback to getting
+ * the abbreviated hash using oid_dst. We do
+ * this as we might still need the abbreviated
+ * hash in cases like a submodule type change, etc.
+ */
+ dst_abbrev = xstrndup(oid_to_hex(&p->oid_dst), 7);
+ }
+ } else {
+ /*
+ * The destination does not point to a submodule.
+ * So, we fallback to getting the abbreviation using
+ * oid_dst as we might still need the abbreviated
+ * hash in cases like a submodule removal, etc.
+ */
+ dst_abbrev = xstrndup(oid_to_hex(&p->oid_dst), 7);
+ }
+
+ displaypath = get_submodule_displaypath(p->sm_path, info->prefix);
+
+ if (!missing_src && !missing_dst) {
+ struct child_process cp_rev_list = CHILD_PROCESS_INIT;
+ struct strbuf sb_rev_list = STRBUF_INIT;
+
+ strvec_pushl(&cp_rev_list.args, "rev-list",
+ "--first-parent", "--count", NULL);
+ if (S_ISGITLINK(p->mod_src) && S_ISGITLINK(p->mod_dst))
+ strvec_pushf(&cp_rev_list.args, "%s...%s",
+ src_abbrev, dst_abbrev);
+ else
+ strvec_push(&cp_rev_list.args, S_ISGITLINK(p->mod_src) ?
+ src_abbrev : dst_abbrev);
+ strvec_push(&cp_rev_list.args, "--");
+
+ cp_rev_list.git_cmd = 1;
+ cp_rev_list.dir = p->sm_path;
+ prepare_submodule_repo_env(&cp_rev_list.env);
+
+ if (!capture_command(&cp_rev_list, &sb_rev_list, 0))
+ total_commits = atoi(sb_rev_list.buf);
+
+ strbuf_release(&sb_rev_list);
+ } else {
+ /*
+ * Don't give error msg for modification whose dst is not
+ * submodule, i.e., deleted or changed to blob
+ */
+ if (S_ISGITLINK(p->mod_dst)) {
+ if (missing_src && missing_dst) {
+ strbuf_addf(&errmsg, " Warn: %s doesn't contain commits %s and %s\n",
+ displaypath, oid_to_hex(&p->oid_src),
+ oid_to_hex(&p->oid_dst));
+ } else {
+ strbuf_addf(&errmsg, " Warn: %s doesn't contain commit %s\n",
+ displaypath, missing_src ?
+ oid_to_hex(&p->oid_src) :
+ oid_to_hex(&p->oid_dst));
+ }
+ }
+ }
+
+ print_submodule_summary(info, errmsg.len ? errmsg.buf : NULL,
+ total_commits, displaypath, src_abbrev,
+ dst_abbrev, p);
+
+ free(displaypath);
+ free(src_abbrev);
+ free(dst_abbrev);
+ strbuf_release(&errmsg);
+}
+
+static void prepare_submodule_summary(struct summary_cb *info,
+ struct module_cb_list *list)
+{
+ int i;
+ for (i = 0; i < list->nr; i++) {
+ const struct submodule *sub;
+ struct module_cb *p = list->entries[i];
+ struct strbuf sm_gitdir = STRBUF_INIT;
+
+ if (p->status == 'D' || p->status == 'T') {
+ generate_submodule_summary(info, p);
+ continue;
+ }
+
+ if (info->for_status && p->status != 'A' &&
+ (sub = submodule_from_path(the_repository,
+ null_oid(), p->sm_path))) {
+ char *config_key = NULL;
+ const char *value;
+ int ignore_all = 0;
+
+ config_key = xstrfmt("submodule.%s.ignore",
+ sub->name);
+ if (!git_config_get_string_tmp(config_key, &value))
+ ignore_all = !strcmp(value, "all");
+ else if (sub->ignore)
+ ignore_all = !strcmp(sub->ignore, "all");
+
+ free(config_key);
+ if (ignore_all)
+ continue;
+ }
+
+ /* Also show added or modified modules which are checked out */
+ strbuf_addstr(&sm_gitdir, p->sm_path);
+ if (is_nonbare_repository_dir(&sm_gitdir))
+ generate_submodule_summary(info, p);
+ strbuf_release(&sm_gitdir);
+ }
+}
+
+static void submodule_summary_callback(struct diff_queue_struct *q,
+ struct diff_options *options,
+ void *data)
+{
+ int i;
+ struct module_cb_list *list = data;
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ struct module_cb *temp;
+
+ if (!S_ISGITLINK(p->one->mode) && !S_ISGITLINK(p->two->mode))
+ continue;
+ temp = xmalloc(sizeof(*temp));
+ temp->mod_src = p->one->mode;
+ temp->mod_dst = p->two->mode;
+ temp->oid_src = p->one->oid;
+ temp->oid_dst = p->two->oid;
+ temp->status = p->status;
+ temp->sm_path = xstrdup(p->one->path);
+
+ ALLOC_GROW(list->entries, list->nr + 1, list->alloc);
+ list->entries[list->nr++] = temp;
+ }
+}
+
+static const char *get_diff_cmd(enum diff_cmd diff_cmd)
+{
+ switch (diff_cmd) {
+ case DIFF_INDEX: return "diff-index";
+ case DIFF_FILES: return "diff-files";
+ default: BUG("bad diff_cmd value %d", diff_cmd);
+ }
+}
+
+static int compute_summary_module_list(struct object_id *head_oid,
+ struct summary_cb *info,
+ enum diff_cmd diff_cmd)
+{
+ struct strvec diff_args = STRVEC_INIT;
+ struct rev_info rev;
+ struct setup_revision_opt opt = {
+ .free_removed_argv_elements = 1,
+ };
+ struct module_cb_list list = MODULE_CB_LIST_INIT;
+ int ret = 0;
+
+ strvec_push(&diff_args, get_diff_cmd(diff_cmd));
+ if (info->cached)
+ strvec_push(&diff_args, "--cached");
+ strvec_pushl(&diff_args, "--ignore-submodules=dirty", "--raw", NULL);
+ if (head_oid)
+ strvec_push(&diff_args, oid_to_hex(head_oid));
+ strvec_push(&diff_args, "--");
+ if (info->argc)
+ strvec_pushv(&diff_args, info->argv);
+
+ git_config(git_diff_basic_config, NULL);
+ init_revisions(&rev, info->prefix);
+ rev.abbrev = 0;
+ precompose_argv_prefix(diff_args.nr, diff_args.v, NULL);
+ setup_revisions(diff_args.nr, diff_args.v, &rev, &opt);
+ rev.diffopt.output_format = DIFF_FORMAT_NO_OUTPUT | DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = submodule_summary_callback;
+ rev.diffopt.format_callback_data = &list;
+
+ if (!info->cached) {
+ if (diff_cmd == DIFF_INDEX)
+ setup_work_tree();
+ if (repo_read_index_preload(the_repository, &rev.diffopt.pathspec, 0) < 0) {
+ perror("repo_read_index_preload");
+ ret = -1;
+ goto cleanup;
+ }
+ } else if (repo_read_index(the_repository) < 0) {
+ perror("repo_read_cache");
+ ret = -1;
+ goto cleanup;
+ }
+
+ if (diff_cmd == DIFF_INDEX)
+ run_diff_index(&rev, info->cached);
+ else
+ run_diff_files(&rev, 0);
+ prepare_submodule_summary(info, &list);
+cleanup:
+ strvec_clear(&diff_args);
+ release_revisions(&rev);
+ module_cb_list_release(&list);
+ return ret;
+}
+
+static int module_summary(int argc, const char **argv, const char *prefix)
+{
+ struct summary_cb info = SUMMARY_CB_INIT;
+ int cached = 0;
+ int for_status = 0;
+ int files = 0;
+ int summary_limit = -1;
+ enum diff_cmd diff_cmd = DIFF_INDEX;
+ struct object_id head_oid;
+ int ret;
+ struct option module_summary_options[] = {
+ OPT_BOOL(0, "cached", &cached,
+ N_("use the commit stored in the index instead of the submodule HEAD")),
+ OPT_BOOL(0, "files", &files,
+ N_("compare the commit in the index with that in the submodule HEAD")),
+ OPT_BOOL(0, "for-status", &for_status,
+ N_("skip submodules with 'ignore_config' value set to 'all'")),
+ OPT_INTEGER('n', "summary-limit", &summary_limit,
+ N_("limit the summary size")),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule summary [<options>] [<commit>] [--] [<path>]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_summary_options,
+ git_submodule_helper_usage, 0);
+
+ if (!summary_limit)
+ return 0;
+
+ if (!get_oid(argc ? argv[0] : "HEAD", &head_oid)) {
+ if (argc) {
+ argv++;
+ argc--;
+ }
+ } else if (!argc || !strcmp(argv[0], "HEAD")) {
+ /* before the first commit: compare with an empty tree */
+ oidcpy(&head_oid, the_hash_algo->empty_tree);
+ if (argc) {
+ argv++;
+ argc--;
+ }
+ } else {
+ if (get_oid("HEAD", &head_oid))
+ die(_("could not fetch a revision for HEAD"));
+ }
+
+ if (files) {
+ if (cached)
+ die(_("options '%s' and '%s' cannot be used together"), "--cached", "--files");
+ diff_cmd = DIFF_FILES;
+ }
+
+ info.argc = argc;
+ info.argv = argv;
+ info.prefix = prefix;
+ info.cached = !!cached;
+ info.files = !!files;
+ info.for_status = !!for_status;
+ info.summary_limit = summary_limit;
+
+ ret = compute_summary_module_list((diff_cmd == DIFF_INDEX) ? &head_oid : NULL,
+ &info, diff_cmd);
+ return ret;
+}
+
+struct sync_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+#define SYNC_CB_INIT { 0 }
+
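+/*
+ * Re-derive the submodule URL from .gitmodules, resolving "./" and
+ * "../" URLs against the superproject's default remote, record it as
+ * submodule.<name>.url in the superproject's config and, when the
+ * submodule is populated, update remote.<name>.url in the submodule's
+ * own config as well.
+ */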
+static void sync_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *remote_key = NULL;
+ char *sub_origin_url, *super_config_url, *displaypath, *default_remote;
+ struct strbuf sb = STRBUF_INIT;
+ char *sub_config_path = NULL;
+ int code;
+
+ if (!is_submodule_active(the_repository, path))
+ return;
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+
+ if (sub && sub->url) {
+ if (starts_with_dot_dot_slash(sub->url) ||
+ starts_with_dot_slash(sub->url)) {
+ char *up_path = get_up_path(path);
+
+ sub_origin_url = resolve_relative_url(sub->url, up_path, 1);
+ super_config_url = resolve_relative_url(sub->url, NULL, 1);
+ free(up_path);
+ } else {
+ sub_origin_url = xstrdup(sub->url);
+ super_config_url = xstrdup(sub->url);
+ }
+ } else {
+ sub_origin_url = xstrdup("");
+ super_config_url = xstrdup("");
+ }
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ if (!(flags & OPT_QUIET))
+ printf(_("Synchronizing submodule url for '%s'\n"),
+ displaypath);
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "submodule.%s.url", sub->name);
+ if (git_config_set_gently(sb.buf, super_config_url))
+ die(_("failed to register url for submodule path '%s'"),
+ displaypath);
+
+ if (!is_submodule_populated_gently(path, NULL))
+ goto cleanup;
+
+ strbuf_reset(&sb);
+ code = get_default_remote_submodule(path, &default_remote);
+ if (code)
+ exit(code);
+
+ remote_key = xstrfmt("remote.%s.url", default_remote);
+ free(default_remote);
+
+ submodule_to_gitdir(&sb, path);
+ strbuf_addstr(&sb, "/config");
+
+ if (git_config_set_in_file_gently(sb.buf, remote_key, sub_origin_url))
+ die(_("failed to update remote for submodule '%s'"),
+ path);
+
+ if (flags & OPT_RECURSIVE) {
+ struct child_process cpr = CHILD_PROCESS_INIT;
+
+ cpr.git_cmd = 1;
+ cpr.dir = path;
+ prepare_submodule_repo_env(&cpr.env);
+
+ strvec_push(&cpr.args, "--super-prefix");
+ strvec_pushf(&cpr.args, "%s/", displaypath);
+ strvec_pushl(&cpr.args, "submodule--helper", "sync",
+ "--recursive", NULL);
+
+ if (flags & OPT_QUIET)
+ strvec_push(&cpr.args, "--quiet");
+
+ if (run_command(&cpr))
+ die(_("failed to recurse into submodule '%s'"),
+ path);
+ }
+
+cleanup:
+ free(super_config_url);
+ free(sub_origin_url);
+ strbuf_release(&sb);
+ free(remote_key);
+ free(displaypath);
+ free(sub_config_path);
+}
+
+static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data)
+{
+ struct sync_cb *info = cb_data;
+
+ sync_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_sync(int argc, const char **argv, const char *prefix)
+{
+ struct sync_cb info = SYNC_CB_INIT;
+ struct pathspec pathspec = { 0 };
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int recursive = 0;
+ struct option module_sync_options[] = {
+ OPT__QUIET(&quiet, N_("suppress output of synchronizing submodule url")),
+ OPT_BOOL(0, "recursive", &recursive,
+ N_("recurse into nested submodules")),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule sync [--quiet] [--recursive] [<path>]"),
+ NULL
+ };
+ int ret = 1;
+
+ argc = parse_options(argc, argv, prefix, module_sync_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
+ goto cleanup;
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (recursive)
+ info.flags |= OPT_RECURSIVE;
+
+ for_each_listed_submodule(&list, sync_submodule_cb, &info);
+
+ ret = 0;
+cleanup:
+ module_list_release(&list);
+ clear_pathspec(&pathspec);
+ return ret;
+}
+
+struct deinit_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+#define DEINIT_CB_INIT { 0 }
+
+static void deinit_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *displaypath = NULL;
+ struct child_process cp_config = CHILD_PROCESS_INIT;
+ struct strbuf sb_config = STRBUF_INIT;
+ char *sub_git_dir = xstrfmt("%s/.git", path);
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+
+ if (!sub || !sub->name)
+ goto cleanup;
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ /* remove the submodule work tree (unless the user already did it) */
+ if (is_directory(path)) {
+ struct strbuf sb_rm = STRBUF_INIT;
+ const char *format;
+
+ if (is_directory(sub_git_dir)) {
+ if (!(flags & OPT_QUIET))
+ warning(_("Submodule work tree '%s' contains a .git "
+ "directory. This will be replaced with a "
+ ".git file by using absorbgitdirs."),
+ displaypath);
+
+ absorb_git_dir_into_superproject(path);
+
+ }
+
+ if (!(flags & OPT_FORCE)) {
+ struct child_process cp_rm = CHILD_PROCESS_INIT;
+
+ cp_rm.git_cmd = 1;
+ strvec_pushl(&cp_rm.args, "rm", "-qn",
+ path, NULL);
+
+ if (run_command(&cp_rm))
+ die(_("Submodule work tree '%s' contains local "
+ "modifications; use '-f' to discard them"),
+ displaypath);
+ }
+
+ strbuf_addstr(&sb_rm, path);
+
+ if (!remove_dir_recursively(&sb_rm, 0))
+ format = _("Cleared directory '%s'\n");
+ else
+ format = _("Could not remove submodule work tree '%s'\n");
+
+ if (!(flags & OPT_QUIET))
+ printf(format, displaypath);
+
+ submodule_unset_core_worktree(sub);
+
+ strbuf_release(&sb_rm);
+ }
+
+ if (mkdir(path, 0777))
+ printf(_("could not create empty submodule directory %s"),
+ displaypath);
+
+ cp_config.git_cmd = 1;
+ strvec_pushl(&cp_config.args, "config", "--get-regexp", NULL);
+ strvec_pushf(&cp_config.args, "submodule.%s\\.", sub->name);
+
+ /* remove the .git/config entries (unless the user already did it) */
+ if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) {
+ char *sub_key = xstrfmt("submodule.%s", sub->name);
+
+ /*
+ * remove the whole section so we have a clean state when
+ * the user later decides to init this submodule again
+ */
+ git_config_rename_section_in_file(NULL, sub_key, NULL);
+ if (!(flags & OPT_QUIET))
+ printf(_("Submodule '%s' (%s) unregistered for path '%s'\n"),
+ sub->name, sub->url, displaypath);
+ free(sub_key);
+ }
+
+cleanup:
+ free(displaypath);
+ free(sub_git_dir);
+ strbuf_release(&sb_config);
+}
+
+static void deinit_submodule_cb(const struct cache_entry *list_item,
+ void *cb_data)
+{
+ struct deinit_cb *info = cb_data;
+ deinit_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_deinit(int argc, const char **argv, const char *prefix)
+{
+ struct deinit_cb info = DEINIT_CB_INIT;
+ struct pathspec pathspec = { 0 };
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int force = 0;
+ int all = 0;
+ struct option module_deinit_options[] = {
+ OPT__QUIET(&quiet, N_("suppress submodule status output")),
+ OPT__FORCE(&force, N_("remove submodule working trees even if they contain local changes"), 0),
+ OPT_BOOL(0, "all", &all, N_("unregister all submodules")),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"),
+ NULL
+ };
+ int ret = 1;
+
+ argc = parse_options(argc, argv, prefix, module_deinit_options,
+ git_submodule_helper_usage, 0);
+
+ if (all && argc) {
+ error("pathspec and --all are incompatible");
+ usage_with_options(git_submodule_helper_usage,
+ module_deinit_options);
+ }
+
+ if (!argc && !all)
+ die(_("Use '--all' if you really want to deinitialize all submodules"));
+
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
+ goto cleanup;
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (force)
+ info.flags |= OPT_FORCE;
+
+ for_each_listed_submodule(&list, deinit_submodule_cb, &info);
+
+ ret = 0;
+cleanup:
+ module_list_release(&list);
+ clear_pathspec(&pathspec);
+ return ret;
+}
+
+struct module_clone_data {
+ const char *prefix;
+ const char *path;
+ const char *name;
+ const char *url;
+ const char *depth;
+ struct list_objects_filter_options *filter_options;
+ unsigned int quiet: 1;
+ unsigned int progress: 1;
+ unsigned int dissociate: 1;
+ unsigned int require_init: 1;
+ int single_branch;
+};
+#define MODULE_CLONE_DATA_INIT { \
+ .single_branch = -1, \
+}
+
+struct submodule_alternate_setup {
+ const char *submodule_name;
+ enum SUBMODULE_ALTERNATE_ERROR_MODE {
+ SUBMODULE_ALTERNATE_ERROR_DIE,
+ SUBMODULE_ALTERNATE_ERROR_INFO,
+ SUBMODULE_ALTERNATE_ERROR_IGNORE
+ } error_mode;
+ struct string_list *reference;
+};
+#define SUBMODULE_ALTERNATE_SETUP_INIT { \
+ .error_mode = SUBMODULE_ALTERNATE_ERROR_IGNORE, \
+}
+
+static const char alternate_error_advice[] = N_(
+"An alternate computed from a superproject's alternate is invalid.\n"
+"To allow Git to clone without an alternate in such a case, set\n"
+"submodule.alternateErrorStrategy to 'info' or, equivalently, clone with\n"
+"'--reference-if-able' instead of '--reference'."
+);
+
+static int add_possible_reference_from_superproject(
+ struct object_directory *odb, void *sas_cb)
+{
+ struct submodule_alternate_setup *sas = sas_cb;
+ size_t len;
+
+ /*
+ * If the alternate object store is another repository, try the
+ * standard layout with .git/(modules/<name>)+/objects
+ */
+ if (strip_suffix(odb->path, "/objects", &len)) {
+ struct repository alternate;
+ char *sm_alternate;
+ struct strbuf sb = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+ strbuf_add(&sb, odb->path, len);
+
+ if (repo_init(&alternate, sb.buf, NULL) < 0)
+ die(_("could not get a repository handle for gitdir '%s'"),
+ sb.buf);
+
+ /*
+		 * We need to end the new path with '/' to mark it as a dir;
+		 * otherwise a submodule name containing '/' would break,
+		 * because the last path component of a missing submodule
+		 * reference would be taken as a file name.
+ */
+ strbuf_reset(&sb);
+ submodule_name_to_gitdir(&sb, &alternate, sas->submodule_name);
+ strbuf_addch(&sb, '/');
+ repo_clear(&alternate);
+
+ sm_alternate = compute_alternate_path(sb.buf, &err);
+ if (sm_alternate) {
+ char *p = strbuf_detach(&sb, NULL);
+
+ string_list_append(sas->reference, p)->util = p;
+ free(sm_alternate);
+ } else {
+ switch (sas->error_mode) {
+ case SUBMODULE_ALTERNATE_ERROR_DIE:
+ if (advice_enabled(ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE))
+ advise(_(alternate_error_advice));
+ die(_("submodule '%s' cannot add alternate: %s"),
+ sas->submodule_name, err.buf);
+ case SUBMODULE_ALTERNATE_ERROR_INFO:
+ fprintf_ln(stderr, _("submodule '%s' cannot add alternate: %s"),
+ sas->submodule_name, err.buf);
+ case SUBMODULE_ALTERNATE_ERROR_IGNORE:
+ ; /* nothing */
+ }
+ }
+ strbuf_release(&sb);
+ }
+
+ return 0;
+}
+
+static void prepare_possible_alternates(const char *sm_name,
+ struct string_list *reference)
+{
+ char *sm_alternate = NULL, *error_strategy = NULL;
+ struct submodule_alternate_setup sas = SUBMODULE_ALTERNATE_SETUP_INIT;
+
+ git_config_get_string("submodule.alternateLocation", &sm_alternate);
+ if (!sm_alternate)
+ return;
+
+ git_config_get_string("submodule.alternateErrorStrategy", &error_strategy);
+
+ if (!error_strategy)
+ error_strategy = xstrdup("die");
+
+ sas.submodule_name = sm_name;
+ sas.reference = reference;
+ if (!strcmp(error_strategy, "die"))
+ sas.error_mode = SUBMODULE_ALTERNATE_ERROR_DIE;
+ else if (!strcmp(error_strategy, "info"))
+ sas.error_mode = SUBMODULE_ALTERNATE_ERROR_INFO;
+ else if (!strcmp(error_strategy, "ignore"))
+ sas.error_mode = SUBMODULE_ALTERNATE_ERROR_IGNORE;
+ else
+ die(_("Value '%s' for submodule.alternateErrorStrategy is not recognized"), error_strategy);
+
+ if (!strcmp(sm_alternate, "superproject"))
+ foreach_alt_odb(add_possible_reference_from_superproject, &sas);
+ else if (!strcmp(sm_alternate, "no"))
+ ; /* do nothing */
+ else
+ die(_("Value '%s' for submodule.alternateLocation is not recognized"), sm_alternate);
+
+ free(sm_alternate);
+ free(error_strategy);
+}
+
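+/*
+ * Illustrative configuration (values match the checks above):
+ *
+ *	git config submodule.alternateLocation superproject
+ *	git config submodule.alternateErrorStrategy info
+ *
+ * lets submodule clones borrow objects from the superproject's
+ * alternates where possible, and merely reports (instead of dying)
+ * when a derived alternate turns out to be unusable.
+ */
+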
+static char *clone_submodule_sm_gitdir(const char *name)
+{
+ struct strbuf sb = STRBUF_INIT;
+ char *sm_gitdir;
+
+ submodule_name_to_gitdir(&sb, the_repository, name);
+ sm_gitdir = absolute_pathdup(sb.buf);
+ strbuf_release(&sb);
+
+ return sm_gitdir;
+}
+
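+/*
+ * Illustrative example (hypothetical name): for a submodule named
+ * "foo" this yields an absolute path along the lines of
+ * "<superproject>/.git/modules/foo", the usual location of a
+ * submodule's gitdir inside the superproject.
+ */
+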
+static int clone_submodule(const struct module_clone_data *clone_data,
+ struct string_list *reference)
+{
+ char *p;
+ char *sm_gitdir = clone_submodule_sm_gitdir(clone_data->name);
+ char *sm_alternate = NULL, *error_strategy = NULL;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ const char *clone_data_path = clone_data->path;
+ char *to_free = NULL;
+
+ if (!is_absolute_path(clone_data->path))
+ clone_data_path = to_free = xstrfmt("%s/%s", get_git_work_tree(),
+ clone_data->path);
+
+ if (validate_submodule_git_dir(sm_gitdir, clone_data->name) < 0)
+ die(_("refusing to create/use '%s' in another submodule's "
+ "git dir"), sm_gitdir);
+
+ if (!file_exists(sm_gitdir)) {
+ if (safe_create_leading_directories_const(sm_gitdir) < 0)
+ die(_("could not create directory '%s'"), sm_gitdir);
+
+ prepare_possible_alternates(clone_data->name, reference);
+
+ strvec_push(&cp.args, "clone");
+ strvec_push(&cp.args, "--no-checkout");
+ if (clone_data->quiet)
+ strvec_push(&cp.args, "--quiet");
+ if (clone_data->progress)
+ strvec_push(&cp.args, "--progress");
+ if (clone_data->depth && *(clone_data->depth))
+ strvec_pushl(&cp.args, "--depth", clone_data->depth, NULL);
+ if (reference->nr) {
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, reference)
+ strvec_pushl(&cp.args, "--reference",
+ item->string, NULL);
+ }
+ if (clone_data->dissociate)
+ strvec_push(&cp.args, "--dissociate");
+ if (sm_gitdir && *sm_gitdir)
+ strvec_pushl(&cp.args, "--separate-git-dir", sm_gitdir, NULL);
+ if (clone_data->filter_options && clone_data->filter_options->choice)
+ strvec_pushf(&cp.args, "--filter=%s",
+ expand_list_objects_filter_spec(
+ clone_data->filter_options));
+ if (clone_data->single_branch >= 0)
+ strvec_push(&cp.args, clone_data->single_branch ?
+ "--single-branch" :
+ "--no-single-branch");
+
+ strvec_push(&cp.args, "--");
+ strvec_push(&cp.args, clone_data->url);
+ strvec_push(&cp.args, clone_data_path);
+
+ cp.git_cmd = 1;
+ prepare_submodule_repo_env(&cp.env);
+ cp.no_stdin = 1;
+
+		if (run_command(&cp))
+ die(_("clone of '%s' into submodule path '%s' failed"),
+ clone_data->url, clone_data_path);
+ } else {
+ char *path;
+
+ if (clone_data->require_init && !access(clone_data_path, X_OK) &&
+ !is_empty_dir(clone_data_path))
+ die(_("directory not empty: '%s'"), clone_data_path);
+ if (safe_create_leading_directories_const(clone_data_path) < 0)
+ die(_("could not create directory '%s'"), clone_data_path);
+ path = xstrfmt("%s/index", sm_gitdir);
+ unlink_or_warn(path);
+ free(path);
+ }
+
+ connect_work_tree_and_git_dir(clone_data_path, sm_gitdir, 0);
+
+ p = git_pathdup_submodule(clone_data_path, "config");
+ if (!p)
+ die(_("could not get submodule directory for '%s'"), clone_data_path);
+
+ /* setup alternateLocation and alternateErrorStrategy in the cloned submodule if needed */
+ git_config_get_string("submodule.alternateLocation", &sm_alternate);
+ if (sm_alternate)
+ git_config_set_in_file(p, "submodule.alternateLocation",
+ sm_alternate);
+ git_config_get_string("submodule.alternateErrorStrategy", &error_strategy);
+ if (error_strategy)
+ git_config_set_in_file(p, "submodule.alternateErrorStrategy",
+ error_strategy);
+
+ free(sm_alternate);
+ free(error_strategy);
+
+ free(sm_gitdir);
+ free(p);
+ free(to_free);
+ return 0;
+}
+
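+/*
+ * The child invocation built above boils down to (sketch, optional
+ * flags omitted):
+ *
+ *	git clone --no-checkout --separate-git-dir <gitdir> -- <url> <path>
+ *
+ * after which the work tree and the gitdir are wired together via a
+ * ".git" file, and the alternate settings are copied into the new
+ * submodule's config.
+ */
+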
+static int module_clone(int argc, const char **argv, const char *prefix)
+{
+ int dissociate = 0, quiet = 0, progress = 0, require_init = 0;
+ struct module_clone_data clone_data = MODULE_CLONE_DATA_INIT;
+ struct string_list reference = STRING_LIST_INIT_NODUP;
+ struct list_objects_filter_options filter_options =
+ LIST_OBJECTS_FILTER_INIT;
+
+ struct option module_clone_options[] = {
+ OPT_STRING(0, "prefix", &clone_data.prefix,
+ N_("path"),
+ N_("alternative anchor for relative paths")),
+ OPT_STRING(0, "path", &clone_data.path,
+ N_("path"),
+ N_("where the new submodule will be cloned to")),
+ OPT_STRING(0, "name", &clone_data.name,
+ N_("string"),
+ N_("name of the new submodule")),
+ OPT_STRING(0, "url", &clone_data.url,
+ N_("string"),
+ N_("url where to clone the submodule from")),
+ OPT_STRING_LIST(0, "reference", &reference,
+ N_("repo"),
+ N_("reference repository")),
+ OPT_BOOL(0, "dissociate", &dissociate,
+ N_("use --reference only while cloning")),
+ OPT_STRING(0, "depth", &clone_data.depth,
+ N_("string"),
+ N_("depth for shallow clones")),
+		OPT__QUIET(&quiet, N_("suppress output for cloning a submodule")),
+ OPT_BOOL(0, "progress", &progress,
+ N_("force cloning progress")),
+ OPT_BOOL(0, "require-init", &require_init,
+ N_("disallow cloning into non-empty directory")),
+ OPT_BOOL(0, "single-branch", &clone_data.single_branch,
+ N_("clone only one branch, HEAD or --branch")),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule--helper clone [--prefix=<path>] [--quiet] "
+ "[--reference <repository>] [--name <name>] [--depth <depth>] "
+ "[--single-branch] [--filter <filter-spec>] "
+ "--url <url> --path <path>"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_clone_options,
+ git_submodule_helper_usage, 0);
+
+ clone_data.dissociate = !!dissociate;
+ clone_data.quiet = !!quiet;
+ clone_data.progress = !!progress;
+ clone_data.require_init = !!require_init;
+ clone_data.filter_options = &filter_options;
+
+ if (argc || !clone_data.url || !clone_data.path || !*(clone_data.path))
+ usage_with_options(git_submodule_helper_usage,
+ module_clone_options);
+
+ clone_submodule(&clone_data, &reference);
+ list_objects_filter_release(&filter_options);
+ string_list_clear(&reference, 1);
+ return 0;
+}
+
+static int determine_submodule_update_strategy(struct repository *r,
+ int just_cloned,
+ const char *path,
+ enum submodule_update_type update,
+ struct submodule_update_strategy *out)
+{
+ const struct submodule *sub = submodule_from_path(r, null_oid(), path);
+ char *key;
+ const char *val;
+ int ret;
+
+ key = xstrfmt("submodule.%s.update", sub->name);
+
+ if (update) {
+ out->type = update;
+ } else if (!repo_config_get_string_tmp(r, key, &val)) {
+ if (parse_submodule_update_strategy(val, out) < 0) {
+ ret = die_message(_("Invalid update mode '%s' configured for submodule path '%s'"),
+ val, path);
+ goto cleanup;
+ }
+ } else if (sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) {
+ if (sub->update_strategy.type == SM_UPDATE_COMMAND)
+ BUG("how did we read update = !command from .gitmodules?");
+ out->type = sub->update_strategy.type;
+ out->command = sub->update_strategy.command;
+ } else
+ out->type = SM_UPDATE_CHECKOUT;
+
+ if (just_cloned &&
+ (out->type == SM_UPDATE_MERGE ||
+ out->type == SM_UPDATE_REBASE ||
+ out->type == SM_UPDATE_NONE))
+ out->type = SM_UPDATE_CHECKOUT;
+
+ ret = 0;
+cleanup:
+ free(key);
+ return ret;
+}
+
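+/*
+ * Illustrative precedence (hypothetical submodule "foo"): an explicit
+ * --checkout/--merge/--rebase wins; otherwise "submodule.foo.update"
+ * from .git/config; otherwise the "update" setting recorded for "foo"
+ * in .gitmodules; and finally the default of "checkout". For a
+ * freshly cloned submodule, "merge", "rebase" and "none" are
+ * downgraded to "checkout".
+ */
+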
+struct update_clone_data {
+ const struct submodule *sub;
+ struct object_id oid;
+ unsigned just_cloned;
+};
+
+struct submodule_update_clone {
+ /* index into 'update_data.list', the list of submodules to look into for cloning */
+ int current;
+
+ /* configuration parameters which are passed on to the children */
+ const struct update_data *update_data;
+
+ /* to be consumed by update_submodule() */
+ struct update_clone_data *update_clone;
+	int update_clone_nr;
+	int update_clone_alloc;
+
+ /* If we want to stop as fast as possible and return an error */
+ unsigned quickstop : 1;
+
+ /* failed clones to be retried again */
+ const struct cache_entry **failed_clones;
+ int failed_clones_nr, failed_clones_alloc;
+};
+#define SUBMODULE_UPDATE_CLONE_INIT { 0 }
+
+static void submodule_update_clone_release(struct submodule_update_clone *suc)
+{
+ free(suc->update_clone);
+ free(suc->failed_clones);
+}
+
+struct update_data {
+ const char *prefix;
+ char *displaypath;
+ enum submodule_update_type update_default;
+ struct object_id suboid;
+ struct string_list references;
+ struct submodule_update_strategy update_strategy;
+ struct list_objects_filter_options *filter_options;
+ struct module_list list;
+ int depth;
+ int max_jobs;
+ int single_branch;
+ int recommend_shallow;
+ unsigned int require_init;
+ unsigned int force;
+ unsigned int quiet;
+ unsigned int nofetch;
+ unsigned int remote;
+ unsigned int progress;
+ unsigned int dissociate;
+ unsigned int init;
+ unsigned int warn_if_uninitialized;
+ unsigned int recursive;
+
+ /* copied over from update_clone_data */
+ struct object_id oid;
+ unsigned int just_cloned;
+ const char *sm_path;
+};
+#define UPDATE_DATA_INIT { \
+ .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT, \
+ .list = MODULE_LIST_INIT, \
+ .recommend_shallow = -1, \
+ .references = STRING_LIST_INIT_DUP, \
+ .single_branch = -1, \
+ .max_jobs = 1, \
+}
+
+static void update_data_release(struct update_data *ud)
+{
+ free(ud->displaypath);
+ module_list_release(&ud->list);
+}
+
+static void next_submodule_warn_missing(struct submodule_update_clone *suc,
+ struct strbuf *out, const char *displaypath)
+{
+ /*
+ * Only mention uninitialized submodules when their
+ * paths have been specified.
+ */
+ if (suc->update_data->warn_if_uninitialized) {
+ strbuf_addf(out,
+ _("Submodule path '%s' not initialized"),
+ displaypath);
+ strbuf_addch(out, '\n');
+ strbuf_addstr(out,
+ _("Maybe you want to use 'update --init'?"));
+ strbuf_addch(out, '\n');
+ }
+}
+
+/**
+ * Determine whether 'ce' needs to be cloned. If so, prepare the 'child' to
+ * run the clone. Returns 1 if 'ce' needs to be cloned, 0 otherwise.
+ */
+static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
+ struct child_process *child,
+ struct submodule_update_clone *suc,
+ struct strbuf *out)
+{
+ const struct submodule *sub = NULL;
+ const char *url = NULL;
+ const char *update_string;
+ enum submodule_update_type update_type;
+ char *key;
+ const struct update_data *ud = suc->update_data;
+ char *displaypath = get_submodule_displaypath(ce->name, ud->prefix);
+ struct strbuf sb = STRBUF_INIT;
+ int needs_cloning = 0;
+ int need_free_url = 0;
+
+ if (ce_stage(ce)) {
+ strbuf_addf(out, _("Skipping unmerged submodule %s"), displaypath);
+ strbuf_addch(out, '\n');
+ goto cleanup;
+ }
+
+ sub = submodule_from_path(the_repository, null_oid(), ce->name);
+
+ if (!sub) {
+ next_submodule_warn_missing(suc, out, displaypath);
+ goto cleanup;
+ }
+
+ key = xstrfmt("submodule.%s.update", sub->name);
+ if (!repo_config_get_string_tmp(the_repository, key, &update_string)) {
+ update_type = parse_submodule_update_type(update_string);
+ } else {
+ update_type = sub->update_strategy.type;
+ }
+ free(key);
+
+ if (suc->update_data->update_strategy.type == SM_UPDATE_NONE
+ || (suc->update_data->update_strategy.type == SM_UPDATE_UNSPECIFIED
+ && update_type == SM_UPDATE_NONE)) {
+ strbuf_addf(out, _("Skipping submodule '%s'"), displaypath);
+ strbuf_addch(out, '\n');
+ goto cleanup;
+ }
+
+ /* Check if the submodule has been initialized. */
+ if (!is_submodule_active(the_repository, ce->name)) {
+ next_submodule_warn_missing(suc, out, displaypath);
+ goto cleanup;
+ }
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "submodule.%s.url", sub->name);
+ if (repo_config_get_string_tmp(the_repository, sb.buf, &url)) {
+ if (starts_with_dot_slash(sub->url) ||
+ starts_with_dot_dot_slash(sub->url)) {
+ url = resolve_relative_url(sub->url, NULL, 0);
+ need_free_url = 1;
+ } else
+ url = sub->url;
+ }
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s/.git", ce->name);
+ needs_cloning = !file_exists(sb.buf);
+
+ ALLOC_GROW(suc->update_clone, suc->update_clone_nr + 1,
+ suc->update_clone_alloc);
+ oidcpy(&suc->update_clone[suc->update_clone_nr].oid, &ce->oid);
+ suc->update_clone[suc->update_clone_nr].just_cloned = needs_cloning;
+ suc->update_clone[suc->update_clone_nr].sub = sub;
+ suc->update_clone_nr++;
+
+ if (!needs_cloning)
+ goto cleanup;
+
+ child->git_cmd = 1;
+ child->no_stdin = 1;
+ child->stdout_to_stderr = 1;
+ child->err = -1;
+ strvec_push(&child->args, "submodule--helper");
+ strvec_push(&child->args, "clone");
+ if (suc->update_data->progress)
+ strvec_push(&child->args, "--progress");
+ if (suc->update_data->quiet)
+ strvec_push(&child->args, "--quiet");
+ if (suc->update_data->prefix)
+ strvec_pushl(&child->args, "--prefix", suc->update_data->prefix, NULL);
+ if (suc->update_data->recommend_shallow && sub->recommend_shallow == 1)
+ strvec_push(&child->args, "--depth=1");
+ else if (suc->update_data->depth)
+ strvec_pushf(&child->args, "--depth=%d", suc->update_data->depth);
+ if (suc->update_data->filter_options && suc->update_data->filter_options->choice)
+ strvec_pushf(&child->args, "--filter=%s",
+ expand_list_objects_filter_spec(suc->update_data->filter_options));
+ if (suc->update_data->require_init)
+ strvec_push(&child->args, "--require-init");
+ strvec_pushl(&child->args, "--path", sub->path, NULL);
+ strvec_pushl(&child->args, "--name", sub->name, NULL);
+ strvec_pushl(&child->args, "--url", url, NULL);
+ if (suc->update_data->references.nr) {
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &suc->update_data->references)
+ strvec_pushl(&child->args, "--reference", item->string, NULL);
+ }
+ if (suc->update_data->dissociate)
+ strvec_push(&child->args, "--dissociate");
+ if (suc->update_data->single_branch >= 0)
+ strvec_push(&child->args, suc->update_data->single_branch ?
+ "--single-branch" :
+ "--no-single-branch");
+
+cleanup:
+ free(displaypath);
+ strbuf_release(&sb);
+ if (need_free_url)
+ free((void*)url);
+
+ return needs_cloning;
+}
+
+static int update_clone_get_next_task(struct child_process *child,
+ struct strbuf *err,
+ void *suc_cb,
+ void **idx_task_cb)
+{
+ struct submodule_update_clone *suc = suc_cb;
+ const struct cache_entry *ce;
+ int index;
+
+ for (; suc->current < suc->update_data->list.nr; suc->current++) {
+ ce = suc->update_data->list.entries[suc->current];
+ if (prepare_to_clone_next_submodule(ce, child, suc, err)) {
+ int *p = xmalloc(sizeof(*p));
+
+ *p = suc->current;
+ *idx_task_cb = p;
+ suc->current++;
+ return 1;
+ }
+ }
+
+ /*
+ * The loop above tried cloning each submodule once, now try the
+ * stragglers again, which we can imagine as an extension of the
+ * entry list.
+ */
+ index = suc->current - suc->update_data->list.nr;
+ if (index < suc->failed_clones_nr) {
+ int *p;
+
+ ce = suc->failed_clones[index];
+ if (!prepare_to_clone_next_submodule(ce, child, suc, err)) {
+			suc->current++;
+ strbuf_addstr(err, "BUG: submodule considered for "
+ "cloning, doesn't need cloning "
+ "any more?\n");
+ return 0;
+ }
+ p = xmalloc(sizeof(*p));
+ *p = suc->current;
+ *idx_task_cb = p;
+		suc->current++;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int update_clone_start_failure(struct strbuf *err,
+ void *suc_cb,
+ void *idx_task_cb)
+{
+ struct submodule_update_clone *suc = suc_cb;
+
+ suc->quickstop = 1;
+ return 1;
+}
+
+static int update_clone_task_finished(int result,
+ struct strbuf *err,
+ void *suc_cb,
+ void *idx_task_cb)
+{
+ const struct cache_entry *ce;
+ struct submodule_update_clone *suc = suc_cb;
+ int *idxP = idx_task_cb;
+ int idx = *idxP;
+
+ free(idxP);
+
+ if (!result)
+ return 0;
+
+ if (idx < suc->update_data->list.nr) {
+ ce = suc->update_data->list.entries[idx];
+ strbuf_addf(err, _("Failed to clone '%s'. Retry scheduled"),
+ ce->name);
+ strbuf_addch(err, '\n');
+ ALLOC_GROW(suc->failed_clones,
+ suc->failed_clones_nr + 1,
+ suc->failed_clones_alloc);
+ suc->failed_clones[suc->failed_clones_nr++] = ce;
+ return 0;
+ } else {
+ idx -= suc->update_data->list.nr;
+ ce = suc->failed_clones[idx];
+ strbuf_addf(err, _("Failed to clone '%s' a second time, aborting"),
+ ce->name);
+ strbuf_addch(err, '\n');
+ suc->quickstop = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int git_update_clone_config(const char *var, const char *value,
+ void *cb)
+{
+ int *max_jobs = cb;
+
+ if (!strcmp(var, "submodule.fetchjobs"))
+ *max_jobs = parse_submodule_fetchjobs(var, value);
+ return 0;
+}
+
+static int is_tip_reachable(const char *path, const struct object_id *oid)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf rev = STRBUF_INIT;
+ char *hex = oid_to_hex(oid);
+
+ cp.git_cmd = 1;
+ cp.dir = path;
+ cp.no_stderr = 1;
+ strvec_pushl(&cp.args, "rev-list", "-n", "1", hex, "--not", "--all", NULL);
+
+ prepare_submodule_repo_env(&cp.env);
+
+ if (capture_command(&cp, &rev, GIT_MAX_HEXSZ + 1) || rev.len)
+ return 0;
+
+ return 1;
+}
+
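+/*
+ * The check above is equivalent to running, inside the submodule:
+ *
+ *	git rev-list -n 1 <oid> --not --all
+ *
+ * which produces no output exactly when <oid> is already reachable
+ * from one of the existing refs.
+ */
+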
+static int fetch_in_submodule(const char *module_path, int depth, int quiet,
+ const struct object_id *oid)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ prepare_submodule_repo_env(&cp.env);
+ cp.git_cmd = 1;
+ cp.dir = module_path;
+
+ strvec_push(&cp.args, "fetch");
+ if (quiet)
+ strvec_push(&cp.args, "--quiet");
+ if (depth)
+ strvec_pushf(&cp.args, "--depth=%d", depth);
+ if (oid) {
+ char *hex = oid_to_hex(oid);
+ char *remote = get_default_remote();
+
+ strvec_pushl(&cp.args, remote, hex, NULL);
+ free(remote);
+ }
+
+ return run_command(&cp);
+}
+
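+/*
+ * Roughly equivalent to running, inside the submodule work tree:
+ *
+ *	git fetch [--quiet] [--depth=<n>] [<default-remote> <oid>]
+ *
+ * where the remote/oid pair is only passed when one specific commit
+ * has to be fetched directly.
+ */
+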
+static int run_update_command(const struct update_data *ud, int subforce)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ char *oid = oid_to_hex(&ud->oid);
+ int ret;
+
+ switch (ud->update_strategy.type) {
+ case SM_UPDATE_CHECKOUT:
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "checkout", "-q", NULL);
+ if (subforce)
+ strvec_push(&cp.args, "-f");
+ break;
+ case SM_UPDATE_REBASE:
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "rebase");
+ if (ud->quiet)
+ strvec_push(&cp.args, "--quiet");
+ break;
+ case SM_UPDATE_MERGE:
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "merge");
+ if (ud->quiet)
+ strvec_push(&cp.args, "--quiet");
+ break;
+ case SM_UPDATE_COMMAND:
+ cp.use_shell = 1;
+ strvec_push(&cp.args, ud->update_strategy.command);
+ break;
+ default:
+ BUG("unexpected update strategy type: %d",
+ ud->update_strategy.type);
+ }
+ strvec_push(&cp.args, oid);
+
+ cp.dir = ud->sm_path;
+ prepare_submodule_repo_env(&cp.env);
+ if ((ret = run_command(&cp))) {
+ switch (ud->update_strategy.type) {
+ case SM_UPDATE_CHECKOUT:
+ die_message(_("Unable to checkout '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
+ /* No "ret" assignment, use "git checkout"'s */
+ break;
+ case SM_UPDATE_REBASE:
+ ret = die_message(_("Unable to rebase '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
+ break;
+ case SM_UPDATE_MERGE:
+ ret = die_message(_("Unable to merge '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
+ break;
+ case SM_UPDATE_COMMAND:
+ ret = die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
+ ud->update_strategy.command, oid, ud->displaypath);
+ break;
+ default:
+ BUG("unexpected update strategy type: %d",
+ ud->update_strategy.type);
+ }
+
+ return ret;
+ }
+
+ if (ud->quiet)
+ return 0;
+
+ switch (ud->update_strategy.type) {
+ case SM_UPDATE_CHECKOUT:
+ printf(_("Submodule path '%s': checked out '%s'\n"),
+ ud->displaypath, oid);
+ break;
+ case SM_UPDATE_REBASE:
+ printf(_("Submodule path '%s': rebased into '%s'\n"),
+ ud->displaypath, oid);
+ break;
+ case SM_UPDATE_MERGE:
+ printf(_("Submodule path '%s': merged in '%s'\n"),
+ ud->displaypath, oid);
+ break;
+ case SM_UPDATE_COMMAND:
+ printf(_("Submodule path '%s': '%s %s'\n"),
+ ud->displaypath, ud->update_strategy.command, oid);
+ break;
+ default:
+ BUG("unexpected update strategy type: %d",
+ ud->update_strategy.type);
+ }
+
+ return 0;
+}
+
+static int run_update_procedure(const struct update_data *ud)
+{
+ int subforce = is_null_oid(&ud->suboid) || ud->force;
+
+ if (!ud->nofetch) {
+ /*
+ * Run fetch only if `oid` isn't present or it
+ * is not reachable from a ref.
+ */
+ if (!is_tip_reachable(ud->sm_path, &ud->oid) &&
+ fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, NULL) &&
+ !ud->quiet)
+ fprintf_ln(stderr,
+ _("Unable to fetch in submodule path '%s'; "
+ "trying to directly fetch %s:"),
+ ud->displaypath, oid_to_hex(&ud->oid));
+ /*
+ * Now we tried the usual fetch, but `oid` may
+ * not be reachable from any of the refs.
+ */
+ if (!is_tip_reachable(ud->sm_path, &ud->oid) &&
+ fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, &ud->oid))
+ return die_message(_("Fetched in submodule path '%s', but it did not "
+ "contain %s. Direct fetching of that commit failed."),
+ ud->displaypath, oid_to_hex(&ud->oid));
+ }
+
+ return run_update_command(ud, subforce);
+}
+
+static int remote_submodule_branch(const char *path, const char **branch)
+{
+ const struct submodule *sub;
+ char *key;
+ *branch = NULL;
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+ if (!sub)
+ return die_message(_("could not initialize submodule at path '%s'"),
+ path);
+
+ key = xstrfmt("submodule.%s.branch", sub->name);
+ if (repo_config_get_string_tmp(the_repository, key, branch))
+ *branch = sub->branch;
+ free(key);
+
+ if (!*branch) {
+ *branch = "HEAD";
+ return 0;
+ }
+
+ if (!strcmp(*branch, ".")) {
+ const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+
+ if (!refname)
+ return die_message(_("No such ref: %s"), "HEAD");
+
+ /* detached HEAD */
+ if (!strcmp(refname, "HEAD"))
+ return die_message(_("Submodule (%s) branch configured to inherit "
+ "branch from superproject, but the superproject "
+ "is not on any branch"), sub->name);
+
+ if (!skip_prefix(refname, "refs/heads/", &refname))
+ return die_message(_("Expecting a full ref name, got %s"),
+ refname);
+
+ *branch = refname;
+ return 0;
+ }
+
+ /* Our "branch" is coming from repo_config_get_string_tmp() */
+ return 0;
+}
+
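+/*
+ * Illustrative example (hypothetical submodule "foo"): with
+ * "submodule.foo.branch" unset the remote-tracking branch defaults
+ * to HEAD; setting it to "." makes the submodule follow whatever
+ * branch the superproject itself has checked out.
+ */
+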
+static int ensure_core_worktree(const char *path)
+{
+ const char *cw;
+ struct repository subrepo;
+
+ if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
+ return die_message(_("could not get a repository handle for submodule '%s'"),
+ path);
+
+ if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
+ char *cfg_file, *abs_path;
+ const char *rel_path;
+ struct strbuf sb = STRBUF_INIT;
+
+ cfg_file = repo_git_path(&subrepo, "config");
+
+ abs_path = absolute_pathdup(path);
+ rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
+
+ git_config_set_in_file(cfg_file, "core.worktree", rel_path);
+
+ free(cfg_file);
+ free(abs_path);
+ strbuf_release(&sb);
+ }
+
+ repo_clear(&subrepo);
+ return 0;
+}
+
+static const char *submodule_update_type_to_label(enum submodule_update_type type)
+{
+ switch (type) {
+ case SM_UPDATE_CHECKOUT:
+ return "checkout";
+ case SM_UPDATE_MERGE:
+ return "merge";
+ case SM_UPDATE_REBASE:
+ return "rebase";
+ case SM_UPDATE_UNSPECIFIED:
+ case SM_UPDATE_NONE:
+ case SM_UPDATE_COMMAND:
+ break;
+ }
+ BUG("unreachable with type %d", type);
+}
+
+static void update_data_to_args(const struct update_data *update_data,
+ struct strvec *args)
+{
+ enum submodule_update_type update_type = update_data->update_default;
+
+ if (update_data->displaypath) {
+ strvec_push(args, "--super-prefix");
+ strvec_pushf(args, "%s/", update_data->displaypath);
+ }
+ strvec_pushl(args, "submodule--helper", "update", "--recursive", NULL);
+ strvec_pushf(args, "--jobs=%d", update_data->max_jobs);
+ if (update_data->quiet)
+ strvec_push(args, "--quiet");
+ if (update_data->force)
+ strvec_push(args, "--force");
+ if (update_data->init)
+ strvec_push(args, "--init");
+ if (update_data->remote)
+ strvec_push(args, "--remote");
+ if (update_data->nofetch)
+ strvec_push(args, "--no-fetch");
+ if (update_data->dissociate)
+ strvec_push(args, "--dissociate");
+ if (update_data->progress)
+ strvec_push(args, "--progress");
+ if (update_data->require_init)
+ strvec_push(args, "--require-init");
+ if (update_data->depth)
+ strvec_pushf(args, "--depth=%d", update_data->depth);
+ if (update_type != SM_UPDATE_UNSPECIFIED)
+ strvec_pushf(args, "--%s",
+ submodule_update_type_to_label(update_type));
+
+ if (update_data->references.nr) {
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &update_data->references)
+ strvec_pushl(args, "--reference", item->string, NULL);
+ }
+ if (update_data->filter_options && update_data->filter_options->choice)
+ strvec_pushf(args, "--filter=%s",
+ expand_list_objects_filter_spec(
+ update_data->filter_options));
+ if (update_data->recommend_shallow == 0)
+ strvec_push(args, "--no-recommend-shallow");
+ else if (update_data->recommend_shallow == 1)
+ strvec_push(args, "--recommend-shallow");
+ if (update_data->single_branch >= 0)
+ strvec_push(args, update_data->single_branch ?
+ "--single-branch" :
+ "--no-single-branch");
+}
+
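+/*
+ * The argument list built here is what a recursive update re-runs in
+ * every submodule, i.e. something along the lines of (flags depending
+ * on the options given):
+ *
+ *	git --super-prefix <displaypath>/ submodule--helper update \
+ *		--recursive --jobs=<n> [--quiet] [--init] ...
+ */
+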
+static int update_submodule(struct update_data *update_data)
+{
+ int ret;
+
+ ret = determine_submodule_update_strategy(the_repository,
+ update_data->just_cloned,
+ update_data->sm_path,
+ update_data->update_default,
+ &update_data->update_strategy);
+ if (ret)
+ return ret;
+
+ if (update_data->just_cloned)
+ oidcpy(&update_data->suboid, null_oid());
+ else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
+ return die_message(_("Unable to find current revision in submodule path '%s'"),
+ update_data->displaypath);
+
+ if (update_data->remote) {
+ char *remote_name;
+ const char *branch;
+ char *remote_ref;
+ int code;
+
+ code = get_default_remote_submodule(update_data->sm_path, &remote_name);
+ if (code)
+ return code;
+ code = remote_submodule_branch(update_data->sm_path, &branch);
+ if (code)
+ return code;
+ remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch);
+
+ free(remote_name);
+
+ if (!update_data->nofetch) {
+ if (fetch_in_submodule(update_data->sm_path, update_data->depth,
+ 0, NULL))
+ return die_message(_("Unable to fetch in submodule path '%s'"),
+ update_data->sm_path);
+ }
+
+ if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
+ return die_message(_("Unable to find %s revision in submodule path '%s'"),
+ remote_ref, update_data->sm_path);
+
+ free(remote_ref);
+ }
+
+ if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force) {
+ ret = run_update_procedure(update_data);
+ if (ret)
+ return ret;
+ }
+
+ if (update_data->recursive) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct update_data next = *update_data;
+
+ next.prefix = NULL;
+ oidcpy(&next.oid, null_oid());
+ oidcpy(&next.suboid, null_oid());
+
+ cp.dir = update_data->sm_path;
+ cp.git_cmd = 1;
+ prepare_submodule_repo_env(&cp.env);
+ update_data_to_args(&next, &cp.args);
+
+ ret = run_command(&cp);
+ if (ret)
+ die_message(_("Failed to recurse into submodule path '%s'"),
+ update_data->displaypath);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int update_submodules(struct update_data *update_data)
+{
+ int i, ret = 0;
+ struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "submodule",
+ .tr2_label = "parallel/update",
+
+ .processes = update_data->max_jobs,
+
+ .get_next_task = update_clone_get_next_task,
+ .start_failure = update_clone_start_failure,
+ .task_finished = update_clone_task_finished,
+ .data = &suc,
+ };
+
+ suc.update_data = update_data;
+ run_processes_parallel(&opts);
+
+ /*
+	 * We saved the output of the parallel clones and print it all
+	 * at once now. That means:
+	 * - the listener does not have to interleave its (checkout)
+ * work with our fetching. The writes involved in a
+ * checkout involve more straightforward sequential I/O.
+ * - the listener can avoid doing any work if fetching failed.
+ */
+ if (suc.quickstop) {
+ ret = 1;
+ goto cleanup;
+ }
+
+ for (i = 0; i < suc.update_clone_nr; i++) {
+ struct update_clone_data ucd = suc.update_clone[i];
+ int code;
+
+ oidcpy(&update_data->oid, &ucd.oid);
+ update_data->just_cloned = ucd.just_cloned;
+ update_data->sm_path = ucd.sub->path;
+
+ code = ensure_core_worktree(update_data->sm_path);
+ if (code)
+ goto fail;
+
+ update_data->displaypath = get_submodule_displaypath(
+ update_data->sm_path, update_data->prefix);
+ code = update_submodule(update_data);
+ FREE_AND_NULL(update_data->displaypath);
+fail:
+ if (!code)
+ continue;
+ ret = code;
+ if (ret == 128)
+ goto cleanup;
+ }
+
+cleanup:
+ submodule_update_clone_release(&suc);
+ string_list_clear(&update_data->references, 0);
+ return ret;
+}
+
+static int module_update(int argc, const char **argv, const char *prefix)
+{
+ struct pathspec pathspec = { 0 };
+ struct pathspec pathspec2 = { 0 };
+ struct update_data opt = UPDATE_DATA_INIT;
+ struct list_objects_filter_options filter_options =
+ LIST_OBJECTS_FILTER_INIT;
+ int ret;
+ struct option module_update_options[] = {
+ OPT__FORCE(&opt.force, N_("force checkout updates"), 0),
+ OPT_BOOL(0, "init", &opt.init,
+ N_("initialize uninitialized submodules before update")),
+ OPT_BOOL(0, "remote", &opt.remote,
+ N_("use SHA-1 of submodule's remote tracking branch")),
+ OPT_BOOL(0, "recursive", &opt.recursive,
+ N_("traverse submodules recursively")),
+ OPT_BOOL('N', "no-fetch", &opt.nofetch,
+ N_("don't fetch new objects from the remote site")),
+ OPT_SET_INT(0, "checkout", &opt.update_default,
+ N_("use the 'checkout' update strategy (default)"),
+ SM_UPDATE_CHECKOUT),
+ OPT_SET_INT('m', "merge", &opt.update_default,
+ N_("use the 'merge' update strategy"),
+ SM_UPDATE_MERGE),
+ OPT_SET_INT('r', "rebase", &opt.update_default,
+ N_("use the 'rebase' update strategy"),
+ SM_UPDATE_REBASE),
+ OPT_STRING_LIST(0, "reference", &opt.references, N_("repo"),
+ N_("reference repository")),
+ OPT_BOOL(0, "dissociate", &opt.dissociate,
+ N_("use --reference only while cloning")),
+ OPT_INTEGER(0, "depth", &opt.depth,
+ N_("create a shallow clone truncated to the "
+ "specified number of revisions")),
+ OPT_INTEGER('j', "jobs", &opt.max_jobs,
+ N_("parallel jobs")),
+ OPT_BOOL(0, "recommend-shallow", &opt.recommend_shallow,
+ N_("whether the initial clone should follow the shallow recommendation")),
+ OPT__QUIET(&opt.quiet, N_("don't print cloning progress")),
+ OPT_BOOL(0, "progress", &opt.progress,
+ N_("force cloning progress")),
+ OPT_BOOL(0, "require-init", &opt.require_init,
+ N_("disallow cloning into non-empty directory, implies --init")),
+ OPT_BOOL(0, "single-branch", &opt.single_branch,
+ N_("clone only one branch, HEAD or --branch")),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule [--quiet] update"
+ " [--init [--filter=<filter-spec>]] [--remote]"
+ " [-N|--no-fetch] [-f|--force]"
+ " [--checkout|--merge|--rebase]"
+ " [--[no-]recommend-shallow] [--reference <repository>]"
+ " [--recursive] [--[no-]single-branch] [--] [<path>...]"),
+ NULL
+ };
+
+ update_clone_config_from_gitmodules(&opt.max_jobs);
+ git_config(git_update_clone_config, &opt.max_jobs);
+
+ argc = parse_options(argc, argv, prefix, module_update_options,
+ git_submodule_helper_usage, 0);
+
+ if (opt.require_init)
+ opt.init = 1;
+
+ if (filter_options.choice && !opt.init) {
+ usage_with_options(git_submodule_helper_usage,
+ module_update_options);
+ }
+
+ opt.filter_options = &filter_options;
+ opt.prefix = prefix;
+
+ if (opt.update_default)
+ opt.update_strategy.type = opt.update_default;
+
+ if (module_list_compute(argv, prefix, &pathspec, &opt.list) < 0) {
+ ret = 1;
+ goto cleanup;
+ }
+
+ if (pathspec.nr)
+ opt.warn_if_uninitialized = 1;
+
+ if (opt.init) {
+ struct module_list list = MODULE_LIST_INIT;
+ struct init_cb info = INIT_CB_INIT;
+
+ if (module_list_compute(argv, opt.prefix,
+ &pathspec2, &list) < 0) {
+ module_list_release(&list);
+ ret = 1;
+ goto cleanup;
+ }
+
+ /*
+ * If there are no path args and submodule.active is set then,
+ * by default, only initialize 'active' modules.
+ */
+ if (!argc && git_config_get_value_multi("submodule.active"))
+ module_list_active(&list);
+
+ info.prefix = opt.prefix;
+ if (opt.quiet)
+ info.flags |= OPT_QUIET;
+
+ for_each_listed_submodule(&list, init_submodule_cb, &info);
+ module_list_release(&list);
+ }
+
+ ret = update_submodules(&opt);
+cleanup:
+ update_data_release(&opt);
+ list_objects_filter_release(&filter_options);
+ clear_pathspec(&pathspec);
+ clear_pathspec(&pathspec2);
+ return ret;
+}
+
+static int push_check(int argc, const char **argv, const char *prefix)
+{
+ struct remote *remote;
+ const char *superproject_head;
+ char *head;
+ int detached_head = 0;
+ struct object_id head_oid;
+
+ if (argc < 3)
+ die("submodule--helper push-check requires at least 2 arguments");
+
+ /*
+	 * argv[1] is the superproject's resolved HEAD ref.  If it is
+	 * "HEAD", the superproject is in a detached HEAD state;
+	 * otherwise it is the name of the resolved ref.
+ */
+ superproject_head = argv[1];
+ argv++;
+ argc--;
+ /* Get the submodule's head ref and determine if it is detached */
+ head = resolve_refdup("HEAD", 0, &head_oid, NULL);
+ if (!head)
+ die(_("Failed to resolve HEAD as a valid ref."));
+ if (!strcmp(head, "HEAD"))
+ detached_head = 1;
+
+ /*
+ * The remote must be configured.
+ * This is to avoid pushing to the exact same URL as the parent.
+ */
+ remote = pushremote_get(argv[1]);
+ if (!remote || remote->origin == REMOTE_UNCONFIGURED)
+ die("remote '%s' not configured", argv[1]);
+
+ /* Check the refspec */
+ if (argc > 2) {
+ int i;
+ struct ref *local_refs = get_local_heads();
+ struct refspec refspec = REFSPEC_INIT_PUSH;
+
+ refspec_appendn(&refspec, argv + 2, argc - 2);
+
+ for (i = 0; i < refspec.nr; i++) {
+ const struct refspec_item *rs = &refspec.items[i];
+
+ if (rs->pattern || rs->matching)
+ continue;
+
+ /* LHS must match a single ref */
+ switch (count_refspec_match(rs->src, local_refs, NULL)) {
+ case 1:
+ break;
+ case 0:
+ /*
+ * If LHS matches 'HEAD' then we need to ensure
+ * that it matches the same named branch
+ * checked out in the superproject.
+ */
+ if (!strcmp(rs->src, "HEAD")) {
+ if (!detached_head &&
+ !strcmp(head, superproject_head))
+ break;
+ die("HEAD does not match the named branch in the superproject");
+ }
+ /* fallthrough */
+ default:
+ die("src refspec '%s' must name a ref",
+ rs->src);
+ }
+ }
+ refspec_clear(&refspec);
+ }
+ free(head);
+
+ return 0;
+}
+
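+/*
+ * Expected invocation (sketch):
+ *
+ *	git submodule--helper push-check <superproject-HEAD> <remote> [<refspec>...]
+ *
+ * i.e. the first argument is the superproject's resolved HEAD, the
+ * second the remote being pushed to, and any remaining arguments are
+ * the refspecs to validate.
+ */
+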
+static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct pathspec pathspec = { 0 };
+ struct module_list list = MODULE_LIST_INIT;
+ struct option embed_gitdir_options[] = {
+ OPT_END()
+ };
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule absorbgitdirs [<options>] [<path>...]"),
+ NULL
+ };
+ int ret = 1;
+
+ argc = parse_options(argc, argv, prefix, embed_gitdir_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
+ goto cleanup;
+
+ for (i = 0; i < list.nr; i++)
+ absorb_git_dir_into_superproject(list.entries[i]->name);
+
+ ret = 0;
+cleanup:
+ clear_pathspec(&pathspec);
+ module_list_release(&list);
+ return ret;
+}
+
+static int module_set_url(int argc, const char **argv, const char *prefix)
+{
+ int quiet = 0;
+ const char *newurl;
+ const char *path;
+ char *config_name;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("suppress output for setting url of a submodule")),
+ OPT_END()
+ };
+ const char *const usage[] = {
+ N_("git submodule set-url [--quiet] <path> <newurl>"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (argc != 2 || !(path = argv[0]) || !(newurl = argv[1]))
+ usage_with_options(usage, options);
+
+ config_name = xstrfmt("submodule.%s.url", path);
+
+ config_set_in_gitmodules_file_gently(config_name, newurl);
+ sync_submodule(path, prefix, quiet ? OPT_QUIET : 0);
+
+ free(config_name);
+
+ return 0;
+}
+
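+/*
+ * Illustrative usage (hypothetical path "lib/foo"):
+ *
+ *	git submodule set-url lib/foo https://example.com/foo.git
+ *
+ * records the new URL for the entry in .gitmodules and then syncs it
+ * into .git/config and the submodule's configured remote.
+ */
+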
+static int module_set_branch(int argc, const char **argv, const char *prefix)
+{
+ int opt_default = 0, ret;
+ const char *opt_branch = NULL;
+ const char *path;
+ char *config_name;
+ struct option options[] = {
+ /*
+ * We accept the `quiet` option for uniformity across subcommands,
+ * though there is nothing to make less verbose in this subcommand.
+ */
+ OPT_NOOP_NOARG('q', "quiet"),
+
+ OPT_BOOL('d', "default", &opt_default,
+ N_("set the default tracking branch to master")),
+ OPT_STRING('b', "branch", &opt_branch, N_("branch"),
+ N_("set the default tracking branch")),
+ OPT_END()
+ };
+ const char *const usage[] = {
+ N_("git submodule set-branch [-q|--quiet] (-d|--default) <path>"),
+ N_("git submodule set-branch [-q|--quiet] (-b|--branch) <branch> <path>"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (!opt_branch && !opt_default)
+ die(_("--branch or --default required"));
+
+ if (opt_branch && opt_default)
+ die(_("options '%s' and '%s' cannot be used together"), "--branch", "--default");
+
+ if (argc != 1 || !(path = argv[0]))
+ usage_with_options(usage, options);
+
+ config_name = xstrfmt("submodule.%s.branch", path);
+ ret = config_set_in_gitmodules_file_gently(config_name, opt_branch);
+
+ free(config_name);
+ return !!ret;
+}
+
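+/*
+ * Illustrative usage (hypothetical path "lib/foo"):
+ *
+ *	git submodule set-branch --branch main lib/foo
+ *	git submodule set-branch --default lib/foo
+ *
+ * the former records "branch = main" for the entry in .gitmodules,
+ * the latter passes a NULL value down and thereby removes the
+ * setting again.
+ */
+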
+static int module_create_branch(int argc, const char **argv, const char *prefix)
+{
+ enum branch_track track;
+ int quiet = 0, force = 0, reflog = 0, dry_run = 0;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("print only error messages")),
+ OPT__FORCE(&force, N_("force creation"), 0),
+ OPT_BOOL(0, "create-reflog", &reflog,
+ N_("create the branch's reflog")),
+ OPT_CALLBACK_F('t', "track", &track, "(direct|inherit)",
+ N_("set branch tracking configuration"),
+ PARSE_OPT_OPTARG,
+ parse_opt_tracking_mode),
+ OPT__DRY_RUN(&dry_run,
+ N_("show whether the branch would be created")),
+ OPT_END()
+ };
+ const char *const usage[] = {
+ N_("git submodule--helper create-branch [-f|--force] [--create-reflog] [-q|--quiet] [-t|--track] [-n|--dry-run] <name> <start-oid> <start-name>"),
+ NULL
+ };
+
+ git_config(git_default_config, NULL);
+ track = git_branch_track;
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (argc != 3)
+ usage_with_options(usage, options);
+
+ if (!quiet && !dry_run)
+ printf_ln(_("creating branch '%s'"), argv[0]);
+
+ create_branches_recursively(the_repository, argv[0], argv[1], argv[2],
+ force, reflog, quiet, track, dry_run);
+ return 0;
+}
+
+struct add_data {
+ const char *prefix;
+ const char *branch;
+ const char *reference_path;
+ char *sm_path;
+ const char *sm_name;
+ const char *repo;
+ const char *realrepo;
+ int depth;
+ unsigned int force: 1;
+ unsigned int quiet: 1;
+ unsigned int progress: 1;
+ unsigned int dissociate: 1;
+};
+#define ADD_DATA_INIT { .depth = -1 }
+
+static void append_fetch_remotes(struct strbuf *msg, const char *git_dir_path)
+{
+ struct child_process cp_remote = CHILD_PROCESS_INIT;
+ struct strbuf sb_remote_out = STRBUF_INIT;
+
+ cp_remote.git_cmd = 1;
+ strvec_pushf(&cp_remote.env,
+ "GIT_DIR=%s", git_dir_path);
+ strvec_push(&cp_remote.env, "GIT_WORK_TREE=.");
+ strvec_pushl(&cp_remote.args, "remote", "-v", NULL);
+ if (!capture_command(&cp_remote, &sb_remote_out, 0)) {
+ char *next_line;
+ char *line = sb_remote_out.buf;
+
+ while ((next_line = strchr(line, '\n')) != NULL) {
+ size_t len = next_line - line;
+
+ if (strip_suffix_mem(line, &len, " (fetch)"))
+ strbuf_addf(msg, " %.*s\n", (int)len, line);
+ line = next_line + 1;
+ }
+ }
+
+ strbuf_release(&sb_remote_out);
+}
+
+static int add_submodule(const struct add_data *add_data)
+{
+ char *submod_gitdir_path;
+ struct module_clone_data clone_data = MODULE_CLONE_DATA_INIT;
+ struct string_list reference = STRING_LIST_INIT_NODUP;
+ int ret = -1;
+
+ /* perhaps the path already exists and is already a git repo, else clone it */
+ if (is_directory(add_data->sm_path)) {
+ struct strbuf sm_path = STRBUF_INIT;
+ strbuf_addstr(&sm_path, add_data->sm_path);
+ submod_gitdir_path = xstrfmt("%s/.git", add_data->sm_path);
+ if (is_nonbare_repository_dir(&sm_path))
+ printf(_("Adding existing repo at '%s' to the index\n"),
+ add_data->sm_path);
+ else
+ die(_("'%s' already exists and is not a valid git repo"),
+ add_data->sm_path);
+ strbuf_release(&sm_path);
+ free(submod_gitdir_path);
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ submod_gitdir_path = xstrfmt(".git/modules/%s", add_data->sm_name);
+
+ if (is_directory(submod_gitdir_path)) {
+ if (!add_data->force) {
+ struct strbuf msg = STRBUF_INIT;
+ char *die_msg;
+
+ strbuf_addf(&msg, _("A git directory for '%s' is found "
+ "locally with remote(s):\n"),
+ add_data->sm_name);
+
+ append_fetch_remotes(&msg, submod_gitdir_path);
+ free(submod_gitdir_path);
+
+ strbuf_addf(&msg, _("If you want to reuse this local git "
+ "directory instead of cloning again from\n"
+ " %s\n"
+ "use the '--force' option. If the local git "
+ "directory is not the correct repo\n"
+ "or you are unsure what this means choose "
+ "another name with the '--name' option."),
+ add_data->realrepo);
+
+ die_msg = strbuf_detach(&msg, NULL);
+ die("%s", die_msg);
+ } else {
+ printf(_("Reactivating local git directory for "
+ "submodule '%s'\n"), add_data->sm_name);
+ }
+ }
+ free(submod_gitdir_path);
+
+ clone_data.prefix = add_data->prefix;
+ clone_data.path = add_data->sm_path;
+ clone_data.name = add_data->sm_name;
+ clone_data.url = add_data->realrepo;
+ clone_data.quiet = add_data->quiet;
+ clone_data.progress = add_data->progress;
+ if (add_data->reference_path) {
+ char *p = xstrdup(add_data->reference_path);
+
+ string_list_append(&reference, p)->util = p;
+ }
+ clone_data.dissociate = add_data->dissociate;
+ if (add_data->depth >= 0)
+ clone_data.depth = xstrfmt("%d", add_data->depth);
+
+ if (clone_submodule(&clone_data, &reference))
+ goto cleanup;
+
+ prepare_submodule_repo_env(&cp.env);
+ cp.git_cmd = 1;
+ cp.dir = add_data->sm_path;
+ /*
+ * NOTE: we only get here if add_data->force is true, so
+ * passing --force to checkout is reasonable.
+ */
+ strvec_pushl(&cp.args, "checkout", "-f", "-q", NULL);
+
+ if (add_data->branch) {
+ strvec_pushl(&cp.args, "-B", add_data->branch, NULL);
+ strvec_pushf(&cp.args, "origin/%s", add_data->branch);
+ }
+
+ if (run_command(&cp))
+ die(_("unable to checkout submodule '%s'"), add_data->sm_path);
+ }
+ ret = 0;
+cleanup:
+ string_list_clear(&reference, 1);
+ return ret;
+}
+
+static int config_submodule_in_gitmodules(const char *name, const char *var, const char *value)
+{
+ char *key;
+ int ret;
+
+ if (!is_writing_gitmodules_ok())
+ die(_("please make sure that the .gitmodules file is in the working tree"));
+
+ key = xstrfmt("submodule.%s.%s", name, var);
+ ret = config_set_in_gitmodules_file_gently(key, value);
+ free(key);
+
+ return ret;
+}
+
+static void configure_added_submodule(struct add_data *add_data)
+{
+ char *key;
+ const char *val;
+ struct child_process add_submod = CHILD_PROCESS_INIT;
+ struct child_process add_gitmodules = CHILD_PROCESS_INIT;
+
+ key = xstrfmt("submodule.%s.url", add_data->sm_name);
+ git_config_set_gently(key, add_data->realrepo);
+ free(key);
+
+ add_submod.git_cmd = 1;
+ strvec_pushl(&add_submod.args, "add",
+ "--no-warn-embedded-repo", NULL);
+ if (add_data->force)
+ strvec_push(&add_submod.args, "--force");
+ strvec_pushl(&add_submod.args, "--", add_data->sm_path, NULL);
+
+ if (run_command(&add_submod))
+ die(_("Failed to add submodule '%s'"), add_data->sm_path);
+
+ if (config_submodule_in_gitmodules(add_data->sm_name, "path", add_data->sm_path) ||
+ config_submodule_in_gitmodules(add_data->sm_name, "url", add_data->repo))
+ die(_("Failed to register submodule '%s'"), add_data->sm_path);
+
+ if (add_data->branch) {
+ if (config_submodule_in_gitmodules(add_data->sm_name,
+ "branch", add_data->branch))
+ die(_("Failed to register submodule '%s'"), add_data->sm_path);
+ }
+
+ add_gitmodules.git_cmd = 1;
+ strvec_pushl(&add_gitmodules.args,
+ "add", "--force", "--", ".gitmodules", NULL);
+
+ if (run_command(&add_gitmodules))
+ die(_("Failed to register submodule '%s'"), add_data->sm_path);
+
+ /*
+ * NEEDSWORK: In a multi-working-tree world this needs to be
+ * set in the per-worktree config.
+ */
+ /*
+ * NEEDSWORK: In the longer run, we need to get rid of this
+ * pattern of querying "submodule.active" before calling
+ * is_submodule_active(), since that function needs to find
+ * out the value of "submodule.active" again anyway.
+ */
+ if (!git_config_get_string_tmp("submodule.active", &val)) {
+ /*
+ * If the submodule being added isn't already covered by the
+ * current configured pathspec, set the submodule's active flag
+ */
+ if (!is_submodule_active(the_repository, add_data->sm_path)) {
+ key = xstrfmt("submodule.%s.active", add_data->sm_name);
+ git_config_set_gently(key, "true");
+ free(key);
+ }
+ } else {
+ key = xstrfmt("submodule.%s.active", add_data->sm_name);
+ git_config_set_gently(key, "true");
+ free(key);
+ }
+}
+
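+/*
+ * Net effect (sketch): "submodule.<name>.url" (and, when needed,
+ * "submodule.<name>.active") end up in .git/config, while the
+ * "path", "url" and optional "branch" entries for <name> go into
+ * .gitmodules, which is then staged together with the new gitlink.
+ */
+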
+static void die_on_index_match(const char *path, int force)
+{
+ struct pathspec ps;
+ const char *args[] = { path, NULL };
+ parse_pathspec(&ps, 0, PATHSPEC_PREFER_CWD, NULL, args);
+
+ if (repo_read_index_preload(the_repository, NULL, 0) < 0)
+ die(_("index file corrupt"));
+
+ if (ps.nr) {
+ int i;
+ char *ps_matched = xcalloc(ps.nr, 1);
+
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+
+ /*
+		 * Since there is only one pathspec, we just need to
+		 * check ps_matched[0] to know if a cache entry
+		 * matched.
+ */
+ for (i = 0; i < the_index.cache_nr; i++) {
+ ce_path_match(&the_index, the_index.cache[i], &ps,
+ ps_matched);
+
+ if (ps_matched[0]) {
+ if (!force)
+ die(_("'%s' already exists in the index"),
+ path);
+ if (!S_ISGITLINK(the_index.cache[i]->ce_mode))
+ die(_("'%s' already exists in the index "
+ "and is not a submodule"), path);
+ break;
+ }
+ }
+ free(ps_matched);
+ }
+ clear_pathspec(&ps);
+}
+
+static void die_on_repo_without_commits(const char *path)
+{
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, path);
+ if (is_nonbare_repository_dir(&sb)) {
+ struct object_id oid;
+ if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ die(_("'%s' does not have a commit checked out"), path);
+ }
+ strbuf_release(&sb);
+}
+
+static int module_add(int argc, const char **argv, const char *prefix)
+{
+ int force = 0, quiet = 0, progress = 0, dissociate = 0;
+ struct add_data add_data = ADD_DATA_INIT;
+ char *to_free = NULL;
+ struct option options[] = {
+ OPT_STRING('b', "branch", &add_data.branch, N_("branch"),
+ N_("branch of repository to add as submodule")),
+ OPT__FORCE(&force, N_("allow adding an otherwise ignored submodule path"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT__QUIET(&quiet, N_("print only error messages")),
+ OPT_BOOL(0, "progress", &progress, N_("force cloning progress")),
+ OPT_STRING(0, "reference", &add_data.reference_path, N_("repository"),
+ N_("reference repository")),
+ OPT_BOOL(0, "dissociate", &dissociate, N_("borrow the objects from reference repositories")),
+ OPT_STRING(0, "name", &add_data.sm_name, N_("name"),
+ N_("sets the submodule's name to the given string "
+ "instead of defaulting to its path")),
+ OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")),
+ OPT_END()
+ };
+ const char *const usage[] = {
+ N_("git submodule add [<options>] [--] <repository> [<path>]"),
+ NULL
+ };
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 1;
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (!is_writing_gitmodules_ok())
+ die(_("please make sure that the .gitmodules file is in the working tree"));
+
+ if (prefix && *prefix &&
+ add_data.reference_path && !is_absolute_path(add_data.reference_path))
+ add_data.reference_path = xstrfmt("%s%s", prefix, add_data.reference_path);
+
+ if (argc == 0 || argc > 2)
+ usage_with_options(usage, options);
+
+ add_data.repo = argv[0];
+ if (argc == 1)
+ add_data.sm_path = git_url_basename(add_data.repo, 0, 0);
+ else
+ add_data.sm_path = xstrdup(argv[1]);
+
+ if (prefix && *prefix && !is_absolute_path(add_data.sm_path)) {
+ char *sm_path = add_data.sm_path;
+
+ add_data.sm_path = xstrfmt("%s%s", prefix, sm_path);
+ free(sm_path);
+ }
+
+ if (starts_with_dot_dot_slash(add_data.repo) ||
+ starts_with_dot_slash(add_data.repo)) {
+ if (prefix)
+ die(_("Relative path can only be used from the toplevel "
+ "of the working tree"));
+
+ /* dereference source url relative to parent's url */
+ to_free = resolve_relative_url(add_data.repo, NULL, 1);
+ add_data.realrepo = to_free;
+ } else if (is_dir_sep(add_data.repo[0]) || strchr(add_data.repo, ':')) {
+ add_data.realrepo = add_data.repo;
+ } else {
+ die(_("repo URL: '%s' must be absolute or begin with ./|../"),
+ add_data.repo);
+ }
+
+ /*
+ * normalize path:
+ * multiple //; leading ./; /./; /../;
+ */
+ normalize_path_copy(add_data.sm_path, add_data.sm_path);
+ strip_dir_trailing_slashes(add_data.sm_path);
+
+ die_on_index_match(add_data.sm_path, force);
+ die_on_repo_without_commits(add_data.sm_path);
+
+ if (!force) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ cp.no_stdout = 1;
+ strvec_pushl(&cp.args, "add", "--dry-run", "--ignore-missing",
+ "--no-warn-embedded-repo", add_data.sm_path, NULL);
+ if ((ret = pipe_command(&cp, NULL, 0, NULL, 0, &sb, 0))) {
+ strbuf_complete_line(&sb);
+ fputs(sb.buf, stderr);
+ goto cleanup;
+ }
+ }
+
+	if (!add_data.sm_name)
+ add_data.sm_name = add_data.sm_path;
+
+ if (check_submodule_name(add_data.sm_name))
+ die(_("'%s' is not a valid submodule name"), add_data.sm_name);
+
+ add_data.prefix = prefix;
+ add_data.force = !!force;
+ add_data.quiet = !!quiet;
+ add_data.progress = !!progress;
+ add_data.dissociate = !!dissociate;
+
+ if (add_submodule(&add_data))
+ goto cleanup;
+ configure_added_submodule(&add_data);
+
+ ret = 0;
+cleanup:
+ free(add_data.sm_path);
+ free(to_free);
+ strbuf_release(&sb);
+
+ return ret;
+}
+
+int cmd_submodule__helper(int argc, const char **argv, const char *prefix)
+{
+ const char *cmd = argv[0];
+ const char *subcmd;
+ parse_opt_subcommand_fn *fn = NULL;
+ const char *const usage[] = {
+ N_("git submodule--helper <command>"),
+ NULL
+ };
+ struct option options[] = {
+ OPT_SUBCOMMAND("clone", &fn, module_clone),
+ OPT_SUBCOMMAND("add", &fn, module_add),
+ OPT_SUBCOMMAND("update", &fn, module_update),
+ OPT_SUBCOMMAND("foreach", &fn, module_foreach),
+ OPT_SUBCOMMAND("init", &fn, module_init),
+ OPT_SUBCOMMAND("status", &fn, module_status),
+ OPT_SUBCOMMAND("sync", &fn, module_sync),
+ OPT_SUBCOMMAND("deinit", &fn, module_deinit),
+ OPT_SUBCOMMAND("summary", &fn, module_summary),
+ OPT_SUBCOMMAND("push-check", &fn, push_check),
+ OPT_SUBCOMMAND("absorbgitdirs", &fn, absorb_git_dirs),
+ OPT_SUBCOMMAND("set-url", &fn, module_set_url),
+ OPT_SUBCOMMAND("set-branch", &fn, module_set_branch),
+ OPT_SUBCOMMAND("create-branch", &fn, module_create_branch),
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ subcmd = argv[0];
+
+ if (strcmp(subcmd, "clone") && strcmp(subcmd, "update") &&
+ strcmp(subcmd, "foreach") && strcmp(subcmd, "status") &&
+ strcmp(subcmd, "sync") && strcmp(subcmd, "absorbgitdirs") &&
+ get_super_prefix())
+ /*
+ * xstrfmt() rather than "%s %s" to keep the translated
+ * string identical to git.c's.
+ */
+ die(_("%s doesn't support --super-prefix"),
+ xstrfmt("'%s %s'", cmd, subcmd));
+
+ return fn(argc, argv, prefix);
+}
diff --git a/builtin/symbolic-ref.c b/builtin/symbolic-ref.c
new file mode 100644
index 0000000..e00768a
--- /dev/null
+++ b/builtin/symbolic-ref.c
@@ -0,0 +1,87 @@
+#include "builtin.h"
+#include "config.h"
+#include "cache.h"
+#include "refs.h"
+#include "parse-options.h"
+
+static const char * const git_symbolic_ref_usage[] = {
+ N_("git symbolic-ref [-m <reason>] <name> <ref>"),
+ N_("git symbolic-ref [-q] [--short] [--no-recurse] <name>"),
+ N_("git symbolic-ref --delete [-q] <name>"),
+ NULL
+};
+
+static int check_symref(const char *HEAD, int quiet, int shorten, int recurse, int print)
+{
+ int resolve_flags, flag;
+ const char *refname;
+
+ resolve_flags = (recurse ? 0 : RESOLVE_REF_NO_RECURSE);
+ refname = resolve_ref_unsafe(HEAD, resolve_flags, NULL, &flag);
+
+ if (!refname)
+ die("No such ref: %s", HEAD);
+ else if (!(flag & REF_ISSYMREF)) {
+ if (!quiet)
+ die("ref %s is not a symbolic ref", HEAD);
+ else
+ return 1;
+ }
+ if (print) {
+ char *to_free = NULL;
+ if (shorten)
+ refname = to_free = shorten_unambiguous_ref(refname, 0);
+ puts(refname);
+ free(to_free);
+ }
+ return 0;
+}
+
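+/*
+ * Illustrative usage:
+ *
+ *	git symbolic-ref HEAD refs/heads/main	# point HEAD at a branch
+ *	git symbolic-ref --short HEAD		# prints "main"
+ *
+ * Reading a ref that is not symbolic (e.g. a detached HEAD) dies
+ * unless -q/--quiet is given, in which case it merely exits non-zero.
+ */
+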
+int cmd_symbolic_ref(int argc, const char **argv, const char *prefix)
+{
+ int quiet = 0, delete = 0, shorten = 0, recurse = 1, ret = 0;
+ const char *msg = NULL;
+ struct option options[] = {
+ OPT__QUIET(&quiet,
+ N_("suppress error message for non-symbolic (detached) refs")),
+ OPT_BOOL('d', "delete", &delete, N_("delete symbolic ref")),
+ OPT_BOOL(0, "short", &shorten, N_("shorten ref output")),
+ OPT_BOOL(0, "recurse", &recurse, N_("recursively dereference (default)")),
+ OPT_STRING('m', NULL, &msg, N_("reason"), N_("reason of the update")),
+ OPT_END(),
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, options,
+ git_symbolic_ref_usage, 0);
+ if (msg && !*msg)
+ die("Refusing to perform update with empty message");
+
+ if (delete) {
+ if (argc != 1)
+ usage_with_options(git_symbolic_ref_usage, options);
+ ret = check_symref(argv[0], 1, 0, 0, 0);
+ if (ret)
+ die("Cannot delete %s, not a symbolic ref", argv[0]);
+ if (!strcmp(argv[0], "HEAD"))
+ die("deleting '%s' is not allowed", argv[0]);
+ return delete_ref(NULL, argv[0], NULL, REF_NO_DEREF);
+ }
+
+ switch (argc) {
+ case 1:
+ ret = check_symref(argv[0], quiet, shorten, recurse, 1);
+ break;
+ case 2:
+ if (!strcmp(argv[0], "HEAD") &&
+ !starts_with(argv[1], "refs/"))
+ die("Refusing to point HEAD outside of refs/");
+ if (check_refname_format(argv[1], REFNAME_ALLOW_ONELEVEL) < 0)
+ die("Refusing to set '%s' to invalid ref '%s'", argv[0], argv[1]);
+ ret = !!create_symref(argv[0], argv[1], msg);
+ break;
+ default:
+ usage_with_options(git_symbolic_ref_usage, options);
+ }
+ return ret;
+}
diff --git a/builtin/tag.c b/builtin/tag.c
new file mode 100644
index 0000000..d428c45
--- /dev/null
+++ b/builtin/tag.c
@@ -0,0 +1,647 @@
+/*
+ * Builtin "git tag"
+ *
+ * Copyright (c) 2007 Kristian Høgsberg <krh@redhat.com>,
+ * Carlos Rica <jasampler@gmail.com>
+ * Based on git-tag.sh and mktag.c by Linus Torvalds.
+ */
+
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "refs.h"
+#include "object-store.h"
+#include "tag.h"
+#include "run-command.h"
+#include "parse-options.h"
+#include "diff.h"
+#include "revision.h"
+#include "gpg-interface.h"
+#include "oid-array.h"
+#include "column.h"
+#include "ref-filter.h"
+#include "date.h"
+
+static const char * const git_tag_usage[] = {
+ N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] [-e]\n"
+ " <tagname> [<commit> | <object>]"),
+ N_("git tag -d <tagname>..."),
+ N_("git tag [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]\n"
+ " [--points-at <object>] [--column[=<options>] | --no-column]\n"
+ " [--create-reflog] [--sort=<key>] [--format=<format>]\n"
+ " [--merged <commit>] [--no-merged <commit>] [<pattern>...]"),
+ N_("git tag -v [--format=<format>] <tagname>..."),
+ NULL
+};
+
+static unsigned int colopts;
+static int force_sign_annotate;
+static int config_sign_tag = -1; /* unspecified */
+
+static int list_tags(struct ref_filter *filter, struct ref_sorting *sorting,
+ struct ref_format *format)
+{
+ struct ref_array array;
+ struct strbuf output = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+ char *to_free = NULL;
+ int i;
+
+ memset(&array, 0, sizeof(array));
+
+ if (filter->lines == -1)
+ filter->lines = 0;
+
+ if (!format->format) {
+ if (filter->lines) {
+ to_free = xstrfmt("%s %%(contents:lines=%d)",
+ "%(align:15)%(refname:lstrip=2)%(end)",
+ filter->lines);
+ format->format = to_free;
+ } else
+ format->format = "%(refname:lstrip=2)";
+ }
+
+ if (verify_ref_format(format))
+ die(_("unable to parse format string"));
+ filter->with_commit_tag_algo = 1;
+ filter_refs(&array, filter, FILTER_REFS_TAGS);
+ ref_array_sort(sorting, &array);
+
+ for (i = 0; i < array.nr; i++) {
+ strbuf_reset(&output);
+ strbuf_reset(&err);
+ if (format_ref_array_item(array.items[i], format, &output, &err))
+ die("%s", err.buf);
+ fwrite(output.buf, 1, output.len, stdout);
+ putchar('\n');
+ }
+
+ strbuf_release(&err);
+ strbuf_release(&output);
+ ref_array_clear(&array);
+ free(to_free);
+
+ return 0;
+}
+
+typedef int (*each_tag_name_fn)(const char *name, const char *ref,
+ const struct object_id *oid, void *cb_data);
+
+static int for_each_tag_name(const char **argv, each_tag_name_fn fn,
+ void *cb_data)
+{
+ const char **p;
+ struct strbuf ref = STRBUF_INIT;
+ int had_error = 0;
+ struct object_id oid;
+
+ for (p = argv; *p; p++) {
+ strbuf_reset(&ref);
+ strbuf_addf(&ref, "refs/tags/%s", *p);
+ if (read_ref(ref.buf, &oid)) {
+ error(_("tag '%s' not found."), *p);
+ had_error = 1;
+ continue;
+ }
+ if (fn(*p, ref.buf, &oid, cb_data))
+ had_error = 1;
+ }
+ strbuf_release(&ref);
+ return had_error;
+}
+
+static int collect_tags(const char *name, const char *ref,
+ const struct object_id *oid, void *cb_data)
+{
+ struct string_list *ref_list = cb_data;
+
+ string_list_append(ref_list, ref);
+ ref_list->items[ref_list->nr - 1].util = oiddup(oid);
+ return 0;
+}
+
+static int delete_tags(const char **argv)
+{
+ int result;
+ struct string_list refs_to_delete = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+
+ result = for_each_tag_name(argv, collect_tags, (void *)&refs_to_delete);
+ if (delete_refs(NULL, &refs_to_delete, REF_NO_DEREF))
+ result = 1;
+
+ for_each_string_list_item(item, &refs_to_delete) {
+ const char *name = item->string;
+ struct object_id *oid = item->util;
+ if (!ref_exists(name))
+ printf(_("Deleted tag '%s' (was %s)\n"),
+ item->string + 10,
+ find_unique_abbrev(oid, DEFAULT_ABBREV));
+
+ free(oid);
+ }
+ string_list_clear(&refs_to_delete, 0);
+ return result;
+}
+
+static int verify_tag(const char *name, const char *ref,
+ const struct object_id *oid, void *cb_data)
+{
+ int flags;
+ struct ref_format *format = cb_data;
+ flags = GPG_VERIFY_VERBOSE;
+
+ if (format->format)
+ flags = GPG_VERIFY_OMIT_STATUS;
+
+ if (gpg_verify_tag(oid, name, flags))
+ return -1;
+
+ if (format->format)
+ pretty_print_ref(name, oid, format);
+
+ return 0;
+}
+
+static int do_sign(struct strbuf *buffer)
+{
+ return sign_buffer(buffer, buffer, get_signing_key());
+}
+
+static const char tag_template[] =
+ N_("\nWrite a message for tag:\n %s\n"
+ "Lines starting with '%c' will be ignored.\n");
+
+static const char tag_template_nocleanup[] =
+ N_("\nWrite a message for tag:\n %s\n"
+ "Lines starting with '%c' will be kept; you may remove them"
+ " yourself if you want to.\n");
+
+static int git_tag_config(const char *var, const char *value, void *cb)
+{
+ int status;
+
+ if (!strcmp(var, "tag.gpgsign")) {
+ config_sign_tag = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "tag.sort")) {
+ if (!value)
+ return config_error_nonbool(var);
+ string_list_append(cb, value);
+ return 0;
+ }
+
+ status = git_gpg_config(var, value, cb);
+ if (status)
+ return status;
+ if (!strcmp(var, "tag.forcesignannotated")) {
+ force_sign_annotate = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (starts_with(var, "column."))
+ return git_column_config(var, value, "tag", &colopts);
+ return git_color_default_config(var, value, cb);
+}
+
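+/*
+ * Seed the editor template with the message body of an existing annotated
+ * tag (e.g. when re-tagging with --force): read the previous tag object,
+ * split off any trailing signature, and copy everything past the header.
+ */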
+static void write_tag_body(int fd, const struct object_id *oid)
+{
+ unsigned long size;
+ enum object_type type;
+ char *buf, *sp, *orig;
+ struct strbuf payload = STRBUF_INIT;
+ struct strbuf signature = STRBUF_INIT;
+
+ orig = buf = read_object_file(oid, &type, &size);
+ if (!buf)
+ return;
+ if (parse_signature(buf, size, &payload, &signature)) {
+ buf = payload.buf;
+ size = payload.len;
+ }
+ /* skip header */
+ sp = strstr(buf, "\n\n");
+
+ if (!sp || !size || type != OBJ_TAG) {
+ free(buf);
+ return;
+ }
+ sp += 2; /* skip the 2 LFs */
+ write_or_die(fd, sp, buf + size - sp);
+
+ free(orig);
+ strbuf_release(&payload);
+ strbuf_release(&signature);
+}
+
+static int build_tag_object(struct strbuf *buf, int sign, struct object_id *result)
+{
+ if (sign && do_sign(buf) < 0)
+ return error(_("unable to sign the tag"));
+ if (write_object_file(buf->buf, buf->len, OBJ_TAG, result) < 0)
+ return error(_("unable to write tag file"));
+ return 0;
+}
+
+struct create_tag_options {
+ unsigned int message_given:1;
+ unsigned int use_editor:1;
+ unsigned int sign;
+ enum {
+ CLEANUP_NONE,
+ CLEANUP_SPACE,
+ CLEANUP_ALL
+ } cleanup_mode;
+};
+
+static const char message_advice_nested_tag[] =
+ N_("You have created a nested tag. The object referred to by your new tag is\n"
+ "already a tag. If you meant to tag the object that it points to, use:\n"
+ "\n"
+ "\tgit tag -f %s %s^{}");
+
+static void create_tag(const struct object_id *object, const char *object_ref,
+ const char *tag,
+ struct strbuf *buf, struct create_tag_options *opt,
+ struct object_id *prev, struct object_id *result)
+{
+ enum object_type type;
+ struct strbuf header = STRBUF_INIT;
+ char *path = NULL;
+
+ type = oid_object_info(the_repository, object, NULL);
+ if (type <= OBJ_NONE)
+ die(_("bad object type."));
+
+ if (type == OBJ_TAG)
+ advise_if_enabled(ADVICE_NESTED_TAG, _(message_advice_nested_tag),
+ tag, object_ref);
+
+ strbuf_addf(&header,
+ "object %s\n"
+ "type %s\n"
+ "tag %s\n"
+ "tagger %s\n\n",
+ oid_to_hex(object),
+ type_name(type),
+ tag,
+ git_committer_info(IDENT_STRICT));
+
+ if (!opt->message_given || opt->use_editor) {
+ int fd;
+
+ /* write the template message before editing: */
+ path = git_pathdup("TAG_EDITMSG");
+ fd = xopen(path, O_CREAT | O_TRUNC | O_WRONLY, 0600);
+
+ if (opt->message_given) {
+ write_or_die(fd, buf->buf, buf->len);
+ strbuf_reset(buf);
+ } else if (!is_null_oid(prev)) {
+ write_tag_body(fd, prev);
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_addch(&buf, '\n');
+ if (opt->cleanup_mode == CLEANUP_ALL)
+ strbuf_commented_addf(&buf, _(tag_template), tag, comment_line_char);
+ else
+ strbuf_commented_addf(&buf, _(tag_template_nocleanup), tag, comment_line_char);
+ write_or_die(fd, buf.buf, buf.len);
+ strbuf_release(&buf);
+ }
+ close(fd);
+
+ if (launch_editor(path, buf, NULL)) {
+ fprintf(stderr,
+ _("Please supply the message using either -m or -F option.\n"));
+ exit(1);
+ }
+ }
+
+ if (opt->cleanup_mode != CLEANUP_NONE)
+ strbuf_stripspace(buf, opt->cleanup_mode == CLEANUP_ALL);
+
+ if (!opt->message_given && !buf->len)
+ die(_("no tag message?"));
+
+ strbuf_insert(buf, 0, header.buf, header.len);
+ strbuf_release(&header);
+
+ if (build_tag_object(buf, opt->sign, result) < 0) {
+ if (path)
+ fprintf(stderr, _("The tag message has been left in %s\n"),
+ path);
+ exit(128);
+ }
+ if (path) {
+ unlink_or_warn(path);
+ free(path);
+ }
+}
+
+static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
+{
+ enum object_type type;
+ struct commit *c;
+ char *buf;
+ unsigned long size;
+ int subject_len = 0;
+ const char *subject_start;
+
+ char *rla = getenv("GIT_REFLOG_ACTION");
+ if (rla) {
+ strbuf_addstr(sb, rla);
+ } else {
+ strbuf_addstr(sb, "tag: tagging ");
+ strbuf_add_unique_abbrev(sb, oid, DEFAULT_ABBREV);
+ }
+
+ strbuf_addstr(sb, " (");
+ type = oid_object_info(the_repository, oid, NULL);
+ switch (type) {
+ default:
+ strbuf_addstr(sb, "object of unknown type");
+ break;
+ case OBJ_COMMIT:
+ if ((buf = read_object_file(oid, &type, &size))) {
+ subject_len = find_commit_subject(buf, &subject_start);
+ strbuf_insert(sb, sb->len, subject_start, subject_len);
+ } else {
+ strbuf_addstr(sb, "commit object");
+ }
+ free(buf);
+
+ if ((c = lookup_commit_reference(the_repository, oid)))
+ strbuf_addf(sb, ", %s", show_date(c->date, 0, DATE_MODE(SHORT)));
+ break;
+ case OBJ_TREE:
+ strbuf_addstr(sb, "tree object");
+ break;
+ case OBJ_BLOB:
+ strbuf_addstr(sb, "blob object");
+ break;
+ case OBJ_TAG:
+ strbuf_addstr(sb, "other tag object");
+ break;
+ }
+ strbuf_addch(sb, ')');
+}
+
+struct msg_arg {
+ int given;
+ struct strbuf buf;
+};
+
+static int parse_msg_arg(const struct option *opt, const char *arg, int unset)
+{
+ struct msg_arg *msg = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ if (!arg)
+ return -1;
+ if (msg->buf.len)
+ strbuf_addstr(&(msg->buf), "\n\n");
+ strbuf_addstr(&(msg->buf), arg);
+ msg->given = 1;
+ return 0;
+}
+
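+/*
+ * Build "refs/tags/<name>" in sb and check that it is a well-formed ref
+ * name; names starting with '-' are rejected outright (presumably to
+ * avoid tag names that look like options).
+ */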
+static int strbuf_check_tag_ref(struct strbuf *sb, const char *name)
+{
+ if (name[0] == '-')
+ return -1;
+
+ strbuf_reset(sb);
+ strbuf_addf(sb, "refs/tags/%s", name);
+
+ return check_refname_format(sb->buf, 0);
+}
+
+int cmd_tag(int argc, const char **argv, const char *prefix)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf ref = STRBUF_INIT;
+ struct strbuf reflog_msg = STRBUF_INIT;
+ struct object_id object, prev;
+ const char *object_ref, *tag;
+ struct create_tag_options opt;
+ char *cleanup_arg = NULL;
+ int create_reflog = 0;
+ int annotate = 0, force = 0;
+ int cmdmode = 0, create_tag_object = 0;
+ const char *msgfile = NULL, *keyid = NULL;
+ struct msg_arg msg = { .buf = STRBUF_INIT };
+ struct ref_transaction *transaction;
+ struct strbuf err = STRBUF_INIT;
+ struct ref_filter filter;
+ struct ref_sorting *sorting;
+ struct string_list sorting_options = STRING_LIST_INIT_DUP;
+ struct ref_format format = REF_FORMAT_INIT;
+ int icase = 0;
+ int edit_flag = 0;
+ struct option options[] = {
+ OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'),
+ { OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"),
+ N_("print <n> lines of each tag message"),
+ PARSE_OPT_OPTARG, NULL, 1 },
+ OPT_CMDMODE('d', "delete", &cmdmode, N_("delete tags"), 'd'),
+ OPT_CMDMODE('v', "verify", &cmdmode, N_("verify tags"), 'v'),
+
+ OPT_GROUP(N_("Tag creation options")),
+ OPT_BOOL('a', "annotate", &annotate,
+ N_("annotated tag, needs a message")),
+ OPT_CALLBACK_F('m', "message", &msg, N_("message"),
+ N_("tag message"), PARSE_OPT_NONEG, parse_msg_arg),
+ OPT_FILENAME('F', "file", &msgfile, N_("read message from file")),
+ OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")),
+ OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")),
+ OPT_CLEANUP(&cleanup_arg),
+ OPT_STRING('u', "local-user", &keyid, N_("key-id"),
+ N_("use another key to sign the tag")),
+ OPT__FORCE(&force, N_("replace the tag if exists"), 0),
+ OPT_BOOL(0, "create-reflog", &create_reflog, N_("create a reflog")),
+
+ OPT_GROUP(N_("Tag listing options")),
+ OPT_COLUMN(0, "column", &colopts, N_("show tag list in columns")),
+ OPT_CONTAINS(&filter.with_commit, N_("print only tags that contain the commit")),
+ OPT_NO_CONTAINS(&filter.no_commit, N_("print only tags that don't contain the commit")),
+ OPT_WITH(&filter.with_commit, N_("print only tags that contain the commit")),
+ OPT_WITHOUT(&filter.no_commit, N_("print only tags that don't contain the commit")),
+ OPT_MERGED(&filter, N_("print only tags that are merged")),
+ OPT_NO_MERGED(&filter, N_("print only tags that are not merged")),
+ OPT_REF_SORT(&sorting_options),
+ {
+ OPTION_CALLBACK, 0, "points-at", &filter.points_at, N_("object"),
+ N_("print only tags of the object"), PARSE_OPT_LASTARG_DEFAULT,
+ parse_opt_object_name, (intptr_t) "HEAD"
+ },
+ OPT_STRING(0, "format", &format.format, N_("format"),
+ N_("format to use for the output")),
+ OPT__COLOR(&format.use_color, N_("respect format colors")),
+ OPT_BOOL('i', "ignore-case", &icase, N_("sorting and filtering are case insensitive")),
+ OPT_END()
+ };
+ int ret = 0;
+ const char *only_in_list = NULL;
+
+ setup_ref_filter_porcelain_msg();
+
+ git_config(git_tag_config, &sorting_options);
+
+ memset(&opt, 0, sizeof(opt));
+ memset(&filter, 0, sizeof(filter));
+ filter.lines = -1;
+ opt.sign = -1;
+
+ argc = parse_options(argc, argv, prefix, options, git_tag_usage, 0);
+
+ if (!cmdmode) {
+ if (argc == 0)
+ cmdmode = 'l';
+ else if (filter.with_commit || filter.no_commit ||
+ filter.reachable_from || filter.unreachable_from ||
+ filter.points_at.nr || filter.lines != -1)
+ cmdmode = 'l';
+ }
+
+ if (cmdmode == 'l')
+ setup_auto_pager("tag", 1);
+
+ if (opt.sign == -1)
+ opt.sign = cmdmode ? 0 : config_sign_tag > 0;
+
+ if (keyid) {
+ opt.sign = 1;
+ set_signing_key(keyid);
+ }
+ create_tag_object = (opt.sign || annotate || msg.given || msgfile);
+
+ if ((create_tag_object || force) && (cmdmode != 0))
+ usage_with_options(git_tag_usage, options);
+
+ finalize_colopts(&colopts, -1);
+ if (cmdmode == 'l' && filter.lines != -1) {
+ if (explicitly_enable_column(colopts))
+ die(_("options '%s' and '%s' cannot be used together"), "--column", "-n");
+ colopts = 0;
+ }
+ sorting = ref_sorting_options(&sorting_options);
+ ref_sorting_set_sort_flags_all(sorting, REF_SORTING_ICASE, icase);
+ filter.ignore_case = icase;
+ if (cmdmode == 'l') {
+ if (column_active(colopts)) {
+ struct column_options copts;
+ memset(&copts, 0, sizeof(copts));
+ copts.padding = 2;
+ run_column_filter(colopts, &copts);
+ }
+ filter.name_patterns = argv;
+ ret = list_tags(&filter, sorting, &format);
+ if (column_active(colopts))
+ stop_column_filter();
+ goto cleanup;
+ }
+ if (filter.lines != -1)
+ only_in_list = "-n";
+ else if (filter.with_commit)
+ only_in_list = "--contains";
+ else if (filter.no_commit)
+ only_in_list = "--no-contains";
+ else if (filter.points_at.nr)
+ only_in_list = "--points-at";
+ else if (filter.reachable_from)
+ only_in_list = "--merged";
+ else if (filter.unreachable_from)
+ only_in_list = "--no-merged";
+ if (only_in_list)
+ die(_("the '%s' option is only allowed in list mode"), only_in_list);
+ if (cmdmode == 'd') {
+ ret = delete_tags(argv);
+ goto cleanup;
+ }
+ if (cmdmode == 'v') {
+ if (format.format && verify_ref_format(&format))
+ usage_with_options(git_tag_usage, options);
+ ret = for_each_tag_name(argv, verify_tag, &format);
+ goto cleanup;
+ }
+
+ if (msg.given || msgfile) {
+ if (msg.given && msgfile)
+ die(_("options '%s' and '%s' cannot be used together"), "-F", "-m");
+ if (msg.given)
+ strbuf_addbuf(&buf, &(msg.buf));
+ else {
+ if (!strcmp(msgfile, "-")) {
+ if (strbuf_read(&buf, 0, 1024) < 0)
+ die_errno(_("cannot read '%s'"), msgfile);
+ } else {
+ if (strbuf_read_file(&buf, msgfile, 1024) < 0)
+ die_errno(_("could not open or read '%s'"),
+ msgfile);
+ }
+ }
+ }
+
+ tag = argv[0];
+
+ object_ref = argc == 2 ? argv[1] : "HEAD";
+ if (argc > 2)
+ die(_("too many arguments"));
+
+ if (get_oid(object_ref, &object))
+ die(_("Failed to resolve '%s' as a valid ref."), object_ref);
+
+ if (strbuf_check_tag_ref(&ref, tag))
+ die(_("'%s' is not a valid tag name."), tag);
+
+ if (read_ref(ref.buf, &prev))
+ oidclr(&prev);
+ else if (!force)
+ die(_("tag '%s' already exists"), tag);
+
+ opt.message_given = msg.given || msgfile;
+ opt.use_editor = edit_flag;
+
+ if (!cleanup_arg || !strcmp(cleanup_arg, "strip"))
+ opt.cleanup_mode = CLEANUP_ALL;
+ else if (!strcmp(cleanup_arg, "verbatim"))
+ opt.cleanup_mode = CLEANUP_NONE;
+ else if (!strcmp(cleanup_arg, "whitespace"))
+ opt.cleanup_mode = CLEANUP_SPACE;
+ else
+ die(_("Invalid cleanup mode %s"), cleanup_arg);
+
+ create_reflog_msg(&object, &reflog_msg);
+
+ if (create_tag_object) {
+ if (force_sign_annotate && !annotate)
+ opt.sign = 1;
+ create_tag(&object, object_ref, tag, &buf, &opt, &prev, &object);
+ }
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction ||
+ ref_transaction_update(transaction, ref.buf, &object, &prev,
+ create_reflog ? REF_FORCE_CREATE_REFLOG : 0,
+ reflog_msg.buf, &err) ||
+ ref_transaction_commit(transaction, &err))
+ die("%s", err.buf);
+ ref_transaction_free(transaction);
+ if (force && !is_null_oid(&prev) && !oideq(&prev, &object))
+ printf(_("Updated tag '%s' (was %s)\n"), tag,
+ find_unique_abbrev(&prev, DEFAULT_ABBREV));
+
+cleanup:
+ ref_sorting_release(sorting);
+ strbuf_release(&buf);
+ strbuf_release(&ref);
+ strbuf_release(&reflog_msg);
+ strbuf_release(&msg.buf);
+ strbuf_release(&err);
+ return ret;
+}
diff --git a/builtin/unpack-file.c b/builtin/unpack-file.c
new file mode 100644
index 0000000..9e8119d
--- /dev/null
+++ b/builtin/unpack-file.c
@@ -0,0 +1,38 @@
+#include "builtin.h"
+#include "config.h"
+#include "object-store.h"
+
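+/*
+ * Write the blob's contents to a ".merge_file_XXXXXX" temporary file in
+ * the current directory and return the (static) path buffer; the caller
+ * prints it for scripts to pick up.
+ */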
+static char *create_temp_file(struct object_id *oid)
+{
+ static char path[50];
+ void *buf;
+ enum object_type type;
+ unsigned long size;
+ int fd;
+
+ buf = read_object_file(oid, &type, &size);
+ if (!buf || type != OBJ_BLOB)
+ die("unable to read blob object %s", oid_to_hex(oid));
+
+ xsnprintf(path, sizeof(path), ".merge_file_XXXXXX");
+ fd = xmkstemp(path);
+ if (write_in_full(fd, buf, size) < 0)
+ die_errno("unable to write temp-file");
+ close(fd);
+ return path;
+}
+
+int cmd_unpack_file(int argc, const char **argv, const char *prefix)
+{
+ struct object_id oid;
+
+ if (argc != 2 || !strcmp(argv[1], "-h"))
+ usage("git unpack-file <blob>");
+ if (get_oid(argv[1], &oid))
+ die("Not a valid object name %s", argv[1]);
+
+ git_config(git_default_config, NULL);
+
+ puts(create_temp_file(&oid));
+ return 0;
+}
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
new file mode 100644
index 0000000..43789b8
--- /dev/null
+++ b/builtin/unpack-objects.c
@@ -0,0 +1,686 @@
+#include "builtin.h"
+#include "cache.h"
+#include "bulk-checkin.h"
+#include "config.h"
+#include "object-store.h"
+#include "object.h"
+#include "delta.h"
+#include "pack.h"
+#include "blob.h"
+#include "commit.h"
+#include "tag.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "progress.h"
+#include "decorate.h"
+#include "fsck.h"
+
+static int dry_run, quiet, recover, has_errors, strict;
+static const char unpack_usage[] = "git unpack-objects [-n] [-q] [-r] [--strict]";
+
+/* We always read in 4kB chunks. */
+static unsigned char buffer[4096];
+static unsigned int offset, len;
+static off_t consumed_bytes;
+static off_t max_input_size;
+static git_hash_ctx ctx;
+static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
+static struct progress *progress;
+
+/*
+ * When running under --strict mode, objects whose reachability is
+ * suspect are kept in core without being written to the object
+ * store.
+ */
+struct obj_buffer {
+ char *buffer;
+ unsigned long size;
+};
+
+static struct decoration obj_decorate;
+
+static struct obj_buffer *lookup_object_buffer(struct object *base)
+{
+ return lookup_decoration(&obj_decorate, base);
+}
+
+static void add_object_buffer(struct object *object, char *buffer, unsigned long size)
+{
+ struct obj_buffer *obj;
+ CALLOC_ARRAY(obj, 1);
+ obj->buffer = buffer;
+ obj->size = size;
+ if (add_decoration(&obj_decorate, object, obj))
+ die("object %s tried to add buffer twice!", oid_to_hex(&object->oid));
+}
+
+/*
+ * Make sure at least "min" bytes are available in the buffer, and
+ * return the pointer to the buffer.
+ */
+static void *fill(int min)
+{
+ if (min <= len)
+ return buffer + offset;
+ if (min > sizeof(buffer))
+ die("cannot fill %d bytes", min);
+ if (offset) {
+ the_hash_algo->update_fn(&ctx, buffer, offset);
+ memmove(buffer, buffer + offset, len);
+ offset = 0;
+ }
+ do {
+ ssize_t ret = xread(0, buffer + len, sizeof(buffer) - len);
+ if (ret <= 0) {
+ if (!ret)
+ die("early EOF");
+ die_errno("read error on input");
+ }
+ len += ret;
+ } while (len < min);
+ return buffer;
+}
+
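+/*
+ * Mark "bytes" bytes of the input window as consumed: advance the read
+ * offset, keep a running total for --max-input-size and progress
+ * reporting, and guard against off_t overflow.
+ */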
+static void use(int bytes)
+{
+ if (bytes > len)
+ die("used more bytes than were available");
+ len -= bytes;
+ offset += bytes;
+
+ /* make sure off_t is sufficiently large not to wrap */
+ if (signed_add_overflows(consumed_bytes, bytes))
+ die("pack too large for current definition of off_t");
+ consumed_bytes += bytes;
+ if (max_input_size && consumed_bytes > max_input_size)
+ die(_("pack exceeds maximum allowed size"));
+ display_throughput(progress, consumed_bytes);
+}
+
+/*
+ * Decompress a zstream from the standard input into a newly
+ * allocated buffer of the specified size and return the buffer.
+ * The caller is responsible for freeing the returned buffer.
+ *
+ * In dry_run mode, however, "get_data()" is only used to check the
+ * integrity of the data and the returned buffer is not used at all.
+ * Therefore, in dry_run mode, "get_data()" releases the small
+ * scratch buffer that it reuses to hold temporary zstream output
+ * and returns NULL instead of returning garbage data.
+ */
+static void *get_data(unsigned long size)
+{
+ git_zstream stream;
+ unsigned long bufsize = dry_run && size > 8192 ? 8192 : size;
+ void *buf = xmallocz(bufsize);
+
+ memset(&stream, 0, sizeof(stream));
+
+ stream.next_out = buf;
+ stream.avail_out = bufsize;
+ stream.next_in = fill(1);
+ stream.avail_in = len;
+ git_inflate_init(&stream);
+
+ for (;;) {
+ int ret = git_inflate(&stream, 0);
+ use(len - stream.avail_in);
+ if (stream.total_out == size && ret == Z_STREAM_END)
+ break;
+ if (ret != Z_OK) {
+ error("inflate returned %d", ret);
+ FREE_AND_NULL(buf);
+ if (!recover)
+ exit(1);
+ has_errors = 1;
+ break;
+ }
+ stream.next_in = fill(1);
+ stream.avail_in = len;
+ if (dry_run) {
+ /* reuse the buffer in dry_run mode */
+ stream.next_out = buf;
+ stream.avail_out = bufsize > size - stream.total_out ?
+ size - stream.total_out :
+ bufsize;
+ }
+ }
+ git_inflate_end(&stream);
+ if (dry_run)
+ FREE_AND_NULL(buf);
+ return buf;
+}
+
+struct delta_info {
+ struct object_id base_oid;
+ unsigned nr;
+ off_t base_offset;
+ unsigned long size;
+ void *delta;
+ struct delta_info *next;
+};
+
+static struct delta_info *delta_list;
+
+static void add_delta_to_list(unsigned nr, const struct object_id *base_oid,
+ off_t base_offset,
+ void *delta, unsigned long size)
+{
+ struct delta_info *info = xmalloc(sizeof(*info));
+
+ oidcpy(&info->base_oid, base_oid);
+ info->base_offset = base_offset;
+ info->size = size;
+ info->delta = delta;
+ info->nr = nr;
+ info->next = delta_list;
+ delta_list = info;
+}
+
+struct obj_info {
+ off_t offset;
+ struct object_id oid;
+ struct object *obj;
+};
+
+/* Remember to update object flag allocation in object.h */
+#define FLAG_OPEN (1u<<20)
+#define FLAG_WRITTEN (1u<<21)
+
+static struct obj_info *obj_list;
+static unsigned nr_objects;
+
+/*
+ * Called only from check_object() after it verified this object
+ * is Ok.
+ */
+static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
+{
+ struct object_id oid;
+
+ if (write_object_file(obj_buf->buffer, obj_buf->size,
+ obj->type, &oid) < 0)
+ die("failed to write object %s", oid_to_hex(&obj->oid));
+ obj->flags |= FLAG_WRITTEN;
+}
+
+/*
+ * At the very end of processing, write_rest() scans the objects
+ * that have reachability requirements and calls this function on
+ * each of them. Verify the object's reachability and validity
+ * recursively and write it out.
+ */
+static int check_object(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
+{
+ struct obj_buffer *obj_buf;
+
+ if (!obj)
+ return 1;
+
+ if (obj->flags & FLAG_WRITTEN)
+ return 0;
+
+ if (type != OBJ_ANY && obj->type != type)
+ die("object type mismatch");
+
+ if (!(obj->flags & FLAG_OPEN)) {
+ unsigned long size;
+ int type = oid_object_info(the_repository, &obj->oid, &size);
+ if (type != obj->type || type <= 0)
+ die("object of unexpected type");
+ obj->flags |= FLAG_WRITTEN;
+ return 0;
+ }
+
+ obj_buf = lookup_object_buffer(obj);
+ if (!obj_buf)
+ die("Whoops! Cannot find object '%s'", oid_to_hex(&obj->oid));
+ if (fsck_object(obj, obj_buf->buffer, obj_buf->size, &fsck_options))
+ die("fsck error in packed object");
+ fsck_options.walk = check_object;
+ if (fsck_walk(obj, NULL, &fsck_options))
+ die("Error on reachable objects of %s", oid_to_hex(&obj->oid));
+ write_cached_object(obj, obj_buf);
+ return 0;
+}
+
+static void write_rest(void)
+{
+ unsigned i;
+ for (i = 0; i < nr_objects; i++) {
+ if (obj_list[i].obj)
+ check_object(obj_list[i].obj, OBJ_ANY, NULL, NULL);
+ }
+}
+
+static void added_object(unsigned nr, enum object_type type,
+ void *data, unsigned long size);
+
+/*
+ * Write out nr-th object from the list, now we know the contents
+ * of it. Under --strict, this buffers structured objects in-core,
+ * to be checked at the end.
+ */
+static void write_object(unsigned nr, enum object_type type,
+ void *buf, unsigned long size)
+{
+ if (!strict) {
+ if (write_object_file(buf, size, type,
+ &obj_list[nr].oid) < 0)
+ die("failed to write object");
+ added_object(nr, type, buf, size);
+ free(buf);
+ obj_list[nr].obj = NULL;
+ } else if (type == OBJ_BLOB) {
+ struct blob *blob;
+ if (write_object_file(buf, size, type,
+ &obj_list[nr].oid) < 0)
+ die("failed to write object");
+ added_object(nr, type, buf, size);
+ free(buf);
+
+ blob = lookup_blob(the_repository, &obj_list[nr].oid);
+ if (blob)
+ blob->object.flags |= FLAG_WRITTEN;
+ else
+ die("invalid blob object");
+ obj_list[nr].obj = NULL;
+ } else {
+ struct object *obj;
+ int eaten;
+ hash_object_file(the_hash_algo, buf, size, type,
+ &obj_list[nr].oid);
+ added_object(nr, type, buf, size);
+ obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
+ type, size, buf,
+ &eaten);
+ if (!obj)
+ die("invalid %s", type_name(type));
+ add_object_buffer(obj, buf, size);
+ obj->flags |= FLAG_OPEN;
+ obj_list[nr].obj = obj;
+ }
+}
+
+static void resolve_delta(unsigned nr, enum object_type type,
+ void *base, unsigned long base_size,
+ void *delta, unsigned long delta_size)
+{
+ void *result;
+ unsigned long result_size;
+
+ result = patch_delta(base, base_size,
+ delta, delta_size,
+ &result_size);
+ if (!result)
+ die("failed to apply delta");
+ free(delta);
+ write_object(nr, type, result, result_size);
+}
+
+/*
+ * We now know the contents of an object (which is nr-th in the pack);
+ * resolve all the deltified objects that are based on it.
+ */
+static void added_object(unsigned nr, enum object_type type,
+ void *data, unsigned long size)
+{
+ struct delta_info **p = &delta_list;
+ struct delta_info *info;
+
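+ /*
+ * Resolving a delta recurses back into added_object() via
+ * write_object() and may splice other entries out of delta_list,
+ * so restart the scan from the head of the list after each
+ * resolution.
+ */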
+ while ((info = *p) != NULL) {
+ if (oideq(&info->base_oid, &obj_list[nr].oid) ||
+ info->base_offset == obj_list[nr].offset) {
+ *p = info->next;
+ p = &delta_list;
+ resolve_delta(info->nr, type, data, size,
+ info->delta, info->size);
+ free(info);
+ continue;
+ }
+ p = &info->next;
+ }
+}
+
+static void unpack_non_delta_entry(enum object_type type, unsigned long size,
+ unsigned nr)
+{
+ void *buf = get_data(size);
+
+ if (buf)
+ write_object(nr, type, buf, size);
+}
+
+struct input_zstream_data {
+ git_zstream *zstream;
+ unsigned char buf[8192];
+ int status;
+};
+
+static const void *feed_input_zstream(struct input_stream *in_stream,
+ unsigned long *readlen)
+{
+ struct input_zstream_data *data = in_stream->data;
+ git_zstream *zstream = data->zstream;
+ void *in = fill(1);
+
+ if (in_stream->is_finished) {
+ *readlen = 0;
+ return NULL;
+ }
+
+ zstream->next_out = data->buf;
+ zstream->avail_out = sizeof(data->buf);
+ zstream->next_in = in;
+ zstream->avail_in = len;
+
+ data->status = git_inflate(zstream, 0);
+
+ in_stream->is_finished = data->status != Z_OK;
+ use(len - zstream->avail_in);
+ *readlen = sizeof(data->buf) - zstream->avail_out;
+
+ return data->buf;
+}
+
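+/*
+ * Blobs above the big-file threshold (core.bigFileThreshold) are not held
+ * in memory here: inflate the zstream incrementally and write it straight
+ * to a loose object with stream_loose_object().
+ */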
+static void stream_blob(unsigned long size, unsigned nr)
+{
+ git_zstream zstream = { 0 };
+ struct input_zstream_data data = { 0 };
+ struct input_stream in_stream = {
+ .read = feed_input_zstream,
+ .data = &data,
+ };
+ struct obj_info *info = &obj_list[nr];
+
+ data.zstream = &zstream;
+ git_inflate_init(&zstream);
+
+ if (stream_loose_object(&in_stream, size, &info->oid))
+ die(_("failed to write object in stream"));
+
+ if (data.status != Z_STREAM_END)
+ die(_("inflate returned (%d)"), data.status);
+ git_inflate_end(&zstream);
+
+ if (strict) {
+ struct blob *blob = lookup_blob(the_repository, &info->oid);
+
+ if (!blob)
+ die(_("invalid blob object from stream"));
+ blob->object.flags |= FLAG_WRITTEN;
+ }
+ info->obj = NULL;
+}
+
+static int resolve_against_held(unsigned nr, const struct object_id *base,
+ void *delta_data, unsigned long delta_size)
+{
+ struct object *obj;
+ struct obj_buffer *obj_buffer;
+ obj = lookup_object(the_repository, base);
+ if (!obj)
+ return 0;
+ obj_buffer = lookup_object_buffer(obj);
+ if (!obj_buffer)
+ return 0;
+ resolve_delta(nr, obj->type, obj_buffer->buffer,
+ obj_buffer->size, delta_data, delta_size);
+ return 1;
+}
+
+static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
+ unsigned nr)
+{
+ void *delta_data, *base;
+ unsigned long base_size;
+ struct object_id base_oid;
+
+ if (type == OBJ_REF_DELTA) {
+ oidread(&base_oid, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
+ delta_data = get_data(delta_size);
+ if (!delta_data)
+ return;
+ if (has_object_file(&base_oid))
+ ; /* Ok we have this one */
+ else if (resolve_against_held(nr, &base_oid,
+ delta_data, delta_size))
+ return; /* we are done */
+ else {
+ /* cannot resolve yet --- queue it */
+ oidclr(&obj_list[nr].oid);
+ add_delta_to_list(nr, &base_oid, 0, delta_data, delta_size);
+ return;
+ }
+ } else {
+ unsigned base_found = 0;
+ unsigned char *pack, c;
+ off_t base_offset;
+ unsigned lo, mid, hi;
+
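+ /*
+ * OBJ_OFS_DELTA stores the distance back to its base as a
+ * big-endian variable-length integer; every continuation byte
+ * also adds 1 so that shorter encodings never overlap longer
+ * ones.
+ */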
+ pack = fill(1);
+ c = *pack;
+ use(1);
+ base_offset = c & 127;
+ while (c & 128) {
+ base_offset += 1;
+ if (!base_offset || MSB(base_offset, 7))
+ die("offset value overflow for delta base object");
+ pack = fill(1);
+ c = *pack;
+ use(1);
+ base_offset = (base_offset << 7) + (c & 127);
+ }
+ base_offset = obj_list[nr].offset - base_offset;
+ if (base_offset <= 0 || base_offset >= obj_list[nr].offset)
+ die("offset value out of bound for delta base object");
+
+ delta_data = get_data(delta_size);
+ if (!delta_data)
+ return;
+ lo = 0;
+ hi = nr;
+ while (lo < hi) {
+ mid = lo + (hi - lo) / 2;
+ if (base_offset < obj_list[mid].offset) {
+ hi = mid;
+ } else if (base_offset > obj_list[mid].offset) {
+ lo = mid + 1;
+ } else {
+ oidcpy(&base_oid, &obj_list[mid].oid);
+ base_found = !is_null_oid(&base_oid);
+ break;
+ }
+ }
+ if (!base_found) {
+ /*
+ * The delta base object is itself a delta that
+ * has not been resolved yet.
+ */
+ oidclr(&obj_list[nr].oid);
+ add_delta_to_list(nr, null_oid(), base_offset,
+ delta_data, delta_size);
+ return;
+ }
+ }
+
+ if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
+ return;
+
+ base = read_object_file(&base_oid, &type, &base_size);
+ if (!base) {
+ error("failed to read delta-pack base object %s",
+ oid_to_hex(&base_oid));
+ if (!recover)
+ exit(1);
+ has_errors = 1;
+ return;
+ }
+ resolve_delta(nr, type, base, base_size, delta_data, delta_size);
+ free(base);
+}
+
+static void unpack_one(unsigned nr)
+{
+ unsigned shift;
+ unsigned char *pack;
+ unsigned long size, c;
+ enum object_type type;
+
+ obj_list[nr].offset = consumed_bytes;
+
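+ /*
+ * Each entry starts with a header byte: bits 4-6 hold the object
+ * type and the low 4 bits seed the size, which continues as a
+ * little-endian base-128 varint while the high bit is set.
+ */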
+ pack = fill(1);
+ c = *pack;
+ use(1);
+ type = (c >> 4) & 7;
+ size = (c & 15);
+ shift = 4;
+ while (c & 0x80) {
+ pack = fill(1);
+ c = *pack;
+ use(1);
+ size += (c & 0x7f) << shift;
+ shift += 7;
+ }
+
+ switch (type) {
+ case OBJ_BLOB:
+ if (!dry_run && size > big_file_threshold) {
+ stream_blob(size, nr);
+ return;
+ }
+ /* fallthrough */
+ case OBJ_COMMIT:
+ case OBJ_TREE:
+ case OBJ_TAG:
+ unpack_non_delta_entry(type, size, nr);
+ return;
+ case OBJ_REF_DELTA:
+ case OBJ_OFS_DELTA:
+ unpack_delta_entry(type, size, nr);
+ return;
+ default:
+ error("bad object type %d", type);
+ has_errors = 1;
+ if (recover)
+ return;
+ exit(1);
+ }
+}
+
+static void unpack_all(void)
+{
+ int i;
+ struct pack_header *hdr = fill(sizeof(struct pack_header));
+
+ nr_objects = ntohl(hdr->hdr_entries);
+
+ if (ntohl(hdr->hdr_signature) != PACK_SIGNATURE)
+ die("bad pack file");
+ if (!pack_version_ok(hdr->hdr_version))
+ die("unknown pack file version %"PRIu32,
+ ntohl(hdr->hdr_version));
+ use(sizeof(struct pack_header));
+
+ if (!quiet)
+ progress = start_progress(_("Unpacking objects"), nr_objects);
+ CALLOC_ARRAY(obj_list, nr_objects);
+ begin_odb_transaction();
+ for (i = 0; i < nr_objects; i++) {
+ unpack_one(i);
+ display_progress(progress, i + 1);
+ }
+ end_odb_transaction();
+ stop_progress(&progress);
+
+ if (delta_list)
+ die("unresolved deltas left after unpacking");
+}
+
+int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct object_id oid;
+
+ read_replace_refs = 0;
+
+ git_config(git_default_config, NULL);
+
+ quiet = !isatty(2);
+
+ for (i = 1 ; i < argc; i++) {
+ const char *arg = argv[i];
+
+ if (*arg == '-') {
+ if (!strcmp(arg, "-n")) {
+ dry_run = 1;
+ continue;
+ }
+ if (!strcmp(arg, "-q")) {
+ quiet = 1;
+ continue;
+ }
+ if (!strcmp(arg, "-r")) {
+ recover = 1;
+ continue;
+ }
+ if (!strcmp(arg, "--strict")) {
+ strict = 1;
+ continue;
+ }
+ if (skip_prefix(arg, "--strict=", &arg)) {
+ strict = 1;
+ fsck_set_msg_types(&fsck_options, arg);
+ continue;
+ }
+ if (starts_with(arg, "--pack_header=")) {
+ struct pack_header *hdr;
+ char *c;
+
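+ /*
+ * The caller (e.g. receive-pack) has already consumed the
+ * pack header from the stream; rebuild it in our buffer so
+ * the running hash and the entry count still match.
+ */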
+ hdr = (struct pack_header *)buffer;
+ hdr->hdr_signature = htonl(PACK_SIGNATURE);
+ hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10));
+ if (*c != ',')
+ die("bad %s", arg);
+ hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10));
+ if (*c)
+ die("bad %s", arg);
+ len = sizeof(*hdr);
+ continue;
+ }
+ if (skip_prefix(arg, "--max-input-size=", &arg)) {
+ max_input_size = strtoumax(arg, NULL, 10);
+ continue;
+ }
+ usage(unpack_usage);
+ }
+
+ /* We don't take any non-flag arguments now. Maybe some day. */
+ usage(unpack_usage);
+ }
+ the_hash_algo->init_fn(&ctx);
+ unpack_all();
+ the_hash_algo->update_fn(&ctx, buffer, offset);
+ the_hash_algo->final_oid_fn(&oid, &ctx);
+ if (strict) {
+ write_rest();
+ if (fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
+ }
+ if (!hasheq(fill(the_hash_algo->rawsz), oid.hash))
+ die("final sha1 did not match");
+ use(the_hash_algo->rawsz);
+
+ /* Write the last part of the buffer to stdout */
+ while (len) {
+ int ret = xwrite(1, buffer + offset, len);
+ if (ret <= 0)
+ break;
+ len -= ret;
+ offset += ret;
+ }
+
+ /* All done */
+ return has_errors;
+}
diff --git a/builtin/update-index.c b/builtin/update-index.c
new file mode 100644
index 0000000..82d5902
--- /dev/null
+++ b/builtin/update-index.c
@@ -0,0 +1,1307 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "cache.h"
+#include "bulk-checkin.h"
+#include "config.h"
+#include "lockfile.h"
+#include "quote.h"
+#include "cache-tree.h"
+#include "tree-walk.h"
+#include "builtin.h"
+#include "refs.h"
+#include "resolve-undo.h"
+#include "parse-options.h"
+#include "pathspec.h"
+#include "dir.h"
+#include "split-index.h"
+#include "fsmonitor.h"
+
+/*
+ * Default to not allowing changes to the list of files. The
+ * tool doesn't actually care, but this makes it harder to add
+ * files to revision control by mistake, e.g. by running something
+ * like "git update-index *" and suddenly having all the object
+ * files under revision control.
+ */
+static int allow_add;
+static int allow_remove;
+static int allow_replace;
+static int info_only;
+static int force_remove;
+static int verbose;
+static int mark_valid_only;
+static int mark_skip_worktree_only;
+static int mark_fsmonitor_only;
+static int ignore_skip_worktree_entries;
+#define MARK_FLAG 1
+#define UNMARK_FLAG 2
+static struct strbuf mtime_dir = STRBUF_INIT;
+
+/* Untracked cache mode */
+enum uc_mode {
+ UC_UNSPECIFIED = -1,
+ UC_DISABLE = 0,
+ UC_ENABLE,
+ UC_TEST,
+ UC_FORCE
+};
+
+__attribute__((format (printf, 1, 2)))
+static void report(const char *fmt, ...)
+{
+ va_list vp;
+
+ if (!verbose)
+ return;
+
+ /*
+ * It is possible, though unlikely, that a caller could use the verbose
+ * output to synchronize with addition of objects to the object
+ * database. The current implementation of ODB transactions leaves
+ * objects invisible while a transaction is active, so flush the
+ * transaction here before reporting a change made by update-index.
+ */
+ flush_odb_transaction();
+ va_start(vp, fmt);
+ vprintf(fmt, vp);
+ putchar('\n');
+ va_end(vp);
+}
+
+static void remove_test_directory(void)
+{
+ if (mtime_dir.len)
+ remove_dir_recursively(&mtime_dir, 0);
+}
+
+static const char *get_mtime_path(const char *path)
+{
+ static struct strbuf sb = STRBUF_INIT;
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s/%s", mtime_dir.buf, path);
+ return sb.buf;
+}
+
+static void xmkdir(const char *path)
+{
+ path = get_mtime_path(path);
+ if (mkdir(path, 0700))
+ die_errno(_("failed to create directory %s"), path);
+}
+
+static int xstat_mtime_dir(struct stat *st)
+{
+ if (stat(mtime_dir.buf, st))
+ die_errno(_("failed to stat %s"), mtime_dir.buf);
+ return 0;
+}
+
+static int create_file(const char *path)
+{
+ int fd;
+ path = get_mtime_path(path);
+ fd = xopen(path, O_CREAT | O_RDWR, 0644);
+ return fd;
+}
+
+static void xunlink(const char *path)
+{
+ path = get_mtime_path(path);
+ if (unlink(path))
+ die_errno(_("failed to delete file %s"), path);
+}
+
+static void xrmdir(const char *path)
+{
+ path = get_mtime_path(path);
+ if (rmdir(path))
+ die_errno(_("failed to delete directory %s"), path);
+}
+
+static void avoid_racy(void)
+{
+ /*
+ * We could presumably usleep(10) instead if USE_NSEC is defined:
+ * the nsec field may be there, but the OS could still choose to
+ * ignore it.
+ */
+ sleep(1);
+}
+
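+/*
+ * Probe, in a throwaway directory, whether this filesystem updates a
+ * directory's stat data when entries are created or deleted but leaves it
+ * alone when a contained file is merely rewritten or when changes happen
+ * inside a subdirectory; that is the behaviour the untracked cache
+ * relies on.
+ */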
+static int test_if_untracked_cache_is_supported(void)
+{
+ struct stat st;
+ struct stat_data base;
+ int fd, ret = 0;
+ char *cwd;
+
+ strbuf_addstr(&mtime_dir, "mtime-test-XXXXXX");
+ if (!mkdtemp(mtime_dir.buf))
+ die_errno("Could not make temporary directory");
+
+ cwd = xgetcwd();
+ fprintf(stderr, _("Testing mtime in '%s' "), cwd);
+ free(cwd);
+
+ atexit(remove_test_directory);
+ xstat_mtime_dir(&st);
+ fill_stat_data(&base, &st);
+ fputc('.', stderr);
+
+ avoid_racy();
+ fd = create_file("newfile");
+ xstat_mtime_dir(&st);
+ if (!match_stat_data(&base, &st)) {
+ close(fd);
+ fputc('\n', stderr);
+ fprintf_ln(stderr,_("directory stat info does not "
+ "change after adding a new file"));
+ goto done;
+ }
+ fill_stat_data(&base, &st);
+ fputc('.', stderr);
+
+ avoid_racy();
+ xmkdir("new-dir");
+ xstat_mtime_dir(&st);
+ if (!match_stat_data(&base, &st)) {
+ close(fd);
+ fputc('\n', stderr);
+ fprintf_ln(stderr, _("directory stat info does not change "
+ "after adding a new directory"));
+ goto done;
+ }
+ fill_stat_data(&base, &st);
+ fputc('.', stderr);
+
+ avoid_racy();
+ write_or_die(fd, "data", 4);
+ close(fd);
+ xstat_mtime_dir(&st);
+ if (match_stat_data(&base, &st)) {
+ fputc('\n', stderr);
+ fprintf_ln(stderr, _("directory stat info changes "
+ "after updating a file"));
+ goto done;
+ }
+ fputc('.', stderr);
+
+ avoid_racy();
+ close(create_file("new-dir/new"));
+ xstat_mtime_dir(&st);
+ if (match_stat_data(&base, &st)) {
+ fputc('\n', stderr);
+ fprintf_ln(stderr, _("directory stat info changes after "
+ "adding a file inside subdirectory"));
+ goto done;
+ }
+ fputc('.', stderr);
+
+ avoid_racy();
+ xunlink("newfile");
+ xstat_mtime_dir(&st);
+ if (!match_stat_data(&base, &st)) {
+ fputc('\n', stderr);
+ fprintf_ln(stderr, _("directory stat info does not "
+ "change after deleting a file"));
+ goto done;
+ }
+ fill_stat_data(&base, &st);
+ fputc('.', stderr);
+
+ avoid_racy();
+ xunlink("new-dir/new");
+ xrmdir("new-dir");
+ xstat_mtime_dir(&st);
+ if (!match_stat_data(&base, &st)) {
+ fputc('\n', stderr);
+ fprintf_ln(stderr, _("directory stat info does not "
+ "change after deleting a directory"));
+ goto done;
+ }
+
+ if (rmdir(mtime_dir.buf))
+ die_errno(_("failed to delete directory %s"), mtime_dir.buf);
+ fprintf_ln(stderr, _(" OK"));
+ ret = 1;
+
+done:
+ strbuf_release(&mtime_dir);
+ return ret;
+}
+
+static int mark_ce_flags(const char *path, int flag, int mark)
+{
+ int namelen = strlen(path);
+ int pos = index_name_pos(&the_index, path, namelen);
+ if (0 <= pos) {
+ mark_fsmonitor_invalid(&the_index, the_index.cache[pos]);
+ if (mark)
+ the_index.cache[pos]->ce_flags |= flag;
+ else
+ the_index.cache[pos]->ce_flags &= ~flag;
+ the_index.cache[pos]->ce_flags |= CE_UPDATE_IN_BASE;
+ cache_tree_invalidate_path(&the_index, path);
+ the_index.cache_changed |= CE_ENTRY_CHANGED;
+ return 0;
+ }
+ return -1;
+}
+
+static int remove_one_path(const char *path)
+{
+ if (!allow_remove)
+ return error("%s: does not exist and --remove not passed", path);
+ if (remove_file_from_index(&the_index, path))
+ return error("%s: cannot remove from the index", path);
+ return 0;
+}
+
+/*
+ * Handle a path that couldn't be lstat'ed. It's either:
+ * - missing file (ENOENT or ENOTDIR). That's ok if we're
+ * supposed to be removing it and the removal actually
+ * succeeds.
+ * - permission error. That's never ok.
+ */
+static int process_lstat_error(const char *path, int err)
+{
+ if (is_missing_file_error(err))
+ return remove_one_path(path);
+ return error("lstat(\"%s\"): %s", path, strerror(err));
+}
+
+static int add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st)
+{
+ int option;
+ struct cache_entry *ce;
+
+ /* Was the old index entry already up-to-date? */
+ if (old && !ce_stage(old) && !ie_match_stat(&the_index, old, st, 0))
+ return 0;
+
+ ce = make_empty_cache_entry(&the_index, len);
+ memcpy(ce->name, path, len);
+ ce->ce_flags = create_ce_flags(0);
+ ce->ce_namelen = len;
+ fill_stat_cache_info(&the_index, ce, st);
+ ce->ce_mode = ce_mode_from_stat(old, st->st_mode);
+
+ if (index_path(&the_index, &ce->oid, path, st,
+ info_only ? 0 : HASH_WRITE_OBJECT)) {
+ discard_cache_entry(ce);
+ return -1;
+ }
+ option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
+ option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
+ if (add_index_entry(&the_index, ce, option)) {
+ discard_cache_entry(ce);
+ return error("%s: cannot add to the index - missing --add option?", path);
+ }
+ return 0;
+}
+
+/*
+ * Handle a path that was a directory. Four cases:
+ *
+ * - it's already a gitlink in the index, and we keep it that
+ * way, and update it if we can (if we cannot find the HEAD,
+ * we're going to keep it unchanged in the index!)
+ *
+ * - it's a *file* in the index, in which case it should be
+ * removed as a file if removal is allowed, since it doesn't
+ * exist as such any more. If removal isn't allowed, it's
+ * an error.
+ *
+ * (NOTE! This is old and arguably fairly strange behaviour.
+ * We might want to make this an error unconditionally, and
+ * use "--force-remove" if you actually want to force removal).
+ *
+ * - it used to exist as a subdirectory (ie multiple files with
+ * this particular prefix) in the index, in which case it's wrong
+ * to try to update it as a directory.
+ *
+ * - it doesn't exist at all in the index, but it is a valid
+ * git directory, and it should be *added* as a gitlink.
+ */
+static int process_directory(const char *path, int len, struct stat *st)
+{
+ struct object_id oid;
+ int pos = index_name_pos(&the_index, path, len);
+
+ /* Exact match: file or existing gitlink */
+ if (pos >= 0) {
+ const struct cache_entry *ce = the_index.cache[pos];
+ if (S_ISGITLINK(ce->ce_mode)) {
+
+ /* Do nothing to the index if there is no HEAD! */
+ if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ return 0;
+
+ return add_one_path(ce, path, len, st);
+ }
+ /* Should this be an unconditional error? */
+ return remove_one_path(path);
+ }
+
+ /* Inexact match: is there perhaps a subdirectory match? */
+ pos = -pos-1;
+ while (pos < the_index.cache_nr) {
+ const struct cache_entry *ce = the_index.cache[pos++];
+
+ if (strncmp(ce->name, path, len))
+ break;
+ if (ce->name[len] > '/')
+ break;
+ if (ce->name[len] < '/')
+ continue;
+
+ /* Subdirectory match - error out */
+ return error("%s: is a directory - add individual files instead", path);
+ }
+
+ /* No match - should we add it as a gitlink? */
+ if (!resolve_gitlink_ref(path, "HEAD", &oid))
+ return add_one_path(NULL, path, len, st);
+
+ /* Error out. */
+ return error("%s: is a directory - add files inside instead", path);
+}
+
+static int process_path(const char *path, struct stat *st, int stat_errno)
+{
+ int pos, len;
+ const struct cache_entry *ce;
+
+ len = strlen(path);
+ if (has_symlink_leading_path(path, len))
+ return error("'%s' is beyond a symbolic link", path);
+
+ pos = cache_name_pos(path, len);
+ ce = pos < 0 ? NULL : the_index.cache[pos];
+ if (ce && ce_skip_worktree(ce)) {
+ /*
+ * The working directory version is assumed "good",
+ * so updating it does not make sense.
+ * On the other hand, removing it from the index should work.
+ */
+ if (!ignore_skip_worktree_entries && allow_remove &&
+ remove_file_from_index(&the_index, path))
+ return error("%s: cannot remove from the index", path);
+ return 0;
+ }
+
+ /*
+ * First things first: get the stat information, to decide
+ * what to do about the pathname!
+ */
+ if (stat_errno)
+ return process_lstat_error(path, stat_errno);
+
+ if (S_ISDIR(st->st_mode))
+ return process_directory(path, len, st);
+
+ return add_one_path(ce, path, len, st);
+}
+
+static int add_cacheinfo(unsigned int mode, const struct object_id *oid,
+ const char *path, int stage)
+{
+ int len, option;
+ struct cache_entry *ce;
+
+ if (!verify_path(path, mode))
+ return error("Invalid path '%s'", path);
+
+ len = strlen(path);
+ ce = make_empty_cache_entry(&the_index, len);
+
+ oidcpy(&ce->oid, oid);
+ memcpy(ce->name, path, len);
+ ce->ce_flags = create_ce_flags(stage);
+ ce->ce_namelen = len;
+ ce->ce_mode = create_ce_mode(mode);
+ if (assume_unchanged)
+ ce->ce_flags |= CE_VALID;
+ option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
+ option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
+ if (add_index_entry(&the_index, ce, option))
+ return error("%s: cannot add to the index - missing --add option?",
+ path);
+ report("add '%s'", path);
+ return 0;
+}
+
+static void chmod_path(char flip, const char *path)
+{
+ int pos;
+ struct cache_entry *ce;
+
+ pos = index_name_pos(&the_index, path, strlen(path));
+ if (pos < 0)
+ goto fail;
+ ce = the_index.cache[pos];
+ if (chmod_index_entry(&the_index, ce, flip) < 0)
+ goto fail;
+
+ report("chmod %cx '%s'", flip, path);
+ return;
+ fail:
+ die("git update-index: cannot chmod %cx '%s'", flip, path);
+}
+
+static void update_one(const char *path)
+{
+ int stat_errno = 0;
+ struct stat st;
+
+ if (mark_valid_only || mark_skip_worktree_only || force_remove ||
+ mark_fsmonitor_only)
+ st.st_mode = 0;
+ else if (lstat(path, &st) < 0) {
+ st.st_mode = 0;
+ stat_errno = errno;
+ } /* else stat is valid */
+
+ if (!verify_path(path, st.st_mode)) {
+ fprintf(stderr, "Ignoring path %s\n", path);
+ return;
+ }
+ if (mark_valid_only) {
+ if (mark_ce_flags(path, CE_VALID, mark_valid_only == MARK_FLAG))
+ die("Unable to mark file %s", path);
+ return;
+ }
+ if (mark_skip_worktree_only) {
+ if (mark_ce_flags(path, CE_SKIP_WORKTREE, mark_skip_worktree_only == MARK_FLAG))
+ die("Unable to mark file %s", path);
+ return;
+ }
+ if (mark_fsmonitor_only) {
+ if (mark_ce_flags(path, CE_FSMONITOR_VALID, mark_fsmonitor_only == MARK_FLAG))
+ die("Unable to mark file %s", path);
+ return;
+ }
+
+ if (force_remove) {
+ if (remove_file_from_index(&the_index, path))
+ die("git update-index: unable to remove %s", path);
+ report("remove '%s'", path);
+ return;
+ }
+ if (process_path(path, &st, stat_errno))
+ die("Unable to process path %s", path);
+ report("add '%s'", path);
+}
+
+static void read_index_info(int nul_term_line)
+{
+ const int hexsz = the_hash_algo->hexsz;
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf uq = STRBUF_INIT;
+ strbuf_getline_fn getline_fn;
+
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
+ char *ptr, *tab;
+ char *path_name;
+ struct object_id oid;
+ unsigned int mode;
+ unsigned long ul;
+ int stage;
+
+ /* This reads lines formatted in one of three formats:
+ *
+ * (1) mode SP sha1 TAB path
+ * The first format is what "git apply --index-info"
+ * reports, and used to reconstruct a partial tree
+ * that is used for phony merge base tree when falling
+ * back on 3-way merge.
+ *
+ * (2) mode SP type SP sha1 TAB path
+ * The second format is to stuff "git ls-tree" output
+ * into the index file.
+ *
+ * (3) mode SP sha1 SP stage TAB path
+ * This format is to put higher order stages into the
+ * index file and matches "git ls-files --stage" output.
+ */
+ errno = 0;
+ ul = strtoul(buf.buf, &ptr, 8);
+ if (ptr == buf.buf || *ptr != ' '
+ || errno || (unsigned int) ul != ul)
+ goto bad_line;
+ mode = ul;
+
+ tab = strchr(ptr, '\t');
+ if (!tab || tab - ptr < hexsz + 1)
+ goto bad_line;
+
+ if (tab[-2] == ' ' && '0' <= tab[-1] && tab[-1] <= '3') {
+ stage = tab[-1] - '0';
+ ptr = tab + 1; /* point at the head of path */
+ tab = tab - 2; /* point at tail of sha1 */
+ }
+ else {
+ stage = 0;
+ ptr = tab + 1; /* point at the head of path */
+ }
+
+ if (get_oid_hex(tab - hexsz, &oid) ||
+ tab[-(hexsz + 1)] != ' ')
+ goto bad_line;
+
+ path_name = ptr;
+ if (!nul_term_line && path_name[0] == '"') {
+ strbuf_reset(&uq);
+ if (unquote_c_style(&uq, path_name, NULL)) {
+ die("git update-index: bad quoting of path name");
+ }
+ path_name = uq.buf;
+ }
+
+ if (!verify_path(path_name, mode)) {
+ fprintf(stderr, "Ignoring path %s\n", path_name);
+ continue;
+ }
+
+ if (!mode) {
+ /* mode == 0 means there is no such path -- remove */
+ if (remove_file_from_index(&the_index, path_name))
+ die("git update-index: unable to remove %s",
+ ptr);
+ }
+ else {
+ /* mode ' ' sha1 '\t' name
+ * ptr[-1] points at tab,
+ * ptr[-41] is at the beginning of sha1
+ */
+ ptr[-(hexsz + 2)] = ptr[-1] = 0;
+ if (add_cacheinfo(mode, &oid, path_name, stage))
+ die("git update-index: unable to update %s",
+ path_name);
+ }
+ continue;
+
+ bad_line:
+ die("malformed index info %s", buf.buf);
+ }
+ strbuf_release(&buf);
+ strbuf_release(&uq);
+}
+
+static const char * const update_index_usage[] = {
+ N_("git update-index [<options>] [--] [<file>...]"),
+ NULL
+};
+
+static struct object_id head_oid;
+static struct object_id merge_head_oid;
+
+static struct cache_entry *read_one_ent(const char *which,
+ struct object_id *ent, const char *path,
+ int namelen, int stage)
+{
+ unsigned short mode;
+ struct object_id oid;
+ struct cache_entry *ce;
+
+ if (get_tree_entry(the_repository, ent, path, &oid, &mode)) {
+ if (which)
+ error("%s: not in %s branch.", path, which);
+ return NULL;
+ }
+ if (!the_index.sparse_index && mode == S_IFDIR) {
+ if (which)
+ error("%s: not a blob in %s branch.", path, which);
+ return NULL;
+ }
+ ce = make_empty_cache_entry(&the_index, namelen);
+
+ oidcpy(&ce->oid, &oid);
+ memcpy(ce->name, path, namelen);
+ ce->ce_flags = create_ce_flags(stage);
+ ce->ce_namelen = namelen;
+ ce->ce_mode = create_ce_mode(mode);
+ return ce;
+}
+
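+/*
+ * Recreate the conflicted state for a path that was resolved by mistake:
+ * drop its stage #0 entry and repopulate stage #2 from HEAD and stage #3
+ * from MERGE_HEAD.
+ */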
+static int unresolve_one(const char *path)
+{
+ int namelen = strlen(path);
+ int pos;
+ int ret = 0;
+ struct cache_entry *ce_2 = NULL, *ce_3 = NULL;
+
+ /* See if there is such entry in the index. */
+ pos = index_name_pos(&the_index, path, namelen);
+ if (0 <= pos) {
+ /* already merged */
+ pos = unmerge_index_entry_at(&the_index, pos);
+ if (pos < the_index.cache_nr) {
+ const struct cache_entry *ce = the_index.cache[pos];
+ if (ce_stage(ce) &&
+ ce_namelen(ce) == namelen &&
+ !memcmp(ce->name, path, namelen))
+ return 0;
+ }
+ /* no resolve-undo information; fall back */
+ } else {
+ /* If there isn't, either it is unmerged, or
+ * resolved as "removed" by mistake. We do not
+ * want to do anything in the former case.
+ */
+ pos = -pos-1;
+ if (pos < the_index.cache_nr) {
+ const struct cache_entry *ce = the_index.cache[pos];
+ if (ce_namelen(ce) == namelen &&
+ !memcmp(ce->name, path, namelen)) {
+ fprintf(stderr,
+ "%s: skipping still unmerged path.\n",
+ path);
+ goto free_return;
+ }
+ }
+ }
+
+ /* Grab blobs from given path from HEAD and MERGE_HEAD,
+ * stuff HEAD version in stage #2,
+ * stuff MERGE_HEAD version in stage #3.
+ */
+ ce_2 = read_one_ent("our", &head_oid, path, namelen, 2);
+ ce_3 = read_one_ent("their", &merge_head_oid, path, namelen, 3);
+
+ if (!ce_2 || !ce_3) {
+ ret = -1;
+ goto free_return;
+ }
+ if (oideq(&ce_2->oid, &ce_3->oid) &&
+ ce_2->ce_mode == ce_3->ce_mode) {
+ fprintf(stderr, "%s: identical in both, skipping.\n",
+ path);
+ goto free_return;
+ }
+
+ remove_file_from_index(&the_index, path);
+ if (add_index_entry(&the_index, ce_2, ADD_CACHE_OK_TO_ADD)) {
+ error("%s: cannot add our version to the index.", path);
+ ret = -1;
+ goto free_return;
+ }
+ if (!add_index_entry(&the_index, ce_3, ADD_CACHE_OK_TO_ADD))
+ return 0;
+ error("%s: cannot add their version to the index.", path);
+ ret = -1;
+ free_return:
+ discard_cache_entry(ce_2);
+ discard_cache_entry(ce_3);
+ return ret;
+}
+
+static void read_head_pointers(void)
+{
+ if (read_ref("HEAD", &head_oid))
+ die("No HEAD -- no initial commit yet?");
+ if (read_ref("MERGE_HEAD", &merge_head_oid)) {
+ fprintf(stderr, "Not in the middle of a merge.\n");
+ exit(0);
+ }
+}
+
+static int do_unresolve(int ac, const char **av,
+ const char *prefix, int prefix_length)
+{
+ int i;
+ int err = 0;
+
+ /* Read HEAD and MERGE_HEAD; if MERGE_HEAD does not exist, we
+ * are not doing a merge, so exit with success status.
+ */
+ read_head_pointers();
+
+ for (i = 1; i < ac; i++) {
+ const char *arg = av[i];
+ char *p = prefix_path(prefix, prefix_length, arg);
+ err |= unresolve_one(p);
+ free(p);
+ }
+ return err;
+}
+
+static int do_reupdate(const char **paths,
+ const char *prefix)
+{
+ /* Read HEAD and run update-index on paths that are
+ * merged and already different between index and HEAD.
+ */
+ int pos;
+ int has_head = 1;
+ struct pathspec pathspec;
+
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_CWD,
+ prefix, paths);
+
+ if (read_ref("HEAD", &head_oid))
+ /* If there is no HEAD, that means it is an initial
+ * commit. Update everything in the index.
+ */
+ has_head = 0;
+ redo:
+ for (pos = 0; pos < the_index.cache_nr; pos++) {
+ const struct cache_entry *ce = the_index.cache[pos];
+ struct cache_entry *old = NULL;
+ int save_nr;
+ char *path;
+
+ if (ce_stage(ce) || !ce_path_match(&the_index, ce, &pathspec, NULL))
+ continue;
+ if (has_head)
+ old = read_one_ent(NULL, &head_oid,
+ ce->name, ce_namelen(ce), 0);
+ if (old && ce->ce_mode == old->ce_mode &&
+ oideq(&ce->oid, &old->oid)) {
+ discard_cache_entry(old);
+ continue; /* unchanged */
+ }
+
+ /* At this point, we know the contents of the sparse directory are
+ * modified with respect to HEAD, so we expand the index and restart
+ * to process each path individually
+ */
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ ensure_full_index(&the_index);
+ goto redo;
+ }
+
+ /* Be careful. The working tree may not have the
+ * path anymore, in which case, under 'allow_remove',
+ * or worse yet 'allow_replace', active_nr may decrease.
+ */
+ save_nr = the_index.cache_nr;
+ path = xstrdup(ce->name);
+ update_one(path);
+ free(path);
+ discard_cache_entry(old);
+ if (save_nr != the_index.cache_nr)
+ goto redo;
+ }
+ clear_pathspec(&pathspec);
+ return 0;
+}
+
+struct refresh_params {
+ unsigned int flags;
+ int *has_errors;
+};
+
+static int refresh(struct refresh_params *o, unsigned int flag)
+{
+ setup_work_tree();
+ repo_read_index(the_repository);
+ *o->has_errors |= refresh_index(&the_index, o->flags | flag, NULL,
+ NULL, NULL);
+ if (has_racy_timestamp(&the_index)) {
+ /*
+ * Even if nothing else has changed, updating the file
+ * increases the chance that racy timestamps become
+ * non-racy, helping future run-time performance.
+ * We do that even in case of "errors" returned by
+ * refresh_index() as these are not actual errors.
+ * cmd_status() does the same.
+ */
+ the_index.cache_changed |= SOMETHING_CHANGED;
+ }
+ return 0;
+}
+
+static int refresh_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ return refresh(opt->value, 0);
+}
+
+static int really_refresh_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ return refresh(opt->value, REFRESH_REALLY);
+}
+
+static int chmod_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ char *flip = opt->value;
+ BUG_ON_OPT_NEG(unset);
+ if ((arg[0] != '-' && arg[0] != '+') || arg[1] != 'x' || arg[2])
+ return error("option 'chmod' expects \"+x\" or \"-x\"");
+ *flip = arg[0];
+ return 0;
+}
+
+static int resolve_undo_clear_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ resolve_undo_clear_index(&the_index);
+ return 0;
+}
+
+static int parse_new_style_cacheinfo(const char *arg,
+ unsigned int *mode,
+ struct object_id *oid,
+ const char **path)
+{
+ unsigned long ul;
+ char *endp;
+ const char *p;
+
+ if (!arg)
+ return -1;
+
+ errno = 0;
+ ul = strtoul(arg, &endp, 8);
+ if (errno || endp == arg || *endp != ',' || (unsigned int) ul != ul)
+ return -1; /* not a new-style cacheinfo */
+ *mode = ul;
+ endp++;
+ if (parse_oid_hex(endp, oid, &p) || *p != ',')
+ return -1;
+ *path = p + 1;
+ return 0;
+}
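+
+/*
+ * For example, the combined form parsed above corresponds to an invocation
+ * such as (the path and object id are placeholders for illustration):
+ *
+ *   git update-index --cacheinfo 100644,<object>,path/to/file
+ *
+ * cacheinfo_callback() below falls back to the historical three-argument
+ * form "--cacheinfo <mode> <sha1> <path>" when the combined form does not
+ * parse.
+ */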
+
+static enum parse_opt_result cacheinfo_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
+{
+ struct object_id oid;
+ unsigned int mode;
+ const char *path;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ if (!parse_new_style_cacheinfo(ctx->argv[1], &mode, &oid, &path)) {
+ if (add_cacheinfo(mode, &oid, path, 0))
+ die("git update-index: --cacheinfo cannot add %s", path);
+ ctx->argv++;
+ ctx->argc--;
+ return 0;
+ }
+ if (ctx->argc <= 3)
+ return error("option 'cacheinfo' expects <mode>,<sha1>,<path>");
+ if (strtoul_ui(*++ctx->argv, 8, &mode) ||
+ get_oid_hex(*++ctx->argv, &oid) ||
+ add_cacheinfo(mode, &oid, *++ctx->argv, 0))
+ die("git update-index: --cacheinfo cannot add %s", *ctx->argv);
+ ctx->argc -= 3;
+ return 0;
+}
+
+static enum parse_opt_result stdin_cacheinfo_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
+{
+ int *nul_term_line = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ if (ctx->argc != 1)
+ return error("option '%s' must be the last argument", opt->long_name);
+ allow_add = allow_replace = allow_remove = 1;
+ read_index_info(*nul_term_line);
+ return 0;
+}
+
+static enum parse_opt_result stdin_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
+{
+ int *read_from_stdin = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ if (ctx->argc != 1)
+ return error("option '%s' must be the last argument", opt->long_name);
+ *read_from_stdin = 1;
+ return 0;
+}
+
+static enum parse_opt_result unresolve_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
+{
+ int *has_errors = opt->value;
+ const char *prefix = startup_info->prefix;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ /* consume remaining arguments. */
+ *has_errors = do_unresolve(ctx->argc, ctx->argv,
+ prefix, prefix ? strlen(prefix) : 0);
+ if (*has_errors)
+ the_index.cache_changed = 0;
+
+ ctx->argv += ctx->argc - 1;
+ ctx->argc = 1;
+ return 0;
+}
+
+static enum parse_opt_result reupdate_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
+{
+ int *has_errors = opt->value;
+ const char *prefix = startup_info->prefix;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+
+ /* consume remaining arguments. */
+ setup_work_tree();
+ *has_errors = do_reupdate(ctx->argv + 1, prefix);
+ if (*has_errors)
+ the_index.cache_changed = 0;
+
+ ctx->argv += ctx->argc - 1;
+ ctx->argc = 1;
+ return 0;
+}
+
+int cmd_update_index(int argc, const char **argv, const char *prefix)
+{
+ int newfd, entries, has_errors = 0, nul_term_line = 0;
+ enum uc_mode untracked_cache = UC_UNSPECIFIED;
+ int read_from_stdin = 0;
+ int prefix_length = prefix ? strlen(prefix) : 0;
+ int preferred_index_format = 0;
+ char set_executable_bit = 0;
+ struct refresh_params refresh_args = {0, &has_errors};
+ int lock_error = 0;
+ int split_index = -1;
+ int force_write = 0;
+ int fsmonitor = -1;
+ struct lock_file lock_file = LOCK_INIT;
+ struct parse_opt_ctx_t ctx;
+ strbuf_getline_fn getline_fn;
+ int parseopt_state = PARSE_OPT_UNKNOWN;
+ struct repository *r = the_repository;
+ struct option options[] = {
+ OPT_BIT('q', NULL, &refresh_args.flags,
+ N_("continue refresh even when index needs update"),
+ REFRESH_QUIET),
+ OPT_BIT(0, "ignore-submodules", &refresh_args.flags,
+ N_("refresh: ignore submodules"),
+ REFRESH_IGNORE_SUBMODULES),
+ OPT_SET_INT(0, "add", &allow_add,
+ N_("do not ignore new files"), 1),
+ OPT_SET_INT(0, "replace", &allow_replace,
+ N_("let files replace directories and vice-versa"), 1),
+ OPT_SET_INT(0, "remove", &allow_remove,
+ N_("notice files missing from worktree"), 1),
+ OPT_BIT(0, "unmerged", &refresh_args.flags,
+ N_("refresh even if index contains unmerged entries"),
+ REFRESH_UNMERGED),
+ OPT_CALLBACK_F(0, "refresh", &refresh_args, NULL,
+ N_("refresh stat information"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ refresh_callback),
+ OPT_CALLBACK_F(0, "really-refresh", &refresh_args, NULL,
+ N_("like --refresh, but ignore assume-unchanged setting"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ really_refresh_callback),
+ {OPTION_LOWLEVEL_CALLBACK, 0, "cacheinfo", NULL,
+ N_("<mode>,<object>,<path>"),
+ N_("add the specified entry to the index"),
+ PARSE_OPT_NOARG | /* disallow --cacheinfo=<mode> form */
+ PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
+ NULL, 0,
+ cacheinfo_callback},
+ OPT_CALLBACK_F(0, "chmod", &set_executable_bit, "(+|-)x",
+ N_("override the executable bit of the listed files"),
+ PARSE_OPT_NONEG,
+ chmod_callback),
+ {OPTION_SET_INT, 0, "assume-unchanged", &mark_valid_only, NULL,
+ N_("mark files as \"not changing\""),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, MARK_FLAG},
+ {OPTION_SET_INT, 0, "no-assume-unchanged", &mark_valid_only, NULL,
+ N_("clear assumed-unchanged bit"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, UNMARK_FLAG},
+ {OPTION_SET_INT, 0, "skip-worktree", &mark_skip_worktree_only, NULL,
+ N_("mark files as \"index-only\""),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, MARK_FLAG},
+ {OPTION_SET_INT, 0, "no-skip-worktree", &mark_skip_worktree_only, NULL,
+ N_("clear skip-worktree bit"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, UNMARK_FLAG},
+ OPT_BOOL(0, "ignore-skip-worktree-entries", &ignore_skip_worktree_entries,
+ N_("do not touch index-only entries")),
+ OPT_SET_INT(0, "info-only", &info_only,
+ N_("add to index only; do not add content to object database"), 1),
+ OPT_SET_INT(0, "force-remove", &force_remove,
+ N_("remove named paths even if present in worktree"), 1),
+ OPT_BOOL('z', NULL, &nul_term_line,
+ N_("with --stdin: input lines are terminated by null bytes")),
+ {OPTION_LOWLEVEL_CALLBACK, 0, "stdin", &read_from_stdin, NULL,
+ N_("read list of paths to be updated from standard input"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ NULL, 0, stdin_callback},
+ {OPTION_LOWLEVEL_CALLBACK, 0, "index-info", &nul_term_line, NULL,
+ N_("add entries from standard input to the index"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ NULL, 0, stdin_cacheinfo_callback},
+ {OPTION_LOWLEVEL_CALLBACK, 0, "unresolve", &has_errors, NULL,
+ N_("repopulate stages #2 and #3 for the listed paths"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ NULL, 0, unresolve_callback},
+ {OPTION_LOWLEVEL_CALLBACK, 'g', "again", &has_errors, NULL,
+ N_("only update entries that differ from HEAD"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ NULL, 0, reupdate_callback},
+ OPT_BIT(0, "ignore-missing", &refresh_args.flags,
+ N_("ignore files missing from worktree"),
+ REFRESH_IGNORE_MISSING),
+ OPT_SET_INT(0, "verbose", &verbose,
+ N_("report actions to standard output"), 1),
+ OPT_CALLBACK_F(0, "clear-resolve-undo", NULL, NULL,
+ N_("(for porcelains) forget saved unresolved conflicts"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ resolve_undo_clear_callback),
+ OPT_INTEGER(0, "index-version", &preferred_index_format,
+ N_("write index in this format")),
+ OPT_BOOL(0, "split-index", &split_index,
+ N_("enable or disable split index")),
+ OPT_BOOL(0, "untracked-cache", &untracked_cache,
+ N_("enable/disable untracked cache")),
+ OPT_SET_INT(0, "test-untracked-cache", &untracked_cache,
+ N_("test if the filesystem supports untracked cache"), UC_TEST),
+ OPT_SET_INT(0, "force-untracked-cache", &untracked_cache,
+ N_("enable untracked cache without testing the filesystem"), UC_FORCE),
+ OPT_SET_INT(0, "force-write-index", &force_write,
+ N_("write out the index even if is not flagged as changed"), 1),
+ OPT_BOOL(0, "fsmonitor", &fsmonitor,
+ N_("enable or disable file system monitor")),
+ {OPTION_SET_INT, 0, "fsmonitor-valid", &mark_fsmonitor_only, NULL,
+ N_("mark files as fsmonitor valid"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, MARK_FLAG},
+ {OPTION_SET_INT, 0, "no-fsmonitor-valid", &mark_fsmonitor_only, NULL,
+ N_("clear fsmonitor valid bit"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, UNMARK_FLAG},
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(update_index_usage, options);
+
+ git_config(git_default_config, NULL);
+
+ prepare_repo_settings(r);
+ the_repository->settings.command_requires_full_index = 0;
+
+ /* we will diagnose later if it turns out that we need to update it */
+ newfd = repo_hold_locked_index(the_repository, &lock_file, 0);
+ if (newfd < 0)
+ lock_error = errno;
+
+ entries = repo_read_index(the_repository);
+ if (entries < 0)
+ die("cache corrupted");
+
+ the_index.updated_skipworktree = 1;
+
+ /*
+ * Custom copy of parse_options() because we want to handle
+ * filename arguments as they come.
+ */
+ parse_options_start(&ctx, argc, argv, prefix,
+ options, PARSE_OPT_STOP_AT_NON_OPTION);
+
+ /*
+ * Allow the object layer to optimize adding multiple objects in
+ * a batch.
+ */
+ begin_odb_transaction();
+ while (ctx.argc) {
+ if (parseopt_state != PARSE_OPT_DONE)
+ parseopt_state = parse_options_step(&ctx, options,
+ update_index_usage);
+ if (!ctx.argc)
+ break;
+ switch (parseopt_state) {
+ case PARSE_OPT_HELP:
+ case PARSE_OPT_ERROR:
+ exit(129);
+ case PARSE_OPT_COMPLETE:
+ exit(0);
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_DONE:
+ {
+ const char *path = ctx.argv[0];
+ char *p;
+
+ setup_work_tree();
+ p = prefix_path(prefix, prefix_length, path);
+ update_one(p);
+ if (set_executable_bit)
+ chmod_path(set_executable_bit, p);
+ free(p);
+ ctx.argc--;
+ ctx.argv++;
+ break;
+ }
+ case PARSE_OPT_UNKNOWN:
+ if (ctx.argv[0][1] == '-')
+ error("unknown option '%s'", ctx.argv[0] + 2);
+ else
+ error("unknown switch '%c'", *ctx.opt);
+ usage_with_options(update_index_usage, options);
+ }
+ }
+ argc = parse_options_end(&ctx);
+
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ if (preferred_index_format) {
+ if (preferred_index_format < INDEX_FORMAT_LB ||
+ INDEX_FORMAT_UB < preferred_index_format)
+ die("index-version %d not in range: %d..%d",
+ preferred_index_format,
+ INDEX_FORMAT_LB, INDEX_FORMAT_UB);
+
+ if (the_index.version != preferred_index_format)
+ the_index.cache_changed |= SOMETHING_CHANGED;
+ the_index.version = preferred_index_format;
+ }
+
+ if (read_from_stdin) {
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
+
+ setup_work_tree();
+ while (getline_fn(&buf, stdin) != EOF) {
+ char *p;
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
+ die("line is badly quoted");
+ strbuf_swap(&buf, &unquoted);
+ }
+ p = prefix_path(prefix, prefix_length, buf.buf);
+ update_one(p);
+ if (set_executable_bit)
+ chmod_path(set_executable_bit, p);
+ free(p);
+ }
+ strbuf_release(&unquoted);
+ strbuf_release(&buf);
+ }
+
+ /*
+ * By now we have added all of the new objects
+ */
+ end_odb_transaction();
+
+ if (split_index > 0) {
+ if (git_config_get_split_index() == 0)
+ warning(_("core.splitIndex is set to false; "
+ "remove or change it, if you really want to "
+ "enable split index"));
+ if (the_index.split_index)
+ the_index.cache_changed |= SPLIT_INDEX_ORDERED;
+ else
+ add_split_index(&the_index);
+ } else if (!split_index) {
+ if (git_config_get_split_index() == 1)
+ warning(_("core.splitIndex is set to true; "
+ "remove or change it, if you really want to "
+ "disable split index"));
+ remove_split_index(&the_index);
+ }
+
+ prepare_repo_settings(r);
+ switch (untracked_cache) {
+ case UC_UNSPECIFIED:
+ break;
+ case UC_DISABLE:
+ if (r->settings.core_untracked_cache == UNTRACKED_CACHE_WRITE)
+ warning(_("core.untrackedCache is set to true; "
+ "remove or change it, if you really want to "
+ "disable the untracked cache"));
+ remove_untracked_cache(&the_index);
+ report(_("Untracked cache disabled"));
+ break;
+ case UC_TEST:
+ setup_work_tree();
+ return !test_if_untracked_cache_is_supported();
+ case UC_ENABLE:
+ case UC_FORCE:
+ if (r->settings.core_untracked_cache == UNTRACKED_CACHE_REMOVE)
+ warning(_("core.untrackedCache is set to false; "
+ "remove or change it, if you really want to "
+ "enable the untracked cache"));
+ add_untracked_cache(&the_index);
+ report(_("Untracked cache enabled for '%s'"), get_git_work_tree());
+ break;
+ default:
+ BUG("bad untracked_cache value: %d", untracked_cache);
+ }
+
+ if (fsmonitor > 0) {
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ enum fsmonitor_reason reason = fsm_settings__get_reason(r);
+
+ /*
+ * The user wants to turn on FSMonitor using the command
+ * line argument. (We don't know (or care) whether that
+ * is the IPC or HOOK version.)
+ *
+ * Use one of the __get routines to force load the FSMonitor
+ * config settings into the repo-settings. That will detect
+ * whether the file system is compatible so that we can stop
+ * here with a nice error message.
+ */
+ if (reason > FSMONITOR_REASON_OK)
+ die("%s",
+ fsm_settings__get_incompatible_msg(r, reason));
+
+ if (fsm_mode == FSMONITOR_MODE_DISABLED) {
+ warning(_("core.fsmonitor is unset; "
+ "set it if you really want to "
+ "enable fsmonitor"));
+ }
+ add_fsmonitor(&the_index);
+ report(_("fsmonitor enabled"));
+ } else if (!fsmonitor) {
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ if (fsm_mode > FSMONITOR_MODE_DISABLED)
+ warning(_("core.fsmonitor is set; "
+ "remove it if you really want to "
+ "disable fsmonitor"));
+ remove_fsmonitor(&the_index);
+ report(_("fsmonitor disabled"));
+ }
+
+ if (the_index.cache_changed || force_write) {
+ if (newfd < 0) {
+ if (refresh_args.flags & REFRESH_QUIET)
+ exit(128);
+ unable_to_lock_die(get_index_file(), lock_error);
+ }
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ die("Unable to write new index file");
+ }
+
+ rollback_lock_file(&lock_file);
+
+ return has_errors ? 1 : 0;
+}
diff --git a/builtin/update-ref.c b/builtin/update-ref.c
new file mode 100644
index 0000000..a84e7b4
--- /dev/null
+++ b/builtin/update-ref.c
@@ -0,0 +1,579 @@
+#include "cache.h"
+#include "config.h"
+#include "refs.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "quote.h"
+#include "strvec.h"
+
+static const char * const git_update_ref_usage[] = {
+ N_("git update-ref [<options>] -d <refname> [<old-val>]"),
+ N_("git update-ref [<options>] <refname> <new-val> [<old-val>]"),
+ N_("git update-ref [<options>] --stdin [-z]"),
+ NULL
+};
+
+static char line_termination = '\n';
+static unsigned int update_flags;
+static unsigned int default_flags;
+static unsigned create_reflog_flag;
+static const char *msg;
+
+/*
+ * Parse one whitespace- or NUL-terminated, possibly C-quoted argument
+ * and append the result to arg. Return a pointer to the terminator.
+ * Die if there is an error in how the argument is C-quoted. This
+ * function is only used if not -z.
+ */
+static const char *parse_arg(const char *next, struct strbuf *arg)
+{
+ if (*next == '"') {
+ const char *orig = next;
+
+ if (unquote_c_style(arg, next, &next))
+ die("badly quoted argument: %s", orig);
+ if (*next && !isspace(*next))
+ die("unexpected character after quoted argument: %s", orig);
+ } else {
+ while (*next && !isspace(*next))
+ strbuf_addch(arg, *next++);
+ }
+
+ return next;
+}
+
+/*
+ * Parse the reference name immediately after "command SP". If not
+ * -z, then handle C-quoting. Return a pointer to a newly allocated
+ * string containing the name of the reference, or NULL if there was
+ * an error. Update *next to point at the character that terminates
+ * the argument. Die if C-quoting is malformed or the reference name
+ * is invalid.
+ */
+static char *parse_refname(const char **next)
+{
+ struct strbuf ref = STRBUF_INIT;
+
+ if (line_termination) {
+ /* Without -z, use the next argument */
+ *next = parse_arg(*next, &ref);
+ } else {
+ /* With -z, use everything up to the next NUL */
+ strbuf_addstr(&ref, *next);
+ *next += ref.len;
+ }
+
+ if (!ref.len) {
+ strbuf_release(&ref);
+ return NULL;
+ }
+
+ if (check_refname_format(ref.buf, REFNAME_ALLOW_ONELEVEL))
+ die("invalid ref format: %s", ref.buf);
+
+ return strbuf_detach(&ref, NULL);
+}
+
+/*
+ * The value being parsed is <oldvalue> (as opposed to <newvalue>; the
+ * difference affects which error messages are generated):
+ */
+#define PARSE_SHA1_OLD 0x01
+
+/*
+ * For backwards compatibility, accept an empty string for update's
+ * <newvalue> in binary mode to be equivalent to specifying zeros.
+ */
+#define PARSE_SHA1_ALLOW_EMPTY 0x02
+
+/*
+ * Parse an argument separator followed by the next argument, if any.
+ * If there is an argument, convert it to a SHA-1, write it to sha1,
+ * set *next to point at the character terminating the argument, and
+ * return 0. If there is no argument at all (not even the empty
+ * string), return 1 and leave *next unchanged. If the value is
+ * provided but cannot be converted to a SHA-1, die. flags can
+ * include PARSE_SHA1_OLD and/or PARSE_SHA1_ALLOW_EMPTY.
+ */
+static int parse_next_oid(const char **next, const char *end,
+ struct object_id *oid,
+ const char *command, const char *refname,
+ int flags)
+{
+ struct strbuf arg = STRBUF_INIT;
+ int ret = 0;
+
+ if (*next == end)
+ goto eof;
+
+ if (line_termination) {
+ /* Without -z, consume SP and use next argument */
+ if (!**next || **next == line_termination)
+ return 1;
+ if (**next != ' ')
+ die("%s %s: expected SP but got: %s",
+ command, refname, *next);
+ (*next)++;
+ *next = parse_arg(*next, &arg);
+ if (arg.len) {
+ if (get_oid(arg.buf, oid))
+ goto invalid;
+ } else {
+ /* Without -z, an empty value means all zeros: */
+ oidclr(oid);
+ }
+ } else {
+ /* With -z, read the next NUL-terminated line */
+ if (**next)
+ die("%s %s: expected NUL but got: %s",
+ command, refname, *next);
+ (*next)++;
+ if (*next == end)
+ goto eof;
+ strbuf_addstr(&arg, *next);
+ *next += arg.len;
+
+ if (arg.len) {
+ if (get_oid(arg.buf, oid))
+ goto invalid;
+ } else if (flags & PARSE_SHA1_ALLOW_EMPTY) {
+ /* With -z, treat an empty value as all zeros: */
+ warning("%s %s: missing <newvalue>, treating as zero",
+ command, refname);
+ oidclr(oid);
+ } else {
+ /*
+ * With -z, an empty non-required value means
+ * unspecified:
+ */
+ ret = 1;
+ }
+ }
+
+ strbuf_release(&arg);
+
+ return ret;
+
+ invalid:
+ die(flags & PARSE_SHA1_OLD ?
+ "%s %s: invalid <oldvalue>: %s" :
+ "%s %s: invalid <newvalue>: %s",
+ command, refname, arg.buf);
+
+ eof:
+ die(flags & PARSE_SHA1_OLD ?
+ "%s %s: unexpected end of input when reading <oldvalue>" :
+ "%s %s: unexpected end of input when reading <newvalue>",
+ command, refname);
+}
+
+
+/*
+ * The following five parse_cmd_*() functions parse the corresponding
+ * command. In each case, next points at the character following the
+ * command name and the following space. They each return a pointer
+ * to the character terminating the command, and die with an
+ * explanatory message if there are any parsing problems. All of
+ * these functions handle either text or binary format input,
+ * depending on how line_termination is set.
+ */
+
+static void parse_cmd_update(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf err = STRBUF_INIT;
+ char *refname;
+ struct object_id new_oid, old_oid;
+ int have_old;
+
+ refname = parse_refname(&next);
+ if (!refname)
+ die("update: missing <ref>");
+
+ if (parse_next_oid(&next, end, &new_oid, "update", refname,
+ PARSE_SHA1_ALLOW_EMPTY))
+ die("update %s: missing <newvalue>", refname);
+
+ have_old = !parse_next_oid(&next, end, &old_oid, "update", refname,
+ PARSE_SHA1_OLD);
+
+ if (*next != line_termination)
+ die("update %s: extra input: %s", refname, next);
+
+ if (ref_transaction_update(transaction, refname,
+ &new_oid, have_old ? &old_oid : NULL,
+ update_flags | create_reflog_flag,
+ msg, &err))
+ die("%s", err.buf);
+
+ update_flags = default_flags;
+ free(refname);
+ strbuf_release(&err);
+}
+
+static void parse_cmd_create(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf err = STRBUF_INIT;
+ char *refname;
+ struct object_id new_oid;
+
+ refname = parse_refname(&next);
+ if (!refname)
+ die("create: missing <ref>");
+
+ if (parse_next_oid(&next, end, &new_oid, "create", refname, 0))
+ die("create %s: missing <newvalue>", refname);
+
+ if (is_null_oid(&new_oid))
+ die("create %s: zero <newvalue>", refname);
+
+ if (*next != line_termination)
+ die("create %s: extra input: %s", refname, next);
+
+ if (ref_transaction_create(transaction, refname, &new_oid,
+ update_flags | create_reflog_flag,
+ msg, &err))
+ die("%s", err.buf);
+
+ update_flags = default_flags;
+ free(refname);
+ strbuf_release(&err);
+}
+
+static void parse_cmd_delete(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf err = STRBUF_INIT;
+ char *refname;
+ struct object_id old_oid;
+ int have_old;
+
+ refname = parse_refname(&next);
+ if (!refname)
+ die("delete: missing <ref>");
+
+ if (parse_next_oid(&next, end, &old_oid, "delete", refname,
+ PARSE_SHA1_OLD)) {
+ have_old = 0;
+ } else {
+ if (is_null_oid(&old_oid))
+ die("delete %s: zero <oldvalue>", refname);
+ have_old = 1;
+ }
+
+ if (*next != line_termination)
+ die("delete %s: extra input: %s", refname, next);
+
+ if (ref_transaction_delete(transaction, refname,
+ have_old ? &old_oid : NULL,
+ update_flags, msg, &err))
+ die("%s", err.buf);
+
+ update_flags = default_flags;
+ free(refname);
+ strbuf_release(&err);
+}
+
+static void parse_cmd_verify(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf err = STRBUF_INIT;
+ char *refname;
+ struct object_id old_oid;
+
+ refname = parse_refname(&next);
+ if (!refname)
+ die("verify: missing <ref>");
+
+ if (parse_next_oid(&next, end, &old_oid, "verify", refname,
+ PARSE_SHA1_OLD))
+ oidclr(&old_oid);
+
+ if (*next != line_termination)
+ die("verify %s: extra input: %s", refname, next);
+
+ if (ref_transaction_verify(transaction, refname, &old_oid,
+ update_flags, &err))
+ die("%s", err.buf);
+
+ update_flags = default_flags;
+ free(refname);
+ strbuf_release(&err);
+}
+
+static void report_ok(const char *command)
+{
+ fprintf(stdout, "%s: ok\n", command);
+ fflush(stdout);
+}
+
+static void parse_cmd_option(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ const char *rest;
+ if (skip_prefix(next, "no-deref", &rest) && *rest == line_termination)
+ update_flags |= REF_NO_DEREF;
+ else
+ die("option unknown: %s", next);
+}
+
+static void parse_cmd_start(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ if (*next != line_termination)
+ die("start: extra input: %s", next);
+ report_ok("start");
+}
+
+static void parse_cmd_prepare(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf error = STRBUF_INIT;
+ if (*next != line_termination)
+ die("prepare: extra input: %s", next);
+ if (ref_transaction_prepare(transaction, &error))
+ die("prepare: %s", error.buf);
+ report_ok("prepare");
+}
+
+static void parse_cmd_abort(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf error = STRBUF_INIT;
+ if (*next != line_termination)
+ die("abort: extra input: %s", next);
+ if (ref_transaction_abort(transaction, &error))
+ die("abort: %s", error.buf);
+ report_ok("abort");
+}
+
+static void parse_cmd_commit(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf error = STRBUF_INIT;
+ if (*next != line_termination)
+ die("commit: extra input: %s", next);
+ if (ref_transaction_commit(transaction, &error))
+ die("commit: %s", error.buf);
+ report_ok("commit");
+ ref_transaction_free(transaction);
+}
+
+enum update_refs_state {
+ /* Non-transactional state open for updates. */
+ UPDATE_REFS_OPEN,
+ /* A transaction has been started. */
+ UPDATE_REFS_STARTED,
+ /* References are locked and ready for commit */
+ UPDATE_REFS_PREPARED,
+ /* Transaction has been committed or closed. */
+ UPDATE_REFS_CLOSED,
+};
+
+static const struct parse_cmd {
+ const char *prefix;
+ void (*fn)(struct ref_transaction *, const char *, const char *);
+ unsigned args;
+ enum update_refs_state state;
+} command[] = {
+ { "update", parse_cmd_update, 3, UPDATE_REFS_OPEN },
+ { "create", parse_cmd_create, 2, UPDATE_REFS_OPEN },
+ { "delete", parse_cmd_delete, 2, UPDATE_REFS_OPEN },
+ { "verify", parse_cmd_verify, 2, UPDATE_REFS_OPEN },
+ { "option", parse_cmd_option, 1, UPDATE_REFS_OPEN },
+ { "start", parse_cmd_start, 0, UPDATE_REFS_STARTED },
+ { "prepare", parse_cmd_prepare, 0, UPDATE_REFS_PREPARED },
+ { "abort", parse_cmd_abort, 0, UPDATE_REFS_CLOSED },
+ { "commit", parse_cmd_commit, 0, UPDATE_REFS_CLOSED },
+};
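+
+/*
+ * A minimal text-mode --stdin session handled by the table above might look
+ * like this (the ref name and object ids are placeholders):
+ *
+ *   start
+ *   update refs/heads/topic <new-oid> <old-oid>
+ *   prepare
+ *   commit
+ *
+ * A command that takes arguments is followed by a space, one without
+ * arguments directly by the line terminator; update_refs_stdin() below
+ * checks this when matching the command word.
+ */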
+
+static void update_refs_stdin(void)
+{
+ struct strbuf input = STRBUF_INIT, err = STRBUF_INIT;
+ enum update_refs_state state = UPDATE_REFS_OPEN;
+ struct ref_transaction *transaction;
+ int i, j;
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction)
+ die("%s", err.buf);
+
+ /* Read each line and dispatch its command */
+ while (!strbuf_getwholeline(&input, stdin, line_termination)) {
+ const struct parse_cmd *cmd = NULL;
+
+ if (*input.buf == line_termination)
+ die("empty command in input");
+ else if (isspace(*input.buf))
+ die("whitespace before command: %s", input.buf);
+
+ for (i = 0; i < ARRAY_SIZE(command); i++) {
+ const char *prefix = command[i].prefix;
+ char c;
+
+ if (!starts_with(input.buf, prefix))
+ continue;
+
+ /*
+ * If the command has arguments, verify that it's
+ * followed by a space. Otherwise, it shall be followed
+ * by a line terminator.
+ */
+ c = command[i].args ? ' ' : line_termination;
+ if (input.buf[strlen(prefix)] != c)
+ continue;
+
+ cmd = &command[i];
+ break;
+ }
+ if (!cmd)
+ die("unknown command: %s", input.buf);
+
+ /*
+ * Read additional arguments if NUL-terminated. Do not raise an
+ * error in case there is an early EOF to let the command
+ * handle missing arguments with a proper error message.
+ */
+ for (j = 1; line_termination == '\0' && j < cmd->args; j++)
+ if (strbuf_appendwholeline(&input, stdin, line_termination))
+ break;
+
+ switch (state) {
+ case UPDATE_REFS_OPEN:
+ case UPDATE_REFS_STARTED:
+ if (state == UPDATE_REFS_STARTED && cmd->state == UPDATE_REFS_STARTED)
+ die("cannot restart ongoing transaction");
+ /* Do not downgrade a transaction to a non-transaction. */
+ if (cmd->state >= state)
+ state = cmd->state;
+ break;
+ case UPDATE_REFS_PREPARED:
+ if (cmd->state != UPDATE_REFS_CLOSED)
+ die("prepared transactions can only be closed");
+ state = cmd->state;
+ break;
+ case UPDATE_REFS_CLOSED:
+ if (cmd->state != UPDATE_REFS_STARTED)
+ die("transaction is closed");
+
+ /*
+ * Open a new transaction if we're currently closed and
+ * get a "start".
+ */
+ state = cmd->state;
+ transaction = ref_transaction_begin(&err);
+ if (!transaction)
+ die("%s", err.buf);
+
+ break;
+ }
+
+ cmd->fn(transaction, input.buf + strlen(cmd->prefix) + !!cmd->args,
+ input.buf + input.len);
+ }
+
+ switch (state) {
+ case UPDATE_REFS_OPEN:
+ /* Commit by default if no transaction was requested. */
+ if (ref_transaction_commit(transaction, &err))
+ die("%s", err.buf);
+ ref_transaction_free(transaction);
+ break;
+ case UPDATE_REFS_STARTED:
+ case UPDATE_REFS_PREPARED:
+ /* If using a transaction, we want to abort it. */
+ if (ref_transaction_abort(transaction, &err))
+ die("%s", err.buf);
+ break;
+ case UPDATE_REFS_CLOSED:
+ /* Otherwise no need to do anything, the transaction was closed already. */
+ break;
+ }
+
+ strbuf_release(&err);
+ strbuf_release(&input);
+}
+
+int cmd_update_ref(int argc, const char **argv, const char *prefix)
+{
+ const char *refname, *oldval;
+ struct object_id oid, oldoid;
+ int delete = 0, no_deref = 0, read_stdin = 0, end_null = 0;
+ int create_reflog = 0;
+ struct option options[] = {
+ OPT_STRING( 'm', NULL, &msg, N_("reason"), N_("reason for the update")),
+ OPT_BOOL('d', NULL, &delete, N_("delete the reference")),
+ OPT_BOOL( 0 , "no-deref", &no_deref,
+ N_("update <refname> not the one it points to")),
+ OPT_BOOL('z', NULL, &end_null, N_("stdin has NUL-terminated arguments")),
+ OPT_BOOL( 0 , "stdin", &read_stdin, N_("read updates from stdin")),
+ OPT_BOOL( 0 , "create-reflog", &create_reflog, N_("create a reflog")),
+ OPT_END(),
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, options, git_update_ref_usage,
+ 0);
+ if (msg && !*msg)
+ die("Refusing to perform update with empty message.");
+
+ create_reflog_flag = create_reflog ? REF_FORCE_CREATE_REFLOG : 0;
+
+ if (no_deref) {
+ default_flags = REF_NO_DEREF;
+ update_flags = default_flags;
+ }
+
+ if (read_stdin) {
+ if (delete || argc > 0)
+ usage_with_options(git_update_ref_usage, options);
+ if (end_null)
+ line_termination = '\0';
+ update_refs_stdin();
+ return 0;
+ }
+
+ if (end_null)
+ usage_with_options(git_update_ref_usage, options);
+
+ if (delete) {
+ if (argc < 1 || argc > 2)
+ usage_with_options(git_update_ref_usage, options);
+ refname = argv[0];
+ oldval = argv[1];
+ } else {
+ const char *value;
+ if (argc < 2 || argc > 3)
+ usage_with_options(git_update_ref_usage, options);
+ refname = argv[0];
+ value = argv[1];
+ oldval = argv[2];
+ if (get_oid(value, &oid))
+ die("%s: not a valid SHA1", value);
+ }
+
+ if (oldval) {
+ if (!*oldval)
+ /*
+ * The empty string implies that the reference
+ * must not already exist:
+ */
+ oidclr(&oldoid);
+ else if (get_oid(oldval, &oldoid))
+ die("%s: not a valid old SHA1", oldval);
+ }
+
+ if (delete)
+ /*
+ * For purposes of backwards compatibility, we treat
+ * NULL_SHA1 as "don't care" here:
+ */
+ return delete_ref(msg, refname,
+ (oldval && !is_null_oid(&oldoid)) ? &oldoid : NULL,
+ default_flags);
+ else
+ return update_ref(msg, refname, &oid, oldval ? &oldoid : NULL,
+ default_flags | create_reflog_flag,
+ UPDATE_REFS_DIE_ON_ERR);
+}
diff --git a/builtin/update-server-info.c b/builtin/update-server-info.c
new file mode 100644
index 0000000..d2239c9
--- /dev/null
+++ b/builtin/update-server-info.c
@@ -0,0 +1,26 @@
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "parse-options.h"
+
+static const char * const update_server_info_usage[] = {
+ "git update-server-info [-f | --force]",
+ NULL
+};
+
+int cmd_update_server_info(int argc, const char **argv, const char *prefix)
+{
+ int force = 0;
+ struct option options[] = {
+ OPT__FORCE(&force, N_("update the info files from scratch"), 0),
+ OPT_END()
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, options,
+ update_server_info_usage, 0);
+ if (argc > 0)
+ usage_with_options(update_server_info_usage, options);
+
+ return !!update_server_info(force);
+}
diff --git a/builtin/upload-archive.c b/builtin/upload-archive.c
new file mode 100644
index 0000000..945ee2b
--- /dev/null
+++ b/builtin/upload-archive.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2006 Franck Bui-Huu
+ */
+#include "cache.h"
+#include "builtin.h"
+#include "archive.h"
+#include "pkt-line.h"
+#include "sideband.h"
+#include "run-command.h"
+#include "strvec.h"
+
+static const char upload_archive_usage[] =
+ "git upload-archive <repository>";
+
+static const char deadchild[] =
+"git upload-archive: archiver died with error";
+
+#define MAX_ARGS (64)
+
+int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix)
+{
+ struct strvec sent_argv = STRVEC_INIT;
+ const char *arg_cmd = "argument ";
+
+ if (argc != 2 || !strcmp(argv[1], "-h"))
+ usage(upload_archive_usage);
+
+ if (!enter_repo(argv[1], 0))
+ die("'%s' does not appear to be a git repository", argv[1]);
+
+ init_archivers();
+
+ /* put received options in sent_argv[] */
+ strvec_push(&sent_argv, "git-upload-archive");
+ for (;;) {
+ char *buf = packet_read_line(0, NULL);
+ if (!buf)
+ break; /* got a flush */
+ if (sent_argv.nr > MAX_ARGS)
+ die("Too many options (>%d)", MAX_ARGS - 1);
+
+ if (!starts_with(buf, arg_cmd))
+ die("'argument' token or flush expected");
+ strvec_push(&sent_argv, buf + strlen(arg_cmd));
+ }
+
+ /* parse all options sent by the client */
+ return write_archive(sent_argv.nr, sent_argv.v, prefix,
+ the_repository, NULL, 1);
+}
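+
+/*
+ * A client typically sends a series of pkt-lines such as (the arguments
+ * shown are illustrative):
+ *
+ *   argument --format=tar
+ *   argument HEAD
+ *   <flush>
+ *
+ * Each "argument" payload is collected into sent_argv above and the whole
+ * list is then handed to write_archive().
+ */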
+
+__attribute__((format (printf, 1, 2)))
+static void error_clnt(const char *fmt, ...)
+{
+ struct strbuf buf = STRBUF_INIT;
+ va_list params;
+
+ va_start(params, fmt);
+ strbuf_vaddf(&buf, fmt, params);
+ va_end(params);
+ send_sideband(1, 3, buf.buf, buf.len, LARGE_PACKET_MAX);
+ die("sent error to the client: %s", buf.buf);
+}
+
+static ssize_t process_input(int child_fd, int band)
+{
+ char buf[16384];
+ ssize_t sz = read(child_fd, buf, sizeof(buf));
+ if (sz < 0) {
+ if (errno != EAGAIN && errno != EINTR)
+ error_clnt("read error: %s\n", strerror(errno));
+ return sz;
+ }
+ send_sideband(1, band, buf, sz, LARGE_PACKET_MAX);
+ return sz;
+}
+
+int cmd_upload_archive(int argc, const char **argv, const char *prefix)
+{
+ struct child_process writer = CHILD_PROCESS_INIT;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(upload_archive_usage);
+
+ /*
+ * Set up sideband subprocess.
+ *
+ * We (parent) monitor and read from child, sending its fd#1 and fd#2
+ * multiplexed out to our fd#1. If the child dies, we tell the other
+ * end over channel #3.
+ */
+ writer.out = writer.err = -1;
+ writer.git_cmd = 1;
+ strvec_push(&writer.args, "upload-archive--writer");
+ strvec_pushv(&writer.args, argv + 1);
+ if (start_command(&writer)) {
+ int err = errno;
+ packet_write_fmt(1, "NACK unable to spawn subprocess\n");
+ die("upload-archive: %s", strerror(err));
+ }
+
+ packet_write_fmt(1, "ACK\n");
+ packet_flush(1);
+
+ while (1) {
+ struct pollfd pfd[2];
+
+ pfd[0].fd = writer.out;
+ pfd[0].events = POLLIN;
+ pfd[1].fd = writer.err;
+ pfd[1].events = POLLIN;
+ if (poll(pfd, 2, -1) < 0) {
+ if (errno != EINTR) {
+ error_errno("poll failed resuming");
+ sleep(1);
+ }
+ continue;
+ }
+ if (pfd[1].revents & POLLIN)
+ /* Status stream ready */
+ if (process_input(pfd[1].fd, 2))
+ continue;
+ if (pfd[0].revents & POLLIN)
+ /* Data stream ready */
+ if (process_input(pfd[0].fd, 1))
+ continue;
+
+ if (finish_command(&writer))
+ error_clnt("%s", deadchild);
+ packet_flush(1);
+ break;
+ }
+ return 0;
+}
diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c
new file mode 100644
index 0000000..25b69da
--- /dev/null
+++ b/builtin/upload-pack.c
@@ -0,0 +1,75 @@
+#include "cache.h"
+#include "builtin.h"
+#include "exec-cmd.h"
+#include "pkt-line.h"
+#include "parse-options.h"
+#include "protocol.h"
+#include "upload-pack.h"
+#include "serve.h"
+
+static const char * const upload_pack_usage[] = {
+ N_("git-upload-pack [--[no-]strict] [--timeout=<n>] [--stateless-rpc]\n"
+ " [--advertise-refs] <directory>"),
+ NULL
+};
+
+int cmd_upload_pack(int argc, const char **argv, const char *prefix)
+{
+ const char *dir;
+ int strict = 0;
+ int advertise_refs = 0;
+ int stateless_rpc = 0;
+ int timeout = 0;
+ struct option options[] = {
+ OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
+ N_("quit after a single request/response exchange")),
+ OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs,
+ N_("serve up the info/refs for git-http-backend")),
+ OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"),
+ OPT_BOOL(0, "strict", &strict,
+ N_("do not try <directory>/.git/ if <directory> is no Git directory")),
+ OPT_INTEGER(0, "timeout", &timeout,
+ N_("interrupt transfer after <n> seconds of inactivity")),
+ OPT_END()
+ };
+
+ packet_trace_identity("upload-pack");
+ read_replace_refs = 0;
+
+ argc = parse_options(argc, argv, prefix, options, upload_pack_usage, 0);
+
+ if (argc != 1)
+ usage_with_options(upload_pack_usage, options);
+
+ setup_path();
+
+ dir = argv[0];
+
+ if (!enter_repo(dir, strict))
+ die("'%s' does not appear to be a git repository", dir);
+
+ switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ if (advertise_refs)
+ protocol_v2_advertise_capabilities();
+ else
+ protocol_v2_serve_loop(stateless_rpc);
+ break;
+ case protocol_v1:
+ /*
+ * v1 is just the original protocol with a version string,
+ * so just fall through after writing the version string.
+ */
+ if (advertise_refs || !stateless_rpc)
+ packet_write_fmt(1, "version 1\n");
+
+ /* fallthrough */
+ case protocol_v0:
+ upload_pack(advertise_refs, stateless_rpc, timeout);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
+
+ return 0;
+}
diff --git a/builtin/var.c b/builtin/var.c
new file mode 100644
index 0000000..491db27
--- /dev/null
+++ b/builtin/var.c
@@ -0,0 +1,101 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Eric Biederman, 2005
+ */
+#include "builtin.h"
+#include "config.h"
+#include "refs.h"
+
+static const char var_usage[] = "git var (-l | <variable>)";
+
+static const char *editor(int flag)
+{
+ const char *pgm = git_editor();
+
+ if (!pgm && flag & IDENT_STRICT)
+ die("Terminal is dumb, but EDITOR unset");
+
+ return pgm;
+}
+
+static const char *pager(int flag)
+{
+ const char *pgm = git_pager(1);
+
+ if (!pgm)
+ pgm = "cat";
+ return pgm;
+}
+
+static const char *default_branch(int flag)
+{
+ return git_default_branch_name(1);
+}
+
+struct git_var {
+ const char *name;
+ const char *(*read)(int);
+};
+static struct git_var git_vars[] = {
+ { "GIT_COMMITTER_IDENT", git_committer_info },
+ { "GIT_AUTHOR_IDENT", git_author_info },
+ { "GIT_EDITOR", editor },
+ { "GIT_PAGER", pager },
+ { "GIT_DEFAULT_BRANCH", default_branch },
+ { "", NULL },
+};
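+
+/*
+ * With the table above, "git var GIT_PAGER" prints a single value (for
+ * example "less"; pager() above substitutes "cat" when git_pager() reports
+ * no pager), while "git var -l" prints one "NAME=value" line per variable
+ * that has a value, after first dumping the configuration via show_config().
+ */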
+
+static void list_vars(void)
+{
+ struct git_var *ptr;
+ const char *val;
+
+ for (ptr = git_vars; ptr->read; ptr++)
+ if ((val = ptr->read(0)))
+ printf("%s=%s\n", ptr->name, val);
+}
+
+static const char *read_var(const char *var)
+{
+ struct git_var *ptr;
+ const char *val;
+ val = NULL;
+ for (ptr = git_vars; ptr->read; ptr++) {
+ if (strcmp(var, ptr->name) == 0) {
+ val = ptr->read(IDENT_STRICT);
+ break;
+ }
+ }
+ return val;
+}
+
+static int show_config(const char *var, const char *value, void *cb)
+{
+ if (value)
+ printf("%s=%s\n", var, value);
+ else
+ printf("%s\n", var);
+ return git_default_config(var, value, cb);
+}
+
+int cmd_var(int argc, const char **argv, const char *prefix)
+{
+ const char *val = NULL;
+ if (argc != 2)
+ usage(var_usage);
+
+ if (strcmp(argv[1], "-l") == 0) {
+ git_config(show_config, NULL);
+ list_vars();
+ return 0;
+ }
+ git_config(git_default_config, NULL);
+ val = read_var(argv[1]);
+ if (!val)
+ usage(var_usage);
+
+ printf("%s\n", val);
+
+ return 0;
+}
diff --git a/builtin/verify-commit.c b/builtin/verify-commit.c
new file mode 100644
index 0000000..3ebad32
--- /dev/null
+++ b/builtin/verify-commit.c
@@ -0,0 +1,90 @@
+/*
+ * Builtin "git verify-commit"
+ *
+ * Copyright (c) 2014 Michael J Gruber <git@drmicha.warpmail.net>
+ *
+ * Based on git-verify-tag
+ */
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "object-store.h"
+#include "repository.h"
+#include "commit.h"
+#include "run-command.h"
+#include "parse-options.h"
+#include "gpg-interface.h"
+
+static const char * const verify_commit_usage[] = {
+ N_("git verify-commit [-v | --verbose] [--raw] <commit>..."),
+ NULL
+};
+
+static int run_gpg_verify(struct commit *commit, unsigned flags)
+{
+ struct signature_check signature_check;
+ int ret;
+
+ memset(&signature_check, 0, sizeof(signature_check));
+
+ ret = check_commit_signature(commit, &signature_check);
+ print_signature_buffer(&signature_check, flags);
+
+ signature_check_clear(&signature_check);
+ return ret;
+}
+
+static int verify_commit(const char *name, unsigned flags)
+{
+ struct object_id oid;
+ struct object *obj;
+
+ if (get_oid(name, &oid))
+ return error("commit '%s' not found.", name);
+
+ obj = parse_object(the_repository, &oid);
+ if (!obj)
+ return error("%s: unable to read file.", name);
+ if (obj->type != OBJ_COMMIT)
+ return error("%s: cannot verify a non-commit object of type %s.",
+ name, type_name(obj->type));
+
+ return run_gpg_verify((struct commit *)obj, flags);
+}
+
+static int git_verify_commit_config(const char *var, const char *value, void *cb)
+{
+ int status = git_gpg_config(var, value, cb);
+ if (status)
+ return status;
+ return git_default_config(var, value, cb);
+}
+
+int cmd_verify_commit(int argc, const char **argv, const char *prefix)
+{
+ int i = 1, verbose = 0, had_error = 0;
+ unsigned flags = 0;
+ const struct option verify_commit_options[] = {
+ OPT__VERBOSE(&verbose, N_("print commit contents")),
+ OPT_BIT(0, "raw", &flags, N_("print raw gpg status output"), GPG_VERIFY_RAW),
+ OPT_END()
+ };
+
+ git_config(git_verify_commit_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, verify_commit_options,
+ verify_commit_usage, PARSE_OPT_KEEP_ARGV0);
+ if (argc <= i)
+ usage_with_options(verify_commit_usage, verify_commit_options);
+
+ if (verbose)
+ flags |= GPG_VERIFY_VERBOSE;
+
+ /* sometimes the program was terminated because this signal
+ * was received in the process of writing the gpg input: */
+ signal(SIGPIPE, SIG_IGN);
+ while (i < argc)
+ if (verify_commit(argv[i++], flags))
+ had_error = 1;
+ return had_error;
+}
diff --git a/builtin/verify-pack.c b/builtin/verify-pack.c
new file mode 100644
index 0000000..27d6f75
--- /dev/null
+++ b/builtin/verify-pack.c
@@ -0,0 +1,90 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "run-command.h"
+#include "parse-options.h"
+
+#define VERIFY_PACK_VERBOSE 01
+#define VERIFY_PACK_STAT_ONLY 02
+
+static int verify_one_pack(const char *path, unsigned int flags, const char *hash_algo)
+{
+ struct child_process index_pack = CHILD_PROCESS_INIT;
+ struct strvec *argv = &index_pack.args;
+ struct strbuf arg = STRBUF_INIT;
+ int verbose = flags & VERIFY_PACK_VERBOSE;
+ int stat_only = flags & VERIFY_PACK_STAT_ONLY;
+ int err;
+
+ strvec_push(argv, "index-pack");
+
+ if (stat_only)
+ strvec_push(argv, "--verify-stat-only");
+ else if (verbose)
+ strvec_push(argv, "--verify-stat");
+ else
+ strvec_push(argv, "--verify");
+
+ if (hash_algo)
+ strvec_pushf(argv, "--object-format=%s", hash_algo);
+
+ /*
+ * In addition to "foo.pack" we accept "foo.idx" and "foo";
+ * normalize these forms to "foo.pack" for "index-pack --verify".
+ */
+ strbuf_addstr(&arg, path);
+ if (strbuf_strip_suffix(&arg, ".idx") ||
+ !ends_with(arg.buf, ".pack"))
+ strbuf_addstr(&arg, ".pack");
+ strvec_push(argv, arg.buf);
+
+ index_pack.git_cmd = 1;
+
+ err = run_command(&index_pack);
+
+ if (verbose || stat_only) {
+ if (err)
+ printf("%s: bad\n", arg.buf);
+ else {
+ if (!stat_only)
+ printf("%s: ok\n", arg.buf);
+ }
+ }
+ strbuf_release(&arg);
+
+ return err;
+}
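+
+/*
+ * With a hypothetical pack name, all of
+ *
+ *   git verify-pack objects/pack/pack-1234.idx
+ *   git verify-pack objects/pack/pack-1234.pack
+ *   git verify-pack objects/pack/pack-1234
+ *
+ * are normalized above and end up running
+ * "git index-pack --verify objects/pack/pack-1234.pack".
+ */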
+
+static const char * const verify_pack_usage[] = {
+ N_("git verify-pack [-v | --verbose] [-s | --stat-only] [--] <pack>.idx..."),
+ NULL
+};
+
+int cmd_verify_pack(int argc, const char **argv, const char *prefix)
+{
+ int err = 0;
+ unsigned int flags = 0;
+ const char *object_format = NULL;
+ int i;
+ const struct option verify_pack_options[] = {
+ OPT_BIT('v', "verbose", &flags, N_("verbose"),
+ VERIFY_PACK_VERBOSE),
+ OPT_BIT('s', "stat-only", &flags, N_("show statistics only"),
+ VERIFY_PACK_STAT_ONLY),
+ OPT_STRING(0, "object-format", &object_format, N_("hash"),
+ N_("specify the hash algorithm to use")),
+ OPT_END()
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix, verify_pack_options,
+ verify_pack_usage, 0);
+ if (argc < 1)
+ usage_with_options(verify_pack_usage, verify_pack_options);
+ for (i = 0; i < argc; i++) {
+ if (verify_one_pack(argv[i], flags, object_format))
+ err = 1;
+ }
+
+ return err;
+}
diff --git a/builtin/verify-tag.c b/builtin/verify-tag.c
new file mode 100644
index 0000000..2175669
--- /dev/null
+++ b/builtin/verify-tag.c
@@ -0,0 +1,77 @@
+/*
+ * Builtin "git verify-tag"
+ *
+ * Copyright (c) 2007 Carlos Rica <jasampler@gmail.com>
+ *
+ * Based on git-verify-tag.sh
+ */
+#include "cache.h"
+#include "config.h"
+#include "builtin.h"
+#include "tag.h"
+#include "run-command.h"
+#include "parse-options.h"
+#include "gpg-interface.h"
+#include "ref-filter.h"
+
+static const char * const verify_tag_usage[] = {
+ N_("git verify-tag [-v | --verbose] [--format=<format>] [--raw] <tag>..."),
+ NULL
+};
+
+static int git_verify_tag_config(const char *var, const char *value, void *cb)
+{
+ int status = git_gpg_config(var, value, cb);
+ if (status)
+ return status;
+ return git_default_config(var, value, cb);
+}
+
+int cmd_verify_tag(int argc, const char **argv, const char *prefix)
+{
+ int i = 1, verbose = 0, had_error = 0;
+ unsigned flags = 0;
+ struct ref_format format = REF_FORMAT_INIT;
+ const struct option verify_tag_options[] = {
+ OPT__VERBOSE(&verbose, N_("print tag contents")),
+ OPT_BIT(0, "raw", &flags, N_("print raw gpg status output"), GPG_VERIFY_RAW),
+ OPT_STRING(0, "format", &format.format, N_("format"), N_("format to use for the output")),
+ OPT_END()
+ };
+
+ git_config(git_verify_tag_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, verify_tag_options,
+ verify_tag_usage, PARSE_OPT_KEEP_ARGV0);
+ if (argc <= i)
+ usage_with_options(verify_tag_usage, verify_tag_options);
+
+ if (verbose)
+ flags |= GPG_VERIFY_VERBOSE;
+
+ if (format.format) {
+ if (verify_ref_format(&format))
+ usage_with_options(verify_tag_usage,
+ verify_tag_options);
+ flags |= GPG_VERIFY_OMIT_STATUS;
+ }
+
+ while (i < argc) {
+ struct object_id oid;
+ const char *name = argv[i++];
+
+ if (get_oid(name, &oid)) {
+ had_error = !!error("tag '%s' not found.", name);
+ continue;
+ }
+
+ if (gpg_verify_tag(&oid, name, flags)) {
+ had_error = 1;
+ continue;
+ }
+
+ if (format.format)
+ pretty_print_ref(name, &oid, &format);
+ }
+ return had_error;
+}
diff --git a/builtin/worktree.c b/builtin/worktree.c
new file mode 100644
index 0000000..4a24d53
--- /dev/null
+++ b/builtin/worktree.c
@@ -0,0 +1,1195 @@
+#include "cache.h"
+#include "checkout.h"
+#include "config.h"
+#include "builtin.h"
+#include "dir.h"
+#include "parse-options.h"
+#include "strvec.h"
+#include "branch.h"
+#include "refs.h"
+#include "run-command.h"
+#include "hook.h"
+#include "sigchain.h"
+#include "submodule.h"
+#include "utf8.h"
+#include "worktree.h"
+#include "quote.h"
+
+#define BUILTIN_WORKTREE_ADD_USAGE \
+ N_("git worktree add [-f] [--detach] [--checkout] [--lock [--reason <string>]]\n" \
+ " [-b <new-branch>] <path> [<commit-ish>]")
+#define BUILTIN_WORKTREE_LIST_USAGE \
+ N_("git worktree list [-v | --porcelain [-z]]")
+#define BUILTIN_WORKTREE_LOCK_USAGE \
+ N_("git worktree lock [--reason <string>] <worktree>")
+#define BUILTIN_WORKTREE_MOVE_USAGE \
+ N_("git worktree move <worktree> <new-path>")
+#define BUILTIN_WORKTREE_PRUNE_USAGE \
+ N_("git worktree prune [-n] [-v] [--expire <expire>]")
+#define BUILTIN_WORKTREE_REMOVE_USAGE \
+ N_("git worktree remove [-f] <worktree>")
+#define BUILTIN_WORKTREE_REPAIR_USAGE \
+ N_("git worktree repair [<path>...]")
+#define BUILTIN_WORKTREE_UNLOCK_USAGE \
+ N_("git worktree unlock <worktree>")
+
+static const char * const git_worktree_usage[] = {
+ BUILTIN_WORKTREE_ADD_USAGE,
+ BUILTIN_WORKTREE_LIST_USAGE,
+ BUILTIN_WORKTREE_LOCK_USAGE,
+ BUILTIN_WORKTREE_MOVE_USAGE,
+ BUILTIN_WORKTREE_PRUNE_USAGE,
+ BUILTIN_WORKTREE_REMOVE_USAGE,
+ BUILTIN_WORKTREE_REPAIR_USAGE,
+ BUILTIN_WORKTREE_UNLOCK_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_add_usage[] = {
+ BUILTIN_WORKTREE_ADD_USAGE,
+ NULL,
+};
+
+static const char * const git_worktree_list_usage[] = {
+ BUILTIN_WORKTREE_LIST_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_lock_usage[] = {
+ BUILTIN_WORKTREE_LOCK_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_move_usage[] = {
+ BUILTIN_WORKTREE_MOVE_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_prune_usage[] = {
+ BUILTIN_WORKTREE_PRUNE_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_remove_usage[] = {
+ BUILTIN_WORKTREE_REMOVE_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_repair_usage[] = {
+ BUILTIN_WORKTREE_REPAIR_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_unlock_usage[] = {
+ BUILTIN_WORKTREE_UNLOCK_USAGE,
+ NULL
+};
+
+struct add_opts {
+ int force;
+ int detach;
+ int quiet;
+ int checkout;
+ const char *keep_locked;
+};
+
+static int show_only;
+static int verbose;
+static int guess_remote;
+static timestamp_t expire;
+
+static int git_worktree_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "worktree.guessremote")) {
+ guess_remote = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+static int delete_git_dir(const char *id)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret;
+
+ strbuf_addstr(&sb, git_common_path("worktrees/%s", id));
+ ret = remove_dir_recursively(&sb, 0);
+ if (ret < 0 && errno == ENOTDIR)
+ ret = unlink(sb.buf);
+ if (ret)
+ error_errno(_("failed to delete '%s'"), sb.buf);
+ strbuf_release(&sb);
+ return ret;
+}
+
+static void delete_worktrees_dir_if_empty(void)
+{
+ rmdir(git_path("worktrees")); /* ignore failed removal */
+}
+
+static void prune_worktree(const char *id, const char *reason)
+{
+ if (show_only || verbose)
+ fprintf_ln(stderr, _("Removing %s/%s: %s"), "worktrees", id, reason);
+ if (!show_only)
+ delete_git_dir(id);
+}
+
+static int prune_cmp(const void *a, const void *b)
+{
+ const struct string_list_item *x = a;
+ const struct string_list_item *y = b;
+ int c;
+
+ if ((c = fspathcmp(x->string, y->string)))
+ return c;
+ /*
+ * paths same; prune_dups() removes all but the first worktree entry
+ * having the same path, so sort main worktree ('util' is NULL) above
+ * linked worktrees ('util' not NULL) since main worktree can't be
+ * removed
+ */
+ if (!x->util)
+ return -1;
+ if (!y->util)
+ return 1;
+ /* paths same; sort by .git/worktrees/<id> */
+ return strcmp(x->util, y->util);
+}
+
+static void prune_dups(struct string_list *l)
+{
+ int i;
+
+ QSORT(l->items, l->nr, prune_cmp);
+ for (i = 1; i < l->nr; i++) {
+ if (!fspathcmp(l->items[i].string, l->items[i - 1].string))
+ prune_worktree(l->items[i].util, "duplicate entry");
+ }
+}
+
+static void prune_worktrees(void)
+{
+ struct strbuf reason = STRBUF_INIT;
+ struct strbuf main_path = STRBUF_INIT;
+ struct string_list kept = STRING_LIST_INIT_NODUP;
+ DIR *dir = opendir(git_path("worktrees"));
+ struct dirent *d;
+ if (!dir)
+ return;
+ while ((d = readdir_skip_dot_and_dotdot(dir)) != NULL) {
+ char *path;
+ strbuf_reset(&reason);
+ if (should_prune_worktree(d->d_name, &reason, &path, expire))
+ prune_worktree(d->d_name, reason.buf);
+ else if (path)
+ string_list_append(&kept, path)->util = xstrdup(d->d_name);
+ }
+ closedir(dir);
+
+ strbuf_add_absolute_path(&main_path, get_git_common_dir());
+ /* massage main worktree absolute path to match 'gitdir' content */
+ strbuf_strip_suffix(&main_path, "/.");
+ string_list_append(&kept, strbuf_detach(&main_path, NULL));
+ prune_dups(&kept);
+ string_list_clear(&kept, 1);
+
+ if (!show_only)
+ delete_worktrees_dir_if_empty();
+ strbuf_release(&reason);
+}
+
+static int prune(int ac, const char **av, const char *prefix)
+{
+ struct option options[] = {
+ OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
+ OPT__VERBOSE(&verbose, N_("report pruned working trees")),
+ OPT_EXPIRY_DATE(0, "expire", &expire,
+ N_("expire working trees older than <time>")),
+ OPT_END()
+ };
+
+ expire = TIME_MAX;
+ ac = parse_options(ac, av, prefix, options, git_worktree_prune_usage,
+ 0);
+ if (ac)
+ usage_with_options(git_worktree_prune_usage, options);
+ prune_worktrees();
+ return 0;
+}
+
+static char *junk_work_tree;
+static char *junk_git_dir;
+static int is_junk;
+static pid_t junk_pid;
+
+static void remove_junk(void)
+{
+ struct strbuf sb = STRBUF_INIT;
+ if (!is_junk || getpid() != junk_pid)
+ return;
+ if (junk_git_dir) {
+ strbuf_addstr(&sb, junk_git_dir);
+ remove_dir_recursively(&sb, 0);
+ strbuf_reset(&sb);
+ }
+ if (junk_work_tree) {
+ strbuf_addstr(&sb, junk_work_tree);
+ remove_dir_recursively(&sb, 0);
+ }
+ strbuf_release(&sb);
+}
+
+static void remove_junk_on_signal(int signo)
+{
+ remove_junk();
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+static const char *worktree_basename(const char *path, int *olen)
+{
+ const char *name;
+ int len;
+
+ len = strlen(path);
+ while (len && is_dir_sep(path[len - 1]))
+ len--;
+
+ for (name = path + len - 1; name > path; name--)
+ if (is_dir_sep(*name)) {
+ name++;
+ break;
+ }
+
+ *olen = len;
+ return name;
+}
+
+/* check that path is a viable location for a worktree */
+static void check_candidate_path(const char *path,
+ int force,
+ struct worktree **worktrees,
+ const char *cmd)
+{
+ struct worktree *wt;
+ int locked;
+
+ if (file_exists(path) && !is_empty_dir(path))
+ die(_("'%s' already exists"), path);
+
+ wt = find_worktree_by_path(worktrees, path);
+ if (!wt)
+ return;
+
+ locked = !!worktree_lock_reason(wt);
+ if ((!locked && force) || (locked && force > 1)) {
+ if (delete_git_dir(wt->id))
+ die(_("unusable worktree destination '%s'"), path);
+ return;
+ }
+
+ if (locked)
+ die(_("'%s' is a missing but locked worktree;\nuse '%s -f -f' to override, or 'unlock' and 'prune' or 'remove' to clear"), path, cmd);
+ else
+ die(_("'%s' is a missing but already registered worktree;\nuse '%s -f' to override, or 'prune' or 'remove' to clear"), path, cmd);
+}
+
+static void copy_sparse_checkout(const char *worktree_git_dir)
+{
+ char *from_file = git_pathdup("info/sparse-checkout");
+ char *to_file = xstrfmt("%s/info/sparse-checkout", worktree_git_dir);
+
+ if (file_exists(from_file)) {
+ if (safe_create_leading_directories(to_file) ||
+ copy_file(to_file, from_file, 0666))
+ error(_("failed to copy '%s' to '%s'; sparse-checkout may not work correctly"),
+ from_file, to_file);
+ }
+
+ free(from_file);
+ free(to_file);
+}
+
+static void copy_filtered_worktree_config(const char *worktree_git_dir)
+{
+ char *from_file = git_pathdup("config.worktree");
+ char *to_file = xstrfmt("%s/config.worktree", worktree_git_dir);
+
+ if (file_exists(from_file)) {
+ struct config_set cs = { { 0 } };
+ const char *core_worktree;
+ int bare;
+
+ if (safe_create_leading_directories(to_file) ||
+ copy_file(to_file, from_file, 0666)) {
+ error(_("failed to copy worktree config from '%s' to '%s'"),
+ from_file, to_file);
+ goto worktree_copy_cleanup;
+ }
+
+ git_configset_init(&cs);
+ git_configset_add_file(&cs, from_file);
+
+ if (!git_configset_get_bool(&cs, "core.bare", &bare) &&
+ bare &&
+ git_config_set_multivar_in_file_gently(
+ to_file, "core.bare", NULL, "true", 0))
+ error(_("failed to unset '%s' in '%s'"),
+ "core.bare", to_file);
+ if (!git_configset_get_value(&cs, "core.worktree", &core_worktree) &&
+ git_config_set_in_file_gently(to_file,
+ "core.worktree", NULL))
+ error(_("failed to unset '%s' in '%s'"),
+ "core.worktree", to_file);
+
+ git_configset_clear(&cs);
+ }
+
+worktree_copy_cleanup:
+ free(from_file);
+ free(to_file);
+}
+
+static int checkout_worktree(const struct add_opts *opts,
+ struct strvec *child_env)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "reset", "--hard", "--no-recurse-submodules", NULL);
+ if (opts->quiet)
+ strvec_push(&cp.args, "--quiet");
+ strvec_pushv(&cp.env, child_env->v);
+ return run_command(&cp);
+}
+
+static int add_worktree(const char *path, const char *refname,
+ const struct add_opts *opts)
+{
+ struct strbuf sb_git = STRBUF_INIT, sb_repo = STRBUF_INIT;
+ struct strbuf sb = STRBUF_INIT, realpath = STRBUF_INIT;
+ const char *name;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strvec child_env = STRVEC_INIT;
+ unsigned int counter = 0;
+ int len, ret;
+ struct strbuf symref = STRBUF_INIT;
+ struct commit *commit = NULL;
+ int is_branch = 0;
+ struct strbuf sb_name = STRBUF_INIT;
+ struct worktree **worktrees;
+
+ worktrees = get_worktrees();
+ check_candidate_path(path, opts->force, worktrees, "add");
+ free_worktrees(worktrees);
+ worktrees = NULL;
+
+ /* is 'refname' a branch or commit? */
+ if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) &&
+ ref_exists(symref.buf)) {
+ is_branch = 1;
+ if (!opts->force)
+ die_if_checked_out(symref.buf, 0);
+ }
+ commit = lookup_commit_reference_by_name(refname);
+ if (!commit)
+ die(_("invalid reference: %s"), refname);
+
+ name = worktree_basename(path, &len);
+ strbuf_add(&sb, name, path + len - name);
+ sanitize_refname_component(sb.buf, &sb_name);
+ if (!sb_name.len)
+ BUG("How come '%s' becomes empty after sanitization?", sb.buf);
+ strbuf_reset(&sb);
+ name = sb_name.buf;
+ git_path_buf(&sb_repo, "worktrees/%s", name);
+ len = sb_repo.len;
+ if (safe_create_leading_directories_const(sb_repo.buf))
+ die_errno(_("could not create leading directories of '%s'"),
+ sb_repo.buf);
+
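+ /* if "worktrees/<name>" is taken, retry with an increasing numeric suffix */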
+ while (mkdir(sb_repo.buf, 0777)) {
+ counter++;
+ if ((errno != EEXIST) || !counter /* overflow */)
+ die_errno(_("could not create directory of '%s'"),
+ sb_repo.buf);
+ strbuf_setlen(&sb_repo, len);
+ strbuf_addf(&sb_repo, "%d", counter);
+ }
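+ /* point "name" at the directory component actually created (it may carry a numeric suffix) */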
+ name = strrchr(sb_repo.buf, '/') + 1;
+
+ junk_pid = getpid();
+ atexit(remove_junk);
+ sigchain_push_common(remove_junk_on_signal);
+
+ junk_git_dir = xstrdup(sb_repo.buf);
+ is_junk = 1;
+
+ /*
+ * lock the incomplete repo so that "prune" won't delete it; unlock
+ * once the preparation is over.
+ */
+ strbuf_addf(&sb, "%s/locked", sb_repo.buf);
+ if (opts->keep_locked)
+ write_file(sb.buf, "%s", opts->keep_locked);
+ else
+ write_file(sb.buf, _("initializing"));
+
+ strbuf_addf(&sb_git, "%s/.git", path);
+ if (safe_create_leading_directories_const(sb_git.buf))
+ die_errno(_("could not create leading directories of '%s'"),
+ sb_git.buf);
+ junk_work_tree = xstrdup(path);
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s/gitdir", sb_repo.buf);
+ strbuf_realpath(&realpath, sb_git.buf, 1);
+ write_file(sb.buf, "%s", realpath.buf);
+ strbuf_realpath(&realpath, get_git_common_dir(), 1);
+ write_file(sb_git.buf, "gitdir: %s/worktrees/%s",
+ realpath.buf, name);
+ /*
+ * This is to keep resolve_ref() happy. We need a valid HEAD
+ * or is_git_directory() will reject the directory. Any value which
+ * looks like an object ID will do since it will be immediately
+ * replaced by the symbolic-ref or update-ref invocation in the new
+ * worktree.
+ */
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s/HEAD", sb_repo.buf);
+ write_file(sb.buf, "%s", oid_to_hex(null_oid()));
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s/commondir", sb_repo.buf);
+ write_file(sb.buf, "../..");
+
+ /*
+ * If the current worktree has sparse-checkout enabled, copy its
+ * sparse-checkout patterns into the new worktree.
+ */
+ if (core_apply_sparse_checkout)
+ copy_sparse_checkout(sb_repo.buf);
+
+ /*
+ * If we are using worktree config, then copy all current config
+ * values from the current worktree into the new one, so that the
+ * new worktree behaves the same as this one.
+ */
+ if (repository_format_worktree_config)
+ copy_filtered_worktree_config(sb_repo.buf);
+
+ strvec_pushf(&child_env, "%s=%s", GIT_DIR_ENVIRONMENT, sb_git.buf);
+ strvec_pushf(&child_env, "%s=%s", GIT_WORK_TREE_ENVIRONMENT, path);
+ cp.git_cmd = 1;
+
+ if (!is_branch)
+ strvec_pushl(&cp.args, "update-ref", "HEAD",
+ oid_to_hex(&commit->object.oid), NULL);
+ else {
+ strvec_pushl(&cp.args, "symbolic-ref", "HEAD",
+ symref.buf, NULL);
+ if (opts->quiet)
+ strvec_push(&cp.args, "--quiet");
+ }
+
+ strvec_pushv(&cp.env, child_env.v);
+ ret = run_command(&cp);
+ if (ret)
+ goto done;
+
+ if (opts->checkout &&
+ (ret = checkout_worktree(opts, &child_env)))
+ goto done;
+
+ is_junk = 0;
+ FREE_AND_NULL(junk_work_tree);
+ FREE_AND_NULL(junk_git_dir);
+
+done:
+ if (ret || !opts->keep_locked) {
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s/locked", sb_repo.buf);
+ unlink_or_warn(sb.buf);
+ }
+
+ /*
+ * Hook failure does not warrant worktree deletion, so run the hook
+ * after is_junk is cleared, but do return an appropriate code when
+ * the hook fails.
+ */
+ if (!ret && opts->checkout) {
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+ strvec_pushl(&opt.env, "GIT_DIR", "GIT_WORK_TREE", NULL);
+ strvec_pushl(&opt.args,
+ oid_to_hex(null_oid()),
+ oid_to_hex(&commit->object.oid),
+ "1",
+ NULL);
+ opt.dir = path;
+
+ ret = run_hooks_opt("post-checkout", &opt);
+ }
+
+ strvec_clear(&child_env);
+ strbuf_release(&sb);
+ strbuf_release(&symref);
+ strbuf_release(&sb_repo);
+ strbuf_release(&sb_git);
+ strbuf_release(&sb_name);
+ strbuf_release(&realpath);
+ return ret;
+}
+
+static void print_preparing_worktree_line(int detach,
+ const char *branch,
+ const char *new_branch,
+ int force_new_branch)
+{
+ if (force_new_branch) {
+ struct commit *commit = lookup_commit_reference_by_name(new_branch);
+ if (!commit)
+ fprintf_ln(stderr, _("Preparing worktree (new branch '%s')"), new_branch);
+ else
+ fprintf_ln(stderr, _("Preparing worktree (resetting branch '%s'; was at %s)"),
+ new_branch,
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+ } else if (new_branch) {
+ fprintf_ln(stderr, _("Preparing worktree (new branch '%s')"), new_branch);
+ } else {
+ struct strbuf s = STRBUF_INIT;
+ if (!detach && !strbuf_check_branch_ref(&s, branch) &&
+ ref_exists(s.buf))
+ fprintf_ln(stderr, _("Preparing worktree (checking out '%s')"),
+ branch);
+ else {
+ struct commit *commit = lookup_commit_reference_by_name(branch);
+ if (!commit)
+ die(_("invalid reference: %s"), branch);
+ fprintf_ln(stderr, _("Preparing worktree (detached HEAD %s)"),
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+ }
+ strbuf_release(&s);
+ }
+}
+
+static const char *dwim_branch(const char *path, const char **new_branch)
+{
+ int n;
+ int branch_exists;
+ const char *s = worktree_basename(path, &n);
+ const char *branchname = xstrndup(s, n);
+ struct strbuf ref = STRBUF_INIT;
+
+ UNLEAK(branchname);
+
+ branch_exists = !strbuf_check_branch_ref(&ref, branchname) &&
+ ref_exists(ref.buf);
+ strbuf_release(&ref);
+ if (branch_exists)
+ return branchname;
+
+ *new_branch = branchname;
+ if (guess_remote) {
+ struct object_id oid;
+ const char *remote =
+ unique_tracking_name(*new_branch, &oid, NULL);
+ return remote;
+ }
+ return NULL;
+}
+
+static int add(int ac, const char **av, const char *prefix)
+{
+ struct add_opts opts;
+ const char *new_branch_force = NULL;
+ char *path;
+ const char *branch;
+ const char *new_branch = NULL;
+ const char *opt_track = NULL;
+ const char *lock_reason = NULL;
+ int keep_locked = 0;
+ struct option options[] = {
+ OPT__FORCE(&opts.force,
+ N_("checkout <branch> even if already checked out in other worktree"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_STRING('b', NULL, &new_branch, N_("branch"),
+ N_("create a new branch")),
+ OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
+ N_("create or reset a branch")),
+ OPT_BOOL('d', "detach", &opts.detach, N_("detach HEAD at named commit")),
+ OPT_BOOL(0, "checkout", &opts.checkout, N_("populate the new working tree")),
+ OPT_BOOL(0, "lock", &keep_locked, N_("keep the new working tree locked")),
+ OPT_STRING(0, "reason", &lock_reason, N_("string"),
+ N_("reason for locking")),
+ OPT__QUIET(&opts.quiet, N_("suppress progress reporting")),
+ OPT_PASSTHRU(0, "track", &opt_track, NULL,
+ N_("set up tracking mode (see git-branch(1))"),
+ PARSE_OPT_NOARG | PARSE_OPT_OPTARG),
+ OPT_BOOL(0, "guess-remote", &guess_remote,
+ N_("try to match the new branch name with a remote-tracking branch")),
+ OPT_END()
+ };
+
+ memset(&opts, 0, sizeof(opts));
+ opts.checkout = 1;
+ ac = parse_options(ac, av, prefix, options, git_worktree_add_usage, 0);
+ if (!!opts.detach + !!new_branch + !!new_branch_force > 1)
+ die(_("options '%s', '%s', and '%s' cannot be used together"), "-b", "-B", "--detach");
+ if (lock_reason && !keep_locked)
+ die(_("the option '%s' requires '%s'"), "--reason", "--lock");
+ if (lock_reason)
+ opts.keep_locked = lock_reason;
+ else if (keep_locked)
+ opts.keep_locked = _("added with --lock");
+
+ if (ac < 1 || ac > 2)
+ usage_with_options(git_worktree_add_usage, options);
+
+ path = prefix_filename(prefix, av[0]);
+ branch = ac < 2 ? "HEAD" : av[1];
+
+ if (!strcmp(branch, "-"))
+ branch = "@{-1}";
+
+ if (new_branch_force) {
+ struct strbuf symref = STRBUF_INIT;
+
+ new_branch = new_branch_force;
+
+ if (!opts.force &&
+ !strbuf_check_branch_ref(&symref, new_branch) &&
+ ref_exists(symref.buf))
+ die_if_checked_out(symref.buf, 0);
+ strbuf_release(&symref);
+ }
+
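+ /* no branch given: derive one from the worktree path's basename */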
+ if (ac < 2 && !new_branch && !opts.detach) {
+ const char *s = dwim_branch(path, &new_branch);
+ if (s)
+ branch = s;
+ }
+
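+ /* a name was given but is not a commit: try DWIMing it to a unique remote-tracking branch and create a matching local branch */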
+ if (ac == 2 && !new_branch && !opts.detach) {
+ struct object_id oid;
+ struct commit *commit;
+ const char *remote;
+
+ commit = lookup_commit_reference_by_name(branch);
+ if (!commit) {
+ remote = unique_tracking_name(branch, &oid, NULL);
+ if (remote) {
+ new_branch = branch;
+ branch = remote;
+ }
+ }
+ }
+ if (!opts.quiet)
+ print_preparing_worktree_line(opts.detach, branch, new_branch, !!new_branch_force);
+
+ if (new_branch) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "branch");
+ if (new_branch_force)
+ strvec_push(&cp.args, "--force");
+ if (opts.quiet)
+ strvec_push(&cp.args, "--quiet");
+ strvec_push(&cp.args, new_branch);
+ strvec_push(&cp.args, branch);
+ if (opt_track)
+ strvec_push(&cp.args, opt_track);
+ if (run_command(&cp))
+ return -1;
+ branch = new_branch;
+ } else if (opt_track) {
+ die(_("--[no-]track can only be used if a new branch is created"));
+ }
+
+ UNLEAK(path);
+ UNLEAK(opts);
+ return add_worktree(path, branch, &opts);
+}
+
+static void show_worktree_porcelain(struct worktree *wt, int line_terminator)
+{
+ const char *reason;
+
+ printf("worktree %s%c", wt->path, line_terminator);
+ if (wt->is_bare)
+ printf("bare%c", line_terminator);
+ else {
+ printf("HEAD %s%c", oid_to_hex(&wt->head_oid), line_terminator);
+ if (wt->is_detached)
+ printf("detached%c", line_terminator);
+ else if (wt->head_ref)
+ printf("branch %s%c", wt->head_ref, line_terminator);
+ }
+
+ reason = worktree_lock_reason(wt);
+ if (reason) {
+ fputs("locked", stdout);
+ if (*reason) {
+ fputc(' ', stdout);
+ write_name_quoted(reason, stdout, line_terminator);
+ } else {
+ fputc(line_terminator, stdout);
+ }
+ }
+
+ reason = worktree_prune_reason(wt, expire);
+ if (reason)
+ printf("prunable %s%c", reason, line_terminator);
+
+ fputc(line_terminator, stdout);
+}
+
+static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int cur_path_len = strlen(wt->path);
+ int path_adj = cur_path_len - utf8_strwidth(wt->path);
+ const char *reason;
+
+ strbuf_addf(&sb, "%-*s ", 1 + path_maxlen + path_adj, wt->path);
+ if (wt->is_bare)
+ strbuf_addstr(&sb, "(bare)");
+ else {
+ strbuf_addf(&sb, "%-*s ", abbrev_len,
+ find_unique_abbrev(&wt->head_oid, DEFAULT_ABBREV));
+ if (wt->is_detached)
+ strbuf_addstr(&sb, "(detached HEAD)");
+ else if (wt->head_ref) {
+ char *ref = shorten_unambiguous_ref(wt->head_ref, 0);
+ strbuf_addf(&sb, "[%s]", ref);
+ free(ref);
+ } else
+ strbuf_addstr(&sb, "(error)");
+ }
+
+ reason = worktree_lock_reason(wt);
+ if (verbose && reason && *reason)
+ strbuf_addf(&sb, "\n\tlocked: %s", reason);
+ else if (reason)
+ strbuf_addstr(&sb, " locked");
+
+ reason = worktree_prune_reason(wt, expire);
+ if (verbose && reason)
+ strbuf_addf(&sb, "\n\tprunable: %s", reason);
+ else if (reason)
+ strbuf_addstr(&sb, " prunable");
+
+ printf("%s\n", sb.buf);
+ strbuf_release(&sb);
+}
+
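+/* compute column widths (longest path, longest abbreviated OID) for aligning "list" output */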
+static void measure_widths(struct worktree **wt, int *abbrev, int *maxlen)
+{
+ int i;
+
+ for (i = 0; wt[i]; i++) {
+ int sha1_len;
+ int path_len = strlen(wt[i]->path);
+
+ if (path_len > *maxlen)
+ *maxlen = path_len;
+ sha1_len = strlen(find_unique_abbrev(&wt[i]->head_oid, *abbrev));
+ if (sha1_len > *abbrev)
+ *abbrev = sha1_len;
+ }
+}
+
+static int pathcmp(const void *a_, const void *b_)
+{
+ const struct worktree *const *a = a_;
+ const struct worktree *const *b = b_;
+ return fspathcmp((*a)->path, (*b)->path);
+}
+
+static void pathsort(struct worktree **wt)
+{
+ int n = 0;
+ struct worktree **p = wt;
+
+ while (*p++)
+ n++;
+ QSORT(wt, n, pathcmp);
+}
+
+static int list(int ac, const char **av, const char *prefix)
+{
+ int porcelain = 0;
+ int line_terminator = '\n';
+
+ struct option options[] = {
+ OPT_BOOL(0, "porcelain", &porcelain, N_("machine-readable output")),
+ OPT__VERBOSE(&verbose, N_("show extended annotations and reasons, if available")),
+ OPT_EXPIRY_DATE(0, "expire", &expire,
+ N_("add 'prunable' annotation to worktrees older than <time>")),
+ OPT_SET_INT('z', NULL, &line_terminator,
+ N_("terminate records with a NUL character"), '\0'),
+ OPT_END()
+ };
+
+ expire = TIME_MAX;
+ ac = parse_options(ac, av, prefix, options, git_worktree_list_usage, 0);
+ if (ac)
+ usage_with_options(git_worktree_list_usage, options);
+ else if (verbose && porcelain)
+ die(_("options '%s' and '%s' cannot be used together"), "--verbose", "--porcelain");
+ else if (!line_terminator && !porcelain)
+ die(_("the option '%s' requires '%s'"), "-z", "--porcelain");
+ else {
+ struct worktree **worktrees = get_worktrees();
+ int path_maxlen = 0, abbrev = DEFAULT_ABBREV, i;
+
+ /* sort worktrees by path but keep main worktree at top */
+ pathsort(worktrees + 1);
+
+ if (!porcelain)
+ measure_widths(worktrees, &abbrev, &path_maxlen);
+
+ for (i = 0; worktrees[i]; i++) {
+ if (porcelain)
+ show_worktree_porcelain(worktrees[i],
+ line_terminator);
+ else
+ show_worktree(worktrees[i], path_maxlen, abbrev);
+ }
+ free_worktrees(worktrees);
+ }
+ return 0;
+}
+
+static int lock_worktree(int ac, const char **av, const char *prefix)
+{
+ const char *reason = "", *old_reason;
+ struct option options[] = {
+ OPT_STRING(0, "reason", &reason, N_("string"),
+ N_("reason for locking")),
+ OPT_END()
+ };
+ struct worktree **worktrees, *wt;
+
+ ac = parse_options(ac, av, prefix, options, git_worktree_lock_usage, 0);
+ if (ac != 1)
+ usage_with_options(git_worktree_lock_usage, options);
+
+ worktrees = get_worktrees();
+ wt = find_worktree(worktrees, prefix, av[0]);
+ if (!wt)
+ die(_("'%s' is not a working tree"), av[0]);
+ if (is_main_worktree(wt))
+ die(_("The main working tree cannot be locked or unlocked"));
+
+ old_reason = worktree_lock_reason(wt);
+ if (old_reason) {
+ if (*old_reason)
+ die(_("'%s' is already locked, reason: %s"),
+ av[0], old_reason);
+ die(_("'%s' is already locked"), av[0]);
+ }
+
+ write_file(git_common_path("worktrees/%s/locked", wt->id),
+ "%s", reason);
+ free_worktrees(worktrees);
+ return 0;
+}
+
+static int unlock_worktree(int ac, const char **av, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ struct worktree **worktrees, *wt;
+ int ret;
+
+ ac = parse_options(ac, av, prefix, options, git_worktree_unlock_usage, 0);
+ if (ac != 1)
+ usage_with_options(git_worktree_unlock_usage, options);
+
+ worktrees = get_worktrees();
+ wt = find_worktree(worktrees, prefix, av[0]);
+ if (!wt)
+ die(_("'%s' is not a working tree"), av[0]);
+ if (is_main_worktree(wt))
+ die(_("The main working tree cannot be locked or unlocked"));
+ if (!worktree_lock_reason(wt))
+ die(_("'%s' is not locked"), av[0]);
+ ret = unlink_or_warn(git_common_path("worktrees/%s/locked", wt->id));
+ free_worktrees(worktrees);
+ return ret;
+}
+
+static void validate_no_submodules(const struct worktree *wt)
+{
+ struct index_state istate = { NULL };
+ struct strbuf path = STRBUF_INIT;
+ int i, found_submodules = 0;
+
+ if (is_directory(worktree_git_path(wt, "modules"))) {
+ /*
+ * There could be false positives, e.g. the "modules"
+ * directory exists but is empty. But it's a rare case and
+ * this simpler check is probably good enough for now.
+ */
+ found_submodules = 1;
+ } else if (read_index_from(&istate, worktree_git_path(wt, "index"),
+ get_worktree_git_dir(wt)) > 0) {
+ for (i = 0; i < istate.cache_nr; i++) {
+ struct cache_entry *ce = istate.cache[i];
+ int err;
+
+ if (!S_ISGITLINK(ce->ce_mode))
+ continue;
+
+ strbuf_reset(&path);
+ strbuf_addf(&path, "%s/%s", wt->path, ce->name);
+ if (!is_submodule_populated_gently(path.buf, &err))
+ continue;
+
+ found_submodules = 1;
+ break;
+ }
+ }
+ discard_index(&istate);
+ strbuf_release(&path);
+
+ if (found_submodules)
+ die(_("working trees containing submodules cannot be moved or removed"));
+}
+
+static int move_worktree(int ac, const char **av, const char *prefix)
+{
+ int force = 0;
+ struct option options[] = {
+ OPT__FORCE(&force,
+ N_("force move even if worktree is dirty or locked"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_END()
+ };
+ struct worktree **worktrees, *wt;
+ struct strbuf dst = STRBUF_INIT;
+ struct strbuf errmsg = STRBUF_INIT;
+ const char *reason = NULL;
+ char *path;
+
+ ac = parse_options(ac, av, prefix, options, git_worktree_move_usage,
+ 0);
+ if (ac != 2)
+ usage_with_options(git_worktree_move_usage, options);
+
+ path = prefix_filename(prefix, av[1]);
+ strbuf_addstr(&dst, path);
+ free(path);
+
+ worktrees = get_worktrees();
+ wt = find_worktree(worktrees, prefix, av[0]);
+ if (!wt)
+ die(_("'%s' is not a working tree"), av[0]);
+ if (is_main_worktree(wt))
+ die(_("'%s' is a main working tree"), av[0]);
+ if (is_directory(dst.buf)) {
+ const char *sep = find_last_dir_sep(wt->path);
+
+ if (!sep)
+ die(_("could not figure out destination name from '%s'"),
+ wt->path);
+ strbuf_trim_trailing_dir_sep(&dst);
+ strbuf_addstr(&dst, sep);
+ }
+ check_candidate_path(dst.buf, force, worktrees, "move");
+
+ validate_no_submodules(wt);
+
+ if (force < 2)
+ reason = worktree_lock_reason(wt);
+ if (reason) {
+ if (*reason)
+ die(_("cannot move a locked working tree, lock reason: %s\nuse 'move -f -f' to override or unlock first"),
+ reason);
+ die(_("cannot move a locked working tree;\nuse 'move -f -f' to override or unlock first"));
+ }
+ if (validate_worktree(wt, &errmsg, 0))
+ die(_("validation failed, cannot move working tree: %s"),
+ errmsg.buf);
+ strbuf_release(&errmsg);
+
+ if (rename(wt->path, dst.buf) == -1)
+ die_errno(_("failed to move '%s' to '%s'"), wt->path, dst.buf);
+
+ update_worktree_location(wt, dst.buf);
+
+ strbuf_release(&dst);
+ free_worktrees(worktrees);
+ return 0;
+}
+
+/*
+ * Note: "git status --porcelain" is used to determine whether it is
+ * safe to delete a whole worktree. "git status" does not ignore user
+ * configuration, so if a normal "git status" shows "clean" for the
+ * user, then the worktree is considered safe to remove.
+ *
+ * This assumption may be a bad one. We may want to ignore
+ * (potentially bad) user settings and only delete a worktree when
+ * it's absolutely safe to do so from _our_ point of view because we
+ * know better.
+ */
+static void check_clean_worktree(struct worktree *wt,
+ const char *original_path)
+{
+ struct child_process cp;
+ char buf[1];
+ int ret;
+
+ /*
+ * Until we sort this out, any worktree containing submodules is
+ * treated as "dirty" and makes this function abort.
+ */
+ validate_no_submodules(wt);
+
+ child_process_init(&cp);
+ strvec_pushf(&cp.env, "%s=%s/.git",
+ GIT_DIR_ENVIRONMENT, wt->path);
+ strvec_pushf(&cp.env, "%s=%s",
+ GIT_WORK_TREE_ENVIRONMENT, wt->path);
+ strvec_pushl(&cp.args, "status",
+ "--porcelain", "--ignore-submodules=none",
+ NULL);
+ cp.git_cmd = 1;
+ cp.dir = wt->path;
+ cp.out = -1;
+ ret = start_command(&cp);
+ if (ret)
+ die_errno(_("failed to run 'git status' on '%s'"),
+ original_path);
+ ret = xread(cp.out, buf, sizeof(buf));
+ if (ret)
+ die(_("'%s' contains modified or untracked files, use --force to delete it"),
+ original_path);
+ close(cp.out);
+ ret = finish_command(&cp);
+ if (ret)
+ die_errno(_("failed to run 'git status' on '%s', code %d"),
+ original_path, ret);
+}
+
+static int delete_git_work_tree(struct worktree *wt)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ strbuf_addstr(&sb, wt->path);
+ if (remove_dir_recursively(&sb, 0)) {
+ error_errno(_("failed to delete '%s'"), sb.buf);
+ ret = -1;
+ }
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int remove_worktree(int ac, const char **av, const char *prefix)
+{
+ int force = 0;
+ struct option options[] = {
+ OPT__FORCE(&force,
+ N_("force removal even if worktree is dirty or locked"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_END()
+ };
+ struct worktree **worktrees, *wt;
+ struct strbuf errmsg = STRBUF_INIT;
+ const char *reason = NULL;
+ int ret = 0;
+
+ ac = parse_options(ac, av, prefix, options, git_worktree_remove_usage, 0);
+ if (ac != 1)
+ usage_with_options(git_worktree_remove_usage, options);
+
+ worktrees = get_worktrees();
+ wt = find_worktree(worktrees, prefix, av[0]);
+ if (!wt)
+ die(_("'%s' is not a working tree"), av[0]);
+ if (is_main_worktree(wt))
+ die(_("'%s' is a main working tree"), av[0]);
+ if (force < 2)
+ reason = worktree_lock_reason(wt);
+ if (reason) {
+ if (*reason)
+ die(_("cannot remove a locked working tree, lock reason: %s\nuse 'remove -f -f' to override or unlock first"),
+ reason);
+ die(_("cannot remove a locked working tree;\nuse 'remove -f -f' to override or unlock first"));
+ }
+ if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK))
+ die(_("validation failed, cannot remove working tree: %s"),
+ errmsg.buf);
+ strbuf_release(&errmsg);
+
+ if (file_exists(wt->path)) {
+ if (!force)
+ check_clean_worktree(wt, av[0]);
+
+ ret |= delete_git_work_tree(wt);
+ }
+ /*
+ * continue even if ret is non-zero; there's no going back
+ * from here.
+ */
+ ret |= delete_git_dir(wt->id);
+ delete_worktrees_dir_if_empty();
+
+ free_worktrees(worktrees);
+ return ret;
+}
+
+static void report_repair(int iserr, const char *path, const char *msg, void *cb_data)
+{
+ if (!iserr) {
+ fprintf_ln(stderr, _("repair: %s: %s"), msg, path);
+ } else {
+ int *exit_status = (int *)cb_data;
+ fprintf_ln(stderr, _("error: %s: %s"), msg, path);
+ *exit_status = 1;
+ }
+}
+
+static int repair(int ac, const char **av, const char *prefix)
+{
+ const char **p;
+ const char *self[] = { ".", NULL };
+ struct option options[] = {
+ OPT_END()
+ };
+ int rc = 0;
+
+ ac = parse_options(ac, av, prefix, options, git_worktree_repair_usage, 0);
+ p = ac > 0 ? av : self;
+ for (; *p; p++)
+ repair_worktree_at_path(*p, report_repair, &rc);
+ repair_worktrees(report_repair, &rc);
+ return rc;
+}
+
+int cmd_worktree(int ac, const char **av, const char *prefix)
+{
+ parse_opt_subcommand_fn *fn = NULL;
+ struct option options[] = {
+ OPT_SUBCOMMAND("add", &fn, add),
+ OPT_SUBCOMMAND("prune", &fn, prune),
+ OPT_SUBCOMMAND("list", &fn, list),
+ OPT_SUBCOMMAND("lock", &fn, lock_worktree),
+ OPT_SUBCOMMAND("unlock", &fn, unlock_worktree),
+ OPT_SUBCOMMAND("move", &fn, move_worktree),
+ OPT_SUBCOMMAND("remove", &fn, remove_worktree),
+ OPT_SUBCOMMAND("repair", &fn, repair),
+ OPT_END()
+ };
+
+ git_config(git_worktree_config, NULL);
+
+ if (!prefix)
+ prefix = "";
+
+ ac = parse_options(ac, av, prefix, options, git_worktree_usage, 0);
+ return fn(ac, av, prefix);
+}
diff --git a/builtin/write-tree.c b/builtin/write-tree.c
new file mode 100644
index 0000000..45d6170
--- /dev/null
+++ b/builtin/write-tree.c
@@ -0,0 +1,57 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "tree.h"
+#include "cache-tree.h"
+#include "parse-options.h"
+
+static const char * const write_tree_usage[] = {
+ N_("git write-tree [--missing-ok] [--prefix=<prefix>/]"),
+ NULL
+};
+
+int cmd_write_tree(int argc, const char **argv, const char *cmd_prefix)
+{
+ int flags = 0, ret;
+ const char *tree_prefix = NULL;
+ struct object_id oid;
+ const char *me = "git-write-tree";
+ struct option write_tree_options[] = {
+ OPT_BIT(0, "missing-ok", &flags, N_("allow missing objects"),
+ WRITE_TREE_MISSING_OK),
+ OPT_STRING(0, "prefix", &tree_prefix, N_("<prefix>/"),
+ N_("write tree object for a subdirectory <prefix>")),
+ { OPTION_BIT, 0, "ignore-cache-tree", &flags, NULL,
+ N_("only useful for debugging"),
+ PARSE_OPT_HIDDEN | PARSE_OPT_NOARG, NULL,
+ WRITE_TREE_IGNORE_CACHE_TREE },
+ OPT_END()
+ };
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, cmd_prefix, write_tree_options,
+ write_tree_usage, 0);
+
+ ret = write_cache_as_tree(&oid, flags, tree_prefix);
+ switch (ret) {
+ case 0:
+ printf("%s\n", oid_to_hex(&oid));
+ break;
+ case WRITE_TREE_UNREADABLE_INDEX:
+ die("%s: error reading the index", me);
+ break;
+ case WRITE_TREE_UNMERGED_INDEX:
+ die("%s: error building trees", me);
+ break;
+ case WRITE_TREE_PREFIX_ERROR:
+ die("%s: prefix %s not found", me, tree_prefix);
+ break;
+ }
+ return ret;
+}