From 1740bbb38d1bae48ad91aca021117735c4c4988a Mon Sep 17 00:00:00 2001 From: Patrick Steinhardt Date: Wed, 1 Apr 2020 15:16:18 +0200 Subject: [PATCH 01/14] merge: cache negative cache results for similarity metrics When computing renames, we cache the hash signatures for each of the potentially conflicting entries so that we do not need to repeatedly read the file and can at least halfway efficiently determine whether two files are similar enough to be deemed a rename. In order to make the hash signatures meaningful, we require at least four lines of data to be present, resulting in at least four different hashes that can be compared. Files that are deemed too small are not cached at all and will thus be repeatedly re-hashed, which is usually not a huge issue. The issue with above heuristic is in case a file does _not_ have at least four lines, where a line is anything separated by a consecutive run of "\n" or "\0" characters. For example "a\nb" is two lines, but "a\0\0b" is also just two lines. Taken to the extreme, a file that has megabytes of consecutive space- or NUL-only bytes may also be deemed as too small and thus not get cached. As a result, we will repeatedly load its blob, calculate its hash signature just to finally throw it away as we notice it's not of any value. When you've got a comparatively big file that you compare against a big set of potentially renamed files, then the cost simply explodes. The issue can be trivially fixed by introducing negative cache entries. Whenever we determine that a given blob does not have a meaningful representation via a hash signature, we store this negative cache marker and will from then on not hash it again, but also ignore it as a potential rename target. This should help the "normal" case already where you have a lot of small files as rename candidates, but in the above scenario its savings are extraordinarily high. 
To verify we do not hit the issue anymore with described solution, this commit adds a test that uses the exact same setup described above with one 50 megabyte blob of '\0' characters and 1000 other files that get renamed. Without the negative cache: $ time ./libgit2_clar -smerge::trees::renames::cache_recomputation >/dev/null real 11m48.377s user 11m11.576s sys 0m35.187s And with the negative cache: $ time ./libgit2_clar -smerge::trees::renames::cache_recomputation >/dev/null real 0m1.972s user 0m1.851s sys 0m0.118s So this represents a ~350-fold performance improvement, but it obviously depends on how many files you have and how big the blob is. The test numbers were chosen in a way that one will immediately notice as soon as the bug resurfaces. --- src/merge.c | 27 +++++++++---- tests/merge/trees/renames.c | 77 +++++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 7 deletions(-) diff --git a/src/merge.c b/src/merge.c index 05a776e45..afe69e564 100644 --- a/src/merge.c +++ b/src/merge.c @@ -68,6 +68,16 @@ struct merge_diff_df_data { git_merge_diff *prev_conflict; }; +/* + * This acts as a negative cache entry marker. In case we've tried to calculate + * similarity metrics for a given blob already but `git_hashsig` determined + * that it's too small in order to have a meaningful hash signature, we will + * insert the address of this marker instead of `NULL`. Like this, we can + * easily check whether we have checked a given entry already and skip doing the 
+ */ +static int cache_invalid_marker; + /* Merge base computation */ int merge_bases_many(git_commit_list **out, git_revwalk **walk_out, git_repository *repo, size_t length, const git_oid input_array[]) @@ -1027,6 +1037,9 @@ static int index_entry_similarity_calc( git_object_size_t blobsize; int error; + if (*out || *out == &cache_invalid_marker) + return 0; + *out = NULL; if ((error = git_blob_lookup(&blob, repo, &entry->id)) < 0) @@ -1047,6 +1060,8 @@ static int index_entry_similarity_calc( error = opts->metric->buffer_signature(out, &diff_file, git_blob_rawcontent(blob), (size_t)blobsize, opts->metric->payload); + if (error == GIT_EBUFS) + *out = &cache_invalid_marker; git_blob_free(blob); @@ -1069,18 +1084,16 @@ static int index_entry_similarity_inexact( return 0; /* update signature cache if needed */ - if (!cache[a_idx] && (error = index_entry_similarity_calc(&cache[a_idx], repo, a, opts)) < 0) - return error; - if (!cache[b_idx] && (error = index_entry_similarity_calc(&cache[b_idx], repo, b, opts)) < 0) + if ((error = index_entry_similarity_calc(&cache[a_idx], repo, a, opts)) < 0 || + (error = index_entry_similarity_calc(&cache[b_idx], repo, b, opts)) < 0) return error; /* some metrics may not wish to process this file (too big / too small) */ - if (!cache[a_idx] || !cache[b_idx]) + if (cache[a_idx] == &cache_invalid_marker || cache[b_idx] == &cache_invalid_marker) return 0; /* compare signatures */ - if (opts->metric->similarity( - &score, cache[a_idx], cache[b_idx], opts->metric->payload) < 0) + if (opts->metric->similarity(&score, cache[a_idx], cache[b_idx], opts->metric->payload) < 0) return -1; /* clip score */ @@ -1550,7 +1563,7 @@ int git_merge_diff_list__find_renames( done: if (cache != NULL) { for (i = 0; i < cache_size; ++i) { - if (cache[i] != NULL) + if (cache[i] != NULL && cache[i] != &cache_invalid_marker) opts->metric->free_signature(cache[i], opts->metric->payload); } diff --git a/tests/merge/trees/renames.c b/tests/merge/trees/renames.c 
index e0b12af3d..c515aaf1b 100644 --- a/tests/merge/trees/renames.c +++ b/tests/merge/trees/renames.c @@ -274,3 +274,80 @@ void test_merge_trees_renames__submodules(void) cl_assert(merge_test_index(index, merge_index_entries, 7)); git_index_free(index); } + +void test_merge_trees_renames__cache_recomputation(void) +{ + git_oid blob, binary, ancestor_oid, theirs_oid, ours_oid; + git_merge_options opts = GIT_MERGE_OPTIONS_INIT; + git_buf path = GIT_BUF_INIT; + git_treebuilder *builder; + git_tree *ancestor_tree, *their_tree, *our_tree; + git_index *index; + size_t blob_size; + void *data; + size_t i; + + cl_git_pass(git_oid_fromstr(&blob, "a2d8d1824c68541cca94ffb90f79291eba495921")); + + /* + * Create a 50MB blob that consists of NUL bytes only. It is important + * that this blob is of a special format, most importantly it cannot + * contain more than four non-consecutive newlines or NUL bytes. This + * is because of git_hashsig's inner workings where all files with less + * than four "lines" are deemed too small. + */ + blob_size = 50 * 1024 * 1024; + cl_assert(data = git__calloc(blob_size, 1)); + cl_git_pass(git_blob_create_from_buffer(&binary, repo, data, blob_size)); + + /* + * Create the common ancestor, which has 1000 dummy blobs and the binary + * blob. The dummy blobs serve as potential rename targets for the + * binary blob. + */ + cl_git_pass(git_treebuilder_new(&builder, repo, NULL)); + for (i = 0; i < 1000; i++) { + cl_git_pass(git_buf_printf(&path, "%"PRIuMAX".txt", i)); + cl_git_pass(git_treebuilder_insert(NULL, builder, path.ptr, &blob, GIT_FILEMODE_BLOB)); + git_buf_clear(&path); + } + cl_git_pass(git_treebuilder_insert(NULL, builder, "original.bin", &binary, GIT_FILEMODE_BLOB)); + cl_git_pass(git_treebuilder_write(&ancestor_oid, builder)); + + /* We now rename the binary blob in our tree. 
*/ + cl_git_pass(git_treebuilder_remove(builder, "original.bin")); + cl_git_pass(git_treebuilder_insert(NULL, builder, "renamed.bin", &binary, GIT_FILEMODE_BLOB)); + cl_git_pass(git_treebuilder_write(&ours_oid, builder)); + + git_treebuilder_free(builder); + + /* And move everything into a subdirectory in their tree. */ + cl_git_pass(git_treebuilder_new(&builder, repo, NULL)); + cl_git_pass(git_treebuilder_insert(NULL, builder, "subdir", &ancestor_oid, GIT_FILEMODE_TREE)); + cl_git_pass(git_treebuilder_write(&theirs_oid, builder)); + + /* + * Now merge ancestor, ours and theirs. As `git_hashsig` refuses to + * create a hash signature for the 50MB binary file, we historically + * didn't cache the hashsig computation for it. As a result, we now + * started looking up the 50MB blob and scanning it at least 1000 + * times, which takes a long time. + * + * The number of 1000 blobs is chosen in such a way that it's + * noticeable when the bug creeps in again, as it takes around 12 + * minutes on my machine to compute the following merge. + */ + opts.target_limit = 5000; + cl_git_pass(git_tree_lookup(&ancestor_tree, repo, &ancestor_oid)); + cl_git_pass(git_tree_lookup(&their_tree, repo, &theirs_oid)); + cl_git_pass(git_tree_lookup(&our_tree, repo, &ours_oid)); + cl_git_pass(git_merge_trees(&index, repo, ancestor_tree, our_tree, their_tree, &opts)); + + git_treebuilder_free(builder); + git_buf_dispose(&path); + git_index_free(index); + git_tree_free(ancestor_tree); + git_tree_free(their_tree); + git_tree_free(our_tree); + git__free(data); +} From aee48d27add60ea7677d7133b3b28e6ce8181db6 Mon Sep 17 00:00:00 2001 From: Seth Junot Date: Sat, 4 Apr 2020 16:35:33 -0700 Subject: [PATCH 02/14] Fix typo causing removal of symbol 'git_worktree_prune_init_options' Commit 0b5ba0d replaced this function with an "option_init" equivalent, but misspelled the replacement function. As a result, this symbol has been missing from libgit2.so ever since. 
--- src/worktree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/worktree.c b/src/worktree.c index e171afbb2..74b32f97c 100644 --- a/src/worktree.c +++ b/src/worktree.c @@ -506,7 +506,7 @@ int git_worktree_prune_options_init( return 0; } -int git_worktree_pruneinit_options(git_worktree_prune_options *opts, +int git_worktree_prune_init_options(git_worktree_prune_options *opts, unsigned int version) { return git_worktree_prune_options_init(opts, version); From 88ced11585ebaf7e9f43ab0a0a72459306707890 Mon Sep 17 00:00:00 2001 From: nia Date: Sun, 5 Apr 2020 18:33:14 +0100 Subject: [PATCH 03/14] deps: ntlmclient: use htobe64 on NetBSD too --- deps/ntlmclient/compat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/ntlmclient/compat.h b/deps/ntlmclient/compat.h index 555fa3fe4..f4d859aec 100644 --- a/deps/ntlmclient/compat.h +++ b/deps/ntlmclient/compat.h @@ -25,7 +25,7 @@ /* See man page endian(3) */ # include # define htonll htobe64 -#elif defined(__OpenBSD__) +#elif defined(__NetBSD__) || defined(__OpenBSD__) /* See man page htobe64(3) */ # include # define htonll htobe64 From 234a2e6e622415af84dd402df1c24e278dfcdea2 Mon Sep 17 00:00:00 2001 From: Philip Kelley Date: Sun, 10 May 2020 21:43:38 -0700 Subject: [PATCH 04/14] Fix uninitialized stack memory and NULL ptr dereference in stash_to_index Caught by static analysis. 
--- src/stash.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/stash.c b/src/stash.c index 4a13d0530..790f56fdd 100644 --- a/src/stash.c +++ b/src/stash.c @@ -173,7 +173,7 @@ static int stash_to_index( git_index *index, const char *path) { - git_index *repo_index; + git_index *repo_index = NULL; git_index_entry entry = {{0}}; struct stat st; int error; @@ -187,7 +187,7 @@ static int stash_to_index( return error; git_index_entry__init_from_stat(&entry, &st, - (repo_index != NULL || !repo_index->distrust_filemode)); + (repo_index == NULL || !repo_index->distrust_filemode)); entry.path = path; From c229591e3285e57e74c24f9d939ffcabd0a5694f Mon Sep 17 00:00:00 2001 From: Suhaib Mujahid Date: Tue, 12 May 2020 10:55:14 -0400 Subject: [PATCH 05/14] feat: Check the version in package.json --- script/release.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/script/release.py b/script/release.py index e0f29538e..3d8e9b806 100755 --- a/script/release.py +++ b/script/release.py @@ -56,6 +56,17 @@ def verify_version(version): if v[0] != v[1]: raise Error("version.h: define '{}' does not match (got '{}', expected '{}')".format(k, v[0], v[1])) + with open('package.json') as f: + pkg = json.load(f) + + try: + pkg_version = Version(pkg["version"]) + except KeyError as err: + raise Error("package.json: missing the field {}".format(err)) + + if pkg_version != version: + raise Error("package.json: version does not match (got '{}', expected '{}')".format(pkg_version, version)) + def generate_relnotes(tree, version): with open('docs/changelog.md') as f: lines = f.readlines() From c0bf387ffc6914bbba3a77fac2d50f61bf7769d6 Mon Sep 17 00:00:00 2001 From: Wil Shipley Date: Wed, 6 May 2020 19:57:07 -0700 Subject: [PATCH 06/14] config: ignore unreadable configuration files Modified `config_file_open()` so it returns 0 if the config file is not readable, which happens on global config files under macOS sandboxing (note that for some reason 
`access(F_OK)` DOES work with sandboxing, but it is lying). Without this read check sandboxed applications on macOS can not open any repository, because `config_file_read()` will return GIT_ERROR when it cannot read the global /Users/username/.gitconfig file, and the upper layers will just completely abort on GIT_ERROR when attempting to load the global config file, so no repositories can be opened. --- src/config_file.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/config_file.c b/src/config_file.c index c9e36493e..b1e002836 100644 --- a/src/config_file.c +++ b/src/config_file.c @@ -111,6 +111,15 @@ static int config_file_open(git_config_backend *cfg, git_config_level_t level, c if (!git_path_exists(b->file.path)) return 0; + /* + * git silently ignores configuration files that are not + * readable. We emulate that behavior. This is particularly + * important for sandboxed applications on macOS where the + * git configuration files may not be readable. + */ + if (p_access(b->file.path, R_OK) < 0) + return GIT_ENOTFOUND; + if (res < 0 || (res = config_file_read(b->entries, repo, &b->file, level, 0)) < 0) { git_config_entries_free(b->entries); b->entries = NULL; From 8edc39df3b9268960775be840e15e4e5ad8dfd25 Mon Sep 17 00:00:00 2001 From: Edward Thomson Date: Sat, 23 May 2020 11:42:19 +0100 Subject: [PATCH 07/14] config: test that unreadable files are treated as notfound --- tests/config/read.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/config/read.c b/tests/config/read.c index 008dfd9fc..ba97302f7 100644 --- a/tests/config/read.c +++ b/tests/config/read.c @@ -849,6 +849,23 @@ void test_config_read__invalid_quoted_third_section(void) git_config_free(cfg); } +void test_config_read__unreadable_file_ignored(void) +{ + git_buf buf = GIT_BUF_INIT; + git_config *cfg; + int ret; + + cl_set_cleanup(&clean_test_config, NULL); + cl_git_mkfile("./testconfig", "[some] var = value\n[some \"OtheR\"] var = value"); + 
cl_git_pass(p_chmod("./testconfig", 0)); + + ret = git_config_open_ondisk(&cfg, "./testconfig"); + cl_assert(ret == 0 || ret == GIT_ENOTFOUND); + + git_config_free(cfg); + git_buf_dispose(&buf); +} + void test_config_read__single_line(void) { git_buf buf = GIT_BUF_INIT; From e2294859bdd1a9f65d04e1b132c2a0466b0c2bd1 Mon Sep 17 00:00:00 2001 From: Patrick Wang Date: Tue, 26 May 2020 04:53:09 +0800 Subject: [PATCH 08/14] index: write v4: bugfix: prefix path with strip_len, not same_len According to index-format.txt of git, the path of an entry is prefixed with N, where N indicates the length of bytes to be stripped. --- src/index.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/index.c b/src/index.c index 907bd6d93..36a8bdb7b 100644 --- a/src/index.c +++ b/src/index.c @@ -2744,7 +2744,7 @@ static int write_disk_entry(git_filebuf *file, git_index_entry *entry, const cha ++same_len; } path_len -= same_len; - varint_len = git_encode_varint(NULL, 0, same_len); + varint_len = git_encode_varint(NULL, 0, strlen(last) - same_len); } disk_size = index_entry_size(path_len, varint_len, entry->flags); @@ -2795,7 +2795,7 @@ static int write_disk_entry(git_filebuf *file, git_index_entry *entry, const cha if (last) { varint_len = git_encode_varint((unsigned char *) path, - disk_size, same_len); + disk_size, strlen(last) - same_len); assert(varint_len > 0); path += varint_len; disk_size -= varint_len; From 3939e810493b4dde595f9b523e24ad1b684ba9bb Mon Sep 17 00:00:00 2001 From: Patrick Wang Date: Tue, 26 May 2020 20:36:13 +0800 Subject: [PATCH 09/14] tests: index::version: write v4 index: re-open repo to read written v4 index The `git_index_free()` merely decrements the reference counter from 2 to 1, and does not "free" the index. Thus, the following `git_repository_index()` merely increases the counter to 2, instead of reading the index from disk. The written index is not read and parsed, which effectively makes this test case a no-op. 
--- tests/index/version.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/index/version.c b/tests/index/version.c index 3827df861..b6c0b7918 100644 --- a/tests/index/version.c +++ b/tests/index/version.c @@ -43,6 +43,7 @@ void test_index_version__can_write_v4(void) "xz", "xyzzyx" }; + git_repository *repo; git_index_entry entry; git_index *index; size_t i; @@ -63,7 +64,8 @@ void test_index_version__can_write_v4(void) cl_git_pass(git_index_write(index)); git_index_free(index); - cl_git_pass(git_repository_index(&index, g_repo)); + cl_git_pass(git_repository_open(&repo, git_repository_path(g_repo))); + cl_git_pass(git_repository_index(&index, repo)); cl_assert(git_index_version(index) == 4); for (i = 0; i < ARRAY_SIZE(paths); i++) { @@ -74,6 +76,7 @@ void test_index_version__can_write_v4(void) } git_index_free(index); + git_repository_free(repo); } void test_index_version__v4_uses_path_compression(void) From 7a6566e391f50845ea81e3dc18d0da69790c31ee Mon Sep 17 00:00:00 2001 From: Edward Thomson Date: Sat, 30 May 2020 15:21:48 +0100 Subject: [PATCH 10/14] online::clone: test a googlesource URL Google Git (googlesource.com) behaves differently than git proper. Test that we can communicate with it. 
--- tests/online/clone.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/online/clone.c b/tests/online/clone.c index 034d0c2e8..9107956bd 100644 --- a/tests/online/clone.c +++ b/tests/online/clone.c @@ -11,6 +11,7 @@ #define BB_REPO_URL "https://libgit3@bitbucket.org/libgit2/testgitrepository.git" #define BB_REPO_URL_WITH_PASS "https://libgit3:libgit3@bitbucket.org/libgit2/testgitrepository.git" #define BB_REPO_URL_WITH_WRONG_PASS "https://libgit3:wrong@bitbucket.org/libgit2/testgitrepository.git" +#define GOOGLESOURCE_REPO_URL "https://chromium.googlesource.com/external/github.com/sergi/go-diff" #define SSH_REPO_URL "ssh://github.com/libgit2/TestGitRepository" @@ -463,6 +464,13 @@ void test_online_clone__bitbucket_falls_back_to_specified_creds(void) cl_fixture_cleanup("./foo"); } +void test_online_clone__googlesource(void) +{ + cl_git_pass(git_clone(&g_repo, GOOGLESOURCE_REPO_URL, "./foo", &g_options)); + git_repository_free(g_repo); g_repo = NULL; + cl_fixture_cleanup("./foo"); +} + static int cancel_at_half(const git_indexer_progress *stats, void *payload) { GIT_UNUSED(payload); From ed045f0912ad6cbc512a98ebfbdd6ea08682f4ad Mon Sep 17 00:00:00 2001 From: Edward Thomson Date: Mon, 1 Jun 2020 19:10:38 +0100 Subject: [PATCH 11/14] httpclient: read_body should return 0 at EOF When users call `git_http_client_read_body`, it should return 0 at the end of a message. When the `on_message_complete` callback is called, this will set `client->state` to `DONE`. In our read loop, we look for this condition and exit. Without this, when there is no data left except the end of message chunk (`0\r\n`) in the http stream, we would block by reading the three bytes off the stream but not making progress in any `on_body` callbacks. Listening to the `on_message_complete` callback allows us to stop trying to read from the socket when we've read the end of message chunk. 
--- src/transports/httpclient.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/transports/httpclient.c b/src/transports/httpclient.c index bde67ca9f..af90129df 100644 --- a/src/transports/httpclient.c +++ b/src/transports/httpclient.c @@ -1419,15 +1419,20 @@ int git_http_client_read_body( client->parser.data = &parser_context; /* - * Clients expect to get a non-zero amount of data from us. - * With a sufficiently small buffer, one might only read a chunk - * length. Loop until we actually have data to return. + * Clients expect to get a non-zero amount of data from us, + * so we either block until we have data to return, until we + * hit EOF or there's an error. Do this in a loop, since we + * may end up reading only some stream metadata (like chunk + * information). */ while (!parser_context.output_written) { error = client_read_and_parse(client); if (error <= 0) goto done; + + if (client->state == DONE) + break; } assert(parser_context.output_written <= INT_MAX); From bc61161b9879e03842a8df6764d40a49497e765e Mon Sep 17 00:00:00 2001 From: Edward Thomson Date: Mon, 1 Jun 2020 23:53:55 +0100 Subject: [PATCH 12/14] httpclient: don't read more than the client wants When `git_http_client_read_body` is invoked, it provides the size of the buffer that can be read into. This will be set as the parser context's `output_size` member. Use this as an upper limit on our reads, and ensure that we do not read more than the client requests. 
--- src/transports/httpclient.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/transports/httpclient.c b/src/transports/httpclient.c index af90129df..72a65f00f 100644 --- a/src/transports/httpclient.c +++ b/src/transports/httpclient.c @@ -1038,6 +1038,7 @@ on_error: GIT_INLINE(int) client_read(git_http_client *client) { + http_parser_context *parser_context = client->parser.data; git_stream *stream; char *buf = client->read_buf.ptr + client->read_buf.size; size_t max_len; @@ -1054,6 +1055,9 @@ GIT_INLINE(int) client_read(git_http_client *client) max_len = client->read_buf.asize - client->read_buf.size; max_len = min(max_len, INT_MAX); + if (parser_context->output_size) + max_len = min(max_len, parser_context->output_size); + if (max_len == 0) { git_error_set(GIT_ERROR_HTTP, "no room in output buffer"); return -1; From 79507cd8b2fb89555fa1a900d84f24e629fdf902 Mon Sep 17 00:00:00 2001 From: Edward Thomson Date: Mon, 1 Jun 2020 22:44:14 +0100 Subject: [PATCH 13/14] httpclient: clear the read_buf on new requests The httpclient implementation keeps a `read_buf` that holds the data in the body of the response after the headers have been written. We store that data for subsequent calls to `git_http_client_read_body`. If we want to stop reading body data and send another request, we need to clear that cached data. Clear the cached body data on new requests, just like we read any outstanding data from the socket. --- src/transports/httpclient.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/transports/httpclient.c b/src/transports/httpclient.c index 72a65f00f..010baa604 100644 --- a/src/transports/httpclient.c +++ b/src/transports/httpclient.c @@ -1195,7 +1195,7 @@ static void complete_response_body(git_http_client *client) /* If we're not keeping alive, don't bother. 
*/ if (!client->keepalive) { client->connected = 0; - return; + goto done; } parser_context.client = client; @@ -1209,6 +1209,9 @@ static void complete_response_body(git_http_client *client) git_error_clear(); client->connected = 0; } + +done: + git_buf_clear(&client->read_buf); } int git_http_client_send_request( From 11dca3ca20af4bfa74d926db5693ed2314ecceb2 Mon Sep 17 00:00:00 2001 From: Edward Thomson Date: Wed, 3 Jun 2020 10:23:09 +0100 Subject: [PATCH 14/14] v1.0.1: prepare the changelog --- docs/changelog.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/docs/changelog.md b/docs/changelog.md index 97cf94c6b..c2423c338 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,3 +1,33 @@ +v1.0.1 +------ + +This is a bugfix release with the following changes: + +- Calculating information about renamed files during merges is more + efficient because dissimilarity about files is now being cached and + no longer needs to be recomputed. + +- The `git_worktree_prune_init_options` has been correctly restored for + backward compatibility. In v1.0 it was incorrectly deprecated with a + typo. + +- The optional ntlmclient dependency now supports NetBSD. + +- A bug where attempting to stash on a bare repository may have failed + has been fixed. + +- Configuration files that are unreadable due to permissions are now + silently ignored, and treated as if they do not exist. This matches + git's behavior; previously this case would have been an error. + +- v4 index files are now correctly written; previously we would read + them correctly but would not write the prefix-compression accurately, + causing corruption. + +- A bug where the smart HTTP transport could not read large data packets + has been fixed. Previously, fetching from servers like Gerrit, that + sent large data packets, would error. + v1.0 ----