| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-07 05:28:39 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-07 05:28:39 +0000 |
| commit | 4138bce0c92f9e5e7879983988f135c4509ff3bc (patch) | |
| tree | 68fef474c8e4c37d355b92acbec8c5a731661fd8 /debian | |
| parent | Releasing progress-linux version 3.2.1-3+deb10u1progress5u1. (diff) | |
| download | knot-resolver-4138bce0c92f9e5e7879983988f135c4509ff3bc.tar.xz, knot-resolver-4138bce0c92f9e5e7879983988f135c4509ff3bc.zip | |
Merging debian version 3.2.1-3+deb10u2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
| -rw-r--r-- | debian/changelog | 10 |
| -rw-r--r-- | debian/patches/CVE-2019-10190.patch | 293 |
| -rw-r--r-- | debian/patches/CVE-2019-10191.patch | 158 |
| -rw-r--r-- | debian/patches/CVE-2019-19331.patch | 371 |
| -rw-r--r-- | debian/patches/CVE-2020-12667-part1.patch | 90 |
| -rw-r--r-- | debian/patches/CVE-2020-12667-part2.patch | 86 |
| -rw-r--r-- | debian/patches/series | 5 |
7 files changed, 1013 insertions, 0 deletions
diff --git a/debian/changelog b/debian/changelog index 6c772ea..1d320c7 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,13 @@ +knot-resolver (3.2.1-3+deb10u2) buster-security; urgency=high + + * Non-maintainer upload by the LTS team. + * Fix CVE-2019-10190, CVE-2019-10191, CVE-2019-19331, CVE-2020-12667: + Several security vulnerabilities have been discovered in knot-resolver, a + caching, DNSSEC-validating DNS resolver which may allow remote attackers to + bypass DNSSEC validation or cause a denial-of-service. + + -- Markus Koschany <apo@debian.org> Thu, 25 Apr 2024 14:42:02 +0200 + knot-resolver (3.2.1-3+deb10u1progress5u1) engywuck-security; urgency=high * Uploading to engywuck-security, remaining changes: diff --git a/debian/patches/CVE-2019-10190.patch b/debian/patches/CVE-2019-10190.patch new file mode 100644 index 0000000..c13a7df --- /dev/null +++ b/debian/patches/CVE-2019-10190.patch @@ -0,0 +1,293 @@ +From: Markus Koschany <apo@debian.org> +Date: Mon, 11 Mar 2024 14:16:29 +0100 +Subject: CVE-2019-10190 + +Bug-Debian: https://bugs.debian.org/932048 +Origin: https://gitlab.labs.nic.cz/knot/knot-resolver/merge_requests/827 +--- + lib/layer/iterate.c | 2 +- + lib/resolve.c | 96 +++++++++++++++++++++++------------------ + lib/resolve.h | 2 +- + modules/cookies/cookiemonster.c | 4 +- + modules/hints/hints.c | 2 +- + 5 files changed, 59 insertions(+), 47 deletions(-) + +diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c +index f7dd630..ee2607e 100644 +--- a/lib/layer/iterate.c ++++ b/lib/layer/iterate.c +@@ -557,7 +557,7 @@ static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, + } else { + int cnt_ = 0; + state = update_nsaddr(rr, query->parent, &cnt_); +- if (state == KR_STATE_FAIL) { ++ if (state & KR_STATE_FAIL) { + return state; + } + } +diff --git a/lib/resolve.c b/lib/resolve.c +index 8718f25..b4b3657 100644 +--- a/lib/resolve.c ++++ b/lib/resolve.c +@@ -458,7 +458,7 @@ static int answer_prepare(struct kr_request *req, knot_pkt_t *query) + static int write_extra_records(const rr_array_t *arr, uint16_t reorder, knot_pkt_t *answer) + { + for (size_t i = 0; i < arr->len; ++i) { +- int err = knot_pkt_put_rotate(answer, 0, arr->at[i], reorder, 0); ++ int err = knot_pkt_put_rotate(answer, 0, arr->at[i], reorder, KNOT_PF_NOTRUNC); + if (err != KNOT_EOK) { + return err == KNOT_ESPACE ? kr_ok() : kr_error(err); + } +@@ -548,42 +548,52 @@ static int answer_padding(struct kr_request *request) + return kr_ok(); + } + +-static int answer_fail(struct kr_request *request) ++/* Make a clean SERVFAIL answer. */ ++static void answer_fail(struct kr_request *request) + { ++ /* Note: OPT in SERVFAIL response is still useful for cookies/additional info. */ + knot_pkt_t *answer = request->answer; ++ knot_rrset_t *opt_rr = answer->opt_rr; /* it gets NULLed below */ + int ret = kr_pkt_clear_payload(answer); + knot_wire_clear_ad(answer->wire); + knot_wire_clear_aa(answer->wire); + knot_wire_set_rcode(answer->wire, KNOT_RCODE_SERVFAIL); +- if (ret == 0 && answer->opt_rr) { +- /* OPT in SERVFAIL response is still useful for cookies/additional info. */ ++ if (ret == 0 && opt_rr) { + knot_pkt_begin(answer, KNOT_ADDITIONAL); + answer_padding(request); /* Ignore failed padding in SERVFAIL answer. 
*/ +- ret = edns_put(answer, false); ++ answer->opt_rr = opt_rr; ++ edns_put(answer, false); + } +- return ret; + } + +-static int answer_finalize(struct kr_request *request, int state) ++static void answer_finalize(struct kr_request *request) + { + struct kr_rplan *rplan = &request->rplan; + knot_pkt_t *answer = request->answer; + +- /* Always set SERVFAIL for bogus answers. */ +- if (state == KR_STATE_FAIL && rplan->pending.len > 0) { +- struct kr_query *last = array_tail(rplan->pending); +- if ((last->flags.DNSSEC_WANT) && (last->flags.DNSSEC_BOGUS)) { +- return answer_fail(request); +- } +- } +- +- struct kr_query *last = rplan->resolved.len > 0 ? array_tail(rplan->resolved) : NULL; ++ struct kr_query *const last = ++ rplan->resolved.len > 0 ? array_tail(rplan->resolved) : NULL; + /* TODO ^^^^ this is slightly fragile */ + ++ if (!last) { ++ /* Suspicious: no kr_query got resolved (not even from cache), ++ * so let's (defensively) SERVFAIL the request. ++ * ATM many checks below depend on `last` anyway, ++ * so this helps to avoid surprises. */ ++ answer_fail(request); ++ return; ++ } ++ /* TODO: clean this up in !660 or followup, and it isn't foolproof anyway. */ ++ if (last->flags.DNSSEC_BOGUS ++ || (rplan->pending.len > 0 && array_tail(rplan->pending)->flags.DNSSEC_BOGUS)) { ++ answer_fail(request); ++ return; ++ } ++ + /* AD flag. We can only change `secure` from true to false. + * Be conservative. Primary approach: check ranks of all RRs in wire. + * Only "negative answers" need special handling. */ +- bool secure = last != NULL && state == KR_STATE_DONE /*< suspicious otherwise */ ++ bool secure = last != NULL && request->state == KR_STATE_DONE /*< suspicious otherwise */ + && knot_pkt_qtype(answer) != KNOT_RRTYPE_RRSIG; + if (last && (last->flags.STUB)) { + secure = false; /* don't trust forwarding for now */ +@@ -604,7 +614,8 @@ static int answer_finalize(struct kr_request *request, int state) + if (write_extra_ranked_records(&request->answ_selected, reorder, + answer, &secure, &answ_all_cnames)) + { +- return answer_fail(request); ++ answer_fail(request); ++ return; + } + } + +@@ -614,25 +625,29 @@ static int answer_finalize(struct kr_request *request, int state) + } + if (write_extra_ranked_records(&request->auth_selected, reorder, + answer, &secure, NULL)) { +- return answer_fail(request); ++ answer_fail(request); ++ return; + } + /* Write additional records. 
*/ + knot_pkt_begin(answer, KNOT_ADDITIONAL); + if (write_extra_records(&request->additional, reorder, answer)) { +- return answer_fail(request); ++ answer_fail(request); ++ return; + } + /* Write EDNS information */ + if (answer->opt_rr) { + if (request->qsource.flags.tls) { + if (answer_padding(request) != kr_ok()) { +- return answer_fail(request); ++ answer_fail(request); ++ return; + } + } + knot_pkt_begin(answer, KNOT_ADDITIONAL); + int ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE, + answer->opt_rr, KNOT_PF_FREE); + if (ret != KNOT_EOK) { +- return answer_fail(request); ++ answer_fail(request); ++ return; + } + } + +@@ -667,8 +682,6 @@ static int answer_finalize(struct kr_request *request, int state) + if (!secure) { + knot_wire_clear_ad(answer->wire); + } +- +- return kr_ok(); + } + + static int query_finalize(struct kr_request *request, struct kr_query *qry, knot_pkt_t *pkt) +@@ -852,7 +865,7 @@ static void update_nslist_score(struct kr_request *request, struct kr_query *qry + { + struct kr_context *ctx = request->ctx; + /* On successful answer, update preference list RTT and penalise timer */ +- if (request->state != KR_STATE_FAIL) { ++ if (!(request->state & KR_STATE_FAIL)) { + /* Update RTT information for preference list */ + update_nslist_rtt(ctx, qry, src); + /* Do not complete NS address resolution on soft-fail. */ +@@ -928,7 +941,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k + update_nslist_score(request, qry, src, packet); + } + /* Resolution failed, invalidate current NS. */ +- if (request->state == KR_STATE_FAIL) { ++ if (request->state & KR_STATE_FAIL) { + invalidate_ns(rplan, qry); + qry->flags.RESOLVED = false; + } +@@ -1283,7 +1296,7 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot + int state = KR_STATE_FAIL; + do { + state = ns_fetch_cut(qry, requested_name, request, packet); +- if (state == KR_STATE_DONE || state == KR_STATE_FAIL) { ++ if (state == KR_STATE_DONE || (state & KR_STATE_FAIL)) { + return state; + } else if (state == KR_STATE_CONSUME) { + requested_name = knot_wire_next_label(requested_name, NULL); +@@ -1353,7 +1366,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t + /* Resolve current query and produce dependent or finish */ + request->state = KR_STATE_PRODUCE; + ITERATE_LAYERS(request, qry, produce, packet); +- if (request->state != KR_STATE_FAIL && knot_wire_get_qr(packet->wire)) { ++ if (!(request->state & KR_STATE_FAIL) && knot_wire_get_qr(packet->wire)) { + /* Produced an answer from cache, consume it. */ + qry->secret = 0; + request->state = KR_STATE_CONSUME; +@@ -1541,7 +1554,7 @@ int kr_resolve_checkout(struct kr_request *request, const struct sockaddr *src, + * don't affect the resolution or rest of the processing. */ + int state = request->state; + ITERATE_LAYERS(request, qry, checkout, packet, dst, type); +- if (request->state == KR_STATE_FAIL) { ++ if (request->state & KR_STATE_FAIL) { + request->state = state; /* Restore */ + return kr_error(ECANCELED); + } +@@ -1589,25 +1602,24 @@ int kr_resolve_checkout(struct kr_request *request, const struct sockaddr *src, + + int kr_resolve_finish(struct kr_request *request, int state) + { ++ request->state = state; + /* Finalize answer and construct wire-buffer. 
*/ + ITERATE_LAYERS(request, NULL, answer_finalize); +- if (request->state == KR_STATE_FAIL) { +- state = KR_STATE_FAIL; +- } else if (answer_finalize(request, state) != 0) { +- state = KR_STATE_FAIL; +- } ++ answer_finalize(request); + +- /* Error during processing, internal failure */ +- if (state != KR_STATE_DONE) { +- knot_pkt_t *answer = request->answer; +- if (knot_wire_get_rcode(answer->wire) == KNOT_RCODE_NOERROR) { +- knot_wire_clear_ad(answer->wire); +- knot_wire_clear_aa(answer->wire); +- knot_wire_set_rcode(answer->wire, KNOT_RCODE_SERVFAIL); ++ /* Defensive style, in case someone has forgotten. ++ * Beware: non-empty answers do make sense even with SERVFAIL case, etc. */ ++ if (request->state != KR_STATE_DONE) { ++ uint8_t *wire = request->answer->wire; ++ switch (knot_wire_get_rcode(wire)) { ++ case KNOT_RCODE_NOERROR: ++ case KNOT_RCODE_NXDOMAIN: ++ knot_wire_clear_ad(wire); ++ knot_wire_clear_aa(wire); ++ knot_wire_set_rcode(wire, KNOT_RCODE_SERVFAIL); + } + } + +- request->state = state; + ITERATE_LAYERS(request, NULL, finish); + + #ifndef NOVERBOSELOG +diff --git a/lib/resolve.h b/lib/resolve.h +index 60d80bb..18372a3 100644 +--- a/lib/resolve.h ++++ b/lib/resolve.h +@@ -308,7 +308,7 @@ int kr_resolve_checkout(struct kr_request *request, const struct sockaddr *src, + * be destroyed, as it's owned by caller. + * + * @param request request state +- * @param state either DONE or FAIL state ++ * @param state either DONE or FAIL state (to be assigned to request->state) + * @return DONE + */ + KR_EXPORT +diff --git a/modules/cookies/cookiemonster.c b/modules/cookies/cookiemonster.c +index 7af7afa..6f00192 100644 +--- a/modules/cookies/cookiemonster.c ++++ b/modules/cookies/cookiemonster.c +@@ -423,7 +423,7 @@ int check_request(kr_layer_t *ctx) + /* Request has no server cookie. */ + return_state = invalid_sc_status(return_state, false, + ignore_badcookie, req, answer); +- if (return_state == KR_STATE_FAIL) { ++ if (return_state & KR_STATE_FAIL) { + return return_state; + } + goto answer_add_cookies; +@@ -449,7 +449,7 @@ int check_request(kr_layer_t *ctx) + /* Invalid server cookie. 
*/ + return_state = invalid_sc_status(return_state, true, + ignore_badcookie, req, answer); +- if (return_state == KR_STATE_FAIL) { ++ if (return_state & KR_STATE_FAIL) { + return return_state; + } + goto answer_add_cookies; +diff --git a/modules/hints/hints.c b/modules/hints/hints.c +index 84e2e99..f55e85b 100644 +--- a/modules/hints/hints.c ++++ b/modules/hints/hints.c +@@ -133,7 +133,7 @@ static int satisfy_forward(struct kr_zonecut *hints, knot_pkt_t *pkt, struct kr_ + static int query(kr_layer_t *ctx, knot_pkt_t *pkt) + { + struct kr_query *qry = ctx->req->current_query; +- if (!qry || ctx->state & (KR_STATE_FAIL)) { ++ if (!qry || (ctx->state & KR_STATE_FAIL)) { + return ctx->state; + } + diff --git a/debian/patches/CVE-2019-10191.patch b/debian/patches/CVE-2019-10191.patch new file mode 100644 index 0000000..1d4a4b5 --- /dev/null +++ b/debian/patches/CVE-2019-10191.patch @@ -0,0 +1,158 @@ +From: Markus Koschany <apo@debian.org> +Date: Mon, 11 Mar 2024 14:24:02 +0100 +Subject: CVE-2019-10191 + +Bug-Debian: https://bugs.debian.org/932048 +Origin: https://gitlab.labs.nic.cz/knot/knot-resolver/merge_requests/839 +--- + daemon/lua/kres-gen.lua | 1 + + daemon/worker.c | 5 ++++- + lib/cache/api.c | 4 +++- + lib/cache/impl.h | 3 ++- + lib/layer.h | 7 ++++++- + lib/layer/iterate.c | 11 ++++++++++- + lib/resolve.c | 2 ++ + lib/rplan.h | 2 ++ + 8 files changed, 30 insertions(+), 5 deletions(-) + +diff --git a/daemon/lua/kres-gen.lua b/daemon/lua/kres-gen.lua +index eeb8ff7..9e9f586 100644 +--- a/daemon/lua/kres-gen.lua ++++ b/daemon/lua/kres-gen.lua +@@ -120,6 +120,7 @@ struct kr_qflags { + _Bool DNS64_MARK : 1; + _Bool CACHE_TRIED : 1; + _Bool NO_NS_FOUND : 1; ++ _Bool PKT_IS_SANE : 1; + }; + typedef struct { + knot_rrset_t **at; +diff --git a/daemon/worker.c b/daemon/worker.c +index 117cc91..1235979 100644 +--- a/daemon/worker.c ++++ b/daemon/worker.c +@@ -1611,8 +1611,11 @@ int worker_submit(struct session *session, knot_pkt_t *query) + return kr_error(ENOMEM); + } + } else if (query) { /* response from upstream */ +- task = session_tasklist_del_msgid(session, knot_wire_get_id(query->wire)); ++ const uint16_t id = knot_wire_get_id(query->wire); ++ task = session_tasklist_del_msgid(session, id); + if (task == NULL) { ++ VERBOSE_MSG(NULL, "=> ignoring packet with mismatching ID %d\n", ++ (int)id); + return kr_error(ENOENT); + } + assert(!session_flags(session)->closing); +diff --git a/lib/cache/api.c b/lib/cache/api.c +index c0591d6..4c7f3d2 100644 +--- a/lib/cache/api.c ++++ b/lib/cache/api.c +@@ -408,7 +408,9 @@ int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt) + /* LATER(optim.): typically we also have corresponding NS record in the list, + * so we might save a cache operation. */ + +- stash_pkt(pkt, qry, req, has_optout); ++ if (qry->flags.PKT_IS_SANE && check_dname_for_lf(knot_pkt_qname(pkt), qry)) { ++ stash_pkt(pkt, qry, req, has_optout); ++ } + + finally: + if (unauth_cnt) { +diff --git a/lib/cache/impl.h b/lib/cache/impl.h +index a08f355..ffb5e67 100644 +--- a/lib/cache/impl.h ++++ b/lib/cache/impl.h +@@ -253,7 +253,8 @@ void entry_list_memcpy(struct entry_apex *ea, entry_list_t list); + /* Packet caching; implementation in ./entry_pkt.c */ + + /** Stash the packet into cache (if suitable, etc.) +- * \param has_optout whether the packet contains an opt-out NSEC3 */ ++ * \param has_optout whether the packet contains an opt-out NSEC3 ++ * It assumes check_dname_for_lf(). 
*/ + void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry, + const struct kr_request *req, bool has_optout); + +diff --git a/lib/layer.h b/lib/layer.h +index 0909cb7..011d279 100644 +--- a/lib/layer.h ++++ b/lib/layer.h +@@ -48,7 +48,12 @@ + enum kr_layer_state { + KR_STATE_CONSUME = 1 << 0, /*!< Consume data. */ + KR_STATE_PRODUCE = 1 << 1, /*!< Produce data. */ +- KR_STATE_DONE = 1 << 2, /*!< Finished successfully. */ ++ ++ /*! Finished successfully or a special case: in CONSUME phase this can ++ * be used (by iterator) to do a transition to PRODUCE phase again, ++ * in which case the packet wasn't accepted for some reason. */ ++ KR_STATE_DONE = 1 << 2, ++ + KR_STATE_FAIL = 1 << 3, /*!< Error. */ + KR_STATE_YIELD = 1 << 4, /*!< Paused, waiting for a sub-query. */ + }; +diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c +index ee2607e..feaadf0 100644 +--- a/lib/layer/iterate.c ++++ b/lib/layer/iterate.c +@@ -81,10 +81,14 @@ static bool is_paired_to_query(const knot_pkt_t *answer, struct kr_query *query) + uint16_t qtype = query->stype; + const knot_dname_t *qname = minimized_qname(query, &qtype); + ++ /* ID should already match, thanks to session_tasklist_del_msgid() ++ * in worker_submit(), but it won't hurt to check again. */ + return query->id == knot_wire_get_id(answer->wire) && +- knot_wire_get_qdcount(answer->wire) > 0 && ++ knot_wire_get_qdcount(answer->wire) == 1 && + query->sclass == knot_pkt_qclass(answer) && + qtype == knot_pkt_qtype(answer) && ++ /* qry->secret had been xor-applied to answer already, ++ * so this also checks for correctness of case randomization */ + knot_dname_is_equal(qname, knot_pkt_qname(answer)); + } + +@@ -1029,6 +1033,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt) + if (!query) { + return ctx->state; + } ++ query->flags.PKT_IS_SANE = false; + + WITH_VERBOSE(query) { + if (query->flags.TRACE) { +@@ -1072,6 +1077,10 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt) + return KR_STATE_CONSUME; + } + ++ /* If exiting above here, there's no sense to put it into packet cache. ++ * The most important part is to check for spoofing: is_paired_to_query() */ ++ query->flags.PKT_IS_SANE = true; ++ + #ifndef NOVERBOSELOG + const knot_lookup_t *rcode = knot_lookup_by_id(knot_rcode_names, knot_wire_get_rcode(pkt->wire)); + #endif +diff --git a/lib/resolve.c b/lib/resolve.c +index b4b3657..b771cdc 100644 +--- a/lib/resolve.c ++++ b/lib/resolve.c +@@ -150,6 +150,8 @@ static void randomized_qname_case(knot_dname_t * restrict qname, uint32_t secret + assert(qname); + const int len = knot_dname_size(qname) - 2; /* Skip first, last label. */ + for (int i = 0; i < len; ++i) { ++ /* Note: this relies on the fact that correct label lengths ++ * can't pass the isletter() test (by "luck"). */ + if (isletter(*++qname)) { + *qname ^= ((secret >> (i & 31)) & 1) * 0x20; + } +diff --git a/lib/rplan.h b/lib/rplan.h +index 21b0b0b..7e82b03 100644 +--- a/lib/rplan.h ++++ b/lib/rplan.h +@@ -64,6 +64,8 @@ struct kr_qflags { + bool DNS64_MARK : 1; /**< Internal mark for dns64 module. */ + bool CACHE_TRIED : 1; /**< Internal to cache module. */ + bool NO_NS_FOUND : 1; /**< No valid NS found during last PRODUCE stage. */ ++ bool PKT_IS_SANE : 1; /**< Set by iterator in consume phase to indicate whether ++ * some basic aspects of the packet are OK, e.g. QNAME. */ + }; + + /** Combine flags together. This means set union for simple flags. 
*/ diff --git a/debian/patches/CVE-2019-19331.patch b/debian/patches/CVE-2019-19331.patch new file mode 100644 index 0000000..67420ea --- /dev/null +++ b/debian/patches/CVE-2019-19331.patch @@ -0,0 +1,371 @@ +From: Markus Koschany <apo@debian.org> +Date: Mon, 11 Mar 2024 14:37:42 +0200 +Subject: CVE-2019-19331 + +Bug-Debian: https://bugs.debian.org/946181 +Origin: https://gitlab.nic.cz/knot/knot-resolver/-/commit/782682db6d4b17d9d1559a1d13ae718a07eb260e +--- + daemon/lua/kres-gen.lua | 2 + + daemon/lua/kres-gen.sh | 1 + + lib/cache/api.c | 1 + + lib/dnssec.c | 1 + + lib/layer/iterate.c | 17 +++++- + lib/resolve.c | 1 + + lib/utils.c | 143 +++++++++++++++++++++++++++++++++++++++++++++--- + lib/utils.h | 10 +++- + modules/dns64/dns64.lua | 1 + + 9 files changed, 167 insertions(+), 10 deletions(-) + +diff --git a/daemon/lua/kres-gen.lua b/daemon/lua/kres-gen.lua +index 9e9f586..a92288b 100644 +--- a/daemon/lua/kres-gen.lua ++++ b/daemon/lua/kres-gen.lua +@@ -135,6 +135,7 @@ struct ranked_rr_array_entry { + _Bool yielded : 1; + _Bool to_wire : 1; + _Bool expiring : 1; ++ _Bool in_progress : 1; + knot_rrset_t *rr; + }; + typedef struct ranked_rr_array_entry ranked_rr_array_entry_t; +@@ -310,6 +311,7 @@ int kr_bitcmp(const char *, const char *, int); + int kr_family_len(int); + struct sockaddr *kr_straddr_socket(const char *, int); + int kr_ranked_rrarray_add(ranked_rr_array_t *, const knot_rrset_t *, uint8_t, _Bool, uint32_t, knot_mm_t *); ++int kr_ranked_rrarray_finalize(ranked_rr_array_t *, uint32_t, knot_mm_t *); + void kr_qflags_set(struct kr_qflags *, struct kr_qflags); + void kr_qflags_clear(struct kr_qflags *, struct kr_qflags); + int kr_zonecut_add(struct kr_zonecut *, const knot_dname_t *, const void *, int); +diff --git a/daemon/lua/kres-gen.sh b/daemon/lua/kres-gen.sh +index 538fe23..6c4aa64 100755 +--- a/daemon/lua/kres-gen.sh ++++ b/daemon/lua/kres-gen.sh +@@ -163,6 +163,7 @@ EOF + kr_family_len + kr_straddr_socket + kr_ranked_rrarray_add ++ kr_ranked_rrarray_finalize + kr_qflags_set + kr_qflags_clear + kr_zonecut_add +diff --git a/lib/cache/api.c b/lib/cache/api.c +index 4c7f3d2..90aaf0b 100644 +--- a/lib/cache/api.c ++++ b/lib/cache/api.c +@@ -605,6 +605,7 @@ static int stash_rrarray_entry(ranked_rr_array_t *arr, int arr_i, + /* TODO: ATM we assume that some properties are the same + * for all RRSIGs in the set (esp. label count). */ + ranked_rr_array_entry_t *e = arr->at[j]; ++ assert(!e->in_progress); + bool ok = e->qry_uid == qry->uid && !e->cached + && e->rr->type == KNOT_RRTYPE_RRSIG + && knot_rrsig_type_covered(e->rr->rrs.rdata) == rr->type +diff --git a/lib/dnssec.c b/lib/dnssec.c +index 4f8ad8a..9973656 100644 +--- a/lib/dnssec.c ++++ b/lib/dnssec.c +@@ -447,6 +447,7 @@ int kr_dnssec_matches_name_and_type(const ranked_rr_array_t *rrs, uint32_t qry_u + int ret = kr_error(ENOENT); + for (size_t i = 0; i < rrs->len; ++i) { + const ranked_rr_array_entry_t *entry = rrs->at[i]; ++ assert(!entry->in_progress); + const knot_rrset_t *nsec = entry->rr; + if (entry->qry_uid != qry_uid || entry->yielded) { + continue; +diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c +index feaadf0..0423a3d 100644 +--- a/lib/layer/iterate.c ++++ b/lib/layer/iterate.c +@@ -1119,13 +1119,15 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt) + return resolve_error(pkt, req); + } + ++ int state; + /* Forwarding/stub mode is special. 
*/ + if (query->flags.STUB) { +- return process_stub(pkt, req); ++ state = process_stub(pkt, req); ++ goto rrarray_finalize; + } + + /* Resolve authority to see if it's referral or authoritative. */ +- int state = process_authority(pkt, req); ++ state = process_authority(pkt, req); + switch(state) { + case KR_STATE_CONSUME: /* Not referral, process answer. */ + VERBOSE_MSG("<= rcode: %s\n", rcode ? rcode->name : "??"); +@@ -1139,6 +1141,17 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt) + break; + } + ++rrarray_finalize: ++ /* Finish construction of libknot-format RRsets. */ ++ (void)0; ++ ranked_rr_array_t *selected[] = kr_request_selected(req); ++ for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) { ++ int ret = kr_ranked_rrarray_finalize(selected[i], query->uid, &req->pool); ++ if (unlikely(ret)) { ++ return KR_STATE_FAIL; ++ } ++ } ++ + return state; + } + +diff --git a/lib/resolve.c b/lib/resolve.c +index b771cdc..65b167f 100644 +--- a/lib/resolve.c ++++ b/lib/resolve.c +@@ -484,6 +484,7 @@ static int write_extra_ranked_records(const ranked_rr_array_t *arr, uint16_t reo + + for (size_t i = 0; i < arr->len; ++i) { + ranked_rr_array_entry_t * entry = arr->at[i]; ++ assert(!entry->in_progress); + if (!entry->to_wire) { + continue; + } +diff --git a/lib/utils.c b/lib/utils.c +index fe9ab03..2b093f6 100644 +--- a/lib/utils.c ++++ b/lib/utils.c +@@ -677,6 +677,11 @@ static int to_wire_ensure_unique(ranked_rr_array_t *array, size_t index) + return kr_ok(); + } + ++/* Implementation overview of _add() and _finalize(): ++ * - for rdata we just maintain a list of pointers (in knot_rrset_t::additional) ++ * - we only construct the final rdataset at the end (and thus more efficiently) ++ */ ++typedef array_t(knot_rdata_t *) rdata_array_t; + int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr, + uint8_t rank, bool to_wire, uint32_t qry_uid, knot_mm_t *pool) + { +@@ -691,47 +696,93 @@ int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr, + } + if (stashed->qry_uid != qry_uid) { + break; ++ /* We do not guarantee merging RRs "across" any point that switched ++ * to processing a different upstream packet (i.e. qry_uid). ++ * In particular, iterator never returns KR_STATE_YIELD. */ + } + if (!rrsets_match(stashed->rr, rr)) { + continue; + } + /* Found the entry to merge with. Check consistency and merge. */ +- bool ok = stashed->rank == rank && !stashed->cached; ++ bool ok = stashed->rank == rank && !stashed->cached && stashed->in_progress; + if (!ok) { + assert(false); + return kr_error(EEXIST); + } ++ /* assert(rr->rrs.count == 1); */ ++ /* ^^ shouldn't be a problem for this function, but it's probably a bug */ ++ + /* It may happen that an RRset is first considered useful + * (to_wire = false, e.g. due to being part of glue), + * and later we may find we also want it in the answer. */ + stashed->to_wire = stashed->to_wire || to_wire; + +- return knot_rdataset_merge(&stashed->rr->rrs, &rr->rrs, pool); ++ /* We just add the reference into this in_progress RRset. */ ++ rdata_array_t *ra = stashed->rr->additional; ++ if (ra == NULL) { ++ /* RRset not in array format yet -> convert it. 
*/ ++ ra = stashed->rr->additional = mm_alloc(pool, sizeof(*ra)); ++ if (!ra) { ++ return kr_error(ENOMEM); ++ } ++ array_init(*ra); ++ int ret = array_reserve_mm(*ra, stashed->rr->rrs.count + rr->rrs.count, ++ kr_memreserve, pool); ++ if (ret) { ++ return kr_error(ret); ++ } ++ knot_rdata_t *r_it = stashed->rr->rrs.rdata; ++ for (int ri = 0; ri < stashed->rr->rrs.count; ++ ++ri, r_it = knot_rdataset_next(r_it)) { ++ if (array_push(*ra, r_it) < 0) { ++ abort(); ++ } ++ } ++ } else { ++ int ret = array_reserve_mm(*ra, ra->len + rr->rrs.count, ++ kr_memreserve, pool); ++ if (ret) { ++ return kr_error(ret); ++ } ++ } ++ /* Append to the array. */ ++ knot_rdata_t *r_it = rr->rrs.rdata; ++ for (int ri = 0; ri < rr->rrs.count; ++ ++ri, r_it = knot_rdataset_next(r_it)) { ++ if (array_push(*ra, r_it) < 0) { ++ abort(); ++ } ++ } ++ return kr_ok(); + } + + /* No stashed rrset found, add */ + int ret = array_reserve_mm(*array, array->len + 1, kr_memreserve, pool); +- if (ret != 0) { +- return kr_error(ENOMEM); ++ if (ret) { ++ return kr_error(ret); + } + + ranked_rr_array_entry_t *entry = mm_alloc(pool, sizeof(ranked_rr_array_entry_t)); + if (!entry) { + return kr_error(ENOMEM); + } +- knot_rrset_t *copy = knot_rrset_copy(rr, pool); +- if (!copy) { ++ ++ knot_rrset_t *rr_new = knot_rrset_new(rr->owner, rr->type, rr->rclass, rr->ttl, pool); ++ if (!rr_new) { + mm_free(pool, entry); + return kr_error(ENOMEM); + } ++ rr_new->rrs = rr->rrs; ++ assert(rr_new->additional == NULL); + + entry->qry_uid = qry_uid; +- entry->rr = copy; ++ entry->rr = rr_new; + entry->rank = rank; + entry->revalidation_cnt = 0; + entry->cached = false; + entry->yielded = false; + entry->to_wire = to_wire; ++ entry->in_progress = true; + if (array_push(*array, entry) < 0) { + /* Silence coverity. It shouldn't be possible to happen, + * due to the array_reserve_mm call above. */ +@@ -742,6 +793,84 @@ int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr, + return to_wire_ensure_unique(array, array->len - 1); + } + ++/** Comparator for qsort() on an array of knot_data_t pointers. */ ++static int rdata_p_cmp(const void *rp1, const void *rp2) ++{ ++ /* Just correct types of the parameters and pass them dereferenced. */ ++ const knot_rdata_t ++ *const *r1 = rp1, ++ *const *r2 = rp2; ++ return knot_rdata_cmp(*r1, *r2); ++} ++int kr_ranked_rrarray_finalize(ranked_rr_array_t *array, uint32_t qry_uid, knot_mm_t *pool) ++{ ++ for (ssize_t array_i = array->len - 1; array_i >= 0; --array_i) { ++ ranked_rr_array_entry_t *stashed = array->at[array_i]; ++ if (stashed->qry_uid != qry_uid) { ++ continue; /* We apparently can't always short-cut the cycle. */ ++ } ++ if (!stashed->in_progress) { ++ continue; ++ } ++ rdata_array_t *ra = stashed->rr->additional; ++ if (!ra) { ++ /* No array, so we just need to copy the rdataset. */ ++ knot_rdataset_t *rds = &stashed->rr->rrs; ++ knot_rdataset_t tmp = *rds; ++ int ret = knot_rdataset_copy(rds, &tmp, pool); ++ if (ret) { ++ return kr_error(ret); ++ } ++ } else { ++ /* Multiple RRs; first: sort the array. */ ++ stashed->rr->additional = NULL; ++ qsort(ra->at, ra->len, sizeof(ra->at[0]), rdata_p_cmp); ++ /* Prune duplicates: NULL all except the last instance. */ ++ int dup_count = 0; ++ for (int i = 0; i + 1 < ra->len; ++i) { ++ if (knot_rdata_cmp(ra->at[i], ra->at[i + 1]) == 0) { ++ ra->at[i] = NULL; ++ ++dup_count; ++ QRVERBOSE(NULL, "iter", "deleted duplicate RR\n"); ++ } ++ } ++ /* Prepare rdataset, except rdata contents. 
*/ ++ int size_sum = 0; ++ for (int i = 0; i < ra->len; ++i) { ++ if (ra->at[i]) { ++ size_sum += knot_rdata_size(ra->at[i]->len); ++ } ++ } ++ knot_rdataset_t *rds = &stashed->rr->rrs; ++ rds->count = ra->len - dup_count; ++ #if KNOT_VERSION_HEX >= 0x020900 ++ rds->size = size_sum; ++ #endif ++ if (size_sum) { ++ rds->rdata = mm_alloc(pool, size_sum); ++ if (!rds->rdata) { ++ return kr_error(ENOMEM); ++ } ++ } else { ++ rds->rdata = NULL; ++ } ++ /* Everything is ready; now just copy all the rdata. */ ++ uint8_t *raw_it = (uint8_t *)rds->rdata; ++ for (int i = 0; i < ra->len; ++i) { ++ if (ra->at[i] && size_sum/*linters*/) { ++ const int size = knot_rdata_size(ra->at[i]->len); ++ memcpy(raw_it, ra->at[i], size); ++ raw_it += size; ++ } ++ } ++ assert(raw_it == (uint8_t *)rds->rdata + size_sum); ++ } ++ stashed->in_progress = false; ++ } ++ return kr_ok(); ++} ++ ++ + int kr_ranked_rrarray_set_wire(ranked_rr_array_t *array, bool to_wire, + uint32_t qry_uid, bool check_dups, + bool (*extraCheck)(const ranked_rr_array_entry_t *)) +diff --git a/lib/utils.h b/lib/utils.h +index 21eabac..3040e66 100644 +--- a/lib/utils.h ++++ b/lib/utils.h +@@ -165,6 +165,7 @@ struct ranked_rr_array_entry { + bool yielded : 1; + bool to_wire : 1; /**< whether to be put into the answer */ + bool expiring : 1; /**< low remaining TTL; see is_expiring; only used in cache ATM */ ++ bool in_progress : 1; /**< build of RRset in progress, i.e. different format of RR data */ + knot_rrset_t *rr; + }; + typedef struct ranked_rr_array_entry ranked_rr_array_entry_t; +@@ -351,10 +352,17 @@ KR_EXPORT + int kr_rrkey(char *key, uint16_t class, const knot_dname_t *owner, + uint16_t type, uint16_t additional); + +-/** @internal Add RRSet copy to ranked RR array. */ ++/** Add RRSet copy to a ranked RR array. ++ * ++ * To convert to standard RRs inside, you need to call _finalize() afterwards, ++ * and the memory of rr->rrs.rdata has to remain until then. ++ */ + KR_EXPORT + int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr, + uint8_t rank, bool to_wire, uint32_t qry_uid, knot_mm_t *pool); ++/** Finalize in_progress sets - all with matching qry_uid. */ ++KR_EXPORT ++int kr_ranked_rrarray_finalize(ranked_rr_array_t *array, uint32_t qry_uid, knot_mm_t *pool); + + /** @internal Mark the RRSets from particular query as + * "have (not) to be recorded in the final answer". 
+diff --git a/modules/dns64/dns64.lua b/modules/dns64/dns64.lua +index 8e3eaa0..8700661 100644 +--- a/modules/dns64/dns64.lua ++++ b/modules/dns64/dns64.lua +@@ -98,6 +98,7 @@ function M.layer.consume(state, req, pkt) + req.pool) + end + end ++ ffi.C.kr_ranked_rrarray_finalize(req.answ_selected, qry.uid, req.pool); + end + + return M diff --git a/debian/patches/CVE-2020-12667-part1.patch b/debian/patches/CVE-2020-12667-part1.patch new file mode 100644 index 0000000..e8505f1 --- /dev/null +++ b/debian/patches/CVE-2020-12667-part1.patch @@ -0,0 +1,90 @@ +From: Markus Koschany <apo@debian.org> +Date: Mon, 11 Mar 2024 16:40:42 +0200 +Subject: CVE-2020-12667 part1 + +Bug-Debian: https://bugs.debian.org/961076 +Origin: https://gitlab.labs.nic.cz/knot/knot-resolver/-/commit/54f05e4d7b2e47c0bdd30b84272fc503cc65304b +--- + daemon/lua/kres-gen.lua | 1 + + lib/defines.h | 1 + + lib/resolve.c | 15 +++++++++++---- + lib/resolve.h | 1 + + 4 files changed, 14 insertions(+), 4 deletions(-) + +diff --git a/daemon/lua/kres-gen.lua b/daemon/lua/kres-gen.lua +index a92288b..ad2f40e 100644 +--- a/daemon/lua/kres-gen.lua ++++ b/daemon/lua/kres-gen.lua +@@ -200,6 +200,7 @@ struct kr_request { + knot_mm_t pool; + unsigned int uid; + void *daemon_context; ++ unsigned int count_no_nsaddr; + }; + enum kr_rank {KR_RANK_INITIAL, KR_RANK_OMIT, KR_RANK_TRY, KR_RANK_INDET = 4, KR_RANK_BOGUS, KR_RANK_MISMATCH, KR_RANK_MISSING, KR_RANK_INSECURE, KR_RANK_AUTH = 16, KR_RANK_SECURE = 32}; + struct kr_cache { +diff --git a/lib/defines.h b/lib/defines.h +index 84da059..aa3a349 100644 +--- a/lib/defines.h ++++ b/lib/defines.h +@@ -64,6 +64,7 @@ static inline int KR_COLD kr_error(int x) { + #define KR_CNAME_CHAIN_LIMIT 40 /* Built-in maximum CNAME chain length */ + #define KR_TIMEOUT_LIMIT 4 /* Maximum number of retries after timeout. */ + #define KR_QUERY_NSRETRY_LIMIT 4 /* Maximum number of retries per query. */ ++#define KR_COUNT_NO_NSADDR_LIMIT 5 + + /* + * Defines. +diff --git a/lib/resolve.c b/lib/resolve.c +index 65b167f..7e1f334 100644 +--- a/lib/resolve.c ++++ b/lib/resolve.c +@@ -306,10 +306,10 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name + return KR_STATE_PRODUCE; + } + +-static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param) ++static int ns_resolve_addr(struct kr_query *qry, struct kr_request *req) + { +- struct kr_rplan *rplan = ¶m->rplan; +- struct kr_context *ctx = param->ctx; ++ struct kr_rplan *rplan = &req->rplan; ++ struct kr_context *ctx = req->ctx; + + + /* Start NS queries from root, to avoid certain cases +@@ -340,7 +340,9 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param) + return kr_error(EAGAIN); + } + /* No IPv4 nor IPv6, flag server as unusable. 
*/ +- VERBOSE_MSG(qry, "=> unresolvable NS address, bailing out\n"); ++ ++req->count_no_nsaddr; ++ VERBOSE_MSG(qry, "=> unresolvable NS address, bailing out (counter: %u)\n", ++ req->count_no_nsaddr); + qry->ns.reputation |= KR_NS_NOIP4 | KR_NS_NOIP6; + kr_nsrep_update_rep(&qry->ns, qry->ns.reputation, ctx->cache_rep); + invalidate_ns(rplan, qry); +@@ -1408,6 +1410,11 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t + + ns_election: + ++ if (unlikely(request->count_no_nsaddr >= KR_COUNT_NO_NSADDR_LIMIT)) { ++ VERBOSE_MSG(qry, "=> too many unresolvable NSs, bail out " ++ "(mitigation for NXNSAttack CVE-2020-12667)\n"); ++ return KR_STATE_FAIL; ++ } + /* If the query has already selected a NS and is waiting for IPv4/IPv6 record, + * elect best address only, otherwise elect a completely new NS. + */ +diff --git a/lib/resolve.h b/lib/resolve.h +index 18372a3..5900a8c 100644 +--- a/lib/resolve.h ++++ b/lib/resolve.h +@@ -233,6 +233,7 @@ struct kr_request { + knot_mm_t pool; + unsigned int uid; /** for logging purposes only */ + void *daemon_context; /** pointer to worker from daemon. Can be used in modules. */ ++ unsigned int count_no_nsaddr; + }; + + /** Initializer for an array of *_selected. */ diff --git a/debian/patches/CVE-2020-12667-part2.patch b/debian/patches/CVE-2020-12667-part2.patch new file mode 100644 index 0000000..7ca9173 --- /dev/null +++ b/debian/patches/CVE-2020-12667-part2.patch @@ -0,0 +1,86 @@ +From: Markus Koschany <apo@debian.org> +Date: Mon, 11 Mar 2024 16:41:21 +0200 +Subject: CVE-2020-12667 part2 + +Bug-Debian: https://bugs.debian.org/961076 +Origin: https://gitlab.labs.nic.cz/knot/knot-resolver/-/commit/ba7b89db780fe3884b4e90090318e25ee5afb118 +--- + daemon/lua/kres-gen.lua | 1 + + lib/defines.h | 1 + + lib/layer/iterate.c | 3 ++- + lib/resolve.c | 11 +++++++++++ + lib/resolve.h | 1 + + 5 files changed, 16 insertions(+), 1 deletion(-) + +diff --git a/daemon/lua/kres-gen.lua b/daemon/lua/kres-gen.lua +index ad2f40e..5867f0f 100644 +--- a/daemon/lua/kres-gen.lua ++++ b/daemon/lua/kres-gen.lua +@@ -201,6 +201,7 @@ struct kr_request { + unsigned int uid; + void *daemon_context; + unsigned int count_no_nsaddr; ++ unsigned int count_fail_row; + }; + enum kr_rank {KR_RANK_INITIAL, KR_RANK_OMIT, KR_RANK_TRY, KR_RANK_INDET = 4, KR_RANK_BOGUS, KR_RANK_MISMATCH, KR_RANK_MISSING, KR_RANK_INSECURE, KR_RANK_AUTH = 16, KR_RANK_SECURE = 32}; + struct kr_cache { +diff --git a/lib/defines.h b/lib/defines.h +index aa3a349..2eee6cf 100644 +--- a/lib/defines.h ++++ b/lib/defines.h +@@ -65,6 +65,7 @@ static inline int KR_COLD kr_error(int x) { + #define KR_TIMEOUT_LIMIT 4 /* Maximum number of retries after timeout. */ + #define KR_QUERY_NSRETRY_LIMIT 4 /* Maximum number of retries per query. */ + #define KR_COUNT_NO_NSADDR_LIMIT 5 ++#define KR_CONSUME_FAIL_ROW_LIMIT 3 /* Maximum number of KR_STATE_FAIL in a row. */ + + /* + * Defines. +diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c +index 0423a3d..40087a1 100644 +--- a/lib/layer/iterate.c ++++ b/lib/layer/iterate.c +@@ -875,7 +875,8 @@ static int process_stub(knot_pkt_t *pkt, struct kr_request *req) + } + + +-/** Error handling, RFC1034 5.3.3, 4d. */ ++/** Error handling, RFC1034 5.3.3, 4d. ++ * NOTE: returing this does not prevent further queries (by itself). 
*/ + static int resolve_error(knot_pkt_t *pkt, struct kr_request *req) + { + return KR_STATE_FAIL; +diff --git a/lib/resolve.c b/lib/resolve.c +index 7e1f334..fbe1aab 100644 +--- a/lib/resolve.c ++++ b/lib/resolve.c +@@ -951,6 +951,17 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k + qry->flags.RESOLVED = false; + } + ++ /* For multiple errors in a row; invalidate_ns() is not enough. */ ++ if (!qry->flags.CACHED) { ++ if (request->state & KR_STATE_FAIL) { ++ if (++request->count_fail_row > KR_CONSUME_FAIL_ROW_LIMIT) { ++ return KR_STATE_FAIL; ++ } ++ } else { ++ request->count_fail_row = 0; ++ } ++ } ++ + /* Pop query if resolved. */ + if (request->state == KR_STATE_YIELD) { + return KR_STATE_PRODUCE; /* Requery */ +diff --git a/lib/resolve.h b/lib/resolve.h +index 5900a8c..4d8b385 100644 +--- a/lib/resolve.h ++++ b/lib/resolve.h +@@ -234,6 +234,7 @@ struct kr_request { + unsigned int uid; /** for logging purposes only */ + void *daemon_context; /** pointer to worker from daemon. Can be used in modules. */ + unsigned int count_no_nsaddr; ++ unsigned int count_fail_row; + }; + + /** Initializer for an array of *_selected. */ diff --git a/debian/patches/series b/debian/patches/series index e78c925..9f3fb06 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -2,4 +2,9 @@ 0002-avoid-invocations-of-git-during-make-installcheck.patch 0003-Avoid-clobbering-CXX-flags-when-compiling-lua-aho-co.patch CVE-2022-40188.patch +CVE-2019-10190.patch +CVE-2019-10191.patch +CVE-2019-19331.patch +CVE-2020-12667-part1.patch +CVE-2020-12667-part2.patch progress-linux/0001-kresd-restart.patch |
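A recurring change in CVE-2019-10190.patch is replacing equality tests such as `state == KR_STATE_FAIL` with bit tests `state & KR_STATE_FAIL`: the `enum kr_layer_state` values are bit flags (`1 << 0`, `1 << 1`, ...), so a failing state that carries additional bits would slip past an equality comparison. A minimal standalone sketch of that distinction (not knot-resolver code; the enum below only mirrors the shape of `kr_layer_state` as shown in the patch):

```c
#include <stdio.h>

enum layer_state {              /* mirrors the shape of enum kr_layer_state */
	STATE_CONSUME = 1 << 0,
	STATE_PRODUCE = 1 << 1,
	STATE_DONE    = 1 << 2,
	STATE_FAIL    = 1 << 3,
	STATE_YIELD   = 1 << 4,
};

static int is_fail_eq(int state)  { return state == STATE_FAIL; }        /* misses combined states */
static int is_fail_bit(int state) { return (state & STATE_FAIL) != 0; }  /* what the patch switches to */

int main(void)
{
	int state = STATE_FAIL | STATE_DONE;   /* hypothetical combined state */
	printf("equality check: %d, bit check: %d\n",
	       is_fail_eq(state), is_fail_bit(state));   /* prints: 0, 1 */
	return 0;
}
```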
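CVE-2019-10191.patch tightens `is_paired_to_query()`, which, as the comment next to `randomized_qname_case()` notes, also verifies the "0x20" case randomization: the resolver XORs a per-query secret into the case bits of the outgoing QNAME and expects the answer to echo exactly the same mixed-case name. A minimal sketch of the idea, applied here to a plain ASCII string rather than a wire-format dname (`toy_name` and `toy_secret` are made-up values, not resolver API):

```c
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Flip the case bit (0x20) of each letter according to one bit of the secret. */
static void randomize_case(char *name, uint32_t secret)
{
	size_t len = strlen(name);
	for (size_t i = 0; i < len; ++i) {
		if (isalpha((unsigned char)name[i])) {
			name[i] ^= ((secret >> (i & 31)) & 1) * 0x20;
		}
	}
}

int main(void)
{
	char toy_name[] = "example.net";
	const uint32_t toy_secret = 0x5ad1c0de;
	randomize_case(toy_name, toy_secret);
	printf("sent:     %s\n", toy_name);    /* mixed-case variant goes on the wire */
	randomize_case(toy_name, toy_secret);  /* same transform restores the original, */
	printf("restored: %s\n", toy_name);    /* so a response can be matched case-sensitively */
	return 0;
}
```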
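CVE-2019-19331.patch changes `kr_ranked_rrarray_add()` to only collect rdata pointers and defers building the final rdataset to `kr_ranked_rrarray_finalize()`, which sorts the collected pointers once and discards adjacent duplicates (keeping the last instance). A minimal sketch of that sort-then-dedup pattern on plain integers, standing in for `knot_rdata_t` pointers (not the actual library code):

```c
#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;
	return (x > y) - (x < y);
}

int main(void)
{
	int rdata[] = { 7, 3, 7, 1, 3, 9 };        /* toy "rdata" values, collected cheaply */
	size_t n = sizeof(rdata) / sizeof(rdata[0]);

	qsort(rdata, n, sizeof(rdata[0]), cmp_int); /* one sort at finalize time */

	size_t out = 0;                             /* compact in place, dropping duplicates */
	for (size_t i = 0; i < n; ++i) {
		if (i + 1 < n && rdata[i] == rdata[i + 1])
			continue;                   /* duplicate: keep only the last copy */
		rdata[out++] = rdata[i];
	}

	for (size_t i = 0; i < out; ++i)
		printf("%d ", rdata[i]);            /* prints: 1 3 7 9 */
	printf("\n");
	return 0;
}
```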