Diffstat
-rw-r--r--  debian/.gitlab-ci.yml                  9
-rw-r--r--  debian/changelog                      11
-rw-r--r--  debian/gbp.conf                       10
-rw-r--r--  debian/patches/CVE-2022-40188.patch  160
-rw-r--r--  debian/patches/series                  1
-rw-r--r--  debian/tests/control                   2
6 files changed, 182 insertions, 11 deletions
diff --git a/debian/.gitlab-ci.yml b/debian/.gitlab-ci.yml
new file mode 100644
index 0000000..4816152
--- /dev/null
+++ b/debian/.gitlab-ci.yml
@@ -0,0 +1,9 @@
+include:
+ - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/recipes/debian.yml
+
+variables:
+ RELEASE: 'buster'
+ SALSA_CI_COMPONENTS: 'main contrib non-free'
+ SALSA_CI_DISABLE_REPROTEST: 1
+ SALSA_CI_DISABLE_LINTIAN: 1
+ SALSA_CI_DISABLE_PIUPARTS: 1
diff --git a/debian/changelog b/debian/changelog
index 4af50d4..4db2798 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,14 @@
+knot-resolver (3.2.1-3+deb10u1) buster-security; urgency=high
+
+ * CVE-2022-40188: Prevent an issue where remote attackers could cause
+ a denial of service via CPU consumption by exploiting algorithmic
+ complexity; during an attack, an authoritative server would return large
+ nameserver or address sets.
+ * Mark one of the autopkgtests as "flaky"; we still want to run it.
+ * Add debian/.gitlab-ci.yml, but don't test piuparts.
+
+ -- Chris Lamb <lamby@debian.org> Fri, 07 Oct 2022 10:17:02 -0700
+
knot-resolver (3.2.1-3) unstable; urgency=medium

* knot-resolver-module-http is arch: all, not arch: any
diff --git a/debian/gbp.conf b/debian/gbp.conf
deleted file mode 100644
index ff17343..0000000
--- a/debian/gbp.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[DEFAULT]
-debian-branch = debian/master
-debian-tag = debian/%(version)s
-upstream-branch = upstream
-upstream-tag = upstream/%(version)s
-upstream-vcs-tag = v%(version)s
-pristine-tar = True
-
-[dch]
-meta = 1
diff --git a/debian/patches/CVE-2022-40188.patch b/debian/patches/CVE-2022-40188.patch
new file mode 100644
index 0000000..29edd74
--- /dev/null
+++ b/debian/patches/CVE-2022-40188.patch
@@ -0,0 +1,160 @@
+From: Chris Lamb <lamby@debian.org>
+Date: Thu, 6 Oct 2022 14:12:26 -0700
+Subject: [PATCH] lib/zonecut + iterator: limit large NS sets
+
+From f6577a20e493c7fbdac124d7544bf1846b084185 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Vladim=C3=ADr=20=C4=8Cun=C3=A1t?= <vladimir.cunat@nic.cz>
+Date: Wed, 17 Aug 2022 16:34:06 +0200
+Subject: [PATCH] lib/zonecut + iterator: limit large NS sets
+
+It's a mitigation for CVE-2022-40188 and similar DoS attempts.
+It's using really trivial approaches, at least for now.
+---
+ lib/layer/iterate.c | 15 +++++++++++++++
+ lib/zonecut.c | 33 +++++++++++++++++++++++++++++++--
+ lib/zonecut.h | 2 ++
+ 3 files changed, 48 insertions(+), 2 deletions(-)
+
+diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
+index cf57cc5..f7dd630 100644
+--- a/lib/layer/iterate.c
++++ b/lib/layer/iterate.c
+@@ -193,6 +193,8 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query, int *gl
+ return KR_STATE_CONSUME;
+ }
+
++enum { GLUE_COUNT_THROTTLE = 26 };
++
+ /** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc.
+ *
+ * \param glue_cnt the number of accepted addresses (to be incremented)
+@@ -227,6 +229,10 @@ static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, bool in_bailiwic
+ continue;
+ }
+ (void) update_nsaddr(rr, req->current_query, glue_cnt);
++ /* If we reach limit on total glue addresses,
++ * we only load the first one per NS name (the one just above). */
++ if (*glue_cnt > GLUE_COUNT_THROTTLE)
++ break;
+ }
+ }
+ }
+@@ -427,6 +433,7 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
+ const knot_dname_t *current_zone_cut = qry->zone_cut.name;
+ bool ns_record_exists = false;
+ int glue_cnt = 0;
++ int ns_count = 0;
+ /* Update zone cut information. */
+ for (unsigned i = 0; i < ns->count; ++i) {
+ const knot_rrset_t *rr = knot_pkt_rr(ns, i);
+@@ -438,6 +445,11 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
+ case KR_STATE_FAIL: return state; break;
+ default: /* continue */ break;
+ }
++
++ if (++ns_count >= 13) {
++ VERBOSE_MSG("<= authority: many glue NSs, skipping the rest\n");
++ break;
++ }
+ } else if (rr->type == KNOT_RRTYPE_SOA
+ && knot_dname_in_bailiwick(rr->owner, qry->zone_cut.name) > 0) {
+ /* SOA below cut in authority indicates different authority,
+@@ -465,6 +477,9 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
+ VERBOSE_MSG("<= loaded %d glue addresses\n", glue_cnt);
+ }
+
++ if (glue_cnt > GLUE_COUNT_THROTTLE) {
++ VERBOSE_MSG("<= (some may have been omitted due to being too many)\n");
++ }
+
+ if ((qry->flags.DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
+ if (knot_wire_get_aa(pkt->wire) == 0 &&
+diff --git a/lib/zonecut.c b/lib/zonecut.c
+index 5e54d97..cc25062 100644
+--- a/lib/zonecut.c
++++ b/lib/zonecut.c
+@@ -299,6 +299,7 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut)
+
+ /** Fetch address for zone cut. Any rank is accepted (i.e. glue as well). */
+ static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t rrtype,
++ int *addr_budget,
+ knot_mm_t *mm_pool, const struct kr_query *qry)
+ // LATER(optim.): excessive data copying
+ {
+@@ -332,6 +333,12 @@ static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t
+ return AI_UNKNOWN;
+ }
+
++ *addr_budget -= cached_rr.rrs.count - 1;
++ if (*addr_budget < 0) {
++ cached_rr.rrs.count += *addr_budget;
++ *addr_budget = 0;
++ }
++
+ /* Reserve memory in *addrs. Implementation detail:
+ * pack_t cares for lengths, so we don't store those in the data. */
+ const size_t pack_extra_size = knot_rdataset_size(&cached_rr.rrs)
+@@ -406,6 +413,20 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
+ return ret;
+ }
+
++ /* Consider at most 13 first NSs (like root). It's a trivial approach
++ * to limit our resources when choosing NSs. Otherwise DoS might be viable.
++ * We're not aware of any reasonable use case for having many NSs. */
++ if (ns_rds.count > 13) {
++ auto_free char *name_txt = kr_dname_text(name);
++ VERBOSE_MSG(qry, "NS %s too large, reducing from %d names\n",
++ name_txt, (int)ns_rds.count);
++ ns_rds.count = 13;
++ }
++ /* Also trivially limit the total address count:
++ * first A and first AAAA are for free per NS,
++ * but the rest get a shared small limit and get skipped if exhausted. */
++ int addr_budget = 8;
++
+ /* Insert name servers for this zone cut, addresses will be looked up
+ * on-demand (either from cache or iteratively) */
+ bool all_bad = true; /**< All NSs (seen so far) are in a bad state. */
+@@ -431,10 +452,10 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
+ unsigned reputation = (cached) ? *cached : 0;
+ infos[0] = (reputation & KR_NS_NOIP4) || qry->flags.NO_IPV4
+ ? AI_REPUT
+- : fetch_addr(*pack, ns_name, KNOT_RRTYPE_A, cut->pool, qry);
++ : fetch_addr(*pack, ns_name, KNOT_RRTYPE_A, &addr_budget, cut->pool, qry);
+ infos[1] = (reputation & KR_NS_NOIP6) || qry->flags.NO_IPV6
+ ? AI_REPUT
+- : fetch_addr(*pack, ns_name, KNOT_RRTYPE_AAAA, cut->pool, qry);
++ : fetch_addr(*pack, ns_name, KNOT_RRTYPE_AAAA, &addr_budget, cut->pool, qry);
+
+ #if 0 /* rather unlikely to be useful unless changing some zcut code */
+ WITH_VERBOSE(qry) {
+@@ -480,6 +501,14 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
+ VERBOSE_MSG(qry, "cut %s: all NSs bad, count = %d\n",
+ name_txt, (int)ns_rds.count);
+ } }
++
++ assert(addr_budget >= 0);
++ if (addr_budget <= 0) {
++ auto_free char *name_txt = kr_dname_text(name);
++ VERBOSE_MSG(qry, "NS %s have too many addresses together, reduced\n",
++ name_txt);
++ }
++
+ knot_rdataset_clear(&ns_rds, cut->pool);
+ return all_bad ? ELOOP : kr_ok();
+ }
+diff --git a/lib/zonecut.h b/lib/zonecut.h
+index 2808c66..70dd4df 100644
+--- a/lib/zonecut.h
++++ b/lib/zonecut.h
+@@ -151,6 +151,8 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut);
+ /**
+ * Populate zone cut address set from cache.
+ *
++ * The size is limited to avoid possibility of doing too much CPU work.
++ *
+ * @param ctx resolution context (to fetch data from LRU caches)
+ * @param cut zone cut to be populated
+ * @param name QNAME to start finding zone cut for
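For readers skimming the patch above: it bounds the per-delegation work in a few trivial ways. In lib/layer/iterate.c, process_authority() stops considering NS records after 13 of them and fetch_glue() stops loading extra glue once GLUE_COUNT_THROTTLE (26) addresses have been accepted; in lib/zonecut.c, fetch_ns() considers at most the first 13 NS names from cache and gives each cached RR set its first address for free, charging any further addresses to a shared budget of 8. The standalone C program below is only an illustrative sketch of that budget clamp, not code from the patch; NS_LIMIT, ADDR_BUDGET and take_addrs() are hypothetical names and not part of the knot-resolver API.

#include <stdio.h>

enum {
	NS_LIMIT    = 13, /* at most 13 NS names per zone cut (as in fetch_ns) */
	ADDR_BUDGET = 8   /* shared budget for addresses beyond the first per RR set */
};

/* Decide how many addresses of one cached RR set may be kept.  The first
 * address is free; the rest are charged to *budget and are dropped once the
 * budget is exhausted -- the same clamp the patch adds to fetch_addr(). */
static int take_addrs(int rr_count, int *budget)
{
	if (rr_count <= 0)
		return 0;
	*budget -= rr_count - 1;
	if (*budget < 0) {
		rr_count += *budget;  /* drop what we cannot afford */
		*budget = 0;
	}
	return rr_count;
}

int main(void)
{
	/* Hypothetical delegation: 5 NS names, each offering 5 addresses. */
	int offered[] = { 5, 5, 5, 5, 5 };
	int ns_count = (int)(sizeof(offered) / sizeof(offered[0]));
	int budget = ADDR_BUDGET;
	int kept = 0;

	if (ns_count > NS_LIMIT)
		ns_count = NS_LIMIT;

	for (int i = 0; i < ns_count; ++i)
		kept += take_addrs(offered[i], &budget);

	/* 25 addresses offered, but only 5 (one free per NS) + 8 (budget) = 13 kept. */
	printf("kept %d of 25 offered addresses, budget left: %d\n", kept, budget);
	return 0;
}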
diff --git a/debian/patches/series b/debian/patches/series
index 815a16c..825dbcf 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,3 +1,4 @@
0001-Update-documentation-of-keyfile-ro.patch
0002-avoid-invocations-of-git-during-make-installcheck.patch
0003-Avoid-clobbering-CXX-flags-when-compiling-lua-aho-co.patch
+CVE-2022-40188.patch
diff --git a/debian/tests/control b/debian/tests/control
index 975fc1e..1afc914 100644
--- a/debian/tests/control
+++ b/debian/tests/control
@@ -3,7 +3,7 @@ Depends:
lua-cqueues,
@,
@builddeps@,
-Restrictions: allow-stderr
+Restrictions: allow-stderr, flaky

Tests: roundtrip
Depends: