From: Chris Lamb <lamby@debian.org>
Date: Thu, 6 Oct 2022 14:12:26 -0700
Subject: [PATCH] lib/zonecut + iterator: limit large NS sets

From f6577a20e493c7fbdac124d7544bf1846b084185 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Vladim=C3=ADr=20=C4=8Cun=C3=A1t?= <vladimir.cunat@nic.cz>
Date: Wed, 17 Aug 2022 16:34:06 +0200
Subject: [PATCH] lib/zonecut + iterator: limit large NS sets

This is a mitigation for CVE-2022-40188 and similar DoS attempts.
It uses trivial approaches, at least for now.
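
The limits are simple: process_authority() considers at most 13 NS records
and, once GLUE_COUNT_THROTTLE glue addresses have been accepted, keeps only
the first address per NS name; fetch_ns() likewise caps the NS set at 13 and
puts any cached addresses beyond the first A and first AAAA per NS under a
small shared budget.  The standalone C sketch below (hypothetical names such
as clamp_addr_count; not part of the change itself) illustrates how that
shared budget behaves:

    /* Illustration of the shared address-budget logic added to fetch_addr():
     * each NS keeps its first cached address "for free"; any further
     * addresses draw from one small budget shared across the zone cut. */
    #include <stdio.h>

    enum { ADDR_BUDGET = 8 };

    /* How many of `available` (>= 1) addresses of one NS/type we keep. */
    static int clamp_addr_count(int available, int *budget)
    {
        int keep = available;
        *budget -= available - 1;   /* the first address is free */
        if (*budget < 0) {
            keep += *budget;        /* drop the overdraft */
            *budget = 0;
        }
        return keep;
    }

    int main(void)
    {
        int cached_per_ns[] = { 1, 4, 4, 4, 4 };
        int budget = ADDR_BUDGET;
        for (unsigned i = 0; i < sizeof(cached_per_ns)/sizeof(cached_per_ns[0]); ++i) {
            int kept = clamp_addr_count(cached_per_ns[i], &budget);
            printf("NS #%u: cached %d, kept %d, budget left %d\n",
                   i, cached_per_ns[i], kept, budget);
        }
        return 0;
    }

Even once the budget is exhausted, every call still keeps at least one
address, so resolution keeps working; only the excess is dropped.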
---
 lib/layer/iterate.c | 15 +++++++++++++++
 lib/zonecut.c       | 33 +++++++++++++++++++++++++++++++--
 lib/zonecut.h       |  2 ++
 3 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index cf57cc5..f7dd630 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -193,6 +193,8 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query, int *gl
 	return KR_STATE_CONSUME;
 }
 
+enum { GLUE_COUNT_THROTTLE = 26 };
+
 /** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc.
  *
  * \param glue_cnt the number of accepted addresses (to be incremented)
@@ -227,6 +229,10 @@ static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, bool in_bailiwic
 				continue;
 			}
 			(void) update_nsaddr(rr, req->current_query, glue_cnt);
+			/* If we reach limit on total glue addresses,
+			 * we only load the first one per NS name (the one just above). */
+			if (*glue_cnt > GLUE_COUNT_THROTTLE)
+				break;
 		}
 	}
 }
@@ -427,6 +433,7 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 	const knot_dname_t *current_zone_cut = qry->zone_cut.name;
 	bool ns_record_exists = false;
 	int glue_cnt = 0;
+	int ns_count = 0;
 	/* Update zone cut information. */
 	for (unsigned i = 0; i < ns->count; ++i) {
 		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
@@ -438,6 +445,11 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 			case KR_STATE_FAIL: return state; break;
 			default:              /* continue */ break;
 			}
+
+			if (++ns_count >= 13) {
+				VERBOSE_MSG("<= authority: many glue NSs, skipping the rest\n");
+				break;
+			}
 		} else if (rr->type == KNOT_RRTYPE_SOA
 			   && knot_dname_in_bailiwick(rr->owner, qry->zone_cut.name) > 0) {
 			/* SOA below cut in authority indicates different authority,
@@ -465,6 +477,9 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 		VERBOSE_MSG("<= loaded %d glue addresses\n", glue_cnt);
 	}
 
+	if (glue_cnt > GLUE_COUNT_THROTTLE) {
+		VERBOSE_MSG("<= (some may have been omitted due to being too many)\n");
+	}
 
 	if ((qry->flags.DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
 		if (knot_wire_get_aa(pkt->wire) == 0 &&
diff --git a/lib/zonecut.c b/lib/zonecut.c
index 5e54d97..cc25062 100644
--- a/lib/zonecut.c
+++ b/lib/zonecut.c
@@ -299,6 +299,7 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut)
 
 /** Fetch address for zone cut.  Any rank is accepted (i.e. glue as well). */
 static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t rrtype,
+				 int *addr_budget,
 				 knot_mm_t *mm_pool, const struct kr_query *qry)
 // LATER(optim.): excessive data copying
 {
@@ -332,6 +333,12 @@ static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t
 		return AI_UNKNOWN;
 	}
 
+	*addr_budget -= cached_rr.rrs.count - 1;
+	if (*addr_budget < 0) {
+		cached_rr.rrs.count += *addr_budget;
+		*addr_budget = 0;
+	}
+
 	/* Reserve memory in *addrs.  Implementation detail:
 	 * pack_t cares for lengths, so we don't store those in the data. */
 	const size_t pack_extra_size = knot_rdataset_size(&cached_rr.rrs)
@@ -406,6 +413,20 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
 		return ret;
 	}
 
+	/* Consider at most 13 first NSs (like root).  It's a trivial approach
+	 * to limit our resources when choosing NSs.  Otherwise DoS might be viable.
+	 * We're not aware of any reasonable use case for having many NSs. */
+	if (ns_rds.count > 13) {
+		auto_free char *name_txt = kr_dname_text(name);
+		VERBOSE_MSG(qry, "NS %s too large, reducing from %d names\n",
+				name_txt, (int)ns_rds.count);
+		ns_rds.count = 13;
+	}
+	/* Also trivially limit the total address count:
+	 * first A and first AAAA are for free per NS,
+	 * but the rest get a shared small limit and get skipped if exhausted. */
+	int addr_budget = 8;
+
 	/* Insert name servers for this zone cut, addresses will be looked up
 	 * on-demand (either from cache or iteratively) */
 	bool all_bad = true; /**< All NSs (seen so far) are in a bad state. */
@@ -431,10 +452,10 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
 		unsigned reputation = (cached) ? *cached : 0;
 		infos[0] = (reputation & KR_NS_NOIP4) || qry->flags.NO_IPV4
 			? AI_REPUT
-			: fetch_addr(*pack, ns_name, KNOT_RRTYPE_A, cut->pool, qry);
+			: fetch_addr(*pack, ns_name, KNOT_RRTYPE_A, &addr_budget, cut->pool, qry);
 		infos[1] = (reputation & KR_NS_NOIP6) || qry->flags.NO_IPV6
 			? AI_REPUT
-			: fetch_addr(*pack, ns_name, KNOT_RRTYPE_AAAA, cut->pool, qry);
+			: fetch_addr(*pack, ns_name, KNOT_RRTYPE_AAAA, &addr_budget, cut->pool, qry);
 
 		#if 0 /* rather unlikely to be useful unless changing some zcut code */
 		WITH_VERBOSE(qry) {
@@ -480,6 +501,14 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
 		VERBOSE_MSG(qry, "cut %s: all NSs bad, count = %d\n",
 				name_txt, (int)ns_rds.count);
 	} }
+
+	assert(addr_budget >= 0);
+	if (addr_budget <= 0) {
+		auto_free char *name_txt = kr_dname_text(name);
+		VERBOSE_MSG(qry, "NS %s have too many addresses together, reduced\n",
+				name_txt);
+	}
+
 	knot_rdataset_clear(&ns_rds, cut->pool);
 	return all_bad ? ELOOP : kr_ok();
 }
diff --git a/lib/zonecut.h b/lib/zonecut.h
index 2808c66..70dd4df 100644
--- a/lib/zonecut.h
+++ b/lib/zonecut.h
@@ -151,6 +151,8 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut);
 /**
  * Populate zone cut address set from cache.
  *
+ * The size is limited to avoid possibility of doing too much CPU work.
+ *
  * @param ctx       resolution context (to fetch data from LRU caches)
  * @param cut       zone cut to be populated
  * @param name      QNAME to start finding zone cut for