author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-06-05 04:15:13 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-06-05 04:15:13 +0000
commit      28229892456560eae4adb8f45428cbb0efb96cf9 (patch)
tree        884b9a5c59416787bdb5e24ddb447735b22b3b5b /lib
parent      Adding upstream version 5.7.2. (diff)
Adding upstream version 5.7.3. (upstream/5.7.3, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--   lib/cache/api.c          |  2
-rw-r--r--   lib/cache/peek.c         |  3
-rw-r--r--   lib/dnssec.c             |  2
-rw-r--r--   lib/dnssec/nsec.c        | 20
-rw-r--r--   lib/dnssec/nsec3.c       | 12
-rw-r--r--   lib/dnssec/signature.c   |  2
-rw-r--r--   lib/dnssec/ta.c          | 13
-rw-r--r--   lib/generic/array.h      | 14
-rw-r--r--   lib/generic/lru.h        |  5
-rw-r--r--   lib/generic/queue.c      | 10
-rw-r--r--   lib/generic/queue.h      |  6
-rw-r--r--   lib/generic/trie.c       |  4
-rw-r--r--   lib/layer/iterate.c      |  7
-rw-r--r--   lib/layer/validate.c     |  2
-rw-r--r--   lib/log.c                |  2
-rw-r--r--   lib/resolve.c            | 22
-rw-r--r--   lib/selection.c          |  2
-rw-r--r--   lib/utils.c              |  7
-rw-r--r--   lib/utils.h              |  6
-rw-r--r--   lib/zonecut.c            |  2
20 files changed, 87 insertions, 56 deletions
diff --git a/lib/cache/api.c b/lib/cache/api.c
index f71a8d0..7327903 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -517,7 +517,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 		goto return_needs_pkt;
 	const knot_dname_t *encloser = rr->owner; /**< the closest encloser name */
 	for (int i = 0; i < wild_labels; ++i) {
-		encloser = knot_wire_next_label(encloser, NULL);
+		encloser = knot_dname_next_label(encloser);
 	}
 	/* Construct the key under which RRs will be stored,
diff --git a/lib/cache/peek.c b/lib/cache/peek.c
index e1901ac..f0bb79c 100644
--- a/lib/cache/peek.c
+++ b/lib/cache/peek.c
@@ -174,6 +174,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
 					knot_db_val_bound(v), new_ttl);
 		return ret == kr_ok() ? KR_STATE_DONE : ctx->state;
 		}
+	default:; // Continue below
 	}
 
 	/* We have to try proving from NSEC*. */
@@ -359,7 +360,7 @@ static int peek_encloser(
 	/** Name of the closest (provable) encloser. */
 	const knot_dname_t *clencl_name = qry->sname;
 	for (int l = sname_labels; l > clencl_labels; --l)
-		clencl_name = knot_wire_next_label(clencl_name, NULL);
+		clencl_name = knot_dname_next_label(clencl_name);
 
 	/**** 3. source of synthesis checks, in case the next closer name was covered.
 	 **** 3a. We want to query for NSEC* of source of synthesis (SS) or its
diff --git a/lib/dnssec.c b/lib/dnssec.c
index 12b8f20..eb4b33b 100644
--- a/lib/dnssec.c
+++ b/lib/dnssec.c
@@ -362,7 +362,7 @@ static int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
 	const int covered_labels = knot_dname_labels(covered->owner, NULL)
 				- knot_dname_is_wildcard(covered->owner);
 
-	for (uint16_t i = 0; i < vctx->rrs->len; ++i) {
+	for (size_t i = 0; i < vctx->rrs->len; ++i) {
 		/* Consider every RRSIG that matches and comes from the same query. */
 		const knot_rrset_t *rrsig = vctx->rrs->at[i]->rr;
 		const bool ok = vctx->rrs->at[i]->qry_uid == vctx->qry_uid
diff --git a/lib/dnssec/nsec.c b/lib/dnssec/nsec.c
index 8b17247..be34d92 100644
--- a/lib/dnssec/nsec.c
+++ b/lib/dnssec/nsec.c
@@ -16,7 +16,7 @@
 #include "lib/defines.h"
 #include "lib/dnssec/nsec.h"
 #include "lib/utils.h"
-#include "resolve.h"
+#include "lib/resolve.h"
 
 int kr_nsec_children_in_zone_check(const uint8_t *bm, uint16_t bm_size)
 {
@@ -81,15 +81,13 @@ static int dname_cmp(const knot_dname_t *d1, const knot_dname_t *d2)
 	dname_reverse(d1, d1_len, d1_rev_arr);
 	dname_reverse(d2, d2_len, d2_rev_arr);
 
-	int res = 0;
-	while (res == 0 && d1_rev != NULL) {
-		res = lf_cmp(d1_rev, d2_rev);
-		d1_rev = knot_wire_next_label(d1_rev, NULL);
-		d2_rev = knot_wire_next_label(d2_rev, NULL);
-	}
-
-	kr_require(res != 0 || d2_rev == NULL);
-	return res;
+	do {
+		int res = lf_cmp(d1_rev, d2_rev);
+		if (res != 0 || d1_rev[0] == '\0')
+			return res;
+		d1_rev = knot_dname_next_label(d1_rev);
+		d2_rev = knot_dname_next_label(d2_rev);
+	} while (true);
 }
 
@@ -251,7 +249,7 @@ int kr_nsec_negative(const ranked_rr_array_t *rrrs, uint32_t qry_uid,
 	ssynth[1] = '*';
 	const knot_dname_t *clencl = sname;
 	for (int l = sname_labels; l > clencl_labels; --l)
-		clencl = knot_wire_next_label(clencl, NULL);
+		clencl = knot_dname_next_label(clencl);
 	(void)!!knot_dname_store(&ssynth[2], clencl);
 
 	// Try to (dis)prove the source of synthesis by a covering or matching NSEC.
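All of the hunks above, and most of those below, replace knot_wire_next_label(), which signals the end of a name by returning NULL, with knot_dname_next_label(), available natively in libknot 3.4.0 and newer (see the lib/utils.h hunk near the end of this diff). The newer call must not be stepped past the root label, so the loops are rewritten to test for the root explicitly. A minimal sketch of the resulting idiom; nth_ancestor() is a hypothetical helper, not code from this commit:

    #include <libknot/dname.h>

    /* Strip up to `strip` leading labels from a dname, stopping cleanly at the root. */
    static const knot_dname_t *nth_ancestor(const knot_dname_t *name, int strip)
    {
        for (int i = 0; i < strip; ++i) {
            if (name[0] == '\0')          /* root label reached, nothing left to strip */
                break;
            name = knot_dname_next_label(name);
        }
        return name;
    }

Callers that used to rely on the NULL return, as the old dname_cmp() loop did, now either know the label counts in advance or check name[0] before stepping, exactly like the rewritten loops in lib/dnssec/nsec.c.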
diff --git a/lib/dnssec/nsec3.c b/lib/dnssec/nsec3.c
index 4199f25..4ff2750 100644
--- a/lib/dnssec/nsec3.c
+++ b/lib/dnssec/nsec3.c
@@ -143,7 +143,7 @@ static int closest_encloser_match(int *flags, const knot_rrset_t *nsec3,
 		goto fail;
 	}
 
-	const knot_dname_t *encloser = knot_wire_next_label(name, NULL);
+	const knot_dname_t *encloser = knot_dname_next_label(name);
 	*skipped = 1;
 
 	/* Avoid doing too much work on SHA1, mitigating:
@@ -154,7 +154,7 @@ static int closest_encloser_match(int *flags, const knot_rrset_t *nsec3,
 	const int max_labels = knot_dname_labels(nsec3->owner, NULL) - 1
 				+ kr_nsec3_max_depth(&params);
 	for (int l = knot_dname_labels(encloser, NULL); l > max_labels; --l) {
-		encloser = knot_wire_next_label(encloser, NULL);
+		encloser = knot_dname_next_label(encloser);
 		++(*skipped);
 	}
 
@@ -174,7 +174,7 @@ static int closest_encloser_match(int *flags, const knot_rrset_t *nsec3,
 		if (!encloser[0])
 			break;
-		encloser = knot_wire_next_label(encloser, NULL);
+		encloser = knot_dname_next_label(encloser);
 		++(*skipped);
 	}
 
@@ -404,7 +404,7 @@ static int closest_encloser_proof(const knot_pkt_t *pkt,
 	for (unsigned j = 0; j < skipped; ++j) {
 		if (kr_fails_assert(next_closer[0]))
 			return kr_error(EINVAL);
-		next_closer = knot_wire_next_label(next_closer, NULL);
+		next_closer = knot_dname_next_label(next_closer);
 	}
 	for (unsigned j = 0; j < sec->count; ++j) {
 		const knot_rrset_t *rrset_j = knot_pkt_rr(sec, j);
@@ -425,7 +425,7 @@ static int closest_encloser_proof(const knot_pkt_t *pkt,
 	if ((flags & FLG_CLOSEST_PROVABLE_ENCLOSER) && (flags & FLG_NAME_COVERED)
 	    && next_closer) {
 		if (encloser_name && next_closer[0])
-			*encloser_name = knot_wire_next_label(next_closer, NULL);
+			*encloser_name = knot_dname_next_label(next_closer);
 		if (matching_encloser_nsec3)
 			*matching_encloser_nsec3 = matching;
 		if (covering_next_nsec3)
@@ -569,7 +569,7 @@ int kr_nsec3_wildcard_answer_response_check(const knot_pkt_t *pkt, knot_section_
 	for (int i = 0; i < trim_to_next; ++i) {
 		if (kr_fails_assert(sname[0]))
 			return kr_error(EINVAL);
-		sname = knot_wire_next_label(sname, NULL);
+		sname = knot_dname_next_label(sname);
 	}
 
 	int flags = 0;
diff --git a/lib/dnssec/signature.c b/lib/dnssec/signature.c
index aadb5cb..12ed09e 100644
--- a/lib/dnssec/signature.c
+++ b/lib/dnssec/signature.c
@@ -208,7 +208,7 @@ static int sign_ctx_add_records(dnssec_sign_ctx_t *ctx, const knot_rrset_t *cove
 	for (int j = 0; j < trim_labels; ++j) {
 		if (kr_fails_assert(beginp[0]))
 			return kr_error(EINVAL);
-		beginp = (uint8_t *) knot_wire_next_label(beginp, NULL);
+		beginp = (uint8_t *) knot_dname_next_label(beginp);
 		if (kr_fails_assert(beginp))
 			return kr_error(EFAULT);
 	}
diff --git a/lib/dnssec/ta.c b/lib/dnssec/ta.c
index becf7d8..13659c1 100644
--- a/lib/dnssec/ta.c
+++ b/lib/dnssec/ta.c
@@ -28,9 +28,9 @@ const knot_dname_t * kr_ta_closest(const struct kr_context *ctx, const knot_dnam
 	kr_require(ctx && name);
 	if (type == KNOT_RRTYPE_DS && name[0] != '\0') {
 		/* DS is parent-side record, so the parent name needs to be covered. */
-		name = knot_wire_next_label(name, NULL);
+		name = knot_dname_next_label(name);
 	}
-	while (name) {
+	do {
 		struct kr_context *ctx_nc = (struct kr_context *)/*const-cast*/ctx;
 		if (kr_ta_get(ctx_nc->trust_anchors, name)) {
 			return name;
@@ -38,9 +38,12 @@ const knot_dname_t * kr_ta_closest(const struct kr_context *ctx, const knot_dnam
 		if (kr_ta_get(ctx_nc->negative_anchors, name)) {
 			return NULL;
 		}
-		name = knot_wire_next_label(name, NULL);
-	}
-	return NULL;
+		if (name[0] == '\0') {
+			return NULL;
+		} else {
+			name = knot_dname_next_label(name);
+		}
+	} while (true);
 }
 
 /* @internal Create DS from DNSKEY, caller MUST free dst if successful. */
diff --git a/lib/generic/array.h b/lib/generic/array.h
index 9f35118..9bea546 100644
--- a/lib/generic/array.h
+++ b/lib/generic/array.h
@@ -113,7 +113,7 @@ static inline void array_std_free(void *baton, void *p)
  * Mempool usage: pass kr_memreserve and a knot_mm_t* .
  * @return 0 if success, <0 on failure */
 #define array_reserve_mm(array, n, reserve, baton) \
-	(reserve)((baton), (void **) &(array).at, sizeof((array).at[0]), (n), &(array).cap)
+	(reserve)((baton), (void **) &(array).at, array_member_size((array)), (n), &(array).cap)
 
 /**
  * Push value at the end of the array, resize it if necessary.
@@ -122,9 +122,9 @@ static inline void array_std_free(void *baton, void *p)
  * @return element index on success, <0 on failure */
 #define array_push_mm(array, val, reserve, baton) \
-	(int)((array).len < (array).cap ? ((array).at[(array).len] = val, (array).len++) \
+	(int)((array).len < (array).cap ? ((array).at[(array).len] = (val), (array).len++) \
 		: (array_reserve_mm(array, ((array).cap + 1), reserve, baton) < 0 ? -1 \
-		: ((array).at[(array).len] = val, (array).len++)))
+		: ((array).at[(array).len] = (val), (array).len++)))
 
 /**
  * Push value at the end of the array, resize it if necessary (plain malloc/free).
@@ -152,6 +152,12 @@ static inline void array_std_free(void *baton, void *p)
  * @warning Undefined if the array is empty. */
 #define array_tail(array) \
-	(array).at[(array).len - 1]
+	(array).at[(array).len - 1]
+
+/**
+ * Return the size of a singular member in the array.
+ */
+#define array_member_size(array) \
+	(sizeof((array).at[0])) // NOLINT(bugprone-sizeof-expression): usually a false-positive
 
 /** @} */
diff --git a/lib/generic/lru.h b/lib/generic/lru.h
index 448c1b9..1c1dd81 100644
--- a/lib/generic/lru.h
+++ b/lib/generic/lru.h
@@ -130,7 +130,10 @@
 #define lru_get_new(table, key_, len_, is_new) \
 	(__typeof__((table)->pdata_t)) \
 		lru_get_impl(&(table)->lru, (key_), (len_), \
-			sizeof(*(table)->pdata_t), true, is_new)
+			lru_member_size((table)), true, is_new)
+
+#define lru_member_size(table) \
+	(sizeof(*(table)->pdata_t)) // NOLINT(bugprone-sizeof-expression): usually a false-positive
 
 /**
  * @brief Apply a function to every item in LRU.
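The new array_member_size() and lru_member_size() helpers (and queue_member_size() just below) mostly centralize the element-size computation so that a single NOLINT covers it. clang-tidy's bugprone-sizeof-expression warns whenever sizeof is applied to an expression of pointer type, because that is usually an accident; in these containers the elements themselves are often pointers, so the pointer size is exactly what is wanted. A small, self-contained illustration of the accident the check hunts for, with purely illustrative names that are not from this codebase:

    #include <stdlib.h>
    #include <string.h>

    struct item { char name[32]; int weight; };

    void sizeof_pitfall(void)
    {
        struct item *a = malloc(16 * sizeof(a[0]));   /* correct: size of one element */
        struct item *b = malloc(16 * sizeof(b));      /* the accident: only the size
                                                       * of the pointer is allocated  */
        if (a != NULL)
            memset(a, 0, 16 * sizeof(a[0]));          /* the pattern the macros wrap  */
        free(a);
        free(b);
    }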
diff --git a/lib/generic/queue.c b/lib/generic/queue.c
index 5bed153..29609dd 100644
--- a/lib/generic/queue.c
+++ b/lib/generic/queue.c
@@ -62,7 +62,7 @@ void * queue_push_impl(struct queue *q)
 		if (t->begin * 2 >= t->cap) {
 			/* Utilization is below 50%, so let's shift (no overlap).
 			 * (size_t cast is to avoid unintended sign-extension) */
-			memcpy(t->data, t->data + t->begin * q->item_size,
+			memcpy(t->data, t->data + t->begin * (size_t)q->item_size,
 				(size_t) (t->end - t->begin) * (size_t) q->item_size);
 			t->end -= t->begin;
 			t->begin = 0;
@@ -76,7 +76,7 @@ void * queue_push_impl(struct queue *q)
 	kr_require(t->end < t->cap);
 	++(q->len);
 	++(t->end);
-	return t->data + q->item_size * (t->end - 1);
+	return t->data + (size_t)q->item_size * (t->end - 1);
 }
 
 /* Return pointer to the space for the new element. */
@@ -98,8 +98,8 @@ void * queue_push_head_impl(struct queue *q)
 		 * Computations here are simplified due to h->begin == 0.
 		 * (size_t cast is to avoid unintended sign-extension) */
 		const int cnt = h->end;
-		memcpy(h->data + (h->cap - cnt) * q->item_size, h->data,
-			(size_t) cnt * (size_t) q->item_size);
+		memcpy(h->data + ((size_t)h->cap - cnt) * q->item_size, h->data,
+			(size_t)cnt * (size_t)q->item_size);
 		h->begin = h->cap - cnt;
 		h->end = h->cap;
 	} else {
@@ -113,7 +113,7 @@ void * queue_push_head_impl(struct queue *q)
 	kr_require(h->begin > 0);
 	--(h->begin);
 	++(q->len);
-	return h->data + q->item_size * h->begin;
+	return h->data + (size_t)q->item_size * h->begin;
 }
 
 void queue_pop_impl(struct queue *q)
diff --git a/lib/generic/queue.h b/lib/generic/queue.h
index 3fa52ce..fc2a86f 100644
--- a/lib/generic/queue.h
+++ b/lib/generic/queue.h
@@ -71,7 +71,7 @@
 /** @brief Initialize a queue. You can malloc() it the usual way. */
 #define queue_init(q) do { \
 	(void)(((__typeof__(((q).pdata_t)))0) == (void *)0); /* typecheck queue_t */ \
-	queue_init_impl(&(q).queue, sizeof(*(q).pdata_t)); \
+	queue_init_impl(&(q).queue, queue_member_size((q))); \
 } while (false)
 
 /** @brief De-initialize a queue: make it invalid and free any inner allocations. */
@@ -105,6 +105,10 @@
 #define queue_len(q) \
 	((const size_t)(q).queue.len)
 
+/** @brief Return the size of a single element in the queue. */
+#define queue_member_size(q) \
+	(sizeof(*(q).pdata_t)) // NOLINT(bugprone-sizeof-expression): usually a false-positive
+
 /** @brief Type for queue iterator, parametrized by value type.
  * It's a simple structure that owns no other resources.
diff --git a/lib/generic/trie.c b/lib/generic/trie.c
index f9aceda..21254eb 100644
--- a/lib/generic/trie.c
+++ b/lib/generic/trie.c
@@ -470,6 +470,10 @@ static int ns_longer_alloc(nstack_t *ns)
 		memcpy(st, ns->stack, ns->len * sizeof(node_t *));
 	} else {
 		st = realloc(ns->stack, new_size);
+		if (st == NULL) {
+			free(ns->stack); // left behind by realloc, callers bail out
+			ns->stack = NULL;
+		}
 	}
 	if (st == NULL)
 		return KNOT_ENOMEM;
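The lib/generic/trie.c hunk applies the usual realloc() discipline: on failure realloc() returns NULL but leaves the original block allocated, so overwriting the only pointer with the return value would leak it. A minimal sketch of that pattern on its own; grow_buffer() is a hypothetical helper, not code from the tree:

    #include <stdlib.h>

    /* Grow *buf to new_size. On failure the old buffer is released and *buf
     * set to NULL, mirroring what ns_longer_alloc() now does with ns->stack. */
    static int grow_buffer(char **buf, size_t new_size)
    {
        char *tmp = realloc(*buf, new_size);
        if (tmp == NULL) {
            free(*buf);        /* realloc() left the old block behind */
            *buf = NULL;
            return -1;
        }
        *buf = tmp;
        return 0;
    }

Freeing is the right choice here only because the callers bail out entirely on KNOT_ENOMEM; code that still needs the old contents would instead keep *buf untouched and just report the failure.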
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index dfb7c87..656bc2d 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -51,7 +51,7 @@ static const knot_dname_t *minimized_qname(struct kr_query *query, uint16_t *qty
 	int cut_labels = knot_dname_labels(query->zone_cut.name, NULL);
 	int qname_labels = knot_dname_labels(qname, NULL);
 	while(qname[0] && qname_labels > cut_labels + 1) {
-		qname = knot_wire_next_label(qname, NULL);
+		qname = knot_dname_next_label(qname);
 		qname_labels -= 1;
 	}
 
@@ -825,7 +825,10 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
 		}
 	} else if (!query->parent) {
 		/* Answer for initial query */
-		const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0);
+		const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0)
+			/* We need to cover the case of positive wildcard answer
+			 * with over-limit NSEC3 iterations. */
+			|| query->flags.DNSSEC_WEXPAND;
 		state = pick_authority(pkt, req, to_wire);
 		if (state != kr_ok()) {
 			return KR_STATE_FAIL;
diff --git a/lib/layer/validate.c b/lib/layer/validate.c
index 3bdb205..af20b2e 100644
--- a/lib/layer/validate.c
+++ b/lib/layer/validate.c
@@ -709,7 +709,7 @@ static int check_validation_result(kr_layer_t *ctx, const knot_pkt_t *pkt, ranke
 			invalid_entry = entry;
 			break;
 		} else if (kr_rank_test(entry->rank, KR_RANK_MISSING) &&
-				!invalid_entry) {
+				!invalid_entry) { // NOLINT(bugprone-branch-clone)
 			invalid_entry = entry;
 		} else if (kr_rank_test(entry->rank, KR_RANK_OMIT)) {
 			continue;
diff --git a/lib/log.c b/lib/log.c
--- a/lib/log.c
+++ b/lib/log.c
@@ -124,7 +124,7 @@ void kr_log_fmt(enum kr_log_group group, kr_log_level_t level, const char *file,
 		}
 
 		va_start(args, fmt);
-		vfprintf(stream, fmt, args);
+		(void)vfprintf(stream, fmt, args);
 		va_end(args);
 	}
 }
diff --git a/lib/resolve.c b/lib/resolve.c
index e24a40b..d8198c3 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -184,7 +184,7 @@ static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct k
 	 * otherwise this would risk leaking information to parent if the NODATA TTD > zone cut TTD. */
 	int labels = knot_dname_labels(target, NULL) - knot_dname_labels(cut_name, NULL);
 	while (target[0] && labels > 2) {
-		target = knot_wire_next_label(target, NULL);
+		target = knot_dname_next_label(target);
 		--labels;
 	}
 	for (int i = 0; i < labels; ++i) {
@@ -196,7 +196,7 @@ static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct k
 			break;
 		}
 		kr_assert(target[0]);
-		target = knot_wire_next_label(target, NULL);
+		target = knot_dname_next_label(target);
 	}
 	kr_cache_commit(cache);
 #endif
@@ -853,6 +853,8 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
 	if (transport && !qry->flags.CACHED) {
 		if (!(request->state & KR_STATE_FAIL)) {
 			/* Do not complete NS address resolution on soft-fail. */
+			if (kr_fails_assert(packet->wire))
+				return KR_STATE_FAIL;
 			const int rcode = knot_wire_get_rcode(packet->wire);
 			if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) {
 				qry->flags.AWAIT_IPV6 = false;
@@ -886,7 +888,7 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
 	}
 
 	/* Pop query if resolved. */
-	if (request->state == KR_STATE_YIELD) {
+	if (request->state == KR_STATE_YIELD) { // NOLINT(bugprone-branch-clone)
 		return KR_STATE_PRODUCE; /* Requery */
 	} else if (qry->flags.RESOLVED) {
 		kr_rplan_pop(rplan, qry);
@@ -1006,7 +1008,7 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
 	int cut_labels = knot_dname_labels(qry->zone_cut.name, NULL);
 	int wanted_name_labels = knot_dname_labels(wanted_name, NULL);
 	while (wanted_name[0] && wanted_name_labels > cut_labels + name_offset) {
-		wanted_name = knot_wire_next_label(wanted_name, NULL);
+		wanted_name = knot_dname_next_label(wanted_name);
 		wanted_name_labels -= 1;
 	}
 	minimized = (wanted_name != qry->sname);
@@ -1051,7 +1053,7 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
 
 	/* set `nods` */
 	if ((qry->stype == KNOT_RRTYPE_DS) &&
-	    knot_dname_is_equal(wanted_name, qry->sname)) {
+	    knot_dname_is_equal(wanted_name, qry->sname)) { // NOLINT(bugprone-branch-clone)
 		nods = true;
 	} else if (resume && !ds_req) {
 		nods = false;
@@ -1232,11 +1234,11 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
 		const knot_dname_t *parent = qry->parent->zone_cut.name;
 		if (parent[0] != '\0'
 		    && knot_dname_in_bailiwick(qry->sname, parent) >= 0) {
-			requested_name = knot_wire_next_label(parent, NULL);
+			requested_name = knot_dname_next_label(parent);
 		}
-	} else if ((qry->stype == KNOT_RRTYPE_DS) && (qry->sname[0] != '\0')) {
+	} else if ((qry->stype == KNOT_RRTYPE_DS) && (requested_name[0] != '\0')) {
 		/* If this is explicit DS query, start from encloser too. */
-		requested_name = knot_wire_next_label(requested_name, NULL);
+		requested_name = knot_dname_next_label(requested_name);
 	}
 
 	int state = KR_STATE_FAIL;
@@ -1245,7 +1247,8 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
 		if (state == KR_STATE_DONE || (state & KR_STATE_FAIL)) {
 			return state;
 		} else if (state == KR_STATE_CONSUME) {
-			requested_name = knot_wire_next_label(requested_name, NULL);
+			kr_require(requested_name[0] != '\0');
+			requested_name = knot_dname_next_label(requested_name);
 		}
 	} while (state == KR_STATE_CONSUME);
 
@@ -1611,6 +1614,7 @@ int kr_resolve_finish(struct kr_request *request, int state)
 			knot_wire_clear_ad(wire);
 			knot_wire_clear_aa(wire);
 			knot_wire_set_rcode(wire, KNOT_RCODE_SERVFAIL);
+		default:; // Do nothing
 		}
 	}
 }
diff --git a/lib/selection.c b/lib/selection.c
index c25782e..cce5d42 100644
--- a/lib/selection.c
+++ b/lib/selection.c
@@ -149,7 +149,7 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
 
 	knot_db_val_t key = cache_key(ip, len);
 
-	if (cache->api->read(db, stats, &key, &value, 1)) {
+	if (cache->api->read(db, stats, &key, &value, 1)) { // NOLINT(bugprone-branch-clone)
 		state = default_rtt_state;
 	} else if (kr_fails_assert(value.len == sizeof(struct rtt_state))) {
 		// shouldn't happen but let's be more robust
diff --git a/lib/utils.c b/lib/utils.c
index 8b7e127..2a0635e 100644
--- a/lib/utils.c
+++ b/lib/utils.c
@@ -921,9 +921,8 @@ int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr,
 static int rdata_p_cmp(const void *rp1, const void *rp2)
 {
 	/* Just correct types of the parameters and pass them dereferenced. */
-	const knot_rdata_t
-		*const *r1 = rp1,
-		*const *r2 = rp2;
+	const knot_rdata_t *const *r1 = (const knot_rdata_t *const *)rp1;
+	const knot_rdata_t *const *r2 = (const knot_rdata_t *const *)rp2;
 	return knot_rdata_cmp(*r1, *r2);
 }
 int kr_ranked_rrarray_finalize(ranked_rr_array_t *array, uint32_t qry_uid, knot_mm_t *pool)
@@ -948,7 +947,7 @@ int kr_ranked_rrarray_finalize(ranked_rr_array_t *array, uint32_t qry_uid, knot_
 		} else {
 			/* Multiple RRs; first: sort the array. */
 			stashed->rr->additional = NULL;
-			qsort(ra->at, ra->len, sizeof(ra->at[0]), rdata_p_cmp);
+			qsort((void *)ra->at, ra->len, array_member_size(*ra), rdata_p_cmp);
 			/* Prune duplicates: NULL all except the last instance. */
 			int dup_count = 0;
 			for (int i = 0; i + 1 < ra->len; ++i) {
diff --git a/lib/utils.h b/lib/utils.h
index fab13fe..e03b473 100644
--- a/lib/utils.h
+++ b/lib/utils.h
@@ -616,4 +616,10 @@ static inline size_t kr_dname_prefixlen(const uint8_t *name, unsigned nlabels)
 #endif
 	);
 }
 
+#if KNOT_VERSION_HEX < 0x030400
+static inline const knot_dname_t * knot_dname_next_label(const knot_dname_t *dname)
+{
+	return knot_wire_next_label(dname, NULL);
+}
+#endif
diff --git a/lib/zonecut.c b/lib/zonecut.c
index 2bbd26f..aea38e4 100644
--- a/lib/zonecut.c
+++ b/lib/zonecut.c
@@ -580,7 +580,7 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut,
 		trie_clear(cut->nsset);
 		/* Subtract label from QNAME. */
 		if (!is_root) {
-			label = knot_wire_next_label(label, NULL);
+			label = knot_dname_next_label(label);
 		} else {
 			ret = kr_error(ENOENT);
 			break;
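The lib/utils.h addition keeps the tree building against libknot releases older than 3.4.0 by providing knot_dname_next_label() on top of the old knot_wire_next_label(). With that shim in place the same caller code compiles against either libknot generation; a rough usage sketch, where print_suffixes() is hypothetical and only the standard libknot knot_dname_to_str() API is assumed:

    #include <stdbool.h>
    #include <stdio.h>
    #include <libknot/dname.h>
    #include "lib/utils.h"   /* supplies knot_dname_next_label() on libknot < 3.4.0 */

    /* Print a name and each of its parents, ending with the root. */
    static void print_suffixes(const knot_dname_t *name)
    {
        while (true) {
            knot_dname_txt_storage_t txt;
            if (knot_dname_to_str(txt, name, sizeof(txt)) != NULL)
                printf("%s\n", txt);
            if (name[0] == '\0')   /* the root label: do not step past it */
                break;
            name = knot_dname_next_label(name);
        }
    }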