Diffstat
-rw-r--r--	lib/layer/validate.c	1366
1 file changed, 1366 insertions, 0 deletions
diff --git a/lib/layer/validate.c b/lib/layer/validate.c
new file mode 100644
index 0000000..93f1d4f
--- /dev/null
+++ b/lib/layer/validate.c
@@ -0,0 +1,1366 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include <errno.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <contrib/cleanup.h>
+#include <libknot/packet/wire.h>
+#include <libknot/rrtype/rdname.h>
+#include <libknot/rrtype/rrsig.h>
+#include <libdnssec/error.h>
+
+#include "lib/dnssec/nsec.h"
+#include "lib/dnssec/nsec3.h"
+#include "lib/dnssec/ta.h"
+#include "lib/dnssec.h"
+#include "lib/layer.h"
+#include "lib/resolve.h"
+#include "lib/rplan.h"
+#include "lib/utils.h"
+#include "lib/defines.h"
+#include "lib/module.h"
+#include "lib/selection.h"
+
+#define VERBOSE_MSG(qry, ...) kr_log_q(qry, VALIDATOR, __VA_ARGS__)
+
+#define MAX_REVALIDATION_CNT 2
+
+/**
+ * Search in section for given type.
+ * @param sec Packet section.
+ * @param type Type to search for.
+ * @return True if found.
+ */
+static bool section_has_type(const knot_pktsection_t *sec, uint16_t type)
+{
+	if (!sec) {
+		return false;
+	}
+
+	for (unsigned i = 0; i < sec->count; ++i) {
+		const knot_rrset_t *rr = knot_pkt_rr(sec, i);
+		if (rr->type == type) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool pkt_has_type(const knot_pkt_t *pkt, uint16_t type)
+{
+	if (!pkt) {
+		return false;
+	}
+
+	if (section_has_type(knot_pkt_section(pkt, KNOT_ANSWER), type)) {
+		return true;
+	}
+	if (section_has_type(knot_pkt_section(pkt, KNOT_AUTHORITY), type)) {
+		return true;
+	}
+	return section_has_type(knot_pkt_section(pkt, KNOT_ADDITIONAL), type);
+}
+
+static void log_bogus_rrsig(kr_rrset_validation_ctx_t *vctx,
+			    const knot_rrset_t *rr, const char *msg) {
+	if (kr_log_is_debug_qry(VALIDATOR, vctx->log_qry)) {
+		auto_free char *name_text = kr_dname_text(rr->owner);
+		auto_free char *type_text = kr_rrtype_text(rr->type);
+		VERBOSE_MSG(vctx->log_qry, ">< %s: %s %s "
+			"(%u matching RRSIGs, %u expired, %u not yet valid, "
+			"%u invalid signer, %u invalid label count, %u invalid key, "
+			"%u invalid crypto, %u invalid NSEC)\n",
+			msg, name_text, type_text, vctx->rrs_counters.matching_name_type,
+			vctx->rrs_counters.expired, vctx->rrs_counters.notyet,
+			vctx->rrs_counters.signer_invalid, vctx->rrs_counters.labels_invalid,
+			vctx->rrs_counters.key_invalid, vctx->rrs_counters.crypto_invalid,
+			vctx->rrs_counters.nsec_invalid);
+	}
+}
+
+/** Check that given CNAME could be generated by given DNAME (no DNSSEC validation). */
+static bool cname_matches_dname(const knot_rrset_t *rr_cn, const knot_rrset_t *rr_dn)
+{
+	if (kr_fails_assert(rr_cn->type == KNOT_RRTYPE_CNAME && rr_dn->type == KNOT_RRTYPE_DNAME))
+		return false;
+	/* When DNAME substitution happens, let's consider the "prefix"
+	 * that is carried over and the "suffix" that is replaced.
+	 * (Here we consider the label order used in wire and presentation.) */
+	const int prefix_labels = knot_dname_in_bailiwick(rr_cn->owner, rr_dn->owner);
+	if (prefix_labels < 1)
+		return false;
+	const knot_dname_t *cn_target = knot_cname_name(rr_cn->rrs.rdata);
+	const knot_dname_t *dn_target = knot_dname_target(rr_dn->rrs.rdata);
+	/* ^ We silently use the first RR in each RRset.  Could be e.g. logged. */
+	/* Check that the suffixes are correct - and even prefix label counts. */
+	if (knot_dname_in_bailiwick(cn_target, dn_target) != prefix_labels)
+		return false;
+	/* Check that prefixes match.  Find end of the first one and compare. */
+	const knot_dname_t *cn_se = rr_cn->owner;
+	for (int i = 0; i < prefix_labels; ++i)
+		cn_se += 1 + *cn_se;
+	return strncmp((const char *)rr_cn->owner, (const char *)cn_target,
+			cn_se - rr_cn->owner) == 0;
+	/* ^ We use the fact that dnames are always zero-terminated
+	 * to avoid any possible over-read in cn_target. */
+}
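+
+/* Worked example for the check above (names are illustrative only): given
+ *   DNAME  example.com.     -> example.net.
+ *   CNAME  a.b.example.com. -> a.b.example.net.
+ * knot_dname_in_bailiwick(a.b.example.com., example.com.) yields
+ * prefix_labels == 2; the CNAME target must then sit exactly two labels
+ * below example.net., and the two-label prefixes must compare equal
+ * byte-wise.  Any other combination cannot result from DNAME
+ * substitution, so the function returns false. */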
+
+static void mark_insecure_parents(const struct kr_query *qry);
+static void rank_records(struct kr_query *qry, bool any_rank, enum kr_rank rank_to_set,
+			 const knot_dname_t *bailiwick);
+
+static bool maybe_downgrade_nsec3(const ranked_rr_array_entry_t *e, struct kr_query *qry,
+				  const kr_rrset_validation_ctx_t *vctx)
+{
+	bool required_conditions =
+		e->rr->type == KNOT_RRTYPE_NSEC3
+		&& kr_rank_test(e->rank, KR_RANK_SECURE)
+		// extra careful: avoid downgrade if SNAME isn't in bailiwick of signer
+		&& knot_dname_in_bailiwick(qry->sname, vctx->zone_name) >= 0;
+	if (!required_conditions)
+		return false;
+
+	const knot_rdataset_t *rrs = &e->rr->rrs;
+	knot_rdata_t *rd = rrs->rdata;
+	for (int j = 0; j < rrs->count; ++j, rd = knot_rdataset_next(rd)) {
+		if (knot_nsec3_iters(rd) > KR_NSEC3_MAX_ITERATIONS)
+			goto do_downgrade;
+	}
+	return false;
+
+do_downgrade: // we do this deep inside calls because of having signer name available
+	VERBOSE_MSG(qry, "<= DNSSEC downgraded due to NSEC3 iterations %d > %d\n",
+			(int)knot_nsec3_iters(rd), (int)KR_NSEC3_MAX_ITERATIONS);
+	qry->flags.DNSSEC_WANT = false;
+	qry->flags.DNSSEC_INSECURE = true;
+	rank_records(qry, true, KR_RANK_INSECURE, vctx->zone_name);
+	mark_insecure_parents(qry);
+	return true;
+}
+
+#define KNOT_EDOWNGRADED (KNOT_ERROR_MIN - 1)
+
+static int validate_section(kr_rrset_validation_ctx_t *vctx, struct kr_query *qry,
+			    knot_mm_t *pool)
+{
+	struct kr_request *req = qry->request;
+	if (!vctx) {
+		return kr_error(EINVAL);
+	}
+
+	/* Can't use qry->zone_cut.name directly, as this name can
+	 * change when updating cut information before validation.
+	 */
+	vctx->zone_name = vctx->keys ? vctx->keys->owner : NULL;
+
+	for (ssize_t i = 0; i < vctx->rrs->len; ++i) {
+		ranked_rr_array_entry_t *entry = vctx->rrs->at[i];
+		knot_rrset_t * const rr = entry->rr;
+
+		if (entry->yielded || vctx->qry_uid != entry->qry_uid) {
+			continue;
+		}
+
+		if (kr_rank_test(entry->rank, KR_RANK_OMIT)
+		    || kr_rank_test(entry->rank, KR_RANK_SECURE)) {
+			continue; /* these are already OK */
+		}
+
+		if (!knot_dname_is_equal(qry->zone_cut.name, rr->owner)/*optim.*/
+		    && !kr_ta_closest(qry->request->ctx, rr->owner, rr->type)) {
+			/* We have NTA "between" our (perceived) zone cut and the RR. */
+			kr_rank_set(&entry->rank, KR_RANK_INSECURE);
+			continue;
+		}
+
+		if (rr->type == KNOT_RRTYPE_RRSIG) {
+			const knot_dname_t *signer_name = knot_rrsig_signer_name(rr->rrs.rdata);
+			if (!knot_dname_is_equal(vctx->zone_name, signer_name)) {
+				kr_rank_set(&entry->rank, KR_RANK_MISMATCH);
+				vctx->err_cnt += 1;
+				break;
+			}
+			if (!kr_rank_test(entry->rank, KR_RANK_BOGUS))
+				kr_rank_set(&entry->rank, KR_RANK_OMIT);
+			continue;
+		}
+
+		uint8_t rank_orig = entry->rank;
+		int validation_result = kr_rrset_validate(vctx, rr);
+
+		/* Handle the case of CNAMEs synthesized from DNAMEs (they don't have RRSIGs). */
+		if (rr->type == KNOT_RRTYPE_CNAME && validation_result == kr_error(ENOENT)) {
+			for (ssize_t j = 0; j < vctx->rrs->len; ++j) {
+				ranked_rr_array_entry_t *e_dname = vctx->rrs->at[j];
+				if ((e_dname->rr->type == KNOT_RRTYPE_DNAME)
+				    /* If the order is wrong, we will need two passes. */
+				    && kr_rank_test(e_dname->rank, KR_RANK_SECURE)
+				    && cname_matches_dname(rr, e_dname->rr)) {
+					/* Now we believe the CNAME is OK. */
+					validation_result = kr_ok();
+					break;
+				}
+			}
+			if (validation_result != kr_ok()) {
+				vctx->cname_norrsig_cnt += 1;
+			}
+		}
+
+		if (validation_result == kr_ok()) {
+			kr_rank_set(&entry->rank, KR_RANK_SECURE);
+
+			/* Downgrade zone to insecure if certain NSEC3 record occurs. */
+			if (unlikely(maybe_downgrade_nsec3(entry, qry, vctx)))
+				return kr_error(KNOT_EDOWNGRADED);
+
+		} else if (kr_rank_test(rank_orig, KR_RANK_TRY)) {
+			/* RFC 4035 section 2.2:
+			 * NS RRsets that appear at delegation points (...)
+			 * MUST NOT be signed */
+			if (vctx->rrs_counters.matching_name_type > 0)
+				log_bogus_rrsig(vctx, rr,
+					"found unexpected signatures for non-authoritative data which failed to validate, continuing");
+			vctx->result = kr_ok();
+			kr_rank_set(&entry->rank, KR_RANK_TRY);
+			/* ^^ BOGUS would be more accurate, but it might change
+			 * to MISMATCH on revalidation, e.g. in test val_referral_nods :-/
+			 */
+
+		} else if (validation_result == kr_error(ENOENT)
+			   && vctx->rrs_counters.matching_name_type == 0) {
+			/* no RRSIGs found */
+			kr_rank_set(&entry->rank, KR_RANK_MISSING);
+			vctx->err_cnt += 1;
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS, "JZAJ");
+			log_bogus_rrsig(vctx, rr, "no valid RRSIGs found");
+		} else {
+			kr_rank_set(&entry->rank, KR_RANK_BOGUS);
+			vctx->err_cnt += 1;
+			if (vctx->rrs_counters.expired > 0)
+				kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_EXPIRED, "YFJ2");
+			else if (vctx->rrs_counters.notyet > 0)
+				kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_NOTYET, "UBBS");
+			else
+				kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "I74V");
+			log_bogus_rrsig(vctx, rr, "bogus signatures");
+		}
+	}
+	return kr_ok();
+}
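+
+/* Summary of the ranks assigned above, as read from the code: SECURE when
+ * an RRSIG verifies; OMIT for RRSIG entries themselves once their signer
+ * checks out; MISMATCH when an RRSIG's signer differs from the DNSKEY
+ * owner; MISSING when no RRSIG matched the RRset at all; BOGUS for
+ * signatures that are present but invalid; and TRY is kept for
+ * non-authoritative data such as delegation NS, which RFC 4035
+ * section 2.2 forbids signing on the parent side. */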
+
+static int validate_records(struct kr_request *req, knot_pkt_t *answer, knot_mm_t *pool, bool has_nsec3)
+{
+	struct kr_query *qry = req->current_query;
+	if (!qry->zone_cut.key) {
+		VERBOSE_MSG(qry, "<= no DNSKEY, can't validate\n");
+		return kr_error(EBADMSG);
+	}
+
+	kr_rrset_validation_ctx_t vctx = {
+		.pkt		= answer,
+		.rrs		= &req->answ_selected,
+		.section_id	= KNOT_ANSWER,
+		.keys		= qry->zone_cut.key,
+		.zone_name	= qry->zone_cut.name,
+		.timestamp	= qry->timestamp.tv_sec,
+		.ttl_min	= req->ctx->cache.ttl_min,
+		.qry_uid	= qry->uid,
+		.has_nsec3	= has_nsec3,
+		.flags		= 0,
+		.err_cnt	= 0,
+		.cname_norrsig_cnt = 0,
+		.result		= 0,
+		.log_qry	= qry,
+	};
+
+	int ret = validate_section(&vctx, qry, pool);
+	if (vctx.err_cnt && vctx.err_cnt == vctx.cname_norrsig_cnt) {
+		VERBOSE_MSG(qry, ">< all validation errors are missing RRSIGs on CNAMES, trying again in hope for DNAMEs\n");
+		vctx.err_cnt = vctx.cname_norrsig_cnt = vctx.result = 0;
+		ret = validate_section(&vctx, qry, pool);
+	}
+	req->answ_validated = (vctx.err_cnt == 0);
+	if (ret != kr_ok()) {
+		return ret;
+	}
+
+	uint32_t an_flags = vctx.flags;
+	vctx.rrs	= &req->auth_selected;
+	vctx.section_id	= KNOT_AUTHORITY;
+	vctx.flags	= 0;
+	vctx.err_cnt	= 0;
+	vctx.result	= 0;
+
+	ret = validate_section(&vctx, qry, pool);
+	req->auth_validated = (vctx.err_cnt == 0);
+	if (ret != kr_ok()) {
+		return ret;
+	}
+
+	/* Records were validated.
+	 * If there is wildcard expansion in answer,
+	 * or optout - flag the query.
+	 */
+	if (an_flags & KR_DNSSEC_VFLG_WEXPAND) {
+		qry->flags.DNSSEC_WEXPAND = true;
+	}
+	if (an_flags & KR_DNSSEC_VFLG_OPTOUT) {
+		qry->flags.DNSSEC_OPTOUT = true;
+	}
+
+	return ret;
+}
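+
+/* The single retry above covers section ordering: a DNAME may appear after
+ * the CNAME it synthesized, so the first pass counts that CNAME as lacking
+ * RRSIGs.  If *every* error was of that kind, a second pass with the
+ * now-SECURE DNAME entries in place can still validate the answer. */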
+
+static int validate_keyset(struct kr_request *req, knot_pkt_t *answer, bool has_nsec3)
+{
+	/* Merge DNSKEY records from answer that are below/at current cut. */
+	struct kr_query *qry = req->current_query;
+	bool updated_key = false;
+	const knot_pktsection_t *an = knot_pkt_section(answer, KNOT_ANSWER);
+	for (unsigned i = 0; i < an->count; ++i) {
+		const knot_rrset_t *rr = knot_pkt_rr(an, i);
+		if (rr->type != KNOT_RRTYPE_DNSKEY
+		    || knot_dname_in_bailiwick(rr->owner, qry->zone_cut.name) < 0) {
+			continue;
+		}
+		/* Merge with zone cut (or replace ancestor key). */
+		if (!qry->zone_cut.key || !knot_dname_is_equal(qry->zone_cut.key->owner, rr->owner)) {
+			qry->zone_cut.key = knot_rrset_copy(rr, qry->zone_cut.pool);
+			if (!qry->zone_cut.key) {
+				return kr_error(ENOMEM);
+			}
+			updated_key = true;
+		} else {
+			int ret = knot_rdataset_merge(&qry->zone_cut.key->rrs,
+						      &rr->rrs, qry->zone_cut.pool);
+			if (ret != 0) {
+				knot_rrset_free(qry->zone_cut.key, qry->zone_cut.pool);
+				qry->zone_cut.key = NULL;
+				return ret;
+			}
+			updated_key = true;
+		}
+	}
+
+	/* Check if there's a key for current TA. */
+	if (updated_key && !(qry->flags.CACHED)) {
+		/* Find signatures for the DNSKEY; selected by iterator from ANSWER. */
+		int sig_index = -1;
+		for (int i = req->answ_selected.len - 1; i >= 0; --i) {
+			const knot_rrset_t *rrsig = req->answ_selected.at[i]->rr;
+			const bool ok = req->answ_selected.at[i]->qry_uid == qry->uid
+				&& rrsig->type == KNOT_RRTYPE_RRSIG
+				&& knot_rrsig_type_covered(rrsig->rrs.rdata)
+					== KNOT_RRTYPE_DNSKEY
+				&& rrsig->rclass == KNOT_CLASS_IN
+				&& knot_dname_is_equal(rrsig->owner,
+						       qry->zone_cut.key->owner);
+			if (ok) {
+				sig_index = i;
+				break;
+			}
+		}
+		if (sig_index < 0) {
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS, "EZDC");
+			return kr_error(ENOENT);
+		}
+		const knot_rdataset_t *sig_rds = &req->answ_selected.at[sig_index]->rr->rrs;
+
+		kr_rrset_validation_ctx_t vctx = {
+			.pkt		= answer,
+			.rrs		= &req->answ_selected,
+			.section_id	= KNOT_ANSWER,
+			.keys		= qry->zone_cut.key,
+			.zone_name	= qry->zone_cut.name,
+			.timestamp	= qry->timestamp.tv_sec,
+			.ttl_min	= req->ctx->cache.ttl_min,
+			.qry_uid	= qry->uid,
+			.has_nsec3	= has_nsec3,
+			.flags		= 0,
+			.result		= 0,
+			.log_qry	= qry,
+		};
+		int ret = kr_dnskeys_trusted(&vctx, sig_rds, qry->zone_cut.trust_anchor);
+		/* Set rank of the RRSIG.  This may be needed, but I don't know why.
+		 * In particular, black_ent.rpl may get broken otherwise. */
+		kr_rank_set(&req->answ_selected.at[sig_index]->rank,
+			    ret == 0 ? KR_RANK_SECURE : KR_RANK_BOGUS);
+
+		if (ret != 0) {
+			log_bogus_rrsig(&vctx, qry->zone_cut.key, "bogus key");
+			knot_rrset_free(qry->zone_cut.key, qry->zone_cut.pool);
+			qry->zone_cut.key = NULL;
+			if (vctx.rrs_counters.expired > 0)
+				kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_EXPIRED, "6GJV");
+			else if (vctx.rrs_counters.notyet > 0)
+				kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_NOTYET, "4DJQ");
+			else
+				kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "EXRU");
+			return ret;
+		}
+
+		if (vctx.flags & KR_DNSSEC_VFLG_WEXPAND) {
+			qry->flags.DNSSEC_WEXPAND = true;
+		}
+		if (vctx.flags & KR_DNSSEC_VFLG_OPTOUT) {
+			qry->flags.DNSSEC_OPTOUT = true;
+		}
+
+	}
+	return kr_ok();
+}
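+
+/* kr_dnskeys_trusted() is the step that actually anchors the chain of
+ * trust: the merged DNSKEY RRset must verify against a key matching the
+ * DS set held in qry->zone_cut.trust_anchor.  Every per-RRset validation
+ * done later relies on the keyset accepted (or rejected) here. */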
+
+static knot_rrset_t *update_ds(struct kr_zonecut *cut, const knot_pktsection_t *sec)
+{
+	/* Aggregate DS records (if using multiple keys) */
+	knot_rrset_t *new_ds = NULL;
+	for (unsigned i = 0; i < sec->count; ++i) {
+		const knot_rrset_t *rr = knot_pkt_rr(sec, i);
+		if (rr->type != KNOT_RRTYPE_DS) {
+			continue;
+		}
+		int ret = 0;
+		if (new_ds) {
+			ret = knot_rdataset_merge(&new_ds->rrs, &rr->rrs, cut->pool);
+		} else {
+			new_ds = knot_rrset_copy(rr, cut->pool);
+			if (!new_ds) {
+				return NULL;
+			}
+		}
+		if (ret != 0) {
+			knot_rrset_free(new_ds, cut->pool);
+			return NULL;
+		}
+	}
+	return new_ds;
+}
+
+static void mark_insecure_parents(const struct kr_query *qry)
+{
+	/* If there is a chain of DS queries, mark all of them,
+	 * then mark the first non-DS parent.
+	 * Stop if a parent is waiting for an NS address.
+	 * The NS may be located in an unsigned zone, but it will still
+	 * return valid DNSSEC records for the initial query. */
+	struct kr_query *parent = qry->parent;
+	while (parent && !parent->flags.AWAIT_IPV4 && !parent->flags.AWAIT_IPV6) {
+		parent->flags.DNSSEC_WANT = false;
+		parent->flags.DNSSEC_INSECURE = true;
+		if (parent->stype != KNOT_RRTYPE_DS &&
+		    parent->stype != KNOT_RRTYPE_RRSIG) {
+			break;
+		}
+		parent = parent->parent;
+	}
+}
+
+static int update_parent_keys(struct kr_request *req, uint16_t answer_type)
+{
+	struct kr_query *qry = req->current_query;
+	struct kr_query *parent = qry->parent;
+	if (kr_fails_assert(parent))
+		return KR_STATE_FAIL;
+	switch(answer_type) {
+	case KNOT_RRTYPE_DNSKEY:
+		VERBOSE_MSG(qry, "<= parent: updating DNSKEY\n");
+		parent->zone_cut.key = knot_rrset_copy(qry->zone_cut.key, parent->zone_cut.pool);
+		if (!parent->zone_cut.key) {
+			return KR_STATE_FAIL;
+		}
+		break;
+	case KNOT_RRTYPE_DS:
+		VERBOSE_MSG(qry, "<= parent: updating DS\n");
+		if (qry->flags.DNSSEC_INSECURE) { /* DS non-existence proven. */
+			mark_insecure_parents(qry);
+		} else if (qry->flags.DNSSEC_NODS && !qry->flags.FORWARD) {
+			if (qry->flags.DNSSEC_OPTOUT) {
+				mark_insecure_parents(qry);
+			} else {
+				int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, qry->uid,
+									  qry->sname, KNOT_RRTYPE_NS);
+				if (ret == kr_ok()) {
+					mark_insecure_parents(qry);
+				}
+			}
+		} else if (qry->flags.DNSSEC_NODS && qry->flags.FORWARD) {
+			int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, qry->uid,
+								  qry->sname, KNOT_RRTYPE_NS);
+			if (ret == kr_ok()) {
+				mark_insecure_parents(qry);
+			}
+		} else { /* DS existence proven. */
+			parent->zone_cut.trust_anchor = knot_rrset_copy(qry->zone_cut.trust_anchor, parent->zone_cut.pool);
+			if (!parent->zone_cut.trust_anchor) {
+				return KR_STATE_FAIL;
+			}
+		}
+		break;
+	default: break;
+	}
+	return kr_ok();
+}
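+
+/* In short: proven DS non-existence (directly, via NSEC3 opt-out, or with
+ * an NS found alongside it for forwarded queries) propagates INSECURE up
+ * through the chain of DS/RRSIG sub-queries, while proven DS existence
+ * hands the fresh DS RRset to the parent as its new trust anchor. */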
+
+static int update_delegation(struct kr_request *req, struct kr_query *qry, knot_pkt_t *answer, bool has_nsec3)
+{
+	struct kr_zonecut *cut = &qry->zone_cut;
+
+	/* RFC4035 3.1.4. authoritative must send either DS or proof of non-existence.
+	 * If it contains neither, resolver must query the parent for the DS (RFC4035 5.2.).
+	 * If DS exists, the referral is OK,
+	 * otherwise referral is bogus (or an attempted downgrade attack).
+	 */
+
+	unsigned section = KNOT_ANSWER;
+	const bool referral = !knot_wire_get_aa(answer->wire);
+	if (referral) {
+		section = KNOT_AUTHORITY;
+	} else if (knot_pkt_qtype(answer) == KNOT_RRTYPE_DS &&
+		   !(qry->flags.CNAME) &&
+		   (knot_wire_get_rcode(answer->wire) != KNOT_RCODE_NXDOMAIN)) {
+		section = KNOT_ANSWER;
+	} else { /* N/A */
+		return kr_ok();
+	}
+
+	int ret = 0;
+	const knot_dname_t *proved_name = knot_pkt_qname(answer);
+	/* Aggregate DS records (if using multiple keys) */
+	knot_rrset_t *new_ds = update_ds(cut, knot_pkt_section(answer, section));
+	if (!new_ds) {
+		/* No DS provided, check for proof of non-existence. */
+		if (!has_nsec3) {
+			if (referral) {
+				/* Check if it is referral to unsigned, rfc4035 5.2 */
+				ret = kr_nsec_ref_to_unsigned(&req->auth_selected,
+							      qry->uid, proved_name);
+			} else {
+				/* No-data answer */
+				ret = kr_nsec_negative(&req->auth_selected, qry->uid,
+						       proved_name, KNOT_RRTYPE_DS);
+				if (ret >= 0) {
+					if (ret == PKT_NODATA) {
+						ret = kr_ok();
+					} else {
+						ret = kr_error(ENOENT); // suspicious
+					}
+				}
+			}
+		} else {
+			if (referral) {
+				/* Check if it is referral to unsigned, rfc5155 8.9 */
+				ret = kr_nsec3_ref_to_unsigned(answer);
+			} else {
+				/* No-data answer, QTYPE is DS, rfc5155 8.6 */
+				ret = kr_nsec3_no_data(answer, KNOT_AUTHORITY, proved_name, KNOT_RRTYPE_DS);
+			}
+			if (ret == kr_error(KNOT_ERANGE)) {
+				/* Not bogus, going insecure due to optout */
+				ret = 0;
+			}
+		}
+
+		if (referral && qry->stype != KNOT_RRTYPE_DS &&
+		    ret == DNSSEC_NOT_FOUND) {
+			/* referral,
+			 * qtype is not KNOT_RRTYPE_DS, NSEC/NSEC3 were not found.
+			 * Check if DS already was fetched. */
+			knot_rrset_t *ta = cut->trust_anchor;
+			if (knot_dname_is_equal(cut->name, ta->owner)) {
+				/* DS is OK */
+				ret = 0;
+			}
+		} else if (ret != 0) {
+			VERBOSE_MSG(qry, "<= bogus proof of DS non-existence\n");
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "Z4I6");
+			qry->flags.DNSSEC_BOGUS = true;
+		} else if (proved_name[0] != '\0') { /* don't go to insecure for . DS */
+			qry->flags.DNSSEC_NODS = true;
+			/* Rank the corresponding nonauth NS as insecure. */
+			for (int i = 0; i < req->auth_selected.len; ++i) {
+				ranked_rr_array_entry_t *ns = req->auth_selected.at[i];
+				if (ns->qry_uid != qry->uid
+				    || !ns->rr
+				    || ns->rr->type != KNOT_RRTYPE_NS) {
+					continue;
+				}
+				if (!referral && !knot_dname_is_equal(qry->sname, ns->rr->owner)) {
+					continue;
+				}
+				/* Found the record.  Note: this is slightly fragile
+				 * in case there were more NS records in the packet.
+				 * As it is now for referrals, kr_nsec*_ref_to_unsigned consider
+				 * (only) the first NS record in the packet. */
+				if (!kr_rank_test(ns->rank, KR_RANK_AUTH)) { /* sanity */
+					ns->rank = KR_RANK_INSECURE;
+				}
+				break;
+			}
+		}
+		return ret;
+	} else if (qry->flags.FORWARD && qry->parent) {
+		struct kr_query *parent = qry->parent;
+		parent->zone_cut.name = knot_dname_copy(qry->sname, parent->zone_cut.pool);
+	}
+
+	/* Extend trust anchor */
+	VERBOSE_MSG(qry, "<= DS: OK\n");
+	cut->trust_anchor = new_ds;
+	return ret;
+}
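+
+/* update_delegation() enforces RFC 4035 section 3.1.4 (and RFC 5155
+ * sections 8.6/8.9 for NSEC3): a signed parent must provide either the DS
+ * RRset or a proof of its non-existence.  A referral carrying neither is
+ * treated as a potential downgrade attack and flagged bogus instead of
+ * silently going insecure. */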
+
+static const knot_dname_t *find_first_signer(ranked_rr_array_t *arr, struct kr_query *qry)
+{
+	for (size_t i = 0; i < arr->len; ++i) {
+		ranked_rr_array_entry_t *entry = arr->at[i];
+		const knot_rrset_t *rr = entry->rr;
+		if (entry->yielded ||
+		    (!kr_rank_test(entry->rank, KR_RANK_INITIAL) &&
+		     !kr_rank_test(entry->rank, KR_RANK_TRY) &&
+		     !kr_rank_test(entry->rank, KR_RANK_MISMATCH))) {
+			continue;
+		}
+		if (rr->type != KNOT_RRTYPE_RRSIG) {
+			continue;
+		}
+		const knot_dname_t *signame = knot_rrsig_signer_name(rr->rrs.rdata);
+		if (knot_dname_in_bailiwick(rr->owner, signame) >= 0) {
+			return signame;
+		} else {
+			/* otherwise it's some nonsense, so we skip it */
+			kr_log_q(qry, VALIDATOR, "protocol violation: "
+				 "out-of-bailiwick RRSIG signer, skipping\n");
+		}
+	}
+	return NULL;
+}
+
+static const knot_dname_t *signature_authority(struct kr_request *req)
+{
+	const knot_dname_t *signer_name = find_first_signer(&req->answ_selected, req->current_query);
+	if (!signer_name) {
+		signer_name = find_first_signer(&req->auth_selected, req->current_query);
+	}
+	return signer_name;
+}
+
+static int rrsig_not_found(const kr_layer_t * const ctx, const knot_pkt_t * const pkt,
+			   const knot_rrset_t * const rr)
+{
+	/* Signatures are missing.  There might be a zone cut that we've skipped
+	 * and transitions to insecure.  That can commonly happen when iterating
+	 * and both sides of that cut are served by the same IP address(es).
+	 * We'll try proving that the name truly is insecure - by spawning
+	 * a DS sub-query on a suitable QNAME.
+	 */
+	struct kr_request * const req = ctx->req;
+	struct kr_query * const qry = req->current_query;
+
+	if (qry->flags.FORWARD || qry->flags.STUB) {
+		/* Undiscovered signed cuts can't happen in the current forwarding
+		 * algorithm, so this function shouldn't be able to help. */
+		return KR_STATE_FAIL;
+	}
+
+	/* Find cut_next: the name at which to try finding the "missing" zone cut. */
+	const knot_dname_t * const cut_top = qry->zone_cut.name;
+	const int next_depth = knot_dname_in_bailiwick(rr->owner, cut_top);
+	if (next_depth <= 0) {
+		return KR_STATE_FAIL; // shouldn't happen, I think
+	}
+	/* Add one extra label to cut_top, i.e. descend one level below current zone cut */
+	const knot_dname_t * const cut_next = rr->owner
+		+ knot_dname_prefixlen(rr->owner, next_depth - 1, NULL);
+
+	/* Spawn that DS sub-query. */
+	struct kr_query * const next = kr_rplan_push(&req->rplan, qry, cut_next,
+						     rr->rclass, KNOT_RRTYPE_DS);
+	if (!next) {
+		return KR_STATE_FAIL;
+	}
+	kr_zonecut_init(&next->zone_cut, qry->zone_cut.name, &req->pool);
+	kr_zonecut_copy(&next->zone_cut, &qry->zone_cut);
+	kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
+	next->flags.DNSSEC_WANT = true;
+	return KR_STATE_YIELD;
+}
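+
+/* Example of the descent above (names are illustrative only): with the
+ * cut at "com." and an unsigned RRset owned by "www.example.com.",
+ * next_depth is 2 and cut_next becomes "example.com.", one label below
+ * the known cut.  The spawned DS query then either discovers a signed
+ * cut that was skipped or proves the transition into insecure space. */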
+
+static int check_validation_result(kr_layer_t *ctx, const knot_pkt_t *pkt, ranked_rr_array_t *arr)
+{
+	int ret = KR_STATE_DONE;
+	struct kr_request *req = ctx->req;
+	struct kr_query *qry = req->current_query;
+	ranked_rr_array_entry_t *invalid_entry = NULL;
+	for (size_t i = 0; i < arr->len; ++i) {
+		ranked_rr_array_entry_t *entry = arr->at[i];
+		if (entry->yielded || entry->qry_uid != qry->uid) {
+			continue;
+		}
+		if (kr_rank_test(entry->rank, KR_RANK_MISMATCH)) {
+			invalid_entry = entry;
+			break;
+		} else if (kr_rank_test(entry->rank, KR_RANK_MISSING) &&
+			   !invalid_entry) {
+			invalid_entry = entry;
+		} else if (kr_rank_test(entry->rank, KR_RANK_OMIT)) {
+			continue;
+		} else if (!kr_rank_test(entry->rank, KR_RANK_SECURE) &&
+			   !invalid_entry) {
+			invalid_entry = entry;
+		}
+	}
+
+	if (!invalid_entry) {
+		return ret;
+	}
+
+	if (!kr_rank_test(invalid_entry->rank, KR_RANK_SECURE) &&
+	    (++(invalid_entry->revalidation_cnt) > MAX_REVALIDATION_CNT)) {
+		VERBOSE_MSG(qry, "<= continuous revalidation, fails\n");
+		kr_request_set_extended_error(req, KNOT_EDNS_EDE_OTHER,
+			"4T4L: continuous revalidation");
+		qry->flags.DNSSEC_BOGUS = true;
+		return KR_STATE_FAIL;
+	}
+
+	const knot_rrset_t *rr = invalid_entry->rr;
+	if (kr_rank_test(invalid_entry->rank, KR_RANK_MISMATCH)) {
+		const knot_dname_t *signer_name = knot_rrsig_signer_name(rr->rrs.rdata);
+		if (knot_dname_in_bailiwick(signer_name, qry->zone_cut.name) > 0) {
+			qry->zone_cut.name = knot_dname_copy(signer_name, &req->pool);
+			qry->flags.AWAIT_CUT = true;
+		} else if (!knot_dname_is_equal(signer_name, qry->zone_cut.name)) {
+			if (qry->zone_cut.parent) {
+				memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
+			} else {
+				qry->flags.AWAIT_CUT = true;
+			}
+			qry->zone_cut.name = knot_dname_copy(signer_name, &req->pool);
+		}
+		VERBOSE_MSG(qry, ">< cut changed (new signer), needs revalidation\n");
+		ret = KR_STATE_YIELD;
+	} else if (kr_rank_test(invalid_entry->rank, KR_RANK_MISSING)) {
+		ret = rrsig_not_found(ctx, pkt, rr);
+	} else if (!kr_rank_test(invalid_entry->rank, KR_RANK_SECURE)) {
+		kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "NXJA");
+		qry->flags.DNSSEC_BOGUS = true;
+		ret = KR_STATE_FAIL;
+	}
+
+	return ret;
+}
+
+static bool check_empty_answer(kr_layer_t *ctx, knot_pkt_t *pkt)
+{
+	struct kr_request *req = ctx->req;
+	struct kr_query *qry = req->current_query;
+	ranked_rr_array_t *arr = &req->answ_selected;
+	size_t num_entries = 0;
+	for (size_t i = 0; i < arr->len; ++i) {
+		ranked_rr_array_entry_t *entry = arr->at[i];
+		const knot_rrset_t *rr = entry->rr;
+		if (rr->type == KNOT_RRTYPE_RRSIG && qry->stype != KNOT_RRTYPE_RRSIG) {
+			continue;
+		}
+		if (entry->qry_uid == qry->uid) {
+			++num_entries;
+		}
+	}
+	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
+	return ((an->count != 0) && (num_entries == 0)) ? false : true;
+}
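+
+/* check_validation_result() above drives the yield/revalidate loop:
+ * MISMATCH re-anchors the zone cut at the RRSIG signer and yields,
+ * MISSING spawns a DS sub-query via rrsig_not_found(), and any other
+ * non-SECURE rank fails as bogus.  MAX_REVALIDATION_CNT bounds how many
+ * times a single entry may send the query around this loop. */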
+
+static int unsigned_forward(kr_layer_t *ctx, knot_pkt_t *pkt)
+{
+	struct kr_request *req = ctx->req;
+	struct kr_query *qry = req->current_query;
+	const uint16_t qtype = knot_pkt_qtype(pkt);
+	const uint8_t pkt_rcode = knot_wire_get_rcode(pkt->wire);
+	bool nods = false;
+	bool ns_exist = true;
+	for (int i = 0; i < req->rplan.resolved.len; ++i) {
+		struct kr_query *q = req->rplan.resolved.at[i];
+		if (q->sclass == qry->sclass &&
+		    q->stype == KNOT_RRTYPE_DS &&
+		    knot_dname_is_equal(q->sname, qry->sname)) {
+			nods = true;
+			if (!(q->flags.DNSSEC_OPTOUT)) {
+				int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, q->uid,
+									  qry->sname, KNOT_RRTYPE_NS);
+				ns_exist = (ret == kr_ok());
+			}
+		}
+	}
+
+	if (nods && ns_exist && qtype == KNOT_RRTYPE_NS) {
+		qry->flags.DNSSEC_WANT = false;
+		qry->flags.DNSSEC_INSECURE = true;
+		if (qry->forward_flags.CNAME) {
+			if (kr_fails_assert(qry->cname_parent))
+				return KR_STATE_FAIL;
+			qry->cname_parent->flags.DNSSEC_WANT = false;
+			qry->cname_parent->flags.DNSSEC_INSECURE = true;
+		} else if (pkt_rcode == KNOT_RCODE_NOERROR && qry->parent != NULL) {
+			const knot_pktsection_t *sec = knot_pkt_section(pkt, KNOT_ANSWER);
+			const knot_rrset_t *rr = knot_pkt_rr(sec, 0);
+			if (rr->type == KNOT_RRTYPE_NS) {
+				qry->parent->zone_cut.name = knot_dname_copy(rr->owner, &req->pool);
+				qry->parent->flags.DNSSEC_WANT = false;
+				qry->parent->flags.DNSSEC_INSECURE = true;
+			}
+		}
+		while (qry->parent) {
+			qry = qry->parent;
+			qry->flags.DNSSEC_WANT = false;
+			qry->flags.DNSSEC_INSECURE = true;
+			if (qry->forward_flags.CNAME) {
+				if (kr_fails_assert(qry->cname_parent))
+					return KR_STATE_FAIL;
+				qry->cname_parent->flags.DNSSEC_WANT = false;
+				qry->cname_parent->flags.DNSSEC_INSECURE = true;
+			}
+		}
+		return KR_STATE_DONE;
+	}
+
+	if (ctx->state == KR_STATE_YIELD) {
+		return KR_STATE_DONE;
+	}
+
+	if (!nods && qtype != KNOT_RRTYPE_DS) {
+		struct kr_rplan *rplan = &req->rplan;
+		struct kr_query *next = kr_rplan_push(rplan, qry, qry->sname, qry->sclass, KNOT_RRTYPE_DS);
+		if (!next) {
+			return KR_STATE_FAIL;
+		}
+		kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
+		kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
+		next->flags.DNSSEC_WANT = true;
+	}
+
+	return KR_STATE_YIELD;
+}
+
+static int check_signer(kr_layer_t *ctx, knot_pkt_t *pkt)
+{
+	struct kr_request *req = ctx->req;
+	struct kr_query *qry = req->current_query;
+	const knot_dname_t *ta_name = qry->zone_cut.trust_anchor ? qry->zone_cut.trust_anchor->owner : NULL;
+	const knot_dname_t *signer = signature_authority(req);
+	if (ta_name && (!signer || !knot_dname_is_equal(ta_name, signer))) {
+		/* check all newly added RRSIGs */
+		if (!signer) {
+			if (qry->flags.FORWARD) {
+				return unsigned_forward(ctx, pkt);
+			}
+			/* Not a DNSSEC-signed response. */
+			if (ctx->state == KR_STATE_YIELD) {
+				/* Already yielded for revalidation.
+				 * It means that trust chain is OK and
+				 * transition to INSECURE hasn't occurred.
+				 * Let the validation logic ask about RRSIG. */
+				return KR_STATE_DONE;
+			}
+			/* Ask parent for DS
+			 * to prove transition to INSECURE. */
+			const uint16_t qtype = knot_pkt_qtype(pkt);
+			const knot_dname_t *qname = knot_pkt_qname(pkt);
+			if (qtype == KNOT_RRTYPE_NS &&
+			    knot_dname_in_bailiwick(qname, qry->zone_cut.name) > 0) {
+				/* Server is authoritative
+				 * for both parent and child,
+				 * and child zone is not signed. */
+				qry->zone_cut.name = knot_dname_copy(qname, &req->pool);
+			}
+		} else if (knot_dname_in_bailiwick(signer, qry->zone_cut.name) > 0) {
+			if (!(qry->flags.FORWARD)) {
+				/* Key signer is below current cut, advance and refetch keys. */
+				qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
+			} else {
+				/* Check if DS does not exist. */
+				struct kr_query *q = kr_rplan_find_resolved(&req->rplan, NULL,
+									    signer, qry->sclass, KNOT_RRTYPE_DS);
+				if (q && q->flags.DNSSEC_NODS) {
+					qry->flags.DNSSEC_WANT = false;
+					qry->flags.DNSSEC_INSECURE = true;
+					if (qry->parent) {
+						qry->parent->flags.DNSSEC_WANT = false;
+						qry->parent->flags.DNSSEC_INSECURE = true;
+					}
+				} else if (qry->stype != KNOT_RRTYPE_DS) {
+					struct kr_rplan *rplan = &req->rplan;
+					struct kr_query *next = kr_rplan_push(rplan, qry, qry->sname,
+									      qry->sclass, KNOT_RRTYPE_DS);
+					if (!next) {
+						return KR_STATE_FAIL;
+					}
+					kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
+					kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
+					next->flags.DNSSEC_WANT = true;
+				}
+			}
+		} else if (!knot_dname_is_equal(signer, qry->zone_cut.name)) {
+			/* Key signer is above the current cut, so we can't validate it.  This
+			 * happens when a server is authoritative for both grandparent, parent
+			 * and child zone.  Ascend to the parent cut, and refetch authority
+			 * for the signer. */
+			if (qry->zone_cut.parent) {
+				memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
+			} else {
+				qry->flags.AWAIT_CUT = true;
+			}
+			qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
+		}
+
+		/* zone cut matches, but DS/DNSKEY doesn't => refetch. */
+		VERBOSE_MSG(qry, ">< cut changed, needs revalidation\n");
+		if ((qry->flags.FORWARD) && qry->stype != KNOT_RRTYPE_DS) {
+			struct kr_rplan *rplan = &req->rplan;
+			struct kr_query *next = kr_rplan_push(rplan, qry, signer,
+							      qry->sclass, KNOT_RRTYPE_DS);
+			if (!next) {
+				return KR_STATE_FAIL;
+			}
+			kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
+			kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
+			next->flags.DNSSEC_WANT = true;
+			return KR_STATE_YIELD;
+		}
+		if (!(qry->flags.FORWARD)) {
+			return KR_STATE_YIELD;
+		}
+	}
+	return KR_STATE_DONE;
+}
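+
+/* The three signer cases handled above correspond to server layouts:
+ * no signer at all (unsigned child served from the same address), signer
+ * below the current cut (a signed cut that was skipped, so descend), and
+ * signer above the cut (one server authoritative for parent and child,
+ * so ascend).  Each re-anchors qry->zone_cut.name and refetches the
+ * DS/DNSKEY material before validation is retried. */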
+
+/** Change ranks of RRs from this single iteration:
+ * _INITIAL or _TRY or _MISSING -> rank_to_set.  Or any rank, if any_rank == true.
+ *
+ * Optionally do this only in a `bailiwick` (if not NULL).
+ * Iterator shouldn't have selected such records, but we check to be sure. */
+static void rank_records(struct kr_query *qry, bool any_rank, enum kr_rank rank_to_set,
+			 const knot_dname_t *bailiwick)
+{
+	struct kr_request *req = qry->request;
+	ranked_rr_array_t *ptrs[2] = { &req->answ_selected, &req->auth_selected };
+	for (size_t i = 0; i < 2; ++i) {
+		ranked_rr_array_t *arr = ptrs[i];
+		for (size_t j = 0; j < arr->len; ++j) {
+			ranked_rr_array_entry_t *entry = arr->at[j];
+			if (entry->qry_uid != qry->uid) {
+				continue;
+			}
+			if (bailiwick && knot_dname_in_bailiwick(entry->rr->owner,
+								 bailiwick) < 0) {
+				continue;
+			}
+			if (any_rank
+			    || kr_rank_test(entry->rank, KR_RANK_INITIAL)
+			    || kr_rank_test(entry->rank, KR_RANK_TRY)
+			    || kr_rank_test(entry->rank, KR_RANK_MISSING)) {
+				kr_rank_set(&entry->rank, rank_to_set);
+			}
+		}
+	}
+}
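+
+/* Note the scoping: only entries from this iteration (matching qry->uid)
+ * and, when `bailiwick` is given, only owners inside it get re-ranked, so
+ * an INSECURE downgrade for one zone cannot relabel records of another. */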
+
+static void check_wildcard(kr_layer_t *ctx)
+{
+	struct kr_request *req = ctx->req;
+	struct kr_query *qry = req->current_query;
+	ranked_rr_array_t *ptrs[2] = { &req->answ_selected, &req->auth_selected };
+
+	for (int i = 0; i < 2; ++i) {
+		ranked_rr_array_t *arr = ptrs[i];
+		for (ssize_t j = 0; j < arr->len; ++j) {
+			ranked_rr_array_entry_t *entry = arr->at[j];
+			const knot_rrset_t *rrsigs = entry->rr;
+
+			if (qry->uid != entry->qry_uid) {
+				continue;
+			}
+
+			if (rrsigs->type != KNOT_RRTYPE_RRSIG) {
+				continue;
+			}
+
+			int owner_labels = knot_dname_labels(rrsigs->owner, NULL);
+
+			knot_rdata_t *rdata_k = rrsigs->rrs.rdata;
+			for (int k = 0; k < rrsigs->rrs.count;
+			     ++k, rdata_k = knot_rdataset_next(rdata_k)) {
+				if (knot_rrsig_labels(rdata_k) != owner_labels) {
+					qry->flags.DNSSEC_WEXPAND = true;
+				}
+			}
+		}
+	}
+}
+
+/** Just for wildcard_adjust_to_wire() */
+static bool rr_is_for_wildcard(const ranked_rr_array_entry_t *entry)
+{
+	switch (kr_rrset_type_maysig(entry->rr)) {
+	case KNOT_RRTYPE_NSEC:
+	case KNOT_RRTYPE_NSEC3:
+		return true;
+	default:
+		return false;
+	}
+}
+/** In case of wildcard expansion, mark required authority RRs by to_wire. */
+static int wildcard_adjust_to_wire(struct kr_request *req, const struct kr_query *qry)
+{
+	if (!qry->parent && qry->flags.DNSSEC_WEXPAND) {
+		return kr_ranked_rrarray_set_wire(&req->auth_selected, true,
+						  qry->uid, true, &rr_is_for_wildcard);
+	}
+	return kr_ok();
+}
+
+static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
+{
+	int ret = 0;
+	struct kr_request *req = ctx->req;
+	struct kr_query *qry = req->current_query;
+
+	/* Ignore faulty or unprocessed responses. */
+	if (ctx->state & (KR_STATE_FAIL|KR_STATE_CONSUME)) {
+		return ctx->state;
+	}
+
+	/* Pass-through if user doesn't want secure answer or stub. */
+	if (qry->flags.STUB) {
+		rank_records(qry, false, KR_RANK_OMIT, NULL);
+		return ctx->state;
+	}
+	uint8_t pkt_rcode = knot_wire_get_rcode(pkt->wire);
+	if ((qry->flags.FORWARD) &&
+	    pkt_rcode != KNOT_RCODE_NOERROR &&
+	    pkt_rcode != KNOT_RCODE_NXDOMAIN) {
+		do {
+			qry->flags.DNSSEC_BOGUS = true;
+			if (qry->cname_parent) {
+				qry->cname_parent->flags.DNSSEC_BOGUS = true;
+			}
+			qry = qry->parent;
+		} while (qry);
+		ctx->state = KR_STATE_DONE;
+		return ctx->state;
+	}
+
+	if (!(qry->flags.DNSSEC_WANT)) {
+		const bool is_insec = qry->flags.CACHED && qry->flags.DNSSEC_INSECURE;
+		if ((qry->flags.DNSSEC_INSECURE)) {
+			rank_records(qry, true, KR_RANK_INSECURE, qry->zone_cut.name);
+		}
+		if (is_insec && qry->parent != NULL) {
+			/* We have got an insecure answer from cache;
+			 * mark parent(s) as insecure. */
+			mark_insecure_parents(qry);
+			VERBOSE_MSG(qry, "<= cached insecure response, going insecure\n");
+			ctx->state = KR_STATE_DONE;
+		} else if (ctx->state == KR_STATE_YIELD) {
+			/* Transition to insecure state
+			 * occurred during revalidation.
+			 * If the state remains YIELD, the answer will not be cached.
+			 * Let the cache layers work. */
+			ctx->state = KR_STATE_DONE;
+		}
+		return ctx->state;
+	}
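+
+	/* From here on the answer definitely wants validation; the CD-bit
+	 * branch below merely skips it per RFC 4035 section 3.2.2
+	 * (checking disabled), ranking the records OMIT so they are not
+	 * treated as validated later. */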
+
+	/* Pass-through if CD bit is set. */
+	if (knot_wire_get_cd(req->qsource.packet->wire)) {
+		check_wildcard(ctx);
+		wildcard_adjust_to_wire(req, qry);
+		rank_records(qry, false, KR_RANK_OMIT, NULL);
+		return ctx->state;
+	}
+	/* Answer for RRSIG may not set DO=1, but all records MUST still validate. */
+	bool use_signatures = (knot_pkt_qtype(pkt) != KNOT_RRTYPE_RRSIG);
+	if (!(qry->flags.CACHED) && !knot_pkt_has_dnssec(pkt) && !use_signatures) {
+		VERBOSE_MSG(qry, "<= got insecure response\n");
+		kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "MISQ");
+		qry->flags.DNSSEC_BOGUS = true;
+		return KR_STATE_FAIL;
+	}
+
+	/* Check if this is a DNSKEY answer, check trust chain and store. */
+	uint16_t qtype = knot_pkt_qtype(pkt);
+	bool has_nsec3 = pkt_has_type(pkt, KNOT_RRTYPE_NSEC3);
+	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
+	const bool referral = (an->count == 0 && !knot_wire_get_aa(pkt->wire));
+
+	if (!(qry->flags.CACHED) && knot_wire_get_aa(pkt->wire)) {
+		/* Check if the answer is not empty,
+		 * but the iterator has not selected any records. */
+		if (!check_empty_answer(ctx, pkt)) {
+			VERBOSE_MSG(qry, "<= no useful RR in authoritative answer\n");
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "MJX6");
+			qry->flags.DNSSEC_BOGUS = true;
+			return KR_STATE_FAIL;
+		}
+		/* Track difference between current TA and signer name.
+		 * This indicates that the NS is auth for both parent-child,
+		 * and we must update DS/DNSKEY to validate it.
+		 */
+		ret = check_signer(ctx, pkt);
+		if (ret != KR_STATE_DONE) {
+			return ret;
+		}
+		if (qry->flags.FORWARD && qry->flags.DNSSEC_INSECURE) {
+			return KR_STATE_DONE;
+		}
+	}
+
+	if (knot_wire_get_aa(pkt->wire) && qtype == KNOT_RRTYPE_DNSKEY) {
+		const knot_rrset_t *ds = qry->zone_cut.trust_anchor;
+		if (ds && !kr_ds_algo_support(ds)) {
+			VERBOSE_MSG(qry, ">< all DS entries use unsupported algorithm pairs, going insecure\n");
+			/* ^ the message is a bit imprecise to avoid being too verbose */
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_OTHER, "LSLC: unsupported digest/key");
+			qry->flags.DNSSEC_WANT = false;
+			qry->flags.DNSSEC_INSECURE = true;
+			rank_records(qry, true, KR_RANK_INSECURE, qry->zone_cut.name);
+			mark_insecure_parents(qry);
+			return KR_STATE_DONE;
+		}
+
+		ret = validate_keyset(req, pkt, has_nsec3);
+		if (ret == kr_error(EAGAIN)) {
+			VERBOSE_MSG(qry, ">< cut changed, needs revalidation\n");
+			return KR_STATE_YIELD;
+		} else if (ret != 0) {
+			VERBOSE_MSG(qry, "<= bad keys, broken trust chain\n");
+			/* EDE code already set in validate_keyset() */
+			qry->flags.DNSSEC_BOGUS = true;
+			return KR_STATE_FAIL;
+		}
+	}
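+
+	/* Ordering matters here: the keyset was accepted above (or the
+	 * query yielded to refetch it), so the per-RRset validation below
+	 * may assume qry->zone_cut.key holds a trusted DNSKEY RRset. */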
+
+	/* Validate all records, fail as bogus if it doesn't match.
+	 * Do not revalidate data from cache, as it's already trusted.
+	 * TTLs of RRsets may get lowered. */
+	if (!(qry->flags.CACHED)) {
+		ret = validate_records(req, pkt, req->rplan.pool, has_nsec3);
+		if (ret == KNOT_EDOWNGRADED) {
+			return KR_STATE_DONE;
+		} else if (ret != 0) {
+			/* something exceptional - no DNS key, empty pointers etc
+			 * normally it shouldn't happen */
+			VERBOSE_MSG(qry, "<= couldn't validate RRSIGs\n");
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_OTHER,
+				"O4TP: couldn't validate RRSIGs");
+			qry->flags.DNSSEC_BOGUS = true;
+			return KR_STATE_FAIL;
+		}
+		/* check validation state and spawn subrequests */
+		if (!req->answ_validated) {
+			ret = check_validation_result(ctx, pkt, &req->answ_selected);
+			if (ret != KR_STATE_DONE) {
+				return ret;
+			}
+		}
+		if (!req->auth_validated) {
+			ret = check_validation_result(ctx, pkt, &req->auth_selected);
+			if (ret != KR_STATE_DONE) {
+				return ret;
+			}
+		}
+	}
+
+	/* Validate non-existence proof if not positive answer.
+	 * In case of CNAME, iterator scheduled a sibling query for the target,
+	 * so we just drop the negative piece of information and don't try to prove it.
+	 * TODO: not ideal; with aggressive cache we'll at least avoid the extra packet. */
+	if (!qry->flags.CACHED && pkt_rcode == KNOT_RCODE_NXDOMAIN && !qry->flags.CNAME) {
+		/* @todo If knot_pkt_qname(pkt) is used instead of qry->sname then the tests crash. */
+		if (!has_nsec3) {
+			ret = kr_nsec_negative(&req->auth_selected, qry->uid,
+					       qry->sname, KNOT_RRTYPE_NULL);
+			if (ret >= 0) {
+				if (ret & PKT_NXDOMAIN) {
+					ret = kr_ok();
+				} else {
+					ret = kr_error(ENOENT); // probably proved NODATA
+				}
+			}
+		} else {
+			ret = kr_nsec3_name_error_response_check(pkt, KNOT_AUTHORITY, qry->sname);
+		}
+		if (has_nsec3 && (ret == kr_error(KNOT_ERANGE))) {
+			/* NXDOMAIN proof is OK,
+			 * but NSEC3 that covers next closer name
+			 * (or wildcard at next closer name) has opt-out flag.
+			 * RFC5155 9.2; AD flag cannot be set */
+			qry->flags.DNSSEC_OPTOUT = true;
+			VERBOSE_MSG(qry, "<= can't prove NXDOMAIN due to optout, going insecure\n");
+		} else if (ret != 0) {
+			VERBOSE_MSG(qry, "<= bad NXDOMAIN proof\n");
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_NSEC_MISS, "3WKM");
+			qry->flags.DNSSEC_BOGUS = true;
+			return KR_STATE_FAIL;
+		}
+	}
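+
+	/* A similar split follows for NODATA: plain NSEC proofs are read
+	 * from req->auth_selected while the NSEC3 helpers re-parse the
+	 * packet, and an opt-out range again downgrades the proof to
+	 * insecure rather than bogus. */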
+
+	/* @todo WTH, this needs API that just tries to find a proof and the caller
+	 * doesn't have to worry about NSEC/NSEC3
+	 * @todo rework this
+	 * CNAME: same as the NXDOMAIN case above */
+	if (!qry->flags.CACHED && pkt_rcode == KNOT_RCODE_NOERROR && !qry->flags.CNAME) {
+		bool no_data = (an->count == 0 && knot_wire_get_aa(pkt->wire));
+		if (no_data) {
+			/* @todo
+			 * ? quick mechanism to determine which check to perform first
+			 * ? merge the functionality together to share code/resources
+			 */
+			if (!has_nsec3) {
+				ret = kr_nsec_negative(&req->auth_selected, qry->uid,
+						       knot_pkt_qname(pkt), knot_pkt_qtype(pkt));
+				if (ret >= 0) {
+					if (ret == PKT_NODATA) {
+						ret = kr_ok();
+					} else {
+						ret = kr_error(ENOENT); // suspicious
+					}
+				}
+			} else {
+				ret = kr_nsec3_no_data(pkt, KNOT_AUTHORITY, knot_pkt_qname(pkt), knot_pkt_qtype(pkt));
+			}
+			if (ret != 0) {
+				if (has_nsec3 && (ret == kr_error(KNOT_ERANGE))) {
+					VERBOSE_MSG(qry, "<= can't prove NODATA due to optout, going insecure\n");
+					qry->flags.DNSSEC_OPTOUT = true;
+					/* We can't return from here;
+					 * we must continue, validate NSEC/NSEC3 and
+					 * call update_parent_keys() to mark
+					 * parent queries as insecure */
+				} else {
+					VERBOSE_MSG(qry, "<= bad NODATA proof\n");
+					kr_request_set_extended_error(req, KNOT_EDNS_EDE_NSEC_MISS, "AHXI");
+					qry->flags.DNSSEC_BOGUS = true;
+					return KR_STATE_FAIL;
+				}
+			}
+		}
+	}
+
+	wildcard_adjust_to_wire(req, qry);
+
+	/* Check and update current delegation point security status. */
+	ret = update_delegation(req, qry, pkt, has_nsec3);
+	if (ret == DNSSEC_NOT_FOUND && qry->stype != KNOT_RRTYPE_DS) {
+		if (ctx->state == KR_STATE_YIELD) {
+			VERBOSE_MSG(qry, "<= can't validate referral\n");
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "XLE4");
+			qry->flags.DNSSEC_BOGUS = true;
+			return KR_STATE_FAIL;
+		} else {
+			/* Check the trust chain and query DS/DNSKEY if needed. */
+			VERBOSE_MSG(qry, "<= DS/NSEC was not found, querying for DS\n");
+			return KR_STATE_YIELD;
+		}
+	} else if (ret != 0) {
+		return KR_STATE_FAIL;
+	} else if (pkt_rcode == KNOT_RCODE_NOERROR &&
+		   referral &&
+		   ((!qry->flags.DNSSEC_WANT && qry->flags.DNSSEC_INSECURE) ||
+		    (qry->flags.DNSSEC_NODS))) {
+		/* referral with proven DS non-existence */
+		qtype = KNOT_RRTYPE_DS;
+	}
+	/* Update parent query zone cut */
+	if (qry->parent) {
+		if (update_parent_keys(req, qtype) != 0) {
+			return KR_STATE_FAIL;
+		}
+	}
+
+	if (qry->flags.FORWARD && qry->parent) {
+		if (pkt_rcode == KNOT_RCODE_NXDOMAIN) {
+			qry->parent->forward_flags.NO_MINIMIZE = true;
+		}
+	}
+	VERBOSE_MSG(qry, "<= answer valid, OK\n");
+	return KR_STATE_DONE;
+}
+
+/** Hide RRsets which did not validate from clients. */
+static int hide_bogus(kr_layer_t *ctx) {
+	if (knot_wire_get_cd(ctx->req->qsource.packet->wire)) {
+		return ctx->state;
+	}
+	/* We don't want to send bogus answers to clients, not even in SERVFAIL
+	 * answers, but we cannot drop whole sections.  If a CNAME chain
+	 * SERVFAILs somewhere, the steps that were OK should be put into
+	 * answer.
+	 *
+	 * There is one specific issue: currently we follow CNAME *before*
+	 * we validate it, because... iterator comes before validator.
+	 * Therefore some rrsets might be added into req->*_selected before
+	 * we detected failure in validator.
+	 * TODO: better approach, probably during work on parallel queries.
+	 */
+	const ranked_rr_array_t *sel[] = kr_request_selected(ctx->req);
+	for (knot_section_t sect = KNOT_ANSWER; sect <= KNOT_ADDITIONAL; ++sect) {
+		for (size_t i = 0; i < sel[sect]->len; ++i) {
+			ranked_rr_array_entry_t *e = sel[sect]->at[i];
+			e->to_wire = e->to_wire
+				&& !kr_rank_test(e->rank, KR_RANK_INDET)
+				&& !kr_rank_test(e->rank, KR_RANK_BOGUS)
+				&& !kr_rank_test(e->rank, KR_RANK_MISMATCH)
+				&& !kr_rank_test(e->rank, KR_RANK_MISSING);
+		}
+	}
+	return ctx->state;
+}
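+
+/* hide_bogus() runs at answer_finalize, i.e. once per request rather than
+ * per upstream packet, so when a CNAME chain fails partway the validated
+ * prefix stays in the answer while the failing tail is stripped from the
+ * wire. */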
+
+static int validate_wrapper(kr_layer_t *ctx, knot_pkt_t *pkt) {
+	// Wrapper for now.
+	int ret = validate(ctx, pkt);
+	struct kr_request *req = ctx->req;
+	struct kr_query *qry = req->current_query;
+	if (ret & KR_STATE_FAIL && qry->flags.DNSSEC_BOGUS)
+		qry->server_selection.error(qry, req->upstream.transport, KR_SELECTION_DNSSEC_ERROR);
+	if (ret & KR_STATE_DONE && !qry->flags.DNSSEC_BOGUS) {
+		/* Don't report extended DNS errors related to validation
+		 * when it managed to succeed (e.g. by trying different auth). */
+		switch (req->extended_error.info_code) {
+		case KNOT_EDNS_EDE_BOGUS:
+		case KNOT_EDNS_EDE_NSEC_MISS:
+		case KNOT_EDNS_EDE_RRSIG_MISS:
+		case KNOT_EDNS_EDE_SIG_EXPIRED:
+		case KNOT_EDNS_EDE_SIG_NOTYET:
+			kr_request_set_extended_error(req, KNOT_EDNS_EDE_NONE, NULL);
+			break;
+		case KNOT_EDNS_EDE_DNSKEY_MISS:
+		case KNOT_EDNS_EDE_DNSKEY_BIT:
+			kr_assert(false); /* These EDE codes aren't used. */
+			break;
+		default: break; /* Remaining codes don't indicate hard DNSSEC failure. */
+		}
+	}
+	return ret;
+}
+
+
+/** Module implementation. */
+int validate_init(struct kr_module *self)
+{
+	static const kr_layer_api_t layer = {
+		.consume = &validate_wrapper,
+		.answer_finalize = &hide_bogus,
+	};
+	self->layer = &layer;
+	return kr_ok();
+}
+
+KR_MODULE_EXPORT(validate) /* useless for builtin module, but let's be consistent */
+
+#undef VERBOSE_MSG