author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-05 04:15:13 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-05 04:15:13 +0000
commit:    672fd03e83f0333e8d8cb98c222520cd61a2f7a9 (patch)
tree:      271a0f975ff09b00661f2aba4b9eb2cf21e8457b
parent:    Adding debian version 5.7.2-1. (diff)
download:  knot-resolver-672fd03e83f0333e8d8cb98c222520cd61a2f7a9.tar.xz / knot-resolver-672fd03e83f0333e8d8cb98c222520cd61a2f7a9.zip
Merging upstream version 5.7.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat: 87 files changed, 1585 insertions, 912 deletions
diff --git a/.clang-tidy b/.clang-tidy index b496044..ecc9a62 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,6 +1,42 @@ --- -Checks: 'bugprone-*,cert-*,-cert-dcl03-c,-clang-analyzer-unix.Malloc,-clang-analyzer-deadcode.DeadStores,-clang-analyzer-valist.Uninitialized,readability-*,-readability-braces-*,-readability-else-after-return,-readability-redundant-declaration,-readability-non-const-parameter,google-readability-casting,misc-*,-misc-static-assert,-misc-macro-parentheses,-misc-unused-parameters' -WarningsAsErrors: 'cert-*,misc-*,readability-*,clang-analyzer-*,-readability-non-const-parameter' +Checks: + - bugprone-* + - cert-* + - google-readability-casting + - misc-* + - readability-* + + - -bugprone-assignment-in-if-condition # we explicitly put assignments into parentheses so they are very visible + - -bugprone-branch-clone + - -bugprone-easily-swappable-parameters + - -bugprone-inc-dec-in-conditions + - -bugprone-multi-level-implicit-pointer-conversion + - -bugprone-narrowing-conversions + - -bugprone-sizeof-expression # may be useful, but it's utterly broken + - -bugprone-suspicious-string-compare + - -cert-dcl03-c + - -clang-analyzer-deadcode.DeadStores + - -clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling + - -clang-analyzer-unix.Malloc + - -clang-analyzer-valist.Uninitialized + - -clang-analyzer-optin.core.EnumCastOutOfRange # libknot uses enums as flags + - -misc-include-cleaner + - -misc-macro-parentheses + - -misc-no-recursion + - -misc-static-assert + - -misc-unused-parameters + - -readability-avoid-nested-conditional-operator + - -readability-avoid-unconditional-preprocessor-if + - -readability-braces-* + - -readability-cognitive-complexity + - -readability-else-after-return + - -readability-function-cognitive-complexity + - -readability-identifier-length + - -readability-isolate-declaration + - -readability-magic-numbers + - -readability-non-const-parameter + - -readability-redundant-declaration +WarningsAsErrors: 'cert-*,clang-analyzer-*,misc-*,readability-*,-readability-non-const-parameter' HeaderFilterRegex: 'contrib/ucw/*.h' CheckOptions: - key: readability-identifier-naming diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..1da6af4 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +*.c diff=cpp +*.cpp diff=cpp diff --git a/.github/workflows/macOS.yaml b/.github/workflows/macOS.yaml new file mode 100644 index 0000000..f7fe090 --- /dev/null +++ b/.github/workflows/macOS.yaml @@ -0,0 +1,52 @@ +name: macOS + +on: push + +jobs: + build-test: + name: Build & unit tests & sanity check + runs-on: macOS-latest + strategy: + matrix: + knot-version: ['3.2', '3.3'] + + steps: + - name: Checkout resolver code + uses: actions/checkout@v2 + with: + submodules: true + + - name: Install dependecies from brew + run: + brew install cmocka luajit libuv lmdb meson nghttp2 autoconf automake m4 libtool pkg-config + + - name: Install libknot from sources + env: + KNOT_DNS_VERSION: ${{ matrix.knot-version }} + run: | + git clone -b ${KNOT_DNS_VERSION} https://gitlab.nic.cz/knot/knot-dns.git + cd knot-dns + autoreconf -fi + ./configure --prefix=${HOME}/.local/usr --disable-static --disable-fastparser --disable-documentation --disable-daemon --disable-utilities --with-lmdb=no + make -j2 install + cd .. 
+ + - name: Build resolver + run: | + export PKG_CONFIG_PATH="${PKG_CONFIG_PATH}:${HOME}/.local/usr/lib/pkgconfig" + meson build_darwin --default-library=static --buildtype=debugoptimized --prefix=${HOME}/.local/usr -Dc_args='-fno-omit-frame-pointer' + ninja -C build_darwin -v install + + - name: Run unit tests + env: + MALLOC_CHECK_: 3 + MALLOC_PERTURB_: 223 + run: meson test -C build_darwin --suite unit + + - name: Run kresd + env: + MALLOC_CHECK_: 3 + MALLOC_PERTURB_: 223 + run: | + export DYLD_FALLBACK_LIBRARY_PATH="${DYLD_FALLBACK_LIBRARY_PATH}:${HOME}/.local/usr/lib/" + echo "quit()" | ${HOME}/.local/usr/sbin/kresd -a 127.0.0.1@53535 . diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..749bc07 --- /dev/null +++ b/.gitignore @@ -0,0 +1,83 @@ +*.6 +*.Plo +*.a +*.db +*.dylib +*.dylib.* +*.gcda +*.gcno +*.gcov +*.info +*.la +*.lo +*.log +*.mdb +*.o +*.out +*.so +*.so.* +*.swp +*~ +.deps +.dirstamp +.libs +.pytest_cache +/.build-depend +/.cache +/aclocal.m4 +/ar-lib +/autom4te.cache/* +/bench/bench_lru +/build* +/client/kresc +/compile +/compile_commands.json +/config.guess +/config.h +/config.log +/config.status +/config.sub +/configure +/control +/coverage +/coverage.stats +/daemon/kresd +/daemon/lua/*.inc +/daemon/lua/trust_anchors.lua +/depcomp +/distro/tests/*/.vagrant +/doc/.doctrees +/doc/doxyxml +/doc/html +/doc/kresd.8 +/doc/texinfo +/ephemeral_key.pem +/install-sh +/libkres.pc +/libtool +/ltmain.sh +/missing +/modules/dnstap/dnstap.pb-c.d +/pkg +/self.crt +/self.key +/stamp-h1 +/tags +/tests/dnstap/src/dnstap-test/go.sum +/tests/pytests/*/tcproxy +/tests/pytests/*/tlsproxy +/tests/pytests/pytests.*.html +/tests/pytests/*.junit.xml +/tests/test_array +/tests/test_lru +/tests/test_map +/tests/test_module +/tests/test_pack +/tests/test_set +/tests/test_utils +/tests/test_zonecut +/ylwrap +_obj +kresd.amalg.c +libkres.amalg.c +luacov.*.out diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..61e0c92 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,860 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# vim:foldmethod=marker +variables: + DEBIAN_FRONTEND: noninteractive + LC_ALL: C.UTF-8 + GIT_SUBMODULE_STRATEGY: recursive + GIT_STRATEGY: clone # sometimes unclean submodule dirs otherwise + RESPDIFF_PRIORITY: 5 + DISTROTEST_PRIORITY: 6 + RESPDIFF_COUNT: 1 + RESPDIFF_FORCE: 0 + RESPERF_FORCE: 0 + KNOT_VERSION: '3.1' + LIBKRES_ABI: 9 + LIBKRES_NAME: libkres + MESON_TEST: meson test -C build_ci* -t 4 --print-errorlogs + PREFIX: $CI_PROJECT_DIR/.local + EMAIL: 'ci@nic' + + # IMAGE_TAG is a Git branch/tag name from https://gitlab.nic.cz/knot/knot-resolver-ci + # In general, keep it pointing to a tag - use a branch only for development. + # More info in the knot-resolver-ci repository. 
+ IMAGE_TAG: 'v20240506' + IMAGE_PREFIX: '$CI_REGISTRY/knot/knot-resolver-ci' + +image: $IMAGE_PREFIX/debian12-knot_3_3:$IMAGE_TAG +default: + interruptible: true + tags: + - docker + - linux + - amd64 + +stages: + - build + - sanity + - test + - respdiff + - deploy + - pkgtest + + # https://docs.gitlab.com/ce/ci/jobs/job_control.html#select-different-runner-tags-for-each-parallel-matrix-job +.multi_platform: &multi_platform + parallel: + matrix: + - PLATFORM: [ amd64, arm64 ] + tags: # some will override this part + - ${PLATFORM} + - docker + - linux + +.common: &common + except: + refs: + - master@knot/knot-resolver + - master@knot/security/knot-resolver + - tags + variables: + - $SKIP_CI == "1" + tags: + - docker + - linux + - amd64 + # Tests which decided to skip themselves get orange non-failure. + allow_failure: + exit_codes: + - 77 + +.after_build: &after_build + <<: *common + needs: + - build-stable + before_script: + # meson detects changes and performs useless rebuild; hide the log + - ninja -C build_ci* &>/dev/null + - rm build_ci*/meson-logs/testlog*.txt # start with clean testlog + artifacts: + when: always + # The deckard-specific parts are a little messy, but they're hard to separate in YAML. + paths: + - build_ci*/meson-logs/testlog*.txt + - tmpdeckard* + - build_ci*/meson-logs/integration.deckard.junit.xml + reports: + junit: build_ci*/meson-logs/integration.deckard.junit.xml + +.nodep: &nodep + <<: *common + needs: [] + +# build {{{ +.build: &build + <<: *common + stage: build + artifacts: + when: always + paths: + - .local + - build_ci* + - pkg + reports: + junit: build_ci*/meson-logs/testlog.junit.xml + before_script: + - "echo \"PATH: $PATH\"" + - "echo \"Using Python at: $(which python)\"" + after_script: + - ci/fix-meson-junit.sh build_ci*/meson-logs/testlog.junit.xml + +archive: + <<: *build + except: null + script: + - apkg make-archive + +build-stable: + <<: *build + script: + - meson build_ci_stable --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled + - ninja -C build_ci_stable + - ninja -C build_ci_stable install >/dev/null + - ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake + +build-deb11-knot31: + <<: *build + image: $IMAGE_PREFIX/debian11-knot_3_1:$IMAGE_TAG + script: + - meson build_ci_deb11_knot31 --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled + - ninja -C build_ci_deb11_knot31 + - ninja -C build_ci_deb11_knot31 install >/dev/null + - ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake + +build-deb11-knot32: + <<: *build + image: $IMAGE_PREFIX/debian11-knot_3_2:$IMAGE_TAG + script: + - meson build_ci_deb11_knot32 --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled + - ninja -C build_ci_deb11_knot32 + - ninja -C build_ci_deb11_knot32 install >/dev/null + - ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake + +build-deb12-knot32: + <<: *build + image: $IMAGE_PREFIX/debian12-knot_3_2:$IMAGE_TAG + script: + - meson build_ci_deb12_knot32 --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled + - ninja -C build_ci_deb12_knot32 + - ninja -C build_ci_deb12_knot32 install >/dev/null + - ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake + +build-deb12-knot-master: + <<: *build + image: $IMAGE_PREFIX/debian12-knot_master:$IMAGE_TAG + script: + - meson build_ci_deb12_knot_master --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled + - ninja -C 
build_ci_deb12_knot_master + - ninja -C build_ci_deb12_knot_master install >/dev/null + - ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake + allow_failure: true + +build-stable-asan-gcc: + <<: *build + script: + - CFLAGS=-fno-sanitize-recover=all meson build_ci_asan_gcc --prefix=$PREFIX -Dmalloc=jemalloc -Db_sanitize=address,undefined -Dextra_tests=enabled + - ninja -C build_ci_asan_gcc + - ninja -C build_ci_asan_gcc install >/dev/null + - MESON_TESTTHREADS=1 ${MESON_TEST} --suite unit --suite dnstap --no-suite skip_asan --no-suite snowflake + - MESON_TESTTHREADS=1 ASAN_OPTIONS=detect_leaks=0 ${MESON_TEST} --suite config --no-suite skip_asan --no-suite snowflake + + +# TODO: Clang sanitizer seems to be broken in the current version of Debian. Use +# GCC above and maybe re-enable the Clang one once we update at some point. + +#build-stable-asan-clang: +# <<: *build +# script: +# # issues with UBSan and ASan in CI: +# # - `ahocorasick.so` causes C++ problems +# # - `--default-library=shared` causes link problems +# - CC=clang CXX=clang++ CFLAGS=-fno-sanitize-recover=all CXXFLAGS=-fno-sanitize=undefined meson build_ci_asan_clang --default-library=static --prefix=$PREFIX -Dmalloc=jemalloc -Db_sanitize=address,undefined -Dextra_tests=enabled +# - ninja -C build_ci_asan_clang +# - ninja -C build_ci_asan_clang install >/dev/null +# # TODO _leaks: not sure what exactly is wrong in leak detection on config tests +# # TODO skip_asan: all three of these disappear locally when using gcc 9.1 (except some leaks) +# - MESON_TESTTHREADS=1 ASAN_OPTIONS=detect_leaks=0 ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite skip_asan --no-suite snowflake + +build:macOS: + <<: *nodep + image: python:3-alpine + only: + refs: + - branches@knot/knot-resolver + stage: build + when: delayed + start_in: 3 minutes # allow some time for mirroring, job creation + script: + - pip3 install -U requests + - python3 ./ci/gh_actions.py ${CI_COMMIT_REF_NAME} ${CI_COMMIT_SHA} + +docker: + <<: *nodep + stage: build + image: docker:latest + <<: *multi_platform + only: + refs: + - branches@knot/knot-resolver + tags: + - ${PLATFORM} + - dind + variables: + DOCKER_IMAGE_NAME: knot-resolver-test:${CI_COMMIT_SHA} + script: + - docker build --no-cache -t ${DOCKER_IMAGE_NAME} . 
+ - echo "quit()" | docker run -i ${DOCKER_IMAGE_NAME} + after_script: # remove dangling images to avoid running out of disk space + - docker rmi ${DOCKER_IMAGE_NAME} + - docker rmi $(docker images -f "dangling=true" -q) +# }}} + +# sanity {{{ +.sanity: &sanity + <<: *nodep + stage: sanity + +authors: + <<: *sanity + only: + refs: + - /^release.*$/ + script: + - LC_ALL=en_US.UTF-8 scripts/update-authors.sh + +news: + <<: *sanity + only: + refs: + - /^release.*$/ + script: + - head -n 1 NEWS | grep -q $(date +%Y-%m-%d) + +trivial_checks: # aggregated to save some processing + <<: *sanity + script: + - ci/no_assert_check.sh + - ci/deckard_commit_check.sh + +lint:other: + <<: *sanity + script: + - meson build_ci_lint &>/dev/null + - ninja -C build_ci* pylint + - ninja -C build_ci* flake8 + - ninja -C build_ci* luacheck + +lint:pedantic: + <<: *after_build + stage: sanity + script: + - meson build_pedantic_gcc -Dwerror=true -Dc_args='-Wpedantic' -Dextra_tests=enabled + - ninja -C build_pedantic_gcc + - > + CC=clang CXX=clang++ meson build_pedantic_clang -Dwerror=true -Dextra_tests=enabled -Dc_args=' + -Wpedantic -Wno-newline-eof -Wno-gnu-zero-variadic-macro-arguments -Wno-gnu-folding-constant' + - ninja -C build_pedantic_clang + +lint:tidy: + <<: *after_build + stage: sanity + script: + - ninja -C build_ci* tidy + +# Coverity reference: https://www.synopsys.com/blogs/software-security/integrating-coverity-scan-with-gitlab-ci/ +lint:coverity: + <<: *sanity + image: $IMAGE_PREFIX/coverity:$IMAGE_TAG + only: + refs: + - nightly@knot/knot-resolver + - coverity@knot/knot-resolver + script: + - meson build_ci_cov --prefix=$PREFIX + - /opt/cov-analysis/bin/cov-build --dir cov-int ninja -C build_ci_cov + - tar cfz cov-int.tar.gz cov-int + - curl https://scan.coverity.com/builds?project=$COVERITY_SCAN_PROJECT_NAME + --form token=$COVERITY_SCAN_TOKEN --form email="knot-resolver@labs.nic.cz" + --form file=@cov-int.tar.gz --form version="`git describe --tags`" + --form description="`git describe --tags` / $CI_COMMIT_TITLE / $CI_COMMIT_REF_NAME:$CI_PIPELINE_ID" + --fail-with-body + +.kres-gen: &kres-gen + <<: *sanity + script: + - meson build_ci_lib --prefix=$PREFIX -Dkres_gen_test=false + - ninja -C build_ci_lib daemon/kresd + - ninja -C build_ci_lib kres-gen + - git diff --quiet || (git diff; exit 1) +kres-gen-31: + <<: *kres-gen + image: $IMAGE_PREFIX/debian11-knot_3_1:$IMAGE_TAG +kres-gen-32: + <<: *kres-gen + image: $IMAGE_PREFIX/debian12-knot_3_2:$IMAGE_TAG + +root.hints: + <<: *sanity + only: + refs: + - /^release.*$/ + script: + - scripts/update-root-hints.sh + +ci-image-is-tag: + <<: *sanity + image: alpine:3 + variables: + GIT_STRATEGY: none + script: + - apk add git + - ( + git ls-remote --tags --exit-code + https://gitlab.nic.cz/knot/knot-resolver-ci.git + refs/tags/$IMAGE_TAG + && echo "Everything is OK!" + ) + || (echo "'$IMAGE_TAG' is not a tag (probably a branch). 
Make sure to set it to a tag in production!"; exit 2) +# }}} + +# test {{{ +.test_flaky: &test_flaky + <<: *after_build + stage: test + retry: + max: 1 + when: + - script_failure + +deckard: + <<: *test_flaky + # Deckard won't work with jemalloc due to a faketime bug: + # https://github.com/wolfcw/libfaketime/issues/130 + only: # trigger job only in repos under our control (privileged runner required) + - branches@knot/knot-resolver + - branches@knot/security/knot-resolver + tags: + - privileged + - amd64 + variables: + TMPDIR: $CI_PROJECT_DIR + script: + - ${MESON_TEST} --suite integration + +respdiff:basic: + <<: *after_build + stage: test + needs: + - build-stable-asan-gcc + script: + - ulimit -n "$(ulimit -Hn)" # applies only for kresd ATM + - ./ci/respdiff/start-resolvers.sh + - ./ci/respdiff/run-respdiff-tests.sh udp + - $PREFIX/sbin/kres-cache-gc -c . -u 0 # simple GC sanity check + - cat results/respdiff.txt + - echo 'test if mismatch rate < 1.0 %' + - grep -q '^target disagrees.*0\.[0-9][0-9] %' results/respdiff.txt + after_script: + - killall --wait kresd + artifacts: + when: always + paths: + - kresd.log* + - results/*.txt + - results/*.png + - results/respdiff.db/data.mdb* + - ./*.info + +test:valgrind: + <<: *test_flaky + script: + - ${MESON_TEST} --suite unit --suite config --no-suite snowflake --wrap="valgrind --leak-check=full --trace-children=yes --quiet --suppressions=/lj.supp" + - MESON_TESTTHREADS=1 ${MESON_TEST} --wrap="valgrind --leak-check=full --trace-children=yes --quiet --suppressions=/lj.supp" --suite snowflake + +pkgtest: + stage: test + trigger: + include: ci/pkgtest.yaml + strategy: depend + needs: + - build-stable + variables: # https://gitlab.nic.cz/help/ci/yaml/README.md#artifact-downloads-to-child-pipelines + PARENT_PIPELINE_ID: $CI_PIPELINE_ID + except: + refs: + - master@knot/knot-resolver + - master@knot/security/knot-resolver + - tags + variables: + - $SKIP_CI == "1" + +pytests: + <<: *test_flaky + needs: + - build-stable-asan-gcc + artifacts: + when: always + paths: + - build_ci*/meson-logs/testlog*.txt + - tests/pytests/*.html + - tests/pytests/*.junit.xml + reports: # Can't have multiple junit XMLs? + junit: tests/pytests/pytests.parallel.junit.xml + script: + - ${MESON_TEST} --suite pytests +# }}} + +# respdiff {{{ +.condor: &condor + <<: *common + tags: + - condor + needs: [] + only: # trigger job only in repos under our control + - branches@knot/knot-resolver + - branches@knot/security/knot-resolver + # The set of respdiff+resperf jobs takes over two hours to execute. + when: manual + +.respdiff: &respdiff + <<: *condor + stage: respdiff + script: + - git diff-index --name-only origin/master | grep -qEv '^(AUTHORS|ci/|config.mk|COPYING|distro/|doc/|etc/|NEWS|README.md|scripts/|tests/|\.gitignore|\.gitlab-ci\.yml|\.travis\.yml)' || test $RESPDIFF_FORCE -gt 0 || exit 77 + - test ! 
-f /var/tmp/respdiff-jobs/buffer/buffer_$RESPDIFF_TEST_stats.json || test $RESPDIFF_FORCE -gt 0 || ( echo "Reference unstable, try again in ~3h or use RESPDIFF_FORCE=1."; exit 1 ) + - export LABEL=gl$(date +%s) + - export COMMITDIR="/var/tmp/respdiff-jobs/$(git rev-parse --short HEAD)-$LABEL" + - export TESTDIR="$COMMITDIR/$RESPDIFF_TEST" + - ln -s $COMMITDIR respdiff_commitdir + - > + sudo -u respdiff /var/opt/respdiff/contrib/job_manager/submit.py -w + -p $RESPDIFF_PRIORITY + -c $RESPDIFF_COUNT + $(sudo -u respdiff /var/opt/respdiff/contrib/job_manager/create.py + "$(git rev-parse --short HEAD)" -l $LABEL -t $RESPDIFF_TEST --knot-branch=$KNOT_VERSION + --respdiff-stats /var/tmp/respdiff-jobs/ref_current/*_${RESPDIFF_TEST}_stats.json) + - for f in $TESTDIR/*.json; do test -s "$f" || (cat $TESTDIR/*stderr*; cat $TESTDIR/j*_docker.txt; exit 1); done + - sudo -u respdiff /var/opt/respdiff/contrib/job_manager/plot_ref.sh $TESTDIR/.. /var/tmp/respdiff-jobs/ref_current $RESPDIFF_TEST + after_script: + - 'cp -t . respdiff_commitdir/$RESPDIFF_TEST/j* ||:' + - 'cp -t . respdiff_commitdir/*$RESPDIFF_TEST*.png ||:' + - 'cat respdiff_commitdir/$RESPDIFF_TEST/*histogram.tar.gz | tar -xf - -i ||:' + artifacts: + when: always + expire_in: 1 week + paths: + - ./j* + - ./*.png + - ./*histogram/* + +fwd-tls6-kresd.udp6: + <<: *respdiff + variables: + RESPDIFF_TEST: shortlist.fwd-tls6-kresd.udp6 + +fwd-udp6-kresd.udp6: + <<: *respdiff + variables: + RESPDIFF_TEST: shortlist.fwd-udp6-kresd.udp6 + +iter.udp6: + <<: *respdiff + variables: + RESPDIFF_TEST: shortlist.iter.udp6 + +iter.tls6: + <<: *respdiff + variables: + RESPDIFF_TEST: shortlist.iter.tls6 + +fwd-udp6-unbound.udp6: + <<: *respdiff + variables: + RESPDIFF_TEST: shortlist.fwd-udp6-unbound.udp6 + +fwd-udp6-unbound.tcp6: + <<: *respdiff + variables: + RESPDIFF_TEST: shortlist.fwd-udp6-unbound.tcp6 + +fwd-udp6-unbound.tls6: + <<: *respdiff + variables: + RESPDIFF_TEST: shortlist.fwd-udp6-unbound.tls6 + +.resperf: &resperf + <<: *condor + stage: respdiff + script: + - git diff-index --name-only origin/master | grep -qEv '^(AUTHORS|ci/|config.mk|COPYING|distro/|doc/|etc/|NEWS|README.md|scripts/|tests/|\.gitignore|\.gitlab-ci\.yml|\.travis\.yml)' || test $RESPERF_FORCE -gt 0 || exit 77 + - export LABEL=gl$(date +%s) + - export COMMITDIR="/var/tmp/respdiff-jobs/$(git rev-parse --short HEAD)-$LABEL" + - export TESTDIR="$COMMITDIR/$RESPERF_TEST" + - ln -s $COMMITDIR resperf_commitdir + - > + sudo -u respdiff /var/opt/respdiff/contrib/job_manager/submit.py -w + $(sudo -u respdiff /var/opt/respdiff/contrib/job_manager/create.py + "$(git rev-parse --short HEAD)" -l $LABEL --asan -t $RESPERF_TEST --knot-branch=$KNOT_VERSION) + - export EXITCODE=$(cat $TESTDIR/j*_exitcode) + - if [[ "$EXITCODE" == "0" ]]; then cat $TESTDIR/j*_resperf.txt; else cat $TESTDIR/j*_docker.txt; fi + - exit $EXITCODE + after_script: + - 'cp -t . 
resperf_commitdir/$RESPERF_TEST/j* ||:' + artifacts: + when: always + expire_in: 1 week + paths: + - ./j* + +rp:fwd-tls6.udp-asan: + <<: *resperf + variables: + RESPERF_TEST: resperf.fwd-tls6.udp + +rp:fwd-udp6.udp-asan: + <<: *resperf + variables: + RESPERF_TEST: resperf.fwd-udp6.udp + +rp:iter.udp-asan: + <<: *resperf + variables: + RESPERF_TEST: resperf.iter.udp +# }}} + +# deploy {{{ +# copy snapshot of current master to nightly branch for further processing +# (this is workaround for missing complex conditions for job limits in Gitlab) +nightly:copy: + stage: deploy + needs: [] + only: + variables: + - $CREATE_NIGHTLY == "1" + refs: + - master@knot/knot-resolver + script: + - 'tmp_file=$(mktemp)' + # delete nightly branch + - 'STATUS=$(curl --request PUT --header "PRIVATE-TOKEN: $GITLAB_API_TOKEN" -s -o ${tmp_file} -w "%{http_code}" "https://gitlab.nic.cz/api/v4/projects/147/repository/branches/nightly/unprotect")' + - '[ "x${STATUS}" == "x200" ] || { cat ${tmp_file}; rm ${tmp_file}; exit 1; }' + # no output from DELETE command + - 'STATUS=$(curl --request DELETE --header "PRIVATE-TOKEN: $GITLAB_API_TOKEN" -s -o ${tmp_file} -w "%{http_code}" "https://gitlab.nic.cz/api/v4/projects/147/repository/branches/nightly")' + # recreate nightly branch from current master + - 'STATUS=$(curl --request POST --header "PRIVATE-TOKEN: $GITLAB_API_TOKEN" -s -o ${tmp_file} -w "%{http_code}" "https://gitlab.nic.cz/api/v4/projects/147/repository/branches?branch=nightly&ref=master")' + - '[ "x${STATUS}" == "x201" ] || { cat ${tmp_file}; rm ${tmp_file}; exit 1; }' + - 'STATUS=$(curl --request PUT --header "PRIVATE-TOKEN: $GITLAB_API_TOKEN" -s -o ${tmp_file} -w "%{http_code}" "https://gitlab.nic.cz/api/v4/projects/147/repository/branches/nightly/protect")' + - '[ "x${STATUS}" == "x200" ] || { cat ${tmp_file}; rm ${tmp_file}; exit 1; }' + - 'rm ${tmp_file}' + +obs:trigger: &obs_trigger + stage: deploy + only: + variables: + - $OBS_REPO + dependencies: # wait for previous stages to finish + - archive + environment: + name: OBS/$OBS_REPO + url: https://build.opensuse.org/package/show/home:CZ-NIC:$OBS_REPO/knot-resolver + tags: + - condor + allow_failure: false # required to make when: manual action blocking + script: + - python3 -m venv ./venv + - source ./venv/bin/activate + - pip install --upgrade pip + - pip install apkg + - scripts/make-obs.sh + - echo y | scripts/build-in-obs.sh $OBS_REPO + +obs:release: + <<: *obs_trigger + only: + - tags + variables: + OBS_REPO: knot-resolver-latest + when: manual + +obs:odvr: + <<: *obs_trigger + stage: pkgtest # last stage to ensure it doesn't block anything + only: + - tags + variables: + OBS_REPO: knot-resolver-odvr + when: manual +# }}} + +# pkgtest {{{ +.deploytest: &deploytest + stage: pkgtest + only: + variables: + - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing$/ + - $CI_COMMIT_TAG + dependencies: [] # wait for previous stages to finish + variables: + OBS_REPO: knot-resolver-latest + when: delayed + start_in: 3 minutes # give OBS build some time + tags: + - condor + +obs:build:all: + <<: *deploytest + only: + variables: + - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/ + - $CI_COMMIT_TAG + allow_failure: true + script: + - "osc results home:CZ-NIC:$OBS_REPO knot-resolver -w" + - version=$(sed 's/^v//' <(git describe --exact-match HEAD || git rev-parse --short HEAD) ) + - > # check version only for one (reliable) repo to avoid false negatives + ! 
osc ls -b home:CZ-NIC:$OBS_REPO knot-resolver Debian_9.0 x86_64 | \ + grep -E '(rpm|deb|tar\.xz)$' | grep -v $version || \ + (echo "ERROR: version mismatch"; exit 1) + - > + ! osc results home:CZ-NIC:$OBS_REPO knot-resolver --csv | \ + grep -Ev 'disabled|excluded|Rawhide|CentOS_8_EPEL' | grep -v 'succeeded' -q || \ + (echo "ERROR: build(s) failed"; exit 1) + +.distrotest: &distrotest + <<: *deploytest + # Description of the distrotest script workflow: + # 1. wait for OBS package build to complete + # 2. check the OBS build suceeded + # 3. set up some variables, dir names etc. + # 4. create a symlink with predictable name to export artifacts afterwards + # 5. create an HTCondor job and submit it to a HTCondor cluster + # 6. check exit code from condor, optionally display one of the logs and end the job with same exit code + script: + - "osc results home:CZ-NIC:$OBS_REPO knot-resolver -a x86_64 -r $DISTROTEST_REPO -w" + - > + osc results home:CZ-NIC:$OBS_REPO knot-resolver -a x86_64 -r $DISTROTEST_REPO --csv | grep 'succeeded|$' -q || \ + (echo "ERROR: build failed"; exit 1) + - export LABEL="gl$(date +%s)_$OBS_REPO" + - export COMMITDIR="/var/tmp/respdiff-jobs/$(git rev-parse --short HEAD)-$LABEL" + - export TESTDIR="$COMMITDIR/distrotest.$DISTROTEST_NAME" + - ln -s $COMMITDIR distrotest_commitdir + - sudo -u respdiff /var/opt/respdiff/contrib/job_manager/submit.py -w + -p $DISTROTEST_PRIORITY + $(sudo -u respdiff /var/opt/respdiff/contrib/job_manager/create.py + "$(git rev-parse --short HEAD)" -l $LABEL -t distrotest.$DISTROTEST_NAME + --obs-repo $OBS_REPO) + - export EXITCODE=$(cat $TESTDIR/j*_exitcode) + - if [[ "$EXITCODE" != "0" ]]; then cat $TESTDIR/j*_{vagrant.log.txt,stdout.txt}; fi + - exit $EXITCODE + after_script: + - 'cp -t . distrotest_commitdir/distrotest.$DISTROTEST_NAME/j* ||:' + artifacts: + when: always + expire_in: 1 week + paths: + - ./j* + retry: + max: 1 + when: + - script_failure + +obs:rocky8:x86_64: + <<: *distrotest + allow_failure: true + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: rocky8 + DISTROTEST_REPO: CentOS_8_EPEL + +obs:debian10:x86_64: + <<: *distrotest + only: + variables: + - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/ + - $CI_COMMIT_TAG + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: debian10 + DISTROTEST_REPO: Debian_10 + +obs:debian11:x86_64: + <<: *distrotest + only: + variables: + - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/ + - $CI_COMMIT_TAG + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: debian11 + DISTROTEST_REPO: Debian_11 + +obs:fedora35:x86_64: + <<: *distrotest + allow_failure: true + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: fedora35 + DISTROTEST_REPO: Fedora_35 + +obs:fedora36:x86_64: + <<: *distrotest + allow_failure: true + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: fedora36 + DISTROTEST_REPO: Fedora_36 + +obs:leap15:x86_64: + <<: *distrotest + allow_failure: true + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: leap15 + DISTROTEST_REPO: openSUSE_Leap_15.4 + +obs:ubuntu1804:x86_64: + <<: *distrotest + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: ubuntu1804 + DISTROTEST_REPO: xUbuntu_18.04 + +obs:ubuntu2004:x86_64: + <<: *distrotest + only: + variables: + - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/ + - $CI_COMMIT_TAG + variables: + OBS_REPO: knot-resolver-latest + 
DISTROTEST_NAME: ubuntu2004 + DISTROTEST_REPO: xUbuntu_20.04 + +obs:ubuntu2204:x86_64: + <<: *distrotest + allow_failure: true + variables: + OBS_REPO: knot-resolver-latest + DISTROTEST_NAME: ubuntu2204 + DISTROTEST_REPO: xUbuntu_22.04 + +.packagingtest: &packagingtest + stage: pkgtest + only: + refs: + - nightly@knot/knot-resolver + needs: [] + tags: + - dind + - amd64 + variables: + DISTRO: debian_10 + script: + - pytest -r fEsxX tests/packaging -k $DISTRO + +packaging:centos_8: + <<: *packagingtest + variables: + DISTRO: centos_8 + +packaging:centos_7: + <<: *packagingtest + variables: + DISTRO: centos_7 + +packaging:fedora_31: + <<: *packagingtest + variables: + DISTRO: fedora_31 + +packaging:fedora_32: + <<: *packagingtest + variables: + DISTRO: fedora_32 + +# }}} + +# docs: {{{ + +docs:build: + stage: deploy + needs: [] + script: + - git submodule update --init --recursive + - pip3 install -U -r doc/requirements.txt + - pip3 install -U sphinx_rtd_theme + - meson build_doc -Ddoc=enabled + - ninja -C build_doc doc + artifacts: + paths: + - doc/html + +# This job deploys the Knot Resolver documentation into a development +# environment, which may be found at +# <https://gitlab.nic.cz/knot/knot-resolver/-/environments/folders/docs-develop>. +# The actual URL is found in the `environment.url` property, where +# $CI_PROJECT_NAMESPACE will be "knot" on the upstream GitLab. +docs:develop: + stage: deploy + needs: + - docs:build + except: + refs: + - tags + script: + - echo "Propagating artifacts into develop environment" + artifacts: + paths: + - doc/html + environment: + name: docs-develop/$CI_COMMIT_REF_NAME + url: https://$CI_PROJECT_NAMESPACE.pages.nic.cz/-/knot-resolver/-/jobs/$CI_JOB_ID/artifacts/doc/html/index.html + +# This job deploys the Knot Resolver documentation into a release environment, +# which may be found at +# <https://gitlab.nic.cz/knot/knot-resolver/-/environments/folders/docs-release>. +# The actual URL is found in the `environment.url` property, where +# $CI_PROJECT_NAMESPACE will be "knot" on the upstream GitLab. +# The job requires the `DOCS_ENV_NAME` variable to be set by the user. +docs:release: + stage: deploy + needs: + - docs:build + only: + refs: + - tags + script: echo "Propagating artifacts into release environment" + artifacts: + paths: + - doc/html + environment: + name: docs-release/$CI_COMMIT_TAG + url: https://$CI_PROJECT_NAMESPACE.pages.nic.cz/-/knot-resolver/-/jobs/$CI_JOB_ID/artifacts/doc/html/index.html + +# This job pushes the Knot Resolver documentation into a new branch of the +# `websites/knot-resolver.cz` repository. 
+docs:website: + stage: deploy + needs: + - docs:build + when: manual + variables: + script: + - "SRC_COMMIT_REF=\"$CI_COMMIT_TAG$CI_COMMIT_BRANCH$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME\"" + - "git clone \"https://gitlab-ci-token:$WEBSITE_DOCS_CI_TOKEN@$CI_SERVER_HOST:$CI_SERVER_PORT/websites/knot-resolver.cz.git\" website" + - "cp --recursive --verbose \"doc/html\" \"website/content/documentation/$SRC_COMMIT_REF\"" + - cd website + - "git checkout -b \"docs/$SRC_COMMIT_REF\"" + - "git add \"content/documentation/$SRC_COMMIT_REF\"" + - "git commit -m \"docs: $SRC_COMMIT_REF\"" + - "git push --force --set-upstream origin \"docs/$SRC_COMMIT_REF\"" + +# }}} diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..aebacbe --- /dev/null +++ b/.gitmodules @@ -0,0 +1,9 @@ +[submodule "tests/integration/deckard"] + path = tests/integration/deckard + url = https://gitlab.nic.cz/knot/deckard.git +[submodule "modules/policy/lua-aho-corasick"] + path = modules/policy/lua-aho-corasick + url = https://gitlab.nic.cz/knot/3rdparty/lua-aho-corasick.git +[submodule "tests/config/tapered"] + path = tests/config/tapered + url = https://gitlab.nic.cz/knot/3rdparty/lua-tapered.git @@ -1,3 +1,16 @@ +Knot Resolver 5.7.3 (2024-05-30) +================================ + +Improvements +------------ +- stats: add separate metrics for IPv6 and IPv4 (!1544) + +Bugfixes +-------- +- fix NSEC3 records missing in answer for positive wildcard expansion + with the NSEC3 having over-limit iteration count (#910, !1550) + + Knot Resolver 5.7.2 (2024-03-27) ================================ diff --git a/ci/images/README.md b/ci/images/README.md deleted file mode 100644 index 3d09f60..0000000 --- a/ci/images/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Container images for CI - -## Image purpose - -### debian-11 - -The main image used by shared runners to execute most CI builds and tests. - -### debian-11-coverity - -A stripped down version of `debian-11`. It only contains build (not test) -dependencies of `kresd`. It also contains the `cov-build` tool for generating -inputs for [Coverity Scan](https://scan.coverity.com/). - -It is used by the `coverity` CI job to generate and send data to Coverity Scan -for analysis. - -To build this image, you need to retrieve the Coverity Scan token from the -dashboard and pass it to the `build.sh` script using the `COVERITY_SCAN_TOKEN` -environment variable, e.g.: - -``` -$ COVERITY_SCAN_TOKEN=the_secret_token ./build.sh debian-11-coverity -``` - -### debian-buster (10) - -Used to serve the same purpose as `debian-11`. As of 2022-03-09, it is still -used by some jobs (linters). - -## Maintenance - -The `ci/images/` directory contains utility scripts to build, push or update -the container images. - -``` -$ ./build.sh debian-11 # builds a debian-11 image locally -$ ./push.sh debian-11 # pushes the local image into target registry -$ ./update.sh debian-11 # utility wrapper that both builds and pushes the image -$ ./update.sh */ # use shell expansion of dirnames to update all images -``` - -By default, a branch of Knot DNS deemed to be stable is selected according to -the `vars.sh` file. 
To build an image for a different Knot DNS branch, set the -`KNOT_BRANCH` environment variable to the name of the branch, e.g.: - -``` -$ KNOT_BRANCH='3.2' ./update.sh debian-11 -``` diff --git a/ci/images/build.sh b/ci/images/build.sh deleted file mode 100755 index 1e9eabb..0000000 --- a/ci/images/build.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# build specified docker image - -CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source "${CURRENT_DIR}"/vars.sh "$@" -set -ex - -if [ -n "$COVERITY_SCAN_TOKEN" ]; then - SECRETS="$SECRETS --secret id=coverity-token,env=COVERITY_SCAN_TOKEN" -fi - -DOCKERFILE="$(realpath "${IMAGE}")/Dockerfile" - -cd "$CURRENT_DIR/../.." -export DOCKER_BUILDKIT=1 # Enables using secrets in docker-build -docker build \ - --pull \ - --no-cache \ - --tag "${FULL_NAME}" \ - --file "${DOCKERFILE}" \ - . \ - --build-arg KNOT_BRANCH=${KNOT_BRANCH} \ - $SECRETS diff --git a/ci/images/debian-11-coverity/Dockerfile b/ci/images/debian-11-coverity/Dockerfile deleted file mode 100644 index 1915614..0000000 --- a/ci/images/debian-11-coverity/Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -FROM debian:bullseye -MAINTAINER Knot Resolver <knot-resolver@labs.nic.cz> -# >= 3.0 needed because of --enable-xdp=yes -ARG KNOT_BRANCH=3.1 -ARG COVERITY_SCAN_PROJECT_NAME=CZ-NIC/knot-resolver -ENV DEBIAN_FRONTEND=noninteractive - -WORKDIR /root -CMD ["/bin/bash"] - -# generic cleanup -RUN apt-get update -qq - -# Knot and Knot Resolver dependencies -RUN apt-get install -y -qqq git make cmake pkg-config meson \ - build-essential bsdmainutils libtool autoconf libcmocka-dev \ - liburcu-dev libgnutls28-dev libedit-dev liblmdb-dev libcap-ng-dev libsystemd-dev \ - libelf-dev libmnl-dev libidn11-dev libuv1-dev \ - libluajit-5.1-dev lua-http libssl-dev libnghttp2-dev - -# LuaJIT binary for stand-alone scripting -RUN apt-get install -y -qqq luajit - -# build and install latest version of Knot DNS -RUN git clone --depth=1 --branch=$KNOT_BRANCH https://gitlab.nic.cz/knot/knot-dns.git /tmp/knot -WORKDIR /tmp/knot -RUN pwd -RUN autoreconf -if -RUN ./configure --prefix=/usr --enable-xdp=yes -RUN CFLAGS="-g" make -RUN make install -RUN ldconfig - -# curl and tar (for downloading Coverity tools and uploading logs) -RUN apt-get install -y curl tar - -RUN --mount=type=secret,id=coverity-token \ - curl -o /tmp/cov-analysis-linux64.tar.gz https://scan.coverity.com/download/cxx/linux64 \ - --form project=$COVERITY_SCAN_PROJECT_NAME --form token=$(cat /run/secrets/coverity-token) -RUN tar xfz /tmp/cov-analysis-linux64.tar.gz -RUN mv cov-analysis-linux64-* /opt/cov-analysis diff --git a/ci/images/debian-11/Dockerfile b/ci/images/debian-11/Dockerfile deleted file mode 100644 index 0241a6d..0000000 --- a/ci/images/debian-11/Dockerfile +++ /dev/null @@ -1,146 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -FROM debian:bullseye -MAINTAINER Knot Resolver <knot-resolver@labs.nic.cz> -# >= 3.0 needed because of --enable-xdp=yes -ARG KNOT_BRANCH=3.1 -ENV DEBIAN_FRONTEND=noninteractive - -WORKDIR /root -CMD ["/bin/bash"] - -# generic cleanup -RUN apt-get update -qq - -# Knot and Knot Resolver dependencies -RUN apt-get install -y -qqq git make cmake pkg-config meson \ - build-essential bsdmainutils libtool autoconf libcmocka-dev \ - liburcu-dev libgnutls28-dev libedit-dev liblmdb-dev libcap-ng-dev libsystemd-dev \ - libelf-dev libmnl-dev libidn11-dev libuv1-dev libjemalloc-dev \ - libluajit-5.1-dev lua-http libssl-dev libnghttp2-dev - -# 
Build and testing deps for Resolver's dnstap module (go stuff is just for testing) -RUN apt-get install -y -qqq \ - protobuf-c-compiler libprotobuf-c-dev libfstrm-dev \ - golang-any -COPY ./tests/dnstap /root/tests/dnstap -WORKDIR /root/tests/dnstap/src/dnstap-test -RUN go get . -WORKDIR /root - -# documentation dependencies -RUN apt-get install -y -qqq doxygen python3-sphinx python3-breathe python3-sphinx-rtd-theme - -# Python packages required for Deckard CI -# Python: grab latest versions from PyPi -# (Augeas binding in Debian packages are slow and buggy) -RUN apt-get install -y -qqq python3-pip wget augeas-tools -RUN pip3 install --upgrade pip -RUN pip3 install pylint -RUN pip3 install pep8 -# FIXME replace with dnspython >= 2.2.0 once released -RUN pip3 install git+https://github.com/bwelling/dnspython.git@72348d4698a8f8b209fbdf9e72738904ad31b930 -# tests/pytest dependencies: skip over broken versions -RUN pip3 install jinja2 'pytest != 6.0.0' pytest-html pytest-xdist pytest-forked -# apkg for packaging -RUN pip3 install apkg - -# packet capture tools for Deckard -RUN apt-get install --no-install-suggests --no-install-recommends -y -qqq tcpdump wireshark-common - -# Faketime for Deckard -RUN apt-get install -y -qqq faketime - -# C dependencies for python-augeas -RUN apt-get install -y -qqq libaugeas-dev libffi-dev -# Python dependencies for Deckard -RUN wget https://gitlab.nic.cz/knot/deckard/raw/master/requirements.txt -O /tmp/deckard-req.txt -RUN pip3 install -r /tmp/deckard-req.txt - -# build and install latest version of Knot DNS -RUN git clone --depth=1 --branch=$KNOT_BRANCH https://gitlab.nic.cz/knot/knot-dns.git /tmp/knot -WORKDIR /tmp/knot -RUN pwd -RUN autoreconf -if -RUN ./configure --prefix=/usr --enable-xdp=yes -RUN CFLAGS="-g" make -RUN make install -RUN ldconfig - -# Valgrind for kresd CI -RUN apt-get install valgrind -y -qqq -RUN wget https://github.com/LuaJIT/LuaJIT/raw/v2.1.0-beta3/src/lj.supp -O /lj.supp -# TODO: rebuild LuaJIT with Valgrind support - -# Lua lint for kresd CI -RUN apt-get install luarocks -y -qqq -RUN luarocks --lua-version 5.1 install luacheck - -# respdiff for kresd CI -RUN apt-get install lmdb-utils -y -qqq -RUN git clone --depth=1 https://gitlab.nic.cz/knot/respdiff /var/opt/respdiff -RUN pip3 install -r /var/opt/respdiff/requirements.txt - -# Python static analysis for respdiff -RUN pip3 install mypy -RUN pip3 install flake8 - -# Python requests for CI scripts -RUN pip3 install requests - -# docker-py for packaging tests -RUN pip3 install docker - -# Unbound for respdiff -RUN apt-get install unbound unbound-anchor -y -qqq -RUN printf "server:\n interface: 127.0.0.1@53535\n use-syslog: yes\n do-ip6: no\nremote-control:\n control-enable: no\n" >> /etc/unbound/unbound.conf - -# BIND for respdiff -RUN apt-get install bind9 -y -qqq -RUN printf '\nOPTIONS="-4 $OPTIONS"' >> /etc/default/bind9 -RUN printf 'options {\n directory "/var/cache/bind";\n listen-on port 53533 { 127.0.0.1; };\n listen-on-v6 port 53533 { ::1; };\n};\n' > /etc/bind/named.conf.options - -# PowerDNS Recursor for Deckard CI -RUN apt-get install pdns-recursor -y -qqq - -# dnsdist for Deckard CI -RUN apt-get install dnsdist -y -qqq - -# code coverage -RUN apt-get install -y -qqq lcov -RUN luarocks --lua-version 5.1 install luacov - -# LuaJIT binary for stand-alone scripting -RUN apt-get install -y -qqq luajit - -# clang for kresd CI, version updated as debian updates it -RUN apt-get install -y -qqq clang clang-tools clang-tidy - -# OpenBuildService CLI tool -RUN apt-get install -y osc 
- -# curl (API) -RUN apt-get install -y curl - -# configure knot-resolver-testing OBS repo for dependencies missing in Debian -RUN echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-testing/Debian_11/ /' > /etc/apt/sources.list.d/knot-resolver-testing.list -RUN wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-testing/Debian_11/Release.key -O Release.key -RUN APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add Release.key -RUN rm Release.key -RUN apt-get update -qq - -# packages from our knot-resolver-testing repo -RUN apt-get update -RUN apt-get install -y -qqq lua-psl - -# en_US.UTF-8 locale for scripts.update-authors.sh -RUN apt-get install -y -qqq locales -RUN sed -i "/en_US.UTF-8/ s/^#\(.*\)/\1/" /etc/locale.gen -RUN locale-gen - -# SonarCloud scanner -RUN wget -O /var/opt/wrapper.zip https://sonarcloud.io/static/cpp/build-wrapper-linux-x86.zip -RUN wget -O /var/opt/scanner.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-5.0.1.3006-linux.zip -RUN unzip -d /var/opt /var/opt/wrapper.zip -RUN unzip -d /var/opt /var/opt/scanner.zip -ENV PATH "$PATH:/var/opt/build-wrapper-linux-x86:/var/opt/sonar-scanner-5.0.1.3006-linux/bin" diff --git a/ci/images/debian-buster/Dockerfile b/ci/images/debian-buster/Dockerfile deleted file mode 100644 index 39f4327..0000000 --- a/ci/images/debian-buster/Dockerfile +++ /dev/null @@ -1,146 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -FROM debian:buster -MAINTAINER Knot Resolver <knot-resolver@labs.nic.cz> -# >= 3.0 needed because of --enable-xdp=yes -ARG KNOT_BRANCH=3.0 -ENV DEBIAN_FRONTEND=noninteractive - -WORKDIR /root -CMD ["/bin/bash"] - -# generic cleanup -RUN apt-get update -qq -# TODO: run upgrade once buster reaches a stable release -# RUN apt-get upgrade -y -qqq - -# Knot and Knot Resolver dependencies -RUN apt-get install -y -qqq git make cmake pkg-config meson \ - build-essential bsdmainutils libtool autoconf libcmocka-dev \ - liburcu-dev libgnutls28-dev libedit-dev liblmdb-dev libcap-ng-dev libsystemd-dev \ - libelf-dev libmnl-dev libidn11-dev libuv1-dev \ - libluajit-5.1-dev lua-http libssl-dev libnghttp2-dev - -# Build and testing deps for Resolver's dnstap module (go stuff is just for testing) -RUN apt-get install -y -qqq \ - protobuf-c-compiler libprotobuf-c-dev libfstrm-dev \ - golang-any -COPY ./tests/dnstap /root/tests/dnstap -WORKDIR /root/tests/dnstap/src/dnstap-test -RUN go get . 
-WORKDIR /root - -# documentation dependencies -RUN apt-get install -y -qqq doxygen python3-sphinx python3-breathe python3-sphinx-rtd-theme - -# Python packages required for Deckard CI -# Python: grab latest versions from PyPi -# (Augeas binding in Debian packages are slow and buggy) -RUN apt-get install -y -qqq python3-pip wget augeas-tools -RUN pip3 install --upgrade pip -RUN pip3 install pylint -RUN pip3 install pep8 -RUN pip3 install pytest-xdist -# tests/pytest dependencies: skip over broken versions -RUN pip3 install 'dnspython != 2.0.0' 'jinja2 == 2.11.3' 'pytest != 6.0.0' pytest-html pytest-xdist - -# packet capture tools for Deckard -RUN apt-get install --no-install-suggests --no-install-recommends -y -qqq tcpdump wireshark-common - -# Faketime for Deckard -RUN apt-get install -y -qqq faketime - -# C dependencies for python-augeas -RUN apt-get install -y -qqq libaugeas-dev libffi-dev -# Python dependencies for Deckard -RUN wget https://gitlab.nic.cz/knot/deckard/raw/master/requirements.txt -O /tmp/deckard-req.txt -RUN pip3 install -r /tmp/deckard-req.txt - -# build and install latest version of Knot DNS -RUN git clone --depth=1 --branch=$KNOT_BRANCH https://gitlab.nic.cz/knot/knot-dns.git /tmp/knot -WORKDIR /tmp/knot -RUN pwd -RUN autoreconf -if -RUN ./configure --prefix=/usr --enable-xdp=yes -RUN CFLAGS="-g" make -RUN make install -RUN ldconfig - -# Valgrind for kresd CI -RUN apt-get install valgrind -y -qqq -RUN wget https://github.com/LuaJIT/LuaJIT/raw/v2.1.0-beta3/src/lj.supp -O /lj.supp -# TODO: rebuild LuaJIT with Valgrind support - -# Lua lint for kresd CI -RUN apt-get install luarocks -y -qqq -RUN luarocks --lua-version 5.1 install luacheck - -# respdiff for kresd CI -RUN apt-get install lmdb-utils -y -qqq -RUN git clone --depth=1 https://gitlab.nic.cz/knot/respdiff /var/opt/respdiff -RUN pip3 install -r /var/opt/respdiff/requirements.txt - -# Python static analysis for respdiff -RUN pip3 install mypy -RUN pip3 install flake8 - -# Python requests for CI scripts -RUN pip3 install requests - -# docker-py for packaging tests -RUN pip3 install docker - -# Unbound for respdiff -RUN apt-get install unbound unbound-anchor -y -qqq -RUN printf "server:\n interface: 127.0.0.1@53535\n use-syslog: yes\n do-ip6: no\nremote-control:\n control-enable: no\n" >> /etc/unbound/unbound.conf - -# BIND for respdiff -RUN apt-get install bind9 -y -qqq -RUN printf '\nOPTIONS="-4 $OPTIONS"' >> /etc/default/bind9 -RUN printf 'options {\n directory "/var/cache/bind";\n listen-on port 53533 { 127.0.0.1; };\n listen-on-v6 port 53533 { ::1; };\n};\n' > /etc/bind/named.conf.options - -# PowerDNS Recursor for Deckard CI -RUN apt-get install pdns-recursor -y -qqq - -# code coverage -RUN apt-get install -y -qqq lcov -RUN luarocks --lua-version 5.1 install luacov - -# LuaJIT binary for stand-alone scripting -RUN apt-get install -y -qqq luajit - -# clang for kresd CI, version updated as debian updates it -RUN apt-get install -y -qqq clang clang-tools clang-tidy - -# OpenBuildService CLI tool -RUN apt-get install -y osc - -# curl (API) -RUN apt-get install -y curl - -# configure knot-resolver-testing OBS repo for dependencies missing in Debian -RUN echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-testing/Debian_10/ /' > /etc/apt/sources.list.d/knot-resolver-testing.list -RUN wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-testing/Debian_10/Release.key -O Release.key -RUN APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add Release.key -RUN rm 
Release.key -RUN apt-get update -qq - -# packages from our knot-resolver-testing repo -RUN apt-get install -y -qqq lua-http lua-psl - -# en_US.UTF-8 locale for scripts.update-authors.sh -RUN apt-get install -y -qqq locales -RUN sed -i "/en_US.UTF-8/ s/^#\(.*\)/\1/" /etc/locale.gen -RUN locale-gen - -# SonarCloud scanner -RUN wget -O /var/opt/wrapper.zip https://sonarcloud.io/static/cpp/build-wrapper-linux-x86.zip -RUN wget -O /var/opt/scanner.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.4.0.2170-linux.zip -RUN unzip -d /var/opt /var/opt/wrapper.zip -RUN unzip -d /var/opt /var/opt/scanner.zip -ENV PATH "$PATH:/var/opt/build-wrapper-linux-x86:/var/opt/sonar-scanner-4.4.0.2170-linux/bin" - -# let's get newer meson from backports -RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list -RUN apt-get update -qq -RUN apt-get -t buster-backports install -y -qqq meson diff --git a/ci/images/push.sh b/ci/images/push.sh deleted file mode 100755 index 75f5f87..0000000 --- a/ci/images/push.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# upload docker image into registry - -CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source "${CURRENT_DIR}"/vars.sh "$@" -set -ex - -docker push "${FULL_NAME}" diff --git a/ci/images/update.sh b/ci/images/update.sh deleted file mode 100755 index 7be5172..0000000 --- a/ci/images/update.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# build and upload docker image(s) into registry -# -# this is a simple wrapper around build.sh and update.sh -# -# to build & upload all images: ./update.sh */ - -if [[ $# -le 0 ]]; then - echo "usage: $0 IMAGE..." - exit 1 -fi -set -e - -for ARG in "$@" -do - IMAGE=${ARG%/} - echo "Building $IMAGE..." - ./build.sh $IMAGE - echo "Pushing $IMAGE..." - ./push.sh $IMAGE -done - diff --git a/ci/images/vars.sh b/ci/images/vars.sh deleted file mode 100755 index f2ea465..0000000 --- a/ci/images/vars.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# define common variables for image build scripts - -KNOT_BRANCH="${KNOT_BRANCH:-3.1}" - -REGISTRY="registry.nic.cz/knot/knot-resolver/ci" -IMAGE=$1 -if [ -z "${IMAGE}" ]; then - echo "image name not provided" - exit 1 -fi -TAG="knot-${KNOT_BRANCH}" -FULL_NAME="${REGISTRY}/${IMAGE}:${TAG}" diff --git a/ci/pkgtest.yaml b/ci/pkgtest.yaml index b7b87c3..2ac4d4c 100644 --- a/ci/pkgtest.yaml +++ b/ci/pkgtest.yaml @@ -119,7 +119,8 @@ nixos-unstable:pkgbuild: - docker - linux - ${PLATFORM} - image: nixos/nix + # https://github.com/NixOS/nix/issues/10648#issuecomment-2101993746 + image: docker.io/nixos/nix:latest-${PLATFORM} variables: NIX_PATH: nixpkgs=https://github.com/nixos/nixpkgs/archive/nixos-unstable.tar.gz diff --git a/contrib/dynarray.h b/contrib/dynarray.h deleted file mode 100644 index 7cbb686..0000000 --- a/contrib/dynarray.h +++ /dev/null @@ -1,112 +0,0 @@ -/* Copyright (C) CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz> - * SPDX-License-Identifier: GPL-3.0-or-later - */ - -/*! - * \brief Simple write-once allocation-optimal dynamic array. - * - * Include it into your .c file - * - * prefix - identifier prefix, e.g. ptr -> struct ptr_dynarray, ptr_dynarray_add(), ... - * ntype - data type to be stored. 
Let it be a number, pointer or small struct - * initial_capacity - how many data items will be allocated on stack and copied with assignment - * - * prefix_dynarray_add() - add a data item - * prefix_dynarray_fix() - call EVERYTIME the array is copied from some already invalid stack - * prefix_dynarray_free() - call EVERYTIME you dismiss all copies of the array - * - */ - -#include <stdlib.h> -#include <assert.h> - -#pragma once - -#define DYNARRAY_VISIBILITY_STATIC static -#define DYNARRAY_VISIBILITY_PUBLIC -#define DYNARRAY_VISIBILITY_LIBRARY __public__ - -#define dynarray_declare(prefix, ntype, visibility, initial_capacity) \ - typedef struct prefix ## _dynarray { \ - ssize_t capacity; \ - ssize_t size; \ - ntype *(*arr)(struct prefix ## _dynarray *dynarray); \ - ntype init[initial_capacity]; \ - ntype *_arr; \ - } prefix ## _dynarray_t; \ - \ - visibility ntype *prefix ## _dynarray_arr(prefix ## _dynarray_t *dynarray); \ - visibility void prefix ## _dynarray_add(prefix ## _dynarray_t *dynarray, \ - ntype const *to_add); \ - visibility void prefix ## _dynarray_free(prefix ## _dynarray_t *dynarray); - -#define dynarray_foreach(prefix, ntype, ptr, array) \ - for (ntype *ptr = prefix ## _dynarray_arr(&(array)); \ - ptr < prefix ## _dynarray_arr(&(array)) + (array).size; ptr++) - -#define dynarray_define(prefix, ntype, visibility) \ - \ - static void prefix ## _dynarray_free__(struct prefix ## _dynarray *dynarray) \ - { \ - if (dynarray->capacity > sizeof(dynarray->init) / sizeof(*dynarray->init)) { \ - free(dynarray->_arr); \ - } \ - } \ - \ - __attribute__((unused)) \ - visibility ntype *prefix ## _dynarray_arr(struct prefix ## _dynarray *dynarray) \ - { \ - assert(dynarray->size <= dynarray->capacity); \ - return (dynarray->capacity <= sizeof(dynarray->init) / sizeof(*dynarray->init) ? 
\ - dynarray->init : dynarray->_arr); \ - } \ - \ - static ntype *prefix ## _dynarray_arr_init__(struct prefix ## _dynarray *dynarray) \ - { \ - assert(dynarray->capacity == sizeof(dynarray->init) / sizeof(*dynarray->init)); \ - return dynarray->init; \ - } \ - \ - static ntype *prefix ## _dynarray_arr_arr__(struct prefix ## _dynarray *dynarray) \ - { \ - assert(dynarray->capacity > sizeof(dynarray->init) / sizeof(*dynarray->init)); \ - return dynarray->_arr; \ - } \ - \ - __attribute__((unused)) \ - visibility void prefix ## _dynarray_add(struct prefix ## _dynarray *dynarray, \ - ntype const *to_add) \ - { \ - if (dynarray->capacity < 0) { \ - return; \ - } \ - if (dynarray->capacity == 0) { \ - dynarray->capacity = sizeof(dynarray->init) / sizeof(*dynarray->init); \ - dynarray->arr = prefix ## _dynarray_arr_init__; \ - } \ - if (dynarray->size >= dynarray->capacity) { \ - ssize_t new_capacity = dynarray->capacity * 2 + 1; \ - ntype *new_arr = calloc(new_capacity, sizeof(ntype)); \ - if (new_arr == NULL) { \ - prefix ## _dynarray_free__(dynarray); \ - dynarray->capacity = dynarray->size = -1; \ - return; \ - } \ - if (dynarray->capacity > 0) { \ - memcpy(new_arr, prefix ## _dynarray_arr(dynarray), \ - dynarray->capacity * sizeof(ntype)); \ - } \ - prefix ## _dynarray_free__(dynarray); \ - dynarray->_arr = new_arr; \ - dynarray->capacity = new_capacity; \ - dynarray->arr = prefix ## _dynarray_arr_arr__; \ - } \ - prefix ## _dynarray_arr(dynarray)[dynarray->size++] = *to_add; \ - } \ - \ - __attribute__((unused)) \ - visibility void prefix ## _dynarray_free(struct prefix ## _dynarray *dynarray) \ - { \ - prefix ## _dynarray_free__(dynarray); \ - memset(dynarray, 0, sizeof(*dynarray)); \ - } diff --git a/contrib/dynarray.spdx b/contrib/dynarray.spdx deleted file mode 100644 index 02911c9..0000000 --- a/contrib/dynarray.spdx +++ /dev/null @@ -1,10 +0,0 @@ -SPDXVersion: SPDX-2.1 -DataLicense: CC0-1.0 -SPDXID: SPDXRef-DOCUMENT -DocumentName: knotdns-dynarray -DocumentNamespace: http://spdx.org/spdxdocs/spdx-v2.1-ce6423dd-ac6a-4e78-90c3-5cbdef1e252c - -PackageName: knotdns-dynarray -PackageDownloadLocation: git+https://gitlab.nic.cz/knot/knot-dns.git@48c8b4f38cf5f7bf505c79b56adf7580688f6d3d#src/contrib/dynarray.h -PackageOriginator: Organization: Knot DNS contributors -PackageLicenseDeclared: GPL-3.0-or-later diff --git a/daemon/bindings/net.c b/daemon/bindings/net.c index f1fa6f3..0075d0f 100644 --- a/daemon/bindings/net.c +++ b/daemon/bindings/net.c @@ -470,7 +470,7 @@ static int net_interfaces(lua_State *L) /* Hardware address. */ char *p = buf; for (int k = 0; k < sizeof(iface.phys_addr); ++k) { - sprintf(p, "%.2x:", (uint8_t)iface.phys_addr[k]); + (void)sprintf(p, "%.2x:", (uint8_t)iface.phys_addr[k]); p += 3; } p[-1] = '\0'; @@ -794,7 +794,7 @@ static int net_tls_client(lua_State *L) /* Sort the strings for easier comparison later. */ if (newcfg->ca_files.len) { qsort(&newcfg->ca_files.at[0], newcfg->ca_files.len, - sizeof(newcfg->ca_files.at[0]), strcmp_p); + array_member_size(newcfg->ca_files), strcmp_p); } } lua_pop(L, 1); @@ -834,7 +834,7 @@ static int net_tls_client(lua_State *L) /* Sort the raw strings for easier comparison later. 
*/ if (newcfg->pins.len) { qsort(&newcfg->pins.at[0], newcfg->pins.len, - sizeof(newcfg->pins.at[0]), cmp_sha256); + array_member_size(newcfg->pins), cmp_sha256); } } lua_pop(L, 1); @@ -1042,7 +1042,11 @@ static int net_tls_sticket_secret_file(lua_State *L) STR(net_tls_sticket_MIN_SECRET_LEN) " bytes", file_name); } - fclose(fp); + if (fclose(fp) == EOF) { + lua_error_p(L, + "net.tls_sticket_secret_file - reading of file '%s' failed", + file_name); + } struct network *net = &the_worker->engine->net; diff --git a/daemon/engine.c b/daemon/engine.c index 1d387ea..8c00a5b 100644 --- a/daemon/engine.c +++ b/daemon/engine.c @@ -52,9 +52,6 @@ #define TCP_BACKLOG_DEFAULT 128 #endif -/* Cleanup engine state every 5 minutes */ -const size_t CLEANUP_TIMER = 5*60*1000; - /* Execute byte code */ #define l_dobytecode(L, arr, len, name) \ (luaL_loadbuffer((L), (arr), (len), (name)) || lua_pcall((L), 0, LUA_MULTRET, 0)) @@ -223,7 +220,6 @@ static int l_log_groups(lua_State *L) goto bad_call; kr_log_group_reset(); - int idx = 1; lua_pushnil(L); while (lua_next(L, 1) != 0) { const char *grp_str = lua_tostring(L, -1); @@ -237,7 +233,6 @@ static int l_log_groups(lua_State *L) kr_log_warning(SYSTEM, "WARNING: unknown log group '%s'\n", lua_tostring(L, -1)); } - ++idx; lua_pop(L, 1); } } @@ -611,7 +606,7 @@ int init_lua(struct engine *engine) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wformat" /* %1$ is not in C standard */ /* Save original package.path to package._path */ - snprintf(l_paths, MAXPATHLEN - 1, + (void)snprintf(l_paths, MAXPATHLEN - 1, "if package._path == nil then package._path = package.path end\n" "package.path = '%1$s/?.lua;%1$s/?/init.lua;'..package._path\n" "if package._cpath == nil then package._cpath = package.cpath end\n" diff --git a/daemon/io.c b/daemon/io.c index 6d548d7..9299ff2 100644 --- a/daemon/io.c +++ b/daemon/io.c @@ -151,7 +151,7 @@ static int family_to_freebind_option(sa_family_t sa_family, int *level, int *nam #define LOG_NO_FB kr_log_error(NETWORK, "your system does not support 'freebind', " \ "please remove it from your configuration\n") switch (sa_family) { - case AF_INET: + case AF_INET: // NOLINT(bugprone-branch-clone): The branches are only cloned for specific macro configs *level = IPPROTO_IP; #if defined(IP_FREEBIND) *name = IP_FREEBIND; @@ -510,7 +510,7 @@ static ssize_t tls_send(const uint8_t *buf, const size_t len, struct session *se } #endif -static void _tcp_accept(uv_stream_t *master, int status, bool tls, bool http) +static void tcp_accept_internal(uv_stream_t *master, int status, bool tls, bool http) { if (status != 0) { return; @@ -631,18 +631,18 @@ static void _tcp_accept(uv_stream_t *master, int status, bool tls, bool http) static void tcp_accept(uv_stream_t *master, int status) { - _tcp_accept(master, status, false, false); + tcp_accept_internal(master, status, false, false); } static void tls_accept(uv_stream_t *master, int status) { - _tcp_accept(master, status, true, false); + tcp_accept_internal(master, status, true, false); } #if ENABLE_DOH2 static void https_accept(uv_stream_t *master, int status) { - _tcp_accept(master, status, true, true); + tcp_accept_internal(master, status, true, true); } #endif @@ -834,16 +834,25 @@ void io_tty_process_input(uv_stream_t *stream, ssize_t nread, const uv_buf_t *bu len_s = 0; } uint32_t len_n = htonl(len_s); - fwrite(&len_n, sizeof(len_n), 1, out); - if (len_s > 0) - fwrite(message, len_s, 1, out); + if (fwrite(&len_n, sizeof(len_n), 1, out) != 1) + goto finish; + if (len_s > 0) { + if 
(fwrite(message, len_s, 1, out) != 1) + goto finish; + } } else { - if (message) - fprintf(out, "%s", message); - if (message || !args->quiet) - fprintf(out, "\n"); - if (!args->quiet) - fprintf(out, "> "); + if (message) { + if (fprintf(out, "%s", message) < 0) + goto finish; + } + if (message || !args->quiet) { + if (fprintf(out, "\n") < 0) + goto finish; + } + if (!args->quiet) { + if (fprintf(out, "> ") < 0) + goto finish; + } } /* Duplicate command and output to logs */ @@ -865,7 +874,7 @@ void io_tty_process_input(uv_stream_t *stream, ssize_t nread, const uv_buf_t *bu finish: /* Close if redirected */ if (stream_fd != STDIN_FILENO) { - fclose(out); + (void)fclose(out); } } diff --git a/daemon/main.c b/daemon/main.c index 41a55ad..a346a5c 100644 --- a/daemon/main.c +++ b/daemon/main.c @@ -425,9 +425,9 @@ int main(int argc, char **argv) { kr_log_group_reset(); if (setvbuf(stdout, NULL, _IONBF, 0) || setvbuf(stderr, NULL, _IONBF, 0)) { - kr_log_error(SYSTEM, "failed to to set output buffering (ignored): %s\n", + kr_log_error(SYSTEM, "failed to set output buffering (ignored): %s\n", strerror(errno)); - fflush(stderr); + (void)fflush(stderr); } if (strcmp("linux", OPERATING_SYSTEM) != 0) kr_log_warning(SYSTEM, "Knot Resolver is tested on Linux, other platforms might exhibit bugs.\n" @@ -490,7 +490,7 @@ int main(int argc, char **argv) if (ret) { kr_log_error(SYSTEM, "failed to get or set file-descriptor limit: %s\n", strerror(errno)); - } else if (rlim.rlim_cur < 512*1024) { + } else if (rlim.rlim_cur < (rlim_t)512 * 1024) { kr_log_warning(SYSTEM, "warning: hard limit for number of file-descriptors is only %ld but recommended value is 524288\n", (long)rlim.rlim_cur); } diff --git a/daemon/meson.build b/daemon/meson.build index 68a2646..8446b82 100644 --- a/daemon/meson.build +++ b/daemon/meson.build @@ -65,4 +65,5 @@ kresd = executable( export_dynamic: true, install: true, install_dir: get_option('sbindir'), + install_rpath: rpath, ) diff --git a/daemon/proxyv2.c b/daemon/proxyv2.c index aedbb91..8197003 100644 --- a/daemon/proxyv2.c +++ b/daemon/proxyv2.c @@ -279,6 +279,7 @@ ssize_t proxy_process_header(struct proxy_result *out, struct session *s, &addr->ipv6_addr.dst_addr, sizeof(out->dst_addr.ip6.sin6_addr.s6_addr)); break; + default:; /* Keep zero from initializer. 
*/ } /* Process additional information */ @@ -287,7 +288,7 @@ ssize_t proxy_process_header(struct proxy_result *out, struct session *s, case TLV_TYPE_SSL: out->has_tls = true; break; - /* TODO: add more TLV types if needed */ + default:; /* Ignore others - add more if needed */ } } diff --git a/daemon/session.c b/daemon/session.c index ed0ff68..91d3c39 100644 --- a/daemon/session.c +++ b/daemon/session.c @@ -13,7 +13,7 @@ #include "daemon/proxyv2.h" #include "lib/generic/queue.h" -#define TLS_CHUNK_SIZE (16 * 1024) +#define TLS_CHUNK_SIZE ((size_t)16 * 1024) /* Initial max frame size: https://tools.ietf.org/html/rfc7540#section-6.5.2 */ #define HTTP_MAX_FRAME_SIZE 16384 diff --git a/daemon/tls.c b/daemon/tls.c index 2e1631b..0ab3968 100644 --- a/daemon/tls.c +++ b/daemon/tls.c @@ -23,7 +23,7 @@ #include "daemon/worker.h" #include "daemon/session.h" -#define EPHEMERAL_CERT_EXPIRATION_SECONDS_RENEW_BEFORE (60*60*24*7) +#define EPHEMERAL_CERT_EXPIRATION_SECONDS_RENEW_BEFORE ((time_t)60*60*24*7) #define GNUTLS_PIN_MIN_VERSION 0x030400 #define VERBOSE_MSG(cl_side, ...)\ @@ -659,7 +659,7 @@ static int str_replace(char **where_ptr, const char *with) return kr_ok(); } -static time_t _get_end_entity_expiration(gnutls_certificate_credentials_t creds) +static time_t get_end_entity_expiration(gnutls_certificate_credentials_t creds) { gnutls_datum_t data; gnutls_x509_crt_t cert = NULL; @@ -731,7 +731,7 @@ int tls_certificate_set(struct network *net, const char *tls_cert, const char *t return kr_error(EINVAL); } /* record the expiration date: */ - tls_credentials->valid_until = _get_end_entity_expiration(tls_credentials->credentials); + tls_credentials->valid_until = get_end_entity_expiration(tls_credentials->credentials); /* Exchange the x509 credentials */ struct tls_credentials *old_credentials = net->tls_credentials; diff --git a/daemon/tls.h b/daemon/tls.h index af1f5c9..c30444b 100644 --- a/daemon/tls.h +++ b/daemon/tls.h @@ -30,7 +30,7 @@ * So it takes 2 RTT. * As we use session tickets, there are additional messages, add one RTT mode. */ - #define TLS_MAX_HANDSHAKE_TIME (KR_CONN_RTT_MAX * 3) + #define TLS_MAX_HANDSHAKE_TIME (KR_CONN_RTT_MAX * (uint64_t)3) /** Transport session (opaque). */ struct session; diff --git a/daemon/tls_ephemeral_credentials.c b/daemon/tls_ephemeral_credentials.c index ff4682f..2b928fa 100644 --- a/daemon/tls_ephemeral_credentials.c +++ b/daemon/tls_ephemeral_credentials.c @@ -17,19 +17,19 @@ #define EPHEMERAL_PRIVKEY_FILENAME "ephemeral_key.pem" #define INVALID_HOSTNAME "dns-over-tls.invalid" -#define EPHEMERAL_CERT_EXPIRATION_SECONDS (60*60*24*90) +#define EPHEMERAL_CERT_EXPIRATION_SECONDS ((time_t)60*60*24*90) /* This is an attempt to grab an exclusive, advisory, non-blocking * lock based on a filename. At the moment it's POSIX-only, but it * should be abstract enough of an interface to make an implementation * for non-posix systems if anyone cares. 
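A minimal, self-contained sketch of this flock()-based advisory-locking pattern (names are illustrative only; the resolver's actual flock() call sits outside the visible hunk):

    #include <fcntl.h>
    #include <sys/file.h>
    #include <unistd.h>

    /* Sketch: take an exclusive advisory lock tied to a lock file.  The lock
     * is owned by the open descriptor and released by unlocking and closing it. */
    static int sketch_lock_filename(const char *fname)
    {
        int fd = open(fname, O_RDONLY | O_CREAT, 0400);
        if (fd == -1)
            return -1;
        if (flock(fd, LOCK_EX) == -1) {  /* blocks until the lock is granted */
            close(fd);
            return -1;
        }
        return fd;  /* keep it open; hand it to sketch_unlock() later */
    }

    static void sketch_unlock(int *fd)
    {
        if (fd != NULL && *fd != -1) {
            flock(*fd, LOCK_UN);
            close(*fd);
            *fd = -1;
        }
    }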
*/ typedef int lock_t; -static bool _lock_is_invalid(lock_t lock) +static bool lock_is_invalid(lock_t lock) { return lock == -1; } /* a blocking lock on a given filename */ -static lock_t _lock_filename(const char *fname) +static lock_t lock_filename(const char *fname) { lock_t lockfd = open(fname, O_RDONLY|O_CREAT, 0400); if (lockfd == -1) @@ -41,9 +41,9 @@ static lock_t _lock_filename(const char *fname) } return lockfd; /* for cleanup later */ } -static void _lock_unlock(lock_t *lock, const char *fname) +static void lock_unlock(lock_t *lock, const char *fname) { - if (lock && !_lock_is_invalid(*lock)) { + if (lock && !lock_is_invalid(*lock)) { flock(*lock, LOCK_UN); close(*lock); *lock = -1; @@ -61,8 +61,8 @@ static gnutls_x509_privkey_t get_ephemeral_privkey (void) /* Take a lock to ensure that two daemons started concurrently * with a shared cache don't both create the same privkey: */ - lock = _lock_filename(EPHEMERAL_PRIVKEY_FILENAME ".lock"); - if (_lock_is_invalid(lock)) { + lock = lock_filename(EPHEMERAL_PRIVKEY_FILENAME ".lock"); + if (lock_is_invalid(lock)) { kr_log_error(TLS, "unable to lock lockfile " EPHEMERAL_PRIVKEY_FILENAME ".lock\n"); goto done; } @@ -91,7 +91,7 @@ static gnutls_x509_privkey_t get_ephemeral_privkey (void) } data.size = stat.st_size; bytes_read = read(datafd, data.data, stat.st_size); - if (bytes_read != stat.st_size) { + if (bytes_read < 0 || bytes_read != stat.st_size) { kr_log_error(TLS, "unable to read ephemeral private key\n"); goto bad_data; } @@ -141,7 +141,7 @@ static gnutls_x509_privkey_t get_ephemeral_privkey (void) } } done: - _lock_unlock(&lock, EPHEMERAL_PRIVKEY_FILENAME ".lock"); + lock_unlock(&lock, EPHEMERAL_PRIVKEY_FILENAME ".lock"); if (datafd != -1) { close(datafd); } @@ -219,7 +219,7 @@ struct tls_credentials * tls_get_ephemeral_credentials(struct engine *engine) if ((privkey = get_ephemeral_privkey()) == NULL) { goto failure; } - if ((cert = get_ephemeral_cert(privkey, creds->ephemeral_servicename, now - 60*15, creds->valid_until)) == NULL) { + if ((cert = get_ephemeral_cert(privkey, creds->ephemeral_servicename, now - ((time_t)60 * 15), creds->valid_until)) == NULL) { goto failure; } if ((err = gnutls_certificate_set_x509_key(creds->credentials, &cert, 1, privkey)) < 0) { diff --git a/daemon/tls_session_ticket-srv.c b/daemon/tls_session_ticket-srv.c index b198903..26d4186 100644 --- a/daemon/tls_session_ticket-srv.c +++ b/daemon/tls_session_ticket-srv.c @@ -188,7 +188,7 @@ static void tst_key_check(uv_timer_t *timer, bool force_update) const uint64_t remain_ms = (tv_sec_next - now.tv_sec - 1) * (uint64_t)1000 + ms_until_second + 1; /* ^ +1 because we don't want to wake up half a millisecond before the epoch! */ - if (kr_fails_assert(remain_ms < (TST_KEY_LIFETIME + 1 /*rounding tolerance*/) * 1000)) + if (kr_fails_assert(remain_ms < ((uint64_t)TST_KEY_LIFETIME + 1 /*rounding tolerance*/) * 1000)) return; kr_log_debug(TLS, "session ticket: epoch %"PRIu64 ", scheduling rotation check in %"PRIu64" ms\n", diff --git a/daemon/udp_queue.c b/daemon/udp_queue.c index 1f8ff39..7ed600a 100644 --- a/daemon/udp_queue.c +++ b/daemon/udp_queue.c @@ -110,11 +110,11 @@ void udp_queue_push(int fd, struct kr_request *req, struct qr_task *task) /* Get a valid correct queue. 
*/ if (fd >= state.udp_queues_len) { const int new_len = fd + 1; - state.udp_queues = realloc(state.udp_queues, - sizeof(state.udp_queues[0]) * new_len); + state.udp_queues = realloc(state.udp_queues, // NOLINT(bugprone-suspicious-realloc-usage): we just abort() below, so it's fine + sizeof(state.udp_queues[0]) * new_len); // NOLINT(bugprone-sizeof-expression): false-positive if (!state.udp_queues) abort(); memset(state.udp_queues + state.udp_queues_len, 0, - sizeof(state.udp_queues[0]) * (new_len - state.udp_queues_len)); + sizeof(state.udp_queues[0]) * (new_len - state.udp_queues_len)); // NOLINT(bugprone-sizeof-expression): false-positive state.udp_queues_len = new_len; } if (unlikely(state.udp_queues[fd] == NULL)) diff --git a/daemon/worker.c b/daemon/worker.c index 8b6b49e..12c08f1 100644 --- a/daemon/worker.c +++ b/daemon/worker.c @@ -195,7 +195,7 @@ static inline struct mempool *pool_borrow(struct worker_ctx *worker) { /* The implementation used to have extra caching layer, * but it didn't work well. Now it's very simple. */ - return mp_new(16 * 1024); + return mp_new((size_t)16 * 1024); } /** Return a mempool. */ static inline void pool_release(struct worker_ctx *worker, struct mempool *mp) diff --git a/daemon/zimport.c b/daemon/zimport.c index af21a15..39799b6 100644 --- a/daemon/zimport.c +++ b/daemon/zimport.c @@ -98,7 +98,7 @@ static int key_get(char buf[KEY_LEN], const knot_dname_t *name, char *lf = (char *)knot_dname_lf(name, (uint8_t *)buf); if (kr_fails_assert(lf && key_p)) return kr_error(EINVAL); - int len = lf[0]; + int len = (unsigned char)lf[0]; lf++; // point to start of data *key_p = lf; // Check that LF is right-aligned to KNOT_DNAME_MAXLEN in buf. @@ -282,7 +282,7 @@ do_digest: // hexdump the hash for logging char hash_str[digs[i].size * 2 + 1]; for (ssize_t j = 0; j < digs[i].size; ++j) - sprintf(hash_str + 2*j, "%02x", digs[i].data[j]); + (void)sprintf(hash_str + 2*j, "%02x", digs[i].data[j]); if (!z_import->digests[i].expected) { kr_log_error(PREFILL, "no ZONEMD found; computed hash: %s\n", @@ -560,7 +560,7 @@ int zi_zone_import(const zi_config_t config) if (kr_fails_assert(c && c->zone_file)) return kr_error(EINVAL); - knot_mm_t *pool = mm_ctx_mempool2(1024 * 1024); + knot_mm_t *pool = mm_ctx_mempool2((size_t)1024 * 1024); zone_import_ctx_t *z_import = mm_calloc(pool, 1, sizeof(*z_import)); if (!z_import) return kr_error(ENOMEM); z_import->pool = pool; diff --git a/lib/cache/api.c b/lib/cache/api.c index f71a8d0..7327903 100644 --- a/lib/cache/api.c +++ b/lib/cache/api.c @@ -517,7 +517,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry, goto return_needs_pkt; const knot_dname_t *encloser = rr->owner; /**< the closest encloser name */ for (int i = 0; i < wild_labels; ++i) { - encloser = knot_wire_next_label(encloser, NULL); + encloser = knot_dname_next_label(encloser); } /* Construct the key under which RRs will be stored, diff --git a/lib/cache/peek.c b/lib/cache/peek.c index e1901ac..f0bb79c 100644 --- a/lib/cache/peek.c +++ b/lib/cache/peek.c @@ -174,6 +174,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt) knot_db_val_bound(v), new_ttl); return ret == kr_ok() ? KR_STATE_DONE : ctx->state; } + default:; // Continue below } /* We have to try proving from NSEC*. */ @@ -359,7 +360,7 @@ static int peek_encloser( /** Name of the closest (provable) encloser. 
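The udp_queue.c hunk above grows a per-fd table with realloc() and zero-fills only the newly added tail. The same pattern in isolation, with hypothetical names:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch: make sure table[fd] is a valid (possibly NULL) slot,
     * zero-initializing any newly added entries. */
    static int ensure_slot(void ***table, int *len, int fd)
    {
        if (fd < *len)
            return 0;
        const int new_len = fd + 1;
        void **bigger = realloc(*table, sizeof(bigger[0]) * new_len);
        if (bigger == NULL)
            return -1;  /* the original code abort()s here instead */
        memset(bigger + *len, 0, sizeof(bigger[0]) * (new_len - *len));
        *table = bigger;
        *len = new_len;
        return 0;
    }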
*/ const knot_dname_t *clencl_name = qry->sname; for (int l = sname_labels; l > clencl_labels; --l) - clencl_name = knot_wire_next_label(clencl_name, NULL); + clencl_name = knot_dname_next_label(clencl_name); /**** 3. source of synthesis checks, in case the next closer name was covered. **** 3a. We want to query for NSEC* of source of synthesis (SS) or its diff --git a/lib/dnssec.c b/lib/dnssec.c index 12b8f20..eb4b33b 100644 --- a/lib/dnssec.c +++ b/lib/dnssec.c @@ -362,7 +362,7 @@ static int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx, const int covered_labels = knot_dname_labels(covered->owner, NULL) - knot_dname_is_wildcard(covered->owner); - for (uint16_t i = 0; i < vctx->rrs->len; ++i) { + for (size_t i = 0; i < vctx->rrs->len; ++i) { /* Consider every RRSIG that matches and comes from the same query. */ const knot_rrset_t *rrsig = vctx->rrs->at[i]->rr; const bool ok = vctx->rrs->at[i]->qry_uid == vctx->qry_uid diff --git a/lib/dnssec/nsec.c b/lib/dnssec/nsec.c index 8b17247..be34d92 100644 --- a/lib/dnssec/nsec.c +++ b/lib/dnssec/nsec.c @@ -16,7 +16,7 @@ #include "lib/defines.h" #include "lib/dnssec/nsec.h" #include "lib/utils.h" -#include "resolve.h" +#include "lib/resolve.h" int kr_nsec_children_in_zone_check(const uint8_t *bm, uint16_t bm_size) { @@ -81,15 +81,13 @@ static int dname_cmp(const knot_dname_t *d1, const knot_dname_t *d2) dname_reverse(d1, d1_len, d1_rev_arr); dname_reverse(d2, d2_len, d2_rev_arr); - int res = 0; - while (res == 0 && d1_rev != NULL) { - res = lf_cmp(d1_rev, d2_rev); - d1_rev = knot_wire_next_label(d1_rev, NULL); - d2_rev = knot_wire_next_label(d2_rev, NULL); - } - - kr_require(res != 0 || d2_rev == NULL); - return res; + do { + int res = lf_cmp(d1_rev, d2_rev); + if (res != 0 || d1_rev[0] == '\0') + return res; + d1_rev = knot_dname_next_label(d1_rev); + d2_rev = knot_dname_next_label(d2_rev); + } while (true); } @@ -251,7 +249,7 @@ int kr_nsec_negative(const ranked_rr_array_t *rrrs, uint32_t qry_uid, ssynth[1] = '*'; const knot_dname_t *clencl = sname; for (int l = sname_labels; l > clencl_labels; --l) - clencl = knot_wire_next_label(clencl, NULL); + clencl = knot_dname_next_label(clencl); (void)!!knot_dname_store(&ssynth[2], clencl); // Try to (dis)prove the source of synthesis by a covering or matching NSEC. 
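A pattern that recurs throughout this patch: knot_wire_next_label(name, NULL) is replaced by knot_dname_next_label(name), each call dropping the leftmost label. A hedged sketch of the idiom (it assumes libknot >= 3.4, or the compatibility wrapper this patch adds to lib/utils.h):

    #include <libknot/dname.h>

    /* Sketch: strip up to `count` leading labels, stopping at the root name. */
    static const knot_dname_t *drop_labels(const knot_dname_t *name, int count)
    {
        while (count-- > 0 && name[0] != '\0')
            name = knot_dname_next_label(name);
        return name;
    }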
diff --git a/lib/dnssec/nsec3.c b/lib/dnssec/nsec3.c index 4199f25..4ff2750 100644 --- a/lib/dnssec/nsec3.c +++ b/lib/dnssec/nsec3.c @@ -143,7 +143,7 @@ static int closest_encloser_match(int *flags, const knot_rrset_t *nsec3, goto fail; } - const knot_dname_t *encloser = knot_wire_next_label(name, NULL); + const knot_dname_t *encloser = knot_dname_next_label(name); *skipped = 1; /* Avoid doing too much work on SHA1, mitigating: @@ -154,7 +154,7 @@ static int closest_encloser_match(int *flags, const knot_rrset_t *nsec3, const int max_labels = knot_dname_labels(nsec3->owner, NULL) - 1 + kr_nsec3_max_depth(&params); for (int l = knot_dname_labels(encloser, NULL); l > max_labels; --l) { - encloser = knot_wire_next_label(encloser, NULL); + encloser = knot_dname_next_label(encloser); ++(*skipped); } @@ -174,7 +174,7 @@ static int closest_encloser_match(int *flags, const knot_rrset_t *nsec3, if (!encloser[0]) break; - encloser = knot_wire_next_label(encloser, NULL); + encloser = knot_dname_next_label(encloser); ++(*skipped); } @@ -404,7 +404,7 @@ static int closest_encloser_proof(const knot_pkt_t *pkt, for (unsigned j = 0; j < skipped; ++j) { if (kr_fails_assert(next_closer[0])) return kr_error(EINVAL); - next_closer = knot_wire_next_label(next_closer, NULL); + next_closer = knot_dname_next_label(next_closer); } for (unsigned j = 0; j < sec->count; ++j) { const knot_rrset_t *rrset_j = knot_pkt_rr(sec, j); @@ -425,7 +425,7 @@ static int closest_encloser_proof(const knot_pkt_t *pkt, if ((flags & FLG_CLOSEST_PROVABLE_ENCLOSER) && (flags & FLG_NAME_COVERED) && next_closer) { if (encloser_name && next_closer[0]) - *encloser_name = knot_wire_next_label(next_closer, NULL); + *encloser_name = knot_dname_next_label(next_closer); if (matching_encloser_nsec3) *matching_encloser_nsec3 = matching; if (covering_next_nsec3) @@ -569,7 +569,7 @@ int kr_nsec3_wildcard_answer_response_check(const knot_pkt_t *pkt, knot_section_ for (int i = 0; i < trim_to_next; ++i) { if (kr_fails_assert(sname[0])) return kr_error(EINVAL); - sname = knot_wire_next_label(sname, NULL); + sname = knot_dname_next_label(sname); } int flags = 0; diff --git a/lib/dnssec/signature.c b/lib/dnssec/signature.c index aadb5cb..12ed09e 100644 --- a/lib/dnssec/signature.c +++ b/lib/dnssec/signature.c @@ -208,7 +208,7 @@ static int sign_ctx_add_records(dnssec_sign_ctx_t *ctx, const knot_rrset_t *cove for (int j = 0; j < trim_labels; ++j) { if (kr_fails_assert(beginp[0])) return kr_error(EINVAL); - beginp = (uint8_t *) knot_wire_next_label(beginp, NULL); + beginp = (uint8_t *) knot_dname_next_label(beginp); if (kr_fails_assert(beginp)) return kr_error(EFAULT); } diff --git a/lib/dnssec/ta.c b/lib/dnssec/ta.c index becf7d8..13659c1 100644 --- a/lib/dnssec/ta.c +++ b/lib/dnssec/ta.c @@ -28,9 +28,9 @@ const knot_dname_t * kr_ta_closest(const struct kr_context *ctx, const knot_dnam kr_require(ctx && name); if (type == KNOT_RRTYPE_DS && name[0] != '\0') { /* DS is parent-side record, so the parent name needs to be covered.
*/ - name = knot_wire_next_label(name, NULL); + name = knot_dname_next_label(name); } - while (name) { + do { struct kr_context *ctx_nc = (struct kr_context *)/*const-cast*/ctx; if (kr_ta_get(ctx_nc->trust_anchors, name)) { return name; @@ -38,9 +38,12 @@ const knot_dname_t * kr_ta_closest(const struct kr_context *ctx, const knot_dnam if (kr_ta_get(ctx_nc->negative_anchors, name)) { return NULL; } - name = knot_wire_next_label(name, NULL); - } - return NULL; + if (name[0] == '\0') { + return NULL; + } else { + name = knot_dname_next_label(name); + } + } while (true); } /* @internal Create DS from DNSKEY, caller MUST free dst if successful. */ diff --git a/lib/generic/array.h b/lib/generic/array.h index 9f35118..9bea546 100644 --- a/lib/generic/array.h +++ b/lib/generic/array.h @@ -113,7 +113,7 @@ static inline void array_std_free(void *baton, void *p) * Mempool usage: pass kr_memreserve and a knot_mm_t* . * @return 0 if success, <0 on failure */ #define array_reserve_mm(array, n, reserve, baton) \ - (reserve)((baton), (void **) &(array).at, sizeof((array).at[0]), (n), &(array).cap) + (reserve)((baton), (void **) &(array).at, array_member_size((array)), (n), &(array).cap) /** * Push value at the end of the array, resize it if necessary. @@ -122,9 +122,9 @@ static inline void array_std_free(void *baton, void *p) * @return element index on success, <0 on failure */ #define array_push_mm(array, val, reserve, baton) \ - (int)((array).len < (array).cap ? ((array).at[(array).len] = val, (array).len++) \ + (int)((array).len < (array).cap ? ((array).at[(array).len] = (val), (array).len++) \ : (array_reserve_mm(array, ((array).cap + 1), reserve, baton) < 0 ? -1 \ - : ((array).at[(array).len] = val, (array).len++))) + : ((array).at[(array).len] = (val), (array).len++))) /** * Push value at the end of the array, resize it if necessary (plain malloc/free). @@ -152,6 +152,12 @@ static inline void array_std_free(void *baton, void *p) * @warning Undefined if the array is empty. */ #define array_tail(array) \ - (array).at[(array).len - 1] + (array).at[(array).len - 1] + +/** + * Return the size of a singular member in the array. + */ +#define array_member_size(array) \ + (sizeof((array).at[0])) // NOLINT(bugprone-sizeof-expression): usually a false-positive /** @} */ diff --git a/lib/generic/lru.h b/lib/generic/lru.h index 448c1b9..1c1dd81 100644 --- a/lib/generic/lru.h +++ b/lib/generic/lru.h @@ -130,7 +130,10 @@ #define lru_get_new(table, key_, len_, is_new) \ (__typeof__((table)->pdata_t)) \ lru_get_impl(&(table)->lru, (key_), (len_), \ - sizeof(*(table)->pdata_t), true, is_new) + lru_member_size((table)), true, is_new) + +#define lru_member_size(table) \ + (sizeof(*(table)->pdata_t)) // NOLINT(bugprone-sizeof-expression): usually a false-positive /** * @brief Apply a function to every item in LRU. diff --git a/lib/generic/queue.c b/lib/generic/queue.c index 5bed153..29609dd 100644 --- a/lib/generic/queue.c +++ b/lib/generic/queue.c @@ -62,7 +62,7 @@ void * queue_push_impl(struct queue *q) if (t->begin * 2 >= t->cap) { /* Utilization is below 50%, so let's shift (no overlap). 
* (size_t cast is to avoid unintended sign-extension) */ - memcpy(t->data, t->data + t->begin * q->item_size, + memcpy(t->data, t->data + t->begin * (size_t)q->item_size, (size_t) (t->end - t->begin) * (size_t) q->item_size); t->end -= t->begin; t->begin = 0; @@ -76,7 +76,7 @@ void * queue_push_impl(struct queue *q) kr_require(t->end < t->cap); ++(q->len); ++(t->end); - return t->data + q->item_size * (t->end - 1); + return t->data + (size_t)q->item_size * (t->end - 1); } /* Return pointer to the space for the new element. */ @@ -98,8 +98,8 @@ void * queue_push_head_impl(struct queue *q) * Computations here are simplified due to h->begin == 0. * (size_t cast is to avoid unintended sign-extension) */ const int cnt = h->end; - memcpy(h->data + (h->cap - cnt) * q->item_size, h->data, - (size_t) cnt * (size_t) q->item_size); + memcpy(h->data + ((size_t)h->cap - cnt) * q->item_size, h->data, + (size_t)cnt * (size_t)q->item_size); h->begin = h->cap - cnt; h->end = h->cap; } else { @@ -113,7 +113,7 @@ void * queue_push_head_impl(struct queue *q) kr_require(h->begin > 0); --(h->begin); ++(q->len); - return h->data + q->item_size * h->begin; + return h->data + (size_t)q->item_size * h->begin; } void queue_pop_impl(struct queue *q) diff --git a/lib/generic/queue.h b/lib/generic/queue.h index 3fa52ce..fc2a86f 100644 --- a/lib/generic/queue.h +++ b/lib/generic/queue.h @@ -71,7 +71,7 @@ /** @brief Initialize a queue. You can malloc() it the usual way. */ #define queue_init(q) do { \ (void)(((__typeof__(((q).pdata_t)))0) == (void *)0); /* typecheck queue_t */ \ - queue_init_impl(&(q).queue, sizeof(*(q).pdata_t)); \ + queue_init_impl(&(q).queue, queue_member_size((q))); \ } while (false) /** @brief De-initialize a queue: make it invalid and free any inner allocations. */ @@ -105,6 +105,10 @@ #define queue_len(q) \ ((const size_t)(q).queue.len) +/** @brief Return the size of a single element in the queue. */ +#define queue_member_size(q) \ + (sizeof(*(q).pdata_t)) // NOLINT(bugprone-sizeof-expression): usually a false-positive + /** @brief Type for queue iterator, parametrized by value type. * It's a simple structure that owns no other resources. diff --git a/lib/generic/trie.c b/lib/generic/trie.c index f9aceda..21254eb 100644 --- a/lib/generic/trie.c +++ b/lib/generic/trie.c @@ -470,6 +470,10 @@ static int ns_longer_alloc(nstack_t *ns) memcpy(st, ns->stack, ns->len * sizeof(node_t *)); } else { st = realloc(ns->stack, new_size); + if (st == NULL) { + free(ns->stack); // left behind by realloc, callers bail out + ns->stack = NULL; + } } if (st == NULL) return KNOT_ENOMEM; diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c index dfb7c87..656bc2d 100644 --- a/lib/layer/iterate.c +++ b/lib/layer/iterate.c @@ -51,7 +51,7 @@ static const knot_dname_t *minimized_qname(struct kr_query *query, uint16_t *qty int cut_labels = knot_dname_labels(query->zone_cut.name, NULL); int qname_labels = knot_dname_labels(qname, NULL); while(qname[0] && qname_labels > cut_labels + 1) { - qname = knot_wire_next_label(qname, NULL); + qname = knot_dname_next_label(qname); qname_labels -= 1; } @@ -825,7 +825,10 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req) } } else if (!query->parent) { /* Answer for initial query */ - const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0); + const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0) + /* We need to cover the case of positive wildcard answer + * with over-limit NSEC3 iterations. 
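The ns_longer_alloc() hunk above guards against the classic realloc() pitfall: on failure the old block stays allocated and must either be kept or freed explicitly. The general shape of that fix, with hypothetical names:

    #include <stdlib.h>

    /* Sketch: grow a buffer; on failure release the old allocation so the
     * caller can bail out without leaking. */
    static int grow_buffer(void **buf, size_t new_size)
    {
        void *bigger = realloc(*buf, new_size);
        if (bigger == NULL) {
            free(*buf);  /* realloc left it behind */
            *buf = NULL;
            return -1;   /* e.g. KNOT_ENOMEM in the original */
        }
        *buf = bigger;
        return 0;
    }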
*/ + || query->flags.DNSSEC_WEXPAND; state = pick_authority(pkt, req, to_wire); if (state != kr_ok()) { return KR_STATE_FAIL; diff --git a/lib/layer/validate.c b/lib/layer/validate.c index 3bdb205..af20b2e 100644 --- a/lib/layer/validate.c +++ b/lib/layer/validate.c @@ -709,7 +709,7 @@ static int check_validation_result(kr_layer_t *ctx, const knot_pkt_t *pkt, ranke invalid_entry = entry; break; } else if (kr_rank_test(entry->rank, KR_RANK_MISSING) && - !invalid_entry) { + !invalid_entry) { // NOLINT(bugprone-branch-clone) invalid_entry = entry; } else if (kr_rank_test(entry->rank, KR_RANK_OMIT)) { continue; @@ -124,7 +124,7 @@ void kr_log_fmt(enum kr_log_group group, kr_log_level_t level, const char *file, } va_start(args, fmt); - vfprintf(stream, fmt, args); + (void)vfprintf(stream, fmt, args); va_end(args); } } diff --git a/lib/resolve.c b/lib/resolve.c index e24a40b..d8198c3 100644 --- a/lib/resolve.c +++ b/lib/resolve.c @@ -184,7 +184,7 @@ static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct k * otherwise this would risk leaking information to parent if the NODATA TTD > zone cut TTD. */ int labels = knot_dname_labels(target, NULL) - knot_dname_labels(cut_name, NULL); while (target[0] && labels > 2) { - target = knot_wire_next_label(target, NULL); + target = knot_dname_next_label(target); --labels; } for (int i = 0; i < labels; ++i) { @@ -196,7 +196,7 @@ static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct k break; } kr_assert(target[0]); - target = knot_wire_next_label(target, NULL); + target = knot_dname_next_label(target); } kr_cache_commit(cache); #endif @@ -853,6 +853,8 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo if (transport && !qry->flags.CACHED) { if (!(request->state & KR_STATE_FAIL)) { /* Do not complete NS address resolution on soft-fail. */ + if (kr_fails_assert(packet->wire)) + return KR_STATE_FAIL; const int rcode = knot_wire_get_rcode(packet->wire); if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) { qry->flags.AWAIT_IPV6 = false; @@ -886,7 +888,7 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo } /* Pop query if resolved. 
*/ - if (request->state == KR_STATE_YIELD) { + if (request->state == KR_STATE_YIELD) { // NOLINT(bugprone-branch-clone) return KR_STATE_PRODUCE; /* Requery */ } else if (qry->flags.RESOLVED) { kr_rplan_pop(rplan, qry); @@ -1006,7 +1008,7 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query int cut_labels = knot_dname_labels(qry->zone_cut.name, NULL); int wanted_name_labels = knot_dname_labels(wanted_name, NULL); while (wanted_name[0] && wanted_name_labels > cut_labels + name_offset) { - wanted_name = knot_wire_next_label(wanted_name, NULL); + wanted_name = knot_dname_next_label(wanted_name); wanted_name_labels -= 1; } minimized = (wanted_name != qry->sname); @@ -1051,7 +1053,7 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query /* set `nods` */ if ((qry->stype == KNOT_RRTYPE_DS) && - knot_dname_is_equal(wanted_name, qry->sname)) { + knot_dname_is_equal(wanted_name, qry->sname)) { // NOLINT(bugprone-branch-clone) nods = true; } else if (resume && !ds_req) { nods = false; @@ -1232,11 +1234,11 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot const knot_dname_t *parent = qry->parent->zone_cut.name; if (parent[0] != '\0' && knot_dname_in_bailiwick(qry->sname, parent) >= 0) { - requested_name = knot_wire_next_label(parent, NULL); + requested_name = knot_dname_next_label(parent); } - } else if ((qry->stype == KNOT_RRTYPE_DS) && (qry->sname[0] != '\0')) { + } else if ((qry->stype == KNOT_RRTYPE_DS) && (requested_name[0] != '\0')) { /* If this is explicit DS query, start from encloser too. */ - requested_name = knot_wire_next_label(requested_name, NULL); + requested_name = knot_dname_next_label(requested_name); } int state = KR_STATE_FAIL; @@ -1245,7 +1247,8 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot if (state == KR_STATE_DONE || (state & KR_STATE_FAIL)) { return state; } else if (state == KR_STATE_CONSUME) { - requested_name = knot_wire_next_label(requested_name, NULL); + kr_require(requested_name[0] != '\0'); + requested_name = knot_dname_next_label(requested_name); } } while (state == KR_STATE_CONSUME); @@ -1611,6 +1614,7 @@ int kr_resolve_finish(struct kr_request *request, int state) knot_wire_clear_ad(wire); knot_wire_clear_aa(wire); knot_wire_set_rcode(wire, KNOT_RCODE_SERVFAIL); + default:; // Do nothing } } } diff --git a/lib/selection.c b/lib/selection.c index c25782e..cce5d42 100644 --- a/lib/selection.c +++ b/lib/selection.c @@ -149,7 +149,7 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len, knot_db_val_t key = cache_key(ip, len); - if (cache->api->read(db, stats, &key, &value, 1)) { + if (cache->api->read(db, stats, &key, &value, 1)) { // NOLINT(bugprone-branch-clone) state = default_rtt_state; } else if (kr_fails_assert(value.len == sizeof(struct rtt_state))) { // shouldn't happen but let's be more robust diff --git a/lib/utils.c b/lib/utils.c index 8b7e127..2a0635e 100644 --- a/lib/utils.c +++ b/lib/utils.c @@ -921,9 +921,8 @@ int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr, static int rdata_p_cmp(const void *rp1, const void *rp2) { /* Just correct types of the parameters and pass them dereferenced. 
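The comment above states the qsort() comparator convention that the cast in the next hunk makes explicit: the comparator receives pointers to the array elements, so for an array of pointers each argument is a pointer-to-pointer that must be converted and dereferenced. A generic sketch:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch: comparator for an array of `char *` elements. */
    static int cmp_str_p(const void *a, const void *b)
    {
        const char *const *s1 = (const char *const *)a;
        const char *const *s2 = (const char *const *)b;
        return strcmp(*s1, *s2);
    }

    /* usage: qsort(names, count, sizeof(names[0]), cmp_str_p); */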
*/ - const knot_rdata_t - *const *r1 = rp1, - *const *r2 = rp2; + const knot_rdata_t *const *r1 = (const knot_rdata_t *const *)rp1; + const knot_rdata_t *const *r2 = (const knot_rdata_t *const *)rp2; return knot_rdata_cmp(*r1, *r2); } int kr_ranked_rrarray_finalize(ranked_rr_array_t *array, uint32_t qry_uid, knot_mm_t *pool) @@ -948,7 +947,7 @@ int kr_ranked_rrarray_finalize(ranked_rr_array_t *array, uint32_t qry_uid, knot_ } else { /* Multiple RRs; first: sort the array. */ stashed->rr->additional = NULL; - qsort(ra->at, ra->len, sizeof(ra->at[0]), rdata_p_cmp); + qsort((void *)ra->at, ra->len, array_member_size(*ra), rdata_p_cmp); /* Prune duplicates: NULL all except the last instance. */ int dup_count = 0; for (int i = 0; i + 1 < ra->len; ++i) { diff --git a/lib/utils.h b/lib/utils.h index fab13fe..e03b473 100644 --- a/lib/utils.h +++ b/lib/utils.h @@ -616,4 +616,10 @@ static inline size_t kr_dname_prefixlen(const uint8_t *name, unsigned nlabels) #endif ); } +#if KNOT_VERSION_HEX < 0x030400 +static inline const knot_dname_t * knot_dname_next_label(const knot_dname_t *dname) +{ + return knot_wire_next_label(dname, NULL); +} +#endif diff --git a/lib/zonecut.c b/lib/zonecut.c index 2bbd26f..aea38e4 100644 --- a/lib/zonecut.c +++ b/lib/zonecut.c @@ -580,7 +580,7 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut, trie_clear(cut->nsset); /* Subtract label from QNAME. */ if (!is_root) { - label = knot_wire_next_label(label, NULL); + label = knot_dname_next_label(label); } else { ret = kr_error(ENOENT); break; diff --git a/meson.build b/meson.build index 8e22e17..47c234c 100644 --- a/meson.build +++ b/meson.build @@ -4,7 +4,7 @@ project( 'knot-resolver', ['c', 'cpp'], license: 'GPLv3+', - version: '5.7.2', + version: '5.7.3', default_options: ['c_std=gnu11', 'b_ndebug=true'], meson_version: '>=0.49', ) @@ -59,6 +59,20 @@ systemd_unit_dir = prefix / 'lib' / 'systemd' / 'system' systemd_tmpfiles_dir = prefix / 'lib' / 'tmpfiles.d' systemd_sysusers_dir = prefix / 'lib' / 'sysusers.d' +## RPath +# When installing from sources into a non-standard prefix and the library is +# shared/dynamic, we need to set the executables' RPATH so that they can find +# `libkresd`, otherwise running them will fail with dynamic linkage errors +auto_prefixes = ['/', '/usr', '/usr/local'] +rpath_opt = get_option('install_rpath') +if (get_option('default_library') == 'static' or + rpath_opt == 'disabled' or + (rpath_opt == 'auto' and prefix in auto_prefixes)) + rpath = '' +else + rpath = prefix / get_option('libdir') +endif + ## Trust anchors managed_ta = get_option('managed_ta') == 'enabled' keyfile_default = etc_dir / get_option('keyfile_default') diff --git a/meson_options.txt b/meson_options.txt index 576d385..f09f46d 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -217,3 +217,15 @@ option( value: 'auto', description: 'cmocka unit tests', ) + +option( + 'install_rpath', + type: 'combo', + choices: [ + 'auto', + 'enabled', + 'disabled', + ], + value: 'auto', + description: 'add rpath to the knot resolver executables', +) diff --git a/modules/bogus_log/meson.build b/modules/bogus_log/meson.build index e2faed5..3fa8d3c 100644 --- a/modules/bogus_log/meson.build +++ b/modules/bogus_log/meson.build @@ -9,7 +9,7 @@ c_src_lint += bogus_log_src bogus_log_mod = shared_module( 'bogus_log', bogus_log_src, - dependencies: libknot, + dependencies: mod_deps, include_directories: mod_inc_dir, name_prefix: '', install: true, diff --git a/modules/dnstap/dnstap.c b/modules/dnstap/dnstap.c index 
7572667..07780fc 100644 --- a/modules/dnstap/dnstap.c +++ b/modules/dnstap/dnstap.c @@ -193,6 +193,7 @@ static int dnstap_log(kr_layer_t *ctx, enum dnstap_log_phase phase) { m.socket_family = DNSTAP__SOCKET_FAMILY__INET6; m.has_socket_family = true; break; + default:; } } diff --git a/modules/dnstap/meson.build b/modules/dnstap/meson.build index e8a94bf..038bf3e 100644 --- a/modules/dnstap/meson.build +++ b/modules/dnstap/meson.build @@ -43,11 +43,10 @@ if build_dnstap dnstap_mod = shared_module( 'dnstap', dnstap_src, - dependencies: [ + dependencies: mod_deps + [ declare_dependency(sources: dnstap_pb), libfstrm, libprotobuf_c, - libknot, ], include_directories: mod_inc_dir, name_prefix: '', diff --git a/modules/edns_keepalive/meson.build b/modules/edns_keepalive/meson.build index d125ec4..8370cdb 100644 --- a/modules/edns_keepalive/meson.build +++ b/modules/edns_keepalive/meson.build @@ -9,7 +9,7 @@ c_src_lint += edns_keepalive_src edns_keepalive_mod = shared_module( 'edns_keepalive', edns_keepalive_src, - dependencies: libknot, + dependencies: mod_deps, include_directories: mod_inc_dir, name_prefix: '', install: true, diff --git a/modules/extended_error/meson.build b/modules/extended_error/meson.build index 15a1772..9de514a 100644 --- a/modules/extended_error/meson.build +++ b/modules/extended_error/meson.build @@ -9,9 +9,7 @@ c_src_lint += extended_error_src extended_error_mod = shared_module( 'extended_error', extended_error_src, - dependencies: [ - libknot, - ], + dependencies: mod_deps, include_directories: mod_inc_dir, name_prefix: '', install: true, diff --git a/modules/hints/hints.c b/modules/hints/hints.c index a3f2f30..2195ca3 100644 --- a/modules/hints/hints.c +++ b/modules/hints/hints.c @@ -194,9 +194,11 @@ static const knot_dname_t * raw_addr2reverse(const uint8_t *raw_addr, int family #undef REV_MAXLEN if (family == AF_INET) { - snprintf(reverse_addr, sizeof(reverse_addr), - "%d.%d.%d.%d.in-addr.arpa.", - raw_addr[3], raw_addr[2], raw_addr[1], raw_addr[0]); + int ret = snprintf(reverse_addr, sizeof(reverse_addr), + "%d.%d.%d.%d.in-addr.arpa.", + raw_addr[3], raw_addr[2], raw_addr[1], raw_addr[0]); + if (kr_fails_assert(ret > 0 && ret <= sizeof(reverse_addr))) + return NULL; } else if (family == AF_INET6) { char *ra_it = reverse_addr; for (int i = 15; i >= 0; --i) { @@ -262,7 +264,10 @@ static int add_reverse_pair(struct kr_zonecut *hints, const char *name, const ch return kr_error(EINVAL); } - return kr_zonecut_add(hints, key, ptr_name, knot_dname_size(ptr_name)); + size_t dname_size = knot_dname_size(ptr_name); + if (kr_fails_assert(dname_size < INT_MAX)) + return kr_error(EINVAL); + return kr_zonecut_add(hints, key, ptr_name, (int)dname_size); } /** For a given name, remove either one address or all of them (if == NULL). @@ -276,7 +281,9 @@ static int del_pair(struct hints_data *data, const char *name, const char *addr) if (!knot_dname_from_str(key, name, sizeof(key))) { return kr_error(EINVAL); } - int key_len = knot_dname_size(key); + size_t key_len = knot_dname_size(key); + if (kr_fails_assert(key_len <= KNOT_DNAME_MAXLEN)) + return kr_error(EINVAL); if (addr) { /* Remove the pair. 
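The raw_addr2reverse() hunk above starts checking snprintf()'s result. The general idiom (the patch itself uses a slightly different bound wrapped in kr_fails_assert()) looks like this:

    #include <stdio.h>

    /* Sketch: a negative return means an encoding error, a return >= the
     * buffer size means the output was truncated. */
    static int format_reverse_v4(char *buf, size_t buf_size, const unsigned char a[4])
    {
        int ret = snprintf(buf, buf_size, "%d.%d.%d.%d.in-addr.arpa.",
                           a[3], a[2], a[1], a[0]);
        if (ret < 0 || (size_t)ret >= buf_size)
            return -1;
        return 0;
    }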
*/ @@ -286,7 +293,7 @@ static int del_pair(struct hints_data *data, const char *name, const char *addr) } const knot_dname_t *reverse_key = addr2reverse(addr); - kr_zonecut_del(&data->reverse_hints, reverse_key, key, key_len); + kr_zonecut_del(&data->reverse_hints, reverse_key, key, (int)key_len); return kr_zonecut_del(&data->hints, key, kr_inaddr(&ia.ip), kr_inaddr_len(&ia.ip)); } @@ -306,7 +313,7 @@ static int del_pair(struct hints_data *data, const char *name, const char *addr) ? AF_INET : AF_INET6; const knot_dname_t *reverse_key = raw_addr2reverse(addr_val, family); if (reverse_key != NULL) { - kr_zonecut_del(&data->reverse_hints, reverse_key, key, key_len); + kr_zonecut_del(&data->reverse_hints, reverse_key, key, (int)key_len); } } diff --git a/modules/hints/meson.build b/modules/hints/meson.build index b837918..7e681f1 100644 --- a/modules/hints/meson.build +++ b/modules/hints/meson.build @@ -9,10 +9,7 @@ c_src_lint += hints_src hints_mod = shared_module( 'hints', hints_src, - dependencies: [ - libknot, - luajit, - ], + dependencies: mod_deps, include_directories: mod_inc_dir, name_prefix: '', install: true, @@ -21,5 +18,5 @@ hints_mod = shared_module( ) config_tests += [ - ['hints', files('tests/hints.test.lua'), ['skip_asan']], + ['hints', files('tests/hints.test.lua')], ] diff --git a/modules/http/meson.build b/modules/http/meson.build index a36e9eb..7d89215 100644 --- a/modules/http/meson.build +++ b/modules/http/meson.build @@ -21,7 +21,7 @@ lua_mod_src += [ config_tests += [ ['http', files('http.test.lua')], ['http.doh', files('http_doh.test.lua')], - ['http.tls', files('test_tls/tls.test.lua')], + ['http.tls', files('test_tls/tls.test.lua'), ['skip_asan']], ] # install static files @@ -45,7 +45,7 @@ install_subdir( ) # auxiliary debug library for HTTP module - doesn't compile on Cygwin -if openssl.found() and host_machine.system() != 'cygwin' +if openssl.found() and host_machine.system() not in [ 'cygwin', 'darwin' ] debug_opensslkeylog_mod = shared_module( 'debug_opensslkeylog', ['debug_opensslkeylog.c'], diff --git a/modules/meson.build b/modules/meson.build index 3861225..48bd478 100644 --- a/modules/meson.build +++ b/modules/meson.build @@ -35,6 +35,13 @@ integr_tests += [ mod_inc_dir = include_directories('..', '../contrib', luajit.get_pkgconfig_variable('includedir')) +mod_deps = [ + contrib_dep, + libknot, + libuv, + luajit, +] + # handle more complex C/LUA modules separately subdir('bogus_log') # cookies module is not currently used diff --git a/modules/nsid/meson.build b/modules/nsid/meson.build index 354e70b..3c418bc 100644 --- a/modules/nsid/meson.build +++ b/modules/nsid/meson.build @@ -9,10 +9,7 @@ c_src_lint += nsid_src nsid_mod = shared_module( 'nsid', nsid_src, - dependencies: [ - libknot, - luajit, - ], + dependencies: mod_deps, include_directories: mod_inc_dir, name_prefix: '', install: true, diff --git a/modules/policy/lua-aho-corasick/.gitignore b/modules/policy/lua-aho-corasick/.gitignore new file mode 100644 index 0000000..04fd221 --- /dev/null +++ b/modules/policy/lua-aho-corasick/.gitignore @@ -0,0 +1,6 @@ +*.d +*.o +*.a +*.so +*_dep.txt +tests/testinput diff --git a/modules/refuse_nord/meson.build b/modules/refuse_nord/meson.build index 5142ded..7dc8b88 100644 --- a/modules/refuse_nord/meson.build +++ b/modules/refuse_nord/meson.build @@ -13,7 +13,7 @@ c_src_lint += refuse_nord_src refuse_nord_mod = shared_module( 'refuse_nord', refuse_nord_src, - dependencies: libknot, + dependencies: mod_deps, include_directories: mod_inc_dir, name_prefix: '', 
install: true, diff --git a/modules/stats/README.rst b/modules/stats/README.rst index 7d423aa..014c9f0 100644 --- a/modules/stats/README.rst +++ b/modules/stats/README.rst @@ -89,6 +89,8 @@ Built-in counters keep track of number of queries and answers matching specific +-----------------+----------------------------------+ | answer.slow | completed in more than 1500ms | +-----------------+----------------------------------+ +| answer.sum_ms | sum of all latencies in ms | ++-----------------+----------------------------------+ +-----------------+----------------------------------+ | **Answer flags** | diff --git a/modules/stats/meson.build b/modules/stats/meson.build index cb4ccd6..e1f4a49 100644 --- a/modules/stats/meson.build +++ b/modules/stats/meson.build @@ -14,9 +14,7 @@ integr_tests += [ stats_mod = shared_module( 'stats', stats_src, - dependencies: [ - libknot, - ], + dependencies: mod_deps, include_directories: mod_inc_dir, name_prefix: '', install: true, diff --git a/modules/stats/stats.c b/modules/stats/stats.c index ebb2877..3679615 100644 --- a/modules/stats/stats.c +++ b/modules/stats/stats.c @@ -42,11 +42,13 @@ X(answer,total) X(answer,noerror) X(answer,nodata) X(answer,nxdomain) X(answer,servfail) \ X(answer,cached) X(answer,1ms) X(answer,10ms) X(answer,50ms) X(answer,100ms) \ X(answer,250ms) X(answer,500ms) X(answer,1000ms) X(answer,1500ms) X(answer,slow) \ + X(answer,sum_ms) \ X(answer,aa) X(answer,tc) X(answer,rd) X(answer,ra) X(answer, ad) X(answer,cd) \ X(answer,edns0) X(answer,do) \ X(query,edns) X(query,dnssec) \ - X(request,total) X(request,udp) X(request,tcp) X(request,xdp) \ - X(request,dot) X(request,doh) X(request,internal) \ + X(request,total) X(request,total4) X(request,total6) X(request,internal) \ + X(request,udp4) X(request,tcp4) X(request,xdp4) X(request,dot4) X(request,doh4) \ + X(request,udp6) X(request,tcp6) X(request,xdp6) X(request,dot6) X(request,doh6) \ X(const,end) enum const_metric { @@ -63,6 +65,28 @@ static struct const_metric_elm const_metrics[] = { CONST_METRICS(X) #undef X }; + +/// These metrics are read-only views, each simply summing a pair of const_metrics items. +struct sum_metric { + const char *key; + const size_t *val1, *val2; +}; +static const struct sum_metric sum_metrics[] = { + // We're using this to aggregate v4 + v6 pairs. + #define DEF(proto) { \ + .key = "request." #proto, \ + .val1 = &const_metrics[metric_request_ ## proto ## 4].val, \ + .val2 = &const_metrics[metric_request_ ## proto ## 6].val, \ + } + DEF(udp), + DEF(tcp), + DEF(xdp), + DEF(dot), + DEF(doh), + #undef DEF +}; +static const size_t sum_metrics_len = sizeof(sum_metrics) / sizeof(sum_metrics[0]); + /** @endcond */ /** @internal LRU hash of most frequent names. */ @@ -116,7 +140,7 @@ static inline int collect_key(char *key, const knot_dname_t *name, uint16_t type if (key_len < 0) { return kr_error(key_len); } - return key_len + sizeof(type); + return key_len + (int)sizeof(type); } static void collect_sample(struct stat_data *data, struct kr_rplan *rplan) @@ -184,19 +208,26 @@ static int collect_transport(kr_layer_t *ctx) } /** - * Count each transport only once, + * Apart from the "total" stats, count each transport only once, * i.e. DoT does not count as TCP and XDP does not count as UDP. + * We have two counts for each - IPv6 and IPv4 separately. */ + const bool isIPv6 = req->qsource.addr->sa_family == AF_INET6; + #define INC_PROTO(proto) \ + stat_const_add(data, isIPv6 ? 
metric_request_ ## proto ## 6 \ + : metric_request_ ## proto ## 4, 1) + INC_PROTO(total); if (req->qsource.flags.http) - stat_const_add(data, metric_request_doh, 1); + INC_PROTO(doh); else if (req->qsource.flags.tls) - stat_const_add(data, metric_request_dot, 1); + INC_PROTO(dot); else if (req->qsource.flags.tcp) - stat_const_add(data, metric_request_tcp, 1); + INC_PROTO(tcp); else if (req->qsource.flags.xdp) - stat_const_add(data, metric_request_xdp, 1); + INC_PROTO(xdp); else - stat_const_add(data, metric_request_udp, 1); + INC_PROTO(udp); + #undef INC_PROTO return ctx->state; } @@ -220,6 +251,7 @@ static int collect(kr_layer_t *ctx) /* Histogram of answer latency. */ struct kr_query *first = rplan->resolved.at[0]; uint64_t elapsed = kr_now() - first->timestamp_mono; + stat_const_add(data, metric_answer_sum_ms, elapsed); if (elapsed <= 1) { stat_const_add(data, metric_answer_1ms, 1); } else if (elapsed <= 10) { @@ -272,6 +304,7 @@ static int collect(kr_layer_t *ctx) * Set nominal value of a key. * * Input: { key, val } + * Aggregate metrics can't be set. * */ static char* stats_set(void *env, struct kr_module *module, const char *args) @@ -313,26 +346,36 @@ static char* stats_get(void *env, struct kr_module *module, const char *args) struct stat_data *data = module->data; /* Expecting CHAR_BIT to be 8, this is a safe bet */ - char *ret = malloc(3 * sizeof(size_t) + 2); - if (!ret) { - return NULL; - } + char *str_value = NULL; + int ret = 0; /* Check if it exists in const map. */ for (unsigned i = 0; i < metric_const_end; ++i) { if (strcmp(const_metrics[i].key, args) == 0) { - sprintf(ret, "%zu", const_metrics[i].val); - return ret; + ret = asprintf(&str_value, "%zu", const_metrics[i].val); + if (ret < 0) + return NULL; + return str_value; + } + } + /* Check if it exists in aggregate metrics. 
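The sum_metrics table above defines each aggregate (e.g. request.udp) as a read-only view over its IPv4 and IPv6 counters, summed on read rather than counted separately. The idea in isolation, with hypothetical names:

    #include <stddef.h>

    /* Sketch: an aggregate metric owns no counter; it just references two
     * underlying counters and reports their sum when read. */
    struct sum_view {
        const char *key;
        const size_t *a, *b;
    };

    static size_t sum_view_read(const struct sum_view *v)
    {
        return *v->a + *v->b;
    }

    /* e.g. { .key = "request.udp", .a = &udp4_count, .b = &udp6_count } */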
*/ + for (int i = 0; i < sum_metrics_len; ++i) { + const struct sum_metric *smi = &sum_metrics[i]; + if (strcmp(smi->key, args) == 0) { + ret = asprintf(&str_value, "%zu", *smi->val1 + *smi->val2); + if (ret < 0) + return NULL; + return str_value; } } /* Check in variable map */ trie_val_t *val = trie_get_try(data->trie, args, strlen(args)); - if (!val) { - free(ret); + if (!val) return NULL; - } - sprintf(ret, "%zu", (size_t) *val); - return ret; + ret = asprintf(&str_value, "%zu", (size_t) *val); + if (ret < 0) + return NULL; + return str_value; } /** Checks whether: @@ -356,9 +399,9 @@ static int list_entry(const char *key, uint32_t key_len, trie_val_t *val, void * struct list_entry_context *ctx = baton; if (!key_matches_prefix(key, key_len, ctx->key_prefix, ctx->key_prefix_len)) return 0; - size_t number = (size_t) *val; + size_t number = (size_t)*val; auto_free char *key_nt = strndup(key, key_len); - json_append_member(ctx->root, key_nt, json_mknumber(number)); + json_append_member(ctx->root, key_nt, json_mknumber((double)number)); return 0; } @@ -375,7 +418,14 @@ static char* stats_list(void *env, struct kr_module *module, const char *args) for (unsigned i = 0; i < metric_const_end; ++i) { struct const_metric_elm *elm = &const_metrics[i]; if (!args || strncmp(elm->key, args, args_len) == 0) { - json_append_member(root, elm->key, json_mknumber(elm->val)); + json_append_member(root, elm->key, json_mknumber((double)elm->val)); + } + } + for (int i = 0; i < sum_metrics_len; ++i) { + const struct sum_metric *elm = &sum_metrics[i]; + if (!args || strncmp(elm->key, args, args_len) == 0) { + size_t val = *elm->val1 + *elm->val2; + json_append_member(root, elm->key, json_mknumber(val)); } } struct list_entry_context ctx = { diff --git a/tests/config/meson.build b/tests/config/meson.build index a739222..dc345a8 100644 --- a/tests/config/meson.build +++ b/tests/config/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later config_tests += [ - ['basic', files('basic.test.lua'), ['skip_asan']], - ['cache', files('cache.test.lua'), ['skip_asan']], + ['basic', files('basic.test.lua')], + ['cache', files('cache.test.lua')], ['net', files('net.test.lua'), ['config_net']], ['doh2', files('doh2.test.lua')], ['lru', files('lru.test.lua')], diff --git a/tests/dnstap/src/dnstap-test/go.mod b/tests/dnstap/src/dnstap-test/go.mod index 6b65088..2eb7287 100644 --- a/tests/dnstap/src/dnstap-test/go.mod +++ b/tests/dnstap/src/dnstap-test/go.mod @@ -1,6 +1,6 @@ module gitlab.nic.cz/knot/knot-resolver/tests/dnstap-test -go 1.17 +go 1.15 require ( github.com/cloudflare/dns v0.0.0-20151007113418-e20ffa3da443 diff --git a/tests/dnstap/src/dnstap-test/go.sum b/tests/dnstap/src/dnstap-test/go.sum deleted file mode 100644 index 1860f9e..0000000 --- a/tests/dnstap/src/dnstap-test/go.sum +++ /dev/null @@ -1,44 +0,0 @@ -github.com/cloudflare/dns v0.0.0-20151007113418-e20ffa3da443 h1:dYR6/V5rx/uaHsy4m1JuWfKYZO0r+G89BLD+XN7s9AI= -github.com/cloudflare/dns v0.0.0-20151007113418-e20ffa3da443/go.mod h1:pa4p3oKOxzbXjrV5AGD1v5xjL7skv9BvO4J0Llo3P+s= -github.com/dnstap/golang-dnstap v0.4.0 h1:KRHBoURygdGtBjDI2w4HifJfMAhhOqDuktAokaSa234= -github.com/dnstap/golang-dnstap v0.4.0/go.mod h1:FqsSdH58NAmkAvKcpyxht7i4FoBjKu8E4JUPt8ipSUs= -github.com/farsightsec/golang-framestream v0.3.0 h1:/spFQHucTle/ZIPkYqrfshQqPe2VQEzesH243TjIwqA= -github.com/farsightsec/golang-framestream v0.3.0/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/tests/dnstap/src/dnstap-test/run.sh b/tests/dnstap/src/dnstap-test/run.sh index 37822b7..70d8225 100755 --- a/tests/dnstap/src/dnstap-test/run.sh +++ b/tests/dnstap/src/dnstap-test/run.sh @@ -8,16 +8,13 @@ echo "$GOPATH" cd "$(dirname $0)" DNSTAP_TEST=dnstap-test -if [ -z "$GITLAB_CI" ]; then - type -P go >/dev/null || exit 77 - echo "Building the dnstap test and its dependencies..." - # some packages may be missing on the system right now - go get . -else - # In CI we've prebuilt dependencies into the default GOPATH. - # We're in a scratch container, so we just add the dnstap test inside. - export GOPATH=/root/go -fi +go mod tidy + +type -P go >/dev/null || exit 77 +echo "Building the dnstap test and its dependencies..." +# some packages may be missing on the system right now +go get . + DTAP_DIR="$GOPATH/src" DTAP="$DTAP_DIR/$DNSTAP_TEST" mkdir -p "$DTAP_DIR" diff --git a/tests/integration/deckard/.gitignore b/tests/integration/deckard/.gitignore new file mode 100644 index 0000000..f8109bf --- /dev/null +++ b/tests/integration/deckard/.gitignore @@ -0,0 +1,20 @@ +*.swp +/env.sh + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ +.mypy_cache/ diff --git a/tests/integration/deckard/.gitlab-ci.yml b/tests/integration/deckard/.gitlab-ci.yml new file mode 100644 index 0000000..2034cdd --- /dev/null +++ b/tests/integration/deckard/.gitlab-ci.yml @@ -0,0 +1,130 @@ +image: $CI_REGISTRY/knot/knot-resolver/ci/debian-buster:knot-3.0 +variables: + LC_ALL: C.UTF-8 + +stages: + - test + +.test: &test + stage: test + tags: + - docker + - linux + - amd64 + +.privileged_test: &privileged_test + stage: test + tags: + - privileged + +test:augeas: + <<: *test + script: + - augparse pydnstest/deckard.aug + +test:flake8: + <<: *test + script: + - python3 -m flake8 --max-line-length=100 . 
&& echo "OK, no flake8 errors detected" + +test:mypy: + <<: *test + script: + - ci/mypy-run.sh && echo "OK, no mypy error detected" + +test:pylint: + <<: *test + script: + - ci/pylint-run.sh + +test:rplint: + <<: *test + script: + - cp ci/common.sh /tmp + - cp ci/compare-rplint.sh /tmp + - /tmp/compare-rplint.sh + +test:unittests: + <<: *privileged_test + script: + - python3 -m pytest + +# There are no tests in the repo which use this feature but others do +# and do not want to cause them breakage +test:sanity:raw_id: + <<: *privileged_test + script: + - unshare -rn ci/raw_id_check.sh + +# changes in Deckard itself must not change result of tests +test:comparative:kresd: + <<: *privileged_test + script: + # test kresd binary + - git clone --recurse-submodules -j8 --depth=1 https://gitlab.nic.cz/knot/knot-resolver.git /tmp/kresd-local-build + - pushd /tmp/kresd-local-build + - git log -1 + - meson build_local --default-library=static --prefix=/tmp/.local + - ninja -C build_local install + - popd + # compare results from latest Deckard with results from merge base + - cp ci/common.sh /tmp + - cp ci/compare-tests.sh /tmp + - cp ci/junit-compare.py /tmp + - PATH=/tmp/.local/sbin:$PATH /tmp/compare-tests.sh $(pwd)/kresd_run.sh + artifacts: + when: always + expire_in: '1 hour' + paths: + - modified_tests + - base.xml + - head.xml + +# Run all tests on the latest kresd version to ensure that we not push tests +# which do not work on latest kresd. It would lead to breakage in kresd CI. +test:latest:kresd: + <<: *privileged_test + script: + - git clone --recurse-submodules -j8 --depth=1 https://gitlab.nic.cz/knot/knot-resolver.git kresd-local-build + - pushd kresd-local-build + - git log -1 + - meson build_local --default-library=static --prefix="$PWD/../.local" + - ninja -C build_local install + - popd + - TMPDIR=$(pwd) PATH=$(pwd)/.local/sbin:$PATH ./kresd_run.sh -n $(nproc) + artifacts: + when: on_failure + expire_in: 1 week + paths: + - tmpdeckard* + +# sanity check that Unbound under Deckard still works +# I've selected the only tests which are working +# on kresd and Unbound 1.5.8 as well as 1.6.0 +test:sanity:unbound: + <<: *privileged_test + script: + - TMPDIR=$(pwd) ./unbound_run.sh --scenarios=sets/resolver/iter_hint_lame.rpl + - TMPDIR=$(pwd) ./unbound_run.sh --scenarios=sets/resolver/iter_lame_root.rpl + # these do not work with Unbound 1.5.8 which is in CI container + #- TESTS=sets/resolver/nsec_wildcard_answer_response.rpl ./unbound_run.sh + #- TESTS=sets/resolver/world_cz_lidovky_www.rpl ./unbound_run.sh + artifacts: + when: on_failure + expire_in: 1 week + paths: + - tmpdeckard* + +# sanity check that PowerDNS recursor under Deckard still works +# I've selected couple tests which are working +# on kresd and PowerDNS recursor 4.0.0~alpha2 as well as 4.0.4 +test:sanity:pdnsrecursor: + <<: *privileged_test + script: + - TMPDIR=$(pwd) ./pdns_run.sh --scenarios=sets/resolver/iter_recurse.rpl + - TMPDIR=$(pwd) ./pdns_run.sh --scenarios=sets/resolver/iter_tcbit.rpl + artifacts: + when: on_failure + expire_in: 1 week + paths: + - tmpdeckard* diff --git a/tests/integration/deckard/.gitmodules b/tests/integration/deckard/.gitmodules new file mode 100644 index 0000000..7a3c587 --- /dev/null +++ b/tests/integration/deckard/.gitmodules @@ -0,0 +1,3 @@ +[submodule "contrib/libfaketime"] + path = contrib/libfaketime + url = git://github.com/wolfcw/libfaketime.git diff --git a/tests/pytests/conftest.py b/tests/pytests/conftest.py index 4c711f8..fcf4b05 100644 --- a/tests/pytests/conftest.py +++ 
b/tests/pytests/conftest.py @@ -86,7 +86,7 @@ def query_before(request): # whether to send an initial query return request.param -@pytest.mark.optionalhook +@pytest.hookimpl(optionalhook=True) def pytest_metadata(metadata): # filter potentially sensitive data from GitLab CI keys_to_delete = [] for key in metadata.keys(): diff --git a/tests/pytests/test_tls.py b/tests/pytests/test_tls.py index 3e1328a..2187efb 100644 --- a/tests/pytests/test_tls.py +++ b/tests/pytests/test_tls.py @@ -1,15 +1,8 @@ # SPDX-License-Identifier: GPL-3.0-or-later """TLS-specific tests""" -import itertools -import os -from socket import AF_INET, AF_INET6 import ssl -import sys - import pytest - -from kresd import make_kresd import utils @@ -41,43 +34,3 @@ def test_tls_cert_hostname_mismatch(kresd_tt, sock_family): with pytest.raises(ssl.CertificateError): ssock.connect(dest) - - -@pytest.mark.skipif(sys.version_info < (3, 6), - reason="requires python3.6 or higher") -@pytest.mark.parametrize('sf1, sf2, sf3', itertools.product( - [AF_INET, AF_INET6], [AF_INET, AF_INET6], [AF_INET, AF_INET6])) -def test_tls_session_resumption(tmpdir, sf1, sf2, sf3): - """Attempt TLS session resumption against the same kresd instance and a different one.""" - # TODO ensure that session can't be resumed after session ticket key regeneration - # at the first kresd instance - - # NOTE TLS 1.3 is intentionally disabled for session resumption tests, - # because python's SSLSocket.session isn't compatible with TLS 1.3 - # https://docs.python.org/3/library/ssl.html?highlight=ssl%20ticket#tls-1-3 - - def connect(kresd, ctx, sf, session=None): - sock, dest = kresd.stream_socket(sf, tls=True) - ssock = ctx.wrap_socket( - sock, server_hostname='transport-test-server.com', session=session) - ssock.connect(dest) - new_session = ssock.session - assert new_session.has_ticket - assert ssock.session_reused == (session is not None) - utils.ping_alive(ssock) - ssock.close() - return new_session - - workdir = os.path.join(str(tmpdir), 'kresd') - os.makedirs(workdir) - - with make_kresd(workdir, 'tt') as kresd: - ctx = utils.make_ssl_context( - verify_location=kresd.tls_cert_path, extra_options=[ssl.OP_NO_TLSv1_3]) - session = connect(kresd, ctx, sf1) # initial conn - connect(kresd, ctx, sf2, session) # resume session on the same instance - - workdir2 = os.path.join(str(tmpdir), 'kresd2') - os.makedirs(workdir2) - with make_kresd(workdir2, 'tt') as kresd2: - connect(kresd2, ctx, sf3, session) # resume session on a different instance diff --git a/tests/pytests/utils.py b/tests/pytests/utils.py index 4b995d4..8af71aa 100644 --- a/tests/pytests/utils.py +++ b/tests/pytests/utils.py @@ -99,7 +99,7 @@ def ping_alive(sock, msgid=None): @contextmanager def expect_kresd_close(rst_ok=False): - with pytest.raises(BrokenPipeError): + with pytest.raises((BrokenPipeError, ssl.SSLEOFError)): try: time.sleep(0.2) # give kresd time to close connection with TCP FIN yield @@ -110,17 +110,12 @@ def expect_kresd_close(rst_ok=False): pytest.fail("kresd didn't close the connection") -def make_ssl_context(insecure=False, verify_location=None, extra_options=None): - # set TLS v1.2+ - context = ssl.SSLContext(ssl.PROTOCOL_TLS) - context.options |= ssl.OP_NO_SSLv2 - context.options |= ssl.OP_NO_SSLv3 - context.options |= ssl.OP_NO_TLSv1 - context.options |= ssl.OP_NO_TLSv1_1 - - if extra_options is not None: - for option in extra_options: - context.options |= option +def make_ssl_context(insecure=False, verify_location=None, + minimum_tls=ssl.TLSVersion.TLSv1_2, + 
maximum_tls=ssl.TLSVersion.MAXIMUM_SUPPORTED): + context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + context.minimum_version = minimum_tls + context.maximum_version = maximum_tls if insecure: # turn off certificate verification diff --git a/tests/unit/meson.build b/tests/unit/meson.build index b10789c..747f1d3 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -10,7 +10,7 @@ mock_cmodule_mod = shared_module( 'mock_cmodule', mock_cmodule_src, name_prefix: '', - dependencies: libknot, + dependencies: mod_deps, include_directories: mod_inc_dir, ) @@ -20,10 +20,11 @@ foreach unit_test : unit_tests unit_test[0], unit_test[1], dependencies: [ + cmocka, contrib_dep, - libkres_dep, libknot, - cmocka, + libkres_dep, + libuv, lmdb, ], ) diff --git a/utils/cache_gc/categories.c b/utils/cache_gc/categories.c index 19dec45..aaa1ae5 100644 --- a/utils/cache_gc/categories.c +++ b/utils/cache_gc/categories.c @@ -18,7 +18,7 @@ static bool rrtype_is_infrastructure(uint16_t r) } } -static int get_random(int to) +static unsigned int get_random(int to) { // We don't need these to be really unpredictable, // but this should be cheap enough not to be noticeable. diff --git a/utils/cache_gc/db.c b/utils/cache_gc/db.c index fc4a2fd..c31ff22 100644 --- a/utils/cache_gc/db.c +++ b/utils/cache_gc/db.c @@ -9,11 +9,13 @@ #include <time.h> #include <sys/stat.h> +#define MDB_FILE "/data.mdb" + int kr_gc_cache_open(const char *cache_path, struct kr_cache *kres_db, knot_db_t ** libknot_db) { - char cache_data[strlen(cache_path) + 10]; - snprintf(cache_data, sizeof(cache_data), "%s/data.mdb", cache_path); + char cache_data[strlen(cache_path) + sizeof(MDB_FILE)]; + (void)snprintf(cache_data, sizeof(cache_data), "%s" MDB_FILE, cache_path); struct stat st = { 0 }; if (stat(cache_path, &st) || !(st.st_mode & S_IFDIR) diff --git a/utils/cache_gc/kr_cache_gc.c b/utils/cache_gc/kr_cache_gc.c index 5978345..4097c80 100644 --- a/utils/cache_gc/kr_cache_gc.c +++ b/utils/cache_gc/kr_cache_gc.c @@ -8,20 +8,12 @@ // libknot includes #include <libknot/libknot.h> -// dynarray is inside libknot since 3.1, but it's differently named -#ifdef knot_dynarray_declare - #define dynarray_declare knot_dynarray_declare - #define dynarray_define knot_dynarray_define - #define dynarray_foreach knot_dynarray_foreach -#else - #include <contrib/dynarray.h> -#endif - // resolver includes #include <lib/cache/api.h> #include <lib/cache/impl.h> #include <lib/defines.h> #include "lib/cache/cdb_lmdb.h" +#include "lib/generic/array.h" #include "lib/utils.h" #include "kr_cache_gc.h" @@ -43,41 +35,40 @@ static knot_db_val_t *dbval_copy(const knot_db_val_t * from) } // section: rrtype list +typedef array_t(uint16_t) rrtype_array_t; -dynarray_declare(rrtype, uint16_t, DYNARRAY_VISIBILITY_STATIC, 64) - dynarray_define(rrtype, uint16_t, DYNARRAY_VISIBILITY_STATIC) -static void rrtypelist_add(rrtype_dynarray_t * arr, uint16_t add_type) +static void rrtypelist_add(rrtype_array_t *arr, uint16_t add_type) { bool already_present = false; - dynarray_foreach(rrtype, uint16_t, i, *arr) { - if (*i == add_type) { + for (size_t i = 0; i < arr->len; i++) { + if (arr->at[i] == add_type) { already_present = true; break; } } if (!already_present) { - rrtype_dynarray_add(arr, &add_type); + kr_require(array_push(*arr, add_type) >= 0); } } -static void rrtypelist_print(rrtype_dynarray_t * arr) +static void rrtypelist_print(rrtype_array_t *arr) { char type_s[32] = { 0 }; - dynarray_foreach(rrtype, uint16_t, i, *arr) { - knot_rrtype_to_string(*i, type_s, 
sizeof(type_s)); + for (size_t i = 0; i < arr->len; i++) { + knot_rrtype_to_string(arr->at[i], type_s, sizeof(type_s)); printf(" %s", type_s); } printf("\n"); } -dynarray_declare(entry, knot_db_val_t *, DYNARRAY_VISIBILITY_STATIC, 256) - dynarray_define(entry, knot_db_val_t *, DYNARRAY_VISIBILITY_STATIC) -static void entry_dynarray_deep_free(entry_dynarray_t * d) +typedef array_t(knot_db_val_t *) entry_array_t; + +static void entry_array_deep_free(entry_array_t *d) { - dynarray_foreach(entry, knot_db_val_t *, i, *d) { - free(*i); + for (size_t i = 0; i < d->len; i++) { + free(d->at[i]); } - entry_dynarray_free(d); + array_clear(*d); } typedef struct { @@ -98,7 +89,7 @@ int cb_compute_categories(const knot_db_val_t * key, gc_record_info_t * info, typedef struct { category_t limit_category; - entry_dynarray_t to_delete; + entry_array_t to_delete; size_t cfg_temp_keys_space; size_t used_space; size_t oversize_records; @@ -117,7 +108,7 @@ int cb_delete_categories(const knot_db_val_t * key, gc_record_info_t * info, ctx->oversize_records++; free(todelete); } else { - entry_dynarray_add(&ctx->to_delete, &todelete); + kr_require(array_push(ctx->to_delete, todelete) >= 0); ctx->used_space = used; } } @@ -194,12 +185,12 @@ int kr_cache_gc(kr_cache_gc_cfg_t *cfg, kr_cache_gc_state_t **state) // Mixing ^^ page usage and entry sizes (key+value lengths) didn't work // too well, probably due to internal fragmentation after some GC cycles. // Therefore let's scale this by the ratio of these two sums. - ssize_t cats_sumsize = 0; + size_t cats_sumsize = 0; for (int i = 0; i < CATEGORIES; ++i) { cats_sumsize += cats.categories_sizes[i]; } /* use less precise variant to avoid 32-bit overflow */ - ssize_t amount_tofree = cats_sumsize / 100 * cfg->cache_to_be_freed; + size_t amount_tofree = cats_sumsize / 100 * cfg->cache_to_be_freed; kr_log_debug(CACHE, "tofree: %zd / %zd\n", amount_tofree, cats_sumsize); if (VERBOSE_STATUS) { @@ -212,8 +203,11 @@ int kr_cache_gc(kr_cache_gc_cfg_t *cfg, kr_cache_gc_state_t **state) } category_t limit_category = CATEGORIES; - while (limit_category > 0 && amount_tofree > 0) { - amount_tofree -= cats.categories_sizes[--limit_category]; + while (limit_category > 0) { + size_t cat_size = cats.categories_sizes[--limit_category]; + if (cat_size > amount_tofree) + break; + amount_tofree -= cat_size; } printf("Cache analyzed in %.0lf msecs, %zu records, limit category is %d.\n", @@ -226,13 +220,13 @@ int kr_cache_gc(kr_cache_gc_cfg_t *cfg, kr_cache_gc_state_t **state) to_del.limit_category = limit_category; ret = kr_gc_cache_iter(db, cfg, cb_delete_categories, &to_del); if (ret != KNOT_EOK) { - entry_dynarray_deep_free(&to_del.to_delete); + entry_array_deep_free(&to_del.to_delete); kr_cache_gc_free_state(state); return ret; } printf ("%zu records to be deleted using %.2lf MBytes of temporary memory, %zu records skipped due to memory limit.\n", - to_del.to_delete.size, ((double)to_del.used_space / 1048576.0), + to_del.to_delete.len, ((double)to_del.used_space / 1048576.0), to_del.oversize_records); //// 4. execute the planned deletions. 
@@ -242,23 +236,24 @@ int kr_cache_gc(kr_cache_gc_cfg_t *cfg, kr_cache_gc_state_t **state) kr_timer_start(&timer_delete); kr_timer_start(&timer_rw_txn); - rrtype_dynarray_t deleted_rrtypes = { 0 }; + rrtype_array_t deleted_rrtypes = { 0 }; ret = api->txn_begin(db, &txn, 0); if (ret != KNOT_EOK) { printf("Error starting R/W DB transaction (%s).\n", knot_strerror(ret)); - entry_dynarray_deep_free(&to_del.to_delete); + entry_array_deep_free(&to_del.to_delete); kr_cache_gc_free_state(state); return ret; } - dynarray_foreach(entry, knot_db_val_t *, i, to_del.to_delete) { - ret = api->del(&txn, *i); + for (size_t i = 0; i < to_del.to_delete.len; i++) { + knot_db_val_t *val = to_del.to_delete.at[i]; + ret = api->del(&txn, val); switch (ret) { case KNOT_EOK: deleted_records++; - const int entry_type = kr_gc_key_consistent(**i); + const int entry_type = kr_gc_key_consistent(*val); if (entry_type >= 0) // some "inconsistent" entries are OK rrtypelist_add(&deleted_rrtypes, entry_type); break; @@ -267,8 +262,8 @@ int kr_cache_gc(kr_cache_gc_cfg_t *cfg, kr_cache_gc_state_t **state) if (VERBOSE_STATUS) { // kresd normally only inserts (or overwrites), // so it's generally suspicious when a key goes missing. - printf("Record already gone (key len %zu): ", (*i)->len); - debug_printbin((*i)->data, (*i)->len); + printf("Record already gone (key len %zu): ", val->len); + debug_printbin(val->data, val->len); printf("\n"); } break; @@ -316,8 +311,8 @@ finish: printf("It took %.0lf msecs, %zu transactions (%s)\n\n", kr_timer_elapsed(&timer_delete) * 1000, rw_txn_count, knot_strerror(ret)); - rrtype_dynarray_free(&deleted_rrtypes); - entry_dynarray_deep_free(&to_del.to_delete); + array_clear(deleted_rrtypes); + entry_array_deep_free(&to_del.to_delete); // OK, let's close it in this case. 
kr_cache_gc_free_state(state); diff --git a/utils/cache_gc/main.c b/utils/cache_gc/main.c index 5adf19f..fe131cd 100644 --- a/utils/cache_gc/main.c +++ b/utils/cache_gc/main.c @@ -13,6 +13,7 @@ #include "kr_cache_gc.h" static volatile int killed = 0; +static volatile int exit_code = 0; static void got_killed(int signum) { @@ -21,12 +22,10 @@ static void got_killed(int signum) case 1: break; case 2: - exit(5); + exit_code = 5; break; - case 3: - abort(); default: - kr_assert(false); + abort(); } } @@ -60,16 +59,20 @@ int main(int argc, char *argv[]) { printf("Knot Resolver Cache Garbage Collector, version %s\n", PACKAGE_VERSION); if (setvbuf(stdout, NULL, _IONBF, 0) || setvbuf(stderr, NULL, _IONBF, 0)) { - fprintf(stderr, "Failed to to set output buffering (ignored): %s\n", + (void)fprintf(stderr, "Failed to to set output buffering (ignored): %s\n", strerror(errno)); - fflush(stderr); + (void)fflush(stderr); } - signal(SIGTERM, got_killed); - signal(SIGKILL, got_killed); - signal(SIGPIPE, got_killed); - signal(SIGCHLD, got_killed); - signal(SIGINT, got_killed); + struct sigaction act = { + .sa_handler = got_killed, + .sa_flags = SA_RESETHAND, + }; + sigemptyset(&act.sa_mask); + kr_assert(!sigaction(SIGTERM, &act, NULL)); + kr_assert(!sigaction(SIGPIPE, &act, NULL)); + kr_assert(!sigaction(SIGCHLD, &act, NULL)); + kr_assert(!sigaction(SIGINT, &act, NULL)); kr_cache_gc_cfg_t cfg = { .ro_txn_items = 200, @@ -131,7 +134,6 @@ int main(int argc, char *argv[]) return 1; } - int exit_code = 0; kr_cache_gc_state_t *gc_state = NULL; bool last_espace = false; do { diff --git a/utils/cache_gc/meson.build b/utils/cache_gc/meson.build index 40e127d..6ed86af 100644 --- a/utils/cache_gc/meson.build +++ b/utils/cache_gc/meson.build @@ -18,9 +18,12 @@ if build_utils contrib_dep, libkres_dep, libknot, + libuv, + lmdb, ], install: true, install_dir: get_option('sbindir'), + install_rpath: rpath, ) integr_tests += [ diff --git a/utils/client/.clang-tidy b/utils/client/.clang-tidy new file mode 100644 index 0000000..46c666c --- /dev/null +++ b/utils/client/.clang-tidy @@ -0,0 +1,2 @@ +--- +Checks: '-*' diff --git a/utils/client/meson.build b/utils/client/meson.build index 761c2cd..795cca3 100644 --- a/utils/client/meson.build +++ b/utils/client/meson.build @@ -33,5 +33,6 @@ if build_client ], install: true, install_dir: get_option('sbindir'), + install_rpath: rpath, ) endif |
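
Note on the utils/cache_gc changes above: the contrib/dynarray.h macros are replaced by the generic array_t container from lib/generic/array.h, with kr_require() guarding against allocation failure. Below is a minimal sketch of the resulting pattern, not taken from the patch itself; it assumes it is compiled inside the knot-resolver source tree so that lib/generic/array.h is on the include path, and the two pushed values are arbitrary RR type numbers chosen for illustration.

    /* Sketch of the array_t pattern that replaces the dynarray macros. */
    #include <stdint.h>
    #include <stdio.h>
    #include "lib/generic/array.h"

    typedef array_t(uint16_t) rrtype_array_t;

    int main(void)
    {
        rrtype_array_t arr = { 0 };        /* zero-init, as in the patch */

        /* array_push() returns a negative value on allocation failure,
         * which is why the patch wraps it in kr_require(... >= 0). */
        if (array_push(arr, 1) < 0 || array_push(arr, 28) < 0)
            return 1;

        /* Iteration uses the explicit .len / .at[] members instead of
         * dynarray_foreach(). */
        for (size_t i = 0; i < arr.len; i++)
            printf("rrtype %u\n", (unsigned)arr.at[i]);

        array_clear(arr);                  /* frees the backing storage */
        return 0;
    }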
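
Note on utils/cache_gc/db.c: the magic "+ 10" in the buffer size is replaced by sizeof(MDB_FILE). Since sizeof of a string literal counts the terminating NUL, the buffer is sized exactly for the concatenated path. A standalone sketch of the same arithmetic follows; the cache path value is a made-up example, not something taken from the patch.

    /* Standalone check of the buffer-size arithmetic used in kr_gc_cache_open(). */
    #include <stdio.h>
    #include <string.h>

    #define MDB_FILE "/data.mdb"

    int main(void)
    {
        const char *cache_path = "/var/cache/knot-resolver";   /* hypothetical */

        /* sizeof(MDB_FILE) == strlen("/data.mdb") + 1, so this fits the
         * appended suffix plus the final '\0' exactly. */
        char cache_data[strlen(cache_path) + sizeof(MDB_FILE)];
        (void)snprintf(cache_data, sizeof(cache_data), "%s" MDB_FILE, cache_path);

        printf("%s (%zu bytes used of %zu)\n",
               cache_data, strlen(cache_data) + 1, sizeof(cache_data));
        return 0;
    }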
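
Note on the limit_category loop in kr_cache_gc.c: amount_tofree changes from ssize_t to size_t, so the old "subtract, then test > 0" pattern could wrap around instead of going negative; the rewritten loop therefore checks the category size before subtracting. A toy model of that pattern is sketched below; the category sizes and the amount to free are invented numbers, not values from the resolver.

    /* Toy model of the rewritten loop: with unsigned arithmetic the size
     * is checked first, so the subtraction can never underflow. */
    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t categories_sizes[] = { 10, 20, 40, 80 };  /* index = category */
        int limit_category = 4;                          /* number of categories */
        size_t amount_tofree = 50;

        while (limit_category > 0) {
            size_t cat_size = categories_sizes[--limit_category];
            if (cat_size > amount_tofree)
                break;              /* subtracting would underflow; stop here */
            amount_tofree -= cat_size;
        }
        printf("limit category: %d, unfreed budget: %zu\n",
               limit_category, amount_tofree);
        return 0;
    }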