From e4ba6dbc3f1e76890b22773807ea37fe8fa2b1bc Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 10 Apr 2024 22:34:10 +0200
Subject: Adding upstream version 4.2.2.

Signed-off-by: Daniel Baumann
---
 tools/Get-HardenFlags.ps1 | 146 + tools/SkinnyProtocolOptimized.xml | 4190 +++++ tools/WiresharkXML.py | 312 + tools/alpine-setup.sh | 129 + tools/arch-setup.sh | 136 + tools/asn2deb | 179 + tools/asn2wrs.py | 8242 +++++++++ tools/asterix/README.md | 51 + tools/asterix/packet-asterix-template.c | 867 + tools/asterix/update-specs.py | 829 + tools/bsd-setup.sh | 202 + tools/checkAPIs.pl | 1303 ++ tools/check_dissector.py | 133 + tools/check_dissector_urls.py | 291 + tools/check_help_urls.py | 46 + tools/check_spelling.py | 493 + tools/check_static.py | 326 + tools/check_tfs.py | 595 + tools/check_typed_item_calls.py | 1775 ++ tools/check_val_to_str.py | 230 + tools/checkfiltername.pl | 790 + tools/checkhf.pl | 700 + tools/checklicenses.py | 262 + tools/colorfilters2js.py | 85 + tools/commit-msg | 7 + tools/compress-pngs.py | 89 + tools/convert-glib-types.py | 124 + tools/convert_expert_add_info_format.pl | 417 + tools/convert_proto_tree_add_text.pl | 759 + tools/cppcheck/cppcheck.sh | 158 + tools/cppcheck/includes | 7 + tools/cppcheck/suppressions | 7 + tools/debian-nightly-package.sh | 24 + tools/debian-setup.sh | 300 + tools/debug-alloc.env | 33 + tools/delete_includes.py | 427 + tools/detect_bad_alloc_patterns.py | 120 + tools/eti2wireshark.py | 1166 ++ tools/extract_asn1_from_spec.pl | 125 + tools/fix-encoding-args.pl | 698 + tools/fuzz-test.sh | 317 + tools/gen-bugnote | 54 + tools/generate-bacnet-vendors.py | 47 + tools/generate-dissector.py | 158 + tools/generate-nl80211-fields.py | 373 + tools/generate-sysdig-event.py | 412 + tools/generate_authors.py | 144 + tools/generate_cbor_pcap.py | 69 + tools/html2text.py | 249 + tools/idl2deb | 141 + tools/idl2wrs | 114 + tools/indexcap.py | 283 + tools/json2pcap/json2pcap.py | 686 + tools/lemon/CMakeLists.txt | 46 + tools/lemon/README | 52 + tools/lemon/apply-patches.sh | 16 + tools/lemon/lemon.c | 5893 +++++++ tools/lemon/lempar.c | 1068 ++ tools/lex.py | 1074 ++ tools/licensecheck.pl | 874 + tools/list_protos_in_cap.sh | 96 + tools/macos-setup-brew.sh | 173 + tools/macos-setup.sh | 3865 +++++ tools/make-authors-csv.py | 63 + tools/make-enterprises.py | 196 + tools/make-enums.py | 102 + tools/make-isobus.py | 223 + tools/make-manuf.py | 401 + tools/make-no-reassembly-profile.py | 69 + tools/make-packet-dcm.py | 247 + tools/make-pci-ids.py | 252 + tools/make-plugin-reg.py | 197 + tools/make-regs.py | 157 + tools/make-services.py | 292 + tools/make-tls-ct-logids.py | 126 + tools/make-usb.py | 164 + tools/make-version.py | 459 + tools/make_charset_table.c | 125 + tools/mingw-rpm-setup.sh | 70 + tools/msnchat | 315 + tools/msys2-setup.sh | 129 + tools/msys2checkdeps.py | 177 + tools/ncp2222.py | 16921 +++++++++++++++++++ tools/netscreen2dump.py | 137 + tools/oss-fuzzshark/build.sh | 22 + tools/parse_xml2skinny_dissector.py | 1073 + tools/pidl/MANIFEST | 41 + tools/pidl/META.yml | 18 + tools/pidl/Makefile.PL | 17 + tools/pidl/README | 64 + tools/pidl/TODO | 44 + tools/pidl/expr.yp | 202 + tools/pidl/idl.yp | 696 + tools/pidl/lib/Parse/Pidl.pm | 44 + tools/pidl/lib/Parse/Pidl/CUtil.pm | 52 + tools/pidl/lib/Parse/Pidl/Compat.pm | 168 + tools/pidl/lib/Parse/Pidl/Dump.pm | 294 + tools/pidl/lib/Parse/Pidl/Expr.pm | 1444 ++ tools/pidl/lib/Parse/Pidl/IDL.pm | 2664 +++ tools/pidl/lib/Parse/Pidl/NDR.pm | 1472 ++ tools/pidl/lib/Parse/Pidl/ODL.pm | 130 +
tools/pidl/lib/Parse/Pidl/Samba3/ClientNDR.pm | 409 + tools/pidl/lib/Parse/Pidl/Samba3/ServerNDR.pm | 322 + tools/pidl/lib/Parse/Pidl/Samba4.pm | 133 + tools/pidl/lib/Parse/Pidl/Samba4/COM/Header.pm | 160 + tools/pidl/lib/Parse/Pidl/Samba4/COM/Proxy.pm | 225 + tools/pidl/lib/Parse/Pidl/Samba4/COM/Stub.pm | 327 + tools/pidl/lib/Parse/Pidl/Samba4/Header.pm | 537 + tools/pidl/lib/Parse/Pidl/Samba4/NDR/Client.pm | 884 + tools/pidl/lib/Parse/Pidl/Samba4/NDR/Parser.pm | 3224 ++++ tools/pidl/lib/Parse/Pidl/Samba4/NDR/Server.pm | 342 + tools/pidl/lib/Parse/Pidl/Samba4/Python.pm | 2425 +++ tools/pidl/lib/Parse/Pidl/Samba4/TDR.pm | 283 + tools/pidl/lib/Parse/Pidl/Samba4/Template.pm | 92 + tools/pidl/lib/Parse/Pidl/Typelist.pm | 354 + tools/pidl/lib/Parse/Pidl/Util.pm | 197 + tools/pidl/lib/Parse/Pidl/Wireshark/Conformance.pm | 509 + tools/pidl/lib/Parse/Pidl/Wireshark/NDR.pm | 1401 ++ tools/pidl/lib/Parse/Yapp/Driver.pm | 471 + tools/pidl/lib/wscript_build | 37 + tools/pidl/pidl | 804 + tools/pidl/tests/Util.pm | 181 + tools/pidl/tests/cutil.pl | 21 + tools/pidl/tests/dump.pl | 15 + tools/pidl/tests/header.pl | 108 + tools/pidl/tests/ndr.pl | 561 + tools/pidl/tests/ndr_align.pl | 143 + tools/pidl/tests/ndr_alloc.pl | 118 + tools/pidl/tests/ndr_array.pl | 37 + tools/pidl/tests/ndr_compat.pl | 21 + tools/pidl/tests/ndr_deprecations.pl | 26 + tools/pidl/tests/ndr_fullptr.pl | 44 + tools/pidl/tests/ndr_refptr.pl | 526 + tools/pidl/tests/ndr_represent.pl | 71 + tools/pidl/tests/ndr_simple.pl | 28 + tools/pidl/tests/ndr_string.pl | 192 + tools/pidl/tests/ndr_tagtype.pl | 66 + tools/pidl/tests/parse_idl.pl | 243 + tools/pidl/tests/samba-ndr.pl | 300 + tools/pidl/tests/samba3-cli.pl | 236 + tools/pidl/tests/samba3-srv.pl | 18 + tools/pidl/tests/tdr.pl | 49 + tools/pidl/tests/test_util.pl | 21 + tools/pidl/tests/typelist.pl | 93 + tools/pidl/tests/util.pl | 115 + tools/pidl/tests/wireshark-conf.pl | 205 + tools/pidl/tests/wireshark-ndr.pl | 274 + tools/pidl/wscript | 103 + tools/pkt-from-core.py | 477 + tools/pre-commit | 135 + tools/pre-commit-ignore.conf | 27 + tools/pre-commit-ignore.py | 59 + tools/process-x11-fields.pl | 165 + tools/process-x11-xcb.pl | 1946 +++ tools/radiotap-gen/CMakeLists.txt | 8 + tools/radiotap-gen/radiotap-gen.c | 182 + tools/randpkt-test.sh | 171 + tools/rdps.py | 142 + tools/release-update-debian-soversions.sh | 23 + tools/rpm-setup.sh | 358 + tools/sharkd_shell.py | 311 + tools/test-captures.sh | 85 + tools/test-common.sh | 160 + tools/update-appdata.py | 99 + tools/update-tools-help.py | 82 + tools/update-tx | 72 + tools/valgrind-wireshark.sh | 123 + tools/validate-clang-check.sh | 57 + tools/validate-commit.py | 274 + tools/validate-diameter-xml.sh | 91 + tools/vg-suppressions | 119 + tools/win-setup.ps1 | 331 + tools/wireshark_be.py | 260 + tools/wireshark_gen.py | 2789 +++ tools/wireshark_words.txt | 1857 ++ tools/ws-coding-style.cfg | 370 + tools/yacc.py | 3448 ++++ 177 files changed, 103416 insertions(+) create mode 100644 tools/Get-HardenFlags.ps1 create mode 100644 tools/SkinnyProtocolOptimized.xml create mode 100755 tools/WiresharkXML.py create mode 100755 tools/alpine-setup.sh create mode 100755 tools/arch-setup.sh create mode 100755 tools/asn2deb create mode 100755 tools/asn2wrs.py create mode 100644 tools/asterix/README.md create mode 100644 tools/asterix/packet-asterix-template.c create mode 100755 tools/asterix/update-specs.py create mode 100755 tools/bsd-setup.sh create mode 100755 tools/checkAPIs.pl create mode 100755 tools/check_dissector.py create mode 100755 
tools/check_dissector_urls.py create mode 100755 tools/check_help_urls.py create mode 100755 tools/check_spelling.py create mode 100755 tools/check_static.py create mode 100755 tools/check_tfs.py create mode 100755 tools/check_typed_item_calls.py create mode 100755 tools/check_val_to_str.py create mode 100755 tools/checkfiltername.pl create mode 100755 tools/checkhf.pl create mode 100755 tools/checklicenses.py create mode 100644 tools/colorfilters2js.py create mode 100755 tools/commit-msg create mode 100755 tools/compress-pngs.py create mode 100755 tools/convert-glib-types.py create mode 100755 tools/convert_expert_add_info_format.pl create mode 100755 tools/convert_proto_tree_add_text.pl create mode 100755 tools/cppcheck/cppcheck.sh create mode 100644 tools/cppcheck/includes create mode 100644 tools/cppcheck/suppressions create mode 100755 tools/debian-nightly-package.sh create mode 100755 tools/debian-setup.sh create mode 100644 tools/debug-alloc.env create mode 100755 tools/delete_includes.py create mode 100644 tools/detect_bad_alloc_patterns.py create mode 100755 tools/eti2wireshark.py create mode 100755 tools/extract_asn1_from_spec.pl create mode 100755 tools/fix-encoding-args.pl create mode 100755 tools/fuzz-test.sh create mode 100755 tools/gen-bugnote create mode 100755 tools/generate-bacnet-vendors.py create mode 100755 tools/generate-dissector.py create mode 100755 tools/generate-nl80211-fields.py create mode 100755 tools/generate-sysdig-event.py create mode 100755 tools/generate_authors.py create mode 100755 tools/generate_cbor_pcap.py create mode 100755 tools/html2text.py create mode 100755 tools/idl2deb create mode 100755 tools/idl2wrs create mode 100755 tools/indexcap.py create mode 100755 tools/json2pcap/json2pcap.py create mode 100644 tools/lemon/CMakeLists.txt create mode 100644 tools/lemon/README create mode 100755 tools/lemon/apply-patches.sh create mode 100644 tools/lemon/lemon.c create mode 100644 tools/lemon/lempar.c create mode 100644 tools/lex.py create mode 100755 tools/licensecheck.pl create mode 100755 tools/list_protos_in_cap.sh create mode 100755 tools/macos-setup-brew.sh create mode 100755 tools/macos-setup.sh create mode 100755 tools/make-authors-csv.py create mode 100755 tools/make-enterprises.py create mode 100755 tools/make-enums.py create mode 100644 tools/make-isobus.py create mode 100755 tools/make-manuf.py create mode 100755 tools/make-no-reassembly-profile.py create mode 100755 tools/make-packet-dcm.py create mode 100755 tools/make-pci-ids.py create mode 100755 tools/make-plugin-reg.py create mode 100755 tools/make-regs.py create mode 100755 tools/make-services.py create mode 100755 tools/make-tls-ct-logids.py create mode 100755 tools/make-usb.py create mode 100755 tools/make-version.py create mode 100644 tools/make_charset_table.c create mode 100755 tools/mingw-rpm-setup.sh create mode 100755 tools/msnchat create mode 100644 tools/msys2-setup.sh create mode 100644 tools/msys2checkdeps.py create mode 100755 tools/ncp2222.py create mode 100755 tools/netscreen2dump.py create mode 100755 tools/oss-fuzzshark/build.sh create mode 100755 tools/parse_xml2skinny_dissector.py create mode 100644 tools/pidl/MANIFEST create mode 100644 tools/pidl/META.yml create mode 100755 tools/pidl/Makefile.PL create mode 100644 tools/pidl/README create mode 100644 tools/pidl/TODO create mode 100644 tools/pidl/expr.yp create mode 100644 tools/pidl/idl.yp create mode 100644 tools/pidl/lib/Parse/Pidl.pm create mode 100644 tools/pidl/lib/Parse/Pidl/CUtil.pm create mode 100644 
tools/pidl/lib/Parse/Pidl/Compat.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Dump.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Expr.pm create mode 100644 tools/pidl/lib/Parse/Pidl/IDL.pm create mode 100644 tools/pidl/lib/Parse/Pidl/NDR.pm create mode 100644 tools/pidl/lib/Parse/Pidl/ODL.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba3/ClientNDR.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba3/ServerNDR.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/COM/Header.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/COM/Proxy.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/COM/Stub.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/Header.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/NDR/Client.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/NDR/Parser.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/NDR/Server.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/Python.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/TDR.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Samba4/Template.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Typelist.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Util.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Wireshark/Conformance.pm create mode 100644 tools/pidl/lib/Parse/Pidl/Wireshark/NDR.pm create mode 100644 tools/pidl/lib/Parse/Yapp/Driver.pm create mode 100644 tools/pidl/lib/wscript_build create mode 100755 tools/pidl/pidl create mode 100644 tools/pidl/tests/Util.pm create mode 100755 tools/pidl/tests/cutil.pl create mode 100755 tools/pidl/tests/dump.pl create mode 100755 tools/pidl/tests/header.pl create mode 100755 tools/pidl/tests/ndr.pl create mode 100755 tools/pidl/tests/ndr_align.pl create mode 100755 tools/pidl/tests/ndr_alloc.pl create mode 100755 tools/pidl/tests/ndr_array.pl create mode 100755 tools/pidl/tests/ndr_compat.pl create mode 100755 tools/pidl/tests/ndr_deprecations.pl create mode 100755 tools/pidl/tests/ndr_fullptr.pl create mode 100755 tools/pidl/tests/ndr_refptr.pl create mode 100755 tools/pidl/tests/ndr_represent.pl create mode 100755 tools/pidl/tests/ndr_simple.pl create mode 100755 tools/pidl/tests/ndr_string.pl create mode 100755 tools/pidl/tests/ndr_tagtype.pl create mode 100755 tools/pidl/tests/parse_idl.pl create mode 100755 tools/pidl/tests/samba-ndr.pl create mode 100755 tools/pidl/tests/samba3-cli.pl create mode 100755 tools/pidl/tests/samba3-srv.pl create mode 100755 tools/pidl/tests/tdr.pl create mode 100755 tools/pidl/tests/test_util.pl create mode 100755 tools/pidl/tests/typelist.pl create mode 100755 tools/pidl/tests/util.pl create mode 100755 tools/pidl/tests/wireshark-conf.pl create mode 100755 tools/pidl/tests/wireshark-ndr.pl create mode 100644 tools/pidl/wscript create mode 100755 tools/pkt-from-core.py create mode 100755 tools/pre-commit create mode 100644 tools/pre-commit-ignore.conf create mode 100755 tools/pre-commit-ignore.py create mode 100755 tools/process-x11-fields.pl create mode 100755 tools/process-x11-xcb.pl create mode 100644 tools/radiotap-gen/CMakeLists.txt create mode 100644 tools/radiotap-gen/radiotap-gen.c create mode 100755 tools/randpkt-test.sh create mode 100755 tools/rdps.py create mode 100755 tools/release-update-debian-soversions.sh create mode 100755 tools/rpm-setup.sh create mode 100755 tools/sharkd_shell.py create mode 100755 tools/test-captures.sh create mode 100755 tools/test-common.sh create mode 100755 tools/update-appdata.py create mode 100755 
tools/update-tools-help.py create mode 100755 tools/update-tx create mode 100755 tools/valgrind-wireshark.sh create mode 100755 tools/validate-clang-check.sh create mode 100755 tools/validate-commit.py create mode 100755 tools/validate-diameter-xml.sh create mode 100644 tools/vg-suppressions create mode 100644 tools/win-setup.ps1 create mode 100755 tools/wireshark_be.py create mode 100755 tools/wireshark_gen.py create mode 100644 tools/wireshark_words.txt create mode 100644 tools/ws-coding-style.cfg create mode 100644 tools/yacc.py

diff --git a/tools/Get-HardenFlags.ps1 b/tools/Get-HardenFlags.ps1
new file mode 100644
index 0000000..c078565
--- /dev/null
+++ b/tools/Get-HardenFlags.ps1
@@ -0,0 +1,146 @@
+#
+# Get-HardenFlags - Checks hardening flags on the binaries.
+#
+# Copyright 2015 Graham Bloice
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+#requires -version 2
+
+# Get-HardenFlags calls the dumpbin utility to get the binary header
+# flags on all the binaries in the distribution, and then filters
+# for the NXCOMPAT and DYNAMICBASE flags.
+
+# This script will probably fail for the foreseeable future.
+#
+# Many of our third-party libraries are compiled using MinGW-w64. Its version
+# of `ld` doesn't enable the dynamicbase, nxcompat, or high-entropy-va flags
+# by default. When you *do* pass --dynamicbase it strips the relocation
+# section of the executable:
+#
+# https://sourceware.org/bugzilla/show_bug.cgi?id=19011
+#
+# As a result, none of the distributions that produce Windows applications
+# and libraries have any sort of hardening flags enabled:
+#
+# https://mingw-w64.org/doku.php/download
+#
+
+<#
+.SYNOPSIS
+Checks the NXCOMPAT and DYNAMICBASE flags on all the binaries.
+
+.DESCRIPTION
+This script checks that every binary in the distribution carries the
+NXCOMPAT and DYNAMICBASE hardening flags, using the dumpbin utility.
+
+.PARAMETER BinaryDir
+Specifies the directory where the binaries may be found.
+
+.INPUTS
+-BinaryDir Directory containing the binaries to be checked.
+
+.OUTPUTS
+Any binary that doesn't have the flags is written to the error stream
+
+.EXAMPLE
+C:\PS> .\tools\Get-HardenFlags.ps1 -BinaryDir run\RelWithDebInfo
+#>
+
+Param(
+    [Parameter(Mandatory=$true, Position=0)]
+    [String]
+    $BinaryDir
+)
+
+# Create a list of 3rd party binaries that are not hardened
+$SoftBins = (
+    "libpixmap.dll",
+    "libwimp.dll",
+    "libgail.dll",
+    "airpcap.dll",
+    "comerr32.dll",
+    "k5sprt32.dll",
+    "krb5_32.dll",
+    "libatk-1.0-0.dll",
+    "libcairo-2.dll",
+    "libffi-6.dll",
+    "libfontconfig-1.dll",
+    "libfreetype-6.dll",
+    "libgcc_s_sjlj-1.dll",
+    "libgcrypt-20.dll",
+    "libgdk-win32-2.0-0.dll",
+    "libgdk_pixbuf-2.0-0.dll",
+    "libgio-2.0-0.dll",
+    "libglib-2.0-0.dll",
+    "libgmodule-2.0-0.dll",
+    "libgmp-10.dll",
+    "libgnutls-28.dll",
+    "libgobject-2.0-0.dll",
+    "libgpg-error-0.dll",
+    "libgtk-win32-2.0-0.dll",
+    "libharfbuzz-0.dll",
+    "libhogweed-2-4.dll",
+    "libintl-8.dll",
+    "libjasper-1.dll",
+    "libjpeg-8.dll",
+    "liblzma-5.dll",
+    "libmaxminddb.dll",
+    "libnettle-4-6.dll",
+    "libp11-kit-0.dll",
+    "libpango-1.0-0.dll",
+    "libpangocairo-1.0-0.dll",
+    "libpangoft2-1.0-0.dll",
+    "libpangowin32-1.0-0.dll",
+    "libpixman-1-0.dll",
+    "libpng15-15.dll",
+    "libtasn1-6.dll",
+    "libtiff-5.dll",
+    "libxml2-2.dll",
+# The x64 ones that are different
+    "comerr64.dll",
+    "k5sprt64.dll",
+    "krb5_64.dll",
+    "libgcc_s_seh-1.dll",
+    "libgpg-error6-0.dll",
+    "libpng16-16.dll",
+# Unfortunately the nsis uninstaller is not hardened.
+    "uninstall.exe"
+)
+
+# CD into the bindir, allows Resolve-Path to work in relative mode.
+Push-Location $BinaryDir
+[Console]::Error.WriteLine("Checking in $BinaryDir for unhardened binaries:")
+
+# Retrieve the list of binaries. -Filter is quicker than -Include, but can only handle one item
+$Binaries = Get-ChildItem -Path $BinaryDir -Recurse -Include *.exe,*.dll
+
+# Number of "soft" binaries found
+$Count = 0;
+
+# Iterate over the list
+$Binaries | ForEach-Object {
+
+    # Get the flags
+    $flags = dumpbin $_ /HEADERS;
+
+    # Check for the required flags
+    $match = $flags | Select-String -Pattern "NX compatible", "Dynamic base"
+    if ($match.Count -ne 2) {
+
+        # Write-Error outputs error records, we simply want the filename
+        [Console]::Error.WriteLine((Resolve-Path $_ -Relative))
+
+        # Don't count files that won't ever be OK
+        if ($SoftBins -notcontains (Split-Path $_ -Leaf)) {
+            $Count++
+        }
+    }
+}
+
+exit $Count
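The core of the check above is one dumpbin /HEADERS call per binary, with a pass requiring both the "NX compatible" and "Dynamic base" characteristics to appear in the output. A minimal Python sketch of the same test, assuming MSVC's dumpbin.exe is on PATH; the script and helper names are illustrative, not part of the tree:

# harden_check.py -- illustrative sketch of the dumpbin-based hardening check.
import subprocess
import sys
from pathlib import Path

REQUIRED = ("NX compatible", "Dynamic base")

def is_hardened(binary):
    # dumpbin /HEADERS prints the optional-header DLL characteristics,
    # which include the "NX compatible" and "Dynamic base" lines.
    out = subprocess.run(["dumpbin", "/HEADERS", str(binary)],
                         capture_output=True, text=True, check=False).stdout
    return all(flag in out for flag in REQUIRED)

def main(binary_dir):
    soft = 0
    for path in Path(binary_dir).rglob("*"):
        if path.suffix.lower() in (".exe", ".dll") and not is_hardened(path):
            print(path, file=sys.stderr)  # report the unhardened binary
            soft += 1
    return soft

if __name__ == "__main__":
    sys.exit(main(sys.argv[1]))

Like the PowerShell script, the sketch uses the count of unhardened binaries as its exit status, so a CI job fails when the count is non-zero.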
diff --git a/tools/SkinnyProtocolOptimized.xml b/tools/SkinnyProtocolOptimized.xml
new file mode 100644
index 0000000..3eb5f83
--- /dev/null
+++ b/tools/SkinnyProtocolOptimized.xml
@@ -0,0 +1,4190 @@
+[4190 added lines of Skinny protocol definition XML: the element markup did not survive extraction, leaving only bare "+" line markers]
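The file is a machine-readable description of the Skinny (SCCP) protocol, presumably consumed by tools/parse_xml2skinny_dissector.py from the same directory to generate the dissector source. Since none of the markup survives here, the following sketch assumes nothing about its schema; it is just a generic tag histogram, one way to eyeball the shape of such a definition file:

# xml_shape.py -- illustrative only: tallies element tags in any XML file.
import sys
from collections import Counter
import xml.etree.ElementTree as ET

def tag_histogram(path):
    counts = Counter()
    # iterparse streams the document, so even a large file stays cheap
    for _, elem in ET.iterparse(path, events=("start",)):
        counts[elem.tag] += 1
    return counts

if __name__ == "__main__":
    for tag, n in tag_histogram(sys.argv[1]).most_common():
        print("%6d %s" % (n, tag))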
diff --git a/tools/WiresharkXML.py b/tools/WiresharkXML.py
new file mode 100755
index 0000000..02d2cad
--- /dev/null
+++ b/tools/WiresharkXML.py
@@ -0,0 +1,312 @@
+"""
+Routines for reading PDML produced from TShark.
+
+Copyright (c) 2003, 2013 by Gilbert Ramirez
+
+SPDX-License-Identifier: GPL-2.0-or-later
+"""
+
+import sys
+import xml.sax
+from xml.sax.saxutils import quoteattr
+import cStringIO as StringIO
+
+class CaptureFile:
+    pass
+
+class FoundItException(Exception):
+    """Used internally for exiting a tree search"""
+    pass
+
+class PacketList:
+    """Holds Packet objects, and has methods for finding
+    items within it."""
+
+    def __init__(self, children=None):
+        if children is None:
+            self.children = []
+        else:
+            self.children = children
+
+    def __getitem__(self, index):
+        """We act like a list."""
+        return self.children[index]
+
+    def __len__(self):
+        return len(self.children)
+
+    def item_exists(self, name):
+        """Does an item with name 'name' exist in this
+        PacketList? Returns True or False."""
+        for child in self.children:
+            if child.name == name:
+                return True
+
+        try:
+            for child in self.children:
+                child._item_exists(name)
+
+        except FoundItException:
+            return True
+
+        return False
+
+    def _item_exists(self, name):
+        for child in self.children:
+            if child.name == name:
+                raise FoundItException
+            child._item_exists(name)
+
+
+    def get_items(self, name, items=None):
+        """Return all items that match the name 'name'.
+        They are returned in order of a depth-first search."""
+        if items is None:
+            top_level = 1
+            items = []
+        else:
+            top_level = 0
+
+        for child in self.children:
+            if child.name == name:
+                items.append(child)
+            child.get_items(name, items)
+
+        if top_level:
+            return PacketList(items)
+
+    def get_items_before(self, name, before_item, items=None):
+        """Return all items that match the name 'name' and
+        exist before the before_item. The before_item is an object.
+        The results are returned in order of a depth-first search.
+        This function allows you to find fields from protocols that occur
+        before other protocols. For example, if you have an HTTP
+        protocol, you can find all tcp.dstport fields *before* that HTTP
+        protocol. This helps analyze in the presence of tunneled protocols."""
+        if items is None:
+            top_level = 1
+            items = []
+        else:
+            top_level = 0
+
+        for child in self.children:
+            if top_level == 1 and child == before_item:
+                break
+            if child.name == name:
+                items.append(child)
+            # Call get_items because the 'before_item' applies
+            # only to the top level search.
+            child.get_items(name, items)
+
+        if top_level:
+            return PacketList(items)
+
+
+class ProtoTreeItem(PacketList):
+    def __init__(self, xmlattrs):
+        PacketList.__init__(self)
+
+        self.name = xmlattrs.get("name", "")
+        self.showname = xmlattrs.get("showname", "")
+        self.pos = xmlattrs.get("pos", "")
+        self.size = xmlattrs.get("size", "")
+        self.value = xmlattrs.get("value", "")
+        self.show = xmlattrs.get("show", "")
+        self.hide = xmlattrs.get("hide", "")
+
+    def add_child(self, child):
+        self.children.append(child)
+
+    def get_name(self):
+        return self.name
+
+    def get_showname(self):
+        return self.showname
+
+    def get_pos(self):
+        return self.pos
+
+    def get_size(self):
+        return self.size
+
+    def get_value(self):
+        return self.value
+
+    def get_show(self):
+        return self.show
+
+    def get_hide(self):
+        return self.hide
+
+    def dump(self, fh=sys.stdout):
+        if self.name:
+            print >> fh, " name=%s" % (quoteattr(self.name),),
+
+        if self.showname:
+            print >> fh, "showname=%s" % (quoteattr(self.showname),),
+
+        if self.pos:
+            print >> fh, "pos=%s" % (quoteattr(self.pos),),
+
+        if self.size:
+            print >> fh, "size=%s" % (quoteattr(self.size),),
+
+        if self.value:
+            print >> fh, "value=%s" % (quoteattr(self.value),),
+
+        if self.show:
+            print >> fh, "show=%s" % (quoteattr(self.show),),
+
+        if self.hide:
+            print >> fh, "hide=%s" % (quoteattr(self.hide),),
+
+class Packet(ProtoTreeItem, PacketList):
+    def dump(self, fh=sys.stdout, indent=0):
+        print >> fh, " " * indent, "<packet>"
+        indent += 1
+        for child in self.children:
+            child.dump(fh, indent)
+        print >> fh, " " * indent, "</packet>"
+
+
+class Protocol(ProtoTreeItem):
+
+    def dump(self, fh=sys.stdout, indent=0):
+        print >> fh, "%s<proto " % (" " * indent,),
+
+        ProtoTreeItem.dump(self, fh)
+
+        print >> fh, '>'
+
+        indent += 1
+        for child in self.children:
+            child.dump(fh, indent)
+        print >> fh, " " * indent, "</proto>"
+
+
+class Field(ProtoTreeItem):
+
+    def dump(self, fh=sys.stdout, indent=0):
+        print >> fh, "%s<field " % (" " * indent,),
+
+        ProtoTreeItem.dump(self, fh)
+
+        if self.children:
+            print >> fh, ">"
+            indent += 1
+            for child in self.children:
+                child.dump(fh, indent)
+            print >> fh, " " * indent, "</field>"
+
+        else:
+            print >> fh, "/>"
+
+
+class ParseXML(xml.sax.handler.ContentHandler):
+
+    ELEMENT_FILE = "pdml"
+    ELEMENT_FRAME = "packet"
+    ELEMENT_PROTOCOL = "proto"
+    ELEMENT_FIELD = "field"
+
+    def __init__(self, cb):
+        self.cb = cb
+        self.chars = ""
+        self.element_stack = []
+
+    def startElement(self, name, xmlattrs):
+        self.chars = ""
+
+        if name == self.ELEMENT_FILE:
+            # Eventually, we should check version number of pdml here
+            elem = CaptureFile()
+
+        elif name == self.ELEMENT_FRAME:
+            elem = Packet(xmlattrs)
+
+        elif name == self.ELEMENT_PROTOCOL:
+            elem = Protocol(xmlattrs)
+
+        elif name == self.ELEMENT_FIELD:
+            elem = Field(xmlattrs)
+
+        else:
+            sys.exit("Unknown element: %s" % (name,))
+
+        self.element_stack.append(elem)
+
+
+    def endElement(self, name):
+        elem = self.element_stack.pop()
+
+#        if isinstance(elem, Field):
+#            if elem.get_name() == "frame.number":
+#                print >> sys.stderr, "Packet:", elem.get_show()
+
+        # Add element as child to previous element as long
+        # as there is more than 1 element in the stack. Only
+        # one element in the stack means that the element in
+        # the stack is the single CaptureFile element, and we don't
+        # want to add this element to that, as we only want one
+        # Packet element in memory at a time.
+        if len(self.element_stack) > 1:
+            parent_elem = self.element_stack[-1]
+            parent_elem.add_child(elem)
+
+        self.chars = ""
+
+        # If we just finished a Packet element, hand it to the
+        # user's callback.
+        if isinstance(elem, Packet):
+            self.cb(elem)
+
+    def characters(self, chars):
+        self.chars = self.chars + chars
+
+
+def _create_parser(cb):
+    """Internal function for setting up the SAX parser."""
+
+    # Create a parser
+    parser = xml.sax.make_parser()
+
+    # Create the handler
+    handler = ParseXML(cb)
+
+    # Tell the parser to use our handler
+    parser.setContentHandler(handler)
+
+    # Don't fetch the DTD, in case it is listed
+    parser.setFeature(xml.sax.handler.feature_external_ges, False)
+
+    return parser
+
+def parse_fh(fh, cb):
+    """Parse a PDML file, given a filehandle, and call the callback
+    function (cb) once for each Packet object."""
+
+    parser = _create_parser(cb)
+
+    # Parse the file
+    parser.parse(fh)
+
+    # Close the parser ; this is erroring out, but I'm not sure why.
+    #parser.close()
+
+def parse_string(text, cb):
+    """Parse the PDML contained in a string."""
+    stream = StringIO.StringIO(text)
+    parse_fh(stream, cb)
+
+def _test():
+    import sys
+
+    def test_cb(obj):
+        pass
+
+    filename = sys.argv[1]
+    fh = open(filename, "r")
+    parse_fh(fh, test_cb)
+
+if __name__ == '__main__':
+    _test()
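parse_fh() streams the PDML through SAX and hands each completed Packet to the callback, so only one packet tree is held in memory at a time; get_items_before() then supports the tunneled-protocol case described in its docstring. A usage sketch, in Python 2 to match the module (it relies on cStringIO and print-chevron syntax); the input file is assumed to come from something like `tshark -r capture.pcap -T pdml > capture.pdml`:

# pdml_ports.py -- usage sketch for the WiresharkXML module above.
import sys
import WiresharkXML

def on_packet(packet):
    # packet is a Packet; get_items() is a depth-first search by name.
    for proto in packet.get_items("http"):
        # Find the tcp.dstport fields that occur before this http proto,
        # which does the right thing for tunneled/nested protocols.
        for port in packet.get_items_before("tcp.dstport", proto):
            sys.stdout.write("http seen on dst port %s\n" % port.get_show())

if __name__ == "__main__":
    fh = open(sys.argv[1], "r")
    WiresharkXML.parse_fh(fh, on_packet)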
diff --git a/tools/alpine-setup.sh b/tools/alpine-setup.sh
new file mode 100755
index 0000000..4622035
--- /dev/null
+++ b/tools/alpine-setup.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+# Set up a development environment on Alpine systems
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+
+set -e -u -o pipefail
+
+function print_usage() {
+    printf "\\nUtility to set up an Alpine system for Wireshark development.\\n"
+    printf "The basic usage installs the needed software\\n\\n"
+    printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
+    printf "\\t--install-optional: install optional software as well\\n"
+    printf "\\t--install-all: install everything\\n"
+    printf "\\t[other]: other options are passed as-is to apk\\n"
+}
+
+ADDITIONAL=0
+OPTIONS=
+for arg; do
+    case $arg in
+        --help)
+            print_usage
+            exit 0
+            ;;
+        --install-optional)
+            ADDITIONAL=1
+            ;;
+        --install-all)
+            ADDITIONAL=1
+            ;;
+        *)
+            OPTIONS="$OPTIONS $arg"
+            ;;
+    esac
+done
+
+# Check if the user is root
+if [ "$(id -u)" -ne 0 ]
+then
+    echo "You must be root."
+    exit 1
+fi
+
+BASIC_LIST="
+    cmake
+    ninja
+    gcc
+    g++
+    glib-dev
+    libgcrypt-dev
+    flex
+    tiff-dev
+    c-ares-dev
+    pcre2-dev
+    qt5-qtbase-dev
+    qt5-qttools-dev
+    qt5-qtmultimedia-dev
+    qt5-qtsvg-dev
+    speexdsp-dev
+    python3
+    "
+
+ADDITIONAL_LIST="
+    git
+    asciidoctor
+    libssh-dev
+    spandsp-dev
+    libcap-dev
+    libpcap-dev
+    libxml2-dev
+    libmaxminddb-dev
+    krb5-dev
+    lz4-dev
+    gnutls-dev
+    snappy-dev
+    nghttp2-dev
+    nghttp3-dev
+    lua5.2-dev
+    libnl3-dev
+    sbc-dev
+    minizip-dev
+    brotli-dev
+    perl
+    py3-pytest
+    py3-pytest-xdist
+    "
+
+# Uncomment to add PNG compression utilities used by compress-pngs:
+# ADDITIONAL_LIST="$ADDITIONAL_LIST \
+#     advancecomp \
+#     optipng \
+#     oxipng \
+#     pngcrush"
+
+# Adds package $2 to list variable $1 if the package is found.
+add_package() { + local list="$1" pkgname="$2" + + # fail if the package is not known + apk list $pkgname &> /dev/null || return 1 + + # package is found, append it to list + eval "${list}=\"\${${list}} \${pkgname}\"" +} + +ACTUAL_LIST=$BASIC_LIST + +# Now arrange for optional support libraries +if [ $ADDITIONAL -ne 0 ] +then + ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST" +fi + +apk update || exit 2 +apk add $ACTUAL_LIST $OPTIONS || exit 2 + +if [ $ADDITIONAL -eq 0 ] +then + printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n" +fi diff --git a/tools/arch-setup.sh b/tools/arch-setup.sh new file mode 100755 index 0000000..1443c52 --- /dev/null +++ b/tools/arch-setup.sh @@ -0,0 +1,136 @@ +#!/bin/bash +# Setup development environment on Arch Linux +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# We drag in tools that might not be needed by all users; it's easier +# that way. +# + +set -e -u -o pipefail + +function print_usage() { + printf "\\nUtility to setup a pacman-based system for Wireshark development.\\n" + printf "The basic usage installs the needed software\\n\\n" + printf "Usage: %s [--install-optional] [...other options...]\\n" "$0" + printf "\\t--install-optional: install optional software as well\\n" + printf "\\t--install-test-deps: install packages required to run all tests\\n" + printf "\\t--install-all: install everything\\n" + printf "\\t[other]: other options are passed as-is to pacman\\n" + printf "\\tPass --noconfirm to bypass any \"are you sure?\" messages.\\n" +} + +ADDITIONAL=0 +TESTDEPS=0 +AUR=0 +OPTIONS= +for arg; do + case $arg in + --help) + print_usage + exit 0 + ;; + --install-optional) + ADDITIONAL=1 + ;; + --install-test-deps) + TESTDEPS=1 + ;; + --install-all) + ADDITIONAL=1 + TESTDEPS=1 + AUR=1 + ;; + *) + OPTIONS="$OPTIONS $arg" + ;; + esac +done + +# Check if the user is root +if [ "$(id -u)" -ne 0 ] +then + echo "You must be root." + exit 1 +fi + +BASIC_LIST="base-devel \ + bcg729 \ + brotli \ + c-ares \ + cmake \ + git \ + glib2 \ + gnutls \ + krb5 \ + libcap \ + libgcrypt \ + libilbc \ + libmaxminddb \ + libnghttp2 \ + libnghttp3 \ + libnl \ + libpcap \ + libssh \ + libxml2 \ + lua52 \ + lz4 \ + minizip \ + ninja \ + pcre2 \ + python \ + qt6-base \ + qt6-multimedia \ + qt6-tools \ + qt6-5compat \ + sbc \ + snappy \ + spandsp \ + speexdsp \ + zlib \ + zstd" + +ADDITIONAL_LIST="asciidoctor \ + ccache \ + docbook-xml \ + docbook-xsl \ + doxygen \ + libxslt \ + perl" + +TESTDEPS_LIST="python-pytest \ + python-pytest-xdist" + +ACTUAL_LIST=$BASIC_LIST + +if [ $ADDITIONAL -ne 0 ] +then + ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST" +fi + +if [ $TESTDEPS -ne 0 ] +then + ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST" +fi + +# Partial upgrades are unsupported. +pacman --sync --refresh --sysupgrade --needed $ACTUAL_LIST $OPTIONS || exit 2 + +if [ $ADDITIONAL -eq 0 ] +then + printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n" +fi + +if [ $TESTDEPS -eq 0 ] +then + printf "\n*** Test deps not installed. Rerun with --install-test-deps to have them.\n" +fi + +if [ $AUR -ne 0 ] +then + printf "\n*** These and other packages may also be found in the AUR: libsmi.\n" +fi diff --git a/tools/asn2deb b/tools/asn2deb new file mode 100755 index 0000000..926d34e --- /dev/null +++ b/tools/asn2deb @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 + +# asn2deb - quick hack by W. 
Borgert to create +# Debian GNU/Linux packages from ASN.1 files for Wireshark. +# Copyright 2004, W. Borgert + +# ASN.1 module for Wireshark, use of snacc type table: +# Copyright 2003, Matthijs Melchior +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs + +# SPDX-License-Identifier: GPL-2.0-or-later + +import getopt, os, string, sys, time + +scriptinfo = """asn2deb version 2004-02-17 +Copyright 2004, W. Borgert +Free software, released under the terms of the GPL.""" + +options = {'asn': None, + 'dbopts': "", + 'email': "invalid@invalid.invalid", + 'help': 0, + 'name': "No Name", + 'preserve': 0, + 'version': 0} + +def create_file(filename, content, mode = None): + """Create a file with given content.""" + global options + if options['preserve'] and os.path.isfile(filename): + return + f = open(filename, 'w') + f.write(content) + f.close() + if mode: + os.chmod(filename, mode) + +def create_files(version, deb, email, asn, name, iso, rfc): + """Create all files for the .deb build process.""" + base = asn.lower()[:-5] + + if not os.path.isdir("packaging/debian"): + os.mkdir("packaging/debian") + + create_file("packaging/debian/rules", """#!/usr/bin/make -f + +include /usr/share/cdbs/1/rules/debhelper.mk +include /usr/share/cdbs/1/class/autotools.mk + +PREFIX=`pwd`/packaging/debian/wireshark-asn1-%s + +binary-post-install/wireshark-asn1-%s:: + rm -f $(PREFIX)/usr/lib/wireshark/plugins/%s/*.a +""" % (base, base, version), 0o755) + + create_file("packaging/debian/control", """Source: wireshark-asn1-%s +Section: net +Priority: optional +Maintainer: %s <%s> +Standards-Version: 3.6.1.0 +Build-Depends: snacc, autotools-dev, debhelper, cdbs + +Package: wireshark-asn1-%s +Architecture: all +Depends: wireshark (= %s) +Description: ASN.1/BER dissector for %s + This package provides a type table for decoding BER (Basic Encoding + Rules) data over TCP or UDP, described by an ASN.1 (Abstract Syntax + Notation 1) file '%s.asn1'. +""" % (base, name, email, base, deb, base, base)) + + create_file("packaging/debian/changelog", + """wireshark-asn1-%s (0.0.1-1) unstable; urgency=low + + * Automatically created package. + + -- %s <%s> %s +""" % (base, name, email, rfc + "\n (" + iso + ")")) + + create_file("packaging/debian/copyright", + """This package has been created automatically be asn2deb on +%s for Debian GNU/Linux. + +Wireshark: https://www.wireshark.com/ + +Copyright: + +GPL, as evidenced by existence of GPL license file \"COPYING\". 
+(the GNU GPL may be viewed on Debian systems in
+/usr/share/common-licenses/GPL)
+""" % (iso))
+
+def get_wrs_version():
+    """Detect version of wireshark-dev package."""
+    deb = os.popen(
+        "dpkg-query -W --showformat='${Version}' wireshark-dev").read()
+    debv = deb.find("-")
+    if debv == -1: debv = len(deb)
+    version = deb[deb.find(":")+1:debv]
+    return version, deb
+
+def get_time():
+    """Detect current time and return ISO and RFC time string."""
+    currenttime = time.gmtime()
+    return time.strftime("%Y-%m-%d %H:%M:%S +0000", currenttime), \
+           time.strftime("%a, %d %b %Y %H:%M:%S +0000", currenttime)
+
+def main():
+    global options
+    process_opts(sys.argv)
+    iso, rfc = get_time()
+    version, deb = get_wrs_version()
+    create_files(version, deb,
+                 options['email'], options['asn'], options['name'],
+                 iso, rfc)
+    os.system("dpkg-buildpackage " + options['dbopts'])
+
+def process_opts(argv):
+    """Process command line options."""
+    global options
+    try:
+        opts, args = getopt.getopt(argv[1:], "a:d:e:hn:pv",
+                                   ["asn=",
+                                    "dbopts=",
+                                    "email=",
+                                    "help",
+                                    "name=",
+                                    "preserve",
+                                    "version"])
+    except getopt.GetoptError:
+        usage(argv[0])
+        sys.exit(1)
+    for o, a in opts:
+        if o in ("-a", "--asn"):
+            options['asn'] = a
+        if o in ("-d", "--dbopts"):
+            options['dbopts'] = a
+        if o in ("-e", "--email"):
+            options['email'] = a
+        if o in ("-h", "--help"):
+            options['help'] = 1
+        if o in ("-n", "--name"):
+            options['name'] = a
+        if o in ("-p", "--preserve"):
+            options['preserve'] = 1
+        if o in ("-v", "--version"):
+            options['version'] = 1
+    if options['help']:
+        usage(argv[0])
+        sys.exit(0)
+    if options['version']:
+        print(scriptinfo)
+        sys.exit(0)
+    if not options['asn']:
+        print("mandatory ASN.1 file parameter missing")
+        sys.exit(1)
+    if not os.access(options['asn'], os.R_OK):
+        print("ASN.1 file not accessible")
+        sys.exit(1)
+
+def usage(name):
+    """Print usage help."""
+    print("Usage: " + name + " <parameters>\n" + \
+          "Parameters are\n" + \
+          "  --asn      -a asn1file, ASN.1 file to use (mandatory)\n" + \
+          "  --dbopts   -d opts, options for dpkg-buildpackage\n" + \
+          "  --email    -e address, use e-mail address\n" + \
+          "  --help     -h, print help and exit\n" + \
+          "  --name     -n name, use user name\n" + \
+          "  --preserve -p, do not overwrite files\n" + \
+          "  --version  -v, print version and exit\n" + \
+          "Example:\n" + \
+          name + " -e me@foo.net -a bar.asn1 -n \"My Name\" " + \
+          "-d \"-rfakeroot -uc -us\"")
+if __name__ == '__main__':
+    main()
diff --git a/tools/asn2wrs.py b/tools/asn2wrs.py
new file mode 100755
index 0000000..6669be8
--- /dev/null
+++ b/tools/asn2wrs.py
@@ -0,0 +1,8242 @@
+#!/usr/bin/env python3
+
+#
+# asn2wrs.py
+# ASN.1 to Wireshark dissector compiler
+# Copyright 2004 Tomas Kukosa
+#
+# SPDX-License-Identifier: MIT
+#
+
+"""ASN.1 to Wireshark dissector compiler"""
+
+#
+# Compiler from ASN.1 specification to the Wireshark dissector
+#
+# Based on ASN.1 to Python compiler from Aaron S.
Lav's PyZ3950 package licensed under the X Consortium license +# https://www.pobox.com/~asl2/software/PyZ3950/ +# (ASN.1 to Python compiler functionality is broken but not removed, it could be revived if necessary) +# +# It requires Dave Beazley's PLY parsing package licensed under the LGPL (tested with version 2.3) +# https://www.dabeaz.com/ply/ +# +# +# ITU-T Recommendation X.680 (07/2002), +# Information technology - Abstract Syntax Notation One (ASN.1): Specification of basic notation +# +# ITU-T Recommendation X.681 (07/2002), +# Information technology - Abstract Syntax Notation One (ASN.1): Information object specification +# +# ITU-T Recommendation X.682 (07/2002), +# Information technology - Abstract Syntax Notation One (ASN.1): Constraint specification +# +# ITU-T Recommendation X.683 (07/2002), +# Information technology - Abstract Syntax Notation One (ASN.1): Parameterization of ASN.1 specifications +# +# ITU-T Recommendation X.880 (07/1994), +# Information technology - Remote Operations: Concepts, model and notation +# + +import warnings + +import re +import sys +import os +import os.path +import time +import getopt +import traceback + +try: + from ply import lex + from ply import yacc +except ImportError: + # Fallback: use lex.py and yacc from the tools directory within the + # Wireshark source tree if python-ply is not installed. + import lex + import yacc + +if sys.version_info[0] < 3: + from string import maketrans + + +# OID name -> number conversion table +oid_names = { + '/itu-t' : 0, + '/itu' : 0, + '/ccitt' : 0, + '/itu-r' : 0, + '0/recommendation' : 0, + '0.0/a' : 1, + '0.0/b' : 2, + '0.0/c' : 3, + '0.0/d' : 4, + '0.0/e' : 5, + '0.0/f' : 6, + '0.0/g' : 7, + '0.0/h' : 8, + '0.0/i' : 9, + '0.0/j' : 10, + '0.0/k' : 11, + '0.0/l' : 12, + '0.0/m' : 13, + '0.0/n' : 14, + '0.0/o' : 15, + '0.0/p' : 16, + '0.0/q' : 17, + '0.0/r' : 18, + '0.0/s' : 19, + '0.0/t' : 20, + '0.0/tseries' : 20, + '0.0/u' : 21, + '0.0/v' : 22, + '0.0/w' : 23, + '0.0/x' : 24, + '0.0/y' : 25, + '0.0/z' : 26, + '0/question' : 1, + '0/administration' : 2, + '0/network-operator' : 3, + '0/identified-organization' : 4, + '0/r-recommendation' : 5, + '0/data' : 9, + '/iso' : 1, + '1/standard' : 0, + '1/registration-authority' : 1, + '1/member-body' : 2, + '1/identified-organization' : 3, + '/joint-iso-itu-t' : 2, + '/joint-iso-ccitt' : 2, + '2/presentation' : 0, + '2/asn1' : 1, + '2/association-control' : 2, + '2/reliable-transfer' : 3, + '2/remote-operations' : 4, + '2/ds' : 5, + '2/directory' : 5, + '2/mhs' : 6, + '2/mhs-motis' : 6, + '2/ccr' : 7, + '2/oda' : 8, + '2/ms' : 9, + '2/osi-management' : 9, + '2/transaction-processing' : 10, + '2/dor' : 11, + '2/distinguished-object-reference' : 11, + '2/reference-data-transfe' : 12, + '2/network-layer' : 13, + '2/network-layer-management' : 13, + '2/transport-layer' : 14, + '2/transport-layer-management' : 14, + '2/datalink-layer' : 15, + '2/datalink-layer-managemen' : 15, + '2/datalink-layer-management-information' : 15, + '2/country' : 16, + '2/registration-procedures' : 17, + '2/registration-procedure' : 17, + '2/physical-layer' : 18, + '2/physical-layer-management' : 18, + '2/mheg' : 19, + '2/genericULS' : 20, + '2/generic-upper-layers-security' : 20, + '2/guls' : 20, + '2/transport-layer-security-protocol' : 21, + '2/network-layer-security-protocol' : 22, + '2/international-organizations' : 23, + '2/internationalRA' : 23, + '2/sios' : 24, + '2/uuid' : 25, + '2/odp' : 26, + '2/upu' : 40, +} + +ITEM_FIELD_NAME = '_item' +UNTAG_TYPE_NAME = '_untag' + +def 
asn2c(id): + return id.replace('-', '_').replace('.', '_').replace('&', '_') + +input_file = None +g_conform = None +lexer = None +in_oid = False + +class LexError(Exception): + def __init__(self, tok, filename=None): + self.tok = tok + self.filename = filename + self.msg = "Unexpected character %r" % (self.tok.value[0]) + Exception.__init__(self, self.msg) + def __repr__(self): + return "%s:%d: %s" % (self.filename, self.tok.lineno, self.msg) + __str__ = __repr__ + + +class ParseError(Exception): + def __init__(self, tok, filename=None): + self.tok = tok + self.filename = filename + self.msg = "Unexpected token %s(%r)" % (self.tok.type, self.tok.value) + Exception.__init__(self, self.msg) + def __repr__(self): + return "%s:%d: %s" % (self.filename, self.tok.lineno, self.msg) + __str__ = __repr__ + + +class DuplicateError(Exception): + def __init__(self, type, ident): + self.type = type + self.ident = ident + self.msg = "Duplicate %s for %s" % (self.type, self.ident) + Exception.__init__(self, self.msg) + def __repr__(self): + return self.msg + __str__ = __repr__ + +class CompError(Exception): + def __init__(self, msg): + self.msg = msg + Exception.__init__(self, self.msg) + def __repr__(self): + return self.msg + __str__ = __repr__ + + +states = ( + ('braceignore','exclusive'), +) + +precedence = ( + ('left', 'UNION', 'BAR'), + ('left', 'INTERSECTION', 'CIRCUMFLEX'), +) +# 11 ASN.1 lexical items + +static_tokens = { + r'::=' : 'ASSIGNMENT', # 11.16 Assignment lexical item + r'\.\.' : 'RANGE', # 11.17 Range separator + r'\.\.\.' : 'ELLIPSIS', # 11.18 Ellipsis + r'\[\[' : 'LVERBRACK', # 11.19 Left version brackets + r'\]\]' : 'RVERBRACK', # 11.20 Right version brackets + # 11.26 Single character lexical items + r'\{' : 'LBRACE', + r'\}' : 'RBRACE', + r'<' : 'LT', + #r'>' : 'GT', + r',' : 'COMMA', + r'\.' : 'DOT', + r'\(' : 'LPAREN', + r'\)' : 'RPAREN', + r'\[' : 'LBRACK', + r'\]' : 'RBRACK', + r'-' : 'MINUS', + r':' : 'COLON', + #r'=' : 'EQ', + #r'"' : 'QUOTATION', + #r"'" : 'APOSTROPHE', + r';' : 'SEMICOLON', + r'@' : 'AT', + r'\!' 
: 'EXCLAMATION', + r'\^' : 'CIRCUMFLEX', + r'\&' : 'AMPERSAND', + r'\|' : 'BAR' +} + +# 11.27 Reserved words + +# all keys in reserved_words must start w/ upper case +reserved_words = { + 'ABSENT' : 'ABSENT', + 'ABSTRACT-SYNTAX' : 'ABSTRACT_SYNTAX', + 'ALL' : 'ALL', + 'APPLICATION' : 'APPLICATION', + 'AUTOMATIC' : 'AUTOMATIC', + 'BEGIN' : 'BEGIN', + 'BIT' : 'BIT', + 'BOOLEAN' : 'BOOLEAN', + 'BY' : 'BY', + 'CHARACTER' : 'CHARACTER', + 'CHOICE' : 'CHOICE', + 'CLASS' : 'CLASS', + 'COMPONENT' : 'COMPONENT', + 'COMPONENTS' : 'COMPONENTS', + 'CONSTRAINED' : 'CONSTRAINED', + 'CONTAINING' : 'CONTAINING', + 'DEFAULT' : 'DEFAULT', + 'DEFINITIONS' : 'DEFINITIONS', + 'EMBEDDED' : 'EMBEDDED', +# 'ENCODED' : 'ENCODED', + 'END' : 'END', + 'ENUMERATED' : 'ENUMERATED', +# 'EXCEPT' : 'EXCEPT', + 'EXPLICIT' : 'EXPLICIT', + 'EXPORTS' : 'EXPORTS', +# 'EXTENSIBILITY' : 'EXTENSIBILITY', + 'EXTERNAL' : 'EXTERNAL', + 'FALSE' : 'FALSE', + 'FROM' : 'FROM', + 'GeneralizedTime' : 'GeneralizedTime', + 'IDENTIFIER' : 'IDENTIFIER', + 'IMPLICIT' : 'IMPLICIT', +# 'IMPLIED' : 'IMPLIED', + 'IMPORTS' : 'IMPORTS', + 'INCLUDES' : 'INCLUDES', + 'INSTANCE' : 'INSTANCE', + 'INTEGER' : 'INTEGER', + 'INTERSECTION' : 'INTERSECTION', + 'MAX' : 'MAX', + 'MIN' : 'MIN', + 'MINUS-INFINITY' : 'MINUS_INFINITY', + 'NULL' : 'NULL', + 'OBJECT' : 'OBJECT', + 'ObjectDescriptor' : 'ObjectDescriptor', + 'OCTET' : 'OCTET', + 'OF' : 'OF', + 'OPTIONAL' : 'OPTIONAL', + 'PATTERN' : 'PATTERN', + 'PDV' : 'PDV', + 'PLUS-INFINITY' : 'PLUS_INFINITY', + 'PRESENT' : 'PRESENT', + 'PRIVATE' : 'PRIVATE', + 'REAL' : 'REAL', + 'RELATIVE-OID' : 'RELATIVE_OID', + 'SEQUENCE' : 'SEQUENCE', + 'SET' : 'SET', + 'SIZE' : 'SIZE', + 'STRING' : 'STRING', + 'SUCCESSORS' : 'SUCCESSORS', + 'SYNTAX' : 'SYNTAX', + 'TAGS' : 'TAGS', + 'TRUE' : 'TRUE', + 'TYPE-IDENTIFIER' : 'TYPE_IDENTIFIER', + 'UNION' : 'UNION', + 'UNIQUE' : 'UNIQUE', + 'UNIVERSAL' : 'UNIVERSAL', + 'UTCTime' : 'UTCTime', + 'WITH' : 'WITH', +# X.208 obsolete but still used + 'ANY' : 'ANY', + 'DEFINED' : 'DEFINED', +} + +for k in list(static_tokens.keys()): + if static_tokens [k] is None: + static_tokens [k] = k + +StringTypes = ['Numeric', 'Printable', 'IA5', 'BMP', 'Universal', 'UTF8', + 'Teletex', 'T61', 'Videotex', 'Graphic', 'ISO646', 'Visible', + 'General'] + +# Effective permitted-alphabet constraints are PER-visible only +# for the known-multiplier character string types (X.691 27.1) +# +# XXX: This should include BMPString (UCS2) and UniversalString (UCS4), +# but asn2wrs only suports the RestrictedCharacterStringValue +# notation of "cstring", but not that of "CharacterStringList", +# "Quadruple", or "Tuple" (See X.680 41.8), and packet-per.c does +# not support members of the permitted-alphabet being outside the +# ASCII range. We don't currently have any ASN.1 modules that need it, +# anyway. +KnownMultiplierStringTypes = ('NumericString', 'PrintableString', 'IA5String', + 'ISO646String', 'VisibleString') + +for s in StringTypes: + reserved_words[s + 'String'] = s + 'String' + +tokens = list(static_tokens.values()) \ + + list(reserved_words.values()) \ + + ['BSTRING', 'HSTRING', 'QSTRING', + 'UCASE_IDENT', 'LCASE_IDENT', 'LCASE_IDENT_ASSIGNED', 'CLASS_IDENT', + 'REAL_NUMBER', 'NUMBER', 'PYQUOTE'] + + +cur_mod = __import__ (__name__) # XXX blech! 
+ +for (k, v) in list(static_tokens.items ()): + cur_mod.__dict__['t_' + v] = k + +# 11.10 Binary strings +def t_BSTRING (t): + r"'[01]*'B" + return t + +# 11.12 Hexadecimal strings +def t_HSTRING (t): + r"'[0-9A-Fa-f]*'H" + return t + +def t_QSTRING (t): + r'"([^"]|"")*"' + return t + +def t_UCASE_IDENT (t): + r"[A-Z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-' + if (is_class_ident(t.value)): t.type = 'CLASS_IDENT' + if (is_class_syntax(t.value)): t.type = t.value + t.type = reserved_words.get(t.value, t.type) + return t + +lcase_ident_assigned = {} +def t_LCASE_IDENT (t): + r"[a-z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-' + if (not in_oid and (t.value in lcase_ident_assigned)): t.type = 'LCASE_IDENT_ASSIGNED' + return t + +# 11.9 Real numbers +def t_REAL_NUMBER (t): + r"[0-9]+\.[0-9]*(?!\.)" + return t + +# 11.8 Numbers +def t_NUMBER (t): + r"0|([1-9][0-9]*)" + return t + +# 11.6 Comments +pyquote_str = 'PYQUOTE' +def t_COMMENT(t): + r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)" + if (t.value.find("\n") >= 0) : t.lexer.lineno += 1 + if t.value[2:2+len (pyquote_str)] == pyquote_str: + t.value = t.value[2+len(pyquote_str):] + t.value = t.value.lstrip () + t.type = pyquote_str + return t + return None + +t_ignore = " \t\r" + +def t_NEWLINE(t): + r'\n+' + t.lexer.lineno += t.value.count("\n") + +def t_error(t): + global input_file + raise LexError(t, input_file) + +# state 'braceignore' + +def t_braceignore_lbrace(t): + r'\{' + t.lexer.level +=1 + +def t_braceignore_rbrace(t): + r'\}' + t.lexer.level -=1 + # If closing brace, return token + if t.lexer.level == 0: + t.type = 'RBRACE' + return t + +def t_braceignore_QSTRING (t): + r'"([^"]|"")*"' + t.lexer.lineno += t.value.count("\n") + +def t_braceignore_COMMENT(t): + r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)" + if (t.value.find("\n") >= 0) : t.lexer.lineno += 1 + +def t_braceignore_nonspace(t): + r'[^\s\{\}\"-]+|-(?!-)' + +t_braceignore_ignore = " \t\r" + +def t_braceignore_NEWLINE(t): + r'\n+' + t.lexer.lineno += t.value.count("\n") + +def t_braceignore_error(t): + t.lexer.skip(1) + +class Ctx: + def __init__ (self, defined_dict, indent = 0): + self.tags_def = 'EXPLICIT' # default = explicit + self.indent_lev = 0 + self.assignments = {} + self.dependencies = {} + self.pyquotes = [] + self.defined_dict = defined_dict + self.name_ctr = 0 + def spaces (self): + return " " * (4 * self.indent_lev) + def indent (self): + self.indent_lev += 1 + def outdent (self): + self.indent_lev -= 1 + assert (self.indent_lev >= 0) + def register_assignment (self, ident, val, dependencies): + if ident in self.assignments: + raise DuplicateError("assignment", ident) + if ident in self.defined_dict: + raise Exception("cross-module duplicates for %s" % ident) + self.defined_dict [ident] = 1 + self.assignments[ident] = val + self.dependencies [ident] = dependencies + return "" + # return "#%s depends on %s" % (ident, str (dependencies)) + def register_pyquote (self, val): + self.pyquotes.append (val) + return "" + def output_assignments (self): + already_output = {} + text_list = [] + assign_keys = list(self.assignments.keys()) + to_output_count = len (assign_keys) + while True: + any_output = 0 + for (ident, val) in list(self.assignments.items ()): + if ident in already_output: + continue + ok = 1 + for d in self.dependencies [ident]: + if ((d not in already_output) and + (d in assign_keys)): + ok = 0 + if ok: + text_list.append ("%s=%s" % (ident, + self.assignments [ident])) + already_output [ident] = 1 + any_output = 1 + to_output_count -= 1 + assert 
(to_output_count >= 0) + if not any_output: + if to_output_count == 0: + break + # OK, we detected a cycle + cycle_list = [] + for ident in list(self.assignments.keys ()): + if ident not in already_output: + depend_list = [d for d in self.dependencies[ident] if d in assign_keys] + cycle_list.append ("%s(%s)" % (ident, ",".join (depend_list))) + + text_list.append ("# Cycle XXX " + ",".join (cycle_list)) + for (ident, val) in list(self.assignments.items ()): + if ident not in already_output: + text_list.append ("%s=%s" % (ident, self.assignments [ident])) + break + + return "\n".join (text_list) + def output_pyquotes (self): + return "\n".join (self.pyquotes) + def make_new_name (self): + self.name_ctr += 1 + return "_compiler_generated_name_%d" % (self.name_ctr,) + +#--- Flags for EXPORT, USER_DEFINED, NO_EMIT, MAKE_ENUM ------------------------------- +EF_TYPE = 0x0001 +EF_VALS = 0x0002 +EF_ENUM = 0x0004 +EF_WS_DLL = 0x0010 # exported from shared library +EF_EXTERN = 0x0020 +EF_NO_PROT = 0x0040 +EF_NO_TYPE = 0x0080 +EF_UCASE = 0x0100 +EF_TABLE = 0x0400 +EF_DEFINE = 0x0800 +EF_MODULE = 0x1000 + +#--- common dependency computation --- +# Input : list of items +# dictionary with lists of dependency +# +# +# Output : list of two outputs: +# [0] list of items in dependency +# [1] list of cycle dependency cycles +def dependency_compute(items, dependency, map_fn = lambda t: t, ignore_fn = lambda t: False): + item_ord = [] + item_cyc = [] + x = {} # already emitted + #print '# Dependency computation' + for t in items: + if map_fn(t) in x: + #print 'Continue: %s : %s' % (t, (map_fn(t)) + continue + stack = [t] + stackx = {t : dependency.get(t, [])[:]} + #print 'Push: %s : %s' % (t, str(stackx[t])) + while stack: + if stackx[stack[-1]]: # has dependencies + d = stackx[stack[-1]].pop(0) + if map_fn(d) in x or ignore_fn(d): + continue + if d in stackx: # cyclic dependency + c = stack[:] + c.reverse() + c = [d] + c[0:c.index(d)+1] + c.reverse() + item_cyc.append(c) + #print 'Cyclic: %s ' % (' -> '.join(c)) + continue + stack.append(d) + stackx[d] = dependency.get(d, [])[:] + #print 'Push: %s : %s' % (d, str(stackx[d])) + else: + #print 'Pop: %s' % (stack[-1]) + del stackx[stack[-1]] + e = map_fn(stack.pop()) + if e in x: + continue + #print 'Add: %s' % (e) + item_ord.append(e) + x[e] = True + return (item_ord, item_cyc) + +# Given a filename, return a relative path from the current directory +def relpath(filename): + return os.path.relpath(filename) + +# Given a filename, return a relative path from epan/dissectors +def rel_dissector_path(filename): + path_parts = os.path.abspath(filename).split(os.sep) + while (len(path_parts) > 3 and path_parts[0] != 'asn1'): + path_parts.pop(0) + path_parts.insert(0, '.') + return '/'.join(path_parts) + + +#--- EthCtx ------------------------------------------------------------------- +class EthCtx: + def __init__(self, conform, output, indent = 0): + self.conform = conform + self.output = output + self.conform.ectx = self + self.output.ectx = self + self.encoding = 'per' + self.aligned = False + self.default_oid_variant = '' + self.default_opentype_variant = '' + self.default_containing_variant = '_pdu_new' + self.default_embedded_pdv_cb = None + self.default_external_type_cb = None + self.remove_prefix = None + self.srcdir = None + self.emitted_pdu = {} + self.module = {} + self.module_ord = [] + self.all_type_attr = {} + self.all_tags = {} + self.all_vals = {} + + def encp(self): # encoding protocol + encp = self.encoding + return encp + + # Encoding + def 
Per(self): return self.encoding == 'per' + def Ber(self): return self.encoding == 'ber' + def Oer(self): return self.encoding == 'oer' + def Aligned(self): return self.aligned + def Unaligned(self): return not self.aligned + def NeedTags(self): return self.tag_opt or self.Ber() + def NAPI(self): return False # disable planned features + + def Module(self): # current module name + return self.modules[-1][0] + + def groups(self): + return self.group_by_prot or (self.conform.last_group > 0) + + def dbg(self, d): + if (self.dbgopt.find(d) >= 0): + return True + else: + return False + + def value_max(self, a, b): + if (a == 'MAX') or (b == 'MAX'): return 'MAX'; + if a == 'MIN': return b; + if b == 'MIN': return a; + try: + if (int(a) > int(b)): + return a + else: + return b + except (ValueError, TypeError): + pass + return "MAX((%s),(%s))" % (a, b) + + def value_min(self, a, b): + if (a == 'MIN') or (b == 'MIN'): return 'MIN'; + if a == 'MAX': return b; + if b == 'MAX': return a; + try: + if (int(a) < int(b)): + return a + else: + return b + except (ValueError, TypeError): + pass + return "MIN((%s),(%s))" % (a, b) + + def value_get_eth(self, val): + if isinstance(val, Value): + return val.to_str(self) + ethname = val + if val in self.value: + ethname = self.value[val]['ethname'] + return ethname + + def value_get_val(self, nm): + val = asn2c(nm) + if nm in self.value: + if self.value[nm]['import']: + v = self.get_val_from_all(nm, self.value[nm]['import']) + if v is None: + msg = 'Need value of imported value identifier %s from %s (%s)' % (nm, self.value[nm]['import'], self.value[nm]['proto']) + warnings.warn_explicit(msg, UserWarning, '', 0) + else: + val = v + else: + val = self.value[nm]['value'] + if isinstance (val, Value): + val = val.to_str(self) + else: + msg = 'Need value of unknown value identifier %s' % (nm) + warnings.warn_explicit(msg, UserWarning, '', 0) + return val + + def eth_get_type_attr(self, type): + #print "eth_get_type_attr(%s)" % (type) + types = [type] + while (not self.type[type]['import']): + val = self.type[type]['val'] + #print val + ttype = type + while (val.type == 'TaggedType'): + val = val.val + ttype += '/' + UNTAG_TYPE_NAME + if (val.type != 'Type_Ref'): + if (type != ttype): + types.append(ttype) + break + type = val.val + types.append(type) + attr = {} + #print " ", types + while len(types): + t = types.pop() + if (self.type[t]['import']): + attr.update(self.type[t]['attr']) + attr.update(self.eth_get_type_attr_from_all(t, self.type[t]['import'])) + elif (self.type[t]['val'].type == 'SelectionType'): + val = self.type[t]['val'] + (ftype, display) = val.eth_ftype(self) + attr.update({ 'TYPE' : ftype, 'DISPLAY' : display, + 'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }); + else: + attr.update(self.type[t]['attr']) + attr.update(self.eth_type[self.type[t]['ethname']]['attr']) + if attr['STRINGS'].startswith('VALS64(') and '|BASE_VAL64_STRING' not in attr['DISPLAY']: + attr['DISPLAY'] += '|BASE_VAL64_STRING' + #print " ", attr + return attr + + def eth_get_type_attr_from_all(self, type, module): + attr = {} + if module in self.all_type_attr and type in self.all_type_attr[module]: + attr = self.all_type_attr[module][type] + return attr + + def get_ttag_from_all(self, type, module): + ttag = None + if module in self.all_tags and type in self.all_tags[module]: + ttag = self.all_tags[module][type] + return ttag + + def get_val_from_all(self, nm, module): + val = None + if module in self.all_vals and nm in self.all_vals[module]: + val = self.all_vals[module][nm] + 
return val + + def get_obj_repr(self, ident, flds=[], not_flds=[]): + def set_type_fn(cls, field, fnfield): + obj[fnfield + '_fn'] = 'NULL' + obj[fnfield + '_pdu'] = 'NULL' + if field in val and isinstance(val[field], Type_Ref): + p = val[field].eth_type_default_pars(self, '') + obj[fnfield + '_fn'] = p['TYPE_REF_FN'] + obj[fnfield + '_fn'] = obj[fnfield + '_fn'] % p # one iteration + if (self.conform.check_item('PDU', cls + '.' + field)): + obj[fnfield + '_pdu'] = 'dissect_' + self.field[val[field].val]['ethname'] + return + # end of get_type_fn() + obj = { '_name' : ident, '_ident' : asn2c(ident)} + obj['_class'] = self.oassign[ident].cls + obj['_module'] = self.oassign[ident].module + val = self.oassign[ident].val + for f in flds: + if f not in val: + return None + for f in not_flds: + if f in val: + return None + for f in list(val.keys()): + if isinstance(val[f], Node): + obj[f] = val[f].fld_obj_repr(self) + else: + obj[f] = str(val[f]) + if (obj['_class'] == 'TYPE-IDENTIFIER') or (obj['_class'] == 'ABSTRACT-SYNTAX'): + set_type_fn(obj['_class'], '&Type', '_type') + if (obj['_class'] == 'OPERATION'): + set_type_fn(obj['_class'], '&ArgumentType', '_argument') + set_type_fn(obj['_class'], '&ResultType', '_result') + if (obj['_class'] == 'ERROR'): + set_type_fn(obj['_class'], '&ParameterType', '_parameter') + return obj + + #--- eth_reg_module ----------------------------------------------------------- + def eth_reg_module(self, module): + #print "eth_reg_module(module='%s')" % (module) + name = module.get_name() + self.modules.append([name, module.get_proto(self)]) + if name in self.module: + raise DuplicateError("module", name) + self.module[name] = [] + self.module_ord.append(name) + + #--- eth_module_dep_add ------------------------------------------------------------ + def eth_module_dep_add(self, module, dep): + self.module[module].append(dep) + + #--- eth_exports ------------------------------------------------------------ + def eth_exports(self, exports): + self.exports_all = False + if ((len(exports) == 1) and (exports[0] == 'ALL')): + self.exports_all = True + return + for e in (exports): + if isinstance(e, Type_Ref): + self.exports.append(e.val) + elif isinstance(e, Class_Ref): + self.cexports.append(e.val) + else: + self.vexports.append(e) + + #--- eth_reg_assign --------------------------------------------------------- + def eth_reg_assign(self, ident, val, virt=False): + #print("eth_reg_assign(ident='%s')" % (ident), 'module=', self.Module()) + if ident in self.assign: + raise DuplicateError("assignment", ident) + self.assign[ident] = { 'val' : val , 'virt' : virt } + self.assign_ord.append(ident) + if (self.exports_all): + self.exports.append(ident) + + #--- eth_reg_vassign -------------------------------------------------------- + def eth_reg_vassign(self, vassign): + ident = vassign.ident + #print "eth_reg_vassign(ident='%s')" % (ident) + if ident in self.vassign: + raise DuplicateError("value assignment", ident) + self.vassign[ident] = vassign + self.vassign_ord.append(ident) + if (self.exports_all): + self.vexports.append(ident) + + #--- eth_reg_oassign -------------------------------------------------------- + def eth_reg_oassign(self, oassign): + ident = oassign.ident + #print "eth_reg_oassign(ident='%s')" % (ident) + if ident in self.oassign: + if self.oassign[ident] == oassign: + return # OK - already defined + else: + raise DuplicateError("information object assignment", ident) + self.oassign[ident] = oassign + self.oassign_ord.append(ident) + 
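+        # Also group the identifier under its object class;
+        # eth_output_table() walks self.oassign_cls[cls] when expanding
+        # per-class table templates.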
self.oassign_cls.setdefault(oassign.cls, []).append(ident) + + #--- eth_import_type -------------------------------------------------------- + def eth_import_type(self, ident, mod, proto): + #print ("eth_import_type(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto)) + if ident in self.type: + #print ("already defined '%s' import=%s, module=%s" % (ident, str(self.type[ident]['import']), self.type[ident].get('module', '-'))) + if not self.type[ident]['import'] and (self.type[ident]['module'] == mod) : + return # OK - already defined + elif self.type[ident]['import'] and (self.type[ident]['import'] == mod) : + return # OK - already imported + else: + raise DuplicateError("type", ident) + self.type[ident] = {'import' : mod, 'proto' : proto, + 'ethname' : '' } + self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE', + 'STRINGS' : 'NULL', 'BITMASK' : '0' } + mident = "$%s$%s" % (mod, ident) + if (self.conform.check_item('TYPE_ATTR', mident)): + self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', mident)) + else: + self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident)) + if (self.conform.check_item('IMPORT_TAG', mident)): + self.conform.copy_item('IMPORT_TAG', ident, mident) + self.type_imp.append(ident) + + #--- dummy_import_type -------------------------------------------------------- + def dummy_import_type(self, ident): + # dummy imported + if ident in self.type: + raise Exception("Try to dummy import for existing type :%s" % ident) + ethtype = asn2c(ident) + self.type[ident] = {'import' : 'xxx', 'proto' : 'xxx', + 'ethname' : ethtype } + self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE', + 'STRINGS' : 'NULL', 'BITMASK' : '0' } + self.eth_type[ethtype] = { 'import' : 'xxx', 'proto' : 'xxx' , 'attr' : {}, 'ref' : []} + print("Dummy imported: %s (%s)" % (ident, ethtype)) + return ethtype + + #--- eth_import_class -------------------------------------------------------- + def eth_import_class(self, ident, mod, proto): + #print "eth_import_class(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto) + if ident in self.objectclass: + #print "already defined import=%s, module=%s" % (str(self.objectclass[ident]['import']), self.objectclass[ident]['module']) + if not self.objectclass[ident]['import'] and (self.objectclass[ident]['module'] == mod) : + return # OK - already defined + elif self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == mod) : + return # OK - already imported + else: + raise DuplicateError("object class", ident) + self.objectclass[ident] = {'import' : mod, 'proto' : proto, + 'ethname' : '' } + self.objectclass_imp.append(ident) + + #--- eth_import_value ------------------------------------------------------- + def eth_import_value(self, ident, mod, proto): + #print "eth_import_value(ident='%s', mod='%s', prot='%s')" % (ident, mod, prot) + if ident in self.value: + #print "already defined import=%s, module=%s" % (str(self.value[ident]['import']), self.value[ident]['module']) + if not self.value[ident]['import'] and (self.value[ident]['module'] == mod) : + return # OK - already defined + elif self.value[ident]['import'] and (self.value[ident]['import'] == mod) : + return # OK - already imported + else: + raise DuplicateError("value", ident) + self.value[ident] = {'import' : mod, 'proto' : proto, + 'ethname' : ''} + self.value_imp.append(ident) + + #--- eth_sel_req ------------------------------------------------------------ + def eth_sel_req(self, typ, sel): + key = typ + '.' 
+ sel + if key not in self.sel_req: + self.sel_req[key] = { 'typ' : typ , 'sel' : sel} + self.sel_req_ord.append(key) + return key + + #--- eth_comp_req ------------------------------------------------------------ + def eth_comp_req(self, type): + self.comp_req_ord.append(type) + + #--- eth_dep_add ------------------------------------------------------------ + def eth_dep_add(self, type, dep): + if type not in self.type_dep: + self.type_dep[type] = [] + self.type_dep[type].append(dep) + + #--- eth_reg_type ----------------------------------------------------------- + def eth_reg_type(self, ident, val, mod=None): + #print("eth_reg_type(ident='%s', type='%s')" % (ident, val.type)) + if ident in self.type: + if self.type[ident]['import'] and (self.type[ident]['import'] == self.Module()) : + # replace imported type + del self.type[ident] + self.type_imp.remove(ident) + else: + #print('DuplicateError: import=', self.type[ident]['import'], 'module=', self.Module()) + raise DuplicateError("type", ident) + val.ident = ident + self.type[ident] = { 'val' : val, 'import' : None } + self.type[ident]['module'] = self.Module() + self.type[ident]['proto'] = self.proto + if len(ident.split('/')) > 1: + self.type[ident]['tname'] = val.eth_tname() + else: + self.type[ident]['tname'] = asn2c(ident) + if mod : + mident = "$%s$%s" % (mod, ident) + else: + mident = None + self.type[ident]['export'] = self.conform.use_item('EXPORTS', ident) + self.type[ident]['enum'] = self.conform.use_item('MAKE_ENUM', ident) + self.type[ident]['vals_ext'] = self.conform.use_item('USE_VALS_EXT', ident) + self.type[ident]['user_def'] = self.conform.use_item('USER_DEFINED', ident) + if mident and self.conform.check_item('NO_EMIT', mident) : + self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', mident) + else: + self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', ident) + self.type[ident]['tname'] = self.conform.use_item('TYPE_RENAME', ident, val_dflt=self.type[ident]['tname']) + self.type[ident]['ethname'] = '' + if (val.type == 'Type_Ref') or (val.type == 'TaggedType') or (val.type == 'SelectionType') : + self.type[ident]['attr'] = {} + else: + (ftype, display) = val.eth_ftype(self) + self.type[ident]['attr'] = { 'TYPE' : ftype, 'DISPLAY' : display, + 'STRINGS' : val.eth_strings(), 'BITMASK' : '0' } + self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident)) + self.type_ord.append(ident) + # PDU + if (self.conform.check_item('PDU', ident)): + self.eth_reg_field(ident, ident, impl=val.HasImplicitTag(self), pdu=self.conform.use_item('PDU', ident)) + + #--- eth_reg_objectclass ---------------------------------------------------------- + def eth_reg_objectclass(self, ident, val): + #print "eth_reg_objectclass(ident='%s')" % (ident) + if ident in self.objectclass: + if self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == self.Module()) : + # replace imported object class + del self.objectclass[ident] + self.objectclass_imp.remove(ident) + elif isinstance(self.objectclass[ident]['val'], Class_Ref) and \ + isinstance(val, Class_Ref) and \ + (self.objectclass[ident]['val'].val == val.val): + pass # ignore duplicated CLASS1 ::= CLASS2 + else: + raise DuplicateError("object class", ident) + self.objectclass[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto } + self.objectclass[ident]['val'] = val + self.objectclass[ident]['export'] = self.conform.use_item('EXPORTS', ident) + self.objectclass_ord.append(ident) + + #--- eth_reg_value 
---------------------------------------------------------- + def eth_reg_value(self, ident, type, value, ethname=None): + #print "eth_reg_value(ident='%s')" % (ident) + if ident in self.value: + if self.value[ident]['import'] and (self.value[ident]['import'] == self.Module()) : + # replace imported value + del self.value[ident] + self.value_imp.remove(ident) + elif ethname: + self.value[ident]['ethname'] = ethname + return + else: + raise DuplicateError("value", ident) + self.value[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto, + 'type' : type, 'value' : value, + 'no_emit' : False } + self.value[ident]['export'] = self.conform.use_item('EXPORTS', ident) + self.value[ident]['ethname'] = '' + if (ethname): self.value[ident]['ethname'] = ethname + self.value_ord.append(ident) + + #--- eth_reg_field ---------------------------------------------------------- + def eth_reg_field(self, ident, type, idx='', parent=None, impl=False, pdu=None): + #print "eth_reg_field(ident='%s', type='%s')" % (ident, type) + if ident in self.field: + if pdu and (type == self.field[ident]['type']): + pass # OK already created PDU + else: + raise DuplicateError("field", ident) + self.field[ident] = {'type' : type, 'idx' : idx, 'impl' : impl, 'pdu' : pdu, + 'modified' : '', 'attr' : {} } + name = ident.split('/')[-1] + if self.remove_prefix and name.startswith(self.remove_prefix): + name = name[len(self.remove_prefix):] + + if len(ident.split('/')) > 1 and name == ITEM_FIELD_NAME: # Sequence/Set of type + if len(self.field[ident]['type'].split('/')) > 1: + self.field[ident]['attr']['NAME'] = '"%s item"' % ident.split('/')[-2] + self.field[ident]['attr']['ABBREV'] = asn2c(ident.split('/')[-2] + name) + else: + self.field[ident]['attr']['NAME'] = '"%s"' % self.field[ident]['type'] + self.field[ident]['attr']['ABBREV'] = asn2c(self.field[ident]['type']) + else: + self.field[ident]['attr']['NAME'] = '"%s"' % name + self.field[ident]['attr']['ABBREV'] = asn2c(name) + if self.conform.check_item('FIELD_ATTR', ident): + self.field[ident]['modified'] = '#' + str(id(self)) + self.field[ident]['attr'].update(self.conform.use_item('FIELD_ATTR', ident)) + if (pdu): + self.field[ident]['pdu']['export'] = (self.conform.use_item('EXPORTS', ident + '_PDU') != 0) + self.pdu_ord.append(ident) + else: + self.field_ord.append(ident) + if parent: + self.eth_dep_add(parent, type) + + def eth_dummy_eag_field_required(self): + if (not self.dummy_eag_field): + self.dummy_eag_field = 'eag_field' + + #--- eth_clean -------------------------------------------------------------- + def eth_clean(self): + self.proto = self.proto_opt; + #--- ASN.1 tables ---------------- + self.assign = {} + self.assign_ord = [] + self.field = {} + self.pdu_ord = [] + self.field_ord = [] + self.type = {} + self.type_ord = [] + self.type_imp = [] + self.type_dep = {} + self.sel_req = {} + self.sel_req_ord = [] + self.comp_req_ord = [] + self.vassign = {} + self.vassign_ord = [] + self.value = {} + self.value_ord = [] + self.value_imp = [] + self.objectclass = {} + self.objectclass_ord = [] + self.objectclass_imp = [] + self.oassign = {} + self.oassign_ord = [] + self.oassign_cls = {} + #--- Modules ------------ + self.modules = [] + self.exports_all = False + self.exports = [] + self.cexports = [] + self.vexports = [] + #--- types ------------------- + self.eth_type = {} + self.eth_type_ord = [] + self.eth_export_ord = [] + self.eth_type_dupl = {} + self.named_bit = [] + #--- value dependencies ------------------- + self.value_dep = {} + 
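+        # value_dep: value ident -> list of value idents it references;
+        # filled and used in eth_prepare() to propagate exports transitively.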
#--- values ------------------- + self.eth_value = {} + self.eth_value_ord = [] + #--- fields ------------------------- + self.eth_hf = {} + self.eth_hf_ord = [] + self.eth_hfpdu_ord = [] + self.eth_hf_dupl = {} + self.dummy_eag_field = None + #--- type dependencies ------------------- + self.eth_type_ord1 = [] + self.eth_dep_cycle = [] + self.dep_cycle_eth_type = {} + #--- value dependencies and export ------------------- + self.eth_value_ord1 = [] + self.eth_vexport_ord = [] + + #--- eth_prepare ------------------------------------------------------------ + def eth_prepare(self): + self.eproto = asn2c(self.proto) + + #--- dummy types/fields for PDU registration --- + nm = 'NULL' + if (self.conform.check_item('PDU', nm)): + self.eth_reg_type('_dummy/'+nm, NullType()) + self.eth_reg_field(nm, '_dummy/'+nm, pdu=self.conform.use_item('PDU', nm)) + + #--- required PDUs ---------------------------- + for t in self.type_ord: + pdu = self.type[t]['val'].eth_need_pdu(self) + if not pdu: continue + f = pdu['type'] + pdu['reg'] = None + pdu['hidden'] = False + pdu['need_decl'] = True + if f not in self.field: + self.eth_reg_field(f, f, pdu=pdu) + + #--- values -> named values ------------------- + t_for_update = {} + for v in self.value_ord: + if (self.value[v]['type'].type == 'Type_Ref') or self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v): + if self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v): + tnm = self.conform.use_item('ASSIGN_VALUE_TO_TYPE', v) + else: + tnm = self.value[v]['type'].val + if tnm in self.type \ + and not self.type[tnm]['import'] \ + and (self.type[tnm]['val'].type == 'IntegerType'): + self.type[tnm]['val'].add_named_value(v, self.value[v]['value']) + self.value[v]['no_emit'] = True + t_for_update[tnm] = True + for t in list(t_for_update.keys()): + self.type[t]['attr']['STRINGS'] = self.type[t]['val'].eth_strings() + self.type[t]['attr'].update(self.conform.use_item('TYPE_ATTR', t)) + + #--- required components of --------------------------- + #print "self.comp_req_ord = ", self.comp_req_ord + for t in self.comp_req_ord: + self.type[t]['val'].eth_reg_sub(t, self, components_available=True) + + #--- required selection types --------------------------- + #print "self.sel_req_ord = ", self.sel_req_ord + for t in self.sel_req_ord: + tt = self.sel_req[t]['typ'] + if tt not in self.type: + self.dummy_import_type(t) + elif self.type[tt]['import']: + self.eth_import_type(t, self.type[tt]['import'], self.type[tt]['proto']) + else: + self.type[tt]['val'].sel_req(t, self.sel_req[t]['sel'], self) + + #--- types ------------------- + for t in self.type_imp: # imported types + nm = asn2c(t) + self.eth_type[nm] = { 'import' : self.type[t]['import'], + 'proto' : asn2c(self.type[t]['proto']), + 'attr' : {}, 'ref' : []} + self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm)) + self.type[t]['ethname'] = nm + for t in self.type_ord: # dummy import for missing type reference + tp = self.type[t]['val'] + #print "X : %s %s " % (t, tp.type) + if isinstance(tp, TaggedType): + #print "%s : %s " % (tp.type, t) + tp = tp.val + if isinstance(tp, Type_Ref): + #print "%s : %s ::= %s " % (tp.type, t, tp.val) + if tp.val not in self.type: + self.dummy_import_type(tp.val) + for t in self.type_ord: + nm = self.type[t]['tname'] + if ((nm.find('#') >= 0) or + ((len(t.split('/'))>1) and + (self.conform.get_fn_presence(t) or self.conform.check_item('FN_PARS', t) or + self.conform.get_fn_presence('/'.join((t,ITEM_FIELD_NAME))) or self.conform.check_item('FN_PARS', 
'/'.join((t,ITEM_FIELD_NAME)))) and + not self.conform.check_item('TYPE_RENAME', t))): + if len(t.split('/')) == 2 and t.split('/')[1] == ITEM_FIELD_NAME: # Sequence of type at the 1st level + nm = t.split('/')[0] + t.split('/')[1] + elif t.split('/')[-1] == ITEM_FIELD_NAME: # Sequence/Set of type at next levels + nm = 'T_' + self.conform.use_item('FIELD_RENAME', '/'.join(t.split('/')[0:-1]), val_dflt=t.split('/')[-2]) + t.split('/')[-1] + elif t.split('/')[-1] == UNTAG_TYPE_NAME: # Untagged type + nm = self.type['/'.join(t.split('/')[0:-1])]['ethname'] + '_U' + else: + nm = 'T_' + self.conform.use_item('FIELD_RENAME', t, val_dflt=t.split('/')[-1]) + nm = asn2c(nm) + if nm in self.eth_type: + if nm in self.eth_type_dupl: + self.eth_type_dupl[nm].append(t) + else: + self.eth_type_dupl[nm] = [self.eth_type[nm]['ref'][0], t] + nm += '_%02d' % (len(self.eth_type_dupl[nm])-1) + if nm in self.eth_type: + self.eth_type[nm]['ref'].append(t) + else: + self.eth_type_ord.append(nm) + self.eth_type[nm] = { 'import' : None, 'proto' : self.eproto, 'export' : 0, 'enum' : 0, 'vals_ext' : 0, + 'user_def' : EF_TYPE|EF_VALS, 'no_emit' : EF_TYPE|EF_VALS, + 'val' : self.type[t]['val'], + 'attr' : {}, 'ref' : [t]} + self.type[t]['ethname'] = nm + if (not self.eth_type[nm]['export'] and self.type[t]['export']): # new export + self.eth_export_ord.append(nm) + self.eth_type[nm]['export'] |= self.type[t]['export'] + self.eth_type[nm]['enum'] |= self.type[t]['enum'] + self.eth_type[nm]['vals_ext'] |= self.type[t]['vals_ext'] + self.eth_type[nm]['user_def'] &= self.type[t]['user_def'] + self.eth_type[nm]['no_emit'] &= self.type[t]['no_emit'] + if self.type[t]['attr'].get('STRINGS') == '$$': + use_ext = self.type[t]['vals_ext'] + if (use_ext): + self.eth_type[nm]['attr']['STRINGS'] = '&%s_ext' % (self.eth_vals_nm(nm)) + else: + if self.eth_type[nm]['val'].type == 'IntegerType' \ + and self.eth_type[nm]['val'].HasConstraint() \ + and self.eth_type[nm]['val'].constr.Needs64b(self): + self.eth_type[nm]['attr']['STRINGS'] = 'VALS64(%s)' % (self.eth_vals_nm(nm)) + else: + self.eth_type[nm]['attr']['STRINGS'] = 'VALS(%s)' % (self.eth_vals_nm(nm)) + self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm)) + for t in self.eth_type_ord: + bits = self.eth_type[t]['val'].eth_named_bits() + if (bits): + old_val = 0 + for (val, id) in bits: + self.named_bit.append({'name' : id, 'val' : val, + 'ethname' : 'hf_%s_%s_%s' % (self.eproto, t, asn2c(id)), + 'ftype' : 'FT_BOOLEAN', 'display' : '8', + 'strings' : 'NULL', + 'bitmask' : '0x'+('80','40','20','10','08','04','02','01')[val%8]}) + old_val = val + 1 + if self.eth_type[t]['val'].eth_need_tree(): + self.eth_type[t]['tree'] = "ett_%s_%s" % (self.eth_type[t]['proto'], t) + else: + self.eth_type[t]['tree'] = None + + #--- register values from enums ------------ + for t in self.eth_type_ord: + if (self.eth_type[t]['val'].eth_has_enum(t, self)): + self.eth_type[t]['val'].reg_enum_vals(t, self) + + #--- value dependencies ------------------- + for v in self.value_ord: + if isinstance (self.value[v]['value'], Value): + dep = self.value[v]['value'].get_dep() + else: + dep = self.value[v]['value'] + if dep and dep in self.value: + self.value_dep.setdefault(v, []).append(dep) + + #--- exports all necessary values + for v in self.value_ord: + if not self.value[v]['export']: continue + deparr = self.value_dep.get(v, []) + while deparr: + d = deparr.pop() + if not self.value[d]['import']: + if not self.value[d]['export']: + self.value[d]['export'] = EF_TYPE + 
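+                    # keep walking the dependency chain: every value that an
+                    # exported value references must be exported as well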
deparr.extend(self.value_dep.get(d, [])) + + #--- values ------------------- + for v in self.value_imp: + nm = asn2c(v) + self.eth_value[nm] = { 'import' : self.value[v]['import'], + 'proto' : asn2c(self.value[v]['proto']), + 'ref' : []} + self.value[v]['ethname'] = nm + for v in self.value_ord: + if (self.value[v]['ethname']): + continue + if (self.value[v]['no_emit']): + continue + nm = asn2c(v) + self.eth_value[nm] = { 'import' : None, + 'proto' : asn2c(self.value[v]['proto']), + 'export' : self.value[v]['export'], 'ref' : [v] } + self.eth_value[nm]['value'] = self.value[v]['value'] + self.eth_value_ord.append(nm) + self.value[v]['ethname'] = nm + + #--- fields ------------------------- + for f in (self.pdu_ord + self.field_ord): + if len(f.split('/')) > 1 and f.split('/')[-1] == ITEM_FIELD_NAME: # Sequence/Set of type + nm = self.conform.use_item('FIELD_RENAME', '/'.join(f.split('/')[0:-1]), val_dflt=f.split('/')[-2]) + f.split('/')[-1] + else: + nm = f.split('/')[-1] + nm = self.conform.use_item('FIELD_RENAME', f, val_dflt=nm) + nm = asn2c(nm) + if (self.field[f]['pdu']): + nm += '_PDU' + if (not self.merge_modules or self.field[f]['pdu']['export']): + nm = self.eproto + '_' + nm + t = self.field[f]['type'] + if t in self.type: + ethtype = self.type[t]['ethname'] + else: # undefined type + ethtype = self.dummy_import_type(t) + ethtypemod = ethtype + self.field[f]['modified'] + if nm in self.eth_hf: + if nm in self.eth_hf_dupl: + if ethtypemod in self.eth_hf_dupl[nm]: + nm = self.eth_hf_dupl[nm][ethtypemod] + self.eth_hf[nm]['ref'].append(f) + self.field[f]['ethname'] = nm + continue + else: + nmx = nm + ('_%02d' % (len(self.eth_hf_dupl[nm]))) + self.eth_hf_dupl[nm][ethtype] = nmx + nm = nmx + else: + if (self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified']) == ethtypemod: + self.eth_hf[nm]['ref'].append(f) + self.field[f]['ethname'] = nm + continue + else: + nmx = nm + '_01' + self.eth_hf_dupl[nm] = {self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified'] : nm, \ + ethtypemod : nmx} + nm = nmx + if (self.field[f]['pdu']): + self.eth_hfpdu_ord.append(nm) + else: + self.eth_hf_ord.append(nm) + fullname = 'hf_%s_%s' % (self.eproto, nm) + attr = self.eth_get_type_attr(self.field[f]['type']).copy() + attr.update(self.field[f]['attr']) + if (self.NAPI() and 'NAME' in attr): + attr['NAME'] += self.field[f]['idx'] + attr.update(self.conform.use_item('EFIELD_ATTR', nm)) + use_vals_ext = self.eth_type[ethtype].get('vals_ext') + if (use_vals_ext): + attr['DISPLAY'] += '|BASE_EXT_STRING' + self.eth_hf[nm] = {'fullname' : fullname, 'pdu' : self.field[f]['pdu'], + 'ethtype' : ethtype, 'modified' : self.field[f]['modified'], + 'attr' : attr.copy(), + 'ref' : [f]} + self.field[f]['ethname'] = nm + if (self.dummy_eag_field): + # Prepending "dummy_" avoids matching checkhf.pl. + self.dummy_eag_field = 'dummy_hf_%s_%s' % (self.eproto, self.dummy_eag_field) + #--- type dependencies ------------------- + (self.eth_type_ord1, self.eth_dep_cycle) = dependency_compute(self.type_ord, self.type_dep, map_fn = lambda t: self.type[t]['ethname'], ignore_fn = lambda t: self.type[t]['import']) + i = 0 + while i < len(self.eth_dep_cycle): + t = self.type[self.eth_dep_cycle[i][0]]['ethname'] + self.dep_cycle_eth_type.setdefault(t, []).append(i) + i += 1 + + #--- value dependencies and export ------------------- + for v in self.eth_value_ord: + if self.eth_value[v]['export']: + self.eth_vexport_ord.append(v) + else: + self.eth_value_ord1.append(v) + + #--- export tags, values, ... 
--- + for t in self.exports: + if t not in self.type: + continue + if self.type[t]['import']: + continue + m = self.type[t]['module'] + if not self.Per() and not self.Oer(): + if m not in self.all_tags: + self.all_tags[m] = {} + self.all_tags[m][t] = self.type[t]['val'].GetTTag(self) + if m not in self.all_type_attr: + self.all_type_attr[m] = {} + self.all_type_attr[m][t] = self.eth_get_type_attr(t).copy() + for v in self.vexports: + if v not in self.value: + continue + if self.value[v]['import']: + continue + m = self.value[v]['module'] + if m not in self.all_vals: + self.all_vals[m] = {} + vv = self.value[v]['value'] + if isinstance (vv, Value): + vv = vv.to_str(self) + self.all_vals[m][v] = vv + + #--- eth_vals_nm ------------------------------------------------------------ + def eth_vals_nm(self, tname): + out = "" + if (not self.eth_type[tname]['export'] & EF_NO_PROT): + out += "%s_" % (self.eproto) + out += "%s_vals" % (tname) + return out + + #--- eth_vals --------------------------------------------------------------- + def eth_vals(self, tname, vals): + out = "" + has_enum = self.eth_type[tname]['enum'] & EF_ENUM + use_ext = self.eth_type[tname]['vals_ext'] + if (use_ext): + vals.sort(key=lambda vals_entry: int(vals_entry[0])) + if (not self.eth_type[tname]['export'] & EF_VALS): + out += 'static ' + if (self.eth_type[tname]['export'] & EF_VALS) and (self.eth_type[tname]['export'] & EF_TABLE): + out += 'static ' + if self.eth_type[tname]['val'].HasConstraint() and self.eth_type[tname]['val'].constr.Needs64b(self) \ + and self.eth_type[tname]['val'].type == 'IntegerType': + out += "const val64_string %s[] = {\n" % (self.eth_vals_nm(tname)) + else: + out += "const value_string %s[] = {\n" % (self.eth_vals_nm(tname)) + for (val, id) in vals: + if (has_enum): + vval = self.eth_enum_item(tname, id) + else: + vval = val + out += ' { %3s, "%s" },\n' % (vval, id) + out += " { 0, NULL }\n};\n" + if (use_ext): + out += "\nstatic value_string_ext %s_ext = VALUE_STRING_EXT_INIT(%s);\n" % (self.eth_vals_nm(tname), self.eth_vals_nm(tname)) + return out + + #--- eth_enum_prefix ------------------------------------------------------------ + def eth_enum_prefix(self, tname, type=False): + out = "" + if (self.eth_type[tname]['export'] & EF_ENUM): + no_prot = self.eth_type[tname]['export'] & EF_NO_PROT + else: + no_prot = self.eth_type[tname]['enum'] & EF_NO_PROT + if (not no_prot): + out += self.eproto + if ((not self.eth_type[tname]['enum'] & EF_NO_TYPE) or type): + if (out): out += '_' + out += tname + if (self.eth_type[tname]['enum'] & EF_UCASE): + out = out.upper() + if (out): out += '_' + return out + + #--- eth_enum_nm ------------------------------------------------------------ + def eth_enum_nm(self, tname): + out = self.eth_enum_prefix(tname, type=True) + out += "enum" + return out + + #--- eth_enum_item --------------------------------------------------------------- + def eth_enum_item(self, tname, ident): + out = self.eth_enum_prefix(tname) + out += asn2c(ident) + if (self.eth_type[tname]['enum'] & EF_UCASE): + out = out.upper() + return out + + #--- eth_enum --------------------------------------------------------------- + def eth_enum(self, tname, vals): + out = "" + if (self.eth_type[tname]['enum'] & EF_DEFINE): + out += "/* enumerated values for %s */\n" % (tname) + for (val, id) in vals: + out += '#define %-12s %3s\n' % (self.eth_enum_item(tname, id), val) + else: + out += "typedef enum _%s {\n" % (self.eth_enum_nm(tname)) + first_line = 1 + for (val, id) in vals: + if (first_line == 
1): + first_line = 0 + else: + out += ",\n" + out += ' %-12s = %3s' % (self.eth_enum_item(tname, id), val) + out += "\n} %s;\n" % (self.eth_enum_nm(tname)) + return out + + #--- eth_bits --------------------------------------------------------------- + def eth_bits(self, tname, bits): + out = "" + out += "static int * const " + out += "%(TABLE)s[] = {\n" + for (val, id) in bits: + out += ' &hf_%s_%s_%s,\n' % (self.eproto, tname, asn2c(id)) + out += " NULL\n};\n" + return out + + #--- eth_type_fn_h ---------------------------------------------------------- + def eth_type_fn_h(self, tname): + out = "" + if (not self.eth_type[tname]['export'] & EF_TYPE): + out += 'static ' + out += "int " + if (self.Ber()): + out += "dissect_%s_%s(bool implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname) + elif (self.Per() or self.Oer()): + out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname) + out += ";\n" + return out + + #--- eth_fn_call ------------------------------------------------------------ + def eth_fn_call(self, fname, ret=None, indent=2, par=None): + out = indent * ' ' + if (ret): + if (ret == 'return'): + out += 'return ' + else: + out += ret + ' = ' + out += fname + '(' + ind = len(out) + for i in range(len(par)): + if (i>0): out += ind * ' ' + out += ', '.join(par[i]) + if (i<(len(par)-1)): out += ',\n' + out += ');\n' + return out + + def output_proto_root(self): + out = '' + if self.conform.proto_root_name: + out += ' proto_item *prot_ti = proto_tree_add_item(tree, ' + self.conform.proto_root_name + ', tvb, 0, -1, ENC_NA);\n' + out += ' proto_item_set_hidden(prot_ti);\n' + return out + + #--- eth_type_fn_hdr -------------------------------------------------------- + def eth_type_fn_hdr(self, tname): + out = '\n' + if (not self.eth_type[tname]['export'] & EF_TYPE): + out += 'static ' + out += "int\n" + if (self.Ber()): + out += "dissect_%s_%s(bool implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname) + elif (self.Per() or self.Oer()): + out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname) + #if self.conform.get_fn_presence(tname): + # out += self.conform.get_fn_text(tname, 'FN_HDR') + #el + if self.conform.check_item('PDU', tname): + out += self.output_proto_root() + + cycle_size = 0 + if self.eth_dep_cycle: + for cur_cycle in self.eth_dep_cycle: + t = self.type[cur_cycle[0]]['ethname'] + if t == tname: + cycle_size = len(cur_cycle) + break + + if cycle_size > 0: + out += f'''\ + const int proto_id = GPOINTER_TO_INT(wmem_list_frame_data(wmem_list_tail(actx->pinfo->layers))); + const unsigned cycle_size = {cycle_size}; + unsigned recursion_depth = p_get_proto_depth(actx->pinfo, proto_id); + DISSECTOR_ASSERT(recursion_depth <= MAX_RECURSION_DEPTH); + p_set_proto_depth(actx->pinfo, proto_id, recursion_depth + cycle_size); +''' + + if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]): + out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_HDR') + return out + + #--- eth_type_fn_ftr -------------------------------------------------------- + def eth_type_fn_ftr(self, tname): + out = '\n' + #if self.conform.get_fn_presence(tname): + # out += 
self.conform.get_fn_text(tname, 'FN_FTR') + #el + + add_recursion_check = False + if self.eth_dep_cycle: + for cur_cycle in self.eth_dep_cycle: + t = self.type[cur_cycle[0]]['ethname'] + if t == tname: + add_recursion_check = True + break + + if add_recursion_check: + out += '''\ + p_set_proto_depth(actx->pinfo, proto_id, recursion_depth - cycle_size); +''' + + if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]): + out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_FTR') + out += " return offset;\n" + out += "}\n" + return out + + #--- eth_type_fn_body ------------------------------------------------------- + def eth_type_fn_body(self, tname, body, pars=None): + out = body + #if self.conform.get_fn_body_presence(tname): + # out = self.conform.get_fn_text(tname, 'FN_BODY') + #el + if self.conform.get_fn_body_presence(self.eth_type[tname]['ref'][0]): + out = self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_BODY') + if pars: + try: + out = out % pars + except (TypeError): + pass + return out + + #--- eth_out_pdu_decl ---------------------------------------------------------- + def eth_out_pdu_decl(self, f): + t = self.eth_hf[f]['ethtype'] + out = '' + if (not self.eth_hf[f]['pdu']['export']): + out += 'static ' + out += 'int ' + out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_);\n' + return out + + #--- eth_output_hf ---------------------------------------------------------- + def eth_output_hf (self): + if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return + fx = self.output.file_open('hf') + for f in (self.eth_hfpdu_ord + self.eth_hf_ord): + fx.write("%-50s/* %s */\n" % ("static int %s = -1; " % (self.eth_hf[f]['fullname']), self.eth_hf[f]['ethtype'])) + if (self.named_bit): + fx.write('/* named bits */\n') + for nb in self.named_bit: + fx.write("static int %s = -1;\n" % (nb['ethname'])) + if (self.dummy_eag_field): + fx.write("static int %s = -1; /* never registered */\n" % (self.dummy_eag_field)) + self.output.file_close(fx) + + #--- eth_output_hf_arr ------------------------------------------------------ + def eth_output_hf_arr (self): + if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return + fx = self.output.file_open('hfarr') + for f in (self.eth_hfpdu_ord + self.eth_hf_ord): + t = self.eth_hf[f]['ethtype'] + if self.remove_prefix and t.startswith(self.remove_prefix): + t = t[len(self.remove_prefix):] + name=self.eth_hf[f]['attr']['NAME'] + try: # Python < 3 + trantab = maketrans("- ", "__") + except Exception: + trantab = str.maketrans("- ", "__") + name = name.translate(trantab) + namelower = name.lower() + tquoted_lower = '"' + t.lower() + '"' + # Try to avoid giving blurbs that give no more info than the name + if tquoted_lower == namelower or \ + t == "NULL" or \ + tquoted_lower.replace("t_", "") == namelower: + blurb = 'NULL' + else: + blurb = '"%s"' % (t) + attr = self.eth_hf[f]['attr'].copy() + if attr['TYPE'] == 'FT_NONE': + attr['ABBREV'] = '"%s.%s_element"' % (self.proto, attr['ABBREV']) + else: + attr['ABBREV'] = '"%s.%s"' % (self.proto, attr['ABBREV']) + if 'BLURB' not in attr: + attr['BLURB'] = blurb + fx.write(' { &%s,\n' % (self.eth_hf[f]['fullname'])) + fx.write(' { %(NAME)s, %(ABBREV)s,\n' % attr) + fx.write(' %(TYPE)s, %(DISPLAY)s, %(STRINGS)s, %(BITMASK)s,\n' % attr) + fx.write(' %(BLURB)s, HFILL }},\n' % attr) + for nb in self.named_bit: + flt_str = nb['ethname'] + # cut out hf_ + flt_str = 
flt_str[3:] + flt_str = flt_str.replace('_' , '.') + #print("filter string=%s" % (flt_str)) + fx.write(' { &%s,\n' % (nb['ethname'])) + fx.write(' { "%s", "%s",\n' % (nb['name'], flt_str)) + fx.write(' %s, %s, %s, %s,\n' % (nb['ftype'], nb['display'], nb['strings'], nb['bitmask'])) + fx.write(' NULL, HFILL }},\n') + self.output.file_close(fx) + + #--- eth_output_ett --------------------------------------------------------- + def eth_output_ett (self): + fx = self.output.file_open('ett') + fempty = True + #fx.write("static gint ett_%s = -1;\n" % (self.eproto)) + for t in self.eth_type_ord: + if self.eth_type[t]['tree']: + fx.write("static gint %s = -1;\n" % (self.eth_type[t]['tree'])) + fempty = False + self.output.file_close(fx, discard=fempty) + + #--- eth_output_ett_arr ----------------------------------------------------- + def eth_output_ett_arr(self): + fx = self.output.file_open('ettarr') + fempty = True + #fx.write(" &ett_%s,\n" % (self.eproto)) + for t in self.eth_type_ord: + if self.eth_type[t]['tree']: + fx.write(" &%s,\n" % (self.eth_type[t]['tree'])) + fempty = False + self.output.file_close(fx, discard=fempty) + + #--- eth_output_export ------------------------------------------------------ + def eth_output_export(self): + fx = self.output.file_open('exp', ext='h') + for t in self.eth_export_ord: # vals + if (self.eth_type[t]['export'] & EF_ENUM) and self.eth_type[t]['val'].eth_has_enum(t, self): + fx.write(self.eth_type[t]['val'].eth_type_enum(t, self)) + if (self.eth_type[t]['export'] & EF_VALS) and self.eth_type[t]['val'].eth_has_vals(): + if not self.eth_type[t]['export'] & EF_TABLE: + if self.eth_type[t]['export'] & EF_WS_DLL: + fx.write("WS_DLL_PUBLIC ") + else: + fx.write("extern ") + if self.eth_type[t]['val'].HasConstraint() and self.eth_type[t]['val'].constr.Needs64b(self) \ + and self.eth_type[t]['val'].type == 'IntegerType': + fx.write("const val64_string %s[];\n" % (self.eth_vals_nm(t))) + else: + fx.write("const value_string %s[];\n" % (self.eth_vals_nm(t))) + else: + fx.write(self.eth_type[t]['val'].eth_type_vals(t, self)) + for t in self.eth_export_ord: # functions + if (self.eth_type[t]['export'] & EF_TYPE): + if self.eth_type[t]['export'] & EF_EXTERN: + if self.eth_type[t]['export'] & EF_WS_DLL: + fx.write("WS_DLL_PUBLIC ") + else: + fx.write("extern ") + fx.write(self.eth_type_fn_h(t)) + for f in self.eth_hfpdu_ord: # PDUs + if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['export']): + fx.write(self.eth_out_pdu_decl(f)) + self.output.file_close(fx) + + #--- eth_output_expcnf ------------------------------------------------------ + def eth_output_expcnf(self): + fx = self.output.file_open('exp', ext='cnf') + fx.write('#.MODULE\n') + maxw = 0 + for (m, p) in self.modules: + if (len(m) > maxw): maxw = len(m) + for (m, p) in self.modules: + fx.write("%-*s %s\n" % (maxw, m, p)) + fx.write('#.END\n\n') + for cls in self.objectclass_ord: + if self.objectclass[cls]['export']: + cnm = cls + if self.objectclass[cls]['export'] & EF_MODULE: + cnm = "$%s$%s" % (self.objectclass[cls]['module'], cnm) + fx.write('#.CLASS %s\n' % (cnm)) + maxw = 2 + for fld in self.objectclass[cls]['val'].fields: + w = len(fld.fld_repr()[0]) + if (w > maxw): maxw = w + for fld in self.objectclass[cls]['val'].fields: + repr = fld.fld_repr() + fx.write('%-*s %s\n' % (maxw, repr[0], ' '.join(repr[1:]))) + fx.write('#.END\n\n') + if self.Ber(): + fx.write('#.IMPORT_TAG\n') + for t in self.eth_export_ord: # tags + if (self.eth_type[t]['export'] & EF_TYPE): + fx.write('%-24s ' % 
self.eth_type[t]['ref'][0]) + fx.write('%s %s\n' % self.eth_type[t]['val'].GetTag(self)) + fx.write('#.END\n\n') + fx.write('#.TYPE_ATTR\n') + for t in self.eth_export_ord: # attributes + if (self.eth_type[t]['export'] & EF_TYPE): + tnm = self.eth_type[t]['ref'][0] + if self.eth_type[t]['export'] & EF_MODULE: + tnm = "$%s$%s" % (self.type[tnm]['module'], tnm) + fx.write('%-24s ' % tnm) + attr = self.eth_get_type_attr(self.eth_type[t]['ref'][0]).copy() + fx.write('TYPE = %(TYPE)-9s DISPLAY = %(DISPLAY)-9s STRINGS = %(STRINGS)s BITMASK = %(BITMASK)s\n' % attr) + fx.write('#.END\n\n') + self.output.file_close(fx, keep_anyway=True) + + #--- eth_output_val ------------------------------------------------------ + def eth_output_val(self): + fx = self.output.file_open('val', ext='h') + for v in self.eth_value_ord1: + vv = self.eth_value[v]['value'] + if isinstance (vv, Value): + vv = vv.to_str(self) + fx.write("#define %-30s %s\n" % (v, vv)) + for t in self.eth_type_ord1: + if self.eth_type[t]['import']: + continue + if self.eth_type[t]['val'].eth_has_enum(t, self) and not (self.eth_type[t]['export'] & EF_ENUM): + fx.write(self.eth_type[t]['val'].eth_type_enum(t, self)) + self.output.file_close(fx) + + #--- eth_output_valexp ------------------------------------------------------ + def eth_output_valexp(self): + if (not len(self.eth_vexport_ord)): return + fx = self.output.file_open('valexp', ext='h') + for v in self.eth_vexport_ord: + vv = self.eth_value[v]['value'] + if isinstance (vv, Value): + vv = vv.to_str(self) + fx.write("#define %-30s %s\n" % (v, vv)) + self.output.file_close(fx) + + #--- eth_output_types ------------------------------------------------------- + def eth_output_types(self): + def out_pdu(f): + t = self.eth_hf[f]['ethtype'] + impl = 'FALSE' + out = '' + if (not self.eth_hf[f]['pdu']['export']): + out += 'static ' + out += 'int ' + out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {\n' + out += self.output_proto_root() + + out += ' int offset = 0;\n' + off_par = 'offset' + ret_par = 'offset' + if (self.Per()): + if (self.Aligned()): + aligned = 'TRUE' + else: + aligned = 'FALSE' + out += " asn1_ctx_t asn1_ctx;\n" + out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_PER', aligned, 'pinfo'),)) + if (self.Ber()): + out += " asn1_ctx_t asn1_ctx;\n" + out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_BER', 'TRUE', 'pinfo'),)) + par=((impl, 'tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),) + elif (self.Per()): + par=(('tvb', off_par, '&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),) + elif (self.Oer()): + out += " asn1_ctx_t asn1_ctx;\n" + out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_OER', 'TRUE', 'pinfo'),)) + par=(('tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),) + else: + par=((),) + out += self.eth_fn_call('dissect_%s_%s' % (self.eth_type[t]['proto'], t), ret=ret_par, par=par) + if (self.Per()): + out += ' offset += 7; offset >>= 3;\n' + out += ' return offset;\n' + out += '}\n' + return out + #end out_pdu() + fx = self.output.file_open('fn') + pos = fx.tell() + if (len(self.eth_hfpdu_ord)): + first_decl = True + for f in self.eth_hfpdu_ord: + if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['need_decl']): + if first_decl: + fx.write('/*--- PDUs declarations ---*/\n') + first_decl = False + fx.write(self.eth_out_pdu_decl(f)) + if not first_decl: + fx.write('\n') + + add_depth_define = False + if self.eth_dep_cycle: + 
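+            # For each dependency cycle, emit a forward declaration (or a
+            # commented-out one for exported types) so that mutually
+            # recursive dissector functions can be called before their
+            # definitions are output below.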
fx.write('/*--- Cyclic dependencies ---*/\n\n') + i = 0 + while i < len(self.eth_dep_cycle): + t = self.type[self.eth_dep_cycle[i][0]]['ethname'] + if self.dep_cycle_eth_type[t][0] != i: i += 1; continue + add_depth_define = True + fx.write(''.join(['/* %s */\n' % ' -> '.join(self.eth_dep_cycle[i]) for i in self.dep_cycle_eth_type[t]])) + if not self.eth_type[t]['export'] & EF_TYPE: + fx.write(self.eth_type_fn_h(t)) + else: + fx.write('/*' + self.eth_type_fn_h(t).strip() + '*/\n') + fx.write('\n') + i += 1 + fx.write('\n') + if add_depth_define: + fx.write('#define MAX_RECURSION_DEPTH 100 // Arbitrarily chosen.\n') + for t in self.eth_type_ord1: + if self.eth_type[t]['import']: + continue + if self.eth_type[t]['val'].eth_has_vals(): + if self.eth_type[t]['no_emit'] & EF_VALS: + pass + elif self.eth_type[t]['user_def'] & EF_VALS: + if self.eth_type[t]['val'].HasConstraint() and self.eth_type[t]['val'].constr.Needs64b(self) \ + and self.eth_type[t]['val'].type == 'IntegerType': + fx.write("extern const val64_string %s[];\n" % (self.eth_vals_nm(t))) + else: + fx.write("extern const value_string %s[];\n" % (self.eth_vals_nm(t))) + elif (self.eth_type[t]['export'] & EF_VALS) and (self.eth_type[t]['export'] & EF_TABLE): + pass + else: + fx.write(self.eth_type[t]['val'].eth_type_vals(t, self)) + if self.eth_type[t]['no_emit'] & EF_TYPE: + pass + elif self.eth_type[t]['user_def'] & EF_TYPE: + fx.write(self.eth_type_fn_h(t)) + else: + fx.write(self.eth_type[t]['val'].eth_type_fn(self.eth_type[t]['proto'], t, self)) + fx.write('\n') + if (len(self.eth_hfpdu_ord)): + fx.write('/*--- PDUs ---*/\n\n') + for f in self.eth_hfpdu_ord: + if (self.eth_hf[f]['pdu']): + if (f in self.emitted_pdu): + fx.write(" /* %s already emitted */\n" % (f)) + else: + fx.write(out_pdu(f)) + self.emitted_pdu[f] = True + fx.write('\n') + fempty = pos == fx.tell() + self.output.file_close(fx, discard=fempty) + + #--- eth_output_dis_hnd ----------------------------------------------------- + def eth_output_dis_hnd(self): + fx = self.output.file_open('dis-hnd') + fempty = True + for f in self.eth_hfpdu_ord: + pdu = self.eth_hf[f]['pdu'] + if (pdu and pdu['reg'] and not pdu['hidden']): + dis = self.proto + if (pdu['reg'] != '.'): + dis += '.' + pdu['reg'] + fx.write('static dissector_handle_t %s_handle;\n' % (asn2c(dis))) + fempty = False + fx.write('\n') + self.output.file_close(fx, discard=fempty) + + #--- eth_output_dis_reg ----------------------------------------------------- + def eth_output_dis_reg(self): + fx = self.output.file_open('dis-reg') + fempty = True + for f in self.eth_hfpdu_ord: + pdu = self.eth_hf[f]['pdu'] + if (pdu and pdu['reg']): + new_prefix = '' + if (pdu['new']): new_prefix = 'new_' + dis = self.proto + if (pdu['reg'] != '.'): dis += '.' 
+ pdu['reg'] + fx.write(' %sregister_dissector("%s", dissect_%s, proto_%s);\n' % (new_prefix, dis, f, self.eproto)) + if (not pdu['hidden']): + fx.write(' %s_handle = find_dissector("%s");\n' % (asn2c(dis), dis)) + fempty = False + fx.write('\n') + self.output.file_close(fx, discard=fempty) + + #--- eth_output_dis_tab ----------------------------------------------------- + def eth_output_dis_tab(self): + fx = self.output.file_open('dis-tab') + fempty = True + for k in self.conform.get_order('REGISTER'): + reg = self.conform.use_item('REGISTER', k) + if reg['pdu'] not in self.field: continue + f = self.field[reg['pdu']]['ethname'] + pdu = self.eth_hf[f]['pdu'] + new_prefix = '' + if (pdu['new']): new_prefix = 'new_' + if (reg['rtype'] in ('NUM', 'STR')): + rstr = '' + if (reg['rtype'] == 'STR'): + rstr = 'string' + else: + rstr = 'uint' + if (pdu['reg']): + dis = self.proto + if (pdu['reg'] != '.'): dis += '.' + pdu['reg'] + if (not pdu['hidden']): + hnd = '%s_handle' % (asn2c(dis)) + else: + hnd = 'find_dissector("%s")' % (dis) + else: + hnd = '%screate_dissector_handle(dissect_%s, proto_%s)' % (new_prefix, f, self.eproto) + rport = self.value_get_eth(reg['rport']) + fx.write(' dissector_add_%s("%s", %s, %s);\n' % (rstr, reg['rtable'], rport, hnd)) + elif (reg['rtype'] in ('BER', 'PER', 'OER')): + roid = self.value_get_eth(reg['roid']) + fx.write(' %sregister_%s_oid_dissector(%s, dissect_%s, proto_%s, %s);\n' % (new_prefix, reg['rtype'].lower(), roid, f, self.eproto, reg['roidname'])) + fempty = False + fx.write('\n') + self.output.file_close(fx, discard=fempty) + + #--- eth_output_syn_reg ----------------------------------------------------- + def eth_output_syn_reg(self): + fx = self.output.file_open('syn-reg') + fempty = True + first_decl = True + for k in self.conform.get_order('SYNTAX'): + reg = self.conform.use_item('SYNTAX', k) + if reg['pdu'] not in self.field: continue + f = self.field[reg['pdu']]['ethname'] + pdu = self.eth_hf[f]['pdu'] + new_prefix = '' + if (pdu['new']): new_prefix = 'new_' + if first_decl: + fx.write(' /*--- Syntax registrations ---*/\n') + first_decl = False + fx.write(' %sregister_ber_syntax_dissector(%s, proto_%s, dissect_%s_PDU);\n' % (new_prefix, k, self.eproto, reg['pdu'])); + fempty=False + self.output.file_close(fx, discard=fempty) + + #--- eth_output_tables ----------------------------------------------------- + def eth_output_tables(self): + for num in list(self.conform.report.keys()): + fx = self.output.file_open('table' + num) + for rep in self.conform.report[num]: + self.eth_output_table(fx, rep) + self.output.file_close(fx) + + #--- eth_output_table ----------------------------------------------------- + def eth_output_table(self, fx, rep): + if rep['type'] == 'HDR': + fx.write('\n') + if rep['var']: + var = rep['var'] + var_list = var.split('.', 1) + cls = var_list[0] + del var_list[0] + flds = [] + not_flds = [] + sort_flds = [] + for f in var_list: + if f[0] == '!': + not_flds.append(f[1:]) + continue + if f[0] == '#': + flds.append(f[1:]) + sort_flds.append(f) + continue + if f[0] == '@': + flds.append(f[1:]) + sort_flds.append(f[1:]) + continue + flds.append(f) + objs = {} + objs_ord = [] + if (cls in self.oassign_cls): + for ident in self.oassign_cls[cls]: + obj = self.get_obj_repr(ident, flds, not_flds) + if not obj: + continue + obj['_LOOP'] = var + obj['_DICT'] = str(obj) + objs[ident] = obj + objs_ord.append(ident) + if (sort_flds): + # Sort identifiers according to the matching object in objs. 
+ # The order is determined by sort_flds, keys prefixed by a + # '#' are compared numerically. + def obj_key_fn(name): + obj = objs[name] + return list( + int(obj[f[1:]]) if f[0] == '#' else obj[f] + for f in sort_flds + ) + objs_ord.sort(key=obj_key_fn) + for ident in objs_ord: + obj = objs[ident] + try: + text = rep['text'] % obj + except (KeyError): + raise sys.exc_info()[0]("%s:%s invalid key %s for information object %s of %s" % (rep['fn'], rep['lineno'], sys.exc_info()[1], ident, var)) + fx.write(text) + else: + fx.write("/* Unknown or empty loop list %s */\n" % (var)) + else: + fx.write(rep['text']) + if rep['type'] == 'FTR': + fx.write('\n') + + #--- dupl_report ----------------------------------------------------- + def dupl_report(self): + # types + tmplist = sorted(self.eth_type_dupl.keys()) + for t in tmplist: + msg = "The same type names for different types. Explicit type renaming is recommended.\n" + msg += t + "\n" + for tt in self.eth_type_dupl[t]: + msg += " %-20s %s\n" % (self.type[tt]['ethname'], tt) + warnings.warn_explicit(msg, UserWarning, '', 0) + # fields + tmplist = list(self.eth_hf_dupl.keys()) + tmplist.sort() + for f in tmplist: + msg = "The same field names for different types. Explicit field renaming is recommended.\n" + msg += f + "\n" + for tt in list(self.eth_hf_dupl[f].keys()): + msg += " %-20s %-20s " % (self.eth_hf_dupl[f][tt], tt) + msg += ", ".join(self.eth_hf[self.eth_hf_dupl[f][tt]]['ref']) + msg += "\n" + warnings.warn_explicit(msg, UserWarning, '', 0) + + #--- eth_do_output ------------------------------------------------------------ + def eth_do_output(self): + if self.dbg('a'): + print("\n# Assignments") + for a in self.assign_ord: + v = ' ' + if (self.assign[a]['virt']): v = '*' + print('{} {}'.format(v, a)) + print("\n# Value assignments") + for a in self.vassign_ord: + print(' {}'.format(a)) + print("\n# Information object assignments") + for a in self.oassign_ord: + print(" %-12s (%s)" % (a, self.oassign[a].cls)) + if self.dbg('t'): + print("\n# Imported Types") + print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol")) + print("-" * 100) + for t in self.type_imp: + print("%-40s %-24s %-24s" % (t, self.type[t]['import'], self.type[t]['proto'])) + print("\n# Imported Values") + print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol")) + print("-" * 100) + for t in self.value_imp: + print("%-40s %-24s %-24s" % (t, self.value[t]['import'], self.value[t]['proto'])) + print("\n# Imported Object Classes") + print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol")) + print("-" * 100) + for t in self.objectclass_imp: + print("%-40s %-24s %-24s" % (t, self.objectclass[t]['import'], self.objectclass[t]['proto'])) + print("\n# Exported Types") + print("%-31s %s" % ("Wireshark type", "Export Flag")) + print("-" * 100) + for t in self.eth_export_ord: + print("%-31s 0x%02X" % (t, self.eth_type[t]['export'])) + print("\n# Exported Values") + print("%-40s %s" % ("Wireshark name", "Value")) + print("-" * 100) + for v in self.eth_vexport_ord: + vv = self.eth_value[v]['value'] + if isinstance (vv, Value): + vv = vv.to_str(self) + print("%-40s %s" % (v, vv)) + print("\n# ASN.1 Object Classes") + print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol")) + print("-" * 100) + for t in self.objectclass_ord: + print("%-40s " % (t)) + print("\n# ASN.1 Types") + print("%-49s %-24s %-24s" % ("ASN.1 unique name", "'tname'", "Wireshark type")) + print("-" * 100) + for t in self.type_ord: + print("%-49s %-24s %-24s" % (t, 
self.type[t]['tname'], self.type[t]['ethname']))
+ print("\n# Wireshark Types")
+ print("Wireshark type References (ASN.1 types)")
+ print("-" * 100)
+ for t in self.eth_type_ord:
+ sys.stdout.write("%-31s %d" % (t, len(self.eth_type[t]['ref'])))
+ print(', '.join(self.eth_type[t]['ref']))
+ print("\n# ASN.1 Values")
+ print("%-40s %-18s %-20s %s" % ("ASN.1 unique name", "Type", "Value", "Wireshark value"))
+ print("-" * 100)
+ for v in self.value_ord:
+ vv = self.value[v]['value']
+ if isinstance (vv, Value):
+ vv = vv.to_str(self)
+ print("%-40s %-18s %-20s %s" % (v, self.value[v]['type'].eth_tname(), vv, self.value[v]['ethname']))
+ #print "\n# Wireshark Values"
+ #print "%-40s %s" % ("Wireshark name", "Value")
+ #print "-" * 100
+ #for v in self.eth_value_ord:
+ # vv = self.eth_value[v]['value']
+ # if isinstance (vv, Value):
+ # vv = vv.to_str(self)
+ # print "%-40s %s" % (v, vv)
+ print("\n# ASN.1 Fields")
+ print("ASN.1 unique name Wireshark name ASN.1 type")
+ print("-" * 100)
+ for f in (self.pdu_ord + self.field_ord):
+ print("%-40s %-20s %s" % (f, self.field[f]['ethname'], self.field[f]['type']))
+ print("\n# Wireshark Fields")
+ print("Wireshark name Wireshark type References (ASN.1 fields)")
+ print("-" * 100)
+ for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
+ sys.stdout.write("%-30s %-20s %s" % (f, self.eth_hf[f]['ethtype'], len(self.eth_hf[f]['ref'])))
+ print(', '.join(self.eth_hf[f]['ref']))
+ #print "\n# Order after dependencies"
+ #print '\n'.join(self.eth_type_ord1)
+ print("\n# Cyclic dependencies")
+ for c in self.eth_dep_cycle:
+ print(' -> '.join(c))
+ self.dupl_report()
+ self.output.outnm = self.outnm_opt
+ if (not self.output.outnm):
+ self.output.outnm = self.proto
+ self.output.outnm = self.output.outnm.replace('.', '-')
+ if not self.justexpcnf:
+ self.eth_output_hf()
+ self.eth_output_ett()
+ self.eth_output_types()
+ self.eth_output_hf_arr()
+ self.eth_output_ett_arr()
+ self.eth_output_export()
+ self.eth_output_val()
+ self.eth_output_valexp()
+ self.eth_output_dis_hnd()
+ self.eth_output_dis_reg()
+ self.eth_output_dis_tab()
+ self.eth_output_syn_reg()
+ self.eth_output_tables()
+ if self.expcnf:
+ self.eth_output_expcnf()
+
+ def dbg_modules(self):
+ def print_mod(m):
+ sys.stdout.write("%-30s " % (m))
+ dep = self.module[m][:]
+ for i in range(len(dep)):
+ if dep[i] not in self.module:
+ dep[i] = '*' + dep[i]
+ print(', '.join(dep))
+ # end of print_mod()
+ (mod_ord, mod_cyc) = dependency_compute(self.module_ord, self.module, ignore_fn = lambda t: t not in self.module)
+ print("\n# ASN.1 Modules")
+ print("Module name Dependency")
+ print("-" * 100)
+ new_ord = False
+ for m in (self.module_ord):
+ print_mod(m)
+ new_ord = new_ord or (self.module_ord.index(m) != mod_ord.index(m))
+ if new_ord:
+ print("\n# ASN.1 Modules - in dependency order")
+ print("Module name Dependency")
+ print("-" * 100)
+ for m in (mod_ord):
+ print_mod(m)
+ if mod_cyc:
+ print("\nCyclic dependencies:")
+ for i in (list(range(len(mod_cyc)))):
+ print("%02d: %s" % (i + 1, str(mod_cyc[i])))
+
+
+#--- EthCnf -------------------------------------------------------------------
+class EthCnf:
+ def __init__(self):
+ self.ectx = None
+ self.tblcfg = {}
+ self.table = {}
+ self.order = {}
+ self.fn = {}
+ self.report = {}
+ self.suppress_line = False
+ self.include_path = []
+ self.proto_root_name = None
+ # Value name Default value Duplicity check Usage check
+ self.tblcfg['EXPORTS'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
+
self.tblcfg['MAKE_ENUM'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['USE_VALS_EXT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['PDU'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['SYNTAX'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['REGISTER'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['USER_DEFINED'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['NO_EMIT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['MODULE'] = { 'val_nm' : 'proto', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : False } + self.tblcfg['OMIT_ASSIGNMENT'] = { 'val_nm' : 'omit', 'val_dflt' : False, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['NO_OMIT_ASSGN'] = { 'val_nm' : 'omit', 'val_dflt' : True, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['VIRTUAL_ASSGN'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['SET_TYPE'] = { 'val_nm' : 'type', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['TYPE_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['FIELD_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['IMPORT_TAG'] = { 'val_nm' : 'ttag', 'val_dflt' : (), 'chk_dup' : True, 'chk_use' : False } + self.tblcfg['FN_PARS'] = { 'val_nm' : 'pars', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['TYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False } + self.tblcfg['ETYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False } + self.tblcfg['FIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['EFIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True } + self.tblcfg['ASSIGNED_ID'] = { 'val_nm' : 'ids', 'val_dflt' : {}, 'chk_dup' : False,'chk_use' : False } + self.tblcfg['ASSIGN_VALUE_TO_TYPE'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } + + for k in list(self.tblcfg.keys()) : + self.table[k] = {} + self.order[k] = [] + + def add_item(self, table, key, fn, lineno, **kw): + if self.tblcfg[table]['chk_dup'] and key in self.table[table]: + warnings.warn_explicit("Duplicated %s for %s. 
Previous one is at %s:%d" % + (table, key, self.table[table][key]['fn'], self.table[table][key]['lineno']), + UserWarning, fn, lineno) + return + self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False} + self.table[table][key].update(kw) + self.order[table].append(key) + + def update_item(self, table, key, fn, lineno, **kw): + if key not in self.table[table]: + self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False} + self.order[table].append(key) + self.table[table][key][self.tblcfg[table]['val_nm']] = {} + self.table[table][key][self.tblcfg[table]['val_nm']].update(kw[self.tblcfg[table]['val_nm']]) + + def get_order(self, table): + return self.order[table] + + def check_item(self, table, key): + return key in self.table[table] + + def copy_item(self, table, dst_key, src_key): + if (src_key in self.table[table]): + self.table[table][dst_key] = self.table[table][src_key] + + def check_item_value(self, table, key, **kw): + return key in self.table[table] and kw.get('val_nm', self.tblcfg[table]['val_nm']) in self.table[table][key] + + def use_item(self, table, key, **kw): + vdflt = kw.get('val_dflt', self.tblcfg[table]['val_dflt']) + if key not in self.table[table]: return vdflt + vname = kw.get('val_nm', self.tblcfg[table]['val_nm']) + #print "use_item() - set used for %s %s" % (table, key) + self.table[table][key]['used'] = True + return self.table[table][key].get(vname, vdflt) + + def omit_assignment(self, type, ident, module): + if self.ectx.conform.use_item('OMIT_ASSIGNMENT', ident): + return True + if self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*') or \ + self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type) or \ + self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*/'+module) or \ + self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type+'/'+module): + return self.ectx.conform.use_item('NO_OMIT_ASSGN', ident) + return False + + def add_fn_line(self, name, ctx, line, fn, lineno): + if name not in self.fn: + self.fn[name] = {'FN_HDR' : None, 'FN_FTR' : None, 'FN_BODY' : None} + if (self.fn[name][ctx]): + self.fn[name][ctx]['text'] += line + else: + self.fn[name][ctx] = {'text' : line, 'used' : False, + 'fn' : fn, 'lineno' : lineno} + def get_fn_presence(self, name): + #print "get_fn_presence('%s'):%s" % (name, str(self.fn.has_key(name))) + #if self.fn.has_key(name): print self.fn[name] + return name in self.fn + def get_fn_body_presence(self, name): + return name in self.fn and self.fn[name]['FN_BODY'] + def get_fn_text(self, name, ctx): + if (name not in self.fn): + return ''; + if (not self.fn[name][ctx]): + return ''; + self.fn[name][ctx]['used'] = True + out = self.fn[name][ctx]['text'] + if (not self.suppress_line): + out = '#line %u "%s"\n%s\n' % (self.fn[name][ctx]['lineno'], rel_dissector_path(self.fn[name][ctx]['fn']), out); + return out + + def add_pdu(self, par, fn, lineno): + #print "add_pdu(par=%s, %s, %d)" % (str(par), fn, lineno) + (reg, hidden) = (None, False) + if (len(par) > 1): reg = par[1] + if (reg and reg[0]=='@'): (reg, hidden) = (reg[1:], True) + attr = {'new' : False, 'reg' : reg, 'hidden' : hidden, 'need_decl' : False, 'export' : False} + self.add_item('PDU', par[0], attr=attr, fn=fn, lineno=lineno) + return + + def add_syntax(self, par, fn, lineno): + #print "add_syntax(par=%s, %s, %d)" % (str(par), fn, lineno) + if( (len(par) >=2)): + name = par[1] + else: + name = '"'+par[0]+'"' + attr = { 'pdu' : par[0] } + self.add_item('SYNTAX', name, attr=attr, fn=fn, lineno=lineno) + return + + def add_register(self, pdu, par, fn, 
lineno):
+ #print "add_register(pdu=%s, par=%s, %s, %d)" % (pdu, str(par), fn, lineno)
+ if (par[0] in ('N', 'NUM')): rtype = 'NUM'; (pmin, pmax) = (2, 2)
+ elif (par[0] in ('S', 'STR')): rtype = 'STR'; (pmin, pmax) = (2, 2)
+ elif (par[0] in ('B', 'BER')): rtype = 'BER'; (pmin, pmax) = (1, 2)
+ elif (par[0] in ('P', 'PER')): rtype = 'PER'; (pmin, pmax) = (1, 2)
+ elif (par[0] in ('O', 'OER')): rtype = 'OER'; (pmin, pmax) = (1, 2)
+ else: warnings.warn_explicit("Unknown registration type '%s'" % (par[0]), UserWarning, fn, lineno); return
+ if ((len(par)-1) < pmin):
+ warnings.warn_explicit("Too few parameters for %s registration type. At least %d parameters are required" % (rtype, pmin), UserWarning, fn, lineno)
+ return
+ if ((len(par)-1) > pmax):
+ warnings.warn_explicit("Too many parameters for %s registration type. Only %d parameters are allowed" % (rtype, pmax), UserWarning, fn, lineno)
+ attr = {'pdu' : pdu, 'rtype' : rtype}
+ if (rtype in ('NUM', 'STR')):
+ attr['rtable'] = par[1]
+ attr['rport'] = par[2]
+ rkey = '/'.join([rtype, attr['rtable'], attr['rport']])
+ elif (rtype in ('BER', 'PER', 'OER')):
+ attr['roid'] = par[1]
+ attr['roidname'] = '""'
+ if (len(par)>=3):
+ attr['roidname'] = par[2]
+ elif attr['roid'][0] != '"':
+ attr['roidname'] = '"' + attr['roid'] + '"'
+ rkey = '/'.join([rtype, attr['roid']])
+ self.add_item('REGISTER', rkey, attr=attr, fn=fn, lineno=lineno)
+
+ def check_par(self, par, pmin, pmax, fn, lineno):
+ for i in range(len(par)):
+ if par[i] == '-':
+ par[i] = None
+ continue
+ if par[i][0] == '#':
+ par[i:] = []
+ break
+ if len(par) < pmin:
+ warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
+ return None
+ if (pmax >= 0) and (len(par) > pmax):
+ warnings.warn_explicit("Too many parameters. Only %d parameters are allowed" % (pmax), UserWarning, fn, lineno)
+ return par[0:pmax]
+ return par
+
+ def read(self, fn):
+ def get_par(line, pmin, pmax, fn, lineno):
+ par = line.split(None, pmax)
+ par = self.check_par(par, pmin, pmax, fn, lineno)
+ return par
+
+ def get_par_nm(line, pmin, pmax, fn, lineno):
+ if pmax:
+ par = line.split(None, pmax)
+ else:
+ par = [line,]
+ for i in range(len(par)):
+ if par[i][0] == '#':
+ par[i:] = []
+ break
+ if len(par) < pmin:
+ warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
+ return None
+ if len(par) > pmax:
+ nmpar = par[pmax]
+ else:
+ nmpar = ''
+ nmpars = {}
+ nmpar_first = re.compile(r'^\s*(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
+ nmpar_next = re.compile(r'\s+(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
+ nmpar_end = re.compile(r'\s*$')
+ result = nmpar_first.search(nmpar)
+ pos = 0
+ while result:
+ k = result.group('attr')
+ pos = result.end()
+ result = nmpar_next.search(nmpar, pos)
+ p1 = pos
+ if result:
+ p2 = result.start()
+ else:
+ p2 = nmpar_end.search(nmpar, pos).start()
+ v = nmpar[p1:p2]
+ nmpars[k] = v
+ if len(par) > pmax:
+ par[pmax] = nmpars
+ return par
+
+ f = open(fn, "r")
+ lineno = 0
+ is_import = False
+ directive = re.compile(r'^\s*#\.(?P<name>[A-Z_][A-Z_0-9]*)(\s+|$)')
+ cdirective = re.compile(r'^\s*##')
+ report = re.compile(r'^TABLE(?P<num>\d*)_(?P<type>HDR|BODY|FTR)$')
+ comment = re.compile(r'^\s*#[^.#]')
+ empty = re.compile(r'^\s*$')
+ ctx = None
+ name = ''
+ default_flags = 0x00
+ stack = []
+ while True:
+ if not f.closed:
+ line = f.readline()
+ lineno += 1
+ else:
+ line = None
+ if not line:
+ if not f.closed:
+ f.close()
+ if stack:
+ frec = stack.pop()
+ fn, f, lineno, is_import = frec['fn'], frec['f'], frec['lineno'], frec['is_import']
+ continue
+ else:
+ break
+ if comment.search(line): continue
+ result = directive.search(line)
+ if result: # directive
+ rep_result = report.search(result.group('name'))
+ if result.group('name') == 'END_OF_CNF':
+ f.close()
+ elif result.group('name') == 'OPT':
+ ctx = result.group('name')
+ par = get_par(line[result.end():], 0, -1, fn=fn, lineno=lineno)
+ if not par: continue
+ self.set_opt(par[0], par[1:], fn, lineno)
+ ctx = None
+ elif result.group('name') in ('PDU', 'REGISTER',
+ 'MODULE', 'MODULE_IMPORT',
+ 'OMIT_ASSIGNMENT', 'NO_OMIT_ASSGN',
+ 'VIRTUAL_ASSGN', 'SET_TYPE', 'ASSIGN_VALUE_TO_TYPE',
+ 'TYPE_RENAME', 'FIELD_RENAME', 'TF_RENAME', 'IMPORT_TAG',
+ 'TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR',
+ 'SYNTAX'):
+ ctx = result.group('name')
+ elif result.group('name') in ('OMIT_ALL_ASSIGNMENTS', 'OMIT_ASSIGNMENTS_EXCEPT',
+ 'OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT',
+ 'OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
+ ctx = result.group('name')
+ key = '*'
+ if ctx in ('OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT'):
+ key += 'T'
+ if ctx in ('OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
+ key += 'V'
+ par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
+ if par:
+ key += '/' + par[0]
+ self.add_item('OMIT_ASSIGNMENT', key, omit=True, fn=fn, lineno=lineno)
+ if ctx in ('OMIT_ASSIGNMENTS_EXCEPT', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
+ ctx = 'NO_OMIT_ASSGN'
+ else:
+ ctx = None
+ elif result.group('name') in ('EXPORTS', 'MODULE_EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
+ ctx = result.group('name')
+ default_flags = EF_TYPE|EF_VALS
+ if ctx == 'MODULE_EXPORTS':
+ ctx = 'EXPORTS'
+ default_flags |= EF_MODULE
+ if ctx == 'EXPORTS':
+ par = get_par(line[result.end():], 0, 5, fn=fn, lineno=lineno)
+ else:
+ par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
+ if not par: continue
+ p = 1
+ if (par[0] == 'WITH_VALS'): default_flags |= EF_TYPE|EF_VALS
+ elif (par[0] == 'WITHOUT_VALS'): default_flags |= EF_TYPE; default_flags &= ~EF_VALS
+ elif (par[0] == 'ONLY_VALS'): default_flags &= ~EF_TYPE; default_flags |= EF_VALS
+ elif (ctx == 'EXPORTS'): p = 0
+ else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[0]),
UserWarning, fn, lineno) + for i in range(p, len(par)): + if (par[i] == 'ONLY_ENUM'): default_flags &= ~(EF_TYPE|EF_VALS); default_flags |= EF_ENUM + elif (par[i] == 'WITH_ENUM'): default_flags |= EF_ENUM + elif (par[i] == 'VALS_WITH_TABLE'): default_flags |= EF_TABLE + elif (par[i] == 'WS_DLL'): default_flags |= EF_WS_DLL + elif (par[i] == 'EXTERN'): default_flags |= EF_EXTERN + elif (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT + else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno) + elif result.group('name') in ('MAKE_ENUM', 'MAKE_DEFINES'): + ctx = result.group('name') + default_flags = EF_ENUM + if ctx == 'MAKE_ENUM': default_flags |= EF_NO_PROT|EF_NO_TYPE + if ctx == 'MAKE_DEFINES': default_flags |= EF_DEFINE|EF_UCASE|EF_NO_TYPE + par = get_par(line[result.end():], 0, 3, fn=fn, lineno=lineno) + for i in range(0, len(par)): + if (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT + elif (par[i] == 'PROT_PREFIX'): default_flags &= ~ EF_NO_PROT + elif (par[i] == 'NO_TYPE_PREFIX'): default_flags |= EF_NO_TYPE + elif (par[i] == 'TYPE_PREFIX'): default_flags &= ~ EF_NO_TYPE + elif (par[i] == 'UPPER_CASE'): default_flags |= EF_UCASE + elif (par[i] == 'NO_UPPER_CASE'): default_flags &= ~EF_UCASE + else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno) + elif result.group('name') == 'USE_VALS_EXT': + ctx = result.group('name') + default_flags = 0xFF + elif result.group('name') == 'FN_HDR': + minp = 1 + if (ctx in ('FN_PARS',)) and name: minp = 0 + par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno) + if (not par) and (minp > 0): continue + ctx = result.group('name') + if par: name = par[0] + elif result.group('name') == 'FN_FTR': + minp = 1 + if (ctx in ('FN_PARS','FN_HDR')) and name: minp = 0 + par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno) + if (not par) and (minp > 0): continue + ctx = result.group('name') + if par: name = par[0] + elif result.group('name') == 'FN_BODY': + par = get_par_nm(line[result.end():], 1, 1, fn=fn, lineno=lineno) + if not par: continue + ctx = result.group('name') + name = par[0] + if len(par) > 1: + self.add_item('FN_PARS', name, pars=par[1], fn=fn, lineno=lineno) + elif result.group('name') == 'FN_PARS': + par = get_par_nm(line[result.end():], 0, 1, fn=fn, lineno=lineno) + ctx = result.group('name') + if not par: + name = None + elif len(par) == 1: + name = par[0] + self.add_item(ctx, name, pars={}, fn=fn, lineno=lineno) + elif len(par) > 1: + self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno) + ctx = None + elif result.group('name') == 'CLASS': + par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno) + if not par: continue + ctx = result.group('name') + name = par[0] + add_class_ident(name) + if not name.split('$')[-1].isupper(): + warnings.warn_explicit("No lower-case letters shall be included in information object class name (%s)" % (name), + UserWarning, fn, lineno) + elif result.group('name') == 'ASSIGNED_OBJECT_IDENTIFIER': + par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno) + if not par: continue + self.update_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER', ids={par[0] : par[0]}, fn=fn, lineno=lineno) + elif rep_result: # Reports + num = rep_result.group('num') + type = rep_result.group('type') + if type == 'BODY': + par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno) + if not par: continue + else: + par = get_par(line[result.end():], 0, 0, fn=fn, lineno=lineno) + rep = { 'type' : type, 
'var' : None, 'text' : '', 'fn' : fn, 'lineno' : lineno } + if len(par) > 0: + rep['var'] = par[0] + self.report.setdefault(num, []).append(rep) + ctx = 'TABLE' + name = num + elif result.group('name') in ('INCLUDE', 'IMPORT') : + is_imp = result.group('name') == 'IMPORT' + par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno) + if not par: + warnings.warn_explicit("%s requires parameter" % (result.group('name'),), UserWarning, fn, lineno) + continue + fname = par[0] + #print "Try include: %s" % (fname) + if (not os.path.exists(fname)): + fname = os.path.join(os.path.split(fn)[0], par[0]) + #print "Try include: %s" % (fname) + i = 0 + while not os.path.exists(fname) and (i < len(self.include_path)): + fname = os.path.join(self.include_path[i], par[0]) + #print "Try include: %s" % (fname) + i += 1 + if (not os.path.exists(fname)): + if is_imp: + continue # just ignore + else: + fname = par[0] # report error + fnew = open(fname, "r") + stack.append({'fn' : fn, 'f' : f, 'lineno' : lineno, 'is_import' : is_import}) + fn, f, lineno, is_import = par[0], fnew, 0, is_imp + elif result.group('name') == 'END': + ctx = None + else: + warnings.warn_explicit("Unknown directive '%s'" % (result.group('name')), UserWarning, fn, lineno) + continue + if not ctx: + if not empty.match(line): + warnings.warn_explicit("Non-empty line in empty context", UserWarning, fn, lineno) + elif ctx == 'OPT': + if empty.match(line): continue + par = get_par(line, 1, -1, fn=fn, lineno=lineno) + if not par: continue + self.set_opt(par[0], par[1:], fn, lineno) + elif ctx in ('EXPORTS', 'USER_DEFINED', 'NO_EMIT'): + if empty.match(line): continue + if ctx == 'EXPORTS': + par = get_par(line, 1, 6, fn=fn, lineno=lineno) + else: + par = get_par(line, 1, 2, fn=fn, lineno=lineno) + if not par: continue + flags = default_flags + p = 2 + if (len(par)>=2): + if (par[1] == 'WITH_VALS'): flags |= EF_TYPE|EF_VALS + elif (par[1] == 'WITHOUT_VALS'): flags |= EF_TYPE; flags &= ~EF_VALS + elif (par[1] == 'ONLY_VALS'): flags &= ~EF_TYPE; flags |= EF_VALS + elif (ctx == 'EXPORTS'): p = 1 + else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[1]), UserWarning, fn, lineno) + for i in range(p, len(par)): + if (par[i] == 'ONLY_ENUM'): flags &= ~(EF_TYPE|EF_VALS); flags |= EF_ENUM + elif (par[i] == 'WITH_ENUM'): flags |= EF_ENUM + elif (par[i] == 'VALS_WITH_TABLE'): flags |= EF_TABLE + elif (par[i] == 'WS_DLL'): flags |= EF_WS_DLL + elif (par[i] == 'EXTERN'): flags |= EF_EXTERN + elif (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT + else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno) + self.add_item(ctx, par[0], flag=flags, fn=fn, lineno=lineno) + elif ctx in ('MAKE_ENUM', 'MAKE_DEFINES'): + if empty.match(line): continue + par = get_par(line, 1, 4, fn=fn, lineno=lineno) + if not par: continue + flags = default_flags + for i in range(1, len(par)): + if (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT + elif (par[i] == 'PROT_PREFIX'): flags &= ~ EF_NO_PROT + elif (par[i] == 'NO_TYPE_PREFIX'): flags |= EF_NO_TYPE + elif (par[i] == 'TYPE_PREFIX'): flags &= ~ EF_NO_TYPE + elif (par[i] == 'UPPER_CASE'): flags |= EF_UCASE + elif (par[i] == 'NO_UPPER_CASE'): flags &= ~EF_UCASE + else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno) + self.add_item('MAKE_ENUM', par[0], flag=flags, fn=fn, lineno=lineno) + elif ctx == 'USE_VALS_EXT': + if empty.match(line): continue + par = get_par(line, 1, 1, fn=fn, lineno=lineno) + if not par: continue + flags 
= default_flags + self.add_item('USE_VALS_EXT', par[0], flag=flags, fn=fn, lineno=lineno) + elif ctx == 'PDU': + if empty.match(line): continue + par = get_par(line, 1, 5, fn=fn, lineno=lineno) + if not par: continue + self.add_pdu(par[0:2], fn, lineno) + if (len(par)>=3): + self.add_register(par[0], par[2:5], fn, lineno) + elif ctx == 'SYNTAX': + if empty.match(line): continue + par = get_par(line, 1, 2, fn=fn, lineno=lineno) + if not par: continue + if not self.check_item('PDU', par[0]): + self.add_pdu(par[0:1], fn, lineno) + self.add_syntax(par, fn, lineno) + elif ctx == 'REGISTER': + if empty.match(line): continue + par = get_par(line, 3, 4, fn=fn, lineno=lineno) + if not par: continue + if not self.check_item('PDU', par[0]): + self.add_pdu(par[0:1], fn, lineno) + self.add_register(par[0], par[1:4], fn, lineno) + elif ctx in ('MODULE', 'MODULE_IMPORT'): + if empty.match(line): continue + par = get_par(line, 2, 2, fn=fn, lineno=lineno) + if not par: continue + self.add_item('MODULE', par[0], proto=par[1], fn=fn, lineno=lineno) + elif ctx == 'IMPORT_TAG': + if empty.match(line): continue + par = get_par(line, 3, 3, fn=fn, lineno=lineno) + if not par: continue + self.add_item(ctx, par[0], ttag=(par[1], par[2]), fn=fn, lineno=lineno) + elif ctx == 'OMIT_ASSIGNMENT': + if empty.match(line): continue + par = get_par(line, 1, 1, fn=fn, lineno=lineno) + if not par: continue + self.add_item(ctx, par[0], omit=True, fn=fn, lineno=lineno) + elif ctx == 'NO_OMIT_ASSGN': + if empty.match(line): continue + par = get_par(line, 1, 1, fn=fn, lineno=lineno) + if not par: continue + self.add_item(ctx, par[0], omit=False, fn=fn, lineno=lineno) + elif ctx == 'VIRTUAL_ASSGN': + if empty.match(line): continue + par = get_par(line, 2, -1, fn=fn, lineno=lineno) + if not par: continue + if (len(par[1].split('/')) > 1) and not self.check_item('SET_TYPE', par[1]): + self.add_item('SET_TYPE', par[1], type=par[0], fn=fn, lineno=lineno) + self.add_item('VIRTUAL_ASSGN', par[1], name=par[0], fn=fn, lineno=lineno) + for nm in par[2:]: + self.add_item('SET_TYPE', nm, type=par[0], fn=fn, lineno=lineno) + if not par[0][0].isupper(): + warnings.warn_explicit("Virtual assignment should have uppercase name (%s)" % (par[0]), + UserWarning, fn, lineno) + elif ctx == 'SET_TYPE': + if empty.match(line): continue + par = get_par(line, 2, 2, fn=fn, lineno=lineno) + if not par: continue + if not self.check_item('VIRTUAL_ASSGN', par[0]): + self.add_item('SET_TYPE', par[0], type=par[1], fn=fn, lineno=lineno) + if not par[1][0].isupper(): + warnings.warn_explicit("Set type should have uppercase name (%s)" % (par[1]), + UserWarning, fn, lineno) + elif ctx == 'ASSIGN_VALUE_TO_TYPE': + if empty.match(line): continue + par = get_par(line, 2, 2, fn=fn, lineno=lineno) + if not par: continue + self.add_item(ctx, par[0], name=par[1], fn=fn, lineno=lineno) + elif ctx == 'TYPE_RENAME': + if empty.match(line): continue + par = get_par(line, 2, 2, fn=fn, lineno=lineno) + if not par: continue + self.add_item('TYPE_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno) + if not par[1][0].isupper(): + warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]), + UserWarning, fn, lineno) + elif ctx == 'FIELD_RENAME': + if empty.match(line): continue + par = get_par(line, 2, 2, fn=fn, lineno=lineno) + if not par: continue + self.add_item('FIELD_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno) + if not par[1][0].islower(): + warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]), + UserWarning, 
fn, lineno)
+ elif ctx == 'TF_RENAME':
+ if empty.match(line): continue
+ par = get_par(line, 2, 2, fn=fn, lineno=lineno)
+ if not par: continue
+ tmpu = par[1][0].upper() + par[1][1:]
+ tmpl = par[1][0].lower() + par[1][1:]
+ self.add_item('TYPE_RENAME', par[0], eth_name=tmpu, fn=fn, lineno=lineno)
+ if not tmpu[0].isupper():
+ warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
+ UserWarning, fn, lineno)
+ self.add_item('FIELD_RENAME', par[0], eth_name=tmpl, fn=fn, lineno=lineno)
+ if not tmpl[0].islower():
+ warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
+ UserWarning, fn, lineno)
+ elif ctx in ('TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR'):
+ if empty.match(line): continue
+ par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
+ if not par: continue
+ self.add_item(ctx, par[0], attr=par[1], fn=fn, lineno=lineno)
+ elif ctx == 'FN_PARS':
+ if empty.match(line): continue
+ if name:
+ par = get_par_nm(line, 0, 0, fn=fn, lineno=lineno)
+ else:
+ par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
+ if not par: continue
+ if name:
+ self.update_item(ctx, name, pars=par[0], fn=fn, lineno=lineno)
+ else:
+ self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
+ elif ctx in ('FN_HDR', 'FN_FTR', 'FN_BODY'):
+ result = cdirective.search(line)
+ if result: # directive
+ line = '#' + line[result.end():]
+ self.add_fn_line(name, ctx, line, fn=fn, lineno=lineno)
+ elif ctx == 'CLASS':
+ if empty.match(line): continue
+ par = get_par(line, 1, 3, fn=fn, lineno=lineno)
+ if not par: continue
+ if not set_type_to_class(name, par[0], par[1:]):
+ warnings.warn_explicit("Could not set type of class member %s.&%s to %s" % (name, par[0], par[1]),
+ UserWarning, fn, lineno)
+ elif ctx == 'TABLE':
+ self.report[name][-1]['text'] += line
+
+ def set_opt(self, opt, par, fn, lineno):
+ #print("set_opt: %s, %s" % (opt, par))
+ if opt in ("-I",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.include_path.append(relpath(par[0]))
+ elif opt in ("-b", "BER", "CER", "DER"):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.encoding = 'ber'
+ elif opt in ("PER",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.encoding = 'per'
+ elif opt in ("OER",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.encoding = 'oer'
+ elif opt in ("-p", "PROTO"):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.proto_opt = par[0]
+ self.ectx.merge_modules = True
+ elif opt in ("ALIGNED",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.aligned = True
+ elif opt in ("-u", "UNALIGNED"):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.aligned = False
+ elif opt in ("PROTO_ROOT_NAME",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.proto_root_name = par[0]
+ elif opt in ("-d",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.dbgopt = par[0]
+ elif opt in ("-e",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.expcnf = True
+ elif opt in ("-S",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.merge_modules = True
+ elif opt in ("GROUP_BY_PROT",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.group_by_prot = True
+ elif opt in ("-o",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.outnm_opt = par[0]
+ elif opt in ("-O",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.output.outdir = relpath(par[0])
+
elif opt in ("-s",): + par = self.check_par(par, 1, 1, fn, lineno) + if not par: return + self.ectx.output.single_file = relpath(par[0]) + elif opt in ("-k",): + par = self.check_par(par, 0, 0, fn, lineno) + self.ectx.output.keep = True + elif opt in ("-L",): + par = self.check_par(par, 0, 0, fn, lineno) + self.suppress_line = True + elif opt in ("EMBEDDED_PDV_CB",): + par = self.check_par(par, 1, 1, fn, lineno) + if not par: return + self.ectx.default_embedded_pdv_cb = par[0] + elif opt in ("EXTERNAL_TYPE_CB",): + par = self.check_par(par, 1, 1, fn, lineno) + if not par: return + self.ectx.default_external_type_cb = par[0] + elif opt in ("-r",): + par = self.check_par(par, 1, 1, fn, lineno) + if not par: return + self.ectx.remove_prefix = par[0] + else: + warnings.warn_explicit("Unknown option %s" % (opt), + UserWarning, fn, lineno) + + def dbg_print(self): + print("\n# Conformance values") + print("%-15s %-4s %-15s %-20s %s" % ("File", "Line", "Table", "Key", "Value")) + print("-" * 100) + tbls = sorted(self.table.keys()) + for t in tbls: + keys = sorted(self.table[t].keys()) + for k in keys: + print("%-15s %4s %-15s %-20s %s" % ( + self.table[t][k]['fn'], self.table[t][k]['lineno'], t, k, str(self.table[t][k][self.tblcfg[t]['val_nm']]))) + + def unused_report(self): + tbls = sorted(self.table.keys()) + for t in tbls: + if not self.tblcfg[t]['chk_use']: continue + keys = sorted(self.table[t].keys()) + for k in keys: + if not self.table[t][k]['used']: + warnings.warn_explicit("Unused %s for %s" % (t, k), + UserWarning, self.table[t][k]['fn'], self.table[t][k]['lineno']) + fnms = list(self.fn.keys()) + fnms.sort() + for f in fnms: + keys = sorted(self.fn[f].keys()) + for k in keys: + if not self.fn[f][k]: continue + if not self.fn[f][k]['used']: + warnings.warn_explicit("Unused %s for %s" % (k, f), + UserWarning, self.fn[f][k]['fn'], self.fn[f][k]['lineno']) + +#--- EthOut ------------------------------------------------------------------- +class EthOut: + def __init__(self): + self.ectx = None + self.outnm = None + self.outdir = '.' + self.single_file = None + self.created_files = {} + self.created_files_ord = [] + self.keep = False + + def outcomment(self, ln, comment=None): + if comment: + return '%s %s\n' % (comment, ln) + else: + return '/* %-74s */\n' % (ln) + + def created_file_add(self, name, keep_anyway): + name = os.path.normcase(os.path.abspath(name)) + if name not in self.created_files: + self.created_files_ord.append(name) + self.created_files[name] = keep_anyway + else: + self.created_files[name] = self.created_files[name] or keep_anyway + + def created_file_exists(self, name): + name = os.path.normcase(os.path.abspath(name)) + return name in self.created_files + + #--- output_fname ------------------------------------------------------- + def output_fname(self, ftype, ext='c'): + fn = '' + if not ext in ('cnf',): + fn += 'packet-' + fn += self.outnm + if (ftype): + fn += '-' + ftype + fn += '.' 
+ ext
+ return fn
+ #--- file_open -------------------------------------------------------
+ def file_open(self, ftype, ext='c'):
+ fn = self.output_fname(ftype, ext=ext)
+ if self.created_file_exists(fn):
+ fx = open(fn, 'a')
+ else:
+ fx = open(fn, 'w')
+ comment = None
+ if ext in ('cnf',):
+ comment = '#'
+ fx.write(self.fhdr(fn, comment = comment))
+ else:
+ if (not self.single_file and not self.created_file_exists(fn)):
+ fx.write(self.fhdr(fn))
+ if not self.ectx.merge_modules:
+ fx.write('\n')
+ mstr = "--- "
+ if self.ectx.groups():
+ mstr += "Module"
+ if (len(self.ectx.modules) > 1):
+ mstr += "s"
+ for (m, p) in self.ectx.modules:
+ mstr += " %s" % (m)
+ else:
+ mstr += "Module %s" % (self.ectx.Module())
+ mstr += " --- --- ---"
+ fx.write(self.outcomment(mstr, comment))
+ fx.write('\n')
+ return fx
+ #--- file_close -------------------------------------------------------
+ def file_close(self, fx, discard=False, keep_anyway=False):
+ fx.close()
+ if discard and not self.created_file_exists(fx.name):
+ os.unlink(fx.name)
+ else:
+ self.created_file_add(fx.name, keep_anyway)
+ #--- fhdr -------------------------------------------------------
+ def fhdr(self, fn, comment=None):
+ out = ''
+ out += self.outcomment('Do not modify this file. Changes will be overwritten.', comment)
+ out += self.outcomment('Generated automatically by the ASN.1 to Wireshark dissector compiler', comment)
+ out += self.outcomment(os.path.basename(fn), comment)
+ out += self.outcomment(' '.join(['asn2wrs.py'] + sys.argv[1:]), comment)
+ out += '\n'
+ # Make Windows path separator look like Unix path separator
+ out = out.replace('\\', '/')
+ # Change absolute paths and relative paths generated outside
+ # source directory to paths relative to asn1/ subdir.
+ out = re.sub(r'(\s)[./A-Z]\S*/dissectors\b', r'\1../..', out)
+ out = re.sub(r'(\s)[./A-Z]\S*/asn1/\S*?([\s/])', r'\1.\2', out)
+ return out
+
+ #--- dbg_print -------------------------------------------------------
+ def dbg_print(self):
+ print("\n# Output files")
+ print("\n".join(self.created_files_ord))
+ print("\n")
+
+ #--- make_single_file -------------------------------------------------------
+ def make_single_file(self, suppress_line):
+ if (not self.single_file): return
+ in_nm = self.single_file + '.c'
+ out_nm = os.path.join(self.outdir, self.output_fname(''))
+ self.do_include(out_nm, in_nm, suppress_line)
+ in_nm = self.single_file + '.h'
+ if (os.path.exists(in_nm)):
+ out_nm = os.path.join(self.outdir, self.output_fname('', ext='h'))
+ self.do_include(out_nm, in_nm, suppress_line)
+ if (not self.keep):
+ for fn in self.created_files_ord:
+ if not self.created_files[fn]:
+ os.unlink(fn)
+
+ #--- do_include -------------------------------------------------------
+ def do_include(self, out_nm, in_nm, suppress_line):
+ def check_file(fn, fnlist):
+ fnfull = os.path.normcase(os.path.abspath(fn))
+ if (fnfull in fnlist and os.path.exists(fnfull)):
+ return os.path.normpath(fn)
+ return None
+ fin = open(in_nm, "r")
+ fout = open(out_nm, "w")
+ fout.write(self.fhdr(out_nm))
+ if (not suppress_line):
+ fout.write('/* Input file: ' + os.path.basename(in_nm) +' */\n')
+ fout.write('\n')
+ fout.write('#line %u "%s"\n' % (1, rel_dissector_path(in_nm)))
+
+ include = re.compile(r'^\s*#\s*include\s+[<"](?P<fname>[^>"]+)[>"]', re.IGNORECASE)
+
+ cont_linenum = 0;
+
+ while (True):
+ cont_linenum = cont_linenum + 1;
+ line = fin.readline()
+ if (line == ''): break
+ ifile = None
+ result = include.search(line)
+ #if (result): print 
os.path.normcase(os.path.abspath(result.group('fname'))) + if (result): + ifile = check_file(os.path.join(os.path.split(in_nm)[0], result.group('fname')), self.created_files) + if (not ifile): + ifile = check_file(os.path.join(self.outdir, result.group('fname')), self.created_files) + if (not ifile): + ifile = check_file(result.group('fname'), self.created_files) + if (ifile): + if (not suppress_line): + fout.write('\n') + fout.write('/*--- Included file: ' + ifile + ' ---*/\n') + fout.write('#line %u "%s"\n' % (1, rel_dissector_path(ifile))) + finc = open(ifile, "r") + fout.write(finc.read()) + if (not suppress_line): + fout.write('\n') + fout.write('/*--- End of included file: ' + ifile + ' ---*/\n') + fout.write('#line %u "%s"\n' % (cont_linenum+1, rel_dissector_path(in_nm)) ) + finc.close() + else: + fout.write(line) + + fout.close() + fin.close() + + +#--- Node --------------------------------------------------------------------- +class Node: + def __init__(self,*args, **kw): + if len (args) == 0: + self.type = self.__class__.__name__ + else: + assert (len(args) == 1) + self.type = args[0] + self.__dict__.update (kw) + def str_child (self, key, child, depth): + indent = " " * (2 * depth) + keystr = indent + key + ": " + if key == 'type': # already processed in str_depth + return "" + if isinstance (child, Node): # ugh + return keystr + "\n" + child.str_depth (depth+1) + if isinstance(child, type ([])): + l = [] + for x in child: + if isinstance (x, Node): + l.append (x.str_depth (depth+1)) + else: + l.append (indent + " " + str(x) + "\n") + return keystr + "[\n" + ''.join(l) + indent + "]\n" + else: + return keystr + str (child) + "\n" + def str_depth (self, depth): # ugh + indent = " " * (2 * depth) + l = ["%s%s" % (indent, self.type)] + l.append ("".join ([self.str_child (k_v[0], k_v[1], depth + 1) for k_v in list(self.__dict__.items ())])) + return "\n".join (l) + def __repr__(self): + return "\n" + self.str_depth (0) + def to_python (self, ctx): + return self.str_depth (ctx.indent_lev) + + def eth_reg(self, ident, ectx): + pass + + def fld_obj_repr(self, ectx): + return "/* TO DO %s */" % (str(self)) + + +#--- ValueAssignment ------------------------------------------------------------- +class ValueAssignment (Node): + def __init__(self,*args, **kw) : + Node.__init__ (self,*args, **kw) + + def eth_reg(self, ident, ectx): + if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit + ectx.eth_reg_vassign(self) + ectx.eth_reg_value(self.ident, self.typ, self.val) + +#--- ObjectAssignment ------------------------------------------------------------- +class ObjectAssignment (Node): + def __init__(self,*args, **kw) : + Node.__init__ (self,*args, **kw) + + def __eq__(self, other): + if self.cls != other.cls: + return False + if len(self.val) != len(other.val): + return False + for f in (list(self.val.keys())): + if f not in other.val: + return False + if isinstance(self.val[f], Node) and isinstance(other.val[f], Node): + if not self.val[f].fld_obj_eq(other.val[f]): + return False + else: + if str(self.val[f]) != str(other.val[f]): + return False + return True + + def eth_reg(self, ident, ectx): + def make_virtual_type(cls, field, prefix): + if isinstance(self.val, str): return + if field in self.val and not isinstance(self.val[field], Type_Ref): + vnm = prefix + '-' + self.ident + virtual_tr = Type_Ref(val = vnm) + t = self.val[field] + self.val[field] = virtual_tr + ectx.eth_reg_assign(vnm, t, virt=True) + ectx.eth_reg_type(vnm, t) + t.eth_reg_sub(vnm, 
ectx) + if field in self.val and ectx.conform.check_item('PDU', cls + '.' + field): + ectx.eth_reg_field(self.val[field].val, self.val[field].val, impl=self.val[field].HasImplicitTag(ectx), pdu=ectx.conform.use_item('PDU', cls + '.' + field)) + return + # end of make_virtual_type() + if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit + self.module = ectx.Module() + ectx.eth_reg_oassign(self) + if (self.cls == 'TYPE-IDENTIFIER') or (self.cls == 'ABSTRACT-SYNTAX'): + make_virtual_type(self.cls, '&Type', 'TYPE') + if (self.cls == 'OPERATION'): + make_virtual_type(self.cls, '&ArgumentType', 'ARG') + make_virtual_type(self.cls, '&ResultType', 'RES') + if (self.cls == 'ERROR'): + make_virtual_type(self.cls, '&ParameterType', 'PAR') + + +#--- Type --------------------------------------------------------------------- +class Type (Node): + def __init__(self,*args, **kw) : + self.name = None + self.constr = None + self.tags = [] + self.named_list = None + Node.__init__ (self,*args, **kw) + + def IsNamed(self): + if self.name is None : + return False + else: + return True + + def HasConstraint(self): + if self.constr is None : + return False + else : + return True + + def HasSizeConstraint(self): + return self.HasConstraint() and self.constr.IsSize() + + def HasValueConstraint(self): + return self.HasConstraint() and self.constr.IsValue() + + def HasPermAlph(self): + return self.HasConstraint() and self.constr.IsPermAlph() + + def HasContentsConstraint(self): + return self.HasConstraint() and self.constr.IsContents() + + def HasOwnTag(self): + return len(self.tags) > 0 + + def HasImplicitTag(self, ectx): + return (self.HasOwnTag() and self.tags[0].IsImplicit(ectx)) + + def IndetermTag(self, ectx): + return False + + def AddTag(self, tag): + self.tags[0:0] = [tag] + + def GetTag(self, ectx): + #print "GetTag(%s)\n" % self.name; + if (self.HasOwnTag()): + return self.tags[0].GetTag(ectx) + else: + return self.GetTTag(ectx) + + def GetTTag(self, ectx): + print("#Unhandled GetTTag() in %s" % (self.type)) + print(self.str_depth(1)) + return ('BER_CLASS_unknown', 'TAG_unknown') + + def SetName(self, name): + self.name = name + + def AddConstraint(self, constr): + if not self.HasConstraint(): + self.constr = constr + else: + self.constr = Constraint(type = 'Intersection', subtype = [self.constr, constr]) + + def eth_tname(self): + return '#' + self.type + '_' + str(id(self)) + + def eth_ftype(self, ectx): + return ('FT_NONE', 'BASE_NONE') + + def eth_strings(self): + return 'NULL' + + def eth_omit_field(self): + return False + + def eth_need_tree(self): + return False + + def eth_has_vals(self): + return False + + def eth_has_enum(self, tname, ectx): + return self.eth_has_vals() and (ectx.eth_type[tname]['enum'] & EF_ENUM) + + def eth_need_pdu(self, ectx): + return None + + def eth_named_bits(self): + return None + + def eth_reg_sub(self, ident, ectx): + pass + + def get_components(self, ectx): + print("#Unhandled get_components() in %s" % (self.type)) + print(self.str_depth(1)) + return [] + + def sel_req(self, sel, ectx): + print("#Selection '%s' required for non-CHOICE type %s" % (sel, self.type)) + print(self.str_depth(1)) + + def fld_obj_eq(self, other): + return isinstance(other, Type) and (self.eth_tname() == other.eth_tname()) + + def eth_reg(self, ident, ectx, tstrip=0, tagflag=False, selflag=False, idx='', parent=None): + #print "eth_reg(): %s, ident=%s, tstrip=%d, tagflag=%s, selflag=%s, parent=%s" %(self.type, ident, tstrip, str(tagflag), str(selflag), 
str(parent)) + #print " ", self + if (ectx.NeedTags() and (len(self.tags) > tstrip)): + tagged_type = self + for i in range(len(self.tags)-1, tstrip-1, -1): + tagged_type = TaggedType(val=tagged_type, tstrip=i) + tagged_type.AddTag(self.tags[i]) + if not tagflag: # 1st tagged level + if self.IsNamed() and not selflag: + tagged_type.SetName(self.name) + tagged_type.eth_reg(ident, ectx, tstrip=1, tagflag=tagflag, idx=idx, parent=parent) + return + nm = '' + if ident and self.IsNamed() and not tagflag and not selflag: + nm = ident + '/' + self.name + elif ident: + nm = ident + elif self.IsNamed(): + nm = self.name + if not ident and ectx.conform.omit_assignment('T', nm, ectx.Module()): return # Assignment to omit + if not ident: # Assignment + ectx.eth_reg_assign(nm, self) + if self.type == 'Type_Ref' and not self.tr_need_own_fn(ectx): + ectx.eth_reg_type(nm, self) + virtual_tr = Type_Ref(val=ectx.conform.use_item('SET_TYPE', nm)) + if (self.type == 'Type_Ref') or ectx.conform.check_item('SET_TYPE', nm): + if ident and (ectx.conform.check_item('TYPE_RENAME', nm) or ectx.conform.get_fn_presence(nm) or selflag): + if ectx.conform.check_item('SET_TYPE', nm): + ectx.eth_reg_type(nm, virtual_tr) # dummy Type Reference + else: + ectx.eth_reg_type(nm, self) # new type + trnm = nm + elif ectx.conform.check_item('SET_TYPE', nm): + trnm = ectx.conform.use_item('SET_TYPE', nm) + elif (self.type == 'Type_Ref') and self.tr_need_own_fn(ectx): + ectx.eth_reg_type(nm, self) # need own function, e.g. for constraints + trnm = nm + else: + trnm = self.val + else: + ectx.eth_reg_type(nm, self, mod = ectx.Module()) + trnm = nm + if ectx.conform.check_item('VIRTUAL_ASSGN', nm): + vnm = ectx.conform.use_item('VIRTUAL_ASSGN', nm) + ectx.eth_reg_assign(vnm, self, virt=True) + ectx.eth_reg_type(vnm, self) + self.eth_reg_sub(vnm, ectx) + if parent and (ectx.type[parent]['val'].type == 'TaggedType'): + ectx.type[parent]['val'].eth_set_val_name(parent, trnm, ectx) + if ident and not tagflag and not self.eth_omit_field(): + ectx.eth_reg_field(nm, trnm, idx=idx, parent=parent, impl=self.HasImplicitTag(ectx)) + if ectx.conform.check_item('SET_TYPE', nm): + virtual_tr.eth_reg_sub(nm, ectx) + else: + self.eth_reg_sub(nm, ectx) + + def eth_get_size_constr(self, ectx): + (minv, maxv, ext) = ('MIN', 'MAX', False) + if self.HasSizeConstraint(): + if self.constr.IsSize(): + (minv, maxv, ext) = self.constr.GetSize(ectx) + if (self.constr.type == 'Intersection'): + if self.constr.subtype[0].IsSize(): + (minv, maxv, ext) = self.constr.subtype[0].GetSize(ectx) + elif self.constr.subtype[1].IsSize(): + (minv, maxv, ext) = self.constr.subtype[1].GetSize(ectx) + if minv == 'MIN': minv = 'NO_BOUND' + if maxv == 'MAX': maxv = 'NO_BOUND' + if (ext): ext = 'TRUE' + else: ext = 'FALSE' + return (minv, maxv, ext) + + def eth_get_value_constr(self, ectx): + (minv, maxv, ext) = ('MIN', 'MAX', False) + if self.HasValueConstraint(): + (minv, maxv, ext) = self.constr.GetValue(ectx) + if minv == 'MIN': minv = 'NO_BOUND' + if maxv == 'MAX': maxv = 'NO_BOUND' + if str(minv).isdigit(): + minv += 'U' + elif (str(minv)[0] == "-") and str(minv)[1:].isdigit(): + if (int(minv) == -(2**31)): + minv = "G_MININT32" + elif (int(minv) < -(2**31)): + minv = "G_GINT64_CONSTANT(%s)" % (str(minv)) + if str(maxv).isdigit(): + if (int(maxv) >= 2**32): + maxv = "G_GUINT64_CONSTANT(%s)" % (str(maxv)) + else: + maxv += 'U' + if (ext): ext = 'TRUE' + else: ext = 'FALSE' + return (minv, maxv, ext) + + def eth_get_alphabet_constr(self, ectx): + (alph, alphlen) = ('NULL', 
'0') + if self.HasPermAlph(): + alph = self.constr.GetPermAlph(ectx) + if not alph: + alph = 'NULL' + if (alph != 'NULL'): + if (((alph[0] + alph[-1]) == '""') and (not alph.count('"', 1, -1))): + alphlen = str(len(alph) - 2) + else: + alphlen = 'strlen(%s)' % (alph) + return (alph, alphlen) + + def eth_type_vals(self, tname, ectx): + if self.eth_has_vals(): + print("#Unhandled eth_type_vals('%s') in %s" % (tname, self.type)) + print(self.str_depth(1)) + return '' + + def eth_type_enum(self, tname, ectx): + if self.eth_has_enum(tname, ectx): + print("#Unhandled eth_type_enum('%s') in %s" % (tname, self.type)) + print(self.str_depth(1)) + return '' + + def eth_type_default_table(self, ectx, tname): + return '' + + def eth_type_default_body(self, ectx, tname): + print("#Unhandled eth_type_default_body('%s') in %s" % (tname, self.type)) + print(self.str_depth(1)) + return '' + + def eth_type_default_pars(self, ectx, tname): + pars = { + 'TNAME' : tname, + 'ER' : ectx.encp(), + 'FN_VARIANT' : '', + 'TREE' : 'tree', + 'TVB' : 'tvb', + 'OFFSET' : 'offset', + 'ACTX' : 'actx', + 'HF_INDEX' : 'hf_index', + 'VAL_PTR' : 'NULL', + 'IMPLICIT_TAG' : 'implicit_tag', + } + if (ectx.eth_type[tname]['tree']): + pars['ETT_INDEX'] = ectx.eth_type[tname]['tree'] + if (ectx.merge_modules): + pars['PROTOP'] = '' + else: + pars['PROTOP'] = ectx.eth_type[tname]['proto'] + '_' + return pars + + def eth_type_fn(self, proto, tname, ectx): + body = self.eth_type_default_body(ectx, tname) + pars = self.eth_type_default_pars(ectx, tname) + if ectx.conform.check_item('FN_PARS', tname): + pars.update(ectx.conform.use_item('FN_PARS', tname)) + elif ectx.conform.check_item('FN_PARS', ectx.eth_type[tname]['ref'][0]): + pars.update(ectx.conform.use_item('FN_PARS', ectx.eth_type[tname]['ref'][0])) + pars['DEFAULT_BODY'] = body + for i in range(4): + for k in list(pars.keys()): + try: + pars[k] = pars[k] % pars + except (ValueError,TypeError): + raise sys.exc_info()[0]("%s\n%s" % (str(pars), sys.exc_info()[1])) + out = '\n' + out += self.eth_type_default_table(ectx, tname) % pars + out += ectx.eth_type_fn_hdr(tname) + out += ectx.eth_type_fn_body(tname, body, pars=pars) + out += ectx.eth_type_fn_ftr(tname) + return out + +#--- Value -------------------------------------------------------------------- +class Value (Node): + def __init__(self,*args, **kw) : + self.name = None + Node.__init__ (self,*args, **kw) + + def SetName(self, name) : + self.name = name + + def to_str(self, ectx): + return str(self.val) + + def get_dep(self): + return None + + def fld_obj_repr(self, ectx): + return self.to_str(ectx) + +#--- Value_Ref ----------------------------------------------------------------- +class Value_Ref (Value): + def to_str(self, ectx): + return asn2c(self.val) + +#--- ObjectClass --------------------------------------------------------------------- +class ObjectClass (Node): + def __init__(self,*args, **kw) : + self.name = None + Node.__init__ (self,*args, **kw) + + def SetName(self, name): + self.name = name + add_class_ident(self.name) + + def eth_reg(self, ident, ectx): + if ectx.conform.omit_assignment('C', self.name, ectx.Module()): return # Assignment to omit + ectx.eth_reg_objectclass(self.name, self) + +#--- Class_Ref ----------------------------------------------------------------- +class Class_Ref (ObjectClass): + pass + +#--- ObjectClassDefn --------------------------------------------------------------------- +class ObjectClassDefn (ObjectClass): + def reg_types(self): + for fld in self.fields: + repr = 
fld.fld_repr() + set_type_to_class(self.name, repr[0], repr[1:]) + + +#--- Tag --------------------------------------------------------------- +class Tag (Node): + def to_python (self, ctx): + return 'asn1.TYPE(%s,%s)' % (mk_tag_str (ctx, self.tag.cls, + self.tag_typ, + self.tag.num), + self.typ.to_python (ctx)) + def IsImplicit(self, ectx): + return ((self.mode == 'IMPLICIT') or ((self.mode == 'default') and (ectx.tag_def != 'EXPLICIT'))) + + def GetTag(self, ectx): + tc = '' + if (self.cls == 'UNIVERSAL'): tc = 'BER_CLASS_UNI' + elif (self.cls == 'APPLICATION'): tc = 'BER_CLASS_APP' + elif (self.cls == 'CONTEXT'): tc = 'BER_CLASS_CON' + elif (self.cls == 'PRIVATE'): tc = 'BER_CLASS_PRI' + return (tc, self.num) + + def eth_tname(self): + n = '' + if (self.cls == 'UNIVERSAL'): n = 'U' + elif (self.cls == 'APPLICATION'): n = 'A' + elif (self.cls == 'CONTEXT'): n = 'C' + elif (self.cls == 'PRIVATE'): n = 'P' + return n + str(self.num) + +#--- Constraint --------------------------------------------------------------- +constr_cnt = 0 +class Constraint (Node): + def to_python (self, ctx): + print("Ignoring constraint:", self.type) + return self.subtype.typ.to_python (ctx) + def __str__ (self): + return "Constraint: type=%s, subtype=%s" % (self.type, self.subtype) + + def eth_tname(self): + return '#' + self.type + '_' + str(id(self)) + + def IsSize(self): + return (self.type == 'Size' and self.subtype.IsValue()) \ + or (self.type == 'Intersection' and (self.subtype[0].IsSize() or self.subtype[1].IsSize())) \ + + def GetSize(self, ectx): + (minv, maxv, ext) = ('MIN', 'MAX', False) + if self.IsSize(): + if self.type == 'Size': + (minv, maxv, ext) = self.subtype.GetValue(ectx) + ext = ext or (hasattr(self, 'ext') and self.ext) + elif self.type == 'Intersection': + if self.subtype[0].IsSize() and not self.subtype[1].IsSize(): + (minv, maxv, ext) = self.subtype[0].GetSize(ectx) + elif not self.subtype[0].IsSize() and self.subtype[1].IsSize(): + (minv, maxv, ext) = self.subtype[1].GetSize(ectx) + return (minv, maxv, ext) + + def IsValue(self): + return self.type == 'SingleValue' \ + or self.type == 'ValueRange' \ + or (self.type == 'Intersection' and (self.subtype[0].IsValue() or self.subtype[1].IsValue())) \ + or (self.type == 'Union' and (self.subtype[0].IsValue() and self.subtype[1].IsValue())) + + def GetValue(self, ectx): + (minv, maxv, ext) = ('MIN', 'MAX', False) + if self.IsValue(): + if self.type == 'SingleValue': + minv = ectx.value_get_eth(self.subtype) + maxv = ectx.value_get_eth(self.subtype) + ext = hasattr(self, 'ext') and self.ext + elif self.type == 'ValueRange': + minv = ectx.value_get_eth(self.subtype[0]) + maxv = ectx.value_get_eth(self.subtype[1]) + ext = hasattr(self, 'ext') and self.ext + elif self.type == 'Intersection': + if self.subtype[0].IsValue() and not self.subtype[1].IsValue(): + (minv, maxv, ext) = self.subtype[0].GetValue(ectx) + elif not self.subtype[0].IsValue() and self.subtype[1].IsValue(): + (minv, maxv, ext) = self.subtype[1].GetValue(ectx) + elif self.subtype[0].IsValue() and self.subtype[1].IsValue(): + v0 = self.subtype[0].GetValue(ectx) + v1 = self.subtype[1].GetValue(ectx) + (minv, maxv, ext) = (ectx.value_max(v0[0],v1[0]), ectx.value_min(v0[1],v1[1]), v0[2] and v1[2]) + elif self.type == 'Union': + if self.subtype[0].IsValue() and self.subtype[1].IsValue(): + v0 = self.subtype[0].GetValue(ectx) + v1 = self.subtype[1].GetValue(ectx) + (minv, maxv, ext) = (ectx.value_min(v0[0],v1[0]), ectx.value_max(v0[1],v1[1]), hasattr(self, 'ext') and self.ext) + return 
(minv, maxv, ext) + + def IsAlphabet(self): + return self.type == 'SingleValue' \ + or self.type == 'ValueRange' \ + or (self.type == 'Intersection' and (self.subtype[0].IsAlphabet() or self.subtype[1].IsAlphabet())) \ + or (self.type == 'Union' and (self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet())) + + def GetAlphabet(self, ectx): + alph = None + if self.IsAlphabet(): + if self.type == 'SingleValue': + alph = ectx.value_get_eth(self.subtype) + elif self.type == 'ValueRange': + if ((len(self.subtype[0]) == 3) and ((self.subtype[0][0] + self.subtype[0][-1]) == '""') \ + and (len(self.subtype[1]) == 3) and ((self.subtype[1][0] + self.subtype[1][-1]) == '""')): + alph = '"' + for c in range(ord(self.subtype[0][1]), ord(self.subtype[1][1]) + 1): + alph += chr(c) + alph += '"' + elif self.type == 'Union': + if self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet(): + a0 = self.subtype[0].GetAlphabet(ectx) + a1 = self.subtype[1].GetAlphabet(ectx) + if (((a0[0] + a0[-1]) == '""') and not a0.count('"', 1, -1) \ + and ((a1[0] + a1[-1]) == '""') and not a1.count('"', 1, -1)): + alph = '"' + a0[1:-1] + a1[1:-1] + '"' + else: + alph = a0 + ' ' + a1 + return alph + + def IsPermAlph(self): + return self.type == 'From' and self.subtype.IsAlphabet() \ + or (self.type == 'Intersection' and (self.subtype[0].IsPermAlph() or self.subtype[1].IsPermAlph())) \ + + def GetPermAlph(self, ectx): + alph = None + if self.IsPermAlph(): + if self.type == 'From': + alph = self.subtype.GetAlphabet(ectx) + elif self.type == 'Intersection': + if self.subtype[0].IsPermAlph() and not self.subtype[1].IsPermAlph(): + alph = self.subtype[0].GetPermAlph(ectx) + elif not self.subtype[0].IsPermAlph() and self.subtype[1].IsPermAlph(): + alph = self.subtype[1].GetPermAlph(ectx) + return alph + + def IsContents(self): + return self.type == 'Contents' \ + or (self.type == 'Intersection' and (self.subtype[0].IsContents() or self.subtype[1].IsContents())) \ + + def GetContents(self, ectx): + contents = None + if self.IsContents(): + if self.type == 'Contents': + if self.subtype.type == 'Type_Ref': + contents = self.subtype.val + elif self.type == 'Intersection': + if self.subtype[0].IsContents() and not self.subtype[1].IsContents(): + contents = self.subtype[0].GetContents(ectx) + elif not self.subtype[0].IsContents() and self.subtype[1].IsContents(): + contents = self.subtype[1].GetContents(ectx) + return contents + + def IsNegativ(self): + def is_neg(sval): + return isinstance(sval, str) and (sval[0] == '-') + if self.type == 'SingleValue': + return is_neg(self.subtype) + elif self.type == 'ValueRange': + if self.subtype[0] == 'MIN': return True + return is_neg(self.subtype[0]) + return False + + def eth_constrname(self): + def int2str(val): + if isinstance(val, Value_Ref): + return asn2c(val.val) + try: + if (int(val) < 0): + return 'M' + str(-int(val)) + else: + return str(int(val)) + except (ValueError, TypeError): + return asn2c(str(val)) + + ext = '' + if hasattr(self, 'ext') and self.ext: + ext = '_' + if self.type == 'SingleValue': + return int2str(self.subtype) + ext + elif self.type == 'ValueRange': + return int2str(self.subtype[0]) + '_' + int2str(self.subtype[1]) + ext + elif self.type == 'Size': + return 'SIZE_' + self.subtype.eth_constrname() + ext + else: + if (not hasattr(self, 'constr_num')): + global constr_cnt + constr_cnt += 1 + self.constr_num = constr_cnt + return 'CONSTR%03d%s' % (self.constr_num, ext) + + def Needs64b(self, ectx): + (minv, maxv, ext) = self.GetValue(ectx) + if 
((str(minv).isdigit() or ((str(minv)[0] == "-") and str(minv)[1:].isdigit())) \
+             and (str(maxv).isdigit() or ((str(maxv)[0] == "-") and str(maxv)[1:].isdigit())) \
+             and ((abs(int(maxv) - int(minv)) >= 2**32) or (int(minv) < -2**31) or (int(maxv) >= 2**32))) \
+            or (maxv == 'MAX') or (minv == 'MIN'):
+            return True
+        return False
+
+class Module (Node):
+    def to_python (self, ctx):
+        ctx.tag_def = self.tag_def.dfl_tag
+        return """#%s
+%s""" % (self.ident, self.body.to_python (ctx))
+
+    def get_name(self):
+        return self.ident.val
+
+    def get_proto(self, ectx):
+        if (ectx.proto):
+            prot = ectx.proto
+        else:
+            prot = ectx.conform.use_item('MODULE', self.get_name(), val_dflt=self.get_name())
+        return prot
+
+    def to_eth(self, ectx):
+        ectx.tags_def = 'EXPLICIT' # default = explicit
+        ectx.proto = self.get_proto(ectx)
+        ectx.tag_def = self.tag_def.dfl_tag
+        ectx.eth_reg_module(self)
+        self.body.to_eth(ectx)
+
+class Module_Body (Node):
+    def to_python (self, ctx):
+        # XXX handle exports, imports.
+        l = [x.to_python (ctx) for x in self.assign_list]
+        l = [a for a in l if a != '']
+        return "\n".join (l)
+
+    def to_eth(self, ectx):
+        # Exports
+        ectx.eth_exports(self.exports)
+        # Imports
+        for i in self.imports:
+            mod = i.module.val
+            proto = ectx.conform.use_item('MODULE', mod, val_dflt=mod)
+            ectx.eth_module_dep_add(ectx.Module(), mod)
+            for s in i.symbol_list:
+                if isinstance(s, Type_Ref):
+                    ectx.eth_import_type(s.val, mod, proto)
+                elif isinstance(s, Value_Ref):
+                    ectx.eth_import_value(s.val, mod, proto)
+                elif isinstance(s, Class_Ref):
+                    ectx.eth_import_class(s.val, mod, proto)
+                else:
+                    msg = 'Unknown kind of imported symbol %s from %s' % (str(s), mod)
+                    warnings.warn_explicit(msg, UserWarning, '', 0)
+        # AssignmentList
+        for a in self.assign_list:
+            a.eth_reg('', ectx)
+
+class Default_Tags (Node):
+    def to_python (self, ctx): # not to be used directly
+        assert (0)
+
+# XXX should just calculate dependencies as we go along.
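+# calc_dependencies() does a brute-force recursive walk over a node's public
+# attributes and records the name of every Type_Ref it reaches.  A minimal
+# standalone sketch of the same walk (illustrative only, not the function
+# used below):
+#
+#   def collect_type_refs(node, found):
+#       if isinstance(node, Type_Ref):
+#           found[node.val] = 1          # a dependency on that type name
+#       elif isinstance(node, list):
+#           for item in node:
+#               collect_type_refs(item, found)
+#       elif hasattr(node, '__dict__'):
+#           for attr, val in node.__dict__.items():
+#               if not attr.startswith('_'):   # skip private attributes
+#                   collect_type_refs(val, found)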
+def calc_dependencies (node, dict, trace = 0): + if not hasattr (node, '__dict__'): + if trace: print("#returning, node=", node) + return + if isinstance (node, Type_Ref): + dict [node.val] = 1 + if trace: print("#Setting", node.val) + return + for (a, val) in list(node.__dict__.items ()): + if trace: print("# Testing node ", node, "attr", a, " val", val) + if a[0] == '_': + continue + elif isinstance (val, Node): + calc_dependencies (val, dict, trace) + elif isinstance (val, type ([])): + for v in val: + calc_dependencies (v, dict, trace) + + +class Type_Assign (Node): + def __init__ (self, *args, **kw): + Node.__init__ (self, *args, **kw) + if isinstance (self.val, Tag): # XXX replace with generalized get_typ_ignoring_tag (no-op for Node, override in Tag) + to_test = self.val.typ + else: + to_test = self.val + if isinstance (to_test, SequenceType): + to_test.sequence_name = self.name.name + + def to_python (self, ctx): + dep_dict = {} + calc_dependencies (self.val, dep_dict, 0) + depend_list = list(dep_dict.keys ()) + return ctx.register_assignment (self.name.name, + self.val.to_python (ctx), + depend_list) + +class PyQuote (Node): + def to_python (self, ctx): + return ctx.register_pyquote (self.val) + +#--- Type_Ref ----------------------------------------------------------------- +class Type_Ref (Type): + def to_python (self, ctx): + return self.val + + def eth_reg_sub(self, ident, ectx): + ectx.eth_dep_add(ident, self.val) + + def eth_tname(self): + if self.HasSizeConstraint(): + return asn2c(self.val) + '_' + self.constr.eth_constrname() + else: + return asn2c(self.val) + + def tr_need_own_fn(self, ectx): + return (ectx.Per() or ectx.Oer()) and self.HasSizeConstraint() + + def fld_obj_repr(self, ectx): + return self.val + + def get_components(self, ectx): + if self.val not in ectx.type or ectx.type[self.val]['import']: + msg = "Can not get COMPONENTS OF %s which is imported type" % (self.val) + warnings.warn_explicit(msg, UserWarning, '', 0) + return [] + else: + return ectx.type[self.val]['val'].get_components(ectx) + + def GetTTag(self, ectx): + #print "GetTTag(%s)\n" % self.val; + if (ectx.type[self.val]['import']): + if 'ttag' not in ectx.type[self.val]: + ttag = ectx.get_ttag_from_all(self.val, ectx.type[self.val]['import']) + if not ttag and not ectx.conform.check_item('IMPORT_TAG', self.val): + msg = 'Missing tag information for imported type %s from %s (%s)' % (self.val, ectx.type[self.val]['import'], ectx.type[self.val]['proto']) + warnings.warn_explicit(msg, UserWarning, '', 0) + ttag = ('-1/*imported*/', '-1/*imported*/') + ectx.type[self.val]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.val, val_dflt=ttag) + return ectx.type[self.val]['ttag'] + else: + return ectx.type[self.val]['val'].GetTag(ectx) + + def IndetermTag(self, ectx): + if (ectx.type[self.val]['import']): + return False + else: + return ectx.type[self.val]['val'].IndetermTag(ectx) + + def eth_type_default_pars(self, ectx, tname): + if tname: + pars = Type.eth_type_default_pars(self, ectx, tname) + else: + pars = {} + t = ectx.type[self.val]['ethname'] + pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto'] + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s' + if self.HasSizeConstraint(): + (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx) + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(TVB)s', 
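+# The '%(IMPLICIT_TAG)s', '%(TVB)s', ... strings above and below are not
+# literal arguments: eth_fn_call() copies them into the generated C source,
+# and every %(NAME)s placeholder is later filled in from the dictionary built
+# by eth_type_default_pars().  A minimal sketch of that expansion (the proto
+# and type names are hypothetical):
+#
+#   pars = {'TYPE_REF_PROTO': 'x509af', 'TYPE_REF_TNAME': 'Certificate'}
+#   fn = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s' % pars
+#   assert fn == 'dissect_x509af_Certificate'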
'%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),)) + elif (ectx.Per() or ectx.Oer()): + if self.HasSizeConstraint(): + body = ectx.eth_fn_call('dissect_%(ER)s_size_constrained_type', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',), + ('"%(TYPE_REF_TNAME)s"', '%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),)) + else: + body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- SelectionType ------------------------------------------------------------ +class SelectionType (Type): + def to_python (self, ctx): + return self.val + + def sel_of_typeref(self): + return self.typ.type == 'Type_Ref' + + def eth_reg_sub(self, ident, ectx): + if not self.sel_of_typeref(): + self.seltype = '' + return + self.seltype = ectx.eth_sel_req(self.typ.val, self.sel) + ectx.eth_dep_add(ident, self.seltype) + + def eth_ftype(self, ectx): + (ftype, display) = ('FT_NONE', 'BASE_NONE') + if self.sel_of_typeref() and not ectx.type[self.seltype]['import']: + (ftype, display) = ectx.type[self.typ.val]['val'].eth_ftype_sel(self.sel, ectx) + return (ftype, display) + + def GetTTag(self, ectx): + #print "GetTTag(%s)\n" % self.seltype; + if (ectx.type[self.seltype]['import']): + if 'ttag' not in ectx.type[self.seltype]: + if not ectx.conform.check_item('IMPORT_TAG', self.seltype): + msg = 'Missing tag information for imported type %s from %s (%s)' % (self.seltype, ectx.type[self.seltype]['import'], ectx.type[self.seltype]['proto']) + warnings.warn_explicit(msg, UserWarning, '', 0) + ectx.type[self.seltype]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.seltype, val_dflt=('-1 /*imported*/', '-1 /*imported*/')) + return ectx.type[self.seltype]['ttag'] + else: + return ectx.type[self.typ.val]['val'].GetTTagSel(self.sel, ectx) + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + if self.sel_of_typeref(): + t = ectx.type[self.seltype]['ethname'] + pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto'] + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s' + return pars + + def eth_type_default_body(self, ectx, tname): + if not self.sel_of_typeref(): + body = '#error Can not decode %s' % (tname) + elif (ectx.Ber()): + body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),)) + elif (ectx.Per() or ectx.Oer()): + body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- TaggedType ----------------------------------------------------------------- +class TaggedType (Type): + def eth_tname(self): + tn = '' + for i in range(self.tstrip, len(self.val.tags)): + tn += self.val.tags[i].eth_tname() + tn += '_' + tn += self.val.eth_tname() + return tn + + def eth_set_val_name(self, ident, val_name, ectx): + #print "TaggedType::eth_set_val_name(): ident=%s, val_name=%s" % (ident, val_name) + self.val_name = val_name + ectx.eth_dep_add(ident, self.val_name) + + def eth_reg_sub(self, ident, ectx): + self.val_name = ident + '/' + UNTAG_TYPE_NAME + self.val.eth_reg(self.val_name, ectx, tstrip=self.tstrip+1, tagflag=True, parent=ident) + + def GetTTag(self, ectx): + #print "GetTTag(%s)\n" % 
self.seltype; + return self.GetTag(ectx) + + def eth_ftype(self, ectx): + return self.val.eth_ftype(ectx) + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + t = ectx.type[self.val_name]['ethname'] + pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto'] + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s' + (pars['TAG_CLS'], pars['TAG_TAG']) = self.GetTag(ectx) + if self.HasImplicitTag(ectx): + pars['TAG_IMPL'] = 'TRUE' + else: + pars['TAG_IMPL'] = 'FALSE' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_tagged_type', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(HF_INDEX)s', '%(TAG_CLS)s', '%(TAG_TAG)s', '%(TAG_IMPL)s', '%(TYPE_REF_FN)s',),)) + else: + body = '#error Can not decode tagged_type %s' % (tname) + return body + +#--- SqType ----------------------------------------------------------- +class SqType (Type): + def out_item(self, f, val, optional, ext, ectx): + if (val.eth_omit_field()): + t = ectx.type[val.ident]['ethname'] + fullname = ectx.dummy_eag_field + else: + ef = ectx.field[f]['ethname'] + t = ectx.eth_hf[ef]['ethtype'] + fullname = ectx.eth_hf[ef]['fullname'] + if (ectx.Ber()): + #print "optional=%s, e.val.HasOwnTag()=%s, e.val.IndetermTag()=%s" % (str(e.optional), str(e.val.HasOwnTag()), str(e.val.IndetermTag(ectx))) + #print val.str_depth(1) + opt = '' + if (optional): + opt = 'BER_FLAGS_OPTIONAL' + if (not val.HasOwnTag()): + if (opt): opt += '|' + opt += 'BER_FLAGS_NOOWNTAG' + elif (val.HasImplicitTag(ectx)): + if (opt): opt += '|' + opt += 'BER_FLAGS_IMPLTAG' + if (val.IndetermTag(ectx)): + if (opt): opt += '|' + opt += 'BER_FLAGS_NOTCHKTAG' + if (not opt): opt = '0' + else: + if optional: + opt = 'ASN1_OPTIONAL' + else: + opt = 'ASN1_NOT_OPTIONAL' + if (ectx.Ber()): + (tc, tn) = val.GetTag(ectx) + out = ' { %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \ + % ('&'+fullname, tc, tn, opt, ectx.eth_type[t]['proto'], t) + elif (ectx.Per() or ectx.Oer()): + out = ' { %-24s, %-23s, %-17s, dissect_%s_%s },\n' \ + % ('&'+fullname, ext, opt, ectx.eth_type[t]['proto'], t) + else: + out = '' + return out + +#--- SeqType ----------------------------------------------------------- +class SeqType (SqType): + + def all_components(self): + lst = self.elt_list[:] + if hasattr(self, 'ext_list'): + lst.extend(self.ext_list) + if hasattr(self, 'elt_list2'): + lst.extend(self.elt_list2) + return lst + + def need_components(self): + lst = self.all_components() + for e in (lst): + if e.type == 'components_of': + return True + return False + + def expand_components(self, ectx): + while self.need_components(): + for i in range(len(self.elt_list)): + if self.elt_list[i].type == 'components_of': + comp = self.elt_list[i].typ.get_components(ectx) + self.elt_list[i:i+1] = comp + break + if hasattr(self, 'ext_list'): + for i in range(len(self.ext_list)): + if self.ext_list[i].type == 'components_of': + comp = self.ext_list[i].typ.get_components(ectx) + self.ext_list[i:i+1] = comp + break + if hasattr(self, 'elt_list2'): + for i in range(len(self.elt_list2)): + if self.elt_list2[i].type == 'components_of': + comp = self.elt_list2[i].typ.get_components(ectx) + self.elt_list2[i:i+1] = comp + break + + def get_components(self, ectx): + lst = self.elt_list[:] + if hasattr(self, 'elt_list2'): + lst.extend(self.elt_list2) + return lst + + def eth_reg_sub(self, ident, ectx, 
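+# eth_reg_sub() (whose signature continues below) first deals with any
+# COMPONENTS OF references: expand_components() above splices the referenced
+# SEQUENCE's own component list into elt_list in place of the placeholder
+# entry.  A minimal sketch of that in-place splice (illustrative only):
+#
+#   # elt_list = [a, <COMPONENTS OF T>, c]; T's components = [x, y]
+#   for i, e in enumerate(elt_list):
+#       if e.type == 'components_of':
+#           elt_list[i:i+1] = components_of_T   # -> [a, x, y, c]
+#           break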
components_available=False): + # check if autotag is required + autotag = False + if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')): + autotag = True + lst = self.all_components() + for e in (self.elt_list): + if e.val.HasOwnTag(): autotag = False; break; + # expand COMPONENTS OF + if self.need_components(): + if components_available: + self.expand_components(ectx) + else: + ectx.eth_comp_req(ident) + return + # extension addition groups + if hasattr(self, 'ext_list'): + if (ectx.Per() or ectx.Oer()): # add names + eag_num = 1 + for e in (self.ext_list): + if isinstance(e.val, ExtensionAdditionGroup): + e.val.parent_ident = ident + e.val.parent_tname = ectx.type[ident]['tname'] + if (e.val.ver): + e.val.SetName("eag_v%s" % (e.val.ver)) + else: + e.val.SetName("eag_%d" % (eag_num)) + eag_num += 1; + else: # expand + new_ext_list = [] + for e in (self.ext_list): + if isinstance(e.val, ExtensionAdditionGroup): + new_ext_list.extend(e.val.elt_list) + else: + new_ext_list.append(e) + self.ext_list = new_ext_list + # do autotag + if autotag: + atag = 0 + for e in (self.elt_list): + e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT')) + atag += 1 + if autotag and hasattr(self, 'elt_list2'): + for e in (self.elt_list2): + e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT')) + atag += 1 + if autotag and hasattr(self, 'ext_list'): + for e in (self.ext_list): + e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT')) + atag += 1 + # register components + for e in (self.elt_list): + e.val.eth_reg(ident, ectx, tstrip=1, parent=ident) + if hasattr(self, 'ext_list'): + for e in (self.ext_list): + e.val.eth_reg(ident, ectx, tstrip=1, parent=ident) + if hasattr(self, 'elt_list2'): + for e in (self.elt_list2): + e.val.eth_reg(ident, ectx, tstrip=1, parent=ident) + + def eth_type_default_table(self, ectx, tname): + #print ("eth_type_default_table(tname='%s')" % (tname)) + fname = ectx.eth_type[tname]['ref'][0] + table = "static const %(ER)s_sequence_t %(TABLE)s[] = {\n" + if hasattr(self, 'ext_list'): + ext = 'ASN1_EXTENSION_ROOT' + else: + ext = 'ASN1_NO_EXTENSIONS' + empty_ext_flag = '0' + if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0) and (not hasattr(self, 'elt_list2') or (len(self.elt_list2)==0)): + empty_ext_flag = ext + for e in (self.elt_list): + f = fname + '/' + e.val.name + table += self.out_item(f, e.val, e.optional, ext, ectx) + if hasattr(self, 'ext_list'): + for e in (self.ext_list): + f = fname + '/' + e.val.name + table += self.out_item(f, e.val, e.optional, 'ASN1_NOT_EXTENSION_ROOT', ectx) + if hasattr(self, 'elt_list2'): + for e in (self.elt_list2): + f = fname + '/' + e.val.name + table += self.out_item(f, e.val, e.optional, ext, ectx) + if (ectx.Ber()): + table += " { NULL, 0, 0, 0, NULL }\n};\n" + else: + table += " { NULL, %s, 0, NULL }\n};\n" % (empty_ext_flag) + return table + +#--- SeqOfType ----------------------------------------------------------- +class SeqOfType (SqType): + def eth_type_default_table(self, ectx, tname): + #print "eth_type_default_table(tname='%s')" % (tname) + fname = ectx.eth_type[tname]['ref'][0] + if self.val.IsNamed (): + f = fname + '/' + self.val.name + else: + f = fname + '/' + ITEM_FIELD_NAME + table = "static const %(ER)s_sequence_t %(TABLE)s[1] = {\n" + table += self.out_item(f, self.val, False, 'ASN1_NO_EXTENSIONS', ectx) + table += "};\n" + return table + +#--- SequenceOfType ----------------------------------------------------------- +class SequenceOfType 
(SeqOfType): + def to_python (self, ctx): + # name, tag (None for no tag, EXPLICIT() for explicit), typ) + # or '' + (1,) for optional + sizestr = '' + if self.size_constr is not None: + print("#Ignoring size constraint:", self.size_constr.subtype) + return "%sasn1.SEQUENCE_OF (%s%s)" % (ctx.spaces (), + self.val.to_python (ctx), + sizestr) + + def eth_reg_sub(self, ident, ectx): + itmnm = ident + if not self.val.IsNamed (): + itmnm += '/' + ITEM_FIELD_NAME + self.val.eth_reg(itmnm, ectx, tstrip=1, idx='[##]', parent=ident) + + def eth_tname(self): + if self.val.type != 'Type_Ref': + return '#' + self.type + '_' + str(id(self)) + if not self.HasConstraint(): + return "SEQUENCE_OF_" + self.val.eth_tname() + elif self.constr.IsSize(): + return 'SEQUENCE_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname() + else: + return '#' + self.type + '_' + str(id(self)) + + def eth_ftype(self, ectx): + return ('FT_UINT32', 'BASE_DEC') + + def eth_need_tree(self): + return True + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx) + pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence_of' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + if (ectx.constraints_check and self.HasSizeConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),)) + elif ((ectx.Per() or ectx.Oer()) and not self.HasConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(ETT_INDEX)s', '%(TABLE)s',),)) + elif ((ectx.Per() or ectx.Oer()) and self.constr.type == 'Size'): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(ETT_INDEX)s', '%(TABLE)s',), + ('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s'),)) + else: + body = '#error Can not decode SequenceOfType %s' % (tname) + return body + + +#--- SetOfType ---------------------------------------------------------------- +class SetOfType (SeqOfType): + def eth_reg_sub(self, ident, ectx): + itmnm = ident + if not self.val.IsNamed (): + itmnm += '/' + ITEM_FIELD_NAME + self.val.eth_reg(itmnm, ectx, tstrip=1, idx='(##)', parent=ident) + + def eth_tname(self): + if self.val.type != 'Type_Ref': + return '#' + self.type + '_' + str(id(self)) + if not self.HasConstraint(): + return "SET_OF_" + self.val.eth_tname() + elif self.constr.IsSize(): + return 'SET_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname() + else: + return '#' + self.type + '_' + str(id(self)) + + def eth_ftype(self, ectx): + return ('FT_UINT32', 'BASE_DEC') + + def eth_need_tree(self): + return True + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx) 
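+# Constrained SET OF / SEQUENCE OF types get a generated type name that
+# encodes the size constraint between the keyword and the element type, e.g.
+# SET SIZE(1..32) OF Item becomes 'SET_SIZE_1_32_OF_Item' (see eth_tname()
+# above).  A minimal sketch of the rule, with hypothetical names:
+#
+#   def set_of_tname(elem, constrname=None):
+#       if constrname is None:
+#           return 'SET_OF_' + elem                  # unconstrained
+#       return 'SET_' + constrname + '_OF_' + elem   # e.g. 'SIZE_1_32'
+#
+#   assert set_of_tname('Item', 'SIZE_1_32') == 'SET_SIZE_1_32_OF_Item'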
+        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set_of'
+        return pars
+
+    def eth_type_default_body(self, ectx, tname):
+        if (ectx.Ber()):
+            if (ectx.constraints_check and self.HasSizeConstraint()):
+                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
+                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
+                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
+            else:
+                body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
+                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
+                                             ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
+        elif (ectx.Per() and not self.HasConstraint()):
+            body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
+                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
+                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
+        elif (ectx.Per() and self.constr.type == 'Size'):
+            body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
+                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
+                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
+                                         ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
+        else:
+            body = '#error Can not decode SetOfType %s' % (tname)
+        return body
+
+def mk_tag_str (ctx, cls, typ, num):
+
+    # XXX should do conversion to int earlier!
+    val = int (num)
+    typ = typ.upper()
+    if typ == 'DEFAULT':
+        typ = ctx.tags_def
+    return 'asn1.%s(%d,cls=asn1.%s_FLAG)' % (typ, val, cls) # XXX still needed?
+
+#--- SequenceType -------------------------------------------------------------
+class SequenceType (SeqType):
+    def to_python (self, ctx):
+        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
+        # or '' + (1,) for optional
+        # XXX should also collect names for SEQUENCE inside SEQUENCE or
+        # CHOICE or SEQUENCE_OF (where should the SEQUENCE_OF name come
+        # from? for others, element or arm name would be fine)
+        seq_name = getattr (self, 'sequence_name', None)
+        if seq_name is None:
+            seq_name = 'None'
+        else:
+            seq_name = "'" + seq_name + "'"
+        if 'ext_list' in self.__dict__:
+            return "%sasn1.SEQUENCE ([%s], ext=[%s], seq_name = %s)" % (ctx.spaces (),
+                                                                        self.elts_to_py (self.elt_list, ctx),
+                                                                        self.elts_to_py (self.ext_list, ctx), seq_name)
+        else:
+            return "%sasn1.SEQUENCE ([%s], seq_name = %s)" % (ctx.spaces (),
+                                                              self.elts_to_py (self.elt_list, ctx), seq_name)
+    def elts_to_py (self, list, ctx):
+        # we have elt_type, val= named_type, maybe default=, optional=
+        # named_type node: either ident = or typ =
+        # need to dismember these in order to generate Python output syntax.
+        ctx.indent ()
+        def elt_to_py (e):
+            assert (e.type == 'elt_type')
+            nt = e.val
+            optflag = e.optional
+            #assert (not hasattr (e, 'default')) # XXX add support for DEFAULT!
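+            # For the legacy asn1.py backend each SEQUENCE element is emitted
+            # as a 4-tuple ('name', tag, type, optional).  A sketch of the
+            # shape produced here (all values hypothetical):
+            #
+            #   ('version', asn1.EXPLICIT(0,cls=asn1.CONTEXT_FLAG),
+            #    asn1.INTEGER_class ([]), 0)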
+ assert (nt.type == 'named_type') + tagstr = 'None' + identstr = nt.ident + if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh + tagstr = mk_tag_str (ctx,nt.typ.tag.cls, + nt.typ.tag.tag_typ,nt.typ.tag.num) + + + nt = nt.typ + return "('%s',%s,%s,%d)" % (identstr, tagstr, + nt.typ.to_python (ctx), optflag) + indentstr = ",\n" + ctx.spaces () + rv = indentstr.join ([elt_to_py (e) for e in list]) + ctx.outdent () + return rv + + def eth_need_tree(self): + return True + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),)) + elif (ectx.Per() or ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(ETT_INDEX)s', '%(TABLE)s',),)) + else: + body = '#error Can not decode SequenceType %s' % (tname) + return body + +#--- ExtensionAdditionGroup --------------------------------------------------- +class ExtensionAdditionGroup (SeqType): + def __init__(self,*args, **kw) : + self.parent_ident = None + self.parent_tname = None + SeqType.__init__ (self,*args, **kw) + + def eth_omit_field(self): + return True + + def eth_tname(self): + if (self.parent_tname and self.IsNamed()): + return self.parent_tname + "_" + self.name + else: + return SeqType.eth_tname(self) + + def eth_reg_sub(self, ident, ectx): + ectx.eth_dummy_eag_field_required() + ectx.eth_dep_add(self.parent_ident, ident) + SeqType.eth_reg_sub(self, ident, ectx) + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_sequence_eag', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(TABLE)s',),)) + else: + body = '#error Can not decode ExtensionAdditionGroup %s' % (tname) + return body + + +#--- SetType ------------------------------------------------------------------ +class SetType (SeqType): + + def eth_need_tree(self): + return True + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),)) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(ETT_INDEX)s', '%(TABLE)s',),)) + else: + body = '#error Can not decode SetType %s' % (tname) + return body + +#--- ChoiceType --------------------------------------------------------------- +class ChoiceType (Type): + def to_python (self, ctx): + # name, tag (None for no tag, EXPLICIT() for explicit), typ) + # or '' + (1,) for optional + if 'ext_list' in 
self.__dict__: + return "%sasn1.CHOICE ([%s], ext=[%s])" % (ctx.spaces (), + self.elts_to_py (self.elt_list, ctx), + self.elts_to_py (self.ext_list, ctx)) + else: + return "%sasn1.CHOICE ([%s])" % (ctx.spaces (), self.elts_to_py (self.elt_list, ctx)) + def elts_to_py (self, list, ctx): + ctx.indent () + def elt_to_py (nt): + assert (nt.type == 'named_type') + tagstr = 'None' + if hasattr (nt, 'ident'): + identstr = nt.ident + else: + if hasattr (nt.typ, 'val'): + identstr = nt.typ.val # XXX, making up name + elif hasattr (nt.typ, 'name'): + identstr = nt.typ.name + else: + identstr = ctx.make_new_name () + + if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh + tagstr = mk_tag_str (ctx,nt.typ.tag.cls, + nt.typ.tag.tag_typ,nt.typ.tag.num) + + + nt = nt.typ + return "('%s',%s,%s)" % (identstr, tagstr, + nt.typ.to_python (ctx)) + indentstr = ",\n" + ctx.spaces () + rv = indentstr.join ([elt_to_py (e) for e in list]) + ctx.outdent () + return rv + + def eth_reg_sub(self, ident, ectx): + #print "eth_reg_sub(ident='%s')" % (ident) + # check if autotag is required + autotag = False + if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')): + autotag = True + for e in (self.elt_list): + if e.HasOwnTag(): autotag = False; break; + if autotag and hasattr(self, 'ext_list'): + for e in (self.ext_list): + if e.HasOwnTag(): autotag = False; break; + # do autotag + if autotag: + atag = 0 + for e in (self.elt_list): + e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT')) + atag += 1 + if autotag and hasattr(self, 'ext_list'): + for e in (self.ext_list): + e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT')) + atag += 1 + for e in (self.elt_list): + e.eth_reg(ident, ectx, tstrip=1, parent=ident) + if ectx.conform.check_item('EXPORTS', ident + '.' + e.name): + ectx.eth_sel_req(ident, e.name) + if hasattr(self, 'ext_list'): + for e in (self.ext_list): + e.eth_reg(ident, ectx, tstrip=1, parent=ident) + if ectx.conform.check_item('EXPORTS', ident + '.' 
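+# Under AUTOMATIC TAGS, and only when no alternative carries a tag of its
+# own, the autotag pass above gives every alternative a context-specific
+# IMPLICIT tag numbered 0, 1, 2, ... across the root list and then the
+# extension list.  A minimal sketch of that numbering (illustrative only):
+#
+#   atag = 0
+#   for e in elt_list + ext_list:
+#       e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
+#       atag += 1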
+ e.name): + ectx.eth_sel_req(ident, e.name) + + def sel_item(self, ident, sel, ectx): + lst = self.elt_list[:] + if hasattr(self, 'ext_list'): + lst.extend(self.ext_list) + ee = None + for e in (self.elt_list): + if e.IsNamed() and (e.name == sel): + ee = e + break + if not ee: + print("#CHOICE %s does not contain item %s" % (ident, sel)) + return ee + + def sel_req(self, ident, sel, ectx): + #print "sel_req(ident='%s', sel=%s)\n%s" % (ident, sel, str(self)) + ee = self.sel_item(ident, sel, ectx) + if ee: + ee.eth_reg(ident, ectx, tstrip=0, selflag=True) + + def eth_ftype(self, ectx): + return ('FT_UINT32', 'BASE_DEC') + + def eth_ftype_sel(self, sel, ectx): + ee = self.sel_item('', sel, ectx) + if ee: + return ee.eth_ftype(ectx) + else: + return ('FT_NONE', 'BASE_NONE') + + def eth_strings(self): + return '$$' + + def eth_need_tree(self): + return True + + def eth_has_vals(self): + return True + + def GetTTag(self, ectx): + lst = self.elt_list + cls = 'BER_CLASS_ANY/*choice*/' + #if hasattr(self, 'ext_list'): + # lst.extend(self.ext_list) + #if (len(lst) > 0): + # cls = lst[0].GetTag(ectx)[0] + #for e in (lst): + # if (e.GetTag(ectx)[0] != cls): + # cls = '-1/*choice*/' + return (cls, '-1/*choice*/') + + def GetTTagSel(self, sel, ectx): + ee = self.sel_item('', sel, ectx) + if ee: + return ee.GetTag(ectx) + else: + return ('BER_CLASS_ANY/*unknown selection*/', '-1/*unknown selection*/') + + def IndetermTag(self, ectx): + #print "Choice IndetermTag()=%s" % (str(not self.HasOwnTag())) + return not self.HasOwnTag() + + def detect_tagval(self, ectx): + tagval = False + lst = self.elt_list[:] + if hasattr(self, 'ext_list'): + lst.extend(self.ext_list) + if (len(lst) > 0) and (not (ectx.Per() or ectx.Oer()) or lst[0].HasOwnTag()): + t = lst[0].GetTag(ectx)[0] + tagval = True + else: + t = '' + tagval = False + if (t == 'BER_CLASS_UNI'): + tagval = False + for e in (lst): + if not (ectx.Per() or ectx.Oer()) or e.HasOwnTag(): + tt = e.GetTag(ectx)[0] + else: + tt = '' + tagval = False + if (tt != t): + tagval = False + return tagval + + def get_vals(self, ectx): + tagval = self.detect_tagval(ectx) + vals = [] + cnt = 0 + for e in (self.elt_list): + if (tagval): val = e.GetTag(ectx)[1] + else: val = str(cnt) + vals.append((val, e.name)) + cnt += 1 + if hasattr(self, 'ext_list'): + for e in (self.ext_list): + if (tagval): val = e.GetTag(ectx)[1] + else: val = str(cnt) + vals.append((val, e.name)) + cnt += 1 + return vals + + def eth_type_vals(self, tname, ectx): + out = '\n' + vals = self.get_vals(ectx) + out += ectx.eth_vals(tname, vals) + return out + + def reg_enum_vals(self, tname, ectx): + vals = self.get_vals(ectx) + for (val, id) in vals: + ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id)) + + def eth_type_enum(self, tname, ectx): + out = '\n' + vals = self.get_vals(ectx) + out += ectx.eth_enum(tname, vals) + return out + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['TABLE'] = '%(PROTOP)s%(TNAME)s_choice' + return pars + + def eth_type_default_table(self, ectx, tname): + def out_item(val, e, ext, ectx): + has_enum = ectx.eth_type[tname]['enum'] & EF_ENUM + if (has_enum): + vval = ectx.eth_enum_item(tname, e.name) + else: + vval = val + f = fname + '/' + e.name + ef = ectx.field[f]['ethname'] + t = ectx.eth_hf[ef]['ethtype'] + if (ectx.Ber()): + opt = '' + if (not e.HasOwnTag()): + opt = 'BER_FLAGS_NOOWNTAG' + elif (e.HasImplicitTag(ectx)): + if (opt): opt += '|' + opt += 'BER_FLAGS_IMPLTAG' + if (not 
opt): opt = '0' + if (ectx.Ber()): + (tc, tn) = e.GetTag(ectx) + out = ' { %3s, %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \ + % (vval, '&'+ectx.eth_hf[ef]['fullname'], tc, tn, opt, ectx.eth_type[t]['proto'], t) + elif (ectx.Per() or ectx.Oer()): + out = ' { %3s, %-24s, %-23s, dissect_%s_%s },\n' \ + % (vval, '&'+ectx.eth_hf[ef]['fullname'], ext, ectx.eth_type[t]['proto'], t) + else: + out = '' + return out + # end out_item() + #print "eth_type_default_table(tname='%s')" % (tname) + fname = ectx.eth_type[tname]['ref'][0] + tagval = self.detect_tagval(ectx) + table = "static const %(ER)s_choice_t %(TABLE)s[] = {\n" + cnt = 0 + if hasattr(self, 'ext_list'): + ext = 'ASN1_EXTENSION_ROOT' + else: + ext = 'ASN1_NO_EXTENSIONS' + empty_ext_flag = '0' + if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0): + empty_ext_flag = ext + for e in (self.elt_list): + if (tagval): val = e.GetTag(ectx)[1] + else: val = str(cnt) + table += out_item(val, e, ext, ectx) + cnt += 1 + if hasattr(self, 'ext_list'): + for e in (self.ext_list): + if (tagval): val = e.GetTag(ectx)[1] + else: val = str(cnt) + table += out_item(val, e, 'ASN1_NOT_EXTENSION_ROOT', ectx) + cnt += 1 + if (ectx.Ber()): + table += " { 0, NULL, 0, 0, 0, NULL }\n};\n" + else: + table += " { 0, NULL, %s, NULL }\n};\n" % (empty_ext_flag) + return table + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset', + par=(('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s'), + ('%(VAL_PTR)s',),)) + elif (ectx.Per() or ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(ETT_INDEX)s', '%(TABLE)s',), + ('%(VAL_PTR)s',),)) + else: + body = '#error Can not decode ChoiceType %s' % (tname) + return body + +#--- ChoiceValue ---------------------------------------------------- +class ChoiceValue (Value): + def to_str(self, ectx): + return self.val.to_str(ectx) + + def fld_obj_eq(self, other): + return isinstance(other, ChoiceValue) and (self.choice == other.choice) and (str(self.val.val) == str(other.val.val)) + +#--- EnumeratedType ----------------------------------------------------------- +class EnumeratedType (Type): + def to_python (self, ctx): + def strify_one (named_num): + return "%s=%s" % (named_num.ident, named_num.val) + return "asn1.ENUM(%s)" % ",".join (map (strify_one, self.val)) + + def eth_ftype(self, ectx): + return ('FT_UINT32', 'BASE_DEC') + + def eth_strings(self): + return '$$' + + def eth_has_vals(self): + return True + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_ENUMERATED') + + def get_vals_etc(self, ectx): + vals = [] + lastv = 0 + used = {} + maxv = 0 + root_num = 0 + ext_num = 0 + map_table = [] + for e in (self.val): + if e.type == 'NamedNumber': + used[int(e.val)] = True + for e in (self.val): + if e.type == 'NamedNumber': + val = int(e.val) + else: + while lastv in used: + lastv += 1 + val = lastv + used[val] = True + vals.append((val, e.ident)) + map_table.append(val) + root_num += 1 + if val > maxv: + maxv = val + if self.ext is not None: + for e in (self.ext): + if e.type == 'NamedNumber': + used[int(e.val)] = True + for e in (self.ext): + if e.type == 'NamedNumber': + val = int(e.val) + else: + while lastv in used: + lastv += 1 + val = lastv + used[val] = True + vals.append((val, e.ident)) + map_table.append(val) + ext_num += 1 + if val > maxv: + 
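+# get_vals_etc() assigns ENUMERATED values the way X.680 prescribes: named
+# numbers keep their explicit value (claimed in a first pass), and every
+# unnumbered item takes the lowest value not yet used.  A minimal sketch of
+# the allocator (identifiers are hypothetical):
+#
+#   used, lastv, vals = {2: True}, 0, []   # 'b(2)' was claimed up front
+#   for ident, explicit in [('a', None), ('b', 2), ('c', None)]:
+#       if explicit is None:
+#           while lastv in used:
+#               lastv += 1
+#           explicit = lastv
+#       used[explicit] = True
+#       vals.append((explicit, ident))
+#   assert vals == [(0, 'a'), (2, 'b'), (1, 'c')]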
maxv = val + need_map = False + for i in range(len(map_table)): + need_map = need_map or (map_table[i] != i) + if (not need_map): + map_table = None + return (vals, root_num, ext_num, map_table) + + def eth_type_vals(self, tname, ectx): + out = '\n' + vals = self.get_vals_etc(ectx)[0] + out += ectx.eth_vals(tname, vals) + return out + + def reg_enum_vals(self, tname, ectx): + vals = self.get_vals_etc(ectx)[0] + for (val, id) in vals: + ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id)) + + def eth_type_enum(self, tname, ectx): + out = '\n' + vals = self.get_vals_etc(ectx)[0] + out += ectx.eth_enum(tname, vals) + return out + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + (root_num, ext_num, map_table) = self.get_vals_etc(ectx)[1:] + if self.ext is not None: + ext = 'TRUE' + else: + ext = 'FALSE' + pars['ROOT_NUM'] = str(root_num) + pars['EXT'] = ext + pars['EXT_NUM'] = str(ext_num) + if (map_table): + pars['TABLE'] = '%(PROTOP)s%(TNAME)s_value_map' + else: + pars['TABLE'] = 'NULL' + return pars + + def eth_type_default_table(self, ectx, tname): + if (not ectx.Per() and not ectx.Oer()): return '' + map_table = self.get_vals_etc(ectx)[3] + if map_table is None: return '' + table = "static uint32_t %(TABLE)s[%(ROOT_NUM)s+%(EXT_NUM)s] = {" + table += ", ".join([str(v) for v in map_table]) + table += "};\n" + return table + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + if (ectx.constraints_check and self.HasValueConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_integer', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'), + ('%(VAL_PTR)s',),)) + elif (ectx.Per() or ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_enumerated', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(ROOT_NUM)s', '%(VAL_PTR)s', '%(EXT)s', '%(EXT_NUM)s', '%(TABLE)s',),)) + else: + body = '#error Can not decode EnumeratedType %s' % (tname) + return body + +#--- EmbeddedPDVType ----------------------------------------------------------- +class EmbeddedPDVType (Type): + def eth_tname(self): + return 'EMBEDDED_PDV' + + def eth_ftype(self, ectx): + return ('FT_NONE', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_EMBEDDED_PDV') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + if ectx.default_embedded_pdv_cb: + pars['TYPE_REF_FN'] = ectx.default_embedded_pdv_cb + else: + pars['TYPE_REF_FN'] = 'NULL' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_EmbeddedPDV_Type', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),)) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_embedded_pdv', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),)) + else: + body = '#error Can not decode EmbeddedPDVType %s' % (tname) + return body + +#--- ExternalType ----------------------------------------------------------- +class ExternalType (Type): + def eth_tname(self): + return 'EXTERNAL' + + def 
eth_ftype(self, ectx): + return ('FT_NONE', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + if ectx.default_external_type_cb: + pars['TYPE_REF_FN'] = ectx.default_external_type_cb + else: + pars['TYPE_REF_FN'] = 'NULL' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),)) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),)) + else: + body = '#error Can not decode ExternalType %s' % (tname) + return body + +#--- OpenType ----------------------------------------------------------- +class OpenType (Type): + def to_python (self, ctx): + return "asn1.ANY" + + def single_type(self): + if (self.HasConstraint() and + self.constr.type == 'Type' and + self.constr.subtype.type == 'Type_Ref'): + return self.constr.subtype.val + return None + + def eth_reg_sub(self, ident, ectx): + t = self.single_type() + if t: + ectx.eth_dep_add(ident, t) + + def eth_tname(self): + t = self.single_type() + if t: + return 'OpenType_' + t + else: + return Type.eth_tname(self) + + def eth_ftype(self, ectx): + return ('FT_NONE', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_ANY', '0') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['FN_VARIANT'] = ectx.default_opentype_variant + t = self.single_type() + if t: + t = ectx.type[t]['ethname'] + pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto'] + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s' + else: + pars['TYPE_REF_FN'] = 'NULL' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Per() or ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_open_type%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),)) + else: + body = '#error Can not decode OpenType %s' % (tname) + return body + +#--- InstanceOfType ----------------------------------------------------------- +class InstanceOfType (Type): + def eth_tname(self): + return 'INSTANCE_OF' + + def eth_ftype(self, ectx): + return ('FT_NONE', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + if ectx.default_external_type_cb: + pars['TYPE_REF_FN'] = ectx.default_external_type_cb + else: + pars['TYPE_REF_FN'] = 'NULL' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- AnyType ----------------------------------------------------------- +class AnyType (Type): + def to_python (self, ctx): + return "asn1.ANY" + + def eth_ftype(self, ectx): + return ('FT_NONE', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_ANY', '0') + + def eth_type_default_body(self, ectx, 
tname): + body = '#error Can not decode %s' % (tname) + return body + +class Literal (Node): + def to_python (self, ctx): + return self.val + +#--- NullType ----------------------------------------------------------------- +class NullType (Type): + def to_python (self, ctx): + return 'asn1.NULL' + + def eth_tname(self): + return 'NULL' + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_NULL') + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),)) + elif (ectx.Per() or ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- NullValue ---------------------------------------------------- +class NullValue (Value): + def to_str(self, ectx): + return 'NULL' + +#--- RealType ----------------------------------------------------------------- +class RealType (Type): + def to_python (self, ctx): + return 'asn1.REAL' + + def eth_tname(self): + return 'REAL' + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_REAL') + + def eth_ftype(self, ectx): + return ('FT_DOUBLE', 'BASE_NONE') + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'), + ('%(VAL_PTR)s',),)) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- BooleanType -------------------------------------------------------------- +class BooleanType (Type): + def to_python (self, ctx): + return 'asn1.BOOLEAN' + + def eth_tname(self): + return 'BOOLEAN' + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_BOOLEAN') + + def eth_ftype(self, ectx): + return ('FT_BOOLEAN', 'BASE_NONE') + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),)) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + elif (ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- OctetStringType ---------------------------------------------------------- +class OctetStringType (Type): + def to_python (self, ctx): + return 'asn1.OCTSTRING' + + def eth_tname(self): + if not self.HasConstraint(): + return 'OCTET_STRING' + elif self.constr.type == 'Size': + return 'OCTET_STRING' + '_' + self.constr.eth_constrname() + else: + return '#' + self.type + '_' + str(id(self)) + + def eth_ftype(self, ectx): + return ('FT_BYTES', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_OCTETSTRING') + + def eth_need_pdu(self, ectx): + pdu = None + if self.HasContentsConstraint(): + t = self.constr.GetContents(ectx) + if 
t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')): + pdu = { 'type' : t, + 'new' : ectx.default_containing_variant == '_pdu_new' } + return pdu + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx) + if self.HasContentsConstraint(): + pars['FN_VARIANT'] = ectx.default_containing_variant + t = self.constr.GetContents(ectx) + if t: + if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'): + t = ectx.field[t]['ethname'] + pars['TYPE_REF_PROTO'] = '' + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s' + else: + t = ectx.type[t]['ethname'] + pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto'] + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s' + else: + pars['TYPE_REF_FN'] = 'NULL' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + if (ectx.constraints_check and self.HasSizeConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_octet_string', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'), + ('%(VAL_PTR)s',),)) + elif (ectx.Per() or ectx.Oer()): + if self.HasContentsConstraint(): + body = ectx.eth_fn_call('dissect_%(ER)s_octet_string_containing%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(VAL_PTR)s',),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- CharacterStringType ------------------------------------------------------ +class CharacterStringType (Type): + def eth_tname(self): + if not self.HasConstraint(): + return self.eth_tsname() + elif self.constr.type == 'Size': + return self.eth_tsname() + '_' + self.constr.eth_constrname() + else: + return '#' + self.type + '_' + str(id(self)) + + def eth_ftype(self, ectx): + return ('FT_STRING', 'BASE_NONE') + +class RestrictedCharacterStringType (CharacterStringType): + def to_python (self, ctx): + return 'asn1.' 
+ self.eth_tsname() + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_' + self.eth_tsname()) + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx) + (pars['STRING_TYPE'], pars['STRING_TAG']) = (self.eth_tsname(), self.GetTTag(ectx)[1]) + (pars['ALPHABET'], pars['ALPHABET_LEN']) = self.eth_get_alphabet_constr(ectx) + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + if (ectx.constraints_check and self.HasSizeConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_restricted_string', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'), + ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_restricted_string', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'), + ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'), + ('%(VAL_PTR)s',),)) + elif (ectx.Per() and self.HasPermAlph() and self.eth_tsname() in KnownMultiplierStringTypes): + # XXX: If there is a permitted alphabet but it is extensible, + # then the permitted-alphabet is not PER-visible and should be + # ignored. (X.691 9.3.10, 9.3.18) We don't handle extensible + # permitted-alphabets. + body = ectx.eth_fn_call('dissect_%(ER)s_restricted_character_string', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(ALPHABET)s', '%(ALPHABET_LEN)s'), + ('%(VAL_PTR)s',),)) + elif (ectx.Per()): + if (self.eth_tsname() == 'GeneralString'): + body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),)) + elif (self.eth_tsname() == 'GeneralizedTime' or self.eth_tsname() == 'UTCTime'): + body = ectx.eth_fn_call('dissect_%(ER)s_VisibleString', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s'), + ('%(VAL_PTR)s',),)) + elif (self.eth_tsname() in KnownMultiplierStringTypes): + body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s'), + ('%(VAL_PTR)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),)) + elif (ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +class BMPStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'BMPString' + +class GeneralStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'GeneralString' + +class GraphicStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'GraphicString' + +class IA5StringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'IA5String' + +class NumericStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'NumericString' + +class PrintableStringType (RestrictedCharacterStringType): + def eth_tsname(self): 
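+        # Each RestrictedCharacterStringType subclass only names itself via
+        # eth_tsname(); GetTTag() above then derives the universal BER tag
+        # mechanically as 'BER_UNI_TAG_' + tsname.  Sketched:
+        #
+        #   ('BER_CLASS_UNI', 'BER_UNI_TAG_' + 'PrintableString')
+        #   # == ('BER_CLASS_UNI', 'BER_UNI_TAG_PrintableString')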
+ return 'PrintableString' + +class TeletexStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'TeletexString' + +class T61StringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'T61String' + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_TeletexString') + +class UniversalStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'UniversalString' + +class UTF8StringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'UTF8String' + +class VideotexStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'VideotexString' + +class VisibleStringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'VisibleString' + +class ISO646StringType (RestrictedCharacterStringType): + def eth_tsname(self): + return 'ISO646String' + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_VisibleString') + +class UnrestrictedCharacterStringType (CharacterStringType): + def to_python (self, ctx): + return 'asn1.UnrestrictedCharacterString' + def eth_tsname(self): + return 'CHARACTER_STRING' + +#--- UsefulType --------------------------------------------------------------- +class GeneralizedTime (RestrictedCharacterStringType): + def eth_tsname(self): + return 'GeneralizedTime' + + def eth_ftype(self, ectx): + if (ectx.Ber()): + return ('FT_ABSOLUTE_TIME', 'ABSOLUTE_TIME_LOCAL') + else: + return ('FT_STRING', 'BASE_NONE') + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),)) + return body + else: + return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname) + +class UTCTime (RestrictedCharacterStringType): + def eth_tsname(self): + return 'UTCTime' + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', 'NULL', 'NULL'),)) + return body + else: + return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname) + +class ObjectDescriptor (RestrictedCharacterStringType): + def eth_tsname(self): + return 'ObjectDescriptor' + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_object_descriptor', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- ObjectIdentifierType ----------------------------------------------------- +class ObjectIdentifierType (Type): + def to_python (self, ctx): + return 'asn1.OBJECT_IDENTIFIER' + + def eth_tname(self): + return 'OBJECT_IDENTIFIER' + + def eth_ftype(self, ectx): + return ('FT_OID', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_OID') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['FN_VARIANT'] = ectx.default_oid_variant + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', 
'%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + elif (ectx.Oer()): + body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- ObjectIdentifierValue ---------------------------------------------------- +class ObjectIdentifierValue (Value): + def get_num(self, path, val): + return str(oid_names.get(path + '/' + val, val)) + + def to_str(self, ectx): + out = '' + path = '' + first = True + sep = '' + for v in self.comp_list: + if isinstance(v, Node) and (v.type == 'name_and_number'): + vstr = v.number + elif v.isdigit(): + vstr = v + else: + vstr = self.get_num(path, v) + if not first and not vstr.isdigit(): + vstr = ectx.value_get_val(vstr) + if first: + if vstr.isdigit(): + out += '"' + vstr + else: + out += ectx.value_get_eth(vstr) + '"' + else: + out += sep + vstr + path += sep + vstr + first = False + sep = '.' + out += '"' + return out + + def get_dep(self): + v = self.comp_list[0] + if isinstance(v, Node) and (v.type == 'name_and_number'): + return None + elif v.isdigit(): + return None + else: + vstr = self.get_num('', v) + if vstr.isdigit(): + return None + else: + return vstr + +class NamedNumber(Node): + def to_python (self, ctx): + return "('%s',%s)" % (self.ident, self.val) + def __lt__(self, other): + return int(self.val) < int(other.val) + +class NamedNumListBase(Node): + def to_python (self, ctx): + return "asn1.%s_class ([%s])" % (self.asn1_typ,",".join ( + [x.to_python (ctx) for x in self.named_list])) + +#--- RelativeOIDType ---------------------------------------------------------- +class RelativeOIDType (Type): + + def eth_tname(self): + return 'RELATIVE_OID' + + def eth_ftype(self, ectx): + return ('FT_REL_OID', 'BASE_NONE') + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_RELATIVE_OID') + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['FN_VARIANT'] = ectx.default_oid_variant + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + elif (ectx.Per()): + body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = '#error Can not decode relative_oid %s' % (tname) + return body + + +#--- IntegerType -------------------------------------------------------------- +class IntegerType (Type): + def to_python (self, ctx): + return "asn1.INTEGER_class ([%s])" % (",".join ( + [x.to_python (ctx) for x in self.named_list])) + + def add_named_value(self, ident, val): + e = NamedNumber(ident = ident, val = val) + if not self.named_list: + self.named_list = [] + self.named_list.append(e) + + def eth_tname(self): + if self.named_list: + return Type.eth_tname(self) + if not self.HasConstraint(): + return 'INTEGER' + elif self.constr.type == 'SingleValue' or self.constr.type == 'ValueRange': + return 'INTEGER' + '_' + self.constr.eth_constrname() + else: + 
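+        # eth_ftype() below picks the Wireshark field type from the value
+        # constraint: unsigned when the range cannot go negative, and 64-bit
+        # when it does not fit 32 bits (Constraint.Needs64b(): span >= 2**32,
+        # min < -2**31, max >= 2**32, or an unbounded MIN/MAX).  A sketch of
+        # the decision, assuming a plain numeric range:
+        #
+        #   def int_ftype(minv, maxv):
+        #       needs64 = (maxv - minv) >= 2**32 or minv < -2**31 or maxv >= 2**32
+        #       if minv >= 0:
+        #           return 'FT_UINT64' if needs64 else 'FT_UINT32'
+        #       return 'FT_INT64' if needs64 else 'FT_INT32'
+        #
+        #   assert int_ftype(0, 65535) == 'FT_UINT32'
+        #   assert int_ftype(-1, 2**32) == 'FT_INT64'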
return 'INTEGER' + '_' + self.constr.eth_tname() + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_INTEGER') + + + def eth_ftype(self, ectx): + if self.HasConstraint(): + if not self.constr.IsNegativ(): + if self.constr.Needs64b(ectx): + return ('FT_UINT64', 'BASE_DEC') + else: + return ('FT_UINT32', 'BASE_DEC') + if self.constr.Needs64b(ectx): + return ('FT_INT64', 'BASE_DEC') + return ('FT_INT32', 'BASE_DEC') + + def eth_strings(self): + if (self.named_list): + return '$$' + else: + return 'NULL' + + def eth_has_vals(self): + if (self.named_list): + return True + else: + return False + + def get_vals(self, ectx): + vals = [] + for e in (self.named_list): + vals.append((int(e.val), e.ident)) + return vals + + def eth_type_vals(self, tname, ectx): + if not self.eth_has_vals(): return '' + out = '\n' + vals = self.get_vals(ectx) + out += ectx.eth_vals(tname, vals) + return out + + def reg_enum_vals(self, tname, ectx): + vals = self.get_vals(ectx) + for (val, id) in vals: + ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id)) + + def eth_type_enum(self, tname, ectx): + if not self.eth_has_enum(tname, ectx): return '' + out = '\n' + vals = self.get_vals(ectx) + out += ectx.eth_enum(tname, vals) + return out + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + if self.HasValueConstraint(): + (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_value_constr(ectx) + if (pars['FN_VARIANT'] == '') and self.constr.Needs64b(ectx): + if ectx.Ber(): pars['FN_VARIANT'] = '64' + else: + if (ectx.Oer() and pars['MAX_VAL'] == 'NO_BOUND'): + pars['FN_VARIANT'] = '_64b_no_ub' + else: + pars['FN_VARIANT'] = '_64b' + return pars + + def eth_type_default_body(self, ectx, tname): + if (ectx.Ber()): + if (ectx.constraints_check and self.HasValueConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'), + ('%(VAL_PTR)s',),)) + elif (ectx.Per() or ectx.Oer()): + if (self.HasValueConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(VAL_PTR)s', '%(EXT)s'),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- BitStringType ------------------------------------------------------------ +class BitStringType (Type): + def to_python (self, ctx): + return "asn1.BITSTRING_class ([%s])" % (",".join ( + [x.to_python (ctx) for x in self.named_list])) + + def eth_tname(self): + if self.named_list: + return Type.eth_tname(self) + elif not self.HasConstraint(): + return 'BIT_STRING' + elif self.constr.IsSize(): + return 'BIT_STRING' + '_' + self.constr.eth_constrname() + else: + return '#' + self.type + '_' + str(id(self)) + + def GetTTag(self, ectx): + return ('BER_CLASS_UNI', 'BER_UNI_TAG_BITSTRING') + + def eth_ftype(self, ectx): + return ('FT_BYTES', 'BASE_NONE') + + def eth_need_tree(self): + 
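+# (Illustration: the value returned below is used only for its truthiness.
+# A declaration such as
+# Flags ::= BIT STRING { ack(0), retry(1) }
+# carries a non-empty named_list, so the generator also registers an ett_
+# subtree for the per-bit fields; an anonymous BIT STRING gets no subtree.)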
return self.named_list + + def eth_need_pdu(self, ectx): + pdu = None + if self.HasContentsConstraint(): + t = self.constr.GetContents(ectx) + if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')): + pdu = { 'type' : t, + 'new' : ectx.default_containing_variant == '_pdu_new' } + return pdu + + def sortNamedBits(self): + return self.named_list.val + + def eth_named_bits(self): + bits = [] + if (self.named_list): + sorted_list = self.named_list + sorted_list.sort() + expected_bit_no = 0 + for e in (sorted_list): + # Fill the table with "spare_bit" for unnamed bits + if (int(e.val) != 0) and (expected_bit_no != int(e.val)): + while ( expected_bit_no < int(e.val)): + bits.append((expected_bit_no, ("spare_bit%u" % (expected_bit_no)))) + expected_bit_no = expected_bit_no + 1 + #print ("Adding named bits to list %s bit no %d" % (e.ident, int (e.val))) + bits.append((int(e.val), e.ident)) + expected_bit_no = int(e.val) + 1 + return bits + + def eth_type_default_pars(self, ectx, tname): + pars = Type.eth_type_default_pars(self, ectx, tname) + pars['LEN_PTR'] = 'NULL' + (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx) + if 'ETT_INDEX' not in pars: + pars['ETT_INDEX'] = '-1' + pars['TABLE'] = 'NULL' + if self.eth_named_bits(): + pars['TABLE'] = '%(PROTOP)s%(TNAME)s_bits' + if self.HasContentsConstraint(): + pars['FN_VARIANT'] = ectx.default_containing_variant + t = self.constr.GetContents(ectx) + if t: + if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'): + t = ectx.field[t]['ethname'] + pars['TYPE_REF_PROTO'] = '' + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s' + else: + t = ectx.type[t]['ethname'] + pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto'] + pars['TYPE_REF_TNAME'] = t + pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s' + else: + pars['TYPE_REF_FN'] = 'NULL' + return pars + + def eth_type_default_table(self, ectx, tname): + #print ("eth_type_default_table(tname='%s')" % (tname)) + table = '' + bits = self.eth_named_bits() + if (bits): + table = ectx.eth_bits(tname, bits) + return table + + def eth_type_default_body(self, ectx, tname): + bits = self.eth_named_bits() + if (ectx.Ber()): + if (ectx.constraints_check and self.HasSizeConstraint()): + body = ectx.eth_fn_call('dissect_%(ER)s_constrained_bitstring', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%s' % len(bits),'%(HF_INDEX)s', '%(ETT_INDEX)s',), + ('%(VAL_PTR)s',),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_bitstring', ret='offset', + par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'), + ('%(TABLE)s', '%s' % len(bits), '%(HF_INDEX)s', '%(ETT_INDEX)s',), + ('%(VAL_PTR)s',),)) + elif (ectx.Per() or ectx.Oer()): + if self.HasContentsConstraint(): + body = ectx.eth_fn_call('dissect_%(ER)s_bit_string_containing%(FN_VARIANT)s', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s'),)) + else: + body = ectx.eth_fn_call('dissect_%(ER)s_bit_string', ret='offset', + par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'), + ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s','%(TABLE)s', '%s' % len(bits), '%(VAL_PTR)s', '%(LEN_PTR)s'),)) + else: + body = '#error Can not decode %s' % (tname) + return body + +#--- BStringValue ------------------------------------------------------------ +bstring_tab = { + '0000' : '0', + 
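+# (Nibble-to-hex lookup used by BStringValue.to_str() below. Illustration:
+# the bstring '01011'B is stripped of its quotes and the B suffix, padded
+# with zero bits to a byte boundary ('01011000'), and converted one nibble
+# at a time:
+# >>> bstring_tab['0101'] + bstring_tab['1000']
+# '58'
+# so BStringValue(val="'01011'B").to_str(ectx) renders as '0x58'.)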
'0001' : '1', + '0010' : '2', + '0011' : '3', + '0100' : '4', + '0101' : '5', + '0110' : '6', + '0111' : '7', + '1000' : '8', + '1001' : '9', + '1010' : 'A', + '1011' : 'B', + '1100' : 'C', + '1101' : 'D', + '1110' : 'E', + '1111' : 'F', +} +class BStringValue (Value): + def to_str(self, ectx): + v = self.val[1:-2] + if len(v) % 8: + v += '0' * (8 - len(v) % 8) + vv = '0x' + for i in (list(range(0, len(v), 4))): + vv += bstring_tab[v[i:i+4]] + return vv + +#--- HStringValue ------------------------------------------------------------ +class HStringValue (Value): + def to_str(self, ectx): + vv = '0x' + vv += self.val[1:-2] + return vv + def __int__(self): + return int(self.val[1:-2], 16) + +#--- FieldSpec ---------------------------------------------------------------- +class FieldSpec (Node): + def __init__(self,*args, **kw) : + self.name = None + Node.__init__ (self,*args, **kw) + + def SetName(self, name): + self.name = name + + def get_repr(self): + return ['#UNSUPPORTED_' + self.type] + + def fld_repr(self): + repr = [self.name] + repr.extend(self.get_repr()) + return repr + +class TypeFieldSpec (FieldSpec): + def get_repr(self): + return [] + +class FixedTypeValueFieldSpec (FieldSpec): + def get_repr(self): + if isinstance(self.typ, Type_Ref): + repr = ['TypeReference', self.typ.val] + else: + repr = [self.typ.type] + return repr + +class VariableTypeValueFieldSpec (FieldSpec): + def get_repr(self): + return ['_' + self.type] + +class FixedTypeValueSetFieldSpec (FieldSpec): + def get_repr(self): + return ['_' + self.type] + +class ObjectFieldSpec (FieldSpec): + def get_repr(self): + return ['ClassReference', self.cls.val] + +class ObjectSetFieldSpec (FieldSpec): + def get_repr(self): + return ['ClassReference', self.cls.val] + +#============================================================================== + +def p_module_list_1 (t): + 'module_list : module_list ModuleDefinition' + t[0] = t[1] + [t[2]] + +def p_module_list_2 (t): + 'module_list : ModuleDefinition' + t[0] = [t[1]] + + +#--- ITU-T Recommendation X.680 ----------------------------------------------- + + +# 11 ASN.1 lexical items -------------------------------------------------------- + +# 11.2 Type references +def p_type_ref (t): + 'type_ref : UCASE_IDENT' + t[0] = Type_Ref(val=t[1]) + +# 11.3 Identifiers +def p_identifier (t): + 'identifier : LCASE_IDENT' + t[0] = t[1] + +# 11.4 Value references +# cause reduce/reduce conflict +#def p_valuereference (t): +# 'valuereference : LCASE_IDENT' +# t[0] = Value_Ref(val=t[1]) + +# 11.5 Module references +def p_modulereference (t): + 'modulereference : UCASE_IDENT' + t[0] = t[1] + + +# 12 Module definition -------------------------------------------------------- + +# 12.1 +def p_ModuleDefinition (t): + 'ModuleDefinition : ModuleIdentifier DEFINITIONS TagDefault ASSIGNMENT ModuleBegin BEGIN ModuleBody END' + t[0] = Module (ident = t[1], tag_def = t[3], body = t[7]) + +def p_ModuleBegin (t): + 'ModuleBegin : ' + if t[-4].val == 'Remote-Operations-Information-Objects': + x880_module_begin() + +def p_TagDefault_1 (t): + '''TagDefault : EXPLICIT TAGS + | IMPLICIT TAGS + | AUTOMATIC TAGS ''' + t[0] = Default_Tags (dfl_tag = t[1]) + +def p_TagDefault_2 (t): + 'TagDefault : ' + # 12.2 The "TagDefault" is taken as EXPLICIT TAGS if it is "empty". 
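+# (Example: a module header written as
+# MyModule DEFINITIONS ::= BEGIN ...
+# with no tagging keyword therefore behaves exactly like
+# MyModule DEFINITIONS EXPLICIT TAGS ::= BEGIN ...
+# which is what the default node below records.)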
+ t[0] = Default_Tags (dfl_tag = 'EXPLICIT') + +def p_ModuleIdentifier_1 (t): + 'ModuleIdentifier : modulereference DefinitiveIdentifier' # name, oid + t [0] = Node('module_ident', val = t[1], ident = t[2]) + +def p_ModuleIdentifier_2 (t): + 'ModuleIdentifier : modulereference' # name, oid + t [0] = Node('module_ident', val = t[1], ident = None) + +def p_DefinitiveIdentifier (t): + 'DefinitiveIdentifier : ObjectIdentifierValue' + t[0] = t[1] + +#def p_module_ref (t): +# 'module_ref : UCASE_IDENT' +# t[0] = t[1] + +def p_ModuleBody_1 (t): + 'ModuleBody : Exports Imports AssignmentList' + t[0] = Module_Body (exports = t[1], imports = t[2], assign_list = t[3]) + +def p_ModuleBody_2 (t): + 'ModuleBody : ' + t[0] = Node ('module_body', exports = [], imports = [], assign_list = []) + +def p_Exports_1 (t): + 'Exports : EXPORTS syms_exported SEMICOLON' + t[0] = t[2] + +def p_Exports_2 (t): + 'Exports : EXPORTS ALL SEMICOLON' + t[0] = [ 'ALL' ] + +def p_Exports_3 (t): + 'Exports : ' + t[0] = [ 'ALL' ] + +def p_syms_exported_1 (t): + 'syms_exported : exp_sym_list' + t[0] = t[1] + +def p_syms_exported_2 (t): + 'syms_exported : ' + t[0] = [] + +def p_exp_sym_list_1 (t): + 'exp_sym_list : Symbol' + t[0] = [t[1]] + +def p_exp_sym_list_2 (t): + 'exp_sym_list : exp_sym_list COMMA Symbol' + t[0] = t[1] + [t[3]] + + +def p_Imports_1 (t): + 'Imports : importsbegin IMPORTS SymbolsImported SEMICOLON' + t[0] = t[3] + global lcase_ident_assigned + lcase_ident_assigned = {} + +def p_importsbegin (t): + 'importsbegin : ' + global lcase_ident_assigned + global g_conform + lcase_ident_assigned = {} + lcase_ident_assigned.update(g_conform.use_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER')) + +def p_Imports_2 (t): + 'Imports : ' + t[0] = [] + +def p_SymbolsImported_1(t): + 'SymbolsImported : ' + t[0] = [] + +def p_SymbolsImported_2 (t): + 'SymbolsImported : SymbolsFromModuleList' + t[0] = t[1] + +def p_SymbolsFromModuleList_1 (t): + 'SymbolsFromModuleList : SymbolsFromModuleList SymbolsFromModule' + t[0] = t[1] + [t[2]] + +def p_SymbolsFromModuleList_2 (t): + 'SymbolsFromModuleList : SymbolsFromModule' + t[0] = [t[1]] + +def p_SymbolsFromModule (t): + '''SymbolsFromModule : SymbolList FROM GlobalModuleReference + | SymbolList FROM GlobalModuleReference WITH SUCCESSORS''' + t[0] = Node ('SymbolList', symbol_list = t[1], module = t[3]) + for s in (t[0].symbol_list): + if (isinstance(s, Value_Ref)): lcase_ident_assigned[s.val] = t[3] + import_symbols_from_module(t[0].module, t[0].symbol_list) + +def import_symbols_from_module(module, symbol_list): + if module.val == 'Remote-Operations-Information-Objects': + for i in range(len(symbol_list)): + s = symbol_list[i] + if isinstance(s, Type_Ref) or isinstance(s, Class_Ref): + x880_import(s.val) + if isinstance(s, Type_Ref) and is_class_ident(s.val): + symbol_list[i] = Class_Ref (val = s.val) + return + for i in range(len(symbol_list)): + s = symbol_list[i] + if isinstance(s, Type_Ref) and is_class_ident("$%s$%s" % (module.val, s.val)): + import_class_from_module(module.val, s.val) + if isinstance(s, Type_Ref) and is_class_ident(s.val): + symbol_list[i] = Class_Ref (val = s.val) + +def p_GlobalModuleReference (t): + 'GlobalModuleReference : modulereference AssignedIdentifier' + t [0] = Node('module_ident', val = t[1], ident = t[2]) + +def p_AssignedIdentifier_1 (t): + 'AssignedIdentifier : ObjectIdentifierValue' + t[0] = t[1] + +def p_AssignedIdentifier_2 (t): + 'AssignedIdentifier : LCASE_IDENT_ASSIGNED' + t[0] = t[1] + +def p_AssignedIdentifier_3 (t): + 'AssignedIdentifier : ' + 
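+# (Illustration of the three AssignedIdentifier alternatives, with
+# hypothetical module names: in
+# IMPORTS Foo FROM Other-Module { iso standard(0) 1 }
+# the identifier is an ObjectIdentifierValue; in
+# IMPORTS Foo FROM Other-Module other-module-oid
+# it is a previously assigned lower-case name (LCASE_IDENT_ASSIGNED); and in
+# IMPORTS Foo FROM Other-Module ;
+# it is absent, which is the empty production handled here.)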
pass + +def p_SymbolList_1 (t): + 'SymbolList : Symbol' + t[0] = [t[1]] + +def p_SymbolList_2 (t): + 'SymbolList : SymbolList COMMA Symbol' + t[0] = t[1] + [t[3]] + +def p_Symbol (t): + '''Symbol : Reference + | ParameterizedReference''' + t[0] = t[1] + +def p_Reference_1 (t): + '''Reference : type_ref + | objectclassreference ''' + t[0] = t[1] + +def p_Reference_2 (t): + '''Reference : LCASE_IDENT_ASSIGNED + | identifier ''' # instead of valuereference which causes reduce/reduce conflict + t[0] = Value_Ref(val=t[1]) + +def p_AssignmentList_1 (t): + 'AssignmentList : AssignmentList Assignment' + t[0] = t[1] + [t[2]] + +def p_AssignmentList_2 (t): + 'AssignmentList : Assignment SEMICOLON' + t[0] = [t[1]] + +def p_AssignmentList_3 (t): + 'AssignmentList : Assignment' + t[0] = [t[1]] + +def p_Assignment (t): + '''Assignment : TypeAssignment + | ValueAssignment + | ValueSetTypeAssignment + | ObjectClassAssignment + | ObjectAssignment + | ObjectSetAssignment + | ParameterizedAssignment + | pyquote ''' + t[0] = t[1] + + +# 13 Referencing type and value definitions ----------------------------------- + +# 13.1 +def p_DefinedType (t): + '''DefinedType : ExternalTypeReference + | type_ref + | ParameterizedType''' + t[0] = t[1] + +def p_DefinedValue_1(t): + '''DefinedValue : ExternalValueReference''' + t[0] = t[1] + +def p_DefinedValue_2(t): + '''DefinedValue : identifier ''' # instead of valuereference which causes reduce/reduce conflict + t[0] = Value_Ref(val=t[1]) + +# 13.6 +def p_ExternalTypeReference (t): + 'ExternalTypeReference : modulereference DOT type_ref' + t[0] = Node ('ExternalTypeReference', module = t[1], typ = t[3]) + +def p_ExternalValueReference (t): + 'ExternalValueReference : modulereference DOT identifier' + t[0] = Node ('ExternalValueReference', module = t[1], ident = t[3]) + + +# 15 Assigning types and values ----------------------------------------------- + +# 15.1 +def p_TypeAssignment (t): + 'TypeAssignment : UCASE_IDENT ASSIGNMENT Type' + t[0] = t[3] + t[0].SetName(t[1]) + +# 15.2 +def p_ValueAssignment (t): + 'ValueAssignment : LCASE_IDENT ValueType ASSIGNMENT Value' + t[0] = ValueAssignment(ident = t[1], typ = t[2], val = t[4]) + +# only "simple" types are supported to simplify the grammar +def p_ValueType (t): + '''ValueType : type_ref + | BooleanType + | IntegerType + | ObjectIdentifierType + | OctetStringType + | RealType ''' + + t[0] = t[1] + +# 15.6 +def p_ValueSetTypeAssignment (t): + 'ValueSetTypeAssignment : UCASE_IDENT ValueType ASSIGNMENT ValueSet' + t[0] = Node('ValueSetTypeAssignment', name=t[1], typ=t[2], val=t[4]) + +# 15.7 +def p_ValueSet (t): + 'ValueSet : lbraceignore rbraceignore' + t[0] = None + + +# 16 Definition of types and values ------------------------------------------- + +# 16.1 +def p_Type (t): + '''Type : BuiltinType + | ReferencedType + | ConstrainedType''' + t[0] = t[1] + +# 16.2 +def p_BuiltinType (t): + '''BuiltinType : AnyType + | BitStringType + | BooleanType + | CharacterStringType + | ChoiceType + | EmbeddedPDVType + | EnumeratedType + | ExternalType + | InstanceOfType + | IntegerType + | NullType + | ObjectClassFieldType + | ObjectIdentifierType + | OctetStringType + | RealType + | RelativeOIDType + | SequenceType + | SequenceOfType + | SetType + | SetOfType + | TaggedType''' + t[0] = t[1] + +# 16.3 +def p_ReferencedType (t): + '''ReferencedType : DefinedType + | UsefulType + | SelectionType''' + t[0] = t[1] + +# 16.5 +def p_NamedType (t): + 'NamedType : identifier Type' + t[0] = t[2] + t[0].SetName (t[1]) + +# 16.7 +def p_Value (t): + 
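+# (For illustration, each of these value notations reduces through a
+# different alternative of the production below:
+# TRUE -> BuiltinValue (BooleanValue)
+# { 1 2 840 } -> BuiltinValue (ObjectIdentifierValue)
+# someValue -> ReferencedValue (DefinedValue)
+# )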
'''Value : BuiltinValue + | ReferencedValue + | ObjectClassFieldValue''' + t[0] = t[1] + +# 16.9 +def p_BuiltinValue (t): + '''BuiltinValue : BooleanValue + | ChoiceValue + | IntegerValue + | ObjectIdentifierValue + | RealValue + | SequenceValue + | hex_string + | binary_string + | char_string''' # XXX we don't support {data} here + t[0] = t[1] + +# 16.11 +def p_ReferencedValue (t): + '''ReferencedValue : DefinedValue + | ValueFromObject''' + t[0] = t[1] + +# 16.13 +#def p_NamedValue (t): +# 'NamedValue : identifier Value' +# t[0] = Node ('NamedValue', ident = t[1], value = t[2]) + + +# 17 Notation for the boolean type -------------------------------------------- + +# 17.1 +def p_BooleanType (t): + 'BooleanType : BOOLEAN' + t[0] = BooleanType () + +# 17.2 +def p_BooleanValue (t): + '''BooleanValue : TRUE + | FALSE''' + t[0] = t[1] + + +# 18 Notation for the integer type -------------------------------------------- + +# 18.1 +def p_IntegerType_1 (t): + 'IntegerType : INTEGER' + t[0] = IntegerType (named_list = None) + +def p_IntegerType_2 (t): + 'IntegerType : INTEGER LBRACE NamedNumberList RBRACE' + t[0] = IntegerType(named_list = t[3]) + +def p_NamedNumberList_1 (t): + 'NamedNumberList : NamedNumber' + t[0] = [t[1]] + +def p_NamedNumberList_2 (t): + 'NamedNumberList : NamedNumberList COMMA NamedNumber' + t[0] = t[1] + [t[3]] + +def p_NamedNumber (t): + '''NamedNumber : identifier LPAREN SignedNumber RPAREN + | identifier LPAREN DefinedValue RPAREN''' + t[0] = NamedNumber(ident = t[1], val = t[3]) + +def p_SignedNumber_1 (t): + 'SignedNumber : NUMBER' + t[0] = t [1] + +def p_SignedNumber_2 (t): + 'SignedNumber : MINUS NUMBER' + t[0] = '-' + t[2] + +# 18.9 +def p_IntegerValue (t): + 'IntegerValue : SignedNumber' + t[0] = t [1] + +# 19 Notation for the enumerated type ----------------------------------------- + +# 19.1 +def p_EnumeratedType (t): + 'EnumeratedType : ENUMERATED LBRACE Enumerations RBRACE' + t[0] = EnumeratedType (val = t[3]['val'], ext = t[3]['ext']) + +def p_Enumerations_1 (t): + 'Enumerations : Enumeration' + t[0] = { 'val' : t[1], 'ext' : None } + +def p_Enumerations_2 (t): + 'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec' + t[0] = { 'val' : t[1], 'ext' : [] } + +def p_Enumerations_3 (t): + 'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec COMMA Enumeration' + t[0] = { 'val' : t[1], 'ext' : t[6] } + +def p_Enumeration_1 (t): + 'Enumeration : EnumerationItem' + t[0] = [t[1]] + +def p_Enumeration_2 (t): + 'Enumeration : Enumeration COMMA EnumerationItem' + t[0] = t[1] + [t[3]] + +def p_EnumerationItem (t): + '''EnumerationItem : Identifier + | NamedNumber''' + t[0] = t[1] + +def p_Identifier (t): + 'Identifier : identifier' + t[0] = Node ('Identifier', ident = t[1]) + + +# 20 Notation for the real type ----------------------------------------------- + +# 20.1 +def p_RealType (t): + 'RealType : REAL' + t[0] = RealType () + +# 20.6 +def p_RealValue (t): + '''RealValue : REAL_NUMBER + | SpecialRealValue''' + t[0] = t [1] + +def p_SpecialRealValue (t): + '''SpecialRealValue : PLUS_INFINITY + | MINUS_INFINITY''' + t[0] = t[1] + + +# 21 Notation for the bitstring type ------------------------------------------ + +# 21.1 +def p_BitStringType_1 (t): + 'BitStringType : BIT STRING' + t[0] = BitStringType (named_list = None) + +def p_BitStringType_2 (t): + 'BitStringType : BIT STRING LBRACE NamedBitList RBRACE' + t[0] = BitStringType (named_list = t[4]) + +def p_NamedBitList_1 (t): + 'NamedBitList : NamedBit' + t[0] = [t[1]] + +def p_NamedBitList_2 (t): + 'NamedBitList 
: NamedBitList COMMA NamedBit' + t[0] = t[1] + [t[3]] + +def p_NamedBit (t): + '''NamedBit : identifier LPAREN NUMBER RPAREN + | identifier LPAREN DefinedValue RPAREN''' + t[0] = NamedNumber (ident = t[1], val = t[3]) + + +# 22 Notation for the octetstring type ---------------------------------------- + +# 22.1 +def p_OctetStringType (t): + 'OctetStringType : OCTET STRING' + t[0] = OctetStringType () + + +# 23 Notation for the null type ----------------------------------------------- + +# 23.1 +def p_NullType (t): + 'NullType : NULL' + t[0] = NullType () + +# 23.3 +def p_NullValue (t): + 'NullValue : NULL' + t[0] = NullValue () + + +# 24 Notation for sequence types ---------------------------------------------- + +# 24.1 +def p_SequenceType_1 (t): + 'SequenceType : SEQUENCE LBRACE RBRACE' + t[0] = SequenceType (elt_list = []) + +def p_SequenceType_2 (t): + 'SequenceType : SEQUENCE LBRACE ComponentTypeLists RBRACE' + t[0] = SequenceType (elt_list = t[3]['elt_list']) + if 'ext_list' in t[3]: + t[0].ext_list = t[3]['ext_list'] + if 'elt_list2' in t[3]: + t[0].elt_list2 = t[3]['elt_list2'] + +def p_ExtensionAndException_1 (t): + 'ExtensionAndException : ELLIPSIS' + t[0] = [] + +def p_OptionalExtensionMarker_1 (t): + 'OptionalExtensionMarker : COMMA ELLIPSIS' + t[0] = True + +def p_OptionalExtensionMarker_2 (t): + 'OptionalExtensionMarker : ' + t[0] = False + +def p_ComponentTypeLists_1 (t): + 'ComponentTypeLists : ComponentTypeList' + t[0] = {'elt_list' : t[1]} + +def p_ComponentTypeLists_2 (t): + 'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException OptionalExtensionMarker' + t[0] = {'elt_list' : t[1], 'ext_list' : []} + +def p_ComponentTypeLists_3 (t): + 'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList OptionalExtensionMarker' + t[0] = {'elt_list' : t[1], 'ext_list' : t[4]} + +def p_ComponentTypeLists_4 (t): + 'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionEndMarker COMMA ComponentTypeList' + t[0] = {'elt_list' : t[1], 'ext_list' : [], 'elt_list2' : t[6]} + +def p_ComponentTypeLists_5 (t): + 'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList ExtensionEndMarker COMMA ComponentTypeList' + t[0] = {'elt_list' : t[1], 'ext_list' : t[4], 'elt_list2' : t[7]} + +def p_ComponentTypeLists_6 (t): + 'ComponentTypeLists : ExtensionAndException OptionalExtensionMarker' + t[0] = {'elt_list' : [], 'ext_list' : []} + +def p_ComponentTypeLists_7 (t): + 'ComponentTypeLists : ExtensionAndException ExtensionAdditionList OptionalExtensionMarker' + t[0] = {'elt_list' : [], 'ext_list' : t[2]} + +def p_ExtensionEndMarker (t): + 'ExtensionEndMarker : COMMA ELLIPSIS' + pass + +def p_ExtensionAdditionList_1 (t): + 'ExtensionAdditionList : COMMA ExtensionAddition' + t[0] = [t[2]] + +def p_ExtensionAdditionList_2 (t): + 'ExtensionAdditionList : ExtensionAdditionList COMMA ExtensionAddition' + t[0] = t[1] + [t[3]] + +def p_ExtensionAddition_1 (t): + 'ExtensionAddition : ExtensionAdditionGroup' + t[0] = Node ('elt_type', val = t[1], optional = 0) + +def p_ExtensionAddition_2 (t): + 'ExtensionAddition : ComponentType' + t[0] = t[1] + +def p_ExtensionAdditionGroup (t): + 'ExtensionAdditionGroup : LVERBRACK VersionNumber ComponentTypeList RVERBRACK' + t[0] = ExtensionAdditionGroup (ver = t[2], elt_list = t[3]) + +def p_VersionNumber_1 (t): + 'VersionNumber : ' + +def p_VersionNumber_2 (t): + 'VersionNumber : NUMBER COLON' + t[0] = t[1] + +def p_ComponentTypeList_1 (t): + 'ComponentTypeList : 
ComponentType' + t[0] = [t[1]] + +def p_ComponentTypeList_2 (t): + 'ComponentTypeList : ComponentTypeList COMMA ComponentType' + t[0] = t[1] + [t[3]] + +def p_ComponentType_1 (t): + 'ComponentType : NamedType' + t[0] = Node ('elt_type', val = t[1], optional = 0) + +def p_ComponentType_2 (t): + 'ComponentType : NamedType OPTIONAL' + t[0] = Node ('elt_type', val = t[1], optional = 1) + +def p_ComponentType_3 (t): + 'ComponentType : NamedType DEFAULT DefaultValue' + t[0] = Node ('elt_type', val = t[1], optional = 1, default = t[3]) + +def p_ComponentType_4 (t): + 'ComponentType : COMPONENTS OF Type' + t[0] = Node ('components_of', typ = t[3]) + +def p_DefaultValue_1 (t): + '''DefaultValue : ReferencedValue + | BooleanValue + | ChoiceValue + | IntegerValue + | RealValue + | hex_string + | binary_string + | char_string + | ObjectClassFieldValue''' + t[0] = t[1] + +def p_DefaultValue_2 (t): + 'DefaultValue : lbraceignore rbraceignore' + t[0] = '' + +# 24.17 +def p_SequenceValue_1 (t): + 'SequenceValue : LBRACE RBRACE' + t[0] = [] + + +#def p_SequenceValue_2 (t): +# 'SequenceValue : LBRACE ComponentValueList RBRACE' +# t[0] = t[2] + +#def p_ComponentValueList_1 (t): +# 'ComponentValueList : NamedValue' +# t[0] = [t[1]] + +#def p_ComponentValueList_2 (t): +# 'ComponentValueList : ComponentValueList COMMA NamedValue' +# t[0] = t[1] + [t[3]] + + +# 25 Notation for sequence-of types ------------------------------------------- + +# 25.1 +def p_SequenceOfType (t): + '''SequenceOfType : SEQUENCE OF Type + | SEQUENCE OF NamedType''' + t[0] = SequenceOfType (val = t[3], size_constr = None) + + +# 26 Notation for set types --------------------------------------------------- + +# 26.1 +def p_SetType_1 (t): + 'SetType : SET LBRACE RBRACE' + t[0] = SetType (elt_list = []) + +def p_SetType_2 (t): + 'SetType : SET LBRACE ComponentTypeLists RBRACE' + t[0] = SetType (elt_list = t[3]['elt_list']) + if 'ext_list' in t[3]: + t[0].ext_list = t[3]['ext_list'] + if 'elt_list2' in t[3]: + t[0].elt_list2 = t[3]['elt_list2'] + + +# 27 Notation for set-of types ------------------------------------------------ + +# 27.1 +def p_SetOfType (t): + '''SetOfType : SET OF Type + | SET OF NamedType''' + t[0] = SetOfType (val = t[3]) + +# 28 Notation for choice types ------------------------------------------------ + +# 28.1 +def p_ChoiceType (t): + 'ChoiceType : CHOICE LBRACE AlternativeTypeLists RBRACE' + if 'ext_list' in t[3]: + t[0] = ChoiceType (elt_list = t[3]['elt_list'], ext_list = t[3]['ext_list']) + else: + t[0] = ChoiceType (elt_list = t[3]['elt_list']) + +def p_AlternativeTypeLists_1 (t): + 'AlternativeTypeLists : AlternativeTypeList' + t[0] = {'elt_list' : t[1]} + +def p_AlternativeTypeLists_2 (t): + 'AlternativeTypeLists : AlternativeTypeList COMMA ExtensionAndException ExtensionAdditionAlternatives OptionalExtensionMarker' + t[0] = {'elt_list' : t[1], 'ext_list' : t[4]} + +def p_ExtensionAdditionAlternatives_1 (t): + 'ExtensionAdditionAlternatives : ExtensionAdditionAlternativesList' + t[0] = t[1] + +def p_ExtensionAdditionAlternatives_2 (t): + 'ExtensionAdditionAlternatives : ' + t[0] = [] + +def p_ExtensionAdditionAlternativesList_1 (t): + 'ExtensionAdditionAlternativesList : COMMA ExtensionAdditionAlternative' + t[0] = t[2] + +def p_ExtensionAdditionAlternativesList_2 (t): + 'ExtensionAdditionAlternativesList : ExtensionAdditionAlternativesList COMMA ExtensionAdditionAlternative' + t[0] = t[1] + t[3] + +def p_ExtensionAdditionAlternative_1 (t): + 'ExtensionAdditionAlternative : NamedType' + t[0] = [t[1]] + +def 
p_ExtensionAdditionAlternative_2 (t): + 'ExtensionAdditionAlternative : ExtensionAdditionAlternativesGroup' + t[0] = t[1] + +def p_ExtensionAdditionAlternativesGroup (t): + 'ExtensionAdditionAlternativesGroup : LVERBRACK VersionNumber AlternativeTypeList RVERBRACK' + t[0] = t[3] + +def p_AlternativeTypeList_1 (t): + 'AlternativeTypeList : NamedType' + t[0] = [t[1]] + +def p_AlternativeTypeList_2 (t): + 'AlternativeTypeList : AlternativeTypeList COMMA NamedType' + t[0] = t[1] + [t[3]] + +# 28.10 +def p_ChoiceValue_1 (t): + '''ChoiceValue : identifier COLON Value + | identifier COLON NullValue ''' + val = t[3] + if not isinstance(val, Value): + val = Value(val=val) + t[0] = ChoiceValue (choice = t[1], val = val) + +# 29 Notation for selection types + +# 29.1 +def p_SelectionType (t): # + 'SelectionType : identifier LT Type' + t[0] = SelectionType (typ = t[3], sel = t[1]) + +# 30 Notation for tagged types ------------------------------------------------ + +# 30.1 +def p_TaggedType_1 (t): + 'TaggedType : Tag Type' + t[1].mode = 'default' + t[0] = t[2] + t[0].AddTag(t[1]) + +def p_TaggedType_2 (t): + '''TaggedType : Tag IMPLICIT Type + | Tag EXPLICIT Type''' + t[1].mode = t[2] + t[0] = t[3] + t[0].AddTag(t[1]) + +def p_Tag (t): + 'Tag : LBRACK Class ClassNumber RBRACK' + t[0] = Tag(cls = t[2], num = t[3]) + +def p_ClassNumber_1 (t): + 'ClassNumber : number' + t[0] = t[1] + +def p_ClassNumber_2 (t): + 'ClassNumber : DefinedValue' + t[0] = t[1] + +def p_Class_1 (t): + '''Class : UNIVERSAL + | APPLICATION + | PRIVATE''' + t[0] = t[1] + +def p_Class_2 (t): + 'Class :' + t[0] = 'CONTEXT' + + +# 31 Notation for the object identifier type ---------------------------------- + +# 31.1 +def p_ObjectIdentifierType (t): + 'ObjectIdentifierType : OBJECT IDENTIFIER' + t[0] = ObjectIdentifierType() + +# 31.3 +def p_ObjectIdentifierValue (t): + 'ObjectIdentifierValue : LBRACE oid_comp_list RBRACE' + t[0] = ObjectIdentifierValue (comp_list=t[2]) + +def p_oid_comp_list_1 (t): + 'oid_comp_list : oid_comp_list ObjIdComponents' + t[0] = t[1] + [t[2]] + +def p_oid_comp_list_2 (t): + 'oid_comp_list : ObjIdComponents' + t[0] = [t[1]] + +def p_ObjIdComponents (t): + '''ObjIdComponents : NameForm + | NumberForm + | NameAndNumberForm''' + t[0] = t[1] + +def p_NameForm (t): + '''NameForm : LCASE_IDENT + | LCASE_IDENT_ASSIGNED''' + t [0] = t[1] + +def p_NumberForm (t): + '''NumberForm : NUMBER''' +# | DefinedValue''' + t [0] = t[1] + +def p_NameAndNumberForm (t): + '''NameAndNumberForm : LCASE_IDENT_ASSIGNED LPAREN NumberForm RPAREN + | LCASE_IDENT LPAREN NumberForm RPAREN''' + t[0] = Node('name_and_number', ident = t[1], number = t[3]) + +# 32 Notation for the relative object identifier type ------------------------- + +# 32.1 +def p_RelativeOIDType (t): + 'RelativeOIDType : RELATIVE_OID' + t[0] = RelativeOIDType() + +# 33 Notation for the embedded-pdv type --------------------------------------- + +# 33.1 +def p_EmbeddedPDVType (t): + 'EmbeddedPDVType : EMBEDDED PDV' + t[0] = EmbeddedPDVType() + +# 34 Notation for the external type ------------------------------------------- + +# 34.1 +def p_ExternalType (t): + 'ExternalType : EXTERNAL' + t[0] = ExternalType() + +# 36 Notation for character string types -------------------------------------- + +# 36.1 +def p_CharacterStringType (t): + '''CharacterStringType : RestrictedCharacterStringType + | UnrestrictedCharacterStringType''' + t[0] = t[1] + + +# 37 Definition of restricted character string types -------------------------- + +def p_RestrictedCharacterStringType_1 (t): + 
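+# (X.680 clause 37: each keyword below simply instantiates the matching
+# *StringType class defined earlier in this file. Note the aliases handled
+# there: T61String reuses the TeletexString BER tag and ISO646String reuses
+# the VisibleString tag, via their GetTTag() overrides.)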
'RestrictedCharacterStringType : BMPString' + t[0] = BMPStringType () +def p_RestrictedCharacterStringType_2 (t): + 'RestrictedCharacterStringType : GeneralString' + t[0] = GeneralStringType () +def p_RestrictedCharacterStringType_3 (t): + 'RestrictedCharacterStringType : GraphicString' + t[0] = GraphicStringType () +def p_RestrictedCharacterStringType_4 (t): + 'RestrictedCharacterStringType : IA5String' + t[0] = IA5StringType () +def p_RestrictedCharacterStringType_5 (t): + 'RestrictedCharacterStringType : ISO646String' + t[0] = ISO646StringType () +def p_RestrictedCharacterStringType_6 (t): + 'RestrictedCharacterStringType : NumericString' + t[0] = NumericStringType () +def p_RestrictedCharacterStringType_7 (t): + 'RestrictedCharacterStringType : PrintableString' + t[0] = PrintableStringType () +def p_RestrictedCharacterStringType_8 (t): + 'RestrictedCharacterStringType : TeletexString' + t[0] = TeletexStringType () +def p_RestrictedCharacterStringType_9 (t): + 'RestrictedCharacterStringType : T61String' + t[0] = T61StringType () +def p_RestrictedCharacterStringType_10 (t): + 'RestrictedCharacterStringType : UniversalString' + t[0] = UniversalStringType () +def p_RestrictedCharacterStringType_11 (t): + 'RestrictedCharacterStringType : UTF8String' + t[0] = UTF8StringType () +def p_RestrictedCharacterStringType_12 (t): + 'RestrictedCharacterStringType : VideotexString' + t[0] = VideotexStringType () +def p_RestrictedCharacterStringType_13 (t): + 'RestrictedCharacterStringType : VisibleString' + t[0] = VisibleStringType () + + +# 40 Definition of unrestricted character string types ------------------------ + +# 40.1 +def p_UnrestrictedCharacterStringType (t): + 'UnrestrictedCharacterStringType : CHARACTER STRING' + t[0] = UnrestrictedCharacterStringType () + + +# 41 Notation for types defined in clauses 42 to 44 --------------------------- + +# 42 Generalized time --------------------------------------------------------- + +def p_UsefulType_1 (t): + 'UsefulType : GeneralizedTime' + t[0] = GeneralizedTime() + +# 43 Universal time ----------------------------------------------------------- + +def p_UsefulType_2 (t): + 'UsefulType : UTCTime' + t[0] = UTCTime() + +# 44 The object descriptor type ----------------------------------------------- + +def p_UsefulType_3 (t): + 'UsefulType : ObjectDescriptor' + t[0] = ObjectDescriptor() + + +# 45 Constrained types -------------------------------------------------------- + +# 45.1 +def p_ConstrainedType_1 (t): + 'ConstrainedType : Type Constraint' + t[0] = t[1] + t[0].AddConstraint(t[2]) + +def p_ConstrainedType_2 (t): + 'ConstrainedType : TypeWithConstraint' + t[0] = t[1] + +# 45.5 +def p_TypeWithConstraint_1 (t): + '''TypeWithConstraint : SET Constraint OF Type + | SET SizeConstraint OF Type''' + t[0] = SetOfType (val = t[4], constr = t[2]) + +def p_TypeWithConstraint_2 (t): + '''TypeWithConstraint : SEQUENCE Constraint OF Type + | SEQUENCE SizeConstraint OF Type''' + t[0] = SequenceOfType (val = t[4], constr = t[2]) + +def p_TypeWithConstraint_3 (t): + '''TypeWithConstraint : SET Constraint OF NamedType + | SET SizeConstraint OF NamedType''' + t[0] = SetOfType (val = t[4], constr = t[2]) + +def p_TypeWithConstraint_4 (t): + '''TypeWithConstraint : SEQUENCE Constraint OF NamedType + | SEQUENCE SizeConstraint OF NamedType''' + t[0] = SequenceOfType (val = t[4], constr = t[2]) + +# 45.6 +# 45.7 +def p_Constraint (t): + 'Constraint : LPAREN ConstraintSpec ExceptionSpec RPAREN' + t[0] = t[2] + +def p_ConstraintSpec (t): + '''ConstraintSpec : 
ElementSetSpecs + | GeneralConstraint''' + t[0] = t[1] + +# 46 Element set specification ------------------------------------------------ + +# 46.1 +def p_ElementSetSpecs_1 (t): + 'ElementSetSpecs : RootElementSetSpec' + t[0] = t[1] + +def p_ElementSetSpecs_2 (t): + 'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS' + t[0] = t[1] + t[0].ext = True + +def p_ElementSetSpecs_3 (t): + 'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS COMMA AdditionalElementSetSpec' + t[0] = t[1] + t[0].ext = True + +def p_RootElementSetSpec (t): + 'RootElementSetSpec : ElementSetSpec' + t[0] = t[1] + +def p_AdditionalElementSetSpec (t): + 'AdditionalElementSetSpec : ElementSetSpec' + t[0] = t[1] + +def p_ElementSetSpec (t): + 'ElementSetSpec : Unions' + t[0] = t[1] + +def p_Unions_1 (t): + 'Unions : Intersections' + t[0] = t[1] + +def p_Unions_2 (t): + 'Unions : UElems UnionMark Intersections' + t[0] = Constraint(type = 'Union', subtype = [t[1], t[3]]) + +def p_UElems (t): + 'UElems : Unions' + t[0] = t[1] + +def p_Intersections_1 (t): + 'Intersections : IntersectionElements' + t[0] = t[1] + +def p_Intersections_2 (t): + 'Intersections : IElems IntersectionMark IntersectionElements' + t[0] = Constraint(type = 'Intersection', subtype = [t[1], t[3]]) + +def p_IElems (t): + 'IElems : Intersections' + t[0] = t[1] + +def p_IntersectionElements (t): + 'IntersectionElements : Elements' + t[0] = t[1] + +def p_UnionMark (t): + '''UnionMark : BAR + | UNION''' + +def p_IntersectionMark (t): + '''IntersectionMark : CIRCUMFLEX + | INTERSECTION''' + +# 46.5 +def p_Elements_1 (t): + 'Elements : SubtypeElements' + t[0] = t[1] + +def p_Elements_2 (t): + 'Elements : LPAREN ElementSetSpec RPAREN' + t[0] = t[2] + +# 47 Subtype elements --------------------------------------------------------- + +# 47.1 General +def p_SubtypeElements (t): + '''SubtypeElements : SingleValue + | ContainedSubtype + | ValueRange + | PermittedAlphabet + | SizeConstraint + | TypeConstraint + | InnerTypeConstraints + | PatternConstraint''' + t[0] = t[1] + +# 47.2 Single value +# 47.2.1 +def p_SingleValue (t): + 'SingleValue : Value' + t[0] = Constraint(type = 'SingleValue', subtype = t[1]) + +# 47.3 Contained subtype +# 47.3.1 +def p_ContainedSubtype (t): + 'ContainedSubtype : Includes Type' + t[0] = Constraint(type = 'ContainedSubtype', subtype = t[2]) + +def p_Includes (t): + '''Includes : INCLUDES + | ''' + +# 47.4 Value range +# 47.4.1 +def p_ValueRange (t): + 'ValueRange : LowerEndpoint RANGE UpperEndpoint' + t[0] = Constraint(type = 'ValueRange', subtype = [t[1], t[3]]) + +# 47.4.3 +def p_LowerEndpoint_1 (t): + 'LowerEndpoint : LowerEndValue' + t[0] = t[1] + +def p_LowerEndpoint_2 (t): + 'LowerEndpoint : LowerEndValue LT' + t[0] = t[1] # but not inclusive range + +def p_UpperEndpoint_1 (t): + 'UpperEndpoint : UpperEndValue' + t[0] = t[1] + +def p_UpperEndpoint_2 (t): + 'UpperEndpoint : LT UpperEndValue' + t[0] = t[1] # but not inclusive range + +# 47.4.4 +def p_LowerEndValue (t): + '''LowerEndValue : Value + | MIN''' + t[0] = t[1] # XXX + +def p_UpperEndValue (t): + '''UpperEndValue : Value + | MAX''' + t[0] = t[1] + +# 47.5 Size constraint +# 47.5.1 +def p_SizeConstraint (t): + 'SizeConstraint : SIZE Constraint' + t[0] = Constraint (type = 'Size', subtype = t[2]) + +# 47.6 Type constraint +# 47.6.1 +def p_TypeConstraint (t): + 'TypeConstraint : Type' + t[0] = Constraint (type = 'Type', subtype = t[1]) + +# 47.7 Permitted alphabet +# 47.7.1 +def p_PermittedAlphabet (t): + 'PermittedAlphabet : FROM Constraint' + t[0] = Constraint (type = 
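+# ('From' is this compiler's internal tag for a permitted-alphabet
+# constraint; e.g. IA5String (FROM ("0".."9")) reaches this production
+# with the parenthesised part already reduced to the Constraint in t[2].)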
'From', subtype = t[2]) + +# 47.8 Inner subtyping +# 47.8.1 +def p_InnerTypeConstraints (t): + '''InnerTypeConstraints : WITH COMPONENT SingleTypeConstraint + | WITH COMPONENTS MultipleTypeConstraints''' + pass # ignore PER invisible constraint + +# 47.8.3 +def p_SingleTypeConstraint (t): + 'SingleTypeConstraint : Constraint' + t[0] = t[1] + +# 47.8.4 +def p_MultipleTypeConstraints (t): + '''MultipleTypeConstraints : FullSpecification + | PartialSpecification''' + t[0] = t[1] + +def p_FullSpecification (t): + 'FullSpecification : LBRACE TypeConstraints RBRACE' + t[0] = t[2] + +def p_PartialSpecification (t): + 'PartialSpecification : LBRACE ELLIPSIS COMMA TypeConstraints RBRACE' + t[0] = t[4] + +def p_TypeConstraints_1 (t): + 'TypeConstraints : named_constraint' + t [0] = [t[1]] + +def p_TypeConstraints_2 (t): + 'TypeConstraints : TypeConstraints COMMA named_constraint' + t[0] = t[1] + [t[3]] + +def p_named_constraint_1 (t): + 'named_constraint : identifier constraint' + return Node ('named_constraint', ident = t[1], constr = t[2]) + +def p_named_constraint_2 (t): + 'named_constraint : constraint' + return Node ('named_constraint', constr = t[1]) + +def p_constraint (t): + 'constraint : value_constraint presence_constraint' + t[0] = Node ('constraint', value = t[1], presence = t[2]) + +def p_value_constraint_1 (t): + 'value_constraint : Constraint' + t[0] = t[1] + +def p_value_constraint_2 (t): + 'value_constraint : ' + pass + +def p_presence_constraint_1 (t): + '''presence_constraint : PRESENT + | ABSENT + | OPTIONAL''' + t[0] = t[1] + +def p_presence_constraint_2 (t): + '''presence_constraint : ''' + pass + +# 47.9 Pattern constraint +# 47.9.1 +def p_PatternConstraint (t): + 'PatternConstraint : PATTERN Value' + t[0] = Constraint (type = 'Pattern', subtype = t[2]) + +# 49 The exception identifier + +# 49.4 +def p_ExceptionSpec_1 (t): + 'ExceptionSpec : EXCLAMATION ExceptionIdentification' + pass + +def p_ExceptionSpec_2 (t): + 'ExceptionSpec : ' + pass + +def p_ExceptionIdentification (t): + '''ExceptionIdentification : SignedNumber + | DefinedValue + | Type COLON Value ''' + pass + +# /*-----------------------------------------------------------------------*/ +# /* Value Notation Productions */ +# /*-----------------------------------------------------------------------*/ + + + +def p_binary_string (t): + 'binary_string : BSTRING' + t[0] = BStringValue(val = t[1]) + +def p_hex_string (t): + 'hex_string : HSTRING' + t[0] = HStringValue(val = t[1]) + +def p_char_string (t): + 'char_string : QSTRING' + t[0] = t[1] + +def p_number (t): + 'number : NUMBER' + t[0] = t[1] + + +#--- ITU-T Recommendation X.208 ----------------------------------------------- + +# 27 Notation for the any type ------------------------------------------------ + +# 27.1 +def p_AnyType (t): + '''AnyType : ANY + | ANY DEFINED BY identifier''' + t[0] = AnyType() + +#--- ITU-T Recommendation X.681 ----------------------------------------------- + +# 7 ASN.1 lexical items ------------------------------------------------------- + +# 7.1 Information object class references + +def p_objectclassreference (t): + 'objectclassreference : CLASS_IDENT' + t[0] = Class_Ref(val=t[1]) + +# 7.2 Information object references + +def p_objectreference (t): + 'objectreference : LCASE_IDENT' + t[0] = t[1] + +# 7.3 Information object set references + +#def p_objectsetreference (t): +# 'objectsetreference : UCASE_IDENT' +# t[0] = t[1] + +# 7.4 Type field references +# ucasefieldreference +# 7.5 Value field references +# lcasefieldreference +# 
7.6 Value set field references +# ucasefieldreference +# 7.7 Object field references +# lcasefieldreference +# 7.8 Object set field references +# ucasefieldreference + +def p_ucasefieldreference (t): + 'ucasefieldreference : AMPERSAND UCASE_IDENT' + t[0] = '&' + t[2] + +def p_lcasefieldreference (t): + 'lcasefieldreference : AMPERSAND LCASE_IDENT' + t[0] = '&' + t[2] + +# 8 Referencing definitions + +# 8.1 +def p_DefinedObjectClass (t): + '''DefinedObjectClass : objectclassreference + | UsefulObjectClassReference''' + t[0] = t[1] + global obj_class + obj_class = t[0].val + +def p_DefinedObject (t): + '''DefinedObject : objectreference''' + t[0] = t[1] + +# 8.4 +def p_UsefulObjectClassReference (t): + '''UsefulObjectClassReference : TYPE_IDENTIFIER + | ABSTRACT_SYNTAX''' + t[0] = Class_Ref(val=t[1]) + +# 9 Information object class definition and assignment + +# 9.1 +def p_ObjectClassAssignment (t): + '''ObjectClassAssignment : CLASS_IDENT ASSIGNMENT ObjectClass + | UCASE_IDENT ASSIGNMENT ObjectClass''' + t[0] = t[3] + t[0].SetName(t[1]) + if isinstance(t[0], ObjectClassDefn): + t[0].reg_types() + +# 9.2 +def p_ObjectClass (t): + '''ObjectClass : DefinedObjectClass + | ObjectClassDefn + | ParameterizedObjectClass ''' + t[0] = t[1] + +# 9.3 +def p_ObjectClassDefn (t): + '''ObjectClassDefn : CLASS LBRACE FieldSpecs RBRACE + | CLASS LBRACE FieldSpecs RBRACE WithSyntaxSpec''' + t[0] = ObjectClassDefn(fields = t[3]) + +def p_FieldSpecs_1 (t): + 'FieldSpecs : FieldSpec' + t[0] = [t[1]] + +def p_FieldSpecs_2 (t): + 'FieldSpecs : FieldSpecs COMMA FieldSpec' + t[0] = t[1] + [t[3]] + +def p_WithSyntaxSpec (t): + 'WithSyntaxSpec : WITH SYNTAX lbraceignore rbraceignore' + t[0] = None + +# 9.4 +def p_FieldSpec (t): + '''FieldSpec : TypeFieldSpec + | FixedTypeValueFieldSpec + | VariableTypeValueFieldSpec + | FixedTypeValueSetFieldSpec + | ObjectFieldSpec + | ObjectSetFieldSpec ''' + t[0] = t[1] + +# 9.5 +def p_TypeFieldSpec (t): + '''TypeFieldSpec : ucasefieldreference + | ucasefieldreference TypeOptionalitySpec ''' + t[0] = TypeFieldSpec() + t[0].SetName(t[1]) + +def p_TypeOptionalitySpec_1 (t): + 'TypeOptionalitySpec ::= OPTIONAL' + pass + +def p_TypeOptionalitySpec_2 (t): + 'TypeOptionalitySpec ::= DEFAULT Type' + pass + +# 9.6 +def p_FixedTypeValueFieldSpec (t): + '''FixedTypeValueFieldSpec : lcasefieldreference Type + | lcasefieldreference Type UNIQUE + | lcasefieldreference Type ValueOptionalitySpec + | lcasefieldreference Type UNIQUE ValueOptionalitySpec ''' + t[0] = FixedTypeValueFieldSpec(typ = t[2]) + t[0].SetName(t[1]) + +def p_ValueOptionalitySpec_1 (t): + 'ValueOptionalitySpec ::= OPTIONAL' + pass + +def p_ValueOptionalitySpec_2 (t): + 'ValueOptionalitySpec ::= DEFAULT Value' + pass + +# 9.8 + +def p_VariableTypeValueFieldSpec (t): + '''VariableTypeValueFieldSpec : lcasefieldreference FieldName + | lcasefieldreference FieldName ValueOptionalitySpec ''' + t[0] = VariableTypeValueFieldSpec() + t[0].SetName(t[1]) + +# 9.9 +def p_FixedTypeValueSetFieldSpec (t): + '''FixedTypeValueSetFieldSpec : ucasefieldreference Type + | ucasefieldreference Type ValueSetOptionalitySpec ''' + t[0] = FixedTypeValueSetFieldSpec() + t[0].SetName(t[1]) + +def p_ValueSetOptionalitySpec_1 (t): + 'ValueSetOptionalitySpec ::= OPTIONAL' + pass + +def p_ValueSetOptionalitySpec_2 (t): + 'ValueSetOptionalitySpec ::= DEFAULT ValueSet' + pass + +# 9.11 +def p_ObjectFieldSpec (t): + '''ObjectFieldSpec : lcasefieldreference DefinedObjectClass + | lcasefieldreference DefinedObjectClass ObjectOptionalitySpec ''' + t[0] = 
ObjectFieldSpec(cls=t[2]) + t[0].SetName(t[1]) + global obj_class + obj_class = None + +def p_ObjectOptionalitySpec_1 (t): + 'ObjectOptionalitySpec ::= OPTIONAL' + pass + +def p_ObjectOptionalitySpec_2 (t): + 'ObjectOptionalitySpec ::= DEFAULT Object' + pass + +# 9.12 +def p_ObjectSetFieldSpec (t): + '''ObjectSetFieldSpec : ucasefieldreference DefinedObjectClass + | ucasefieldreference DefinedObjectClass ObjectSetOptionalitySpec ''' + t[0] = ObjectSetFieldSpec(cls=t[2]) + t[0].SetName(t[1]) + +def p_ObjectSetOptionalitySpec_1 (t): + 'ObjectSetOptionalitySpec ::= OPTIONAL' + pass + +def p_ObjectSetOptionalitySpec_2 (t): + 'ObjectSetOptionalitySpec ::= DEFAULT ObjectSet' + pass + +# 9.13 +def p_PrimitiveFieldName (t): + '''PrimitiveFieldName : ucasefieldreference + | lcasefieldreference ''' + t[0] = t[1] + +# 9.13 +def p_FieldName_1 (t): + 'FieldName : PrimitiveFieldName' + t[0] = t[1] + +def p_FieldName_2 (t): + 'FieldName : FieldName DOT PrimitiveFieldName' + t[0] = t[1] + '.' + t[3] + +# 11 Information object definition and assignment + +# 11.1 +def p_ObjectAssignment (t): + 'ObjectAssignment : objectreference DefinedObjectClass ASSIGNMENT Object' + t[0] = ObjectAssignment (ident = t[1], cls=t[2].val, val=t[4]) + global obj_class + obj_class = None + +# 11.3 +def p_Object (t): + '''Object : DefinedObject + | ObjectDefn + | ParameterizedObject''' + t[0] = t[1] + +# 11.4 +def p_ObjectDefn (t): + 'ObjectDefn : lbraceobject bodyobject rbraceobject' + t[0] = t[2] + +# {...} block of object definition +def p_lbraceobject(t): + 'lbraceobject : braceobjectbegin LBRACE' + t[0] = t[1] + +def p_braceobjectbegin(t): + 'braceobjectbegin : ' + global lexer + global obj_class + if set_class_syntax(obj_class): + state = 'INITIAL' + else: + lexer.level = 1 + state = 'braceignore' + lexer.push_state(state) + +def p_rbraceobject(t): + 'rbraceobject : braceobjectend RBRACE' + t[0] = t[2] + +def p_braceobjectend(t): + 'braceobjectend : ' + global lexer + lexer.pop_state() + set_class_syntax(None) + +def p_bodyobject_1 (t): + 'bodyobject : ' + t[0] = { } + +def p_bodyobject_2 (t): + 'bodyobject : cls_syntax_list' + t[0] = t[1] + +def p_cls_syntax_list_1 (t): + 'cls_syntax_list : cls_syntax_list cls_syntax' + t[0] = t[1] + t[0].update(t[2]) + +def p_cls_syntax_list_2 (t): + 'cls_syntax_list : cls_syntax' + t[0] = t[1] + +# X.681 +def p_cls_syntax_1 (t): + 'cls_syntax : Type IDENTIFIED BY Value' + t[0] = { get_class_fieled(' ') : t[1], get_class_fieled(' '.join((t[2], t[3]))) : t[4] } + +def p_cls_syntax_2 (t): + 'cls_syntax : HAS PROPERTY Value' + t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] } + +# X.880 +def p_cls_syntax_3 (t): + '''cls_syntax : ERRORS ObjectSet + | LINKED ObjectSet + | RETURN RESULT BooleanValue + | SYNCHRONOUS BooleanValue + | INVOKE PRIORITY Value + | RESULT_PRIORITY Value + | PRIORITY Value + | ALWAYS RESPONDS BooleanValue + | IDEMPOTENT BooleanValue ''' + t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] } + +def p_cls_syntax_4 (t): + '''cls_syntax : ARGUMENT Type + | RESULT Type + | PARAMETER Type ''' + t[0] = { get_class_fieled(t[1]) : t[2] } + +def p_cls_syntax_5 (t): + 'cls_syntax : CODE Value' + fld = get_class_fieled(t[1]); + t[0] = { fld : t[2] } + if isinstance(t[2], ChoiceValue): + fldt = fld + '.' 
+ t[2].choice + t[0][fldt] = t[2] + +def p_cls_syntax_6 (t): + '''cls_syntax : ARGUMENT Type OPTIONAL BooleanValue + | RESULT Type OPTIONAL BooleanValue + | PARAMETER Type OPTIONAL BooleanValue ''' + t[0] = { get_class_fieled(t[1]) : t[2], get_class_fieled(' '.join((t[1], t[3]))) : t[4] } + +# 12 Information object set definition and assignment + +# 12.1 +def p_ObjectSetAssignment (t): + 'ObjectSetAssignment : UCASE_IDENT CLASS_IDENT ASSIGNMENT ObjectSet' + t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[2], val=t[4]) + +# 12.3 +def p_ObjectSet (t): + 'ObjectSet : lbraceignore rbraceignore' + t[0] = None + +# 14 Notation for the object class field type --------------------------------- + +# 14.1 +def p_ObjectClassFieldType (t): + 'ObjectClassFieldType : DefinedObjectClass DOT FieldName' + t[0] = get_type_from_class(t[1], t[3]) + +# 14.6 +def p_ObjectClassFieldValue (t): + '''ObjectClassFieldValue : OpenTypeFieldVal''' + t[0] = t[1] + +def p_OpenTypeFieldVal (t): + '''OpenTypeFieldVal : Type COLON Value + | NullType COLON NullValue''' + t[0] = t[3] + + +# 15 Information from objects ------------------------------------------------- + +# 15.1 + +def p_ValueFromObject (t): + 'ValueFromObject : LCASE_IDENT DOT FieldName' + t[0] = t[1] + '.' + t[3] + + +# Annex C - The instance-of type ---------------------------------------------- + +# C.2 +def p_InstanceOfType (t): + 'InstanceOfType : INSTANCE OF DefinedObjectClass' + t[0] = InstanceOfType() + + +# --- tables --- + +useful_object_class_types = { + # Annex A + 'TYPE-IDENTIFIER.&id' : lambda : ObjectIdentifierType(), + 'TYPE-IDENTIFIER.&Type' : lambda : OpenType(), + # Annex B + 'ABSTRACT-SYNTAX.&id' : lambda : ObjectIdentifierType(), + 'ABSTRACT-SYNTAX.&Type' : lambda : OpenType(), + 'ABSTRACT-SYNTAX.&property' : lambda : BitStringType(), +} + +object_class_types = { } + +object_class_typerefs = { } + +object_class_classrefs = { } + +# dummy types +class _VariableTypeValueFieldSpec (AnyType): + pass + +class _FixedTypeValueSetFieldSpec (AnyType): + pass + +class_types_creator = { + 'BooleanType' : lambda : BooleanType(), + 'IntegerType' : lambda : IntegerType(), + 'ObjectIdentifierType' : lambda : ObjectIdentifierType(), + 'OpenType' : lambda : OpenType(), + # dummy types + '_VariableTypeValueFieldSpec' : lambda : _VariableTypeValueFieldSpec(), + '_FixedTypeValueSetFieldSpec' : lambda : _FixedTypeValueSetFieldSpec(), +} + +class_names = { } + +x681_syntaxes = { + 'TYPE-IDENTIFIER' : { + ' ' : '&Type', + 'IDENTIFIED' : 'IDENTIFIED', + #'BY' : 'BY', + 'IDENTIFIED BY' : '&id', + }, + 'ABSTRACT-SYNTAX' : { + ' ' : '&Type', + 'IDENTIFIED' : 'IDENTIFIED', + #'BY' : 'BY', + 'IDENTIFIED BY' : '&id', + 'HAS' : 'HAS', + 'PROPERTY' : 'PROPERTY', + 'HAS PROPERTY' : '&property', + }, +} + +class_syntaxes_enabled = { + 'TYPE-IDENTIFIER' : True, + 'ABSTRACT-SYNTAX' : True, +} + +class_syntaxes = { + 'TYPE-IDENTIFIER' : x681_syntaxes['TYPE-IDENTIFIER'], + 'ABSTRACT-SYNTAX' : x681_syntaxes['ABSTRACT-SYNTAX'], +} + +class_current_syntax = None + +def get_syntax_tokens(syntaxes): + tokens = { } + for s in (syntaxes): + for k in (list(syntaxes[s].keys())): + if k.find(' ') < 0: + tokens[k] = k + tokens[k] = tokens[k].replace('-', '_') + return list(tokens.values()) + +tokens = tokens + get_syntax_tokens(x681_syntaxes) + +def set_class_syntax(syntax): + global class_syntaxes_enabled + global class_current_syntax + #print "set_class_syntax", syntax, class_current_syntax + if class_syntaxes_enabled.get(syntax, False): + class_current_syntax = syntax + return 
True + else: + class_current_syntax = None + return False + +def is_class_syntax(name): + global class_syntaxes + global class_current_syntax + #print "is_class_syntax", name, class_current_syntax + if not class_current_syntax: + return False + return name in class_syntaxes[class_current_syntax] + +def get_class_fieled(name): + if not class_current_syntax: + return None + return class_syntaxes[class_current_syntax][name] + +def is_class_ident(name): + return name in class_names + +def add_class_ident(name): + #print "add_class_ident", name + class_names[name] = name + +def get_type_from_class(cls, fld): + flds = fld.split('.') + if (isinstance(cls, Class_Ref)): + key = cls.val + '.' + flds[0] + else: + key = cls + '.' + flds[0] + + if key in object_class_classrefs: + return get_type_from_class(object_class_classrefs[key], '.'.join(flds[1:])) + + if key in object_class_typerefs: + return Type_Ref(val=object_class_typerefs[key]) + + creator = lambda : AnyType() + creator = useful_object_class_types.get(key, creator) + creator = object_class_types.get(key, creator) + return creator() + +def set_type_to_class(cls, fld, pars): + #print "set_type_to_class", cls, fld, pars + key = cls + '.' + fld + typename = 'OpenType' + if (len(pars) > 0): + typename = pars[0] + else: + pars.append(typename) + typeref = None + if (len(pars) > 1): + if (isinstance(pars[1], Class_Ref)): + pars[1] = pars[1].val + typeref = pars[1] + + msg = None + if key in object_class_types: + msg = object_class_types[key]().type + if key in object_class_typerefs: + msg = "TypeReference " + object_class_typerefs[key] + if key in object_class_classrefs: + msg = "ClassReference " + object_class_classrefs[key] + + if msg == ' '.join(pars): + msg = None + + if msg: + msg0 = "Can not define CLASS field %s as '%s'\n" % (key, ' '.join(pars)) + msg1 = "Already defined as '%s'" % (msg) + raise CompError(msg0 + msg1) + + if (typename == 'ClassReference'): + if not typeref: return False + object_class_classrefs[key] = typeref + return True + + if (typename == 'TypeReference'): + if not typeref: return False + object_class_typerefs[key] = typeref + return True + + creator = class_types_creator.get(typename) + if creator: + object_class_types[key] = creator + return True + else: + return False + +def import_class_from_module(mod, cls): + add_class_ident(cls) + mcls = "$%s$%s" % (mod, cls) + for k in list(object_class_classrefs.keys()): + kk = k.split('.', 1) + if kk[0] == mcls: + object_class_classrefs[cls + '.' + kk[0]] = object_class_classrefs[k] + for k in list(object_class_typerefs.keys()): + kk = k.split('.', 1) + if kk[0] == mcls: + object_class_typerefs[cls + '.' + kk[0]] = object_class_typerefs[k] + for k in list(object_class_types.keys()): + kk = k.split('.', 1) + if kk[0] == mcls: + object_class_types[cls + '.' 
+ kk[0]] = object_class_types[k] + +#--- ITU-T Recommendation X.682 ----------------------------------------------- + +# 8 General constraint specification ------------------------------------------ + +# 8.1 +def p_GeneralConstraint (t): + '''GeneralConstraint : UserDefinedConstraint + | TableConstraint + | ContentsConstraint''' + t[0] = t[1] + +# 9 User-defined constraints -------------------------------------------------- + +# 9.1 +def p_UserDefinedConstraint (t): + 'UserDefinedConstraint : CONSTRAINED BY LBRACE UserDefinedConstraintParameterList RBRACE' + t[0] = Constraint(type = 'UserDefined', subtype = t[4]) + +def p_UserDefinedConstraintParameterList_1 (t): + 'UserDefinedConstraintParameterList : ' + t[0] = [] + +def p_UserDefinedConstraintParameterList_2 (t): + 'UserDefinedConstraintParameterList : UserDefinedConstraintParameter' + t[0] = [t[1]] + +def p_UserDefinedConstraintParameterList_3 (t): + 'UserDefinedConstraintParameterList : UserDefinedConstraintParameterList COMMA UserDefinedConstraintParameter' + t[0] = t[1] + [t[3]] + +# 9.3 +def p_UserDefinedConstraintParameter (t): + 'UserDefinedConstraintParameter : Type' + t[0] = t[1] + +# 10 Table constraints, including component relation constraints -------------- + +# 10.3 +def p_TableConstraint (t): + '''TableConstraint : SimpleTableConstraint + | ComponentRelationConstraint''' + t[0] = Constraint(type = 'Table', subtype = t[1]) + +def p_SimpleTableConstraint (t): + 'SimpleTableConstraint : LBRACE UCASE_IDENT RBRACE' + t[0] = t[2] + +# 10.7 +def p_ComponentRelationConstraint (t): + 'ComponentRelationConstraint : LBRACE UCASE_IDENT RBRACE LBRACE AtNotations RBRACE' + t[0] = t[2] + str(t[5]) + +def p_AtNotations_1 (t): + 'AtNotations : AtNotation' + t[0] = [t[1]] + +def p_AtNotations_2 (t): + 'AtNotations : AtNotations COMMA AtNotation' + t[0] = t[1] + [t[3]] + +def p_AtNotation_1 (t): + 'AtNotation : AT ComponentIdList' + t[0] = '@' + t[2] + +def p_AtNotation_2 (t): + 'AtNotation : AT DOT Level ComponentIdList' + t[0] = '@.' + t[3] + t[4] + +def p_Level_1 (t): + 'Level : DOT Level' + t[0] = '.' + t[2] + +def p_Level_2 (t): + 'Level : ' + t[0] = '' + +def p_ComponentIdList_1 (t): + 'ComponentIdList : LCASE_IDENT' + t[0] = t[1] + +def p_ComponentIdList_2 (t): + 'ComponentIdList : ComponentIdList DOT LCASE_IDENT' + t[0] = t[1] + '.' 
+ t[3] + +# 11 Contents constraints ----------------------------------------------------- + +# 11.1 +def p_ContentsConstraint (t): + 'ContentsConstraint : CONTAINING type_ref' + t[0] = Constraint(type = 'Contents', subtype = t[2]) + + +#--- ITU-T Recommendation X.683 ----------------------------------------------- + +# 8 Parameterized assignments ------------------------------------------------- + +# 8.1 +def p_ParameterizedAssignment (t): + '''ParameterizedAssignment : ParameterizedTypeAssignment + | ParameterizedObjectClassAssignment + | ParameterizedObjectAssignment + | ParameterizedObjectSetAssignment''' + t[0] = t[1] + +# 8.2 +def p_ParameterizedTypeAssignment (t): + 'ParameterizedTypeAssignment : UCASE_IDENT ParameterList ASSIGNMENT Type' + t[0] = t[4] + t[0].SetName(t[1]) # t[0].SetName(t[1] + 'xxx') + +def p_ParameterizedObjectClassAssignment (t): + '''ParameterizedObjectClassAssignment : CLASS_IDENT ParameterList ASSIGNMENT ObjectClass + | UCASE_IDENT ParameterList ASSIGNMENT ObjectClass''' + t[0] = t[4] + t[0].SetName(t[1]) + if isinstance(t[0], ObjectClassDefn): + t[0].reg_types() + +def p_ParameterizedObjectAssignment (t): + 'ParameterizedObjectAssignment : objectreference ParameterList DefinedObjectClass ASSIGNMENT Object' + t[0] = ObjectAssignment (ident = t[1], cls=t[3].val, val=t[5]) + global obj_class + obj_class = None + +def p_ParameterizedObjectSetAssignment (t): + 'ParameterizedObjectSetAssignment : UCASE_IDENT ParameterList DefinedObjectClass ASSIGNMENT ObjectSet' + t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[3].val, val=t[5]) + +# 8.3 +def p_ParameterList (t): + 'ParameterList : lbraceignore rbraceignore' + +#def p_ParameterList (t): +# 'ParameterList : LBRACE Parameters RBRACE' +# t[0] = t[2] + +#def p_Parameters_1 (t): +# 'Parameters : Parameter' +# t[0] = [t[1]] + +#def p_Parameters_2 (t): +# 'Parameters : Parameters COMMA Parameter' +# t[0] = t[1] + [t[3]] + +#def p_Parameter_1 (t): +# 'Parameter : Type COLON Reference' +# t[0] = [t[1], t[3]] + +#def p_Parameter_2 (t): +# 'Parameter : Reference' +# t[0] = t[1] + + +# 9 Referencing parameterized definitions ------------------------------------- + +# 9.1 +def p_ParameterizedReference (t): + 'ParameterizedReference : Reference LBRACE RBRACE' + t[0] = t[1] + #t[0].val += 'xxx' + +# 9.2 +def p_ParameterizedType (t): + 'ParameterizedType : type_ref ActualParameterList' + t[0] = t[1] + #t[0].val += 'xxx' + + +def p_ParameterizedObjectClass (t): + 'ParameterizedObjectClass : DefinedObjectClass ActualParameterList' + t[0] = t[1] + #t[0].val += 'xxx' + +def p_ParameterizedObject (t): + 'ParameterizedObject : DefinedObject ActualParameterList' + t[0] = t[1] + #t[0].val += 'xxx' + +# 9.5 +def p_ActualParameterList (t): + 'ActualParameterList : lbraceignore rbraceignore' + +#def p_ActualParameterList (t): +# 'ActualParameterList : LBRACE ActualParameters RBRACE' +# t[0] = t[2] + +#def p_ActualParameters_1 (t): +# 'ActualParameters : ActualParameter' +# t[0] = [t[1]] + +#def p_ActualParameters_2 (t): +# 'ActualParameters : ActualParameters COMMA ActualParameter' +# t[0] = t[1] + [t[3]] + +#def p_ActualParameter (t): +# '''ActualParameter : Type +# | Value''' +# t[0] = t[1] + + +#--- ITU-T Recommendation X.880 ----------------------------------------------- + +x880_classes = { + 'OPERATION' : { + '&ArgumentType' : [], + '&argumentTypeOptional' : [ 'BooleanType' ], + '&returnResult' : [ 'BooleanType' ], + '&ResultType' : [], + '&resultTypeOptional' : [ 'BooleanType' ], + '&Errors' : [ 'ClassReference', 'ERROR' ], + 
'&Linked' : [ 'ClassReference', 'OPERATION' ],
+    '&synchronous' : [ 'BooleanType' ],
+    '&idempotent' : [ 'BooleanType' ],
+    '&alwaysReturns' : [ 'BooleanType' ],
+    '&InvokePriority' : [ '_FixedTypeValueSetFieldSpec' ],
+    '&ResultPriority' : [ '_FixedTypeValueSetFieldSpec' ],
+    '&operationCode' : [ 'TypeReference', 'Code' ],
+  },
+  'ERROR' : {
+    '&ParameterType' : [],
+    '&parameterTypeOptional' : [ 'BooleanType' ],
+    '&ErrorPriority' : [ '_FixedTypeValueSetFieldSpec' ],
+    '&errorCode' : [ 'TypeReference', 'Code' ],
+  },
+  'OPERATION-PACKAGE' : {
+    '&Both' : [ 'ClassReference', 'OPERATION' ],
+    '&Consumer' : [ 'ClassReference', 'OPERATION' ],
+    '&Supplier' : [ 'ClassReference', 'OPERATION' ],
+    '&id' : [ 'ObjectIdentifierType' ],
+  },
+  'CONNECTION-PACKAGE' : {
+    '&bind' : [ 'ClassReference', 'OPERATION' ],
+    '&unbind' : [ 'ClassReference', 'OPERATION' ],
+    '&responderCanUnbind' : [ 'BooleanType' ],
+    '&unbindCanFail' : [ 'BooleanType' ],
+    '&id' : [ 'ObjectIdentifierType' ],
+  },
+  'CONTRACT' : {
+    '&connection' : [ 'ClassReference', 'CONNECTION-PACKAGE' ],
+    '&OperationsOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
+    '&InitiatorConsumerOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
+    '&InitiatorSupplierOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
+    '&id' : [ 'ObjectIdentifierType' ],
+  },
+  'ROS-OBJECT-CLASS' : {
+    '&Is' : [ 'ClassReference', 'ROS-OBJECT-CLASS' ],
+    '&Initiates' : [ 'ClassReference', 'CONTRACT' ],
+    '&Responds' : [ 'ClassReference', 'CONTRACT' ],
+    '&InitiatesAndResponds' : [ 'ClassReference', 'CONTRACT' ],
+    '&id' : [ 'ObjectIdentifierType' ],
+  },
+}
+
+x880_syntaxes = {
+  'OPERATION' : {
+    'ARGUMENT' : '&ArgumentType',
+    'ARGUMENT OPTIONAL' : '&argumentTypeOptional',
+    'RESULT' : '&ResultType',
+    'RESULT OPTIONAL' : '&resultTypeOptional',
+    'RETURN' : 'RETURN',
+    'RETURN RESULT' : '&returnResult',
+    'ERRORS' : '&Errors',
+    'LINKED' : '&Linked',
+    'SYNCHRONOUS' : '&synchronous',
+    'IDEMPOTENT' : '&idempotent',
+    'ALWAYS' : 'ALWAYS',
+    'RESPONDS' : 'RESPONDS',
+    'ALWAYS RESPONDS' : '&alwaysReturns',
+    'INVOKE' : 'INVOKE',
+    'PRIORITY' : 'PRIORITY',
+    'INVOKE PRIORITY' : '&InvokePriority',
+    'RESULT-PRIORITY': '&ResultPriority',
+    'CODE' : '&operationCode',
+  },
+  'ERROR' : {
+    'PARAMETER' : '&ParameterType',
+    'PARAMETER OPTIONAL' : '&parameterTypeOptional',
+    'PRIORITY' : '&ErrorPriority',
+    'CODE' : '&errorCode',
+  },
+#  'OPERATION-PACKAGE' : {
+#  },
+#  'CONNECTION-PACKAGE' : {
+#  },
+#  'CONTRACT' : {
+#  },
+#  'ROS-OBJECT-CLASS' : {
+#  },
+}
+
+def x880_module_begin():
+    #print "x880_module_begin()"
+    for name in list(x880_classes.keys()):
+        add_class_ident(name)
+
+def x880_import(name):
+    if name in x880_syntaxes:
+        class_syntaxes_enabled[name] = True
+        class_syntaxes[name] = x880_syntaxes[name]
+    if name in x880_classes:
+        add_class_ident(name)
+        for f in (list(x880_classes[name].keys())):
+            set_type_to_class(name, f, x880_classes[name][f])
+
+tokens = tokens + get_syntax_tokens(x880_syntaxes)
+
+# {...} OID value
+#def p_lbrace_oid(t):
+#    'lbrace_oid : brace_oid_begin LBRACE'
+#    t[0] = t[1]
+
+#def p_brace_oid_begin(t):
+#    'brace_oid_begin : '
+#    global in_oid
+#    in_oid = True
+
+#def p_rbrace_oid(t):
+#    'rbrace_oid : brace_oid_end RBRACE'
+#    t[0] = t[2]
+
+#def p_brace_oid_end(t):
+#    'brace_oid_end : '
+#    global in_oid
+#    in_oid = False
+
+# {...} block to be ignored
+def p_lbraceignore(t):
+    'lbraceignore : braceignorebegin LBRACE'
+    t[0] = t[1]
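+
+# Used by ParameterList and ActualParameterList above: 'braceignorebegin'
+# switches the lexer into its 'braceignore' state, so the whole {...} block
+# is swallowed and only the surrounding grammar is analysed.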
+def p_braceignorebegin(t):
+    'braceignorebegin : '
+    global lexer
+    lexer.level = 1
+    lexer.push_state('braceignore')
+
+def p_rbraceignore(t):
+    'rbraceignore : braceignoreend RBRACE'
+    t[0] = t[2]
+
+def p_braceignoreend(t):
+    'braceignoreend : '
+    global lexer
+    lexer.pop_state()
+
+def p_error(t):
+    global input_file
+    raise ParseError(t, input_file)
+
+def p_pyquote (t):
+    '''pyquote : PYQUOTE'''
+    t[0] = PyQuote (val = t[1])
+
+
+def testlex (s):
+    lexer.input (s)
+    while True:
+        token = lexer.token ()
+        if not token:
+            break
+        print(token)
+
+
+def do_module (ast, defined_dict):
+    assert (ast.type == 'Module')
+    ctx = Ctx (defined_dict)
+    print(ast.to_python (ctx))
+    print(ctx.output_assignments ())
+    print(ctx.output_pyquotes ())
+
+def eth_do_module (ast, ectx):
+    assert (ast.type == 'Module')
+    if ectx.dbg('s'): print(ast.str_depth(0))
+    ast.to_eth(ectx)
+
+def testyacc(s, fn, defined_dict):
+    ast = yacc.parse(s, debug=0)
+    time_str = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
+    print("""#!/usr/bin/env python
+# Auto-generated from %s at %s
+from PyZ3950 import asn1""" % (fn, time_str))
+    for module in ast:
+        eth_do_module (module, defined_dict)
+
+
+# Wireshark compiler
+def eth_usage():
+    print("""
+  asn2wrs [-h|?] [-d dbg] [-b] [-p proto] [-c cnf_file] [-e] input_file(s) ...
+  -h|?          : Usage
+  -b            : BER (default is PER)
+  -u            : Unaligned (default is aligned)
+  -p proto      : Protocol name (implies -S). Default is module-name
+                  from input_file (renamed by #.MODULE if present)
+  -o name       : Output files name core (default is <proto>)
+  -O dir        : Output directory for dissector
+  -c cnf_file   : Conformance file
+  -I path       : Path for conformance file includes
+  -e            : Create conformance file for exported types
+  -E            : Just create conformance file for exported types
+  -S            : Single output for multiple modules
+  -s template   : Single file output (template is input file
+                  without .c/.h extension)
+  -k            : Keep intermediate files even though single file output is used
+  -L            : Suppress #line directive from .cnf file
+  -D dir        : Directory for input_file(s) (default: '.')
+  -C            : Add check for SIZE constraints
+  -r prefix     : Remove the prefix from type names
+
+  input_file(s) : Input ASN.1 file(s)
+
+  -d dbg        : Debug output, dbg = [l][y][p][s][a][t][c][m][o]
+                  l - lex
+                  y - yacc
+                  p - parsing
+                  s - internal ASN.1 structure
+                  a - list of assignments
+                  t - tables
+                  c - conformance values
+                  m - list of compiled modules with dependency
+                  o - list of output files
+  """)
+
+
+## Used to preparse C style comments
+## https://github.com/eerimoq/asn1tools/blob/master/asn1tools/parser.py#L231
+##
+def ignore_comments(string):
+    """Ignore comments in given string by replacing them with spaces. This
+    reduces the parsing time by roughly a factor of two.
+
+    """
+
+    comments = [
+        (mo.start(), mo.group(0))
+        for mo in re.finditer(r'(/\*|\*/|\n)', string)
+    ]
+
+    comments.sort()
+
+    multi_line_comment_depth = 0
+    start_offset = 0
+    non_comment_offset = 0
+    chunks = []
+
+    for offset, kind in comments:
+        if multi_line_comment_depth > 0:
+            if kind == '/*':
+                multi_line_comment_depth += 1
+            elif kind == '*/':
+                multi_line_comment_depth -= 1
+
+                if multi_line_comment_depth == 0:
+                    offset += 2
+                    chunks.append(' ' * (offset - start_offset))
+                    non_comment_offset = offset
+        elif kind == '\n':
+            chunks.append('\n')
+            non_comment_offset = offset
+        elif kind == '/*':
+            multi_line_comment_depth = 1
+            start_offset = offset
+            chunks.append(string[non_comment_offset:start_offset])
+
+    chunks.append(string[non_comment_offset:])
+
+    return ''.join(chunks)
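+
+# Example: comment spans are blanked out while all offsets are preserved, e.g.
+#   ignore_comments('FOO /* hidden */ BAR') == 'FOO ' + ' ' * 12 + ' BAR'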
+ + """ + + comments = [ + (mo.start(), mo.group(0)) + for mo in re.finditer(r'(/\*|\*/|\n)', string) + ] + + comments.sort() + + multi_line_comment_depth = 0 + start_offset = 0 + non_comment_offset = 0 + chunks = [] + + for offset, kind in comments: + if multi_line_comment_depth > 0: + if kind == '/*': + multi_line_comment_depth += 1 + elif kind == '*/': + multi_line_comment_depth -= 1 + + if multi_line_comment_depth == 0: + offset += 2 + chunks.append(' ' * (offset - start_offset)) + non_comment_offset = offset + elif kind == '\n': + chunks.append('\n') + non_comment_offset = offset + elif kind == '/*': + multi_line_comment_depth = 1 + start_offset = offset + chunks.append(string[non_comment_offset:start_offset]) + + chunks.append(string[non_comment_offset:]) + + return ''.join(chunks) + +def eth_main(): + global input_file + global g_conform + global lexer + print("ASN.1 to Wireshark dissector compiler"); + try: + opts, args = getopt.getopt(sys.argv[1:], "h?d:D:buXp:FTo:O:c:I:eESs:kLCr:"); + except getopt.GetoptError: + eth_usage(); sys.exit(2) + if len(args) < 1: + eth_usage(); sys.exit(2) + + conform = EthCnf() + conf_to_read = None + output = EthOut() + ectx = EthCtx(conform, output) + ectx.encoding = 'per' + ectx.proto_opt = None + ectx.fld_opt = {} + ectx.tag_opt = False + ectx.outnm_opt = None + ectx.aligned = True + ectx.dbgopt = '' + ectx.new = True + ectx.expcnf = False + ectx.justexpcnf = False + ectx.merge_modules = False + ectx.group_by_prot = False + ectx.conform.last_group = 0 + ectx.conform.suppress_line = False; + ectx.output.outnm = None + ectx.output.single_file = None + ectx.constraints_check = False; + for o, a in opts: + if o in ("-h", "-?"): + eth_usage(); sys.exit(2) + if o in ("-c",): + conf_to_read = relpath(a) + if o in ("-I",): + ectx.conform.include_path.append(relpath(a)) + if o in ("-E",): + ectx.expcnf = True + ectx.justexpcnf = True + if o in ("-D",): + ectx.srcdir = relpath(a) + if o in ("-C",): + ectx.constraints_check = True + if o in ("-L",): + ectx.suppress_line = True + if o in ("-X",): + warnings.warn("Command line option -X is obsolete and can be removed") + if o in ("-T",): + warnings.warn("Command line option -T is obsolete and can be removed") + + if conf_to_read: + ectx.conform.read(conf_to_read) + + for o, a in opts: + if o in ("-h", "-?", "-c", "-I", "-E", "-D", "-C", "-X", "-T"): + pass # already processed + else: + par = [] + if a: par.append(a) + ectx.conform.set_opt(o, par, "commandline", 0) + + (ld, yd, pd) = (0, 0, 0); + if ectx.dbg('l'): ld = 1 + if ectx.dbg('y'): yd = 1 + if ectx.dbg('p'): pd = 2 + lexer = lex.lex(debug=ld) + parser = yacc.yacc(method='LALR', debug=yd, outputdir='.') + parser.defaulted_states = {} + g_conform = ectx.conform + ast = [] + for fn in args: + input_file = fn + lexer.lineno = 1 + if (ectx.srcdir): fn = ectx.srcdir + '/' + fn + # Read ASN.1 definition, trying one of the common encodings. + data = open(fn, "rb").read() + for encoding in ('utf-8', 'windows-1252'): + try: + data = data.decode(encoding) + break + except Exception: + warnings.warn_explicit("Decoding %s as %s failed, trying next." 
+        # Py2 compat, name.translate in eth_output_hf_arr fails with unicode
+        if not isinstance(data, str):
+            data = data.encode('utf-8')
+        data = ignore_comments(data)
+        ast.extend(yacc.parse(data, lexer=lexer, debug=pd))
+    ectx.eth_clean()
+    if (ectx.merge_modules): # common output for all modules
+        ectx.eth_clean()
+        for module in ast:
+            eth_do_module(module, ectx)
+        ectx.eth_prepare()
+        ectx.eth_do_output()
+    elif (ectx.groups()): # group by protocols/group
+        groups = []
+        pr2gr = {}
+        if (ectx.group_by_prot): # group by protocols
+            for module in ast:
+                prot = module.get_proto(ectx)
+                if prot not in pr2gr:
+                    pr2gr[prot] = len(groups)
+                    groups.append([])
+                groups[pr2gr[prot]].append(module)
+        else: # group by groups
+            pass
+        for gm in (groups):
+            ectx.eth_clean()
+            for module in gm:
+                eth_do_module(module, ectx)
+            ectx.eth_prepare()
+            ectx.eth_do_output()
+    else: # output for each module
+        for module in ast:
+            ectx.eth_clean()
+            eth_do_module(module, ectx)
+            ectx.eth_prepare()
+            ectx.eth_do_output()
+
+    if ectx.dbg('m'):
+        ectx.dbg_modules()
+
+    if ectx.dbg('c'):
+        ectx.conform.dbg_print()
+    if not ectx.justexpcnf:
+        ectx.conform.unused_report()
+
+    if ectx.dbg('o'):
+        ectx.output.dbg_print()
+    ectx.output.make_single_file(ectx.suppress_line)
+
+
+# Python compiler
+def main():
+    if sys.version_info[0] < 3:
+        print("This requires Python 3")
+        sys.exit(2)
+
+    testfn = testyacc
+    if len (sys.argv) == 1:
+        while True:
+            s = input ('Query: ')
+            if len (s) == 0:
+                break
+            testfn (s, 'console', {})
+    else:
+        defined_dict = {}
+        for fn in sys.argv [1:]:
+            f = open (fn, "r")
+            testfn (f.read (), fn, defined_dict)
+            f.close ()
+            lexer.lineno = 1
+
+
+#--- BODY ---------------------------------------------------------------------
+
+if __name__ == '__main__':
+    if (os.path.splitext(os.path.basename(sys.argv[0]))[0].lower() in ('asn2wrs', 'asn2eth')):
+        eth_main()
+    else:
+        main()
+
+#------------------------------------------------------------------------------
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# c-basic-offset: 4; tab-width: 8; indent-tabs-mode: nil
+# vi: set shiftwidth=4 tabstop=8 expandtab:
+# :indentSize=4:tabSize=8:noTabs=true:
diff --git a/tools/asterix/README.md b/tools/asterix/README.md
new file mode 100644
index 0000000..d7b2101
--- /dev/null
+++ b/tools/asterix/README.md
@@ -0,0 +1,51 @@
+# Asterix parser generator
+
+*Asterix* is a set of standards, where each standard is defined
+as a so-called *asterix category*.
+In addition, each *asterix category* is potentially released
+in a number of editions. There is no guarantee of backward
+compatibility between editions.
+
+The structured version of the asterix specifications is maintained
+in a separate project:
+<https://zoranbosnjak.github.io/asterix-specs>
+
+The purpose of this directory is to convert the structured
+specifications (json format) into the `epan/dissectors/packet-asterix.c` file,
+which is the actual asterix parser for this project.
+
+It is important **NOT** to edit the `epan/dissectors/packet-asterix.c` file
+manually, since this file is automatically generated.
+
+## Manual update procedure
+
+To sync with the upstream asterix specifications, run:
+
+```bash
+# show current upstream git revision (for reference)
+export ASTERIX_SPECS_REV=$(./tools/asterix/update-specs.py --reference)
+echo $ASTERIX_SPECS_REV
+
+# update asterix decoder
+./tools/asterix/update-specs.py > epan/dissectors/packet-asterix.c
+git add epan/dissectors/packet-asterix.c
+
+# inspect change, rebuild project, test...
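+
+# optionally, sanity-check the staged regeneration before committing:
+git diff --cached --stat epan/dissectors/packet-asterix.c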
+
+# commit change, with reference to upstream version
+git commit -m "asterix: Sync with asterix-specs #$ASTERIX_SPECS_REV"
+```
+
+## Automatic update procedure
+
+To integrate asterix updates into a periodic (GitLab CI) job, use the `--update` option.
+For example:
+
+```
+...
+# Asterix categories.
+- ./tools/asterix/update-specs.py --update || echo "asterix failed." >> commit-message.txt
+- COMMIT_FILES+=("epan/dissectors/packet-asterix.c")
+...
+```
+
diff --git a/tools/asterix/packet-asterix-template.c b/tools/asterix/packet-asterix-template.c
new file mode 100644
index 0000000..e655cfd
--- /dev/null
+++ b/tools/asterix/packet-asterix-template.c
@@ -0,0 +1,867 @@
+/*
+
+Notice:
+
+
+This file is auto generated, do not edit!
+See tools/asterix/README.md for details.
+
+
+Data source:
+---{gitrev}---
+
+
+*/
+
+/* packet-asterix.c
+ * Routines for ASTERIX decoding
+ * By Marko Hrastovec
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+/*
+ * ASTERIX (All-purpose structured EUROCONTROL surveillance
+ * information exchange) is a protocol related to air traffic control.
+ *
+ * The specifications can be downloaded from
+ * http://www.eurocontrol.int/services/asterix
+ */
+
+#include <config.h>
+
+#include <wsutil/bits_ctz.h>
+
+#include <epan/packet.h>
+#include <epan/prefs.h>
+#include <epan/proto_data.h>
+
+void proto_register_asterix(void);
+void proto_reg_handoff_asterix(void);
+
+#define PROTO_TAG_ASTERIX "ASTERIX"
+#define ASTERIX_PORT 8600
+
+#define MAX_DISSECT_STR 1024
+#define MAX_BUFFER 256
+
+static int proto_asterix = -1;
+
+static int hf_asterix_category = -1;
+static int hf_asterix_length = -1;
+static int hf_asterix_message = -1;
+static int hf_asterix_fspec = -1;
+static int hf_re_field_len = -1;
+static int hf_spare = -1;
+static int hf_counter = -1;
+static int hf_XXX_FX = -1;
+
+static int ett_asterix = -1;
+static int ett_asterix_category = -1;
+static int ett_asterix_length = -1;
+static int ett_asterix_message = -1;
+static int ett_asterix_subtree = -1;
+
+static dissector_handle_t asterix_handle;
+/* The following defines tell us how to decode the length of
+ * fields and how to construct their display structure */
+#define FIXED          1
+#define REPETITIVE     2
+#define FX             3
+/*#define FX_1           4*/
+/*#define RE             5*/
+#define COMPOUND       6
+/*#define SP             7*/
+/*#define FX_UAP         8*/
+#define EXP            9    /* Explicit (RE or SP) */
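+
+/* For example, a REPETITIVE field starts with a repetition counter in its
+ * first repetition_counter_size bytes, followed by that many fixed-size
+ * items, while an FX field keeps extending for as long as the least
+ * significant bit of its last octet is set (see asterix_field_length()). */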
+
+/* The following defines tell us how to
+ * decode and display individual fields. */
+#define FIELD_PART_INT      0
+#define FIELD_PART_UINT     1
+#define FIELD_PART_FLOAT    2
+#define FIELD_PART_UFLOAT   3
+#define FIELD_PART_SQUAWK   4
+#define FIELD_PART_CALLSIGN 5
+#define FIELD_PART_ASCII    6
+#define FIELD_PART_FX       7
+#define FIELD_PART_HEX      8
+#define FIELD_PART_IAS_IM   9
+#define FIELD_PART_IAS_ASPD 10
+
+typedef struct FieldPart_s FieldPart;
+struct FieldPart_s {
+    uint16_t    bit_length;     /* length of field in bits */
+    double      scaling_factor; /* scaling factor of the field (for instance: 1/128) */
+    uint8_t     type;           /* Pre-defined type for proper presentation */
+    int        *hf;             /* Pointer to hf representing this kind of data */
+    const char *format_string;  /* format string for showing float values */
+};
+
+DIAG_OFF_PEDANTIC
+typedef struct AsterixField_s AsterixField;
+struct AsterixField_s {
+    uint8_t             type;                    /* type of field */
+    unsigned            length;                  /* fixed length */
+    unsigned            repetition_counter_size; /* size of repetition counter, length of one item is in length */
+    unsigned            header_length;           /* the size is in first header_length bytes of the field */
+    int                *hf;                      /* pointer to Wireshark hf_register_info */
+    const FieldPart   **part;                    /* see the declaration and description of FieldPart above */
+    const AsterixField *field[];                 /* subfields */
+};
+DIAG_ON_PEDANTIC
+
+static void dissect_asterix_packet (tvbuff_t *, packet_info *pinfo, proto_tree *);
+static void dissect_asterix_data_block (tvbuff_t *tvb, packet_info *pinfo, unsigned, proto_tree *, uint8_t, int);
+static int dissect_asterix_fields (tvbuff_t *, packet_info *pinfo, unsigned, proto_tree *, uint8_t, const AsterixField *[]);
+
+static void asterix_build_subtree (tvbuff_t *, packet_info *pinfo, unsigned, proto_tree *, const AsterixField *);
+static void twos_complement (int64_t *, int);
+static uint8_t asterix_bit (uint8_t, uint8_t);
+static unsigned asterix_fspec_len (tvbuff_t *, unsigned);
+static uint8_t asterix_field_exists (tvbuff_t *, unsigned, int);
+static uint8_t asterix_get_active_uap (tvbuff_t *, unsigned, uint8_t);
+static int asterix_field_length (tvbuff_t *, unsigned, const AsterixField *);
+static int asterix_field_offset (tvbuff_t *, unsigned, const AsterixField *[], int);
+static int asterix_message_length (tvbuff_t *, unsigned, uint8_t, uint8_t);
+
+static const char AISCode[] = { ' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
+                                'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' ', ' ', ' ', ' ', ' ',
+                                ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
+                                '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ', ' ', ' ', ' ', ' ', ' ' };
+
+static const value_string valstr_XXX_FX[] = {
+    { 0, "End of data item" },
+    { 1, "Extension into next extent" },
+    { 0, NULL }
+};
+static const FieldPart IXXX_FX         = { 1, 1.0, FIELD_PART_FX,   &hf_XXX_FX, NULL };
+static const FieldPart IXXX_1bit_spare = { 1, 1.0, FIELD_PART_UINT, NULL, NULL };
+static const FieldPart IXXX_2bit_spare = { 2, 1.0, FIELD_PART_UINT, NULL, NULL };
+static const FieldPart IXXX_3bit_spare = { 3, 1.0, FIELD_PART_UINT, NULL, NULL };
+static const FieldPart IXXX_4bit_spare = { 4, 1.0, FIELD_PART_UINT, NULL, NULL };
+static const FieldPart IXXX_5bit_spare = { 5, 1.0, FIELD_PART_UINT, NULL, NULL };
+static const FieldPart IXXX_6bit_spare = { 6, 1.0, FIELD_PART_UINT, NULL, NULL };
+static const FieldPart IXXX_7bit_spare = { 7, 1.0, FIELD_PART_UINT, NULL, NULL };
+
+/* Spare Item */
+DIAG_OFF_PEDANTIC
+static const AsterixField IX_SPARE = { FIXED, 0, 0, 0, &hf_spare, NULL, { NULL } };
+
+/* insert1 */
+---{insert1}--- +/* insert1 */ + +/* settings which category version to use for each ASTERIX category */ +static int global_categories_version[] = { + 0, /* 000 */ + 0, /* 001 */ + 0, /* 002 */ + 0, /* 003 */ + 0, /* 004 */ + 0, /* 005 */ + 0, /* 006 */ + 0, /* 007 */ + 0, /* 008 */ + 0, /* 009 */ + 0, /* 010 */ + 0, /* 011 */ + 0, /* 012 */ + 0, /* 013 */ + 0, /* 014 */ + 0, /* 015 */ + 0, /* 016 */ + 0, /* 017 */ + 0, /* 018 */ + 0, /* 019 */ + 0, /* 020 */ + 0, /* 021 */ + 0, /* 022 */ + 0, /* 023 */ + 0, /* 024 */ + 0, /* 025 */ + 0, /* 026 */ + 0, /* 027 */ + 0, /* 028 */ + 0, /* 029 */ + 0, /* 030 */ + 0, /* 031 */ + 0, /* 032 */ + 0, /* 033 */ + 0, /* 034 */ + 0, /* 035 */ + 0, /* 036 */ + 0, /* 037 */ + 0, /* 038 */ + 0, /* 039 */ + 0, /* 040 */ + 0, /* 041 */ + 0, /* 042 */ + 0, /* 043 */ + 0, /* 044 */ + 0, /* 045 */ + 0, /* 046 */ + 0, /* 047 */ + 0, /* 048 */ + 0, /* 049 */ + 0, /* 050 */ + 0, /* 051 */ + 0, /* 052 */ + 0, /* 053 */ + 0, /* 054 */ + 0, /* 055 */ + 0, /* 056 */ + 0, /* 057 */ + 0, /* 058 */ + 0, /* 059 */ + 0, /* 060 */ + 0, /* 061 */ + 0, /* 062 */ + 0, /* 063 */ + 0, /* 064 */ + 0, /* 065 */ + 0, /* 066 */ + 0, /* 067 */ + 0, /* 068 */ + 0, /* 069 */ + 0, /* 070 */ + 0, /* 071 */ + 0, /* 072 */ + 0, /* 073 */ + 0, /* 074 */ + 0, /* 075 */ + 0, /* 076 */ + 0, /* 077 */ + 0, /* 078 */ + 0, /* 079 */ + 0, /* 080 */ + 0, /* 081 */ + 0, /* 082 */ + 0, /* 083 */ + 0, /* 084 */ + 0, /* 085 */ + 0, /* 086 */ + 0, /* 087 */ + 0, /* 088 */ + 0, /* 089 */ + 0, /* 090 */ + 0, /* 091 */ + 0, /* 092 */ + 0, /* 093 */ + 0, /* 094 */ + 0, /* 095 */ + 0, /* 096 */ + 0, /* 097 */ + 0, /* 098 */ + 0, /* 099 */ + 0, /* 100 */ + 0, /* 101 */ + 0, /* 102 */ + 0, /* 103 */ + 0, /* 104 */ + 0, /* 105 */ + 0, /* 106 */ + 0, /* 107 */ + 0, /* 108 */ + 0, /* 109 */ + 0, /* 110 */ + 0, /* 111 */ + 0, /* 112 */ + 0, /* 113 */ + 0, /* 114 */ + 0, /* 115 */ + 0, /* 116 */ + 0, /* 117 */ + 0, /* 118 */ + 0, /* 119 */ + 0, /* 120 */ + 0, /* 121 */ + 0, /* 122 */ + 0, /* 123 */ + 0, /* 124 */ + 0, /* 125 */ + 0, /* 126 */ + 0, /* 127 */ + 0, /* 128 */ + 0, /* 129 */ + 0, /* 130 */ + 0, /* 131 */ + 0, /* 132 */ + 0, /* 133 */ + 0, /* 134 */ + 0, /* 135 */ + 0, /* 136 */ + 0, /* 137 */ + 0, /* 138 */ + 0, /* 139 */ + 0, /* 140 */ + 0, /* 141 */ + 0, /* 142 */ + 0, /* 143 */ + 0, /* 144 */ + 0, /* 145 */ + 0, /* 146 */ + 0, /* 147 */ + 0, /* 148 */ + 0, /* 149 */ + 0, /* 150 */ + 0, /* 151 */ + 0, /* 152 */ + 0, /* 153 */ + 0, /* 154 */ + 0, /* 155 */ + 0, /* 156 */ + 0, /* 157 */ + 0, /* 158 */ + 0, /* 159 */ + 0, /* 160 */ + 0, /* 161 */ + 0, /* 162 */ + 0, /* 163 */ + 0, /* 164 */ + 0, /* 165 */ + 0, /* 166 */ + 0, /* 167 */ + 0, /* 168 */ + 0, /* 169 */ + 0, /* 170 */ + 0, /* 171 */ + 0, /* 172 */ + 0, /* 173 */ + 0, /* 174 */ + 0, /* 175 */ + 0, /* 176 */ + 0, /* 177 */ + 0, /* 178 */ + 0, /* 179 */ + 0, /* 180 */ + 0, /* 181 */ + 0, /* 182 */ + 0, /* 183 */ + 0, /* 184 */ + 0, /* 185 */ + 0, /* 186 */ + 0, /* 187 */ + 0, /* 188 */ + 0, /* 189 */ + 0, /* 190 */ + 0, /* 191 */ + 0, /* 192 */ + 0, /* 193 */ + 0, /* 194 */ + 0, /* 195 */ + 0, /* 196 */ + 0, /* 197 */ + 0, /* 198 */ + 0, /* 199 */ + 0, /* 200 */ + 0, /* 201 */ + 0, /* 202 */ + 0, /* 203 */ + 0, /* 204 */ + 0, /* 205 */ + 0, /* 206 */ + 0, /* 207 */ + 0, /* 208 */ + 0, /* 209 */ + 0, /* 210 */ + 0, /* 211 */ + 0, /* 212 */ + 0, /* 213 */ + 0, /* 214 */ + 0, /* 215 */ + 0, /* 216 */ + 0, /* 217 */ + 0, /* 218 */ + 0, /* 219 */ + 0, /* 220 */ + 0, /* 221 */ + 0, /* 222 */ + 0, /* 223 */ + 0, /* 224 */ + 0, /* 225 */ + 0, /* 226 */ 
+    0, /* 227 */
+    0, /* 228 */
+    0, /* 229 */
+    0, /* 230 */
+    0, /* 231 */
+    0, /* 232 */
+    0, /* 233 */
+    0, /* 234 */
+    0, /* 235 */
+    0, /* 236 */
+    0, /* 237 */
+    0, /* 238 */
+    0, /* 239 */
+    0, /* 240 */
+    0, /* 241 */
+    0, /* 242 */
+    0, /* 243 */
+    0, /* 244 */
+    0, /* 245 */
+    0, /* 246 */
+    0, /* 247 */
+    0, /* 248 */
+    0, /* 249 */
+    0, /* 250 */
+    0, /* 251 */
+    0, /* 252 */
+    0, /* 253 */
+    0, /* 254 */
+    0  /* 255 */
+};
+
+static int dissect_asterix (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_)
+{
+    col_set_str (pinfo->cinfo, COL_PROTOCOL, "ASTERIX");
+    col_clear (pinfo->cinfo, COL_INFO);
+
+    if (tree) { /* we are being asked for details */
+        dissect_asterix_packet (tvb, pinfo, tree);
+    }
+
+    return tvb_captured_length(tvb);
+}
+
+static void dissect_asterix_packet (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
+{
+    unsigned i;
+    uint8_t category;
+    uint16_t length;
+    proto_item *asterix_packet_item;
+    proto_tree *asterix_packet_tree;
+
+    for (i = 0; i < tvb_reported_length (tvb); i += length + 3) {
+        /* all ASTERIX messages have the same structure:
+         *
+         * header:
+         *
+         *   1 byte   category  even though a category is referenced as I019,
+         *                      this is just stored as decimal 19 (i.e. 0x13)
+         *   2 bytes  length    the total length of this ASTERIX message, the
+         *                      length includes the size of the header.
+         *
+         *                      Note that there was a structural change at
+         *                      one point that changed whether multiple
+         *                      records can occur after the header or not
+         *                      (each category specifies this explicitly; all
+         *                      of the currently supported categories can have
+         *                      multiple records, so this implementation just
+         *                      assumes that is always the case)
+         *
+         * record (multiple records can exist):
+         *
+         *   n bytes  FSPEC     the field specifier is a bit mask where the
+         *                      lowest bit of each byte is called the FX bit.
+         *                      When the FX bit is set this indicates that
+         *                      the FSPEC extends into the next byte.
+         *                      Any other bit indicates the presence of the
+         *                      field that owns that bit (as per the User
+         *                      Application Profile (UAP)).
+         *   X bytes  Field Y   X is as per the specification for field Y.
+         *   etc.
+         *
+         * The User Application Profile (UAP) is simply a mapping from the
+         * FSPEC to fields. Each category has its own UAP.
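+         * For example, a single FSPEC byte of 0xF6 (1111 0110) announces
+         * UAP fields 1-4, 6 and 7; its cleared FX bit (the lowest bit)
+         * says the FSPEC does not extend into a further byte.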
+ */ + category = tvb_get_guint8 (tvb, i); + length = (tvb_get_guint8 (tvb, i + 1) << 8) + tvb_get_guint8 (tvb, i + 2) - 3; /* -3 for category and length */ + + asterix_packet_item = proto_tree_add_item (tree, proto_asterix, tvb, i, length + 3, ENC_NA); + proto_item_append_text (asterix_packet_item, ", Category %03d", category); + asterix_packet_tree = proto_item_add_subtree (asterix_packet_item, ett_asterix); + proto_tree_add_item (asterix_packet_tree, hf_asterix_category, tvb, i, 1, ENC_BIG_ENDIAN); + proto_tree_add_item (asterix_packet_tree, hf_asterix_length, tvb, i + 1, 2, ENC_BIG_ENDIAN); + + dissect_asterix_data_block (tvb, pinfo, i + 3, asterix_packet_tree, category, length); + } +} + +static void dissect_asterix_data_block (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *tree, uint8_t category, int length) +{ + uint8_t active_uap; + int fspec_len, inner_offset, size, counter; + proto_item *asterix_message_item = NULL; + proto_tree *asterix_message_tree = NULL; + + for (counter = 1, inner_offset = 0; inner_offset < length; counter++) { + + /* This loop handles parsing of each ASTERIX record */ + + active_uap = asterix_get_active_uap (tvb, offset + inner_offset, category); + size = asterix_message_length (tvb, offset + inner_offset, category, active_uap); + if (size > 0) { + asterix_message_item = proto_tree_add_item (tree, hf_asterix_message, tvb, offset + inner_offset, size, ENC_NA); + proto_item_append_text (asterix_message_item, ", #%02d, length: %d", counter, size); + asterix_message_tree = proto_item_add_subtree (asterix_message_item, ett_asterix_message); + fspec_len = asterix_fspec_len (tvb, offset + inner_offset); + /*show_fspec (tvb, asterix_message_tree, offset + inner_offset, fspec_len);*/ + proto_tree_add_item (asterix_message_tree, hf_asterix_fspec, tvb, offset + inner_offset, fspec_len, ENC_NA); + + size = dissect_asterix_fields (tvb, pinfo, offset + inner_offset, asterix_message_tree, category, categories[category][global_categories_version[category]][active_uap]); + + inner_offset += size + fspec_len; + } + else { + inner_offset = length; + } + } +} + +static int dissect_asterix_fields (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *tree, uint8_t category, const AsterixField *current_uap[]) +{ + unsigned i, j, size, start, len, inner_offset, fspec_len; + uint64_t counter; + proto_item *asterix_field_item = NULL; + proto_tree *asterix_field_tree = NULL; + proto_item *asterix_field_item2 = NULL; + proto_tree *asterix_field_tree2 = NULL; + + if (current_uap == NULL) + return 0; + + for (i = 0, size = 0; current_uap[i] != NULL; i++) { + start = asterix_field_offset (tvb, offset, current_uap, i); + if (start > 0) { + len = asterix_field_length (tvb, offset + start, current_uap[i]); + size += len; + switch(current_uap[i]->type) { + case COMPOUND: + asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA); + asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree); + fspec_len = asterix_fspec_len (tvb, offset + start); + proto_tree_add_item (asterix_field_tree, hf_asterix_fspec, tvb, offset + start, fspec_len, ENC_NA); + dissect_asterix_fields (tvb, pinfo, offset + start, asterix_field_tree, category, (const AsterixField **)current_uap[i]->field); + break; + case REPETITIVE: + asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA); + asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree); 
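+                /* REPETITIVE item: read the big-endian repetition counter
+                 * first, then dissect that many fixed-size sub-items */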
+ for (j = 0, counter = 0; j < current_uap[i]->repetition_counter_size; j++) { + counter = (counter << 8) + tvb_get_guint8 (tvb, offset + start + j); + } + proto_tree_add_item (asterix_field_tree, hf_counter, tvb, offset + start, current_uap[i]->repetition_counter_size, ENC_BIG_ENDIAN); + for (j = 0, inner_offset = 0; j < counter; j++, inner_offset += current_uap[i]->length) { + asterix_field_item2 = proto_tree_add_item (asterix_field_tree, *current_uap[i]->hf, tvb, offset + start + current_uap[i]->repetition_counter_size + inner_offset, current_uap[i]->length, ENC_NA); + asterix_field_tree2 = proto_item_add_subtree (asterix_field_item2, ett_asterix_subtree); + asterix_build_subtree (tvb, pinfo, offset + start + current_uap[i]->repetition_counter_size + inner_offset, asterix_field_tree2, current_uap[i]); + } + break; + /* currently not generated from asterix-spec*/ + /*case EXP: + asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA); + asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree); + proto_tree_add_item (asterix_field_tree, hf_re_field_len, tvb, offset + start, 1, ENC_BIG_ENDIAN); + start++; + fspec_len = asterix_fspec_len (tvb, offset + start); + proto_tree_add_item (asterix_field_tree, hf_asterix_fspec, tvb, offset + start, fspec_len, ENC_NA); + dissect_asterix_fields (tvb, pinfo, offset + start, asterix_field_tree, category, (const AsterixField **)current_uap[i]->field); + break;*/ + default: /* FIXED, FX, FX_1, FX_UAP */ + asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA); + asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree); + asterix_build_subtree (tvb, pinfo, offset + start, asterix_field_tree, current_uap[i]); + break; + } + } + } + return size; +} + +static void asterix_build_subtree (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *parent, const AsterixField *field) +{ + header_field_info* hfi; + int bytes_in_type, byte_offset_of_mask; + int i, inner_offset, offset_in_tvb, length_in_tvb; + uint8_t go_on; + int64_t value; + char *str_buffer = NULL; + double scaling_factor = 1.0; + uint8_t *air_speed_im_bit; + if (field->part != NULL) { + for (i = 0, inner_offset = 0, go_on = 1; go_on && field->part[i] != NULL; i++) { + value = tvb_get_bits64 (tvb, offset * 8 + inner_offset, field->part[i]->bit_length, ENC_BIG_ENDIAN); + if (field->part[i]->hf != NULL) { + offset_in_tvb = offset + inner_offset / 8; + length_in_tvb = (inner_offset % 8 + field->part[i]->bit_length + 7) / 8; + switch (field->part[i]->type) { + case FIELD_PART_FX: + if (!value) go_on = 0; + /* Fall through */ + case FIELD_PART_INT: + case FIELD_PART_UINT: + case FIELD_PART_HEX: + case FIELD_PART_ASCII: + case FIELD_PART_SQUAWK: + hfi = proto_registrar_get_nth (*field->part[i]->hf); + if (hfi->bitmask) + { + // for a small bit field to decode correctly with + // a mask that belongs to a large(r) one we need to + // re-adjust offset_in_tvb and length_in_tvb to + // correctly align with the given hf mask. + // + // E.g. the following would not decode correctly: + // { &hf_020_050_V, ... FT_UINT16, ... 0x8000, ... + // instead one would have to use + // { &hf_020_050_V, ... FT_UINT8, ... 0x80, ... 
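+                            // e.g. FT_UINT16 with mask 0x0080: bytes_in_type = 2,
+                            // byte_offset_of_mask = 2 - (7 + 8) / 8 = 1, so the item
+                            // is re-anchored one byte earlier and spans the full type.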
+ // + bytes_in_type = ftype_wire_size(hfi->type); + if (bytes_in_type > 1) + { + byte_offset_of_mask = bytes_in_type - (ws_ilog2 (hfi->bitmask) + 8)/8; + if (byte_offset_of_mask >= 0) + { + offset_in_tvb -= byte_offset_of_mask; + length_in_tvb = bytes_in_type; + } + } + } + proto_tree_add_item (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, ENC_BIG_ENDIAN); + break; + case FIELD_PART_FLOAT: + twos_complement (&value, field->part[i]->bit_length); + /* Fall through */ + case FIELD_PART_UFLOAT: + scaling_factor = field->part[i]->scaling_factor; + if (field->part[i]->format_string != NULL) + proto_tree_add_double_format_value (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor, field->part[i]->format_string, value * scaling_factor); + else + proto_tree_add_double (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor); + break; + case FIELD_PART_CALLSIGN: + str_buffer = wmem_strdup_printf( + pinfo->pool, + "%c%c%c%c%c%c%c%c", + AISCode[(value >> 42) & 63], + AISCode[(value >> 36) & 63], + AISCode[(value >> 30) & 63], + AISCode[(value >> 24) & 63], + AISCode[(value >> 18) & 63], + AISCode[(value >> 12) & 63], + AISCode[(value >> 6) & 63], + AISCode[value & 63]); + proto_tree_add_string (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, str_buffer); + break; + case FIELD_PART_IAS_IM: + /* special processing for I021/150 and I062/380#4 because Air Speed depends on IM subfield */ + air_speed_im_bit = wmem_new (pinfo->pool, uint8_t); + *air_speed_im_bit = (tvb_get_guint8 (tvb, offset_in_tvb) & 0x80) >> 7; + /* Save IM info for the packet. key = 21150. */ + p_add_proto_data (pinfo->pool, pinfo, proto_asterix, 21150, air_speed_im_bit); + proto_tree_add_item (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, ENC_BIG_ENDIAN); + break; + case FIELD_PART_IAS_ASPD: + /* special processing for I021/150 and I062/380#4 because Air Speed depends on IM subfield */ + air_speed_im_bit = (uint8_t *)p_get_proto_data (pinfo->pool, pinfo, proto_asterix, 21150); + if (!air_speed_im_bit || *air_speed_im_bit == 0) + scaling_factor = 1.0/16384.0; + else + scaling_factor = 0.001; + proto_tree_add_double (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor); + break; + } + } + inner_offset += field->part[i]->bit_length; + } + } /* if not null */ +} + +static uint8_t asterix_bit (uint8_t b, uint8_t bitNo) +{ + return bitNo < 8 && (b & (0x80 >> bitNo)) > 0; +} + +/* Function makes int64_t two's complement. + * Only the bit_len bit are set in int64_t. All more significant + * bits need to be set to have proper two's complement. + * If the number is negative, all other bits must be set to 1. + * If the number is positive, all other bits must remain 0. 
*/
+static void twos_complement (int64_t *v, int bit_len)
+{
+    if (*v & (G_GUINT64_CONSTANT(1) << (bit_len - 1))) {
+        *v |= (G_GUINT64_CONSTANT(0xffffffffffffffff) << bit_len);
+    }
+}
+
+static unsigned asterix_fspec_len (tvbuff_t *tvb, unsigned offset)
+{
+    unsigned i;
+    unsigned max_length = tvb_reported_length (tvb) - offset;
+    for (i = 0; (tvb_get_guint8 (tvb, offset + i) & 1) && i < max_length; i++);
+    return i + 1;
+}
+
+static uint8_t asterix_field_exists (tvbuff_t *tvb, unsigned offset, int bitIndex)
+{
+    uint8_t bitNo, i;
+    bitNo = bitIndex + bitIndex / 7;
+    for (i = 0; i < bitNo / 8; i++) {
+        if (!(tvb_get_guint8 (tvb, offset + i) & 1)) return 0;
+    }
+    return asterix_bit (tvb_get_guint8 (tvb, offset + i), bitNo % 8);
+}
+
+static int asterix_field_length (tvbuff_t *tvb, unsigned offset, const AsterixField *field)
+{
+    unsigned size;
+    uint64_t count;
+    uint8_t i;
+
+    size = 0;
+    switch(field->type) {
+        case FIXED:
+            size = field->length;
+            break;
+        case REPETITIVE:
+            for (i = 0, count = 0; i < field->repetition_counter_size && i < sizeof (count); i++)
+                count = (count << 8) + tvb_get_guint8 (tvb, offset + i);
+            size = (unsigned)(field->repetition_counter_size + count * field->length);
+            break;
+        case FX:
+            for (size = field->length + field->header_length; tvb_get_guint8 (tvb, offset + size - 1) & 1; size += field->length);
+            break;
+        case EXP:
+            for (i = 0, size = 0; i < field->header_length; i++) {
+                size = (size << 8) + tvb_get_guint8 (tvb, offset + i);
+            }
+            break;
+        case COMPOUND:
+            /* FSPEC */
+            for (size = 0; tvb_get_guint8 (tvb, offset + size) & 1; size++);
+            size++;
+
+            for (i = 0; field->field[i] != NULL; i++) {
+                if (asterix_field_exists (tvb, offset, i))
+                    size += asterix_field_length (tvb, offset + size, field->field[i]);
+            }
+            break;
+    }
+    return size;
+}
+
+/* This works for category 001. For other categories it may require changes.
*/ +static uint8_t asterix_get_active_uap (tvbuff_t *tvb, unsigned offset, uint8_t category) +{ + int i, inner_offset; + AsterixField **current_uap; + + if ((category == 1) && (categories[category] != NULL)) { /* if category is supported */ + if (categories[category][global_categories_version[category]][1] != NULL) { /* if exists another uap */ + current_uap = (AsterixField **)categories[category][global_categories_version[category]][0]; + if (current_uap != NULL) { + inner_offset = asterix_fspec_len (tvb, offset); + for (i = 0; current_uap[i] != NULL; i++) { + if (asterix_field_exists (tvb, offset, i)) { + if (i == 1) { /* uap selector (I001/020) is always at index '1' */ + return tvb_get_guint8 (tvb, offset + inner_offset) >> 7; + } + inner_offset += asterix_field_length (tvb, offset + inner_offset, current_uap[i]); + } + } + } + } + } + return 0; +} + +static int asterix_field_offset (tvbuff_t *tvb, unsigned offset, const AsterixField *current_uap[], int field_index) +{ + int i, inner_offset; + inner_offset = 0; + if (asterix_field_exists (tvb, offset, field_index)) { + inner_offset = asterix_fspec_len (tvb, offset); + for (i = 0; i < field_index; i++) { + if (asterix_field_exists (tvb, offset, i)) + inner_offset += asterix_field_length (tvb, offset + inner_offset, current_uap[i]); + } + } + return inner_offset; +} + +static int asterix_message_length (tvbuff_t *tvb, unsigned offset, uint8_t category, uint8_t active_uap) +{ + int i, size; + AsterixField **current_uap; + + if (categories[category] != NULL) { /* if category is supported */ + current_uap = (AsterixField **)categories[category][global_categories_version[category]][active_uap]; + if (current_uap != NULL) { + size = asterix_fspec_len (tvb, offset); + for (i = 0; current_uap[i] != NULL; i++) { + if (asterix_field_exists (tvb, offset, i)) { + size += asterix_field_length (tvb, offset + size, current_uap[i]); + } + } + return size; + } + } + return 0; +} + +void proto_register_asterix (void) +{ + static hf_register_info hf[] = { + { &hf_asterix_category, { "Category", "asterix.category", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } }, + { &hf_asterix_length, { "Length", "asterix.length", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, + { &hf_asterix_message, { "Asterix message", "asterix.message", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, + { &hf_asterix_fspec, { "FSPEC", "asterix.fspec", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, + { &hf_re_field_len, { "RE LEN", "asterix.re_field_len", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } }, + { &hf_spare, { "Spare", "asterix.spare", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, + { &hf_counter, { "Counter", "asterix.counter", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, + { &hf_XXX_FX, { "FX", "asterix.FX", FT_UINT8, BASE_DEC, VALS (valstr_XXX_FX), 0x01, "Extension into next extent", HFILL } }, +/* insert2 */ +---{insert2}--- +/* insert2 */ + }; + + /* Setup protocol subtree array */ + static int *ett[] = { + &ett_asterix, + &ett_asterix_category, + &ett_asterix_length, + &ett_asterix_message, + &ett_asterix_subtree + }; + + module_t *asterix_prefs_module; + + proto_asterix = proto_register_protocol ( + "ASTERIX packet", /* name */ + "ASTERIX", /* short name */ + "asterix" /* abbrev */ + ); + + proto_register_field_array (proto_asterix, hf, array_length (hf)); + proto_register_subtree_array (ett, array_length (ett)); + + asterix_handle = register_dissector ("asterix", dissect_asterix, proto_asterix); + + asterix_prefs_module = prefs_register_protocol (proto_asterix, 
NULL); + +/* insert3 */ +---{insert3}--- +/* insert3 */ +} + +void proto_reg_handoff_asterix (void) +{ + dissector_add_uint_with_preference("udp.port", ASTERIX_PORT, asterix_handle); +} + +/* + * Editor modelines - https://www.wireshark.org/tools/modelines.html + * + * Local variables: + * c-basic-offset: 4 + * tab-width: 8 + * indent-tabs-mode: nil + * End: + * + * vi: set shiftwidth=4 tabstop=8 expandtab: + * :indentSize=4:tabSize=8:noTabs=true: + */ diff --git a/tools/asterix/update-specs.py b/tools/asterix/update-specs.py new file mode 100755 index 0000000..7af735d --- /dev/null +++ b/tools/asterix/update-specs.py @@ -0,0 +1,829 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# By Zoran Bošnjak +# +# Use asterix specifications in JSON format, +# to generate C/C++ structures, suitable for wireshark. +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +import argparse + +import urllib.request +import json +from copy import copy, deepcopy +from itertools import chain, repeat, takewhile +from functools import reduce +import os +import sys +import re + +# Path to default upstream repository +upstream_repo = 'https://zoranbosnjak.github.io/asterix-specs' +dissector_file = 'epan/dissectors/packet-asterix.c' + +class Offset(object): + """Keep track of number of added bits. + It's like integer, except when offsets are added together, + a 'modulo 8' is applied, such that offset is always between [0,7]. + """ + + def __init__(self): + self.current = 0 + + def __add__(self, other): + self.current = (self.current + other) % 8 + return self + + @property + def get(self): + return self.current + +class Context(object): + """Support class to be used as a context manager. + The 'tell' method is used to output (print) some data. + All output is first collected to a buffer, then rendered + using a template file. + """ + def __init__(self): + self.buffer = {} + self.offset = Offset() + self.inside_repetitive = False + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + pass + + def tell(self, channel, s): + """Append string 's' to an output channel.""" + lines = self.buffer.get(channel, []) + lines.append(s) + self.buffer[channel] = lines + + def reset_offset(self): + self.offset = Offset() + +def get_number(value): + """Get Natural/Real/Rational number as an object.""" + class Integer(object): + def __init__(self, val): + self.val = val + def __str__(self): + return '{}'.format(self.val) + def __float__(self): + return float(self.val) + + class Ratio(object): + def __init__(self, a, b): + self.a = a + self.b = b + def __str__(self): + return '{}/{}'.format(self.a, self.b) + def __float__(self): + return float(self.a) / float(self.b) + + class Real(object): + def __init__(self, val): + self.val = val + def __str__(self): + return '{0:f}'.format(self.val).rstrip('0') + def __float__(self): + return float(self.val) + + t = value['type'] + val = value['value'] + + if t == 'Integer': + return Integer(int(val)) + if t == 'Ratio': + x, y = val['numerator'], val['denominator'] + return Ratio(x, y) + if t == 'Real': + return Real(float(val)) + raise Exception('unexpected value type {}'.format(t)) + +def replace_string(s, mapping): + """Helper function to replace each entry from the mapping.""" + for (key,val) in mapping.items(): + s = s.replace(key, val) + return s + +def safe_string(s): + """String replacement table.""" + return replace_string(s, { + # from C reference manual + chr(92): r"\\", # Backslash character. + '?': r"\?", # Question mark character. 
+ "'": r"\'", # Single quotation mark. + '"': r'\"', # Double quotation mark. + "\a": "", # Audible alert. + "\b": "", # Backspace character. + "\e": "", # character. (This is a GNU extension.) + "\f": "", # Form feed. + "\n": "", # Newline character. + "\r": "", # Carriage return. + "\t": " ", # Horizontal tab. + "\v": "", # Vertical tab. + }) + +def get_scaling(content): + """Get scaling factor from the content.""" + k = content.get('scaling') + if k is None: + return None + k = get_number(k) + + fract = content['fractionalBits'] + + if fract > 0: + scale = format(float(k) / (pow(2, fract)), '.29f') + scale = scale.rstrip('0') + else: + scale = format(float(k)) + return scale + +def get_fieldpart(content): + """Get FIELD_PART* from the content.""" + t = content['type'] + if t == 'Raw': return 'FIELD_PART_HEX' + elif t == 'Table': return 'FIELD_PART_UINT' + elif t == 'String': + var = content['variation'] + if var == 'StringAscii': return 'FIELD_PART_ASCII' + elif var == 'StringICAO': return 'FIELD_PART_CALLSIGN' + elif var == 'StringOctal': return 'FIELD_PART_SQUAWK' + else: + raise Exception('unexpected string variation: {}'.format(var)) + elif t == 'Integer': + if content['signed']: + return 'FIELD_PART_INT' + else: + return 'FIELD_PART_UINT' + elif t == 'Quantity': + if content['signed']: + return 'FIELD_PART_FLOAT' + else: + return 'FIELD_PART_UFLOAT' + elif t == 'Bds': + return 'FIELD_PART_HEX' + else: + raise Exception('unexpected content type: {}'.format(t)) + +def download_url(path): + """Download url and return content as a string.""" + with urllib.request.urlopen(upstream_repo + path) as url: + return url.read() + +def read_file(path): + """Read file content, return string.""" + with open(path) as f: + return f.read() + +def load_jsons(paths): + """Load json files from either URL or from local disk.""" + + # load from url + if paths == []: + manifest = download_url('/manifest.json').decode() + listing = [] + for spec in json.loads(manifest): + cat = spec['category'] + for edition in spec['cats']: + listing.append('/specs/cat{}/cats/cat{}/definition.json'.format(cat, edition)) + for edition in spec['refs']: + listing.append('/specs/cat{}/refs/ref{}/definition.json'.format(cat, edition)) + return [download_url(i).decode() for i in listing] + + # load from disk + else: + listing = [] + for path in paths: + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + for i in files: + (a,b) = os.path.splitext(i) + if (a,b) != ('definition', '.json'): + continue + listing.append(os.path.join(root, i)) + elif os.path.isfile(path): + listing.append(path) + else: + raise Exception('unexpected path type: {}'.path) + return [read_file(f) for f in listing] + +def load_gitrev(paths): + """Read git revision reference.""" + + # load from url + if paths == []: + gitrev = download_url('/gitrev.txt').decode().strip() + return [upstream_repo, 'git revision: {}'.format(gitrev)] + + # load from disk + else: + return ['(local disk)'] + +def get_ft(ref, n, content, offset): + """Get FT... 
+def get_ft(ref, n, content, offset):
+    """Get FT... from the content."""
+    a = offset.get
+
+    # gross bit size (next multiple of 8)
+    (m, b) = divmod(a+n, 8)
+    m = m if b == 0 else m + 1
+    m *= 8
+
+    mask = '0x00'
+    if a != 0 or b != 0:
+        bits = chain(repeat(0, a), repeat(1, n), repeat(0, m-n-a))
+        mask = 0
+        for (a,b) in zip(bits, reversed(range(m))):
+            mask += a*pow(2,b)
+        mask = hex(mask)
+        # prefix mask with zeros '0x000...', to adjust mask size
+        assert mask[0:2] == '0x'
+        mask = mask[2:]
+        required_mask_size = (m//8)*2
+        add_some = required_mask_size - len(mask)
+        mask = '0x' + '0'*add_some + mask
+
+    t = content['type']
+
+    if t == 'Raw':
+        if n > 64: # very long items
+            assert (n % 8) == 0, "very long items require byte alignment"
+            return 'FT_NONE, BASE_NONE, NULL, 0x00'
+
+        if (n % 8): # not byte aligned
+            base = 'DEC'
+        else: # byte aligned
+            if n >= 32: # long items
+                base = 'HEX'
+            else: # short items
+                base = 'HEX_DEC'
+        return 'FT_UINT{}, BASE_{}, NULL, {}'.format(m, base, mask)
+    elif t == 'Table':
+        return 'FT_UINT{}, BASE_DEC, VALS (valstr_{}), {}'.format(m, ref, mask)
+    elif t == 'String':
+        var = content['variation']
+        if var == 'StringAscii':
+            return 'FT_STRING, BASE_NONE, NULL, {}'.format(mask)
+        elif var == 'StringICAO':
+            return 'FT_STRING, BASE_NONE, NULL, {}'.format(mask)
+        elif var == 'StringOctal':
+            return 'FT_UINT{}, BASE_OCT, NULL, {}'.format(m, mask)
+        else:
+            raise Exception('unexpected string variation: {}'.format(var))
+    elif t == 'Integer':
+        signed = content['signed']
+        if signed:
+            return 'FT_INT{}, BASE_DEC, NULL, {}'.format(m, mask)
+        else:
+            return 'FT_UINT{}, BASE_DEC, NULL, {}'.format(m, mask)
+    elif t == 'Quantity':
+        return 'FT_DOUBLE, BASE_NONE, NULL, 0x00'
+    elif t == 'Bds':
+        return 'FT_UINT{}, BASE_DEC, NULL, {}'.format(m, mask)
+    else:
+        raise Exception('unexpected content type: {}'.format(t))
+
+def reference(cat, edition, path):
+    """Create reference string."""
+    name = '_'.join(path)
+    if edition is None:
+        return('{:03d}_{}'.format(cat, name))
+    return('{:03d}_V{}_{}_{}'.format(cat, edition['major'], edition['minor'], name))
+
+def get_content(rule):
+    t = rule['type']
+    # Most cases are 'ContextFree', use as specified.
+    if t == 'ContextFree':
+        return rule['content']
+    # Handle 'Dependent' contents as 'Raw'.
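+    # (a 'Dependent' content's interpretation varies with the value of other
+    # items, so it is decoded as plain raw bits here)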
+    elif t == 'Dependent':
+        return {'type': "Raw"}
+    else:
+        raise Exception('unexpected type: {}'.format(t))
+
+def get_bit_size(item):
+    """Return bit size of a (spare) item."""
+    if item['spare']:
+        return item['length']
+    else:
+        return item['variation']['size']
+
+def get_description(item, content=None):
+    """Return item description."""
+    name = item['name'] if not is_generated(item) else None
+    title = item.get('title')
+    if content is not None and content.get('unit'):
+        unit = '[{}]'.format(safe_string(content['unit']))
+    else:
+        unit = None
+
+    parts = [x for x in [name, title, unit] if x]
+    if not parts:
+        return ''
+    return reduce(lambda a,b: a + ', ' + b, parts)
+
+def generate_group(item, variation=None):
+    """Generate group-item from element-item."""
+    level2 = copy(item)
+    level2['name'] = 'VALUE'
+    level2['is_generated'] = True
+    if variation is None:
+        level1 = copy(item)
+        level1['variation'] = {
+            'type': 'Group',
+            'items': [level2],
+        }
+    else:
+        level2['variation'] = variation['variation']
+        level1 = {
+            'type': "Group",
+            'items': [level2],
+        }
+    return level1
+
+def is_generated(item):
+    return item.get('is_generated') is not None
+
+def ungroup(item):
+    """Convert group of items of known size to element"""
+    n = sum([get_bit_size(i) for i in item['variation']['items']])
+    result = copy(item)
+    result['variation'] = {
+        'rule': {
+            'content': {'type': 'Raw'},
+            'type': 'ContextFree',
+        },
+        'size': n,
+        'type': 'Element',
+    }
+    return result
+
+def part1(ctx, get_ref, catalogue):
+    """Generate components in order
+        - static int hf_...
+        - FieldPart
+        - FieldPart[]
+        - AsterixField
+    """
+
+    tell = lambda s: ctx.tell('insert1', s)
+    tell_pr = lambda s: ctx.tell('insert2', s)
+
+    ctx.reset_offset()
+
+    def handle_item(path, item):
+        """Handle 'spare' or regular 'item'.
+        This function is used recursively, depending on the item structure.
+        """
+
+        def handle_variation(path, variation):
+            """Handle 'Element, Group...' variations.
+ This function is used recursively, depending on the item structure.""" + + t = variation['type'] + + ref = get_ref(path) + + def part_of(item): + if item['spare']: + return '&IXXX_{}bit_spare'.format(item['length']) + return '&I{}_{}'.format(ref, item['name']) + + if t == 'Element': + tell('static int hf_{} = -1;'.format(ref)) + n = variation['size'] + content = get_content(variation['rule']) + scaling = get_scaling(content) + scaling = scaling if scaling is not None else 1.0 + fp = get_fieldpart(content) + + if content['type'] == 'Table': + tell('static const value_string valstr_{}[] = {}'.format(ref, '{')) + for (a,b) in content['values']: + tell(' {} {}, "{}" {},'.format('{', a, safe_string(b), '}')) + tell(' {} 0, NULL {}'.format('{', '}')) + tell('};') + + tell('static const FieldPart I{} = {} {}, {}, {}, &hf_{}, NULL {};'.format(ref, '{', n, scaling, fp, ref, '}')) + description = get_description(item, content) + + ft = get_ft(ref, n, content, ctx.offset) + tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", {}, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, ft, '}', '}')) + + ctx.offset += n + + elif t == 'Group': + ctx.reset_offset() + + description = get_description(item) + tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}')) + + tell('static int hf_{} = -1;'.format(ref)) + for i in variation['items']: + handle_item(path, i) + + # FieldPart[] + tell('static const FieldPart *I{}_PARTS[] = {}'.format(ref,'{')) + for i in variation['items']: + tell(' {},'.format(part_of(i))) + tell(' NULL') + tell('};') + + # AsterixField + bit_size = sum([get_bit_size(i) for i in variation['items']]) + byte_size = bit_size // 8 + parts = 'I{}_PARTS'.format(ref) + comp = '{ NULL }' + if not ctx.inside_repetitive: + tell('static const AsterixField I{} = {} FIXED, {}, 0, 0, &hf_{}, {}, {} {};'.format + (ref, '{', byte_size, ref, parts, comp, '}')) + + elif t == 'Extended': + ctx.reset_offset() + + description = get_description(item) + tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}')) + tell('static int hf_{} = -1;'.format(ref)) + + items = [] + for i in variation['items']: + if i is None: + items.append(i) + continue + if i.get('variation') is not None: + if i['variation']['type'] == 'Group': + i = ungroup(i) + items.append(i) + + for i in items: + if i is None: + ctx.offset += 1 + else: + handle_item(path, i) + + tell('static const FieldPart *I{}_PARTS[] = {}'.format(ref,'{')) + for i in items: + if i is None: + tell(' &IXXX_FX,') + else: + tell(' {},'.format(part_of(i))) + + tell(' NULL') + tell('};') + + # AsterixField + first_part = list(takewhile(lambda x: x is not None, items)) + n = (sum([get_bit_size(i) for i in first_part]) + 1) // 8 + parts = 'I{}_PARTS'.format(ref) + comp = '{ NULL }' + tell('static const AsterixField I{} = {} FX, {}, 0, {}, &hf_{}, {}, {} {};'.format + (ref, '{', n, 0, ref, parts, comp, '}')) + + elif t == 'Repetitive': + ctx.reset_offset() + ctx.inside_repetitive = True + + # Group is required below this item. 
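+            # (a bare Element has no FieldPart[] table of its own, so it is
+            # wrapped into a synthetic one-member Group via generate_group())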
+ if variation['variation']['type'] == 'Element': + subvar = generate_group(item, variation) + else: + subvar = variation['variation'] + handle_variation(path, subvar) + + # AsterixField + bit_size = sum([get_bit_size(i) for i in subvar['items']]) + byte_size = bit_size // 8 + rep = variation['rep']['size'] // 8 + parts = 'I{}_PARTS'.format(ref) + comp = '{ NULL }' + tell('static const AsterixField I{} = {} REPETITIVE, {}, {}, 0, &hf_{}, {}, {} {};'.format + (ref, '{', byte_size, rep, ref, parts, comp, '}')) + ctx.inside_repetitive = False + + elif t == 'Explicit': + ctx.reset_offset() + tell('static int hf_{} = -1;'.format(ref)) + description = get_description(item) + tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}')) + tell('static const AsterixField I{} = {} EXP, 0, 0, 1, &hf_{}, NULL, {} NULL {} {};'.format(ref, '{', ref, '{', '}', '}')) + + elif t == 'Compound': + ctx.reset_offset() + tell('static int hf_{} = -1;'.format(ref)) + description = get_description(item) + tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}')) + comp = '{' + for i in variation['items']: + if i is None: + comp += ' &IX_SPARE,' + continue + # Group is required below this item. + if i['variation']['type'] == 'Element': + subitem = generate_group(i) + else: + subitem = i + comp += ' &I{}_{},'.format(ref, subitem['name']) + handle_item(path, subitem) + comp += ' NULL }' + + # AsterixField + tell('static const AsterixField I{} = {} COMPOUND, 0, 0, 0, &hf_{}, NULL, {} {};'.format + (ref, '{', ref, comp, '}')) + + else: + raise Exception('unexpected variation type: {}'.format(t)) + + if item['spare']: + ctx.offset += item['length'] + return + + # Group is required on the first level. + if path == [] and item['variation']['type'] == 'Element': + variation = generate_group(item)['variation'] + else: + variation = item['variation'] + handle_variation(path + [item['name']], variation) + + for item in catalogue: + # adjust 'repetitive fx' item + if item['variation']['type'] == 'Repetitive' and item['variation']['rep']['type'] == 'Fx': + var = item['variation']['variation'].copy() + if var['type'] != 'Element': + raise Exception("Expecting 'Element'") + item = item.copy() + item['variation'] = { + 'type': 'Extended', + 'items': [{ + 'definition': None, + 'description': None, + 'name': 'Subitem', + 'remark': None, + 'spare': False, + 'title': 'Subitem', + 'variation': var, + }, None] + } + handle_item([], item) + tell('') + +def part2(ctx, ref, uap): + """Generate UAPs""" + + tell = lambda s: ctx.tell('insert1', s) + tell('DIAG_OFF_PEDANTIC') + + ut = uap['type'] + if ut == 'uap': + variations = [{'name': 'uap', 'items': uap['items']}] + elif ut == 'uaps': + variations = uap['variations'] + else: + raise Exception('unexpected uap type {}'.format(ut)) + + for var in variations: + tell('static const AsterixField *I{}_{}[] = {}'.format(ref, var['name'], '{')) + for i in var['items']: + if i is None: + tell(' &IX_SPARE,') + else: + tell(' &I{}_{},'.format(ref, i)) + tell(' NULL') + tell('};') + + tell('static const AsterixField **I{}[] = {}'.format(ref, '{')) + for var in variations: + tell(' I{}_{},'.format(ref, var['name'])) + tell(' NULL') + tell('};') + tell('DIAG_ON_PEDANTIC') + tell('') + +def part3(ctx, specs): + """Generate + - static const AsterixField ***... + - static const enum_val_t ..._versions[]... 
+ """ + tell = lambda s: ctx.tell('insert1', s) + def fmt_edition(cat, edition): + return 'I{:03d}_V{}_{}'.format(cat, edition['major'], edition['minor']) + + cats = set([spec['number'] for spec in specs]) + for cat in sorted(cats): + lst = [spec for spec in specs if spec['number'] == cat] + editions = sorted([val['edition'] for val in lst], key = lambda x: (x['major'], x['minor']), reverse=True) + editions_fmt = [fmt_edition(cat, edition) for edition in editions] + editions_str = ', '.join(['I{:03d}'.format(cat)] + editions_fmt) + tell('DIAG_OFF_PEDANTIC') + tell('static const AsterixField ***I{:03d}all[] = {} {} {};'.format(cat, '{', editions_str, '}')) + tell('DIAG_ON_PEDANTIC') + tell('') + + tell('static const enum_val_t I{:03d}_versions[] = {}'.format(cat, '{')) + edition = editions[0] + a = edition['major'] + b = edition['minor'] + tell(' {} "I{:03d}", "Version {}.{} (latest)", 0 {},'.format('{', cat, a, b, '}')) + for ix, edition in enumerate(editions, start=1): + a = edition['major'] + b = edition['minor'] + tell(' {} "I{:03d}_v{}_{}", "Version {}.{}", {} {},'.format('{', cat, a, b, a, b, ix, '}')) + tell(' { NULL, NULL, 0 }') + tell('};') + tell('') + +def part4(ctx, cats): + """Generate + - static const AsterixField ****categories[]... + - prefs_register_enum_preference ... + """ + tell = lambda s: ctx.tell('insert1', s) + tell_pr = lambda s: ctx.tell('insert3', s) + + tell('static const AsterixField ****categories[] = {') + for i in range(0, 256): + val = 'I{:03d}all'.format(i) if i in cats else 'NULL' + tell(' {}, /* {:03d} */'.format(val, i)) + tell(' NULL') + tell('};') + + for cat in sorted(cats): + tell_pr(' prefs_register_enum_preference (asterix_prefs_module, "i{:03d}_version", "I{:03d} version", "Select the CAT{:03d} version", &global_categories_version[{}], I{:03d}_versions, false);'.format(cat, cat, cat, cat, cat)) + +class Output(object): + """Output context manager. Write either to stdout or to a dissector + file directly, depending on 'update' argument""" + def __init__(self, update): + self.update = update + self.f = None + + def __enter__(self): + if self.update: + self.f = open(dissector_file, 'w') + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + if self.f is not None: + self.f.close() + + def dump(self, line): + if self.f is None: + print(line) + else: + self.f.write(line+'\n') + +def remove_rfs(spec): + """Remove RFS item. 
It's present in specs, but not used."""
+    catalogue = []  # create new catalogue without RFS
+    rfs_items = []
+    for i in spec['catalogue']:
+        if i['variation']['type'] == 'Rfs':
+            rfs_items.append(i['name'])
+        else:
+            catalogue.append(i)
+    if not rfs_items:
+        return spec
+    spec2 = copy(spec)
+    spec2['catalogue'] = catalogue
+    # remove RFS from UAP(s)
+    uap = deepcopy(spec['uap'])
+    ut = uap['type']
+    if ut == 'uap':
+        items = [None if i in rfs_items else i for i in uap['items']]
+        if items[-1] is None: items = items[:-1]
+        uap['items'] = items
+    elif ut == 'uaps':
+        variations = []
+        for var in uap['variations']:
+            items = [None if i in rfs_items else i for i in var['items']]
+            if items[-1] is None: items = items[:-1]
+            var['items'] = items
+            variations.append(var)
+        uap['variations'] = variations
+    else:
+        raise Exception('unexpected uap type {}'.format(ut))
+    spec2['uap'] = uap
+    return spec2
+
+def is_valid(spec):
+    """Check spec"""
+    def check_item(item):
+        if item['spare']:
+            return True
+        return check_variation(item['variation'])
+    def check_variation(variation):
+        t = variation['type']
+        if t == 'Element':
+            return True
+        elif t == 'Group':
+            return all([check_item(i) for i in variation['items']])
+        elif t == 'Extended':
+            trailing_fx = variation['items'][-1] is None
+            if not trailing_fx:
+                return False
+            return all([check_item(i) for i in variation['items'] if i is not None])
+        elif t == 'Repetitive':
+            return check_variation(variation['variation'])
+        elif t == 'Explicit':
+            return True
+        elif t == 'Compound':
+            items = [i for i in variation['items'] if i is not None]
+            return all([check_item(i) for i in items])
+        else:
+            raise Exception('unexpected variation type {}'.format(t))
+    return all([check_item(i) for i in spec['catalogue']])
+
+def main():
+    parser = argparse.ArgumentParser(description='Process asterix specs files.')
+    parser.add_argument('paths', metavar='PATH', nargs='*',
+        help='json spec file(s), use upstream repository if no input is given')
+    parser.add_argument('--reference', action='store_true',
+        help='print upstream reference and exit')
+    parser.add_argument("--update", action="store_true",
+        help="Update %s as needed instead of writing to stdout" % dissector_file)
+    args = parser.parse_args()
+
+    if args.reference:
+        gitrev_short = download_url('/gitrev.txt').decode().strip()[0:10]
+        print(gitrev_short)
+        sys.exit(0)
+
+    # read and json-decode input files
+    jsons = load_jsons(args.paths)
+    jsons = [json.loads(i) for i in jsons]
+    jsons = sorted(jsons, key = lambda x: (x['number'], x['edition']['major'], x['edition']['minor']))
+    jsons = [spec for spec in jsons if spec['type'] == 'Basic']
+    jsons = [remove_rfs(spec) for spec in jsons]
+    jsons = [spec for spec in jsons if is_valid(spec)]
+
+    cats = list(set([x['number'] for x in jsons]))
+    latest_editions = {cat: sorted(
+        filter(lambda x: x['number'] == cat, jsons),
+        key = lambda x: (x['edition']['major'], x['edition']['minor']), reverse=True)[0]['edition']
+        for cat in cats}
+
+    # regular expression for template rendering
+    ins = re.compile(r'---\{([A-Za-z0-9_]*)\}---')
+
+    gitrev = load_gitrev(args.paths)
+    with Context() as ctx:
+        for i in gitrev:
+            ctx.tell('gitrev', i)
+
+        # generate parts into the context buffer
+        for spec in jsons:
+            is_latest = spec['edition'] == latest_editions[spec['number']]
+
+            ctx.tell('insert1', '/* Category {:03d}, edition {}.{} */'.format(spec['number'], spec['edition']['major'], spec['edition']['minor']))
+
+            # handle part1
+            get_ref = lambda path: reference(spec['number'],
spec['edition'], path)
+            part1(ctx, get_ref, spec['catalogue'])
+            if is_latest:
+                ctx.tell('insert1', '/* Category {:03d}, edition {}.{} (latest) */'.format(spec['number'], spec['edition']['major'], spec['edition']['minor']))
+                get_ref = lambda path: reference(spec['number'], None, path)
+                part1(ctx, get_ref, spec['catalogue'])
+
+            # handle part2
+            cat = spec['number']
+            edition = spec['edition']
+            ref = '{:03d}_V{}_{}'.format(cat, edition['major'], edition['minor'])
+            part2(ctx, ref, spec['uap'])
+            if is_latest:
+                ref = '{:03d}'.format(cat)
+                part2(ctx, ref, spec['uap'])
+
+        part3(ctx, jsons)
+        part4(ctx, set([spec['number'] for spec in jsons]))
+
+        # use context buffer to render template
+        script_path = os.path.dirname(os.path.realpath(__file__))
+        with open(os.path.join(script_path, 'packet-asterix-template.c')) as f:
+            template_lines = f.readlines()
+
+        # All input is collected and rendered.
+        # It's safe to update the dissector.
+
+        # copy each line of the template to required output,
+        # if the 'insertion' is found in the template,
+        # replace it with the buffer content
+        with Output(args.update) as out:
+            for line in template_lines:
+                line = line.rstrip()
+
+                insertion = ins.match(line)
+                if insertion is None:
+                    out.dump(line)
+                else:
+                    segment = insertion.group(1)
+                    [out.dump(i) for i in ctx.buffer[segment]]
+
+if __name__ == '__main__':
+    main()
+
diff --git a/tools/bsd-setup.sh b/tools/bsd-setup.sh
new file mode 100755
index 0000000..6b018c6
--- /dev/null
+++ b/tools/bsd-setup.sh
@@ -0,0 +1,202 @@
+#!/usr/bin/env sh
+# Setup development environment on BSD-like platforms.
+#
+# Tested on: FreeBSD, OpenBSD, NetBSD.
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+# We do not use Bash as the shell for this script, and use the POSIX
+# syntax for function definition rather than the
+# "function <name>() { ... }" syntax, as FreeBSD 13, at least, does
+# not have Bash, and its /bin/sh doesn't support the other syntax.
+#
+
+print_usage() {
+    printf "\\nUtility to set up a BSD-based system for Wireshark development.\\n"
+    printf "The basic usage installs the needed software\\n\\n"
+    printf "Usage: $0 [--install-optional] [...other options...]\\n"
+    printf "\\t--install-optional: install optional software as well\\n"
+    printf "\\t[other]: other options are passed as-is to pkg manager.\\n"
+}
+
+ADDITIONAL=0
+OPTIONS=
+for arg; do
+    case $arg in
+        --help)
+            print_usage
+            exit 0
+            ;;
+        --install-optional)
+            ADDITIONAL=1
+            ;;
+        *)
+            OPTIONS="$OPTIONS $arg"
+            ;;
+    esac
+done
+
+# Check if the user is root
+if [ $(id -u) -ne 0 ]
+then
+    echo "You must be root."
+    exit 1
+fi
+
+BASIC_LIST="\
+    cmake \
+    qt6 \
+    git \
+    pcre2 \
+    speexdsp"
+
+ADDITIONAL_LIST="\
+    gettext-tools \
+    snappy \
+    bcg729 \
+    libssh \
+    libmaxminddb \
+    libsmi \
+    brotli \
+    zstd \
+    lua52 \
+    "
+
+# Uncomment to add PNG compression utilities used by compress-pngs:
+# ADDITIONAL_LIST="$ADDITIONAL_LIST \
+#     advancecomp \
+#     optipng \
+#     pngcrush"
+
+# Guess which package manager we will use
+PM=`which pkgin 2> /dev/null || which pkg 2> /dev/null || which pkg_add 2> /dev/null`
+
+case $PM in
+    */pkgin)
+        PM_OPTIONS="install"
+        PM_SEARCH="pkgin search"
+        PM_MUST_GLOB=no
+        ;;
+    */pkg)
+        PM_OPTIONS="install"
+        PM_SEARCH="pkg search"
+        PM_MUST_GLOB=yes
+        ;;
+    */pkg_add)
+        PM_OPTIONS=""
+        PM_SEARCH="pkg_info"
+        PM_MUST_GLOB=no
+        ;;
+esac
+
+
+echo "Using $PM ($PM_SEARCH)"
+
+# Adds package $2 to list variable $1 if the package is found
+add_package() {
+    local list="$1" pkgname="$2"
+
+    # fail if the package is not known
+    if [ "$PM_MUST_GLOB" = yes ]
+    then
+        #
+        # We need to do a glob search, with a "*" at the
+        # end, so we only find packages that *begin* with
+        # the name; otherwise, searching for pkg-config
+        # could find packages that *don't* begin with
+        # pkg-config, but have it later in the name
+        # (FreeBSD 11 has one such package), so when
+        # we then try to install it, that fails.  Doing
+        # an *exact* search fails, as that requires that
+        # the package name include the version number.
+        #
+        $PM_SEARCH -g "$pkgname*" > /dev/null 2>&1 || return 1
+    else
+        $PM_SEARCH "$pkgname" > /dev/null 2>&1 || return 1
+    fi
+
+    # package is found, append it to list
+    eval "${list}=\"\${${list}} \${pkgname}\""
+}
+
+# pkg-config: NetBSD
+# pkgconf: FreeBSD
+add_package BASIC_LIST pkg-config ||
+add_package BASIC_LIST pkgconf ||
+echo "pkg-config is unavailable"
+
+# c-ares: FreeBSD
+# libcares: OpenBSD
+add_package BASIC_LIST c-ares ||
+add_package BASIC_LIST libcares ||
+echo "c-ares is unavailable"
+
+# rubygem-asciidoctor: FreeBSD
+add_package ADDITIONAL_LIST rubygem-asciidoctor ||
+echo "asciidoctor is unavailable"
+
+# liblz4: FreeBSD
+# lz4: NetBSD
+add_package ADDITIONAL_LIST liblz4 ||
+add_package ADDITIONAL_LIST lz4 ||
+echo "lz4 is unavailable"
+
+# libnghttp2: FreeBSD
+# nghttp2: NetBSD
+add_package ADDITIONAL_LIST libnghttp2 ||
+add_package ADDITIONAL_LIST nghttp2 ||
+echo "nghttp2 is unavailable"
+
+# libnghttp3: FreeBSD
+# nghttp3: NetBSD
+add_package ADDITIONAL_LIST libnghttp3 ||
+add_package ADDITIONAL_LIST nghttp3 ||
+echo "nghttp3 is unavailable"
+
+# spandsp: NetBSD
+add_package ADDITIONAL_LIST spandsp ||
+echo "spandsp is unavailable"
+
+# ninja: FreeBSD, OpenBSD
+# ninja-build: NetBSD
+add_package ADDITIONAL_LIST ninja-build ||
+add_package ADDITIONAL_LIST ninja ||
+echo "ninja is unavailable"
+
+# libilbc: FreeBSD
+add_package ADDITIONAL_LIST libilbc ||
+echo "libilbc is unavailable"
+
+# Add OS-specific required/optional packages
+# Those not listed don't require additions.
+case `uname` in
+    FreeBSD | NetBSD)
+        add_package ADDITIONAL_LIST libgcrypt || echo "libgcrypt is unavailable"
+        ;;
+esac
+
+ACTUAL_LIST=$BASIC_LIST
+
+# Now arrange for optional support libraries
+if [ $ADDITIONAL -ne 0 ]
+then
+    ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
+fi
+
+$PM $PM_OPTIONS $ACTUAL_LIST $OPTIONS
+# Test the exit status numerically; "[ ! $? ]" always succeeds because
+# "$?" expands to a non-empty string.
+if [ $? -ne 0 ]
+then
+    exit 2
+fi
+
+if [ $ADDITIONAL -eq 0 ]
+then
+    # printf rather than "echo -e": this script runs under POSIX sh,
+    # where echo does not portably support -e.
+    printf "\n*** Optional packages not installed.
Rerun with --install-optional to have them.\n" +fi diff --git a/tools/checkAPIs.pl b/tools/checkAPIs.pl new file mode 100755 index 0000000..c9570b5 --- /dev/null +++ b/tools/checkAPIs.pl @@ -0,0 +1,1303 @@ +#!/usr/bin/env perl + +# +# Copyright 2006, Jeff Morriss +# +# A simple tool to check source code for function calls that should not +# be called by Wireshark code and to perform certain other checks. +# +# Usage: +# checkAPIs.pl [-M] [-g group1] [-g group2] ... +# [-s summary-group1] [-s summary-group2] ... +# [--nocheck-hf] +# [--nocheck-value-string-array] +# [--nocheck-shadow] +# [--debug] +# file1 file2 ... +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +use strict; +use Encode; +use English; +use Getopt::Long; +use Text::Balanced qw(extract_bracketed); + +my %APIs = ( + # API groups. + # Group name, e.g. 'prohibited' + # '' => { + # 'count_errors' => 1, # 1 if these are errors, 0 if warnings + # 'functions' => [ 'f1', 'f2', ...], # Function array + # 'function-counts' => {'f1',0, 'f2',0, ...}, # Function Counts hash (initialized in the code) + # } + # + # APIs that MUST NOT be used in Wireshark + 'prohibited' => { 'count_errors' => 1, 'functions' => [ + # Memory-unsafe APIs + # Use something that won't overwrite the end of your buffer instead + # of these. + # + # Microsoft provides lists of unsafe functions and their + # recommended replacements in "Security Development Lifecycle + # (SDL) Banned Function Calls" + # https://docs.microsoft.com/en-us/previous-versions/bb288454(v=msdn.10) + # and "Deprecated CRT Functions" + # https://docs.microsoft.com/en-us/previous-versions/ms235384(v=vs.100) + # + 'atoi', # use wsutil/strtoi.h functions + 'gets', + 'sprintf', + 'g_sprintf', + 'vsprintf', + 'g_vsprintf', + 'strcpy', + 'strncpy', + 'strcat', + 'strncat', + 'cftime', + 'ascftime', + ### non-portable APIs + # use glib (g_*) versions instead of these: + 'ntohl', + 'ntohs', + 'htonl', + 'htons', + 'strdup', + 'strndup', + # Windows doesn't have this; use g_ascii_strtoull() instead + 'strtoull', + ### non-portable: fails on Windows Wireshark built with VC newer than VC6 + # See https://gitlab.com/wireshark/wireshark/-/issues/6695#note_400659130 + 'g_fprintf', + 'g_vfprintf', + # use native snprintf() and vsnprintf() instead of these: + 'g_snprintf', + 'g_vsnprintf', + ### non-ANSI C + # use memset, memcpy, memcmp instead of these: + 'bzero', + 'bcopy', + 'bcmp', + # The MSDN page for ZeroMemory recommends SecureZeroMemory + # instead. + 'ZeroMemory', + # use wmem_*, ep_*, or g_* functions instead of these: + # (One thing to be aware of is that space allocated with malloc() + # may not be freeable--at least on Windows--with g_free() and + # vice-versa.) + 'malloc', + 'calloc', + 'realloc', + 'valloc', + 'free', + 'cfree', + # Locale-unsafe APIs + # These may have unexpected behaviors in some locales (e.g., + # "I" isn't always the upper-case form of "i", and "i" isn't + # always the lower-case form of "I"). Use the g_ascii_* version + # instead. 
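+        # (Illustration, an assumption rather than part of the list below:
+        # in a Turkish locale toupper('i') yields the dotted capital 'İ',
+        # not 'I'; the g_ascii_* variants behave the same in every locale.)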
+ 'isalnum', + 'isascii', + 'isalpha', + 'iscntrl', + 'isdigit', + 'islower', + 'isgraph', + 'isprint', + 'ispunct', + 'isspace', + 'isupper', + 'isxdigit', + 'tolower', + 'atof', + 'strtod', + 'strcasecmp', + 'strncasecmp', + # Deprecated in glib 2.68 in favor of g_memdup2 + # We have our local implementation for older versions + 'g_memdup', + 'g_strcasecmp', + 'g_strncasecmp', + 'g_strup', + 'g_strdown', + 'g_string_up', + 'g_string_down', + 'strerror', # use g_strerror + # Use the ws_* version of these: + # (Necessary because on Windows we use UTF8 for throughout the code + # so we must tweak that to UTF16 before operating on the file. Code + # using these functions will work unless the file/path name contains + # non-ASCII chars.) + 'open', + 'rename', + 'mkdir', + 'stat', + 'unlink', + 'remove', + 'fopen', + 'freopen', + 'fstat', + 'lseek', + # Misc + 'tmpnam', # use mkstemp + '_snwprintf' # use StringCchPrintf + ] }, + + ### Soft-Deprecated functions that should not be used in new code but + # have not been entirely removed from old code. These will become errors + # once they've been removed from all existing code. + 'soft-deprecated' => { 'count_errors' => 0, 'functions' => [ + 'tvb_length_remaining', # replaced with tvb_captured_length_remaining + + # Locale-unsafe APIs + # These may have unexpected behaviors in some locales (e.g., + # "I" isn't always the upper-case form of "i", and "i" isn't + # always the lower-case form of "I"). Use the g_ascii_* version + # instead. + 'toupper' + ] }, + + # APIs that SHOULD NOT be used in Wireshark (any more) + 'deprecated' => { 'count_errors' => 1, 'functions' => [ + 'perror', # Use g_strerror() and report messages in whatever + # fashion is appropriate for the code in question. + 'ctime', # Use abs_time_secs_to_str() + 'next_tvb_add_port', # Use next_tvb_add_uint() (and a matching change + # of NTVB_PORT -> NTVB_UINT) + + ### Deprecated GLib/GObject functions/macros + # (The list is based upon the GLib 2.30.2 & GObject 2.30.2 documentation; + # An entry may be commented out if it is currently + # being used in Wireshark and if the replacement functionality + # is not available in all the GLib versions that Wireshark + # currently supports. + # Note: Wireshark currently (Jan 2012) requires GLib 2.14 or newer. + # The Wireshark build currently (Jan 2012) defines G_DISABLE_DEPRECATED + # so use of any of the following should cause the Wireshark build to fail and + # therefore the tests for obsolete GLib function usage in checkAPIs should not be needed. 
+ 'G_ALLOC_AND_FREE', + 'G_ALLOC_ONLY', + 'g_allocator_free', # "use slice allocator" (avail since 2.10,2.14) + 'g_allocator_new', # "use slice allocator" (avail since 2.10,2.14) + 'g_async_queue_ref_unlocked', # g_async_queue_ref() (OK since 2.8) + 'g_async_queue_unref_and_unlock', # g_async_queue_unref() (OK since 2.8) + 'g_atomic_int_exchange_and_add', # since 2.30 + 'g_basename', + 'g_blow_chunks', # "use slice allocator" (avail since 2.10,2.14) + 'g_cache_value_foreach', # g_cache_key_foreach() + 'g_chunk_free', # g_slice_free (avail since 2.10) + 'g_chunk_new', # g_slice_new (avail since 2.10) + 'g_chunk_new0', # g_slice_new0 (avail since 2.10) + 'g_completion_add_items', # since 2.26 + 'g_completion_clear_items', # since 2.26 + 'g_completion_complete', # since 2.26 + 'g_completion_complete_utf8', # since 2.26 + 'g_completion_free', # since 2.26 + 'g_completion_new', # since 2.26 + 'g_completion_remove_items', # since 2.26 + 'g_completion_set_compare', # since 2.26 + 'G_CONST_RETURN', # since 2.26 + 'g_date_set_time', # g_date_set_time_t (avail since 2.10) + 'g_dirname', + 'g_format_size_for_display', # since 2.30: use g_format_size() + 'G_GNUC_FUNCTION', + 'G_GNUC_PRETTY_FUNCTION', + 'g_hash_table_freeze', + 'g_hash_table_thaw', + 'G_HAVE_GINT64', + 'g_io_channel_close', + 'g_io_channel_read', + 'g_io_channel_seek', + 'g_io_channel_write', + 'g_list_pop_allocator', # "does nothing since 2.10" + 'g_list_push_allocator', # "does nothing since 2.10" + 'g_main_destroy', + 'g_main_is_running', + 'g_main_iteration', + 'g_main_new', + 'g_main_pending', + 'g_main_quit', + 'g_main_run', + 'g_main_set_poll_func', + 'g_mapped_file_free', # [as of 2.22: use g_map_file_unref] + 'g_mem_chunk_alloc', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_alloc0', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_clean', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_create', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_destroy', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_free', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_info', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_new', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_print', # "use slice allocator" (avail since 2.10) + 'g_mem_chunk_reset', # "use slice allocator" (avail since 2.10) + 'g_node_pop_allocator', # "does nothing since 2.10" + 'g_node_push_allocator', # "does nothing since 2.10" + 'g_relation_count', # since 2.26 + 'g_relation_delete', # since 2.26 + 'g_relation_destroy', # since 2.26 + 'g_relation_exists', # since 2.26 + 'g_relation_index', # since 2.26 + 'g_relation_insert', # since 2.26 + 'g_relation_new', # since 2.26 + 'g_relation_print', # since 2.26 + 'g_relation_select', # since 2.26 + 'g_scanner_add_symbol', + 'g_scanner_remove_symbol', + 'g_scanner_foreach_symbol', + 'g_scanner_freeze_symbol_table', + 'g_scanner_thaw_symbol_table', + 'g_slist_pop_allocator', # "does nothing since 2.10" + 'g_slist_push_allocator', # "does nothing since 2.10" + 'g_source_get_current_time', # since 2.28: use g_source_get_time() + 'g_strcasecmp', # + 'g_strdown', # + 'g_string_down', # + 'g_string_sprintf', # use g_string_printf() instead + 'g_string_sprintfa', # use g_string_append_printf instead + 'g_string_up', # + 'g_strncasecmp', # + 'g_strup', # + 'g_tree_traverse', + 'g_tuples_destroy', # since 2.26 + 'g_tuples_index', # since 2.26 + 'g_unicode_canonical_decomposition', # since 2.30: use g_unichar_fully_decompose() + 'G_UNICODE_COMBINING_MARK', # 
since 2.30: use G_UNICODE_SPACING_MARK
+        'g_value_set_boxed_take_ownership',             # GObject
+        'g_value_set_object_take_ownership',            # GObject
+        'g_value_set_param_take_ownership',             # GObject
+        'g_value_set_string_take_ownership',            # GObject
+        'G_WIN32_DLLMAIN_FOR_DLL_NAME',
+        'g_win32_get_package_installation_directory',
+        'g_win32_get_package_installation_subdirectory',
+        'qVariantFromValue'
+        ] },
+
+        'dissectors-prohibited' => { 'count_errors' => 1, 'functions' => [
+                # APIs that make the program exit. Dissectors shouldn't call these.
+                'abort',
+                'assert',
+                'assert_perror',
+                'exit',
+                'g_assert',
+                'g_error',
+        ] },
+
+        'dissectors-restricted' => { 'count_errors' => 0, 'functions' => [
+                # APIs that print to the terminal. Dissectors shouldn't call these.
+                # FIXME: Explain what to use instead.
+                'printf',
+                'g_warning',
+        ] },
+
+);
+
+my @apiGroups = qw(prohibited deprecated soft-deprecated);
+
+# Array of function/variable pairs which are excluded
+# from prefs_register_*_preference checks
+my @excludePrefsCheck = (
+        [ qw(prefs_register_password_preference), '(const char **)arg->pref_valptr' ],
+        [ qw(prefs_register_string_preference), '(const char **)arg->pref_valptr' ],
+);
+
+
+# Given a ref to a hash containing "functions" and "function_counts" entries:
+# Determine if any item of the list of APIs contained in the array referenced by "functions"
+# exists in the file.
+# For each API which appears in the file:
+#     Push the API onto the provided list;
+#     Add the number of times the API appears in the file to the total count
+#     for the API (stored as the value of the API key in the hash referenced by "function_counts").
+
+sub findAPIinFile($$$)
+{
+        my ($groupHashRef, $fileContentsRef, $foundAPIsRef) = @_;
+
+        for my $api ( @{$groupHashRef->{functions}} )
+        {
+                my $cnt = 0;
+                # Match function calls, but ignore false positives from:
+                # C++ method definition: int MyClass::open(...)
+                # Method invocation: myClass->open(...);
+                # Function declaration: int open(...);
+                # Method invocation: QString().sprintf(...)
+                while (${$fileContentsRef} =~ m/ \W (?<!::|\.|->|\w\ ) (?<!\w) ($api) \W*? \( /gx)
+                {
+                        $cnt += 1;
+                }
+                if ($cnt > 0) {
+                        push @{$foundAPIsRef}, $api;
+                        $groupHashRef->{function_counts}->{$api} += 1;
+                }
+        }
+}
+
+# APIs which (generally) should not be called with an argument of tvb_get_ptr()
+my @TvbPtrAPIs = (
+        # Use NULL for the value_ptr instead of tvb_get_ptr() (only if the
+        # given offset and length are equal) with these:
+        'proto_tree_add_bytes_format',
+        'proto_tree_add_bytes_format_value',
+        'proto_tree_add_ether',
+        # Use the tvb_* version of these:
+        # Use tvb_bytes_to_str[_punct] instead of:
+        'bytes_to_str',
+        'bytes_to_str_punct',
+        'SET_ADDRESS',
+        'SET_ADDRESS_HF',
+);
+
+sub checkAPIsCalledWithTvbGetPtr($$$)
+{
+        my ($APIs, $fileContentsRef, $foundAPIsRef) = @_;
+
+        for my $api (@{$APIs}) {
+                my @items;
+                my $cnt = 0;
+
+                @items = (${$fileContentsRef} =~ m/ ($api [^;]* ; ) /xsg);
+                while (@items) {
+                        my ($item) = @items;
+                        shift @items;
+                        if ($item =~ / tvb_get_ptr /xos) {
+                                $cnt += 1;
+                        }
+                }
+
+                if ($cnt > 0) {
+                        push @{$foundAPIsRef}, $api;
+                }
+        }
+}
+
+# List of possible shadow variables (the majority coming from macOS)
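+# ('index' and 'time', for instance, are global functions declared in the
+# system headers on macOS, so a local variable of the same name draws
+# -Wshadow warnings there.)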
+my @ShadowVariable = (
+        'index',
+        'time',
+        'strlen',
+        'system'
+);
+
+sub check_shadow_variable($$$)
+{
+        my ($groupHashRef, $fileContentsRef, $foundAPIsRef) = @_;
+
+        for my $api ( @{$groupHashRef} )
+        {
+                my $cnt = 0;
+                while (${$fileContentsRef} =~ m/ \s $api \s*+ [^\(\w] /gx)
+                {
+                        $cnt += 1;
+                }
+                if ($cnt > 0) {
+                        push @{$foundAPIsRef}, $api;
+                }
+        }
+}
+
+sub check_snprintf_plus_strlen($$)
+{
+        my ($fileContentsRef, $filename) = @_;
+        my @items;
+
+        # This catches both snprintf() and g_snprintf().
+        # If we need to do more APIs, we can make this function look more like
+        # checkAPIsCalledWithTvbGetPtr().
+        @items = (${$fileContentsRef} =~ m/ (snprintf [^;]* ; ) /xsg);
+        while (@items) {
+                my ($item) = @items;
+                shift @items;
+                if ($item =~ / strlen\s*\( /xos) {
+                        print STDERR "Warning: ".$filename." uses snprintf + strlen to assemble strings.\n";
+                        last;
+                }
+        }
+}
+
+#### Regex for use when searching for value-string definitions
+my $StaticRegex = qr/ static \s+ /xs;
+my $ConstRegex = qr/ const \s+ /xs;
+my $Static_andor_ConstRegex = qr/ (?: $StaticRegex $ConstRegex | $StaticRegex | $ConstRegex) /xs;
+my $ValueStringVarnameRegex = qr/ (?:value|val64|string|range|bytes)_string /xs;
+my $ValueStringRegex = qr/ $Static_andor_ConstRegex ($ValueStringVarnameRegex) \s+ [^;*#]+ = [^;]+ [{] .+? [}] \s*? ; /xs;
+my $EnumValRegex = qr/ $Static_andor_ConstRegex enum_val_t \s+ [^;*]+ = [^;]+ [{] .+? [}] \s*? ; /xs;
+my $NewlineStringRegex = qr/ ["] [^"]* \\n [^"]* ["] /xs;
+
+sub check_value_string_arrays($$$)
+{
+        my ($fileContentsRef, $filename, $debug_flag) = @_;
+        my $cnt = 0;
+        # Brute force check for value_string (and string_string or range_string) arrays
+        # which are missing {0, NULL} as the final (terminating) array entry
+
+        # Assumption: definition is of form (pseudo-Regex):
+        #     " (static const|static|const) (value|string|range)_string .+ = { .+ ;"
+        # (possibly over multiple lines)
+        while (${$fileContentsRef} =~ / ( $ValueStringRegex ) /xsog) {
+                # XXX_string array definition found; check if NULL terminated
+                my $vs = my $vsx = $1;
+                my $type = $2;
+                if ($debug_flag) {
+                        $vsx =~ / ( .+ $ValueStringVarnameRegex [^=]+ ) = /xo;
+                        printf STDERR "==> %-35.35s: %s\n", $filename, $1;
+                        printf STDERR "%s\n", $vs;
+                }
+                $vs =~ s{ \s } {}xg;
+
+                # Check for expected trailer
+                my $expectedTrailer;
+                my $trailerHint;
+                if ($type eq "string_string") {
+                        # XXX shouldn't we reject 0 since it is gchar*?
+                        $expectedTrailer = "(NULL|0), NULL";
+                        $trailerHint = "NULL, NULL";
+                } elsif ($type eq "range_string") {
+                        $expectedTrailer = "0(x0+)?, 0(x0+)?, NULL";
+                        $trailerHint = "0, 0, NULL";
+                } elsif ($type eq "bytes_string") {
+                        # XXX shouldn't we reject 0 since it is guint8*?
+                        $expectedTrailer = "(NULL|0), 0, NULL";
+                        $trailerHint = "NULL, NULL";
+                } else {
+                        $expectedTrailer = "0(x?0+)?, NULL";
+                        $trailerHint = "0, NULL";
+                }
+                if ($vs !~ / [{] $expectedTrailer [}] ,? [}] ; $/x) {
+                        $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo;
+                        printf STDERR "Error: %-35.35s: {%s} is required as the last %s array entry: %s\n", $filename, $trailerHint, $type, $1;
+                        $cnt++;
+                }
+
+                if ($vs !~ / (static)?
const $ValueStringVarnameRegex /xo) { + $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo; + printf STDERR "Error: %-35.35s: Missing 'const': %s\n", $filename, $1; + $cnt++; + } + if ($vs =~ / $NewlineStringRegex /xo && $type ne "bytes_string") { + $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo; + printf STDERR "Error: %-35.35s: XXX_string contains a newline: %s\n", $filename, $1; + $cnt++; + } + } + + # Brute force check for enum_val_t arrays which are missing {NULL, NULL, ...} + # as the final (terminating) array entry + # For now use the same option to turn this and value_string checking on and off. + # (Is the option even necessary?) + + # Assumption: definition is of form (pseudo-Regex): + # " (static const|static|const) enum_val_t .+ = { .+ ;" + # (possibly over multiple lines) + while (${$fileContentsRef} =~ / ( $EnumValRegex ) /xsog) { + # enum_val_t array definition found; check if NULL terminated + my $vs = my $vsx = $1; + if ($debug_flag) { + $vsx =~ / ( .+ enum_val_t [^=]+ ) = /xo; + printf STDERR "==> %-35.35s: %s\n", $filename, $1; + printf STDERR "%s\n", $vs; + } + $vs =~ s{ \s } {}xg; + # README.developer says + # "Don't put a comma after the last tuple of an initializer of an array" + # However: since this usage is present in some number of cases, we'll allow for now + if ($vs !~ / NULL, NULL, -?[0-9] [}] ,? [}] ; $/xo) { + $vsx =~ /( enum_val_t [^=]+ ) = /xo; + printf STDERR "Error: %-35.35s: {NULL, NULL, ...} is required as the last enum_val_t array entry: %s\n", $filename, $1; + $cnt++; + } + if ($vs !~ / (static)? const enum_val_t /xo) { + $vsx =~ /( enum_val_t [^=]+ ) = /xo; + printf STDERR "Error: %-35.35s: Missing 'const': %s\n", $filename, $1; + $cnt++; + } + if ($vs =~ / $NewlineStringRegex /xo) { + $vsx =~ /( (?:value|string|range)_string [^=]+ ) = /xo; + printf STDERR "Error: %-35.35s: enum_val_t contains a newline: %s\n", $filename, $1; + $cnt++; + } + } + + return $cnt; +} + + +sub check_included_files($$) +{ + my ($fileContentsRef, $filename) = @_; + my @incFiles; + + @incFiles = (${$fileContentsRef} =~ m/\#include \s* ([<"].+[>"])/gox); + + # files in the ui/qt directory should include the ui class includes + # by using #include <> + # this ensures that Visual Studio picks up these files from the + # build directory if we're compiling with cmake + if ($filename =~ m#ui/qt/# ) { + foreach (@incFiles) { + if ( m#"ui_.*\.h"$# ) { + # strip the quotes to get the base name + # for the error message + s/\"//g; + + print STDERR "$filename: ". + "Please use #include <$_> ". + "instead of #include \"$_\".\n"; + } + } + } +} + + +sub check_proto_tree_add_XXX($$) +{ + my ($fileContentsRef, $filename) = @_; + my @items; + my $errorCount = 0; + + @items = (${$fileContentsRef} =~ m/ (proto_tree_add_[_a-z0-9]+) \( ([^;]*) \) \s* ; /xsg); + + while (@items) { + my ($func) = @items; + shift @items; + my ($args) = @items; + shift @items; + + #Check to make sure tvb_get* isn't used to pass into a proto_tree_add_, when + #proto_tree_add_item could just be used instead + if ($args =~ /,\s*tvb_get_/xos) { + if (($func =~ m/^proto_tree_add_(time|bytes|ipxnet|ipv4|ipv6|ether|guid|oid|string|boolean|float|double|uint|uint64|int|int64|eui64|bitmask_list_value)$/) + ) { + print STDERR "Error: ".$filename." uses $func with tvb_get_*. Use proto_tree_add_item instead\n"; + $errorCount++; + + # Print out the function args to make it easier + # to find the offending code. But first make + # it readable by eliminating extra white space. + $args =~ s/\s+/ /g; + print STDERR "\tArgs: " . 
$args . "\n"; + } + } + + # Remove anything inside parenthesis in the arguments so we + # don't get false positives when someone calls + # proto_tree_add_XXX(..., tvb_YYY(..., ENC_ZZZ)) + # and allow there to be newlines inside + $args =~ s/\(.*\)//sg; + + #Check for accidental usage of ENC_ parameter + if ($args =~ /,\s*ENC_/xos) { + if (!($func =~ /proto_tree_add_(time|item|bitmask|[a-z0-9]+_bits_format_value|bits_item|bits_ret_val|item_ret_int|item_ret_uint|bytes_item|checksum)/xos) + ) { + print STDERR "Error: ".$filename." uses $func with ENC_*.\n"; + $errorCount++; + + # Print out the function args to make it easier + # to find the offending code. But first make + # it readable by eliminating extra white space. + $args =~ s/\s+/ /g; + print STDERR "\tArgs: " . $args . "\n"; + } + } + } + + return $errorCount; +} + + +# Verify that all declared ett_ variables are registered. +# Don't bother trying to check usage (for now)... +sub check_ett_registration($$) +{ + my ($fileContentsRef, $filename) = @_; + my @ett_declarations; + my @ett_address_uses; + my %ett_uses; + my @unUsedEtts; + my $errorCount = 0; + + # A pattern to match ett variable names. Obviously this assumes that + # they start with `ett_` + my $EttVarName = qr{ (?: ett_[a-z0-9_]+ (?:\[[0-9]+\])? ) }xi; + + # Find all the ett_ variables declared in the file + @ett_declarations = (${$fileContentsRef} =~ m{ + ^ # assume declarations are on their own line + (?:static\s+)? # some declarations aren't static + g?int # could be int or gint + \s+ + ($EttVarName) # variable name + \s*=\s* + -1\s*; + }xgiom); + + if (!@ett_declarations) { + # Only complain if the file looks like a dissector + #print STDERR "Found no etts in ".$filename."\n" if + # (${$fileContentsRef} =~ m{proto_register_field_array}os); + return; + } + #print "Found these etts in ".$filename.": ".join(' ', @ett_declarations)."\n\n"; + + # Find all the uses of the *addresses* of ett variables in the file. + # (We assume if someone is using the address they're using it to + # register the ett.) + @ett_address_uses = (${$fileContentsRef} =~ m{ + &\s*($EttVarName) + }xgiom); + + if (!@ett_address_uses) { + print STDERR "Found no ett address uses in ".$filename."\n"; + # Don't treat this as an error. + # It's more likely a problem with checkAPIs. + return; + } + #print "Found these etts addresses used in ".$filename.": ".join(' ', @ett_address_uses)."\n\n"; + + # Convert to a hash for fast lookup + $ett_uses{$_}++ for (@ett_address_uses); + + # Find which declared etts are not used. + while (@ett_declarations) { + my ($ett_var) = @ett_declarations; + shift @ett_declarations; + + push(@unUsedEtts, $ett_var) if (not exists $ett_uses{$ett_var}); + } + + if (@unUsedEtts) { + print STDERR "Error: found these unused ett variables in ".$filename.": ".join(' ', @unUsedEtts)."\n"; + $errorCount++; + } + + return $errorCount; +} + +# Given the file contents and a file name, check all of the hf entries for +# various problems (such as those checked for in proto.c). +sub check_hf_entries($$) +{ + my ($fileContentsRef, $filename) = @_; + my $errorCount = 0; + + my @items; + my $hfRegex = qr{ + \{ + \s* + &\s*([A-Z0-9_\[\]-]+) # &hf + \s*,\s* + }xis; + @items = (${$fileContentsRef} =~ m{ + $hfRegex # &hf + \{\s* + ("[A-Z0-9 '\./\(\)_:-]+") # name + \s*,\s* + (NULL|"[A-Z0-9_\.-]*") # abbrev + \s*,\s* + (FT_[A-Z0-9_]+) # field type + \s*,\s* + ([A-Z0-9x\|_\s]+) # display + \s*,\s* + ([^,]+?) 
# convert + \s*,\s* + ([A-Z0-9_]+) # bitmask + \s*,\s* + (NULL|"[A-Z0-9 '\./\(\)\?_:-]+") # blurb (NULL or a string) + \s*,\s* + HFILL # HFILL + }xgios); + + #print "Found @items items\n"; + while (@items) { + ##my $errorCount_save = $errorCount; + my ($hf, $name, $abbrev, $ft, $display, $convert, $bitmask, $blurb) = @items; + shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; + + $display =~ s/\s+//g; + $convert =~ s/\s+//g; + # GET_VALS_EXTP is a macro in packet-mq.h for packet-mq.c and packet-mq-pcf.c + $convert =~ s/\bGET_VALS_EXTP\(/VALS_EXT_PTR\(/; + + #print "name=$name, abbrev=$abbrev, ft=$ft, display=$display, convert=>$convert<, bitmask=$bitmask, blurb=$blurb\n"; + + if ($abbrev eq '""' || $abbrev eq "NULL") { + print STDERR "Error: $hf does not have an abbreviation in $filename\n"; + $errorCount++; + } + if ($abbrev =~ m/\.\.+/) { + print STDERR "Error: the abbreviation for $hf ($abbrev) contains two or more sequential periods in $filename\n"; + $errorCount++; + } + if ($name eq $abbrev) { + print STDERR "Error: the abbreviation for $hf ($abbrev) matches the field name ($name) in $filename\n"; + $errorCount++; + } + if (lc($name) eq lc($blurb)) { + print STDERR "Error: the blurb for $hf ($blurb) matches the field name ($name) in $filename\n"; + $errorCount++; + } + if ($name =~ m/"\s+/) { + print STDERR "Error: the name for $hf ($name) has leading space in $filename\n"; + $errorCount++; + } + if ($name =~ m/\s+"/) { + print STDERR "Error: the name for $hf ($name) has trailing space in $filename\n"; + $errorCount++; + } + if ($blurb =~ m/"\s+/) { + print STDERR "Error: the blurb for $hf ($blurb) has leading space in $filename\n"; + $errorCount++; + } + if ($blurb =~ m/\s+"/) { + print STDERR "Error: the blurb for $hf ($blurb) has trailing space in $filename\n"; + $errorCount++; + } + if ($abbrev =~ m/\s+/) { + print STDERR "Error: the abbreviation for $hf ($abbrev) has white space in $filename\n"; + $errorCount++; + } + if ("\"".$hf ."\"" eq $name) { + print STDERR "Error: name is the hf_variable_name in field $name ($abbrev) in $filename\n"; + $errorCount++; + } + if ("\"".$hf ."\"" eq $abbrev) { + print STDERR "Error: abbreviation is the hf_variable_name in field $name ($abbrev) in $filename\n"; + $errorCount++; + } + if ($ft ne "FT_BOOLEAN" && $convert =~ m/^TFS\(.*\)/) { + print STDERR "Error: $hf uses a true/false string but is an $ft instead of FT_BOOLEAN in $filename\n"; + $errorCount++; + } + if ($ft eq "FT_BOOLEAN" && $convert =~ m/^VALS\(.*\)/) { + print STDERR "Error: $hf uses a value_string but is an FT_BOOLEAN in $filename\n"; + $errorCount++; + } + if (($ft eq "FT_BOOLEAN") && ($bitmask !~ /^(0x)?0+$/) && ($display =~ /^BASE_/)) { + print STDERR "Error: $hf: FT_BOOLEAN with a bitmask must specify a 'parent field width' for 'display' in $filename\n"; + $errorCount++; + } + if (($ft eq "FT_BOOLEAN") && ($convert !~ m/^((0[xX]0?)?0$|NULL$|TFS)/)) { + print STDERR "Error: $hf: FT_BOOLEAN with non-null 'convert' field missing TFS in $filename\n"; + $errorCount++; + } + if ($convert =~ m/RVALS/ && $display !~ m/BASE_RANGE_STRING/) { + print STDERR "Error: $hf uses RVALS but 'display' does not include BASE_RANGE_STRING in $filename\n"; + $errorCount++; + } + if ($convert =~ m/VALS64/ && $display !~ m/BASE_VAL64_STRING/) { + print STDERR "Error: $hf uses VALS64 but 'display' does not include BASE_VAL64_STRING in $filename\n"; + $errorCount++; + } + if ($display =~ /BASE_EXT_STRING/ && $convert !~ 
/^(VALS_EXT_PTR\(|&)/) { + print STDERR "Error: $hf: BASE_EXT_STRING should use VALS_EXT_PTR for 'strings' instead of '$convert' in $filename\n"; + $errorCount++; + } + if ($ft =~ m/^FT_U?INT(8|16|24|32)$/ && $convert =~ m/^VALS64\(/) { + print STDERR "Error: $hf: 32-bit field must use VALS instead of VALS64 in $filename\n"; + $errorCount++; + } + if ($ft =~ m/^FT_U?INT(40|48|56|64)$/ && $convert =~ m/^VALS\(/) { + print STDERR "Error: $hf: 64-bit field must use VALS64 instead of VALS in $filename\n"; + $errorCount++; + } + if ($convert =~ m/^(VALS|VALS64|RVALS)\(&.*\)/) { + print STDERR "Error: $hf is passing the address of a pointer to $1 in $filename\n"; + $errorCount++; + } + if ($convert !~ m/^((0[xX]0?)?0$|NULL$|VALS|VALS64|VALS_EXT_PTR|RVALS|TFS|CF_FUNC|FRAMENUM_TYPE|&|STRINGS_ENTERPRISES)/ && $display !~ /BASE_CUSTOM/) { + print STDERR "Error: non-null $hf 'convert' field missing 'VALS|VALS64|RVALS|TFS|CF_FUNC|FRAMENUM_TYPE|&|STRINGS_ENTERPRISES' in $filename ?\n"; + $errorCount++; + } +## Benign... +## if (($ft eq "FT_BOOLEAN") && ($bitmask =~ /^(0x)?0+$/) && ($display ne "BASE_NONE")) { +## print STDERR "Error: $abbrev: FT_BOOLEAN with no bitmask must use BASE_NONE for 'display' in $filename\n"; +## $errorCount++; +## } + ##if ($errorCount != $errorCount_save) { + ## print STDERR "name=$name, abbrev=$abbrev, ft=$ft, display=$display, convert=>$convert<, bitmask=$bitmask, blurb=$blurb\n"; + ##} + + } + + return $errorCount; +} + +sub check_pref_var_dupes($$) +{ + my ($filecontentsref, $filename) = @_; + my $errorcount = 0; + + # Avoid flagging the actual prototypes + return 0 if $filename =~ /prefs\.[ch]$/; + + # remove macro lines + my $filecontents = ${$filecontentsref}; + $filecontents =~ s { ^\s*\#.*$} []xogm; + + # At what position is the variable in the prefs_register_*_preference() call? + my %prefs_register_var_pos = ( + static_text => undef, obsolete => undef, # ignore + decode_as_range => -2, range => -2, filename => -2, # second to last + enum => -3, # third to last + # everything else is the last argument + ); + + my @dupes; + my %count; + while ($filecontents =~ /prefs_register_(\w+?)_preference/gs) { + my ($func) = "prefs_register_$1_preference"; + my ($args) = extract_bracketed(substr($filecontents, $+[0]), '()'); + $args = substr($args, 1, -1); # strip parens + + my $pos = $prefs_register_var_pos{$1}; + next if exists $prefs_register_var_pos{$1} and not defined $pos; + $pos //= -1; + my $var = (split /\s*,\s*(?![^(]*\))/, $args)[$pos]; # only commas outside parens + + my $ignore = 0; + for my $row (@excludePrefsCheck) { + my ($rfunc, $rvar) = @$row; + if (($rfunc eq $func) && ($rvar eq $var)) { + $ignore = 1 + } + } + if (!$ignore) { + push @dupes, $var if $count{$var}++ == 1; + } + } + + if (@dupes) { + print STDERR "$filename: error: found these preference variables used in more than one prefs_register_*_preference:\n\t".join(', ', @dupes)."\n"; + $errorcount++; + } + + return $errorcount; +} + +# Check for forbidden control flow changes, see epan/exceptions.h +sub check_try_catch($$) +{ + my ($fileContentsRef, $filename) = @_; + my $errorCount = 0; + + # Match TRY { ... } ENDTRY (with an optional '\' in case of a macro). + my @items = (${$fileContentsRef} =~ m/ \bTRY\s*\{ (.+?) \}\s* \\? 
\s*ENDTRY\b /xsg);
+        for my $block (@items) {
+                if ($block =~ m/ \breturn\b /x) {
+                        print STDERR "Error: return is forbidden in TRY/CATCH in $filename\n";
+                        $errorCount++;
+                }
+
+                my @gotoLabels = $block =~ m/ \bgoto\s+ (\w+) /xsg;
+                my %seen = ();
+                for my $gotoLabel (@gotoLabels) {
+                        if ($seen{$gotoLabel}) {
+                                next;
+                        }
+                        $seen{$gotoLabel} = 1;
+
+                        if ($block !~ /^ \s* $gotoLabel \s* :/xsgm) {
+                                print STDERR "Error: goto to label '$gotoLabel' outside TRY/CATCH is forbidden in $filename\n";
+                                $errorCount++;
+                        }
+                }
+        }
+
+        return $errorCount;
+}
+
+sub print_usage
+{
+        print "Usage: checkAPIs.pl [-M] [-h] [-g group1[:count]] [-g group2] ... \n";
+        print "                    [-summary-group group1] [-summary-group group2] ... \n";
+        print "                    [--sourcedir=srcdir] \n";
+        print "                    [--nocheck-hf]\n";
+        print "                    [--nocheck-value-string-array] \n";
+        print "                    [--nocheck-shadow]\n";
+        print "                    [--debug]\n";
+        print "                    [--file=/path/to/file_list]\n";
+        print "                    file1 file2 ...\n";
+        print "\n";
+        print "       -M: Generate output for -g in 'machine-readable' format\n";
+        print "       -p: used by the git pre-commit hook\n";
+        print "       -h: help, print usage message\n";
+        print "       -g <group>: Check input files for use of APIs in <group>\n";
+        print "                   (in addition to the default groups)\n";
+        print "                   Maximum uses can be specified with <group>:<count>\n";
+        print "       -summary-group <group>: Output summary (count) for each API in <group>\n";
+        print "                   (-g <group> also req'd)\n";
+        print "       --nocheck-hf: Skip header field definition checks\n";
+        print "       --nocheck-value-string-array: Skip value string array checks\n";
+        print "       --nocheck-shadow: Skip shadow variable checks\n";
+        print "       --debug: UNDOCUMENTED\n";
+        print "\n";
+        print "   Default Groups[-g]: ", join (", ", sort @apiGroups), "\n";
+        print "   Available Groups:   ", join (", ", sort keys %APIs), "\n";
+}
+
+# -------------
+# action: remove '#if 0'd code from the input string
+# args    codeRef, fileName
+# returns: codeRef
+#
+# Essentially: split the input into blocks of code or lines of #if/#if 0/etc.
+#              Remove blocks that follow '#if 0' until '#else/#endif' is found.
+
+{ # block begin
+my $debug = 0;
+
+        sub remove_if0_code {
+                my ($codeRef, $fileName) = @_;
+
+                # Preprocess output (ensure trailing LF and no leading WS before '#')
+                $$codeRef =~ s/^\s*#/#/m;
+                if ($$codeRef !~ /\n$/) { $$codeRef .= "\n"; }
+
+                # Split into blocks of normal code or lines with conditionals.
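+                # A small worked example (sketch): given
+                #     #if 0
+                #     foo();
+                #     #else
+                #     bar();
+                #     #endif
+                # the '#' lines and 'bar();' are kept while 'foo();' is
+                # dropped, because blocks between '#if 0' and the matching
+                # '#else'/'#endif' are skipped below.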
+ my $ifRegExp = qr/if 0|if|else|endif/; + my @blocks = split(/^(#\s*(?:$ifRegExp).*\n)/m, $$codeRef); + + my ($if_lvl, $if0_lvl, $if0) = (0,0,0); + my $lines = ''; + for my $block (@blocks) { + my $if; + if ($block =~ /^#\s*($ifRegExp)/) { + # #if/#if 0/#else/#endif processing + $if = $1; + if ($debug == 99) { + print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl [$if] - $block"); + } + if ($if eq 'if') { + $if_lvl += 1; + } elsif ($if eq 'if 0') { + $if_lvl += 1; + if ($if0_lvl == 0) { + $if0_lvl = $if_lvl; + $if0 = 1; # inside #if 0 + } + } elsif ($if eq 'else') { + if ($if0_lvl == $if_lvl) { + $if0 = 0; + } + } elsif ($if eq 'endif') { + if ($if0_lvl == $if_lvl) { + $if0 = 0; + $if0_lvl = 0; + } + $if_lvl -= 1; + if ($if_lvl < 0) { + die "patsub: #if/#endif mismatch in $fileName" + } + } + } + + if ($debug == 99) { + print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl\n"); + } + # Keep preprocessor lines and blocks that are not enclosed in #if 0 + if ($if or $if0 != 1) { + $lines .= $block; + } + } + $$codeRef = $lines; + + ($debug == 2) && print "==> After Remove if0: code: [$fileName]\n$$codeRef\n===<\n"; + return $codeRef; + } +} # block end + +# The below Regexp are based on those from: +# https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811 +# They are in the public domain. + +# 2. A regex which matches double-quoted strings. +# ?s added so that strings containing a 'line continuation' +# ( \ followed by a new-line) will match. +my $DoubleQuotedStr = qr{ (?: ["] (?s: \\. | [^\"\\])* ["]) }x; + +# 3. A regex which matches single-quoted strings. +my $SingleQuotedStr = qr{ (?: \' (?: \\. | [^\'\\])* [']) }x; + +# +# MAIN +# +my $errorCount = 0; + +# The default list, which can be expanded. +my @apiSummaryGroups = (); +my $machine_readable_output = 0; # default: disabled +my $check_hf = 1; # default: enabled +my $check_value_string_array= 1; # default: enabled +my $check_shadow = 1; # default: enabled +my $debug_flag = 0; # default: disabled +my $source_dir = ""; +my $filenamelist = ""; +my $help_flag = 0; +my $pre_commit = 0; + +my $result = GetOptions( + 'group=s' => \@apiGroups, + 'summary-group=s' => \@apiSummaryGroups, + 'Machine-readable' => \$machine_readable_output, + 'check-hf!' => \$check_hf, + 'check-value-string-array!' => \$check_value_string_array, + 'check-shadow!' => \$check_shadow, + 'sourcedir=s' => \$source_dir, + 'debug' => \$debug_flag, + 'pre-commit' => \$pre_commit, + 'file=s' => \$filenamelist, + 'help' => \$help_flag + ); +if (!$result || $help_flag) { + print_usage(); + exit(1); +} + +# the pre-commit hook only calls checkAPIs one file at a time, so this +# is safe to do globally (and easier) +if ($pre_commit) { + my $filename = $ARGV[0]; + # if the filename is packet-*.c or packet-*.h, then we set the abort and termoutput groups. + if ($filename =~ /\bpacket-[^\/\\]+\.[ch]$/) { + push @apiGroups, "abort"; + push @apiGroups, "termoutput"; + } +} + +# Add a 'function_count' anonymous hash to each of the 'apiGroup' entries in the %APIs hash. 
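+# After the loop below, each group is shaped roughly like (sketch):
+#     'prohibited' => { 'count_errors' => 1,
+#                       'functions' => [ 'gets', ... ],
+#                       'function_counts' => { 'gets' => undef, ... },
+#                       'max_function_count' => 0,
+#                       'cur_function_count' => 0 }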
+for my $apiGroup (keys %APIs) {
+        my @functions = @{$APIs{$apiGroup}{functions}};
+
+        $APIs{$apiGroup}->{function_counts} = {};
+        @{$APIs{$apiGroup}->{function_counts}}{@functions} = (); # Add fcn names as keys to the anonymous hash
+        $APIs{$apiGroup}->{max_function_count} = -1;
+        if ($APIs{$apiGroup}->{count_errors}) {
+                $APIs{$apiGroup}->{max_function_count} = 0;
+        }
+        $APIs{$apiGroup}->{cur_function_count} = 0;
+}
+
+my @filelist;
+push @filelist, @ARGV;
+if ("$filenamelist" ne "") {
+        # We have a file containing a list of files to check (possibly in
+        # addition to those on the command line).
+        open(FC, $filenamelist) || die("Couldn't open $filenamelist");
+
+        while (<FC>) {
+                # file names can be separated by ;
+                push @filelist, split(';');
+        }
+        close(FC);
+}
+
+die "no files to process" unless (scalar @filelist);
+
+# Read through the files; do various checks
+while ($_ = pop @filelist)
+{
+        my $filename = $_;
+        my $fileContents = '';
+        my @foundAPIs = ();
+        my $line;
+
+        if ($source_dir and ! -e $filename) {
+                $filename = $source_dir . '/' . $filename;
+        }
+        if (! -e $filename) {
+                warn "No such file: \"$filename\"";
+                next;
+        }
+
+        # delete leading './'
+        $filename =~ s{ ^ \. / } {}xo;
+        unless (-f $filename) {
+                print STDERR "Warning: $filename is not of type file - skipping.\n";
+                next;
+        }
+
+        # Read in the file (ouch, but it's easier that way)
+        open(FC, $filename) || die("Couldn't open $filename");
+        $line = 1;
+        while (<FC>) {
+                $fileContents .= $_;
+                eval { decode( 'UTF-8', $_, Encode::FB_CROAK ) };
+                if ($EVAL_ERROR) {
+                        print STDERR "Error: Found an invalid UTF-8 sequence on line " .$line. " of " .$filename."\n";
+                        $errorCount++;
+                }
+                $line++;
+        }
+        close(FC);
+
+        if (($fileContents =~ m{ \$Id .* \$ }xo))
+        {
+                print STDERR "Warning: ".$filename." has an SVN Id tag. Please remove it!\n";
+        }
+
+        if (($fileContents =~ m{ tab-width:\s*[0-7|9]+ | tabstop=[0-7|9]+ | tabSize=[0-7|9]+ }xo))
+        {
+                # To quote Icf0831717de10fc615971fa1cf75af2f1ea2d03d :
+                # HT tab stops are set every 8 spaces on UN*X; UN*X tools that treat an HT character
+                # as tabbing to 4-space tab stops, or that even are configurable but *default* to
+                # 4-space tab stops (I'm looking at *you*, Xcode!) are broken. tab-width: 4,
+                # tabstop=4, and tabSize=4 are errors if you ever expect anybody to look at your file
+                # with a UN*X tool, and every text file will probably be looked at by a UN*X tool at
+                # some point, so Don't Do That.
+                #
+                # Can I get an "amen!"?
+                print STDERR "Error: Found modelines with tabstops set to something other than 8 in " .$filename."\n";
+                $errorCount++;
+        }
+
+        # Remove C/C++ comments
+        # The below pattern is modified (to keep newlines at the end of C++-style comments) from that at:
+        # https://perldoc.perl.org/perlfaq6.html#How-do-I-use-a-regular-expression-to-strip-C-style-comments-from-a-file?
+        $fileContents =~ s#/\*[^*]*\*+([^/*][^*]*\*+)*/|//([^\\]|[^\n][\n]?)*?\n|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^/"'\\]*)#defined $3 ? $3 : "\n"#gse;
+
+        # optionally check the hf entries (including those under #if 0)
+        if ($check_hf) {
+                $errorCount += check_hf_entries(\$fileContents, $filename);
+        }
+
+        if ($fileContents =~ m{ %\d*?ll }xo)
+        {
+                # use PRI[dux...]N instead of ll
+                print STDERR "Error: Found %ll in " .$filename."\n";
+                $errorCount++;
+        }
+
+        if ($fileContents =~ m{ %hh }xo)
+        {
+                # %hh is C99 and Windows doesn't like it:
+                # http://connect.microsoft.com/VisualStudio/feedback/details/416843/sscanf-cannot-not-handle-hhd-format
+                # Need to use temporary variables instead.
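+                # e.g. (sketch): instead of sscanf(buf, "%hhu", &u8), scan
+                # into a plain unsigned int and assign it to the 8-bit
+                # variable afterwards.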
+ print STDERR "Error: Found %hh in " .$filename."\n"; + $errorCount++; + } + + # check for files that we should not include directly + # this must be done before quoted strings (#include "file.h") are removed + check_included_files(\$fileContents, $filename); + + # Check for value_string and enum_val_t errors: NULL termination, + # const-nes, and newlines within strings + if ($check_value_string_array) { + $errorCount += check_value_string_arrays(\$fileContents, $filename, $debug_flag); + } + + # Remove all the quoted strings + $fileContents =~ s{ $DoubleQuotedStr | $SingleQuotedStr } []xog; + + $errorCount += check_pref_var_dupes(\$fileContents, $filename); + + # Remove all blank lines + $fileContents =~ s{ ^ \s* $ } []xog; + + # Remove all '#if 0'd' code + remove_if0_code(\$fileContents, $filename); + + $errorCount += check_ett_registration(\$fileContents, $filename); + + #checkAPIsCalledWithTvbGetPtr(\@TvbPtrAPIs, \$fileContents, \@foundAPIs); + #if (@foundAPIs) { + # print STDERR "Found APIs with embedded tvb_get_ptr() calls in ".$filename." : ".join(',', @foundAPIs)."\n" + #} + + if ($check_shadow) { + check_shadow_variable(\@ShadowVariable, \$fileContents, \@foundAPIs); + if (@foundAPIs) { + print STDERR "Warning: Found shadow variable(s) in ".$filename." : ".join(',', @foundAPIs)."\n" + } + } + + + check_snprintf_plus_strlen(\$fileContents, $filename); + + $errorCount += check_proto_tree_add_XXX(\$fileContents, $filename); + + $errorCount += check_try_catch(\$fileContents, $filename); + + + # Check and count APIs + for my $groupArg (@apiGroups) { + my $pfx = "Warning"; + @foundAPIs = (); + my @groupParts = split(/:/, $groupArg); + my $apiGroup = $groupParts[0]; + my $curFuncCount = 0; + + if (scalar @groupParts > 1) { + $APIs{$apiGroup}->{max_function_count} = $groupParts[1]; + } + + findAPIinFile($APIs{$apiGroup}, \$fileContents, \@foundAPIs); + + for my $api (keys %{$APIs{$apiGroup}->{function_counts}} ) { + $curFuncCount += $APIs{$apiGroup}{function_counts}{$api}; + } + + # If we have a max function count and we've exceeded it, treat it + # as an error. + if (!$APIs{$apiGroup}->{count_errors} && $APIs{$apiGroup}->{max_function_count} >= 0) { + if ($curFuncCount > $APIs{$apiGroup}->{max_function_count}) { + print STDERR $pfx . ": " . $apiGroup . " exceeds maximum function count: " . $APIs{$apiGroup}->{max_function_count} . "\n"; + $APIs{$apiGroup}->{count_errors} = 1; + } + } + + if ($curFuncCount <= $APIs{$apiGroup}->{max_function_count}) { + next; + } + + if ($APIs{$apiGroup}->{count_errors}) { + # the use of "prohibited" APIs is an error, increment the error count + $errorCount += @foundAPIs; + $pfx = "Error"; + } + + if (@foundAPIs && ! $machine_readable_output) { + print STDERR $pfx . ": Found " . $apiGroup . " APIs in ".$filename.": ".join(',', @foundAPIs)."\n"; + } + if (@foundAPIs && $machine_readable_output) { + for my $api (@foundAPIs) { + printf STDERR "%-8.8s %-20.20s %-30.30s %-45.45s\n", $pfx, $apiGroup, $filename, $api; + } + } + } +} + +# Summary: Print Use Counts of each API in each requested summary group + +if (scalar @apiSummaryGroups > 0) { + my $fileline = join(", ", @ARGV); + printf "\nSummary for " . substr($fileline, 0, 65) . 
"…\n"; + + for my $apiGroup (@apiSummaryGroups) { + printf "\nUse counts for %s (maximum allowed total is %d)\n", $apiGroup, $APIs{$apiGroup}->{max_function_count}; + for my $api (sort {"\L$a" cmp "\L$b"} (keys %{$APIs{$apiGroup}->{function_counts}} )) { + if ($APIs{$apiGroup}{function_counts}{$api} < 1) { next; } + printf "%5d %-40.40s\n", $APIs{$apiGroup}{function_counts}{$api}, $api; + } + } +} + +exit($errorCount > 120 ? 120 : $errorCount); + +# +# Editor modelines - https://www.wireshark.org/tools/modelines.html +# +# Local variables: +# c-basic-offset: 8 +# tab-width: 8 +# indent-tabs-mode: nil +# End: +# +# vi: set shiftwidth=8 tabstop=8 expandtab: +# :indentSize=8:tabSize=8:noTabs=true: +# diff --git a/tools/check_dissector.py b/tools/check_dissector.py new file mode 100755 index 0000000..af1dc64 --- /dev/null +++ b/tools/check_dissector.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import sys +import os +import signal +import argparse + +# Run battery of tests on one or more dissectors. + +# For text colouring/highlighting. +class bcolors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + ADDED = '\033[45m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + +# Try to exit soon after Ctrl-C is pressed. +should_exit = False + +def signal_handler(sig, frame): + global should_exit + should_exit = True + print('You pressed Ctrl+C - exiting') + +signal.signal(signal.SIGINT, signal_handler) + +# Command-line args +parser = argparse.ArgumentParser(description="Run gamut of tests on dissector(s)") +parser.add_argument('--file', action='append', + help='specify individual dissector file to test') +parser.add_argument('--file-list', action='store', + help='file with list of dissectors') +parser.add_argument('--build-folder', action='store', + help='build folder') + +args = parser.parse_args() + +if not args.file and not args.file_list: + print('Need to specify --file or --file-list') + exit(1) + +# TODO: verify build-folder if set. + +# Get list of files to check. +dissectors = [] + +# Individually-selected files +if args.file: + for f in args.file: + if not os.path.isfile(f): + print('Chosen file', f, 'does not exist.') + exit(1) + else: + dissectors.append(f) + +# List of dissectors stored in a file +if args.file_list: + if not os.path.isfile(args.file_list): + print('Dissector-list file', args.file_list, 'does not exist.') + exit(1) + else: + with open(args.file_list, 'r') as f: + contents = f.read().splitlines() + for f in contents: + if not os.path.isfile(f): + print('Chosen file', f, 'does not exist.') + exit(1) + else: + dissectors.append(f) + +# Tools that should be run on selected files. +# Boolean arg is for whether build-dir is needed in order to run it. +# 3rd is Windows support. 
+tools = [ + ('tools/delete_includes.py --folder .', True, True), + ('tools/check_spelling.py', False, True), + ('tools/check_tfs.py --check-value-strings', False, True), + ('tools/check_typed_item_calls.py --all-checks', False, True), + ('tools/check_static.py', True, False), + ('tools/check_dissector_urls.py', False, True), + ('tools/check_val_to_str.py', False, True), + ('tools/cppcheck/cppcheck.sh', False, True), + ('tools/checkhf.pl', False, True), + ('tools/checkAPIs.pl', False, True), + ('tools/fix-encoding-args.pl', False, True), + ('tools/checkfiltername.pl', False, True) +] + + +def run_check(tool, dissectors, python): + # Create command-line with all dissectors included + command = '' + + # Don't trust shebang on windows. + if sys.platform.startswith('win'): + if python: + command += 'python.exe ' + else: + command += 'perl.exe ' + + command += tool[0] + if tool[1]: + command += ' --build-folder ' + args.build_folder + + for d in dissectors: + # Add this dissector file to command-line args + command += ((' --file' if python else '') + ' ' + d) + + # Run it + print(bcolors.BOLD + command + bcolors.ENDC) + os.system(command) + + +# Run all checks on all of my dissectors. +for tool in tools: + if should_exit: + exit(1) + if ((not sys.platform.startswith('win') or tool[2]) and # Supported on this platform? + (not tool[1] or (tool[1] and args.build_folder))): # Have --build-folder if needed? + + # Run it. + run_check(tool, dissectors, tool[0].find('.py') != -1) diff --git a/tools/check_dissector_urls.py b/tools/check_dissector_urls.py new file mode 100755 index 0000000..373d88b --- /dev/null +++ b/tools/check_dissector_urls.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python3 +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import argparse +import aiohttp +import asyncio +import os +import re +import shutil +import signal +import subprocess + +# This utility scans the dissector code for URLs, then attempts to +# fetch the links. The results are shown in stdout, but also, at +# the end of the run, written to files: +# - URLs that couldn't be loaded are written to failures.txt +# - working URLs are written to successes.txt +# - any previous failures.txt is also copied to failures_last_run.txt +# +# N.B. preferred form of RFC link is e.g., https://tools.ietf.org/html/rfc4349 + + +# TODO: +# - option to write back to dissector file when there is a failure? +# - optionally parse previous/recent successes.txt and avoid fetching them again? +# - make sure URLs are really within comments in code? +# - use urllib.parse or similar to better check URLs? +# - improve regex to allow '+' in URL (like confluence uses) + +# Try to exit soon after Ctrl-C is pressed. 
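+# Interrupt handling here is a small cooperative pattern: a module-level
+# flag set from the SIGINT handler, polled by the long-running loops, plus
+# cancellation of any asyncio tasks already in flight. A minimal sketch of
+# the same idea (names mirror the real code below, which additionally
+# catches the RuntimeError):
+#
+#   import asyncio, signal
+#   should_exit = False
+#   def handler(sig, frame):
+#       global should_exit
+#       should_exit = True                # polled by the work loops
+#       for t in asyncio.all_tasks():     # RuntimeError if no loop is running
+#           t.cancel()
+#   signal.signal(signal.SIGINT, handler)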
+should_exit = False + + +def signal_handler(sig, frame): + global should_exit + should_exit = True + print('You pressed Ctrl+C - exiting') + try: + tasks = asyncio.all_tasks() + except (RuntimeError): + # we haven't yet started the async link checking, we can exit directly + exit(1) + # ignore further SIGINTs while we're cancelling the running tasks + signal.signal(signal.SIGINT, signal.SIG_IGN) + for t in tasks: + t.cancel() + +signal.signal(signal.SIGINT, signal_handler) + + +class FailedLookup: + + def __init__(self): + # Fake values that will be queried (for a requests.get() return value) + self.status = 0 + self.headers = {} + self.headers['content-type'] = '' + + def __str__(self): + s = ('FailedLookup: status=' + str(self.status) + + ' content-type=' + self.headers['content-type']) + return s + + +# Dictionary from url -> result +cached_lookups = {} + + +class Link(object): + + def __init__(self, file, line_number, url): + self.file = file + self.line_number = line_number + self.url = url + self.tested = False + self.r = None + self.success = False + + def __str__(self): + epan_idx = self.file.find('epan') + if epan_idx == -1: + filename = self.file + else: + filename = self.file[epan_idx:] + s = ('SUCCESS ' if self.success else 'FAILED ') + \ + filename + ':' + str(self.line_number) + ' ' + self.url + if True: # self.r: + if self.r.status: + s += " status-code=" + str(self.r.status) + if 'content-type' in self.r.headers: + s += (' content-type="' + + self.r.headers['content-type'] + '"') + else: + s += ' ' + return s + + def validate(self): + global cached_lookups + global should_exit + if should_exit: + return + self.tested = True + if self.url in cached_lookups: + self.r = cached_lookups[self.url] + else: + self.r = FailedLookup() + + if self.r.status < 200 or self.r.status >= 300: + self.success = False + else: + self.success = True + + if (args.verbose or not self.success) and not should_exit: + print(self) + +links = [] +files = [] +all_urls = set() + +def find_links_in_file(filename): + with open(filename, 'r', encoding="utf8") as f: + for line_number, line in enumerate(f, start=1): + # TODO: not matching + # https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + urls = re.findall( + r'https?://(?:[a-zA-Z0-9./_?&=-]+|%[0-9a-fA-F]{2})+', line) + + for url in urls: + # Lop off any trailing chars that are not part of it + url = url.rstrip(").',") + + # A url must have a period somewhere + if '.' not in url: + continue + global links, all_urls + links.append(Link(filename, line_number, url)) + all_urls.add(url) + + +# Scan the given folder for links to test. +def find_links_in_folder(folder): + # Look at files in sorted order, to give some idea of how far through it + # is. 
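+    # A commented sketch of what the URL regex in find_links_in_file() above
+    # matches, and why the rstrip() there is needed:
+    #
+    #   >>> re.findall(r'https?://(?:[a-zA-Z0-9./_?&=-]+|%[0-9a-fA-F]{2})+',
+    #   ...            'See https://tools.ietf.org/html/rfc4349.')
+    #   ['https://tools.ietf.org/html/rfc4349.']
+    #
+    # The trailing '.' sits inside the character class, so it is matched and
+    # then stripped again with url.rstrip(").',").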
+ for filename in sorted(os.listdir(folder)): + if filename.endswith('.c'): + global links + find_links_in_file(os.path.join(folder, filename)) + + +async def populate_cache(sem, session, url): + global cached_lookups + if should_exit: + return + async with sem: + try: + async with session.get(url) as r: + cached_lookups[url] = r + if args.verbose: + print('checking ', url, ': success', sep='') + + except (asyncio.CancelledError, ValueError, ConnectionError, Exception): + cached_lookups[url] = FailedLookup() + if args.verbose: + print('checking ', url, ': failed', sep='') + + +async def check_all_links(links): + sem = asyncio.Semaphore(50) + timeout = aiohttp.ClientTimeout(total=25) + connector = aiohttp.TCPConnector(limit=30) + headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'} + async with aiohttp.ClientSession(connector=connector, headers=headers, timeout=timeout) as session: + tasks = [populate_cache(sem, session, u) for u in all_urls] + try: + await asyncio.gather(*tasks) + except (asyncio.CancelledError): + await session.close() + + for l in links: + l.validate() + + +################################################################# +# Main logic. + +# command-line args. Controls which dissector files should be scanned. +# If no args given, will just scan epan/dissectors folder. +parser = argparse.ArgumentParser(description='Check URL links in dissectors') +parser.add_argument('--file', action='append', + help='specify individual dissector file to test') +parser.add_argument('--commits', action='store', + help='last N commits to check') +parser.add_argument('--open', action='store_true', + help='check open files') +parser.add_argument('--verbose', action='store_true', + help='when enabled, show more output') + +args = parser.parse_args() + + +def is_dissector_file(filename): + p = re.compile(r'epan/dissectors/packet-.*\.c') + return p.match(filename) + + +# Get files from wherever command-line args indicate. +if args.file: + # Add specified file(s) + for f in args.file: + if not f.startswith('epan'): + f = os.path.join('epan', 'dissectors', f) + if not os.path.isfile(f): + print('Chosen file', f, 'does not exist.') + exit(1) + else: + files.append(f) + find_links_in_file(f) +elif args.commits: + # Get files affected by specified number of commits. + command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Fetch links from files (dissectors files only) + files = list(filter(is_dissector_file, files)) + for f in files: + find_links_in_file(f) +elif args.open: + # Unstaged changes. + command = ['git', 'diff', '--name-only'] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + files = list(filter(is_dissector_file, files)) + # Staged changes. + command = ['git', 'diff', '--staged', '--name-only'] + files_staged = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + files_staged = list(filter(is_dissector_file, files_staged)) + for f in files: + find_links_in_file(f) + for f in files_staged: + if f not in files: + find_links_in_file(f) + files.append(f) +else: + # Find links from dissector folder. + find_links_in_folder(os.path.join(os.path.dirname( + __file__), '..', 'epan', 'dissectors')) + + +# If scanning a subset of files, list them here. 
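+# check_all_links() above bounds the load three ways: a Semaphore around
+# each fetch, a TCPConnector connection limit, and a session-wide timeout.
+# The same pattern reduced to its core (sketch only; the function name and
+# URL list are illustrative):
+#
+#   async def fetch_statuses(urls):
+#       sem = asyncio.Semaphore(50)
+#       async def one(session, url):
+#           async with sem:
+#               async with session.get(url) as r:
+#                   return url, r.status
+#       timeout = aiohttp.ClientTimeout(total=25)
+#       async with aiohttp.ClientSession(timeout=timeout) as session:
+#           return await asyncio.gather(*(one(session, u) for u in urls))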
+print('Examining:') +if args.file or args.commits or args.open: + if files: + print(' '.join(files), '\n') + else: + print('No files to check.\n') +else: + print('All dissector modules\n') + +asyncio.run(check_all_links(links)) + +# Write failures to a file. Back up any previous first though. +if os.path.exists('failures.txt'): + shutil.copyfile('failures.txt', 'failures_last_run.txt') +with open('failures.txt', 'w') as f_f: + for l in links: + if l.tested and not l.success: + f_f.write(str(l) + '\n') +# And successes +with open('successes.txt', 'w') as f_s: + for l in links: + if l.tested and l.success: + f_s.write(str(l) + '\n') + + +# Count and show overall stats. +passed, failed = 0, 0 +for l in links: + if l.tested: + if l.success: + passed += 1 + else: + failed += 1 + +print('--------------------------------------------------------------------------------------------------') +print(len(links), 'links checked: ', passed, 'passed,', failed, 'failed') diff --git a/tools/check_help_urls.py b/tools/check_help_urls.py new file mode 100755 index 0000000..ddf3673 --- /dev/null +++ b/tools/check_help_urls.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +''' +Go through all user guide help URLs listed in the program +and confirm these are present in the User's Guide source files. +''' + +from re import search +from glob import glob +from sys import exit + +found = {} + +with open("ui/help_url.c") as f: + for line in f: + if url := search(r"user_guide_url\(\"(.*).html\"\);", line): + chapter = url.group(1) + found[chapter] = False + +adoc_files = glob("docbook/wsug_src/*.adoc") + +for adoc_file in adoc_files: + with open(adoc_file) as f: + for line in f: + # Fail on legacy block anchor syntax (double square brackets) + if tag := search(r"^\[\#(.*)]", line): + chapter = tag.group(1) + if chapter in found: + found[chapter] = True + +missing = False + +for chapter in found: + if not found[chapter]: + if not missing: + print("The following chapters are missing in the User's Guide:") + missing = True + print(chapter) + +if missing: + exit(-1) diff --git a/tools/check_spelling.py b/tools/check_spelling.py new file mode 100755 index 0000000..7e31908 --- /dev/null +++ b/tools/check_spelling.py @@ -0,0 +1,493 @@ +#!/usr/bin/env python3 +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import sys +import re +import subprocess +import argparse +import signal +from collections import Counter + +# Looks for spelling errors among strings found in source or documentation files. +# N.B. To run this script, you should install pyspellchecker (not spellchecker) using pip. + +# TODO: check structured doxygen comments? + +# For text colouring/highlighting. +class bcolors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + ADDED = '\033[45m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + +# Try to exit soon after Ctrl-C is pressed. +should_exit = False + +def signal_handler(sig, frame): + global should_exit + should_exit = True + print('You pressed Ctrl+C - exiting') + +signal.signal(signal.SIGINT, signal_handler) + + + +# Create spellchecker, and augment with some Wireshark words. +from spellchecker import SpellChecker +# Set up our dict with words from text file. 
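+# pyspellchecker in brief: SpellChecker() starts from an English frequency
+# list, word_frequency.load_text_file() merges in extra known words (here
+# the Wireshark-specific list), and spell.unknown(words) returns whichever
+# of the given words it does not recognise. Illustrative only:
+#
+#   >>> spell.unknown(['packet', 'dissector', 'paket'])
+#   {'paket'}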
+spell = SpellChecker() +spell.word_frequency.load_text_file('./tools/wireshark_words.txt') + + +# Track words that were not found. +missing_words = [] + + +# Split camelCase string into separate words. +def camelCaseSplit(identifier): + matches = re.finditer(r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier) + return [m.group(0) for m in matches] + + +# A File object contains all of the strings to be checked for a given file. +class File: + def __init__(self, file): + self.file = file + self.values = [] + + filename, extension = os.path.splitext(file) + self.code_file = extension in {'.c', '.cpp'} + + + with open(file, 'r', encoding="utf8") as f: + contents = f.read() + + if self.code_file: + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + # Find protocol name and add to dict. + # N.B. doesn't work when a variable is used instead of a literal for the protocol name... + matches = re.finditer(r'proto_register_protocol\s*\([\n\r\s]*\"(.*)\",[\n\r\s]*\"(.*)\",[\n\r\s]*\"(.*)\"', contents) + for m in matches: + protocol = m.group(3) + # Add to dict. + spell.word_frequency.load_words([protocol]) + spell.known([protocol]) + print('Protocol is: ' + bcolors.BOLD + protocol + bcolors.ENDC) + + # Add a string found in this file. + def add(self, value): + self.values.append(value.encode('utf-8') if sys.platform.startswith('win') else value) + + # Whole word is not recognised, but is it 2 words concatenated (without camelcase) ? + def checkMultiWords(self, word): + if len(word) < 6: + return False + + # Don't consider if mixed cases. + if not (word.islower() or word.isupper()): + # But make an exception if only the fist letter is uppercase.. + if not word == (word[0].upper() + word[1:]): + return False + + # Try splitting into 2 words recognised at various points. + # Allow 3-letter words. + length = len(word) + for idx in range(3, length-3): + word1 = word[0:idx] + word2 = word[idx:] + + if not spell.unknown([word1, word2]): + return True + + return self.checkMultiWordsRecursive(word) + + # If word before 'id' is recognised, accept word. + def wordBeforeId(self, word): + if word.lower().endswith('id'): + if not spell.unknown([word[0:len(word)-2]]): + return True + else: + return False + + def checkMultiWordsRecursive(self, word): + length = len(word) + #print('word=', word) + if length < 4: + return False + + for idx in range(4, length+1): + w = word[0:idx] + if not spell.unknown([w]): + if idx == len(word): + return True + else: + if self.checkMultiWordsRecursive(word[idx:]): + return True + + return False + + def numberPlusUnits(self, word): + m = re.search(r'^([0-9]+)([a-zA-Z]+)$', word) + if m: + if m.group(2).lower() in { "bit", "bits", "gb", "kbps", "gig", "mb", "th", "mhz", "v", "hz", "k", + "mbps", "m", "g", "ms", "nd", "nds", "rd", "kb", "kbit", "ghz", + "khz", "km", "ms", "usec", "sec", "gbe", "ns", "ksps", "qam", "mm" }: + return True + return False + + + # Check the spelling of all the words we have found + def spellCheck(self): + + num_values = len(self.values) + for value_index,v in enumerate(self.values): + if should_exit: + exit(1) + + v = str(v) + + # Ignore includes. + if v.endswith('.h'): + continue + + # Store original (as want to include for context in error report). + original = str(v) + + # Replace most punctuation with spaces, and eliminate common format specifiers. 
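+            # The replace() chain below is a hand-rolled normalisation: map
+            # punctuation to spaces and drop a few format specifiers. A more
+            # compact near-equivalent (sketch, not used here):
+            #
+            #   table = str.maketrans({c: ' ' for c in '.,`:;"\\+|()[]{}<>_-/!?=*%#&@$\''})
+            #   v = v.translate(table)
+            #   for spec in ('%u', '%d', '%s'):
+            #       v = v.replace(spec, '')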
+ v = v.replace('.', ' ') + v = v.replace(',', ' ') + v = v.replace('`', ' ') + v = v.replace(':', ' ') + v = v.replace(';', ' ') + v = v.replace('"', ' ') + v = v.replace('\\', ' ') + v = v.replace('+', ' ') + v = v.replace('|', ' ') + v = v.replace('(', ' ') + v = v.replace(')', ' ') + v = v.replace('[', ' ') + v = v.replace(']', ' ') + v = v.replace('{', ' ') + v = v.replace('}', ' ') + v = v.replace('<', ' ') + v = v.replace('>', ' ') + v = v.replace('_', ' ') + v = v.replace('-', ' ') + v = v.replace('/', ' ') + v = v.replace('!', ' ') + v = v.replace('?', ' ') + v = v.replace('=', ' ') + v = v.replace('*', ' ') + v = v.replace('%', ' ') + v = v.replace('#', ' ') + v = v.replace('&', ' ') + v = v.replace('@', ' ') + v = v.replace('$', ' ') + v = v.replace('®', '') + v = v.replace("'", ' ') + v = v.replace('"', ' ') + v = v.replace('%u', '') + v = v.replace('%d', '') + v = v.replace('%s', '') + + # Split into words. + value_words = v.split() + # Further split up any camelCase words. + words = [] + for w in value_words: + words += camelCaseSplit(w) + + # Check each word within this string in turn. + for word in words: + # Strip trailing digits from word. + word = word.rstrip('1234567890') + + # Quote marks found in some of the docs... + word = word.replace('“', '') + word = word.replace('”', '') + + # Single and collective possession + if word.endswith("’s"): + word = word[:-2] + if word.endswith("s’"): + word = word[:-2] + + if self.numberPlusUnits(word): + continue + + if len(word) > 4 and spell.unknown([word]) and not self.checkMultiWords(word) and not self.wordBeforeId(word): + print(self.file, value_index, '/', num_values, '"' + original + '"', bcolors.FAIL + word + bcolors.ENDC, + ' -> ', '?') + + # TODO: this can be interesting, but takes too long! + # bcolors.OKGREEN + spell.correction(word) + bcolors.ENDC + global missing_words + missing_words.append(word) + +def removeWhitespaceControl(code_string): + code_string = code_string.replace('\\n', ' ') + code_string = code_string.replace('\\r', ' ') + code_string = code_string.replace('\\t', ' ') + return code_string + +# Remove any contractions from the given string. +def removeContractions(code_string): + contractions = [ "wireshark’s", "don’t", "let’s", "isn’t", "won’t", "user’s", "hasn’t", "you’re", "o’clock", "you’ll", + "you’d", "developer’s", "doesn’t", "what’s", "let’s", "haven’t", "can’t", "you’ve", + "shouldn’t", "didn’t", "wouldn’t", "aren’t", "there’s", "packet’s", "couldn’t", "world’s", + "needn’t", "graph’s", "table’s", "parent’s", "entity’s", "server’s", "node’s", + "querier’s", "sender’s", "receiver’s", "computer’s", "frame’s", "vendor’s", "system’s", + "we’ll", "asciidoctor’s", "protocol’s", "microsoft’s", "wasn’t" ] + for c in contractions: + code_string = code_string.replace(c, "") + code_string = code_string.replace(c.capitalize(), "") + code_string = code_string.replace(c.replace('’', "'"), "") + code_string = code_string.replace(c.capitalize().replace('’', "'"), "") + return code_string + +def removeComments(code_string): + code_string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "" , code_string) # C-style comment + # Avoid matching // where it is allowed, e.g., https://www... or file:///... + code_string = re.sub(re.compile(r"(? 
10: + f_read.close() + return False + if (line.find('Generated automatically') != -1 or + line.find('Autogenerated from') != -1 or + line.find('is autogenerated') != -1 or + line.find('automatically generated by Pidl') != -1 or + line.find('Created by: The Qt Meta Object Compiler') != -1 or + line.find('This file was generated') != -1 or + line.find('This filter was automatically generated') != -1 or + line.find('This file is auto generated, do not edit!') != -1 or + line.find('this file is automatically generated') != -1): + + f_read.close() + return True + + # OK, looks like a hand-written file! + f_read.close() + return False + + +def isAppropriateFile(filename): + file, extension = os.path.splitext(filename) + if filename.find('CMake') != -1: + return False + return extension in { '.adoc', '.c', '.cpp', '.pod', '.nsi', '.txt'} or file.endswith('README') + + +def findFilesInFolder(folder, recursive=True): + files_to_check = [] + + if recursive: + for root, subfolders, files in os.walk(folder): + for f in files: + if should_exit: + return + f = os.path.join(root, f) + if isAppropriateFile(f) and not isGeneratedFile(f): + files_to_check.append(f) + else: + for f in sorted(os.listdir(folder)): + f = os.path.join(folder, f) + if isAppropriateFile(f) and not isGeneratedFile(f): + files_to_check.append(f) + + return files_to_check + + +# Check the given file. +def checkFile(filename): + # Check file exists - e.g. may have been deleted in a recent commit. + if not os.path.exists(filename): + print(filename, 'does not exist!') + return + + file = findStrings(filename) + file.spellCheck() + + + +################################################################# +# Main logic. + +# command-line args. Controls which files should be checked. +# If no args given, will just scan epan/dissectors folder. +parser = argparse.ArgumentParser(description='Check spellings in specified files') +parser.add_argument('--file', action='append', + help='specify individual file to test') +parser.add_argument('--folder', action='store', default='', + help='specify folder to test') +parser.add_argument('--no-recurse', action='store_true', default='', + help='do not recurse inside chosen folder') +parser.add_argument('--commits', action='store', + help='last N commits to check') +parser.add_argument('--open', action='store_true', + help='check open files') + +args = parser.parse_args() + + +# Get files from wherever command-line args indicate. +files = [] +if args.file: + # Add specified file(s) + for f in args.file: + if not os.path.isfile(f): + print('Chosen file', f, 'does not exist.') + exit(1) + else: + files.append(f) +elif args.commits: + # Get files affected by specified number of commits. + command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Filter files + files = list(filter(lambda f : os.path.exists(f) and isAppropriateFile(f) and not isGeneratedFile(f), files)) +elif args.open: + # Unstaged changes. + command = ['git', 'diff', '--name-only'] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Filter files. + files = list(filter(lambda f : isAppropriateFile(f) and not isGeneratedFile(f), files)) + # Staged changes. + command = ['git', 'diff', '--staged', '--name-only'] + files_staged = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Filter files. 
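+    # isGeneratedFile() above is a cheap header sniff: look at only the
+    # first few lines for the usual "generated" markers. The same idea as a
+    # standalone predicate (sketch; hypothetical helper with a reduced
+    # marker list):
+    #
+    #   def looks_generated(path, max_lines=10):
+    #       markers = ('Generated automatically', 'This file was generated')
+    #       with open(path, 'r') as f:
+    #           for n, line in enumerate(f):
+    #               if n >= max_lines:
+    #                   return False
+    #               if any(m in line for m in markers):
+    #                   return True
+    #       return False
+    #
+    # isGeneratedFile() gates the staged-file filtering below, just as it
+    # did for the unstaged set above.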
+ files_staged = list(filter(lambda f : isAppropriateFile(f) and not isGeneratedFile(f), files_staged)) + for f in files_staged: + if not f in files: + files.append(f) +else: + # By default, scan dissectors directory + folder = os.path.join('epan', 'dissectors') + # But overwrite with any folder entry. + if args.folder: + folder = args.folder + if not os.path.isdir(folder): + print('Folder', folder, 'not found!') + exit(1) + + # Find files from folder. + print('Looking for files in', folder) + files = findFilesInFolder(folder, not args.no_recurse) + + +# If scanning a subset of files, list them here. +print('Examining:') +if args.file or args.folder or args.commits or args.open: + if files: + print(' '.join(files), '\n') + else: + print('No files to check.\n') +else: + print('All dissector modules\n') + + +# Now check the chosen files. +for f in files: + # Check this file. + checkFile(f) + # But get out if control-C has been pressed. + if should_exit: + exit(1) + + + +# Show the most commonly not-recognised words. +print('') +counter = Counter(missing_words).most_common(100) +if len(counter) > 0: + for c in counter: + print(c[0], ':', c[1]) + +# Show error count. +print('\n' + bcolors.BOLD + str(len(missing_words)) + ' issues found' + bcolors.ENDC + '\n') diff --git a/tools/check_static.py b/tools/check_static.py new file mode 100755 index 0000000..fbd1d11 --- /dev/null +++ b/tools/check_static.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import re +import subprocess +import argparse +import signal + +# Look for dissector symbols that could/should be static. +# This will not run on Windows, unless/until we check the platform +# and use (I think) dumpbin.exe + +# Try to exit soon after Ctrl-C is pressed. +should_exit = False + +def signal_handler(sig, frame): + global should_exit + should_exit = True + print('You pressed Ctrl+C - exiting') + +signal.signal(signal.SIGINT, signal_handler) + +# Allow this as a default build folder name... +build_folder = os.getcwd() + '-build' + +# Record which symbols are referred to (by a set of files). +class CalledSymbols: + def __init__(self): + self.referred = set() + + def addCalls(self, file): + # Make sure that file is built. + last_dir = os.path.split(os.path.dirname(file))[-1] + if file.find('ui/cli') != -1: + # A tshark target-only file + object_file = os.path.join(build_folder, 'CMakeFiles', ('tshark' + '.dir'), file + '.o') + elif file.find('ui/qt') != -1: + object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', ('qtui' + '.dir'), os.path.basename(file) + '.o') + else: + if file.endswith('dissectors.c'): + object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', 'dissector-registration' + '.dir', os.path.basename(file) + '.o') + else: + object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', last_dir + '.dir', os.path.basename(file) + '.o') + if not os.path.exists(object_file): + #print('Warning -', object_file, 'does not exist') + return + command = ['nm', object_file] + for f in subprocess.check_output(command).splitlines(): + l = str(f)[2:-1] + # Lines might or might not have an address before letter and symbol. 
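+            # Typical `nm` lines this loop copes with (symbol names
+            # illustrative) - defined symbols carry an address, undefined
+            # references do not:
+            #
+            #   0000000000001040 T proto_register_foo
+            #                    U tvb_get_guint8
+            #
+            # p1 below matches the addressed form, p2 the address-less one;
+            # only 'U' (referenced-but-undefined) symbols are recorded.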
+ p1 = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)') + p2 = re.compile(r'[ ]* ([a-zA-Z]) (.*)') + + m = p1.match(l) + if not m: + m = p2.match(l) + if m: + letter = m.group(1) + function_name = m.group(2) + + # Only interested in undefined references to symbols. + if letter == 'U': + self.referred.add(function_name) + + + +# Record which symbols are defined in a single file. +class DefinedSymbols: + def __init__(self, file): + self.filename = file + self.global_dict = {} + self.header_file_contents = None + + # Make sure that file is built. + object_file = os.path.join(build_folder, 'epan', 'dissectors', 'CMakeFiles', 'dissectors.dir', os.path.basename(file) + '.o') + + if not os.path.exists(object_file): + #print('Warning -', object_file, 'does not exist') + return + + header_file= file.replace('.c', '.h') + try: + f = open(header_file, 'r') + self.header_file_contents = f.read() + except IOError: + pass + + + command = ['nm', object_file] + for f in subprocess.check_output(command).splitlines(): + # Line consists of whitespace, [address], letter, symbolName + l = str(f)[2:-1] + p = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)') + m = p.match(l) + if m: + letter = m.group(1) + function_name = m.group(2) + # globally-defined symbols. Would be 't' or 'd' if already static. + if letter in 'TD': + self.add(function_name, l) + + def add(self, letter, function_name): + self.global_dict[letter] = function_name + + def mentionedInHeaders(self, symbol): + if self.header_file_contents: + if self.header_file_contents.find(symbol) != -1: + return True + # Also check some of the 'common' header files that don't match the dissector file name. + # TODO: could cache the contents of these files, but it's not that slow. + common_mismatched_headers = [ os.path.join('epan', 'dissectors', 'packet-ncp-int.h'), + os.path.join('epan', 'dissectors', 'packet-mq.h'), + os.path.join('epan', 'dissectors', 'packet-ip.h'), + os.path.join('epan', 'dissectors', 'packet-gsm_a_common.h'), + os.path.join('epan', 'dissectors', 'packet-epl.h'), + os.path.join('epan', 'dissectors', 'packet-bluetooth.h'), + os.path.join('epan', 'dissectors', 'packet-dcerpc.h'), + os.path.join('epan', 'ip_opts.h'), + os.path.join('epan', 'eap.h')] + for hf in common_mismatched_headers: + try: + f = open(hf) + contents = f.read() + if contents.find(symbol) != -1: + return True + except EnvironmentError: + pass + + return False + + def check(self, called_symbols): + global issues_found + for f in self.global_dict: + if not f in called_symbols: + mentioned_in_header = self.mentionedInHeaders(f) + fun = self.global_dict[f] + print(self.filename, '(' + fun + ')', 'is not referred to so could be static?', '(in header)' if mentioned_in_header else '') + issues_found += 1 + + + +# Helper functions. + +def isDissectorFile(filename): + p = re.compile(r'(packet|file)-.*\.c') + return p.match(filename) + +# Test for whether the given dissector file was automatically generated. +def isGeneratedFile(filename): + # Check file exists - e.g. may have been deleted in a recent commit. + if not os.path.exists(filename): + return False + + if not filename.endswith('.c'): + return False + + # Open file + f_read = open(os.path.join(filename), 'r') + lines_tested = 0 + for line in f_read: + # The comment to say that its generated is near the top, so give up once + # get a few lines down. 
+ if lines_tested > 10: + f_read.close() + return False + if (line.find('Generated automatically') != -1 or + line.find('Autogenerated from') != -1 or + line.find('is autogenerated') != -1 or + line.find('automatically generated by Pidl') != -1 or + line.find('Created by: The Qt Meta Object Compiler') != -1 or + line.find('This file was generated') != -1 or + line.find('This filter was automatically generated') != -1): + + f_read.close() + return True + lines_tested = lines_tested + 1 + + # OK, looks like a hand-written file! + f_read.close() + return False + + +def findDissectorFilesInFolder(folder, include_generated): + # Look at files in sorted order, to give some idea of how far through is. + tmp_files = [] + + for f in sorted(os.listdir(folder)): + if should_exit: + return + if isDissectorFile(f): + if include_generated or not isGeneratedFile(os.path.join('epan', 'dissectors', f)): + filename = os.path.join(folder, f) + tmp_files.append(filename) + return tmp_files + +def findFilesInFolder(folder): + # Look at files in sorted order, to give some idea of how far through is. + tmp_files = [] + + for f in sorted(os.listdir(folder)): + if should_exit: + return + if f.endswith('.c') or f.endswith('.cpp'): + filename = os.path.join(folder, f) + tmp_files.append(filename) + return tmp_files + + +def is_dissector_file(filename): + p = re.compile(r'.*packet-.*\.c') + return p.match(filename) + + +issues_found = 0 + + + +################################################################# +# Main logic. + +# command-line args. Controls which dissector files should be checked. +# If no args given, will just scan epan/dissectors folder. +parser = argparse.ArgumentParser(description='Check calls in dissectors') +parser.add_argument('--build-folder', action='store', default='', + help='build folder', required=False) +parser.add_argument('--file', action='append', + help='specify individual dissector file to test') +parser.add_argument('--commits', action='store', + help='last N commits to check') +parser.add_argument('--open', action='store_true', + help='check open files') + +args = parser.parse_args() + + +# Get files from wherever command-line args indicate. +files = [] + +if args.build_folder: + build_folder = args.build_folder + +if args.file: + # Add specified file(s) + for f in args.file: + if not f.startswith('epan'): + f = os.path.join('epan', 'dissectors', f) + if not os.path.isfile(f): + print('Chosen file', f, 'does not exist.') + exit(1) + else: + files.append(f) +elif args.commits: + # Get files affected by specified number of commits. + command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Will examine dissector files only + files = list(filter(lambda f : is_dissector_file(f), files)) +elif args.open: + # Unstaged changes. + command = ['git', 'diff', '--name-only'] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Only interested in dissector files. + files = list(filter(lambda f : is_dissector_file(f), files)) + # Staged changes. + command = ['git', 'diff', '--staged', '--name-only'] + files_staged = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Only interested in dissector files. + files_staged = list(filter(lambda f : is_dissector_file(f), files_staged)) + for f in files: + files.append(f) + for f in files_staged: + if not f in files: + files.append(f) +else: + # Find all dissector files from folder. 
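+    # File selection above follows the same git-driven recipe as the other
+    # check_* scripts. The --commits branch, condensed (sketch; the commit
+    # count is illustrative):
+    #
+    #   out = subprocess.check_output(['git', 'diff', '--name-only', 'HEAD~3'])
+    #   files = [f.decode('utf-8') for f in out.splitlines()]
+    #   files = [f for f in files if is_dissector_file(f)]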
+ files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'), + include_generated=False) + + +# If scanning a subset of files, list them here. +print('Examining:') +if args.file or args.commits or args.open: + if files: + print(' '.join(files), '\n') + else: + print('No files to check.\n') +else: + print('All dissector modules\n') + + +if not os.path.isdir(build_folder): + print('Build directory not valid', build_folder, '- please set with --build-folder') + exit(1) + + +# Get the set of called functions and referred-to data. +called = CalledSymbols() +for d in findDissectorFilesInFolder(os.path.join('epan', 'dissectors'), include_generated=True): + called.addCalls(d) +called.addCalls(os.path.join('epan', 'dissectors', 'dissectors.c')) +# Also check calls from GUI code +for d in findFilesInFolder('ui'): + called.addCalls(d) +for d in findFilesInFolder(os.path.join('ui', 'qt')): + called.addCalls(d) +# These are from tshark.. +for d in findFilesInFolder(os.path.join('ui', 'cli')): + called.addCalls(d) + + +# Now check identified files. +for f in files: + if should_exit: + exit(1) + DefinedSymbols(f).check(called.referred) + +# Show summary. +print(issues_found, 'issues found') diff --git a/tools/check_tfs.py b/tools/check_tfs.py new file mode 100755 index 0000000..cecf8d9 --- /dev/null +++ b/tools/check_tfs.py @@ -0,0 +1,595 @@ +#!/usr/bin/env python3 +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import re +import subprocess +import argparse +import signal + +# This utility scans for tfs items, and works out if standard ones +# could have been used intead (from epan/tfs.c) +# Can also check for value_string where common tfs could be used instead. + +# TODO: +# - check how many of the definitions in epan/tfs.c are used in other dissectors +# - although even if unused, might be in external dissectors? +# - consider merging Item class with check_typed_item_calls.py ? + + +# Try to exit soon after Ctrl-C is pressed. +should_exit = False + +def signal_handler(sig, frame): + global should_exit + should_exit = True + print('You pressed Ctrl+C - exiting') + +signal.signal(signal.SIGINT, signal_handler) + + +# Test for whether the given file was automatically generated. +def isGeneratedFile(filename): + # Check file exists - e.g. may have been deleted in a recent commit. + if not os.path.exists(filename): + return False + + # Open file + f_read = open(os.path.join(filename), 'r') + lines_tested = 0 + for line in f_read: + # The comment to say that its generated is near the top, so give up once + # get a few lines down. + if lines_tested > 10: + f_read.close() + return False + if (line.find('Generated automatically') != -1 or + line.find('Generated Automatically') != -1 or + line.find('Autogenerated from') != -1 or + line.find('is autogenerated') != -1 or + line.find('automatically generated by Pidl') != -1 or + line.find('Created by: The Qt Meta Object Compiler') != -1 or + line.find('This file was generated') != -1 or + line.find('This filter was automatically generated') != -1 or + line.find('This file is auto generated, do not edit!') != -1 or + line.find('This file is auto generated') != -1): + + f_read.close() + return True + lines_tested = lines_tested + 1 + + # OK, looks like a hand-written file! 
+ f_read.close() + return False + + +# Keep track of custom entries that might appear in multiple dissectors, +# so we can consider adding them to tfs.c +custom_tfs_entries = {} +def AddCustomEntry(val1, val2, file): + global custom_tfs_entries + if (val1, val2) in custom_tfs_entries: + custom_tfs_entries[(val1, val2)].append(file) + else: + custom_tfs_entries[(val1, val2)] = [file] + + + +class TFS: + def __init__(self, file, name, val1, val2): + self.file = file + self.name = name + self.val1 = val1 + self.val2 = val2 + + global warnings_found + + # Should not be empty + if not len(val1) or not len(val2): + print('Warning:', file, name, 'has an empty field', self) + warnings_found += 1 + #else: + # Strange if one begins with capital but other doesn't? + #if val1[0].isalpha() and val2[0].isalpha(): + # if val1[0].isupper() != val2[0].isupper(): + # print(file, name, 'one starts lowercase and the other upper', self) + + # Leading or trailing space should not be needed. + if val1.startswith(' ') or val1.endswith(' '): + print('Note: ' + self.file + ' ' + self.name + ' - false val begins or ends with space \"' + self.val1 + '\"') + if val2.startswith(' ') or val2.endswith(' '): + print('Note: ' + self.file + ' ' + self.name + ' - true val begins or ends with space \"' + self.val2 + '\"') + + # Should really not be identical... + if val1.lower() == val2.lower(): + print('Warning:', file, name, 'true and false strings are the same', self) + warnings_found += 1 + + # Shouldn't both be negation (with exception..) + if (file != os.path.join('epan', 'dissectors', 'packet-smb.c') and (val1.lower().find('not ') != -1) and (val2.lower().find('not ') != -1)): + print('Warning:', file, name, self, 'both strings contain not') + warnings_found += 1 + + # Not expecting full-stops inside strings.. + if val1.find('.') != -1 or val2.find('.') != -1: + print('Warning:', file, name, 'Period found in string', self) + warnings_found += 1 + + + def __str__(self): + return '{' + '"' + self.val1 + '", "' + self.val2 + '"}' + + +class ValueString: + def __init__(self, file, name, vals): + self.file = file + self.name = name + self.raw_vals = vals + self.parsed_vals = {} + self.looks_like_tfs = True + + no_lines = self.raw_vals.count('{') + if no_lines != 3: + self.looks_like_tfs = False + return + + # Now parse out each entry in the value_string + matches = re.finditer(r'\{([\"a-zA-Z\s\d\,]*)\}', self.raw_vals) + for m in matches: + entry = m[1] + # Check each entry looks like part of a TFS entry. 
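+            # A value_string that is "really" a TFS has exactly this shape
+            # (name illustrative):
+            #
+            #   static const value_string on_off_vals[] = {
+            #       { 1, "On"  },
+            #       { 0, "Off" },
+            #       { 0, NULL  }
+            #   };
+            #
+            # i.e. three '{' groups, of which the 0/1 entries are parsed
+            # below into parsed_vals[True] / parsed_vals[False].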
+ match = re.match(r'\s*([01])\,\s*\"([a-zA-Z\d\s]*\s*)\"', entry) + if match: + if match[1] == '1': + self.parsed_vals[True] = match[2] + else: + self.parsed_vals[False] = match[2] + + # Now have both entries + if len(self.parsed_vals) == 2: + break + else: + self.looks_like_tfs = False + break + + def __str__(self): + return '{' + '"' + self.raw_vals + '"}' + + +field_widths = { + 'FT_BOOLEAN' : 64, # TODO: Width depends upon 'display' field + 'FT_CHAR' : 8, + 'FT_UINT8' : 8, + 'FT_INT8' : 8, + 'FT_UINT16' : 16, + 'FT_INT16' : 16, + 'FT_UINT24' : 24, + 'FT_INT24' : 24, + 'FT_UINT32' : 32, + 'FT_INT32' : 32, + 'FT_UINT40' : 40, + 'FT_INT40' : 40, + 'FT_UINT48' : 48, + 'FT_INT48' : 48, + 'FT_UINT56' : 56, + 'FT_INT56' : 56, + 'FT_UINT64' : 64, + 'FT_INT64' : 64 +} + + + + +# Simplified version of class that is in check_typed_item_calls.py +class Item: + + previousItem = None + + def __init__(self, filename, hf, filter, label, item_type, type_modifier, strings, macros, mask=None, + check_mask=False): + self.filename = filename + self.hf = hf + self.filter = filter + self.label = label + self.strings = strings + self.mask = mask + + # N.B. Not sestting mask by looking up macros. + + self.item_type = item_type + self.type_modifier = type_modifier + + self.set_mask_value(macros) + + self.bits_set = 0 + for n in range(0, self.get_field_width_in_bits()): + if self.check_bit(self.mask_value, n): + self.bits_set += 1 + + def check_bit(self, value, n): + return (value & (0x1 << n)) != 0 + + + def __str__(self): + return 'Item ({0} "{1}" {2} type={3}:{4} strings={5} mask={6})'.format(self.filename, self.label, self.filter, + self.item_type, self.type_modifier, self.strings, self.mask) + + + + def set_mask_value(self, macros): + try: + self.mask_read = True + + # Substitute mask if found as a macro.. + if self.mask in macros: + self.mask = macros[self.mask] + elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask): + self.mask_read = False + self.mask_value = 0 + return + + + # Read according to the appropriate base. + if self.mask.startswith('0x'): + self.mask_value = int(self.mask, 16) + elif self.mask.startswith('0'): + self.mask_value = int(self.mask, 8) + else: + self.mask_value = int(self.mask, 10) + except: + self.mask_read = False + self.mask_value = 0 + + + # Return true if bit position n is set in value. + def check_bit(self, value, n): + return (value & (0x1 << n)) != 0 + + + def get_field_width_in_bits(self): + if self.item_type == 'FT_BOOLEAN': + if self.type_modifier == 'NULL': + return 8 # i.e. 1 byte + elif self.type_modifier == 'BASE_NONE': + return 8 + elif self.type_modifier == 'SEP_DOT': # from proto.h, only meant for FT_BYTES + return 64 + else: + try: + # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble. + return int((int(self.type_modifier) + 3)/4)*4 + except: + #print('oops', self) + return 0 + else: + if self.item_type in field_widths: + # Lookup fixed width for this type + return field_widths[self.item_type] + else: + #print('returning 0 for', self) + return 0 + + + + + +def removeComments(code_string): + code_string = re.sub(re.compile(r"/\*.*?\*/",re.DOTALL ) ,"" ,code_string) # C-style comment + code_string = re.sub(re.compile(r"//.*?\n" ) ,"" ,code_string) # C++-style comment + code_string = re.sub(re.compile(r"#if 0.*?#endif",re.DOTALL ) ,"" , code_string) # Ignored region + + return code_string + + +# Look for true_false_string items in a dissector file. 
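+# The RE in findTFS() below keys on declarations of the form:
+#
+#   const true_false_string tfs_yes_no = { "Yes", "No" };
+#
+# capturing the identifier and both quoted strings. Note the string
+# character classes are deliberately narrow ([a-zA-Z_0-9/:! ]), so a TFS
+# whose strings contain other punctuation is simply skipped, not reported.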
+def findTFS(filename): + tfs_found = {} + + with open(filename, 'r', encoding="utf8") as f: + contents = f.read() + # Example: const true_false_string tfs_yes_no = { "Yes", "No" }; + + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + matches = re.finditer(r'\sconst\s*true_false_string\s*([a-zA-Z0-9_]*)\s*=\s*{\s*\"([a-zA-Z_0-9/:! ]*)\"\s*,\s*\"([a-zA-Z_0-9/:! ]*)\"', contents) + for m in matches: + name = m.group(1) + val1 = m.group(2) + val2 = m.group(3) + # Store this entry. + tfs_found[name] = TFS(filename, name, val1, val2) + + return tfs_found + +# Look for value_string entries in a dissector file. +def findValueStrings(filename): + vals_found = {} + + #static const value_string radio_type_vals[] = + #{ + # { 0, "FDD"}, + # { 1, "TDD"}, + # { 0, NULL } + #}; + + with open(filename, 'r', encoding="utf8") as f: + contents = f.read() + + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + matches = re.finditer(r'.*const value_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9\s\"]*)\};', contents) + for m in matches: + name = m.group(1) + vals = m.group(2) + vals_found[name] = ValueString(filename, name, vals) + + return vals_found + +# Look for hf items (i.e. full item to be registered) in a dissector file. +def find_items(filename, macros, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False): + is_generated = isGeneratedFile(filename) + items = {} + with open(filename, 'r', encoding="utf8") as f: + contents = f.read() + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + # N.B. re extends all the way to HFILL to avoid greedy matching + matches = re.finditer( r'.*\{\s*\&(hf_[a-z_A-Z0-9]*)\s*,\s*{\s*\"(.*?)\"\s*,\s*\"(.*?)\"\s*,\s*(.*?)\s*,\s*([0-9A-Z_\|\s]*?)\s*,\s*(.*?)\s*,\s*(.*?)\s*,\s*([a-zA-Z0-9\W\s_\u00f6\u00e4]*?)\s*,\s*HFILL', contents) + for m in matches: + # Store this item. + hf = m.group(1) + items[hf] = Item(filename, hf, filter=m.group(3), label=m.group(2), item_type=m.group(4), + type_modifier=m.group(5), + strings=m.group(6), + macros=macros, + mask=m.group(7)) + return items + +def find_macros(filename): + macros = {} + with open(filename, 'r', encoding="utf8") as f: + contents = f.read() + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + matches = re.finditer( r'#define\s*([A-Z0-9_]*)\s*([0-9xa-fA-F]*)\n', contents) + for m in matches: + # Store this mapping. + macros[m.group(1)] = m.group(2) + return macros + + + +def is_dissector_file(filename): + p = re.compile(r'.*packet-.*\.c') + return p.match(filename) + +def findDissectorFilesInFolder(folder): + # Look at files in sorted order, to give some idea of how far through is. + files = [] + + for f in sorted(os.listdir(folder)): + if should_exit: + return + if is_dissector_file(f): + filename = os.path.join(folder, f) + files.append(filename) + return files + + + +warnings_found = 0 +errors_found = 0 + + +tfs_found = 0 + +# Check the given dissector file. +def checkFile(filename, common_tfs, look_for_common=False, check_value_strings=False): + global warnings_found + global errors_found + + # Check file exists - e.g. may have been deleted in a recent commit. + if not os.path.exists(filename): + print(filename, 'does not exist!') + return + + # Find items. 
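+    # The comparison below is two-tier: an exact match against a tfs.c entry
+    # is reported as an error, a match differing only in capitalisation as a
+    # warning. Illustrative:
+    #
+    #   { "Yes", "No" } vs tfs_yes_no { "Yes", "No" }  -> Error (exact)
+    #   { "YES", "NO" } vs tfs_yes_no { "Yes", "No" }  -> Warn  (case only)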
+ file_tfs = findTFS(filename) + + # See if any of these items already existed in tfs.c + for f in file_tfs: + for c in common_tfs: + found = False + + # + # Do not do this check for plugins; plugins cannot import + # data values from libwireshark (functions, yes; data + # values, no). + # + # Test whether there's a common prefix for the file name + # and "plugin/epan/"; if so, this is a plugin, and there + # is no common path and os.path.commonprefix returns an + # empty string, otherwise it returns the common path, so + # we check whether the common path is an empty string. + # + if os.path.commonprefix([filename, 'plugin/epan/']) == '': + exact_case = False + if file_tfs[f].val1 == common_tfs[c].val1 and file_tfs[f].val2 == common_tfs[c].val2: + found = True + exact_case = True + elif file_tfs[f].val1.upper() == common_tfs[c].val1.upper() and file_tfs[f].val2.upper() == common_tfs[c].val2.upper(): + found = True + + if found: + print("Error:" if exact_case else "Warn: ", filename, f, "- could have used", c, 'from tfs.c instead: ', common_tfs[c], + '' if exact_case else ' (capitalisation differs)') + if exact_case: + errors_found += 1 + else: + warnings_found += 1 + break + if not found: + if look_for_common: + AddCustomEntry(file_tfs[f].val1, file_tfs[f].val2, filename) + + if check_value_strings: + # Get macros + macros = find_macros(filename) + + # Get value_string entries. + vs = findValueStrings(filename) + + # Also get hf items + items = find_items(filename, macros, check_mask=True) + + + for v in vs: + if vs[v].looks_like_tfs: + found = False + exact_case = False + + #print('Candidate', v, vs[v]) + for c in common_tfs: + found = False + + # + # Do not do this check for plugins; plugins cannot import + # data values from libwireshark (functions, yes; data + # values, no). + # + # Test whether there's a common prefix for the file name + # and "plugin/epan/"; if so, this is a plugin, and there + # is no common path and os.path.commonprefix returns an + # empty string, otherwise it returns the common path, so + # we check whether the common path is an empty string. + # + if os.path.commonprefix([filename, 'plugin/epan/']) == '': + exact_case = False + if common_tfs[c].val1 == vs[v].parsed_vals[True] and common_tfs[c].val2 == vs[v].parsed_vals[False]: + found = True + exact_case = True + elif common_tfs[c].val1.upper() == vs[v].parsed_vals[True].upper() and common_tfs[c].val2.upper() == vs[v].parsed_vals[False].upper(): + found = True + + # Do values match? + if found: + # OK, now look for items that: + # - have VALS(v) AND + # - have a mask width of 1 bit (no good if field can have values > 1...) + for i in items: + if re.match(r'VALS\(\s*'+v+r'\s*\)', items[i].strings): + if items[i].bits_set == 1: + print("Warn:" if exact_case else "Note:", filename, 'value_string', "'"+v+"'", + "- could have used", c, 'from tfs.c instead: ', common_tfs[c], 'for', i, + '' if exact_case else ' (capitalisation differs)') + if exact_case: + warnings_found += 1 + + + +################################################################# +# Main logic. + +# command-line args. Controls which dissector files should be checked. +# If no args given, will just scan epan/dissectors folder. 
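+# The plugin guard in checkFile() above leans on os.path.commonprefix(),
+# which compares character-by-character rather than by path component
+# (paths illustrative):
+#
+#   >>> os.path.commonprefix(['epan/dissectors/packet-ip.c', 'plugin/epan/'])
+#   ''
+#   >>> os.path.commonprefix(['plugins/epan/foo/packet-foo.c', 'plugin/epan/'])
+#   'plugin'
+#
+# so only files sharing no leading characters with 'plugin/epan/' are
+# compared against the shared tfs.c entries.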
+parser = argparse.ArgumentParser(description='Check calls in dissectors') +parser.add_argument('--file', action='append', + help='specify individual dissector file to test') +parser.add_argument('--commits', action='store', + help='last N commits to check') +parser.add_argument('--open', action='store_true', + help='check open files') +parser.add_argument('--check-value-strings', action='store_true', + help='check whether value_strings could have been tfs?') + +parser.add_argument('--common', action='store_true', + help='check for potential new entries for tfs.c') + + +args = parser.parse_args() + + +# Get files from wherever command-line args indicate. +files = [] +if args.file: + # Add specified file(s) + for f in args.file: + if not f.startswith('epan'): + f = os.path.join('epan', 'dissectors', f) + if not os.path.isfile(f): + print('Chosen file', f, 'does not exist.') + exit(1) + else: + files.append(f) +elif args.commits: + # Get files affected by specified number of commits. + command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Will examine dissector files only + files = list(filter(lambda f : is_dissector_file(f), files)) +elif args.open: + # Unstaged changes. + command = ['git', 'diff', '--name-only'] + files = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Only interested in dissector files. + files = list(filter(lambda f : is_dissector_file(f), files)) + # Staged changes. + command = ['git', 'diff', '--staged', '--name-only'] + files_staged = [f.decode('utf-8') + for f in subprocess.check_output(command).splitlines()] + # Only interested in dissector files. + files_staged = list(filter(lambda f : is_dissector_file(f), files_staged)) + for f in files_staged: + if not f in files: + files.append(f) +else: + # Find all dissector files from folder. + files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors')) + + +# If scanning a subset of files, list them here. +print('Examining:') +if args.file or args.commits or args.open: + if files: + print(' '.join(files), '\n') + else: + print('No files to check.\n') +else: + print('All dissector modules\n') + + +# Get standard/ shared ones. +tfs_entries = findTFS(os.path.join('epan', 'tfs.c')) + +# Now check the files to see if they could have used shared ones instead. +for f in files: + if should_exit: + exit(1) + if not isGeneratedFile(f): + checkFile(f, tfs_entries, look_for_common=args.common, check_value_strings=args.check_value_strings) + +# Report on commonly-defined values. +if args.common: + # Looking for items that could potentially be moved to tfs.c + for c in custom_tfs_entries: + # Only want to see items that have 3 or more occurrences. + # Even then, probably only want to consider ones that sound generic. + if len(custom_tfs_entries[c]) > 2: + print(c, 'appears', len(custom_tfs_entries[c]), 'times, in: ', custom_tfs_entries[c]) + + +# Show summary. 
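+# Exit-status convention, mirrored by the summary below: warnings alone
+# leave the exit code at 0, while any exact-duplicate errors exit(1), so CI
+# can treat only the latter as failures. Typical invocation (sketch):
+#
+#   tools/check_tfs.py --commits 3 --check-value-strings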
+print(warnings_found, 'warnings found') +if errors_found: + print(errors_found, 'errors found') + exit(1) diff --git a/tools/check_typed_item_calls.py b/tools/check_typed_item_calls.py new file mode 100755 index 0000000..4800203 --- /dev/null +++ b/tools/check_typed_item_calls.py @@ -0,0 +1,1775 @@ +#!/usr/bin/env python3 +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import re +import argparse +import signal +import subprocess + +# This utility scans the dissector code for various issues. +# TODO: +# - Create maps from type -> display types for hf items (see display (FIELDDISPLAY)) in docs/README.dissector + + +# Try to exit soon after Ctrl-C is pressed. +should_exit = False + +def signal_handler(sig, frame): + global should_exit + should_exit = True + print('You pressed Ctrl+C - exiting') + +signal.signal(signal.SIGINT, signal_handler) + + +warnings_found = 0 +errors_found = 0 + +def name_has_one_of(name, substring_list): + for word in substring_list: + if name.lower().find(word) != -1: + return True + return False + +# An individual call to an API we are interested in. +# Used by APICheck below. +class Call: + def __init__(self, hf_name, macros, line_number=None, length=None, fields=None): + self.hf_name = hf_name + self.line_number = line_number + self.fields = fields + self.length = None + if length: + try: + self.length = int(length) + except: + if length.isupper(): + if length in macros: + try: + self.length = int(macros[length]) + except: + pass + pass + + +# These are variable names that have been seen to be used in calls.. +common_hf_var_names = { 'hf_index', 'hf_item', 'hf_idx', 'hf_x', 'hf_id', 'hf_cookie', 'hf_flag', + 'hf_dos_time', 'hf_dos_date', 'hf_value', 'hf_num', + 'hf_cause_value', 'hf_uuid', + 'hf_endian', 'hf_ip', 'hf_port', 'hf_suff', 'hf_string', 'hf_uint', + 'hf_tag', 'hf_type', 'hf_hdr', 'hf_field', 'hf_opcode', 'hf_size', + 'hf_entry', 'field' } + +item_lengths = {} +item_lengths['FT_CHAR'] = 1 +item_lengths['FT_UINT8'] = 1 +item_lengths['FT_INT8'] = 1 +item_lengths['FT_UINT16'] = 2 +item_lengths['FT_INT16'] = 2 +item_lengths['FT_UINT24'] = 3 +item_lengths['FT_INT24'] = 3 +item_lengths['FT_UINT32'] = 4 +item_lengths['FT_INT32'] = 4 +item_lengths['FT_UINT40'] = 5 +item_lengths['FT_INT40'] = 5 +item_lengths['FT_UINT48'] = 6 +item_lengths['FT_INT48'] = 6 +item_lengths['FT_UINT56'] = 7 +item_lengths['FT_INT56'] = 7 +item_lengths['FT_UINT64'] = 8 +item_lengths['FT_INT64'] = 8 +item_lengths['FT_ETHER'] = 6 +# TODO: other types... + + +# A check for a particular API function. +class APICheck: + def __init__(self, fun_name, allowed_types, positive_length=False): + self.fun_name = fun_name + self.allowed_types = allowed_types + self.positive_length = positive_length + self.calls = [] + + if fun_name.startswith('ptvcursor'): + # RE captures function name + 1st 2 args (always ptvc + hfindex) + self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(([a-zA-Z0-9_]+),\s*([a-zA-Z0-9_]+)') + elif fun_name.find('add_bitmask') == -1: + # Normal case. + # RE captures function name + 1st 2 args (always tree + hfindex + length) + self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(([a-zA-Z0-9_]+),\s*([a-zA-Z0-9_]+),\s*[a-zA-Z0-9_]+,\s*[a-zA-Z0-9_]+,\s*([a-zA-Z0-9_]+)') + else: + # _add_bitmask functions. 
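+            # The three RE shapes used by APICheck, by call family (function
+            # and argument names illustrative):
+            #
+            #   ptvcursor_add(ptvc, hf_x, len, enc)
+            #       -> captures ptvc and hf_x
+            #   proto_tree_add_foo(tree, hf_x, tvb, start, len, enc)
+            #       -> captures tree, hf_x and len
+            #   proto_tree_add_bitmask(tree, tvb, start, hf_x, ett, fields, enc)
+            #       -> captures tree, hf_x (4th arg) and fields (6th arg)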
+ # RE captures function name + 1st + 4th args (always tree + hfindex) + # 6th arg is 'fields' + self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(([a-zA-Z0-9_]+),\s*[a-zA-Z0-9_]+,\s*[a-zA-Z0-9_]+,\s*([a-zA-Z0-9_]+)\s*,\s*[a-zA-Z0-9_]+\s*,\s*([a-zA-Z0-9_]+)\s*,') + + self.file = None + self.mask_allowed = True + if fun_name.find('proto_tree_add_bits_') != -1: + self.mask_allowed = False + + + def find_calls(self, file, macros): + self.file = file + self.calls = [] + + with open(file, 'r', encoding="utf8") as f: + contents = f.read() + lines = contents.splitlines() + total_lines = len(lines) + for line_number,line in enumerate(lines): + # Want to check this, and next few lines + to_check = lines[line_number-1] + '\n' + # Nothing to check if function name isn't in it + if to_check.find(self.fun_name) != -1: + # Ok, add the next file lines before trying RE + for i in range(1, 4): + if to_check.find(';') != -1: + break + elif line_number+i < total_lines: + to_check += (lines[line_number-1+i] + '\n') + m = self.p.search(to_check) + if m: + fields = None + length = None + + if self.fun_name.find('add_bitmask') != -1: + fields = m.group(3) + else: + if self.p.groups == 3: + length = m.group(3) + + # Add call. We have length if re had 3 groups. + num_groups = self.p.groups + self.calls.append(Call(m.group(2), + macros, + line_number=line_number, + length=length, + fields=fields)) + + # Return true if bit position n is set in value. + def check_bit(self, value, n): + return (value & (0x1 << n)) != 0 + + def does_mask_cover_value(self, mask, value): + # Walk past any l.s. 0 bits in value + n = 0 + + mask_start = n + # Walk through any bits that are set and check they are in mask + while self.check_bit(value, n) and n <= 63: + if not self.check_bit(mask, n): + return False + n += 1 + + return True + + def check_against_items(self, items_defined, items_declared, items_declared_extern, check_missing_items=False, + field_arrays=None): + global errors_found + global warnings_found + + for call in self.calls: + + # Check lengths, but for now only for APIs that have length in bytes. + if self.fun_name.find('add_bits') == -1 and call.hf_name in items_defined: + if call.length and items_defined[call.hf_name].item_type in item_lengths: + if item_lengths[items_defined[call.hf_name].item_type] < call.length: + print('Warning:', self.file + ':' + str(call.line_number), + self.fun_name + ' called for', call.hf_name, ' - ', + 'item type is', items_defined[call.hf_name].item_type, 'but call has len', call.length) + warnings_found += 1 + + # Needs a +ve length + if self.positive_length and call.length != None: + if call.length != -1 and call.length <= 0: + print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' + + self.file + ':' + str(call.line_number) + + ' with length ' + str(call.length) + ' - must be > 0 or -1') + errors_found += 1 + + if call.hf_name in items_defined: + # Is type allowed? + if not items_defined[call.hf_name].item_type in self.allowed_types: + print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' + + self.file + ':' + str(call.line_number) + + ' with type ' + items_defined[call.hf_name].item_type) + print(' (allowed types are', self.allowed_types, ')\n') + errors_found += 1 + # No mask allowed + if not self.mask_allowed and items_defined[call.hf_name].mask_value != 0: + print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) 
called at ' + + self.file + ':' + str(call.line_number) + + ' with mask ' + items_defined[call.hf_name].mask + ' (must be zero!)\n') + errors_found += 1 + + if self.fun_name.find('add_bitmask') != -1 and call.hf_name in items_defined and field_arrays: + if call.fields in field_arrays: + if (items_defined[call.hf_name].mask_value and + field_arrays[call.fields][1] != 0 and items_defined[call.hf_name].mask_value != field_arrays[call.fields][1]): + # TODO: only really a problem if bit is set in array but not in top-level item? + if not self.does_mask_cover_value(items_defined[call.hf_name].mask_value, + field_arrays[call.fields][1]): + print('Warning:', self.file, call.hf_name, call.fields, "masks don't match. root=", + items_defined[call.hf_name].mask, + "array has", hex(field_arrays[call.fields][1])) + warnings_found += 1 + + if check_missing_items: + if call.hf_name in items_declared and not call.hf_name in items_declared_extern: + #not in common_hf_var_names: + print('Warning:', self.file + ':' + str(call.line_number), + self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found') + warnings_found += 1 + + +# Specialization of APICheck for add_item() calls +class ProtoTreeAddItemCheck(APICheck): + def __init__(self, ptv=None): + + # RE will capture whole call. + + if not ptv: + # proto_item * + # proto_tree_add_item(proto_tree *tree, int hfindex, tvbuff_t *tvb, + # const gint start, gint length, const guint encoding) + self.fun_name = 'proto_tree_add_item' + self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(\s*[a-zA-Z0-9_]+?,\s*([a-zA-Z0-9_]+?),\s*[a-zA-Z0-9_\+\s]+?,\s*[^,.]+?,\s*(.+),\s*([^,.]+?)\);') + else: + # proto_item * + # ptvcursor_add(ptvcursor_t *ptvc, int hfindex, gint length, + # const guint encoding) + self.fun_name = 'ptvcursor_add' + self.p = re.compile('[^\n]*' + self.fun_name + '\s*\([^,.]+?,\s*([^,.]+?),\s*([^,.]+?),\s*([a-zA-Z0-9_\-\>]+)') + + + def find_calls(self, file, macros): + self.file = file + self.calls = [] + with open(file, 'r', encoding="utf8") as f: + + contents = f.read() + lines = contents.splitlines() + total_lines = len(lines) + for line_number,line in enumerate(lines): + # Want to check this, and next few lines + to_check = lines[line_number-1] + '\n' + # Nothing to check if function name isn't in it + fun_idx = to_check.find(self.fun_name) + if fun_idx != -1: + # Ok, add the next file lines before trying RE + for i in range(1, 5): + if to_check.find(';') != -1: + break + elif line_number+i < total_lines: + to_check += (lines[line_number-1+i] + '\n') + # Lose anything before function call itself. + to_check = to_check[fun_idx:] + m = self.p.search(to_check) + if m: + # Throw out if parens not matched + if m.group(0).count('(') != m.group(0).count(')'): + continue + + enc = m.group(3) + hf_name = m.group(1) + if not enc.startswith('ENC_'): + if not enc in { 'encoding', 'enc', 'client_is_le', 'cigi_byte_order', 'endian', 'endianess', 'machine_encoding', 'byte_order', 'bLittleEndian', + 'p_mq_parm->mq_str_enc', 'p_mq_parm->mq_int_enc', + 'iEnc', 'strid_enc', 'iCod', 'nl_data->encoding', + 'argp->info->encoding', 'gquic_info->encoding', 'writer_encoding', + 'tds_get_int2_encoding(tds_info)', + 'tds_get_int4_encoding(tds_info)', + 'tds_get_char_encoding(tds_info)', + 'info->encoding', + 'item->encoding', + 'DREP_ENC_INTEGER(drep)', 'string_encoding', 'item', 'type', + 'dvb_enc_to_item_enc(encoding)', + 'packet->enc', + 'IS_EBCDIC(uCCS) ? 
ENC_EBCDIC : ENC_ASCII',
+                          'DREP_ENC_INTEGER(hdr->drep)',
+                          'dhcp_uuid_endian',
+                          'payload_le',
+                          'local_encoding',
+                          'big_endian',
+                          'hf_data_encoding',
+                          'IS_EBCDIC(eStr) ? ENC_EBCDIC : ENC_ASCII',
+                          'big_endian ? ENC_BIG_ENDIAN : ENC_LITTLE_ENDIAN',
+                          '(skip == 1) ? ENC_BIG_ENDIAN : ENC_LITTLE_ENDIAN',
+                          'pdu_info->sbc', 'pdu_info->mbc',
+                          'seq_info->txt_enc | ENC_NA',
+                          'BASE_SHOW_UTF_8_PRINTABLE',
+                          'dhcp_secs_endian',
+                          'is_mdns ? ENC_UTF_8|ENC_NA : ENC_ASCII|ENC_NA'
+                        }:
+                        global warnings_found
+
+                        print('Warning:', self.file + ':' + str(line_number),
+                              self.fun_name + ' called for "' + hf_name + '"', 'check last/enc param:', enc, '?')
+                        warnings_found += 1
+                self.calls.append(Call(hf_name, macros, line_number=line_number, length=m.group(2)))
+
+    def check_against_items(self, items_defined, items_declared, items_declared_extern,
+                            check_missing_items=False, field_arrays=None):
+        # For now, only complaining if length of call is longer than the item type implies.
+        #
+        # Could also be bugs where the length is always less than the type allows.
+        # Would involve keeping track (in the item) of whether any call had used the full length.
+
+        global warnings_found
+
+        for call in self.calls:
+            if call.hf_name in items_defined:
+                if call.length and items_defined[call.hf_name].item_type in item_lengths:
+                    if item_lengths[items_defined[call.hf_name].item_type] < call.length:
+                        print('Warning:', self.file + ':' + str(call.line_number),
+                              self.fun_name + ' called for', call.hf_name, ' - ',
+                              'item type is', items_defined[call.hf_name].item_type, 'but call has len', call.length)
+                        warnings_found += 1
+            elif check_missing_items:
+                if call.hf_name in items_declared and not call.hf_name in items_declared_extern:
+                #not in common_hf_var_names:
+                    print('Warning:', self.file + ':' + str(call.line_number),
+                          self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found')
+                    warnings_found += 1
+
+
+
+##################################################################################################
+# This is a set of items (by filter name) where we know that the bitmask is non-contiguous,
+# but is still believed to be correct.
+known_non_contiguous_fields = { 'wlan.fixed.capabilities.cfpoll.sta',
+                                'wlan.wfa.ie.wme.qos_info.sta.reserved',
+                                'btrfcomm.frame_type', # https://os.itec.kit.edu/downloads/sa_2006_roehricht-martin_flow-control-in-bluez.pdf
+                                'capwap.control.message_element.ac_descriptor.dtls_policy.r', # RFC 5415
+                                'couchbase.extras.subdoc.flags.reserved',
+                                'wlan.fixed.capabilities.cfpoll.ap',                   # These are 3 separate bits... 
+ 'wlan.wfa.ie.wme.tspec.ts_info.reserved', # matches other fields in same sequence + 'zbee_zcl_se.pp.attr.payment_control_configuration.reserved', # matches other fields in same sequence + 'zbee_zcl_se.pp.snapshot_payload_cause.reserved', # matches other fields in same sequence + 'ebhscr.eth.rsv', # matches other fields in same sequence + 'v120.lli', # non-contiguous field (http://www.acacia-net.com/wwwcla/protocol/v120_l2.htm) + 'stun.type.class', + 'bssgp.csg_id', 'tiff.t6.unused', 'artnet.ip_prog_reply.unused', + 'telnet.auth.mod.enc', 'osc.message.midi.bender', 'btle.data_header.rfu', + 'stun.type.method', # figure 3 in rfc 5389 + 'tds.done.status', # covers all bits in bitset + 'hf_iax2_video_csub', # RFC 5456, table 8.7 + 'iax2.video.subclass', + 'dnp3.al.ana.int', + 'pwcesopsn.cw.lm', + 'gsm_a.rr.format_id', # EN 301 503 + 'siii.mst.phase', # comment in code seems convinced + 'xmcp.type.class', + 'xmcp.type.method', + 'hf_hiqnet_flags', + 'hf_hiqnet_flagmask', + 'hf_h223_mux_mpl', + 'rdp.flags.pkt' + } +################################################################################################## + + +field_widths = { + 'FT_BOOLEAN' : 64, # TODO: Width depends upon 'display' field + 'FT_CHAR' : 8, + 'FT_UINT8' : 8, + 'FT_INT8' : 8, + 'FT_UINT16' : 16, + 'FT_INT16' : 16, + 'FT_UINT24' : 24, + 'FT_INT24' : 24, + 'FT_UINT32' : 32, + 'FT_INT32' : 32, + 'FT_UINT40' : 40, + 'FT_INT40' : 40, + 'FT_UINT48' : 48, + 'FT_INT48' : 48, + 'FT_UINT56' : 56, + 'FT_INT56' : 56, + 'FT_UINT64' : 64, + 'FT_INT64' : 64 +} + +# TODO: most of these might as well be strings... +def is_ignored_consecutive_filter(filter): + ignore_patterns = [ + re.compile(r'^elf.sh_type'), + re.compile(r'^elf.p_type'), + re.compile(r'^btavrcp.pdu_id'), + re.compile(r'^nstrace.trcdbg.val(\d+)'), + re.compile(r'^netlogon.dummy_string'), + re.compile(r'^opa.reserved'), + re.compile(r'^mpls_pm.timestamp\d\..*'), + re.compile(r'^wassp.data.mu_mac'), + re.compile(r'^thrift.type'), + re.compile(r'^quake2.game.client.command.move.angles'), + re.compile(r'^ipp.enum_value'), + re.compile(r'^idrp.error.subcode'), + re.compile(r'^ftdi-ft.lValue'), + re.compile(r'^6lowpan.src'), + re.compile(r'^couchbase.flex_frame.frame.id'), + re.compile(r'^rtps.param.id'), + re.compile(r'^rtps.locator.port'), + re.compile(r'^sigcomp.udvm.value'), + re.compile(r'^opa.mad.attributemodifier.n'), + re.compile(r'^smb.cmd'), + re.compile(r'^sctp.checksum'), + re.compile(r'^dhcp.option.end'), + re.compile(r'^nfapi.num.bf.vector.bf.value'), + re.compile(r'^dnp3.al.range.abs'), + re.compile(r'^dnp3.al.range.quantity'), + re.compile(r'^dnp3.al.index'), + re.compile(r'^dnp3.al.size'), + re.compile(r'^ftdi-ft.hValue'), + re.compile(r'^homeplug_av.op_attr_cnf.data.sw_sub'), + re.compile(r'^radiotap.he_mu.preamble_puncturing'), + re.compile(r'^ndmp.file'), + re.compile(r'^ocfs2.dlm.lvb'), + re.compile(r'^oran_fh_cus.reserved'), + re.compile(r'^qnet6.kif.msgsend.msg.read.xtypes0-7'), + re.compile(r'^qnet6.kif.msgsend.msg.write.xtypes0-7'), + re.compile(r'^mih.sig_strength'), + re.compile(r'^couchbase.flex_frame.frame.len'), + re.compile(r'^nvme-rdma.read_to_host_req'), + re.compile(r'^rpcap.dummy'), + re.compile(r'^sflow.flow_sample.output_interface'), + re.compile(r'^socks.results'), + re.compile(r'^opa.mad.attributemodifier.p'), + re.compile(r'^v5ua.efa'), + re.compile(r'^zbncp.data.tx_power'), + re.compile(r'^zbncp.data.nwk_addr'), + re.compile(r'^zbee_zcl_hvac.pump_config_control.attr.ctrl_mode'), + re.compile(r'^nat-pmp.external_port'), + 
re.compile(r'^zbee_zcl.attr.float'), + re.compile(r'^wpan-tap.phr.fsk_ms.mode'), + re.compile(r'^mysql.exec_flags'), + re.compile(r'^pim.metric_pref'), + re.compile(r'^modbus.regval_float'), + re.compile(r'^alcap.cau.value'), + re.compile(r'^bpv7.crc_field'), + re.compile(r'^at.chld.mode'), + re.compile(r'^btl2cap.psm'), + re.compile(r'^srvloc.srvtypereq.nameauthlistlen'), + re.compile(r'^a11.ext.code'), + re.compile(r'^adwin_config.port'), + re.compile(r'^afp.unknown'), + re.compile(r'^ansi_a_bsmap.mid.digit_1'), + re.compile(r'^ber.unknown.OCTETSTRING'), + re.compile(r'^btatt.handle'), + re.compile(r'^btl2cap.option_flushto'), + re.compile(r'^cip.network_segment.prod_inhibit'), + re.compile(r'^cql.result.rows.table_name'), + re.compile(r'^dcom.sa.vartype'), + re.compile(r'^f5ethtrailer.slot'), + re.compile(r'^ipdr.cm_ipv6_addr'), + re.compile(r'^mojito.kuid'), + re.compile(r'^mtp3.priority'), + re.compile(r'^pw.cw.length'), + re.compile(r'^rlc.ciphered_data'), + re.compile(r'^vp8.pld.pictureid'), + re.compile(r'^gryphon.sched.channel'), + re.compile(r'^pn_io.ioxs'), + re.compile(r'^pn_dcp.block_qualifier_reset'), + re.compile(r'^pn_dcp.suboption_device_instance'), + re.compile(r'^nfs.attr'), + re.compile(r'^nfs.create_session_flags'), + re.compile(r'^rmt-lct.toi64'), + re.compile(r'^gryphon.data.header_length'), + re.compile(r'^quake2.game.client.command.move.movement'), + re.compile(r'^isup.parameter_type'), + re.compile(r'^cip.port'), + re.compile(r'^adwin.fifo_no'), + re.compile(r'^bthci_evt.hci_vers_nr'), + re.compile(r'^gryphon.usdt.stmin_active'), + re.compile(r'^dnp3.al.anaout.int'), + re.compile(r'^dnp3.al.ana.int'), + re.compile(r'^dnp3.al.cnt'), + re.compile(r'^bthfp.chld.mode'), + re.compile(r'^nat-pmp.pml'), + re.compile(r'^isystemactivator.actproperties.ts.hdr'), + re.compile(r'^rtpdump.txt_addr'), + re.compile(r'^unistim.vocoder.id'), + re.compile(r'^mac.ueid'), + re.compile(r'cip.symbol.size'), + re.compile(r'dnp3.al.range.start'), + re.compile(r'dnp3.al.range.stop'), + re.compile(r'gtpv2.mp'), + re.compile(r'gvcp.cmd.resend.firstpacketid'), + re.compile(r'gvcp.cmd.resend.lastpacketid'), + re.compile(r'wlan.bf.reserved'), + re.compile(r'opa.sa.reserved'), + re.compile(r'rmt-lct.ext_tol_transfer_len'), + re.compile(r'pn_io.error_code2'), + re.compile(r'gryphon.ldf.schedsize'), + re.compile(r'wimaxmacphy.burst_opt_mimo_matrix_indicator'), + re.compile(r'alcap.*bwt.*.[b|f]w'), + re.compile(r'ccsds.packet_type'), + re.compile(r'iso15765.flow_control.stmin'), + re.compile(r'msdo.PieceSize'), + re.compile(r'opa.clasportinfo.redirect.reserved'), + re.compile(r'p_mul.unused'), + re.compile(r'btle.control.phys.le_[1|2]m_phy'), + re.compile(r'opa.pm.dataportcounters.reserved'), + re.compile(r'opa.switchinfo.switchcapabilitymask.reserved'), + re.compile(r'nvme-rdma.read_from_host_resp'), + re.compile(r'nvme-rdma.write_to_host_req'), + re.compile(r'netlink-route.ifla_linkstats.rx_errors.fifo_errs'), + re.compile(r'mtp3mg.japan_spare'), + re.compile(r'ixveriwave.errors.ip_checksum_error'), + re.compile(r'ansi_a_bsmap.cm2.scm.bc_entry.opmode[0|1]') + ] + + for patt in ignore_patterns: + if patt.match(filter): + return True + return False + + +class ValueString: + def __init__(self, file, name, vals, macros, do_extra_checks=False): + self.file = file + self.name = name + self.raw_vals = vals + self.parsed_vals = {} + self.seen_labels = set() + self.valid = True + self.min_value = 99999 + self.max_value = -99999 + + # Now parse out each entry in the value_string + matches = 
re.finditer(r'\{\s*([0-9_A-Za-z]*)\s*,\s*(".*?")\s*}\s*,', self.raw_vals) + for m in matches: + value,label = m.group(1), m.group(2) + if value in macros: + value = macros[value] + elif any(not c in '0123456789abcdefABCDEFxX' for c in value): + self.valid = False + return + + try: + # Read according to the appropriate base. + if value.lower().startswith('0x'): + value = int(value, 16) + elif value.startswith('0b'): + value = int(value[2:], 2) + elif value.startswith('0'): + value = int(value, 8) + else: + value = int(value, 10) + except: + return + + global warnings_found + + # Check for value conflict before inserting + if value in self.parsed_vals and label != self.parsed_vals[value]: + print('Warning:', self.file, ': value_string', self.name, '- value ', value, 'repeated with different values - was', + self.parsed_vals[value], 'now', label) + warnings_found += 1 + else: + # Add into table, while checking for repeated label + self.parsed_vals[value] = label + if do_extra_checks and label in self.seen_labels: + # These are commonly repeated.. + exceptions = [ 'reserved', 'invalid', 'unused', 'not used', 'unknown', 'undefined', 'spare', + 'unallocated', 'not assigned', 'implementation specific', 'unspecified', + 'other', 'for further study', 'future', 'vendor specific', 'obsolete', 'none', + 'shall not be used', 'national use', 'unassigned', 'oem', 'user defined', + 'manufacturer specific', 'not specified', 'proprietary', 'operator-defined', + 'dynamically allocated', 'user specified', 'xxx', 'default', 'planned', 'not req' ] + excepted = False + for ex in exceptions: + if label.lower().find(ex) != -1: + excepted = True + break + + if not excepted: + print('Warning:', self.file, ': value_string', self.name, '- label ', label, 'repeated') + warnings_found += 1 + else: + self.seen_labels.add(label) + + if value > self.max_value: + self.max_value = value + if value < self.min_value: + self.min_value = value + + def extraChecks(self): + global warnings_found + + # Look for one value missing in range (quite common...) + num_items = len(self.parsed_vals) + span = self.max_value - self.min_value + 1 + if num_items > 4 and span > num_items and (span-num_items <=1): + for val in range(self.min_value, self.max_value): + if not val in self.parsed_vals: + print('Warning:', self.file, ': value_string', self.name, '- value', val, 'missing?', '(', num_items, 'entries)') + global warnings_found + warnings_found += 1 + + # Do most of the labels match the number? + matching_label_entries = set() + for val in self.parsed_vals: + if self.parsed_vals[val].find(str(val)) != -1: + # TODO: pick out multiple values rather than concat into wrong number + parsed_value = int(''.join(d for d in self.parsed_vals[val] if d.isdecimal())) + if val == parsed_value: + matching_label_entries.add(val) + + if len(matching_label_entries) >= 4 and len(matching_label_entries) > 0 and len(matching_label_entries) < num_items and len(matching_label_entries) >= num_items-1: + # Be forgiving about first or last entry + first_val = list(self.parsed_vals)[0] + last_val = list(self.parsed_vals)[-1] + if not first_val in matching_label_entries or not last_val in matching_label_entries: + return + print('Warning:', self.file, ': value_string', self.name, 'Labels match value except for 1!', matching_label_entries, num_items, self) + + # Do all labels start with lower-or-upper char? 
+        startLower,startUpper = 0,0
+        for val in self.parsed_vals:
+            first_letter = self.parsed_vals[val][1]
+            if first_letter.isalpha():
+                if first_letter.isupper():
+                    startUpper += 1
+                else:
+                    startLower += 1
+        if startLower > 0 and startUpper > 0:
+            if startLower+startUpper > 10 and (startLower <=3 or startUpper <=3):
+                standouts = []
+                if startLower < startUpper:
+                    standouts += [self.parsed_vals[val] for val in self.parsed_vals if self.parsed_vals[val][1].islower()]
+                if startLower > startUpper:
+                    standouts += [self.parsed_vals[val] for val in self.parsed_vals if self.parsed_vals[val][1].isupper()]
+
+                print('Note:', self.file, ': value_string', self.name, 'mix of upper', startUpper, 'and lower', startLower, standouts)
+
+
+    def __str__(self):
+        return self.name + '= { ' + self.raw_vals + ' }'
+
+
+class RangeStringEntry:
+    def __init__(self, min, max, label):
+        self.min = min
+        self.max = max
+        self.label = label
+
+    def hides(self, min, max):
+        return min >= self.min and max <= self.max
+
+    def __str__(self):
+        return '(' + str(self.min) + ', ' + str(self.max) + ') -> ' + self.label
+
+
+class RangeString:
+    def __init__(self, file, name, vals, macros, do_extra_checks=False):
+        self.file = file
+        self.name = name
+        self.raw_vals = vals
+        self.parsed_vals = []
+        self.seen_labels = set()
+        self.valid = True
+        self.min_value = 99999
+        self.max_value = -99999
+
+        # Now parse out each entry in the range_string
+        matches = re.finditer(r'\{\s*([0-9_A-Za-z]*)\s*,\s*([0-9_A-Za-z]*)\s*,\s*(".*?")\s*}\s*,', self.raw_vals)
+        for m in matches:
+            min,max,label = m.group(1), m.group(2), m.group(3)
+            if min in macros:
+                min = macros[min]
+            elif any(not c in '0123456789abcdefABCDEFxX' for c in min):
+                self.valid = False
+                return
+            if max in macros:
+                max = macros[max]
+            elif any(not c in '0123456789abcdefABCDEFxX' for c in max):
+                self.valid = False
+                return
+
+
+            try:
+                # Read according to the appropriate base.
+                if min.lower().startswith('0x'):
+                    min = int(min, 16)
+                elif min.startswith('0b'):
+                    min = int(min[2:], 2)
+                elif min.startswith('0'):
+                    min = int(min, 8)
+                else:
+                    min = int(min, 10)
+
+                if max.lower().startswith('0x'):
+                    max = int(max, 16)
+                elif max.startswith('0b'):
+                    max = int(max[2:], 2)
+                elif max.startswith('0'):
+                    max = int(max, 8)
+                else:
+                    max = int(max, 10)
+            except:
+                return
+
+            # Now check what we've found.
+            global warnings_found
+
+            if min < self.min_value:
+                self.min_value = min
+            # For overall max value, still use min of each entry.
+            # It is common for entries to extend to e.g. 0xff, but at least we can check for items
+            # that can never match if we only check the min.
+            if min > self.max_value:
+                self.max_value = min
+
+            # This value should not be entirely hidden by earlier entries
+            for prev in self.parsed_vals:
+                if prev.hides(min, max):
+                    print('Warning:', self.file, ': range_string label', label, 'hidden by', prev)
+                    warnings_found += 1
+
+            # Min should not be > max
+            if min > max:
+                print('Warning:', self.file, ': range_string', self.name, 'entry', label, 'min', min, '>', max)
+                warnings_found += 1
+
+            # Check label.
+            if label[1:-1].startswith(' ') or label[1:-1].endswith(' '):
+                print('Warning:', self.file, ': range_string', self.name, 'entry', label, 'starts or ends with space')
+                warnings_found += 1
+
+            # OK, add this entry
+            self.parsed_vals.append(RangeStringEntry(min, max, label))
+
+    def extraChecks(self):
+        pass
+        # TODO: some checks over all entries. e.g.,
+        # - can multiple values be coalesced into 1?
+        # - if in all cases min==max, suggest value_string instead? 
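The hides() test above is what drives the shadowing warning: a range_string table is searched top-down, so a row whose [min, max] span lies entirely inside an earlier row's span can never be reached. A minimal standalone sketch of the same check, using made-up rows rather than anything from a real dissector:

    class Row:
        def __init__(self, lo, hi, label):
            self.lo, self.hi, self.label = lo, hi, label

        def hides(self, lo, hi):
            # An earlier row hides a later one if it already covers that whole
            # span, because a lookup returns the first row that matches.
            return lo >= self.lo and hi <= self.hi

    rows = [Row(0x00, 0xff, 'Normal'), Row(0x10, 0x1f, 'Special')]
    for i, row in enumerate(rows):
        for prev in rows[:i]:
            if prev.hides(row.lo, row.hi):
                print(row.label, 'is unreachable - shadowed by', prev.label)

Running this prints that 'Special' is shadowed by 'Normal', which is exactly the situation the 'hidden by' warning reports.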
+
+
+
+
+# Look for value_string entries in a dissector file. Return a dict name -> ValueString
+def findValueStrings(filename, macros, do_extra_checks=False):
+    vals_found = {}
+
+    #static const value_string radio_type_vals[] =
+    #{
+    #    { 0, "FDD"},
+    #    { 1, "TDD"},
+    #    { 0, NULL }
+    #};
+
+    with open(filename, 'r', encoding="utf8") as f:
+        contents = f.read()
+
+        # Remove comments so as not to trip up RE.
+        contents = removeComments(contents)
+
+        matches = re.finditer(r'.*const value_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9_\-\*\#\.:\/\(\)\'\s\"]*)\};', contents)
+        for m in matches:
+            name = m.group(1)
+            vals = m.group(2)
+            vals_found[name] = ValueString(filename, name, vals, macros, do_extra_checks)
+
+    return vals_found
+
+# Look for range_string entries in a dissector file. Return a dict name -> RangeString
+def findRangeStrings(filename, macros, do_extra_checks=False):
+    vals_found = {}
+
+    #static const range_string symbol_table_shndx_rvals[] = {
+    #    { 0x0000, 0x0000, "Undefined" },
+    #    { 0x0001, 0xfeff, "Normal Section" },
+    #    { 0, 0, NULL }
+    #};
+
+    with open(filename, 'r', encoding="utf8") as f:
+        contents = f.read()
+
+        # Remove comments so as not to trip up RE.
+        contents = removeComments(contents)
+
+        matches = re.finditer(r'.*const range_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9_\-\*\#\.:\/\(\)\'\s\"]*)\};', contents)
+        for m in matches:
+            name = m.group(1)
+            vals = m.group(2)
+            vals_found[name] = RangeString(filename, name, vals, macros, do_extra_checks)
+
+    return vals_found
+
+
+
+# The relevant parts of an hf item. Used as value in dict where hf variable name is key.
+class Item:
+
+    # Keep the previous few items
+    previousItems = []
+
+    def __init__(self, filename, hf, filter, label, item_type, display, strings, macros,
+                 value_strings, range_strings,
+                 mask=None, check_mask=False, mask_exact_width=False, check_label=False,
+                 check_consecutive=False, blurb=''):
+        self.filename = filename
+        self.hf = hf
+        self.filter = filter
+        self.label = label
+        self.mask = mask
+        self.strings = strings
+        self.mask_exact_width = mask_exact_width
+
+        global warnings_found
+
+        self.set_mask_value(macros)
+
+        if check_consecutive:
+            for previous_index,previous_item in enumerate(Item.previousItems):
+                if previous_item.filter == filter:
+                    if label != previous_item.label:
+                        if not is_ignored_consecutive_filter(self.filter):
+                            print('Warning:', filename, hf, ': - filter "' + filter +
+                                  '" appears ' + str(previous_index+1) + ' items before - labels are "' + previous_item.label + '" and "' + label + '"')
+                            warnings_found += 1
+
+            # Add this one to front of (short) previous list
+            Item.previousItems = [self] + Item.previousItems
+            if len(Item.previousItems) > 5:
+                # Get rid of oldest one now
+                #Item.previousItems = Item.previousItems[:-1]
+                Item.previousItems.pop()
+
+        self.item_type = item_type
+        self.display = display
+
+        # Optionally check label (short and long).
+        if check_label:
+            self.check_label(label, 'label')
+            #self.check_label(blurb, 'blurb')
+
+        # Optionally check that mask bits are contiguous
+        if check_mask:
+            if self.mask_read and not mask in { 'NULL', '0x0', '0', '0x00' }:
+                self.check_contiguous_bits(mask)
+                self.check_num_digits(self.mask)
+                # N.B., if last entry in set is removed, see around 18,000 warnings
+                self.check_digits_all_zeros(self.mask)
+
+        # N.B. 
these checks are already done by checkApis.pl + if strings.find('RVALS') != -1 and display.find('BASE_RANGE_STRING') == -1: + print('Warning: ' + filename, hf, 'filter "' + filter + ' strings has RVALS but display lacks BASE_RANGE_STRING') + warnings_found += 1 + + # For RVALS, is BASE_RANGE_STRING also set (checked by checkApis.pl)? + if strings.find('VALS_EXT_PTR') != -1 and display.find('BASE_EXT_STRING') == -1: + print('Warning: ' + filename, hf, 'filter "' + filter + ' strings has VALS_EXT_PTR but display lacks BASE_EXT_STRING') + warnings_found += 1 + + # For VALS, lookup the corresponding ValueString and try to check range. + vs_re = re.compile(r'VALS\(([a-zA-Z0-9_]*)\)') + m = vs_re.search(strings) + if m: + self.vs_name = m.group(1) + if self.vs_name in value_strings: + vs = value_strings[self.vs_name] + self.check_value_string_range(vs.min_value, vs.max_value) + + # For RVALS, lookup the corresponding RangeString and try to check range. + rs_re = re.compile(r'RVALS\(([a-zA-Z0-9_]*)\)') + m = rs_re.search(strings) + if m: + self.rs_name = m.group(1) + if self.rs_name in range_strings: + rs = range_strings[self.rs_name] + self.check_range_string_range(rs.min_value, rs.max_value) + + + def __str__(self): + return 'Item ({0} "{1}" {2} type={3}:{4} {5} mask={6})'.format(self.filename, self.label, self.filter, self.item_type, self.display, self.strings, self.mask) + + def check_label(self, label, label_name): + global warnings_found + + # TODO: this is masking a bug where the re for the item can't cope with macro for containing ',' for mask arg.. + if label.count('"') == 1: + return + + if label.startswith(' ') or label.endswith(' '): + print('Warning: ' + self.filename, self.hf, 'filter "' + self.filter, label_name, '"' + label + '" begins or ends with a space') + warnings_found += 1 + + if (label.count('(') != label.count(')') or + label.count('[') != label.count(']') or + label.count('{') != label.count('}')): + # Ignore if includes quotes, as may be unbalanced. + if label.find("'") == -1: + print('Warning: ' + self.filename, self.hf, 'filter "' + self.filter + '"', label_name, '"' + label + '"', 'has unbalanced parens/braces/brackets') + warnings_found += 1 + if self.item_type != 'FT_NONE' and label.endswith(':'): + print('Warning: ' + self.filename, self.hf, 'filter "' + self.filter + '"', label_name, '"' + label + '"', 'ends with an unnecessary colon') + warnings_found += 1 + + + def set_mask_value(self, macros): + try: + self.mask_read = True + + # Substitute mask if found as a macro.. + if self.mask in macros: + self.mask = macros[self.mask] + elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask): + self.mask_read = False + self.mask_value = 0 + return + + + # Read according to the appropriate base. + if self.mask.startswith('0x'): + self.mask_value = int(self.mask, 16) + elif self.mask.startswith('0'): + self.mask_value = int(self.mask, 8) + else: + self.mask_value = int(self.mask, 10) + except: + self.mask_read = False + self.mask_value = 0 + + def check_value_string_range(self, vs_min, vs_max): + item_width = self.get_field_width_in_bits() + + if item_width is None: + # Type field defined by macro? + return + + if self.mask_value > 0: + # Distance between first and last '1' + bitBools = bin(self.mask_value)[2:] + mask_width = bitBools.rfind('1') - bitBools.find('1') + 1 + else: + # No mask is effectively a full mask.. 
+            mask_width = item_width
+
+        item_max = (2 ** mask_width)
+        if vs_max > item_max:
+            global warnings_found
+            print('Warning:', self.filename, self.hf, 'filter=', self.filter,
+                  self.strings, "has max value", vs_max, '(' + hex(vs_max) + ')', "which doesn't fit into", mask_width, 'bits',
+                  '( mask is', hex(self.mask_value), ')')
+            warnings_found += 1
+
+    def check_range_string_range(self, rs_min, rs_max):
+        item_width = self.get_field_width_in_bits()
+
+        if item_width is None:
+            # Type field defined by macro?
+            return
+
+        if self.mask_value > 0:
+            # Distance between first and last '1'
+            bitBools = bin(self.mask_value)[2:]
+            mask_width = bitBools.rfind('1') - bitBools.find('1') + 1
+        else:
+            # No mask is effectively a full mask..
+            mask_width = item_width
+
+        item_max = (2 ** mask_width)
+        if rs_max > item_max:
+            global warnings_found
+            print('Warning:', self.filename, self.hf, 'filter=', self.filter,
+                  self.strings, "has values", rs_min, rs_max, '(' + hex(rs_max) + ')', "which doesn't fit into", mask_width, 'bits',
+                  '( mask is', hex(self.mask_value), ')')
+            warnings_found += 1
+
+
+
+
+    # Return true if bit position n is set in value.
+    def check_bit(self, value, n):
+        return (value & (0x1 << n)) != 0
+
+    # Output a warning if non-contiguous bits are found in the mask (guint64).
+    # Note that this legitimately happens in several dissectors where multiple reserved/unassigned
+    # bits are conflated into one field.
+    # - there is probably a cool/efficient way to check this (+1 => 1-bit set?)
+    def check_contiguous_bits(self, mask):
+        if not self.mask_value:
+            return
+
+        # Do see legitimate non-contiguous bits often for these..
+        if name_has_one_of(self.hf, ['reserved', 'unknown', 'unused', 'spare']):
+            return
+        if name_has_one_of(self.label, ['reserved', 'unknown', 'unused', 'spare']):
+            return
+
+
+        # Walk past any l.s. 0 bits
+        n = 0
+        while not self.check_bit(self.mask_value, n) and n <= 63:
+            n += 1
+        if n==63:
+            return
+
+        mask_start = n
+        # Walk through any bits that are set
+        while self.check_bit(self.mask_value, n) and n <= 63:
+            n += 1
+        n += 1
+
+        if n >= 63:
+            return
+
+        # Look up the field width
+        field_width = 0
+        if not self.item_type in field_widths:
+            print('unexpected item_type is ', self.item_type)
+            field_width = 64
+        else:
+            field_width = self.get_field_width_in_bits()
+
+
+        # It's a problem if the mask_width is > field_width - some of the bits won't get looked at!?
+        mask_width = n-1-mask_start
+        if field_width is not None and (mask_width > field_width):
+            # N.B. No call, so no line number.
+            print(self.filename + ':', self.hf, 'filter=', self.filter, self.item_type, 'so field_width=', field_width,
+                  'but mask is', mask, 'which is', mask_width, 'bits wide!')
+            global warnings_found
+            warnings_found += 1
+        # Now, any more set bits are an error!
+        if self.filter in known_non_contiguous_fields or self.filter.startswith('rtpmidi'):
+            # Don't report if we know this one is Ok.
+            # TODO: also exclude items that are used as root in add_bitmask() calls?
+            return
+        while n <= 63:
+            if self.check_bit(self.mask_value, n):
+                print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - mask with non-contiguous bits',
+                      mask, '(', hex(self.mask_value), ')')
+                warnings_found += 1
+                return
+            n += 1
+
+    def get_field_width_in_bits(self):
+        if self.item_type == 'FT_BOOLEAN':
+            if self.display == 'NULL':
+                return 8 # i.e. 
1 byte + elif self.display == 'BASE_NONE': + return 8 + elif self.display == 'SEP_DOT': # from proto.h, only meant for FT_BYTES + return 64 + else: + try: + # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble. + return int((int(self.display) + 3)/4)*4 + except: + return None + else: + if self.item_type in field_widths: + # Lookup fixed width for this type + return field_widths[self.item_type] + else: + return None + + def check_num_digits(self, mask): + if mask.startswith('0x') and len(mask) > 3: + global warnings_found + global errors_found + + width_in_bits = self.get_field_width_in_bits() + # Warn if odd number of digits. TODO: only if >= 5? + if len(mask) % 2 and self.item_type != 'FT_BOOLEAN': + print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - mask has odd number of digits', mask, + 'expected max for', self.item_type, 'is', int(width_in_bits/4)) + warnings_found += 1 + + if self.item_type in field_widths: + # Longer than it should be? + if width_in_bits is None: + return + if len(mask)-2 > width_in_bits/4: + extra_digits = mask[2:2+(len(mask)-2 - int(width_in_bits/4))] + # Its definitely an error if any of these are non-zero, as they won't have any effect! + if extra_digits != '0'*len(extra_digits): + print('Error:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask, "with len is", len(mask)-2, + "but type", self.item_type, " indicates max of", int(width_in_bits/4), + "and extra digits are non-zero (" + extra_digits + ")") + errors_found += 1 + else: + # Has extra leading zeros, still confusing, so warn. + print('Warning:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask, "with len", len(mask)-2, + "but type", self.item_type, " indicates max of", int(width_in_bits/4)) + warnings_found += 1 + + # Strict/fussy check - expecting mask length to match field width exactly! + # Currently only doing for FT_BOOLEAN, and don't expect to be in full for 64-bit fields! + if self.mask_exact_width: + ideal_mask_width = int(width_in_bits/4) + if self.item_type == 'FT_BOOLEAN' and ideal_mask_width < 16 and len(mask)-2 != ideal_mask_width: + print('Warning:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask, "with len", len(mask)-2, + "but type", self.item_type, "|", self.display, " indicates should be", int(width_in_bits/4)) + warnings_found += 1 + + else: + # This type shouldn't have a mask set at all. + print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - item has type', self.item_type, 'but mask set:', mask) + warnings_found += 1 + + def check_digits_all_zeros(self, mask): + if mask.startswith('0x') and len(mask) > 3: + if mask[2:] == '0'*(len(mask)-2): + print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - item mask has all zeros - this is confusing! :', '"' + mask + '"') + global warnings_found + warnings_found += 1 + + # A mask where all bits are set should instead be 0. 
+ # Exceptions might be where: + # - in add_bitmask() + # - represents flags, but dissector is not yet decoding them + def check_full_mask(self, mask, field_arrays): + if self.item_type == "FT_BOOLEAN": + return + if self.label.lower().find('mask') != -1 or self.label.lower().find('flag') != -1 or self.label.lower().find('bitmap') != -1: + return + if mask.startswith('0x') and len(mask) > 3: + width_in_bits = self.get_field_width_in_bits() + if not width_in_bits: + return + num_digits = int(width_in_bits / 4) + if num_digits is None: + return + if mask[2:] == 'f'*num_digits or mask[2:] == 'F'*num_digits: + # Don't report if appears in a 'fields' array + for arr in field_arrays: + list = field_arrays[arr][0] + if self.hf in list: + # These need to have a mask - don't judge for being 0 + return + + print('Note:', self.filename, self.hf, 'filter=', self.filter, " - mask is all set - if only want value (rather than bits), set 0 instead? :", '"' + mask + '"') + + # An item that appears in a bitmask set, needs to have a non-zero mask. + def check_mask_if_in_field_array(self, mask, field_arrays): + # Work out if this item appears in a field array + found = False + array_name = None + for arr in field_arrays: + list = field_arrays[arr][0] + if self.hf in list: + # These need to have a mask - don't judge for being 0 + found = True + array_name = arr + break + + if found: + # It needs to have a non-zero mask. + if self.mask_read and self.mask_value == 0: + print('Error:', self.filename, self.hf, 'is in fields array', arr, 'but has a zero mask - this is not allowed') + global errors_found + errors_found += 1 + + + + # Return True if appears to be a match + def check_label_vs_filter(self, reportError=True, reportNumericalMismatch=True): + global warnings_found + + last_filter = self.filter.split('.')[-1] + last_filter_orig = last_filter + last_filter = last_filter.replace('-', '') + last_filter = last_filter.replace('_', '') + last_filter = last_filter.replace(' ', '') + label = self.label + label_orig = label + label = label.replace(' ', '') + label = label.replace('-', '') + label = label.replace('_', '') + label = label.replace('(', '') + label = label.replace(')', '') + label = label.replace('/', '') + label = label.replace("'", '') + + + # OK if filter is abbrev of label. + label_words = self.label.split(' ') + label_words = [w for w in label_words if len(w)] + if len(label_words) == len(last_filter): + #print(label_words) + abbrev_letters = [w[0] for w in label_words] + abbrev = ''.join(abbrev_letters) + if abbrev.lower() == last_filter.lower(): + return True + + # If both have numbers, they should probably match! + label_numbers = [int(n) for n in re.findall(r'\d+', label_orig)] + filter_numbers = [int(n) for n in re.findall(r'\d+', last_filter_orig)] + if len(label_numbers) == len(filter_numbers) and label_numbers != filter_numbers: + if reportNumericalMismatch: + print('Note:', self.filename, self.hf, 'label="' + self.label + '" has different **numbers** from filter="' + self.filter + '"') + print(label_numbers, filter_numbers) + return False + + # If they match after trimming number from filter, they should match. + if label.lower() == last_filter.lower().rstrip("0123456789"): + return True + + # Are they just different? 
+        if label.lower().find(last_filter.lower()) == -1:
+            if reportError:
+                print('Warning:', self.filename, self.hf, 'label="' + self.label + '" does not seem to match filter="' + self.filter + '"')
+                warnings_found += 1
+            return False
+
+        return True
+
+
+class CombinedCallsCheck:
+    def __init__(self, file, apiChecks):
+        self.file = file
+        self.apiChecks = apiChecks
+        self.get_all_calls()
+
+    def get_all_calls(self):
+        self.all_calls = []
+        # Combine calls into one list.
+        for check in self.apiChecks:
+            self.all_calls += check.calls
+
+        # Sort by line number.
+        self.all_calls.sort(key=lambda x:x.line_number)
+
+    def check_consecutive_item_calls(self):
+        lines = open(self.file, 'r', encoding="utf8").read().splitlines()
+
+        prev = None
+        for call in self.all_calls:
+
+            # These names commonly do appear together..
+            if name_has_one_of(call.hf_name, [ 'unused', 'unknown', 'spare', 'reserved', 'default']):
+                continue
+
+            if prev and call.hf_name == prev.hf_name:
+                # More compelling if close together..
+                if call.line_number>prev.line_number and call.line_number-prev.line_number <= 4:
+                    scope_different = False
+                    for l in range(prev.line_number, call.line_number-1):
+                        if lines[l].find('{') != -1 or lines[l].find('}') != -1 or lines[l].find('else') != -1 or lines[l].find('break;') != -1 or lines[l].find('if ') != -1:
+                            scope_different = True
+                            break
+                    # Also more compelling if we check for scope changes { } in the lines in-between?
+                    if not scope_different:
+                        print('Warning:', self.file + ':' + str(call.line_number),
+                              call.hf_name + ' called consecutively at line', call.line_number, '- previous at', prev.line_number)
+                        global warnings_found
+                        warnings_found += 1
+            prev = call
+
+
+
+
+# These are APIs in proto.c that check a set of types at runtime and can print '.. is not of type ..' to the console
+# if the type is not suitable. 
+apiChecks = [] +apiChecks.append(APICheck('proto_tree_add_item_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}, positive_length=True)) +apiChecks.append(APICheck('proto_tree_add_item_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) +apiChecks.append(APICheck('ptvcursor_add_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}, positive_length=True)) +apiChecks.append(APICheck('ptvcursor_add_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}, positive_length=True)) +apiChecks.append(APICheck('ptvcursor_add_ret_string', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'})) +apiChecks.append(APICheck('ptvcursor_add_ret_boolean', { 'FT_BOOLEAN'}, positive_length=True)) +apiChecks.append(APICheck('proto_tree_add_item_ret_uint64', { 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64'}, positive_length=True)) +apiChecks.append(APICheck('proto_tree_add_item_ret_int64', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}, positive_length=True)) +apiChecks.append(APICheck('proto_tree_add_item_ret_boolean', { 'FT_BOOLEAN'}, positive_length=True)) +apiChecks.append(APICheck('proto_tree_add_item_ret_string_and_length', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'})) +apiChecks.append(APICheck('proto_tree_add_item_ret_display_string_and_length', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', + 'FT_STRINGZPAD', 'FT_STRINGZTRUNC', 'FT_BYTES', 'FT_UINT_BYTES'})) +apiChecks.append(APICheck('proto_tree_add_item_ret_time_string', { 'FT_ABSOLUTE_TIME', 'FT_RELATIVE_TIME'})) +apiChecks.append(APICheck('proto_tree_add_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'})) +apiChecks.append(APICheck('proto_tree_add_uint_format_value', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'})) +apiChecks.append(APICheck('proto_tree_add_uint_format', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'})) +apiChecks.append(APICheck('proto_tree_add_uint64', { 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64', 'FT_FRAMENUM'})) +apiChecks.append(APICheck('proto_tree_add_int64', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'})) +apiChecks.append(APICheck('proto_tree_add_int64_format_value', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'})) +apiChecks.append(APICheck('proto_tree_add_int64_format', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'})) +apiChecks.append(APICheck('proto_tree_add_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) +apiChecks.append(APICheck('proto_tree_add_int_format_value', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) +apiChecks.append(APICheck('proto_tree_add_int_format', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) +apiChecks.append(APICheck('proto_tree_add_boolean', { 'FT_BOOLEAN'})) +apiChecks.append(APICheck('proto_tree_add_boolean64', { 'FT_BOOLEAN'})) +apiChecks.append(APICheck('proto_tree_add_float', { 'FT_FLOAT'})) +apiChecks.append(APICheck('proto_tree_add_float_format', { 'FT_FLOAT'})) +apiChecks.append(APICheck('proto_tree_add_float_format_value', { 'FT_FLOAT'})) +apiChecks.append(APICheck('proto_tree_add_double', { 'FT_DOUBLE'})) +apiChecks.append(APICheck('proto_tree_add_double_format', { 'FT_DOUBLE'})) +apiChecks.append(APICheck('proto_tree_add_double_format_value', { 'FT_DOUBLE'})) +apiChecks.append(APICheck('proto_tree_add_string', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'})) 
+apiChecks.append(APICheck('proto_tree_add_string_format', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
+apiChecks.append(APICheck('proto_tree_add_string_format_value', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
+apiChecks.append(APICheck('proto_tree_add_guid', { 'FT_GUID'}))
+apiChecks.append(APICheck('proto_tree_add_oid', { 'FT_OID'}))
+apiChecks.append(APICheck('proto_tree_add_none_format', { 'FT_NONE'}))
+apiChecks.append(APICheck('proto_tree_add_item_ret_varint', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32', 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
+                                                             'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM',
+                                                             'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',}))
+apiChecks.append(APICheck('proto_tree_add_boolean_bits_format_value', { 'FT_BOOLEAN'}))
+apiChecks.append(APICheck('proto_tree_add_boolean_bits_format_value64', { 'FT_BOOLEAN'}))
+apiChecks.append(APICheck('proto_tree_add_ascii_7bits_item', { 'FT_STRING'}))
+# TODO: positions are different, and takes 2 hf_fields..
+#apiChecks.append(APICheck('proto_tree_add_checksum', { 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}))
+apiChecks.append(APICheck('proto_tree_add_int64_bits_format_value', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}))
+
+# TODO: add proto_tree_add_bytes_item, proto_tree_add_time_item ?
+
+bitmask_types = { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32',
+                  'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32',
+                  'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',
+                  'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
+                  'FT_BOOLEAN'}
+apiChecks.append(APICheck('proto_tree_add_bitmask', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_tree', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_ret_uint64', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_with_flags', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_with_flags_ret_uint64', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_value', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_value_with_flags', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_len', bitmask_types))
+# N.B., proto_tree_add_bitmask_list does not have a root item, just a subtree...
+
+add_bits_types = { 'FT_CHAR', 'FT_BOOLEAN',
+                   'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',
+                   'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32', 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
+                   'FT_BYTES'}
+apiChecks.append(APICheck('proto_tree_add_bits_item', add_bits_types))
+apiChecks.append(APICheck('proto_tree_add_bits_ret_val', add_bits_types))
+
+# TODO: doesn't even have an hf_item !
+#apiChecks.append(APICheck('proto_tree_add_bitmask_text', bitmask_types))
+
+# Check some ptvcursor calls too. 
+apiChecks.append(APICheck('ptvcursor_add_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'})) +apiChecks.append(APICheck('ptvcursor_add_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) +apiChecks.append(APICheck('ptvcursor_add_ret_boolean', { 'FT_BOOLEAN'})) + + +# Also try to check proto_tree_add_item() calls (for length) +apiChecks.append(ProtoTreeAddItemCheck()) +apiChecks.append(ProtoTreeAddItemCheck(True)) # for ptvcursor_add() + + + +def removeComments(code_string): + code_string = re.sub(re.compile(r"/\*.*?\*/",re.DOTALL ) ,"" , code_string) # C-style comment + code_string = re.sub(re.compile(r"//.*?\n" ) ,"" , code_string) # C++-style comment + code_string = re.sub(re.compile(r"#if 0.*?#endif",re.DOTALL ) ,"" , code_string) # Ignored region + + return code_string + +# Test for whether the given file was automatically generated. +def isGeneratedFile(filename): + # Check file exists - e.g. may have been deleted in a recent commit. + if not os.path.exists(filename): + return False + + # Open file + f_read = open(os.path.join(filename), 'r', encoding="utf8") + lines_tested = 0 + for line in f_read: + # The comment to say that its generated is near the top, so give up once + # get a few lines down. + if lines_tested > 10: + f_read.close() + return False + if (line.find('Generated automatically') != -1 or + line.find('Generated Automatically') != -1 or + line.find('Autogenerated from') != -1 or + line.find('is autogenerated') != -1 or + line.find('automatically generated by Pidl') != -1 or + line.find('Created by: The Qt Meta Object Compiler') != -1 or + line.find('This file was generated') != -1 or + line.find('This filter was automatically generated') != -1 or + line.find('This file is auto generated, do not edit!') != -1): + + f_read.close() + return True + lines_tested = lines_tested + 1 + + # OK, looks like a hand-written file! + f_read.close() + return False + + +def find_macros(filename): + macros = {} + with open(filename, 'r', encoding="utf8") as f: + contents = f.read() + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + matches = re.finditer( r'#define\s*([A-Z0-9_]*)\s*([0-9xa-fA-F]*)\n', contents) + for m in matches: + # Store this mapping. + macros[m.group(1)] = m.group(2) + return macros + + +# Look for hf items (i.e. full item to be registered) in a dissector file. +def find_items(filename, macros, value_strings, range_strings, + check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False): + is_generated = isGeneratedFile(filename) + items = {} + with open(filename, 'r', encoding="utf8") as f: + contents = f.read() + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + # N.B. re extends all the way to HFILL to avoid greedy matching + # TODO: fix a problem where re can't cope with mask that involve a macro with commas in it... + matches = re.finditer( r'.*\{\s*\&(hf_[a-z_A-Z0-9]*)\s*,\s*{\s*\"(.*?)\"\s*,\s*\"(.*?)\"\s*,\s*(.*?)\s*,\s*([0-9A-Z_\|\s]*?)\s*,\s*(.*?)\s*,\s*(.*?)\s*,\s*([a-zA-Z0-9\W\s_\u00f6\u00e4]*?)\s*,\s*HFILL', contents) + for m in matches: + # Store this item. 
+ hf = m.group(1) + + blurb = m.group(8) + if blurb.startswith('"'): + blurb = blurb[1:-1] + + items[hf] = Item(filename, hf, filter=m.group(3), label=m.group(2), item_type=m.group(4), + display=m.group(5), + strings=m.group(6), + macros=macros, + value_strings=value_strings, + range_strings=range_strings, + mask=m.group(7), + blurb=blurb, + check_mask=check_mask, + mask_exact_width=mask_exact_width, + check_label=check_label, + check_consecutive=(not is_generated and check_consecutive)) + return items + + +# Looking for args to ..add_bitmask_..() calls that are not NULL-terminated or have repeated items. +# TODO: some dissectors have similar-looking hf arrays for other reasons, so need to cross-reference with +# the 6th arg of ..add_bitmask_..() calls... +# TODO: return items (rather than local checks) from here so can be checked against list of calls for given filename +def find_field_arrays(filename, all_fields, all_hf): + field_entries = {} + global warnings_found + with open(filename, 'r', encoding="utf8") as f: + contents = f.read() + # Remove comments so as not to trip up RE. + contents = removeComments(contents) + + # Find definition of hf array + matches = re.finditer(r'static\s*g?int\s*\*\s*const\s+([a-zA-Z0-9_]*)\s*\[\]\s*\=\s*\{([a-zA-Z0-9,_\&\s]*)\}', contents) + for m in matches: + name = m.group(1) + # Ignore if not used in a call to an _add_bitmask_ API + if not name in all_fields: + continue + + fields_text = m.group(2) + fields_text = fields_text.replace('&', '') + fields_text = fields_text.replace(',', '') + + # Get list of each hf field in the array + fields = fields_text.split() + + if fields[0].startswith('ett_'): + continue + if fields[-1].find('NULL') == -1 and fields[-1] != '0': + print('Warning:', filename, name, 'is not NULL-terminated - {', ', '.join(fields), '}') + warnings_found += 1 + continue + + # Do any hf items reappear? + seen_fields = set() + for f in fields: + if f in seen_fields: + print(filename, name, f, 'already added!') + warnings_found += 1 + seen_fields.add(f) + + # Check for duplicated flags among entries.. + combined_mask = 0x0 + for f in fields[0:-1]: + if f in all_hf: + new_mask = all_hf[f].mask_value + if new_mask & combined_mask: + print('Warning:', filename, name, 'has overlapping mask - {', ', '.join(fields), '} combined currently', hex(combined_mask), f, 'adds', hex(new_mask)) + warnings_found += 1 + combined_mask |= new_mask + + # Make sure all entries have the same width + set_field_width = None + for f in fields[0:-1]: + if f in all_hf: + new_field_width = all_hf[f].get_field_width_in_bits() + if set_field_width is not None and new_field_width != set_field_width: + # Its not uncommon for fields to be used in multiple sets, some of which can be different widths.. 
+ print('Note:', filename, name, 'set items not all same width - {', ', '.join(fields), '} seen', set_field_width, 'now', new_field_width) + set_field_width = new_field_width + + # Add entry to table + field_entries[name] = (fields[0:-1], combined_mask) + + return field_entries + +def find_item_declarations(filename): + items = set() + + with open(filename, 'r', encoding="utf8") as f: + lines = f.read().splitlines() + p = re.compile(r'^static int (hf_[a-zA-Z0-9_]*)\s*\=\s*-1;') + for line in lines: + m = p.search(line) + if m: + items.add(m.group(1)) + return items + +def find_item_extern_declarations(filename): + items = set() + with open(filename, 'r', encoding="utf8") as f: + lines = f.read().splitlines() + p = re.compile(r'^\s*(hf_[a-zA-Z0-9_]*)\s*\=\s*proto_registrar_get_id_byname\s*\(') + for line in lines: + m = p.search(line) + if m: + items.add(m.group(1)) + return items + + +def is_dissector_file(filename): + p = re.compile(r'.*(packet|file)-.*\.c$') + return p.match(filename) + + +def findDissectorFilesInFolder(folder, recursive=False): + dissector_files = [] + + if recursive: + for root, subfolders, files in os.walk(folder): + for f in files: + if should_exit: + return + f = os.path.join(root, f) + dissector_files.append(f) + else: + for f in sorted(os.listdir(folder)): + if should_exit: + return + filename = os.path.join(folder, f) + dissector_files.append(filename) + + return [x for x in filter(is_dissector_file, dissector_files)] + + + +# Run checks on the given dissector file. +def checkFile(filename, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False, + check_missing_items=False, check_bitmask_fields=False, label_vs_filter=False, extra_value_string_checks=False): + # Check file exists - e.g. may have been deleted in a recent commit. + if not os.path.exists(filename): + print(filename, 'does not exist!') + return + + # Find simple macros so can substitute into items and calls. + macros = find_macros(filename) + + # Find (and sanity-check) value_strings + value_strings = findValueStrings(filename, macros, do_extra_checks=extra_value_string_checks) + if extra_value_string_checks: + for name in value_strings: + value_strings[name].extraChecks() + + # Find (and sanity-check) range_strings + range_strings = findRangeStrings(filename, macros, do_extra_checks=extra_value_string_checks) + if extra_value_string_checks: + for name in range_strings: + range_strings[name].extraChecks() + + + + # Find important parts of items. 
+    items_defined = find_items(filename, macros, value_strings, range_strings,
+                               check_mask, mask_exact_width, check_label, check_consecutive)
+    items_extern_declared = {}
+
+    items_declared = {}
+    if check_missing_items:
+        items_declared = find_item_declarations(filename)
+        items_extern_declared = find_item_extern_declarations(filename)
+
+    fields = set()
+
+    # Get 'fields' out of calls
+    for c in apiChecks:
+        c.find_calls(filename, macros)
+        for call in c.calls:
+            # From _add_bitmask() calls
+            if call.fields:
+                fields.add(call.fields)
+
+    # Checking for lists of fields for add_bitmask calls
+    field_arrays = {}
+    if check_bitmask_fields:
+        field_arrays = find_field_arrays(filename, fields, items_defined)
+
+    if check_mask and check_bitmask_fields:
+        for i in items_defined:
+            item = items_defined[i]
+            item.check_full_mask(item.mask, field_arrays)
+            item.check_mask_if_in_field_array(item.mask, field_arrays)
+
+    # Now actually check the calls
+    for c in apiChecks:
+        c.check_against_items(items_defined, items_declared, items_extern_declared, check_missing_items, field_arrays)
+
+
+    if label_vs_filter:
+        matches = 0
+        for hf in items_defined:
+            if items_defined[hf].check_label_vs_filter(reportError=False, reportNumericalMismatch=True):
+                matches += 1
+
+        # Only checking if almost every field does match.
+        checking = len(items_defined) and matches < len(items_defined) and ((matches / len(items_defined)) > 0.93)
+        if checking:
+            print(filename, ':', matches, 'label-vs-filter matches out of', len(items_defined), 'so reporting mismatches')
+            for hf in items_defined:
+                items_defined[hf].check_label_vs_filter(reportError=True, reportNumericalMismatch=False)
+
+
+
+#################################################################
+# Main logic.
+
+# command-line args. Controls which dissector files should be checked.
+# If no args given, will just scan epan/dissectors folder.
+parser = argparse.ArgumentParser(description='Check calls in dissectors')
+parser.add_argument('--file', action='append',
+                    help='specify individual dissector file to test')
+parser.add_argument('--folder', action='store', default='',
+                    help='specify folder to test')
+parser.add_argument('--commits', action='store',
+                    help='last N commits to check')
+parser.add_argument('--open', action='store_true',
+                    help='check open files')
+parser.add_argument('--mask', action='store_true',
+                    help='when set, check mask field too')
+parser.add_argument('--mask-exact-width', action='store_true',
+                    help='when set, check width of mask against field width')
+parser.add_argument('--label', action='store_true',
+                    help='when set, check label field too')
+parser.add_argument('--consecutive', action='store_true',
+                    help='when set, check for copy/paste errors between consecutive items')
+parser.add_argument('--missing-items', action='store_true',
+                    help='when set, look for used items that were never registered')
+parser.add_argument('--check-bitmask-fields', action='store_true',
+                    help='when set, attempt to check arrays of hf items passed to add_bitmask() calls')
+parser.add_argument('--label-vs-filter', action='store_true',
+                    help='when set, check whether label matches last part of filter')
+parser.add_argument('--extra-value-string-checks', action='store_true',
+                    help='when set, do extra checks on parsed value_strings')
+parser.add_argument('--all-checks', action='store_true',
+                    help='when set, apply all checks to selected files')
+
+
+args = parser.parse_args()
+
+# Turn all checks on. 
+
+# Turn all checks on.
+if args.all_checks:
+ args.mask = True
+ args.mask_exact_width = True
+ args.consecutive = True
+ args.check_bitmask_fields = True
+ #args.label = True
+ args.label_vs_filter = True
+ args.extra_value_string_checks = True
+
+if args.check_bitmask_fields:
+ args.mask = True
+
+
+# Get files from wherever command-line args indicate.
+files = []
+if args.file:
+ # Add specified file(s)
+ for f in args.file:
+ if not os.path.isfile(f):
+ print('Chosen file', f, 'does not exist.')
+ exit(1)
+ else:
+ files.append(f)
+elif args.folder:
+ # Add all files from a given folder.
+ folder = args.folder
+ if not os.path.isdir(folder):
+ print('Folder', folder, 'not found!')
+ exit(1)
+ # Find files from folder.
+ print('Looking for files in', folder)
+ files = findDissectorFilesInFolder(folder, recursive=True)
+elif args.commits:
+ # Get files affected by specified number of commits.
+ command = ['git', 'diff', '--name-only', '--diff-filter=d', 'HEAD~' + args.commits]
+ files = [f.decode('utf-8')
+ for f in subprocess.check_output(command).splitlines()]
+ # Will examine dissector files only
+ files = list(filter(lambda f : is_dissector_file(f), files))
+elif args.open:
+ # Unstaged changes.
+ command = ['git', 'diff', '--name-only', '--diff-filter=d']
+ files = [f.decode('utf-8')
+ for f in subprocess.check_output(command).splitlines()]
+ # Only interested in dissector files.
+ files = list(filter(lambda f : is_dissector_file(f), files))
+ # Staged changes.
+ command = ['git', 'diff', '--staged', '--name-only', '--diff-filter=d']
+ files_staged = [f.decode('utf-8')
+ for f in subprocess.check_output(command).splitlines()]
+ # Only interested in dissector files.
+ files_staged = list(filter(lambda f : is_dissector_file(f), files_staged))
+ for f in files_staged:
+ if f not in files:
+ files.append(f)
+else:
+ # Find all dissector files.
+ files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'))
+ files += findDissectorFilesInFolder(os.path.join('plugins', 'epan'), recursive=True)
+
+
+# If scanning a subset of files, list them here.
+print('Examining:')
+if args.file or args.commits or args.open:
+ if files:
+ print(' '.join(files), '\n')
+ else:
+ print('No files to check.\n')
+else:
+ print('All dissector modules\n')
+
+
+# Now check the files.
+for f in files:
+ if should_exit:
+ exit(1)
+ checkFile(f, check_mask=args.mask, mask_exact_width=args.mask_exact_width, check_label=args.label,
+ check_consecutive=args.consecutive, check_missing_items=args.missing_items,
+ check_bitmask_fields=args.check_bitmask_fields, label_vs_filter=args.label_vs_filter,
+ extra_value_string_checks=args.extra_value_string_checks)
+
+ # Do checks against all calls.
+ if args.consecutive:
+ combined_calls = CombinedCallsCheck(f, apiChecks)
+ # This hasn't really found any issues, but shows lots of false positives (which are difficult to investigate)
+ #combined_calls.check_consecutive_item_calls()
+
+
+# Show summary.
+print(warnings_found, 'warnings')
+if errors_found:
+ print(errors_found, 'errors')
+ exit(1)
diff --git a/tools/check_val_to_str.py b/tools/check_val_to_str.py
new file mode 100755
index 0000000..417655c
--- /dev/null
+++ b/tools/check_val_to_str.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Scan dissectors for calls to val_to_str() and friends,
+# checking for appropriate format specifier strings in
+# 'unknown' arg.
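+#
+# For example (illustrative, not taken from a real dissector), a call like
+#     val_to_str(type, type_vals, "Unknown")
+# would be flagged because the 'unknown' fallback string cannot show the
+# unmatched value, whereas
+#     val_to_str(type, type_vals, "Unknown (0x%02x)")
+# contains a format specifier and passes.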
+# TODO:
+# - more detailed format specifier checking (check letter, that there is only 1)
+# - scan conformance (.cnf) files for ASN1 dissectors?
+
+import os
+import re
+import subprocess
+import argparse
+import signal
+
+
+# Try to exit soon after Ctrl-C is pressed.
+should_exit = False
+
+def signal_handler(sig, frame):
+ global should_exit
+ should_exit = True
+ print('You pressed Ctrl+C - exiting')
+
+signal.signal(signal.SIGINT, signal_handler)
+
+
+# Test for whether the given file was automatically generated.
+def isGeneratedFile(filename):
+ # Check file exists - e.g. may have been deleted in a recent commit.
+ if not os.path.exists(filename):
+ return False
+
+ # Open file
+ f_read = open(os.path.join(filename), 'r', encoding="utf8")
+ lines_tested = 0
+ for line in f_read:
+ # The comment saying that it's generated is near the top, so give up once
+ # we get a few lines down.
+ if lines_tested > 10:
+ f_read.close()
+ return False
+ if (line.find('Generated automatically') != -1 or
+ line.find('Generated Automatically') != -1 or
+ line.find('Autogenerated from') != -1 or
+ line.find('is autogenerated') != -1 or
+ line.find('automatically generated by Pidl') != -1 or
+ line.find('Created by: The Qt Meta Object Compiler') != -1 or
+ line.find('This file was generated') != -1 or
+ line.find('This filter was automatically generated') != -1 or
+ line.find('This file is auto generated, do not edit!') != -1 or
+ line.find('This file is auto generated') != -1):
+
+ f_read.close()
+ return True
+ lines_tested = lines_tested + 1
+
+ # OK, looks like a hand-written file!
+ f_read.close()
+ return False
+
+
+
+def removeComments(code_string):
+ code_string = re.sub(re.compile(r"/\*.*?\*/",re.DOTALL ) ,"" ,code_string) # C-style comment
+ code_string = re.sub(re.compile(r"//.*?\n" ) ,"" ,code_string) # C++-style comment
+ return code_string
+
+
+def is_dissector_file(filename):
+ p = re.compile(r'.*packet-.*\.c')
+ return p.match(filename)
+
+def findDissectorFilesInFolder(folder, recursive=False):
+ dissector_files = []
+
+ if recursive:
+ for root, subfolders, files in os.walk(folder):
+ for f in files:
+ if should_exit:
+ return
+ f = os.path.join(root, f)
+ dissector_files.append(f)
+ else:
+ for f in sorted(os.listdir(folder)):
+ if should_exit:
+ return
+ filename = os.path.join(folder, f)
+ dissector_files.append(filename)
+
+ return [x for x in filter(is_dissector_file, dissector_files)]
+
+
+
+warnings_found = 0
+errors_found = 0
+
+# Check the given dissector file.
+def checkFile(filename):
+ global warnings_found
+ global errors_found
+
+ # Check file exists - e.g. may have been deleted in a recent commit.
+ if not os.path.exists(filename):
+ print(filename, 'does not exist!')
+ return
+
+ with open(filename, 'r', encoding="utf8") as f:
+ contents = f.read()
+
+ # Remove comments so as not to trip up RE.
+ contents = removeComments(contents)
+
+ matches = re.finditer(r'(?
+
+#
+# Copyright 2011 Michael Mann (see AUTHORS file)
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+#
+# Example:
+# ~/work/wireshark/trunk/epan/dissectors> ../../tools/checkfiltername.pl packet-3com-xns.c
+# packet-3com-xns.c (2 (of 2) fields)
+# 102 3comxns.type doesn't match PROTOABBREV of 3com-xns
+# 106 3comxns.type doesn't match PROTOABBREV of 3com-xns
+#
+# or checkfiltername.pl packet-*.c, which will check all the dissector files.
+#
+#
+
+use warnings;
+use strict;
+use Getopt::Long;
+
+my @elements;
+my @elements_dup;
+my @protocols;
+my %filters;
+my %expert_filters;
+my @acceptedprefixes = ("dcerpc-");
+my @asn1automatedfilelist;
+my @dcerpcautomatedfilelist;
+my @idl2wrsautomatedfilelist;
+my @filemanipulationfilelist;
+my @prefixfilelist;
+my @nofieldfilelist;
+my %unique;
+my @uniquefilelist;
+my @noregprotocolfilelist;
+my @periodinfilternamefilelist;
+
+my $showlinenoFlag = '';
+my $showautomatedFlag = '';
+
+my $state = "";
+# "s_unknown",
+# "s_start",
+# "s_in_hf_register_info",
+# "s_hf_register_info_entry",
+# "s_header_field_info_entry",
+# "s_header_field_info_entry_start",
+# "s_header_field_info_entry_name",
+# "s_header_field_info_entry_abbrev",
+# "s_header_field_info_entry_abbrev_end",
+# "s_start_expert",
+# "s_in_ei_register_info",
+# "s_ei_register_info_entry",
+# "s_ei_register_info_entry_start",
+# "s_ei_register_info_entry_abbrev_end",
+# "s_nofields"
+
+my $restofline;
+my $filecount = 0;
+my $currfile = "";
+my $protabbrev = "";
+my $protabbrev_index;
+my $PFNAME_value = "";
+my $linenumber = 1;
+my $totalerrorcount = 0;
+my $errorfilecount = 0;
+my $onefield = 0;
+my $nofields = 0;
+my $noperiod = 0;
+my $noregprotocol = 1;
+my $automated = 0;
+my $more_tokens;
+my $showall = 0;
+my $debug = 0; # debug output level, used below as '$debug>1 && print ...'
+
+my $comment = 0;
+
+sub checkprotoabbrev {
+ my $abbrev = "";
+ my $abbrevpos;
+ my $proto_abbrevpos1;
+ my $proto_abbrevpos2;
+ my $afterabbrev = "";
+ my $check_dup_abbrev = "";
+ my $modprotabbrev = "";
+ my $errorline = 0;
+ my $prefix;
+
+ if (($automated == 0) || ($showall == 1)) {
+ $abbrevpos = index($_[0], ".");
+ if ($abbrevpos == -1) {
+ $abbrev = $_[0];
+ }
+ else {
+ $abbrev = substr($_[0], 0, $abbrevpos);
+ $afterabbrev = substr($_[0], $abbrevpos+1, length($_[0])-$abbrevpos);
+ $check_dup_abbrev = $afterabbrev;
+ $afterabbrev = substr($afterabbrev, 0, length($abbrev));
+ }
+
+ if ($abbrev ne $protabbrev) {
+ $errorline = 1;
+
+ #check if there is a supported protocol that matches the abbrev.
+ #This may be a case of filename != PROTOABBREV + foreach (@protocols) { + if ($abbrev eq $_) { + $errorline = 0; + } elsif (index($_, ".") != -1) { + + #compare from start of string for each period found + $proto_abbrevpos1 = 0; + while ((($proto_abbrevpos2 = index($_, ".", $proto_abbrevpos1)) != -1) && + ($errorline == 1)) { + if ($abbrev eq substr($_, 0, $proto_abbrevpos2)) { + $errorline = 0; + } + + $proto_abbrevpos1 = $proto_abbrevpos2+1; + } + } + } + } + + # find any underscores that preface or follow a period + if (((index($_[0], "._") >= 0) || (index($_[0], "_.") >= 0)) && + #ASN.1 dissectors can intentionally generating this field name, so don't fault the dissector + (index($_[0], "_untag_item_element") < 0)) { + if ($showlinenoFlag) { + push(@elements, "$_[1] $_[0] contains an unnecessary \'_\'\n"); + } else { + push(@elements, "$_[0] contains an unnecessary \'_\'\n"); + } + } + + if (($errorline == 1) && ($showall == 0)) { + #try some "accepted" variations of PROTOABBREV + + #replace '-' with '_' + $modprotabbrev = $protabbrev; + $modprotabbrev =~ s/-/_/g; + if ($abbrev eq $modprotabbrev) { + $errorline = 0; + } + + #remove '-' + if ($errorline == 1) { + $modprotabbrev = $protabbrev; + $modprotabbrev =~ s/-//g; + if ($abbrev eq $modprotabbrev) { + $errorline = 0; + } + } + + #remove '_' + if ($errorline == 1) { + $modprotabbrev = $protabbrev; + $modprotabbrev =~ s/_//g; + if ($abbrev eq $modprotabbrev) { + $errorline = 0; + } + } + + if ($errorline == 1) { + #remove any "accepted" prefix to see if there is still a problem + foreach (@acceptedprefixes) { + if ($protabbrev =~ /^$_/) { + $modprotabbrev = substr($protabbrev, length($_)); + if ($abbrev eq $modprotabbrev) { + push(@prefixfilelist, "$currfile\n"); + $errorline = 0; + } + } + } + } + else { + push(@filemanipulationfilelist, "$currfile\n"); + } + + #now check the acceptable "fields from a different protocol" + if ($errorline == 1) { + if (is_from_other_protocol_allowed($_[0], $currfile) == 1) { + $errorline = 0; + } + } + + #now check the acceptable "fields that include a version number" + if ($errorline == 1) { + if (is_protocol_version_allowed($_[0], $currfile) == 1) { + $errorline = 0; + } + } + } + + if ($errorline == 1) { + $debug>1 && print "$_[1] $_[0] doesn't match PROTOABBREV of $protabbrev\n"; + if ($showlinenoFlag) { + push(@elements, "$_[1] $_[0] doesn't match PROTOABBREV of $protabbrev\n"); + } else { + push(@elements, "$_[0] doesn't match PROTOABBREV of $protabbrev\n"); + } + } + + if (($abbrev ne "") && (lc($abbrev) eq lc($afterabbrev))) { + # Allow ASN.1 generated files to duplicate part of proto name + if ((!(grep {$currfile eq $_ } @asn1automatedfilelist)) && + # Check allowed list + (is_proto_dup_allowed($abbrev, $check_dup_abbrev) == 0)) { + if ($showlinenoFlag) { + push(@elements_dup, "$_[1] $_[0] duplicates PROTOABBREV of $abbrev\n"); + } else { + push(@elements_dup, "$_[0] duplicates PROTOABBREV of $abbrev\n"); + } + } + } + } +} + +sub printprevfile { + my $totalfields = keys(%filters); + my $count_ele; + my $count_dup; + my $total_count; + + foreach (sort keys %filters) { + checkprotoabbrev ($filters{$_}, $_); + } + + foreach (sort keys %expert_filters) { + checkprotoabbrev ($expert_filters{$_}, $_); + } + + $count_ele = @elements; + $count_dup = @elements_dup; + $total_count = $count_ele+$count_dup; + + if ($noregprotocol == 1) { + #if no protocol is registered, only worry about duplicates + if ($currfile ne "") { + push(@noregprotocolfilelist, "$currfile\n"); + } + + if ($count_dup > 0) { + 
$errorfilecount++; + $totalerrorcount += $count_dup; + } + + if (($showall == 1) || ($count_dup > 0)) { + print "\n\n$currfile - NO PROTOCOL REGISTERED\n"; + if ($showall == 1) { + #everything is included, so count all errors + $totalerrorcount += $count_ele; + if (($count_ele > 0) && ($count_dup == 0)) { + $errorfilecount++; + } + + foreach (@elements) { + print $_; + } + } + foreach (@elements_dup) { + print $_; + } + } + } else { + if ($total_count > 0) { + $errorfilecount++; + $totalerrorcount += $total_count; + } + + if (($automated == 0) || ($showall == 1)) { + if ($total_count > 0) { + if ($automated == 1) { + if ($showall == 1) { + print "\n\n$currfile - AUTOMATED ($total_count (of $totalfields) fields)\n"; + } + } else { + print "\n\n$currfile ($total_count (of $totalfields) fields)\n"; + } + + foreach (@elements) { + print $_; + } + foreach (@elements_dup) { + print $_; + } + } + + if ((($nofields) || ($totalfields == 0)) && ($currfile ne "")) { + if ($showall == 1) { + print "\n\n$currfile - NO FIELDS\n"; + } + push(@nofieldfilelist, "$currfile\n"); + } + } + } +} + +#-------------------------------------------------------------------- +# This is a list of dissectors that intentionally have filter names +# where the second segment duplicates (at least partially) the name +# of the first. The most common case is in ASN.1 dissectors, but +# those can be dealt with by looking at the first few lines of the +# dissector. This list has been vetted and justification will need +# to be provided to add to it. Acknowledge these dissectors aren't +# a problem for the pre-commit script +#-------------------------------------------------------------------- +sub is_proto_dup_allowed { + if (($_[0] eq "amf") && (index($_[1], "amf0") >= 0)) {return 1;} + if (($_[0] eq "amf") && (index($_[1], "amf3") >= 0)) {return 1;} + if (($_[0] eq "amqp") && (index($_[1], "amqp") >= 0)) {return 1;} + if (($_[0] eq "bat") && (index($_[1], "batman") >= 0)) {return 1;} + if (($_[0] eq "browser") && (index($_[1], "browser_") >= 0)) {return 1;} + if (($_[0] eq "data") && (index($_[1], "data") >= 0)) {return 1;} + if (($_[0] eq "dlsw") && (index($_[1], "dlsw_version") >= 0)) {return 1;} + if (($_[0] eq "dns") && (index($_[1], "dnskey") >= 0)) {return 1;} + if (($_[0] eq "ecmp") && (index($_[1], "ecmp_") >= 0)) {return 1;} + if (($_[0] eq "exported_pdu") && (index($_[1], "exported_pdu") >= 0)) {return 1;} + if (($_[0] eq "fc") && (index($_[1], "fctl") >= 0)) {return 1;} + if (($_[0] eq "fcs") && (index($_[1], "fcsmask") >= 0)) {return 1;} + if (($_[0] eq "fmp") && (index($_[1], "fmp") >= 0)) {return 1;} + if (($_[0] eq "fr") && (index($_[1], "frame_relay") >= 0)) {return 1;} + if (($_[0] eq "lustre") && (index($_[1], "lustre_") >= 0)) {return 1;} + if (($_[0] eq "mac") && (index($_[1], "macd") >= 0)) {return 1;} + if (($_[0] eq "mac") && (index($_[1], "macis") >= 0)) {return 1;} + if (($_[0] eq "mih") && (index($_[1], "mihf") >= 0)) {return 1;} + if (($_[0] eq "mih") && (index($_[1], "mihcap") >= 0)) {return 1;} + if (($_[0] eq "ncp") && (index($_[1], "ncp") >= 0)) {return 1;} + if (($_[0] eq "nfs") && (index($_[1], "nfs") >= 0)) {return 1;} + if (($_[0] eq "oxid") && (index($_[1], "oxid") >= 0)) {return 1;} + if (($_[0] eq "rquota") && (index($_[1], "rquota") >= 0)) {return 1;} + if (($_[0] eq "pfcp") && (index($_[1], "pfcp") >= 0)) {return 1;} + if (($_[0] eq "sm") && (index($_[1], "sm_") >= 0)) {return 1;} + if (($_[0] eq "smpp") && (index($_[1], "smppplus") >= 0)) {return 1;} + if (($_[0] eq "spray") && 
(index($_[1], "sprayarr") >= 0)) {return 1;} + if (($_[0] eq "stat") && (index($_[1], "stat_") >= 0)) {return 1;} + if (($_[0] eq "stat") && (index($_[1], "state") >= 0)) {return 1;} + if (($_[0] eq "tds") && (index($_[1], "tds_") >= 0)) {return 1;} + if (($_[0] eq "time") && (index($_[1], "time") >= 0)) {return 1;} + if (($_[0] eq "tn3270") && (index($_[1], "tn3270e") >= 0)) {return 1;} + if (($_[0] eq "usb") && (index($_[1], "usb") >= 0)) {return 1;} + if (($_[0] eq "xml") && (index($_[1], "xml") >= 0)) {return 1;} + + return 0; +} + +#-------------------------------------------------------------------- +# This is a list of dissectors that intentionally have filter names +# shared with other dissectors. This list has been vetted and +# justification will need to be provided to add to it. +# Acknowledge these dissectors aren't a problem for the pre-commit script +#-------------------------------------------------------------------- +sub is_from_other_protocol_allowed { + my $proto_filename; + my $dir_index = rindex($_[1], "\\"); + + #handle directory names on all platforms + if ($dir_index < 0) { + $dir_index = rindex($_[1], "/"); + } + + if ($dir_index < 0) { + $proto_filename = $_[1]; + } + else { + $proto_filename = substr($_[1], $dir_index+1); + } + + # XXX - may be faster to hash this (note 1-many relationship)? + if (($proto_filename eq "packet-atalk.c") && (index($_[0], "llc") >= 0)) {return 1;} + if (($proto_filename eq "packet-awdl.c") && (index($_[0], "llc") >= 0)) {return 1;} + if (($proto_filename eq "packet-bpdu.c") && (index($_[0], "mstp") >= 0)) {return 1;} + if (($proto_filename eq "packet-bssap.c") && (index($_[0], "bsap") >= 0)) {return 1;} + if (($proto_filename eq "packet-caneth.c") && (index($_[0], "can") >= 0)) {return 1;} + if (($proto_filename eq "packet-cimetrics.c") && (index($_[0], "llc") >= 0)) {return 1;} + if (($proto_filename eq "packet-cipsafety.c") && (index($_[0], "cip") >= 0)) {return 1;} + if (($proto_filename eq "packet-cipsafety.c") && (index($_[0], "enip") >= 0)) {return 1;} + if (($proto_filename eq "packet-dcerpc-netlogon.c") && (index($_[0], "ntlmssp") >= 0)) {return 1;} + if (($proto_filename eq "packet-dcom-oxid.c") && (index($_[0], "dcom") >= 0)) {return 1;} + if (($proto_filename eq "packet-dvb-data-mpe.c") && (index($_[0], "mpeg_sect") >= 0)) {return 1;} + if (($proto_filename eq "packet-dvb-ipdc.c") && (index($_[0], "ipdc") >= 0)) {return 1;} + if (($proto_filename eq "packet-enip.c") && (index($_[0], "cip") >= 0)) {return 1;} + if (($proto_filename eq "packet-extreme.c") && (index($_[0], "llc") >= 0)) {return 1;} + if (($proto_filename eq "packet-fmp_notify.c") && (index($_[0], "fmp") >= 0)) {return 1;} + if (($proto_filename eq "packet-foundry.c") && (index($_[0], "llc") >= 0)) {return 1;} + if (($proto_filename eq "packet-glusterfs.c") && (index($_[0], "gluster") >= 0)) {return 1;} + if (($proto_filename eq "packet-h248_annex_e.c") && (index($_[0], "h248") >= 0)) {return 1;} + if (($proto_filename eq "packet-h248_q1950.c") && (index($_[0], "h248") >= 0)) {return 1;} + if (($proto_filename eq "packet-ieee1722.c") && (index($_[0], "can") >= 0)) {return 1;} + if (($proto_filename eq "packet-ieee80211.c") && (index($_[0], "eapol") >= 0)) {return 1;} + if (($proto_filename eq "packet-ieee80211-radio.c") && (index($_[0], "wlan") >= 0)) {return 1;} + if (($proto_filename eq "packet-ieee80211-wlancap.c") && (index($_[0], "wlan") >= 0)) {return 1;} + if (($proto_filename eq "packet-ieee802154.c") && (index($_[0], "wpan") >= 0)) {return 1;} + if 
(($proto_filename eq "packet-isup.c") && (index($_[0], "ansi_isup") >= 0)) {return 1;} + if (($proto_filename eq "packet-isup.c") && (index($_[0], "bat_ase") >= 0)) {return 1;} + if (($proto_filename eq "packet-isup.c") && (index($_[0], "nsap") >= 0)) {return 1;} + if (($proto_filename eq "packet-isup.c") && (index($_[0], "x213") >= 0)) {return 1;} + if (($proto_filename eq "packet-iwarp-ddp-rdmap.c") && (index($_[0], "iwarp_ddp") >= 0)) {return 1;} + if (($proto_filename eq "packet-iwarp-ddp-rdmap.c") && (index($_[0], "iwarp_rdma") >= 0)) {return 1;} + if (($proto_filename eq "packet-k12.c") && (index($_[0], "aal2") >= 0)) {return 1;} + if (($proto_filename eq "packet-k12.c") && (index($_[0], "atm") >= 0)) {return 1;} + if (($proto_filename eq "packet-m3ua.c") && (index($_[0], "mtp3") >= 0)) {return 1;} + if (($proto_filename eq "packet-mle.c") && (index($_[0], "wpan") >= 0)) {return 1;} + if (($proto_filename eq "packet-mpeg-dsmcc.c") && (index($_[0], "mpeg_sect") >= 0)) {return 1;} + if (($proto_filename eq "packet-mpeg-dsmcc.c") && (index($_[0], "etv.dsmcc") >= 0)) {return 1;} + if (($proto_filename eq "packet-mpeg1.c") && (index($_[0], "rtp.payload_mpeg_") >= 0)) {return 1;} + if (($proto_filename eq "packet-mysql.c") && (index($_[0], "mariadb") >= 0)) {return 1;} + if (($proto_filename eq "packet-ndps.c") && (index($_[0], "spx.ndps_") >= 0)) {return 1;} + if (($proto_filename eq "packet-pw-atm.c") && (index($_[0], "atm") >= 0)) {return 1;} + if (($proto_filename eq "packet-pw-atm.c") && (index($_[0], "pw") >= 0)) {return 1;} + if (($proto_filename eq "packet-scsi.c") && (index($_[0], "scsi_sbc") >= 0)) {return 1;} + if (($proto_filename eq "packet-sndcp-xid.c") && (index($_[0], "llcgprs") >= 0)) {return 1;} + if (($proto_filename eq "packet-wlccp.c") && (index($_[0], "llc") >= 0)) {return 1;} + if (($proto_filename eq "packet-wps.c") && (index($_[0], "eap") >= 0)) {return 1;} + if (($proto_filename eq "packet-wsp.c") && (index($_[0], "wap") >= 0)) {return 1;} + if (($proto_filename eq "packet-xot.c") && (index($_[0], "x25") >= 0)) {return 1;} + if (($proto_filename eq "packet-zbee-zcl-misc.c") && (index($_[0], "zbee_zcl_hvac") >= 0)) {return 1;} + if (($proto_filename eq "packet-zbee-zcl-misc.c") && (index($_[0], "zbee_zcl_ias") >= 0)) {return 1;} + + #Understand why, but I think it could be prefixed with "dissector" + #prefix (which isn't necessarily "protocol") + if (($proto_filename eq "packet-rtcp.c") && (index($_[0], "srtcp") >= 0)) {return 1;} + if (($proto_filename eq "packet-rtp.c") && (index($_[0], "srtp") >= 0)) {return 1;} + if (($proto_filename eq "packet-dcom-cba-acco.c") && (index($_[0], "cba") >= 0)) {return 1;} + if (($proto_filename eq "packet-dcom-cba.c") && (index($_[0], "cba") >= 0)) {return 1;} + + #XXX - HACK to get around nested "s in field name + if (($proto_filename eq "packet-gsm_sim.c") && (index($_[0], "e\\") >= 0)) {return 1;} + + return 0; +} + +#-------------------------------------------------------------------- +# This is a list of dissectors that use their (protocol) version number +# as part of the first display filter segment, which checkfiltername +# usually complains about. 
Manually allow them so that they can pass +# pre-commit script +#-------------------------------------------------------------------- +sub is_protocol_version_allowed { + my $proto_filename; + my $dir_index = rindex($_[1], "\\"); + + #handle directory names on all platforms + if ($dir_index < 0) { + $dir_index = rindex($_[1], "/"); + } + + if ($dir_index < 0) { + $proto_filename = $_[1]; + } + else { + $proto_filename = substr($_[1], $dir_index+1); + } + + # XXX - may be faster to hash this? + if (($proto_filename eq "packet-ehs.c") && (index($_[0], "ehs2") >= 0)) {return 1;} + if (($proto_filename eq "packet-hsrp.c") && (index($_[0], "hsrp2") >= 0)) {return 1;} + if (($proto_filename eq "packet-ipv6.c") && (index($_[0], "ip") >= 0)) {return 1;} + if (($proto_filename eq "packet-openflow_v1.c") && (index($_[0], "openflow") >= 0)) {return 1;} + if (($proto_filename eq "packet-rtnet.c") && (index($_[0], "tdma-v1") >= 0)) {return 1;} + if (($proto_filename eq "packet-scsi-osd.c") && (index($_[0], "scsi_osd2") >= 0)) {return 1;} + if (($proto_filename eq "packet-sflow.c") && (index($_[0], "sflow_5") >= 0)) {return 1;} + if (($proto_filename eq "packet-sflow.c") && (index($_[0], "sflow_245") >= 0)) {return 1;} + if (($proto_filename eq "packet-tipc.c") && (index($_[0], "tipcv2") >= 0)) {return 1;} + if (($proto_filename eq "packet-bluetooth.c") && (index($_[0], "llc.bluetooth_pid") >= 0)) {return 1;} + + return 0; +} + +# --------------------------------------------------------------------- +# +# MAIN +# +GetOptions( + 'showlineno' => \$showlinenoFlag, + 'showautomated' => \$showautomatedFlag, + ); + +while (<>) { + if ($currfile !~ /$ARGV/) { + &printprevfile(); + + # New file - reset array and state + $filecount++; + $currfile = $ARGV; + + #determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c or (dirs)/file-PROTABBREV.c + $protabbrev_index = rindex($currfile, "packet-"); + if ($protabbrev_index == -1) { + $protabbrev_index = rindex($currfile, "file-"); + if ($protabbrev_index == -1) { + #ignore "non-dissector" files + next; + } + + $protabbrev = substr($currfile, $protabbrev_index+length("file-")); + $protabbrev_index = rindex($protabbrev, "."); + if ($protabbrev_index == -1) { + print "$currfile doesn't fit format of file-PROTABBREV.c\n"; + next; + } + } else { + $protabbrev = substr($currfile, $protabbrev_index+length("packet-")); + $protabbrev_index = rindex($protabbrev, "."); + if ($protabbrev_index == -1) { + print "$currfile doesn't fit format of packet-PROTABBREV.c\n"; + next; + } + } + $protabbrev = substr($protabbrev, 0, $protabbrev_index); + + $PFNAME_value = ""; + $noregprotocol = 1; + $automated = 0; + $nofields = 0; + $onefield = 0; + $noperiod = 0; + $linenumber = 1; + %filters = ( ); + %expert_filters = ( ); + @protocols = ( ); + @elements = ( ); + @elements_dup = ( ); + $state = "s_unknown"; + } + + if (($automated == 0) && ($showautomatedFlag eq "")) { + #DCERPC automated files + if ($_ =~ "DO NOT EDIT") { + push(@dcerpcautomatedfilelist, "$currfile\n"); + $automated = 1; + next; + } + #ASN.1 automated files + elsif ($_ =~ "Generated automatically by the ASN.1 to Wireshark dissector compiler") { + push(@asn1automatedfilelist, "$currfile\n"); + $automated = 1; + next; + } + #idl2wrs automated files + elsif ($_ =~ "Autogenerated from idl2wrs") { + push(@idl2wrsautomatedfilelist, "$currfile\n"); + $automated = 1; + next; + } + } + + # opening then closing comment + if (/(.*?)\/\*.*\*\/(.*)/) { + $comment = 0; + $_ = "$1$2"; + # closing then 
opening comment + } elsif (/.*?\*\/(.*?)\/\*/) { + $comment = 1; + $_ = "$1"; + # opening comment + } elsif (/(.*?)\/\*/) { + $comment = 1; + $_ = "$1"; + # closing comment + } elsif (/\*\/(.*?)/) { + $comment = 0; + $_ = "$1"; + } elsif ($comment == 1) { + $linenumber++; + next; + } + # unhandled: more than one complete comment per line + + chomp; + + #proto_register_protocol state machine + $restofline = $_; + $more_tokens = 1; + + #PFNAME is a popular #define for the proto filter name, so use it for testing + if ($restofline =~ /#define\s*PFNAME\s*\"([^\"]*)\"/) { + $PFNAME_value = $1; + $debug>1 && print "PFNAME: '$1'\n"; + } + + until ($more_tokens == 0) { + if (($restofline =~ /proto_register_protocol\s*\((.*)/) || + ($restofline =~ /proto_register_protocol_in_name_only\s*\((.*)/)) { + $noregprotocol = 0; + $restofline = $1; + $state = "s_proto_start"; + } elsif (($state eq "s_proto_start") && ($restofline =~ /^(\s*\"([^\"]*)\"\s*,)\s*(.*)/)) { + $restofline = $3; + $state = "s_proto_long_name"; + $debug>1 && print "proto long name: '$2'\n"; + } elsif (($state eq "s_proto_start") && ($restofline =~ /^(\s*(([\w\d])+)\s*,)\s*(.*)/)) { + $restofline = $4; + $state = "s_proto_long_name"; + $debug>1 && print "proto long name: '$2'\n"; + } elsif (($state eq "s_proto_long_name") && ($restofline =~ /^(\s*\"([^\"]*)\"\s*,)\s*(.*)/)) { + $restofline = $3; + $state = "s_proto_short_name"; + $debug>1 && print "proto short name: '$2'\n"; + } elsif (($state eq "s_proto_long_name") && ($restofline =~ /^(\s*(([\w\d])+)\s*,)\s*(.*)/)) { + $restofline = $4; + $state = "s_proto_short_name"; + $debug>1 && print "proto short name: '$2'\n"; + } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*PFNAME\s*(.*)/)) { + $more_tokens = 0; + $state = "s_proto_filter_name"; + if ((index($PFNAME_value, ".") != -1) && ($noperiod == 0)) { + push(@periodinfilternamefilelist, "$currfile\n"); + $noperiod = 1; + } + push(@protocols, $PFNAME_value); + $debug>1 && print "proto filter name: '$PFNAME_value'\n"; + } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*\"([^\"]*)\"\s*(.*)/)) { + $more_tokens = 0; + $state = "s_proto_filter_name"; + if ((index($1, ".") != -1) && ($noperiod == 0)) { + push(@periodinfilternamefilelist, "$currfile\n"); + $noperiod = 1; + } + push(@protocols, $1); + $debug>1 && print "proto filter name: '$1'\n"; + } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*(([\w\d])+)\s*(.*)/)) { + $more_tokens = 0; + $state = "s_proto_filter_name"; + $debug>1 && print "proto filter name: '$1'\n"; + } else { + $more_tokens = 0; + } + } + + #retrieving display filters state machine + $restofline = $_; + $more_tokens = 1; + until ($more_tokens == 0) { + if ($restofline =~ /\s*static\s*hf_register_info\s*(\w+)\[\](.*)/) { + $restofline = $2; + $state = "s_start"; + $debug>1 && print "$linenumber $state\n"; + } elsif ($restofline =~ /\s*static\s*ei_register_info\s*(\w+)\[\](.*)/) { + $restofline = $2; + $state = "s_start_expert"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_start") && ($restofline =~ /\W+{(.*)/)) { + $restofline = $1; + $state = "s_in_hf_register_info"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_in_hf_register_info") && ($restofline =~ /\W+{(.*)/)) { + $restofline = $1; + $state = "s_hf_register_info_entry"; + $debug>1 && print "$linenumber $state\n"; + $onefield = 1; + } elsif (($state eq "s_in_hf_register_info") && ($restofline =~ /\s*};(.*)/)) { + $restofline = $1; + if ($onefield == 0) { + $debug && print 
"$linenumber NO FIELDS!!!\n"; + $nofields = 1; + $state = "s_nofields"; + $more_tokens = 0; + } else { + $state = "s_unknown"; + } + } elsif (($state eq "s_hf_register_info_entry") && ($restofline =~ /\s*&\s*(hf_\w*(\[w*\])?)\s*,?(.*)/)) { + $restofline = $3; + $debug>1 && print "$linenumber hf_register_info_entry: $1\n"; + $state = "s_header_field_info_entry"; + } elsif (($state eq "s_header_field_info_entry") && ($restofline =~ /\s*{(.*)/)) { + $restofline = $1; + $state = "s_header_field_info_entry_start"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_header_field_info_entry_start") && ($restofline =~ /((\"([^\"]*)\")|(\w+))\s*,(.*)/)) { + $restofline = $5; + $debug>1 && print "$linenumber header_field_info_entry_name: $1\n"; + $state = "s_header_field_info_entry_name"; + } elsif (($state eq "s_header_field_info_entry_name") && ($restofline =~ /\"([^\"]*)\"\s*,?(.*)/)) { + $restofline = $2; + $debug>1 && print "$linenumber header_field_info_entry_abbrev: $1\n"; + $state = "s_header_field_info_entry_abbrev"; + $filters{$linenumber} = $1; + } elsif (($state eq "s_header_field_info_entry_abbrev") && ($restofline =~ /[^}]*}(.*)/)) { + $restofline = $1; + $state = "s_header_field_info_entry_abbrev_end"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_header_field_info_entry_abbrev_end") && ($restofline =~ /[^}]*}(.*)/)) { + $restofline = $1; + $state = "s_in_hf_register_info"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_start_expert") && ($restofline =~ /\W+{(.*)/)) { + $restofline = $1; + $state = "s_in_ei_register_info"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_in_ei_register_info") && ($restofline =~ /\W+{(.*)/)) { + $restofline = $1; + $state = "s_ei_register_info_entry"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_in_ei_register_info") && ($restofline =~ /\s*};(.*)/)) { + $restofline = $1; + $state = "s_unknown"; + } elsif (($state eq "s_ei_register_info_entry") && ($restofline =~ /\s*{(.*)/)) { + $restofline = $1; + $state = "s_ei_register_info_entry_start"; + $debug>1 && print "$linenumber $state\n"; + } elsif (($state eq "s_ei_register_info_entry_start") && ($restofline =~ /\"([^\"]*)\"\s*,(.*)/)) { + $restofline = $2; + $debug>1 && print "$linenumber ei_register_info_entry_abbrev: $1\n"; + $expert_filters{$linenumber} = $1; + $state = "s_ei_register_info_entry_abbrev_end"; + } elsif (($state eq "s_ei_register_info_entry_abbrev_end") && ($restofline =~ /[^}]*}(.*)/)) { + $restofline = $1; + $state = "s_in_ei_register_info"; + $debug>1 && print "$linenumber $state\n"; + } else { + $more_tokens = 0; + } + } + + $linenumber++; +} + +&printprevfile(); + +if ($totalerrorcount > 0) { + print "\n\nTOTAL ERRORS: $totalerrorcount"; + + if ($filecount > 1) { + print " ($errorfilecount files)\n"; + + print "NO FIELDS: " . scalar(@nofieldfilelist) . "\n"; + print "AUTOMATED: " . (scalar(@asn1automatedfilelist) + scalar(@dcerpcautomatedfilelist) + scalar(@idl2wrsautomatedfilelist)) . "\n"; + print "NO PROTOCOL: " . scalar(@noregprotocolfilelist) . 
"\n"; + + print "\nASN.1 AUTOMATED FILE LIST\n"; + foreach (@asn1automatedfilelist) { + print $_; + } + print "\nDCE/RPC AUTOMATED FILE LIST\n"; + foreach (@dcerpcautomatedfilelist) { + print $_; + } + print "\nIDL2WRS AUTOMATED FILE LIST\n"; + foreach (@idl2wrsautomatedfilelist) { + print $_; + } + print "\n\"FILE MANIPULATION\" FILE LIST\n"; + @uniquefilelist = grep{ not $unique{$_}++} @filemanipulationfilelist; + foreach (@uniquefilelist) { + print $_; + } + print "\nREMOVE PREFIX FILE LIST\n"; + @uniquefilelist = grep{ not $unique{$_}++} @prefixfilelist; + foreach (@uniquefilelist) { + print $_; + } + print "\nNO PROTOCOL REGISTERED FILE LIST\n"; + foreach (@noregprotocolfilelist) { + print $_; + } + print "\nNO FIELDS FILE LIST\n"; + foreach (@nofieldfilelist) { + print $_; + } + + print "\nPERIOD IN PROTO FILTER NAME FILE LIST\n"; + foreach (@periodinfilternamefilelist) { + print $_; + } + } else { + print "\n"; + } + + exit(1); # exit 1 if ERROR +} + +__END__ diff --git a/tools/checkhf.pl b/tools/checkhf.pl new file mode 100755 index 0000000..7e01c7e --- /dev/null +++ b/tools/checkhf.pl @@ -0,0 +1,700 @@ +#!/usr/bin/env perl +# +# Copyright 2013, William Meier (See AUTHORS file) +# +# Validate hf_... and ei_... usage for a dissector file; +# +# Usage: checkhf.pl [--debug=?] +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +## Note: This program is a re-implementation of the +## original checkhf.pl written and (C) by Joerg Mayer. +## The overall objective of the new implementation was to reduce +## the number of false positives which occurred with the +## original checkhf.pl +## +## This program can be used to scan original .c source files or source +## files which have been passed through a C pre-processor. +## Operating on pre-processed source files is optimal; There should be +## minimal false positives. +## If the .c input is an original source file there may very well be +## false positives/negatives due to the fact that the hf_... variables & etc +## may be created via macros. +## +## ----- (The following is extracted from the original checkhf.pl with thanks to Joerg) ------- +## Example: +## ~/work/wireshark/trunk/epan/dissectors> ../../tools/checkhf.pl packet-afs.c +## Unused entry: packet-afs.c, hf_afs_ubik_voteend +## Unused entry: packet-afs.c, hf_afs_ubik_errcode +## Unused entry: packet-afs.c, hf_afs_ubik_votetype +## ERROR: NO ARRAY: packet-afs.c, hf_afs_fs_ipaddr +## +## or checkhf.pl packet-*.c, which will check all the dissector files. +## +## NOTE: This tool currently generates false positives! +## +## The "NO ARRAY" messages - if accurate - points to an error that will +## cause (t|wire)shark to report a DISSECTOR_BUG when a packet containing +## this particular element is being dissected. +## +## The "Unused entry" message indicates the opposite: We define an entry but +## never use it (e.g., in a proto_...add... function). +## ------------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------------ +# Main +# +# Logic: +# 1. Clean the input: remove blank lines, comments, quoted strings and code under '#if 0'. +# 2. hf_defs: +# Find (and remove from input) list of hf_... variable +# definitions ('static? g?int hf_... ;') +# 2. hf_array_entries: +# Find (and remove from input) list of hf_... variables +# referenced in the hf[] entries; +# 3. 
hf_usage: +# From the remaining input, extract list of all strings of form hf_... +# (which may include strings which are not actually valid +# hf_... variable references). +# 4. Checks: +# If entries in hf_defs not in hf_usage then "unused" (for static hf_defs only) +# If entries in hf_defs not in hf_array_entries then "ERROR: NO ARRAY"; + +use strict; +use warnings; + +use Getopt::Long; + +my $help_flag = ''; +my $debug = 0; # default: off; 1=cmt; 2=#if0; 3=hf_defs; 4=hf_array_entries; 5=hfusage (See code) + +my $sts = GetOptions( + 'debug=i' => \$debug, + 'help|?' => \$help_flag + ); +if (!$sts || $help_flag || !$ARGV[0]) { + usage(); +} + +my $error = 0; + +while (my $filename = $ARGV[0]) { + shift; + + my ($file_contents); + my (%hf_defs, %hf_static_defs, %hf_array_entries, %hf_usage); + my ($unused_href, $no_array_href); + my (%ei_defs, %ei_static_defs, %ei_array_entries, %ei_usage); + my ($unused_ei, $no_array_ei); + + read_file(\$filename, \$file_contents); + + remove_comments (\$file_contents, $filename); + remove_blank_lines (\$file_contents, $filename); + $file_contents =~ s/^\s+//m; # Remove leading spaces + remove_quoted_strings(\$file_contents, $filename); + remove_if0_code (\$file_contents, $filename); + + find_remove_hf_defs (\$file_contents, $filename, \%hf_defs); + find_remove_hf_array_entries (\$file_contents, $filename, \%hf_array_entries); + find_remove_proto_get_id_hf_assignments(\$file_contents, $filename, \%hf_array_entries); + find_hf_usage (\$file_contents, $filename, \%hf_usage); + + find_remove_ei_defs (\$file_contents, $filename, \%ei_defs); + find_remove_ei_array_entries (\$file_contents, $filename, \%ei_array_entries); + find_ei_usage (\$file_contents, $filename, \%ei_usage); + +# Tests (See above) +# 1. Are all the static hf_defs and ei_defs entries in hf_usage and ei_usage? +# if not: "Unused entry:" +# + + # create a hash containing entries just for the static definitions + @hf_static_defs{grep {$hf_defs{$_} == 0} keys %hf_defs} = (); # All values in the new hash will be undef + @ei_static_defs{grep {$ei_defs{$_} == 0} keys %ei_defs} = (); # All values in the new hash will be undef + + $unused_href = diff_hash(\%hf_static_defs, \%hf_usage); + remove_hf_pid_from_unused_if_add_oui_call(\$file_contents, $filename, $unused_href); + + $unused_ei = diff_hash(\%ei_static_defs, \%ei_usage); + + print_list("Unused href entry: $filename: ", $unused_href); + print_list("Unused ei entry: $filename: ", $unused_ei); + +# 2. Are all the hf_defs and ei_ entries (static and global) in [hf|ei]_array_entries ? +# (Note: if a static hf_def or ei is "unused", don't check for same in [hf|ei]_array_entries) +# if not: "ERROR: NO ARRAY" + +## Checking for missing global defs currently gives false positives +## So: only check static defs for now. +## $no_array_href = diff_hash(\%hf_defs, \%hf_array_entries); + $no_array_href = diff_hash(\%hf_static_defs, \%hf_array_entries); + $no_array_href = diff_hash($no_array_href, $unused_href); # Remove "unused" hf_... from no_array list + $no_array_ei = diff_hash(\%ei_static_defs, \%ei_array_entries); + $no_array_ei = diff_hash($no_array_ei, $unused_ei); # Remove "unused" ei_... from no_array list + + print_list("ERROR: NO ARRAY: $filename: ", $no_array_href); + print_list("ERROR: NO ARRAY: $filename: ", $no_array_ei); + + if ((keys %{$no_array_href}) != 0) { + $error += 1; + } + if ((keys %{$no_array_ei}) != 0) { + $error += 1; + } +} + +exit (($error == 0) ? 
0 : 1); # exit 1 if ERROR + + +# --------------------------------------------------------------------- +# +sub usage { + print "Usage: $0 [--debug=n] Filename [...]\n"; + exit(1); +} + +# --------------------------------------------------------------------- +# action: read contents of a file to specified string +# arg: filename_ref, file_contents_ref + +sub read_file { + my ($filename_ref, $file_contents_ref) = @_; + + die "No such file: \"${$filename_ref}\"\n" if (! -e ${$filename_ref}); + + # delete leading './' + ${$filename_ref} =~ s{ ^ [.] / } {}xmso; + + # Read in the file (ouch, but it's easier that way) + open(my $fci, "<:crlf", ${$filename_ref}) || die("Couldn't open ${$filename_ref}"); + + ${$file_contents_ref} = do { local( $/ ) ; <$fci> } ; + + close($fci); + + return; +} + +# --------------------------------------------------------------------- +# action: Create a hash containing entries in 'a' that are not in 'b' +# arg: a_href, b_href +# returns: pointer to hash + +sub diff_hash { + my ($a_href, $b_href) = @_; + + my %diffs; + + @diffs{grep {! exists $b_href->{$_}} keys %{$a_href}} = (); # All values in the new hash will be undef + + return \%diffs; +} + +# --------------------------------------------------------------------- +# action: print a list +# arg: hdr, list_href + +sub print_list { + my ($hdr, $list_href) = @_; + + print + map {"$hdr$_\n"} + sort + keys %{$list_href}; + + return; +} + +# ------------ +# action: remove blank lines from input string +# arg: code_ref, filename + +sub remove_blank_lines { + my ($code_ref, $filename) = @_; + + ${$code_ref} =~ s{ ^ \s* \n ? } {}xmsog; + + return; +} + +sub get_quoted_str_regex { + # A regex which matches double-quoted strings. + # 's' modifier added so that strings containing a 'line continuation' + # ( \ followed by a new-line) will match. + my $double_quoted_str = qr{ (?: ["] (?: \\. | [^\"\\\n])* ["]) }xmso; + + # A regex which matches single-quoted strings. + my $single_quoted_str = qr{ (?: ['] (?: \\. | [^\'\\\n])* [']) }xmso; + + return qr{ $double_quoted_str | $single_quoted_str }xmso; +} + +# ------------ +# action: remove comments from input string +# arg: code_ref, filename + +sub remove_comments { + my ($code_ref, $filename) = @_; + + # The below Regexp is based on one from: + # https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811 + # It is in the public domain. + # A complicated regex which matches C-style comments. + my $c_comment_regex = qr{ / [*] [^*]* [*]+ (?: [^/*] [^*]* [*]+ )* / }xmso; + + ${$code_ref} =~ s{ $c_comment_regex } {}xmsog; + + # Remove single-line C++-style comments. Be careful not to break up strings + # like "coap://", so match double quoted strings, single quoted characters, + # division operator and other characters before the actual "//" comment. 
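+ # For example (illustrative): in a line such as
+ #     url = "coap://example"; // set default
+ # only the trailing '// set default' is stripped; the "coap://" inside the
+ # string stays intact.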
+ my $quoted_str = get_quoted_str_regex(); + my $cpp_comment_regex = qr{ ^((?: $quoted_str | /(?!/) | [^'"/\n] )*) // .*$ }xm; + ${$code_ref} =~ s{ $cpp_comment_regex } { $1 }xmg; + + ($debug == 1) && print "==> After Remove Comments: code: [$filename]\n${$code_ref}\n===<\n"; + + return; +} + +# ------------ +# action: remove quoted strings from input string +# arg: code_ref, filename + +sub remove_quoted_strings { + my ($code_ref, $filename) = @_; + + my $quoted_str = get_quoted_str_regex(); + ${$code_ref} =~ s{ $quoted_str } {}xmsog; + + ($debug == 1) && print "==> After Remove quoted strings: code: [$filename]\n${$code_ref}\n===<\n"; + + return; +} + +# ------------- +# action: remove '#if 0'd code from the input string +# args codeRef, fileName +# returns: codeRef +# +# Essentially: split the input into blocks of code or lines of #if/#if 0/etc. +# Remove blocks that follow '#if 0' until '#else/#endif' is found. + +{ # block begin + + sub remove_if0_code { + my ($codeRef, $fileName) = @_; + + # Preprocess outputput (ensure trailing LF and no leading WS before '#') + $$codeRef =~ s/^\s*#/#/m; + if ($$codeRef !~ /\n$/) { $$codeRef .= "\n"; } + + # Split into blocks of normal code or lines with conditionals. + my $ifRegExp = qr/if 0|if|else|endif/; + my @blocks = split(/^(#\s*(?:$ifRegExp).*\n)/m, $$codeRef); + + my ($if_lvl, $if0_lvl, $if0) = (0,0,0); + my $lines = ''; + for my $block (@blocks) { + my $if; + if ($block =~ /^#\s*($ifRegExp)/) { + # #if/#if 0/#else/#endif processing + $if = $1; + if ($debug == 99) { + print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl [$if] - $block"); + } + if ($if eq 'if') { + $if_lvl += 1; + } elsif ($if eq 'if 0') { + $if_lvl += 1; + if ($if0_lvl == 0) { + $if0_lvl = $if_lvl; + $if0 = 1; # inside #if 0 + } + } elsif ($if eq 'else') { + if ($if0_lvl == $if_lvl) { + $if0 = 0; + } + } elsif ($if eq 'endif') { + if ($if0_lvl == $if_lvl) { + $if0 = 0; + $if0_lvl = 0; + } + $if_lvl -= 1; + if ($if_lvl < 0) { + die "patsub: #if/#endif mismatch in $fileName" + } + } + } + + if ($debug == 99) { + print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl\n"); + } + # Keep preprocessor lines and blocks that are not enclosed in #if 0 + if ($if or $if0 != 1) { + $lines .= $block; + } + } + $$codeRef = $lines; + + ($debug == 2) && print "==> After Remove if0: code: [$fileName]\n$$codeRef\n===<\n"; + return $codeRef; + } +} # block end + +# --------------------------------------------------------------------- +# action: Add to hash an entry for each +# 'static? g?int hf_...' definition (including array names) +# in the input string. +# The entry value will be 0 for 'static' definitions and 1 for 'global' definitions; +# Remove each definition found from the input string. +# args: code_ref, filename, hf_defs_href +# returns: ref to the hash + +sub find_remove_hf_defs { + my ($code_ref, $filename, $hf_defs_href) = @_; + + # Build pattern to match any of the following + # static? g?int hf_foo = -1; + # static? g?int hf_foo[xxx]; + # static? g?int hf_foo[xxx] = { + + # p1: 'static? g?int hf_foo' + my $p1_regex = qr{ + ^ + \s* + (static \s+)? + g?int + \s+ + (hf_[a-zA-Z0-9_]+) # hf_.. + }xmso; + + # p2a: ' = -1;' + my $p2a_regex = qr{ + \s* = \s* + (?: + - \s* 1 + ) + \s* ; + }xmso; + + # p2b: '[xxx];' or '[xxx] = {' + my $p2b_regex = qr/ + \s* \[ [^\]]+ \] \s* + (?: + = \s* [{] | ; + ) + /xmso; + + my $hf_def_regex = qr{ $p1_regex (?: $p2a_regex | $p2b_regex ) }xmso; + + while (${$code_ref} =~ m{ $hf_def_regex }xmsog) { + #print ">%s< >$2<\n", (defined $1) ? 
$1 ; ""; + $hf_defs_href->{$2} = (defined $1) ? 0 : 1; # 'static' if $1 is defined. + } + ($debug == 3) && debug_print_hash("VD: $filename", $hf_defs_href); # VariableDefinition + + # remove all + ${$code_ref} =~ s{ $hf_def_regex } {}xmsog; + ($debug == 3) && print "==> After remove hf_defs: code: [$filename]\n${$code_ref}\n===<\n"; + + return; +} + +# --------------------------------------------------------------------- +# action: Add to hash an entry (hf_...) for each hf[] entry. +# Remove each hf[] entries found from the input string. +# args: code_ref, filename, hf_array_entries_href + +sub find_remove_hf_array_entries { + my ($code_ref, $filename, $hf_array_entries_href) = @_; + +# hf[] entry regex (to extract an hf_index_name and associated field type) + my $hf_array_entry_regex = qr / + [{] + \s* + & \s* ( [a-zA-Z0-9_]+ ) # &hf + (?: + \s* [[] [^]]+ []] # optional array ref + ) ? + \s* , \s* + [{] + [^}]+ + , \s* + (FT_[a-zA-Z0-9_]+) # field type + \s* , + [^}]+ + , \s* + (?: + HFILL | HF_REF_TYPE_NONE + ) + [^}]* + } + [\s,]* + [}] + /xmso; + + # find all the hf[] entries (searching ${$code_ref}). + while (${$code_ref} =~ m{ $hf_array_entry_regex }xmsog) { + ($debug == 98) && print "+++ $1 $2\n"; + $hf_array_entries_href->{$1} = undef; + } + + ($debug == 4) && debug_print_hash("AE: $filename", $hf_array_entries_href); # ArrayEntry + + # now remove all + ${$code_ref} =~ s{ $hf_array_entry_regex } {}xmsog; + ($debug == 4) && print "==> After remove hf_array_entries: code: [$filename]\n${$code_ref}\n===<\n"; + + return; +} + +# --------------------------------------------------------------------- +# action: Add to hash an entry (hf_...) for each hf_... var +# found in statements of the form: +# 'hf_... = proto_registrar_get_id_byname ...' +# 'hf_... = proto_get_id_by_filtername ...' +# Remove each such statement found from the input string. +# args: code_ref, filename, hf_array_entries_href + +sub find_remove_proto_get_id_hf_assignments { + my ($code_ref, $filename, $hf_array_entries_href) = @_; + + my $_regex = qr{ ( hf_ [a-zA-Z0-9_]+ ) + \s* = \s* + (?: proto_registrar_get_id_byname | proto_get_id_by_filter_name ) + }xmso; + + my @hfvars = ${$code_ref} =~ m{ $_regex }xmsog; + + if (@hfvars == 0) { + return; + } + + # found: + # Sanity check: hf_vars shouldn't already be in hf_array_entries + if (defined @$hf_array_entries_href{@hfvars}) { + printf "? one or more of [@hfvars] initialized via proto_registrar_get_by_name() also in hf[] ??\n"; + } + + # Now: add to hf_array_entries + @$hf_array_entries_href{@hfvars} = (); + + ($debug == 4) && debug_print_hash("PR: $filename", $hf_array_entries_href); + + # remove from input (so not considered as 'usage') + ${$code_ref} =~ s{ $_regex } {}xmsog; + + ($debug == 4) && print "==> After remove proto_registrar_by_name: code: [$filename]\n${$code_ref}\n===<\n"; + + return; +} + +# --------------------------------------------------------------------- +# action: Add to hash all hf_... strings remaining in input string. +# arga: code_ref, filename, hf_usage_href +# return: ref to hf_usage hash +# +# The hash will include *all* strings of form hf_... +# which are in the input string (even strings which +# aren't actually vars). +# We don't care since we'll be checking only +# known valid vars against these strings. + +sub find_hf_usage { + my ($code_ref, $filename, $hf_usage_href) = @_; + + my $hf_usage_regex = qr{ + \b ( hf_[a-zA-Z0-9_]+ ) # hf_... 
+ }xmso; + + while (${$code_ref} =~ m{ $hf_usage_regex }xmsog) { + #print "$1\n"; + $hf_usage_href->{$1} += 1; + } + + ($debug == 5) && debug_print_hash("VU: $filename", $hf_usage_href); # VariableUsage + + return; +} + +# --------------------------------------------------------------------- +# action: Remove from 'unused' hash an instance of a variable named hf_..._pid +# if the source has a call to llc_add_oui() or ieee802a_add_oui(). +# (This is rather a bit of a hack). +# arga: code_ref, filename, unused_href + +sub remove_hf_pid_from_unused_if_add_oui_call { + my ($code_ref, $filename, $unused_href) = @_; + + if ((keys %{$unused_href}) == 0) { + return; + } + + my @hfvars = grep { m/ ^ hf_ [a-zA-Z0-9_]+ _pid $ /xmso} keys %{$unused_href}; + + if ((@hfvars == 0) || (@hfvars > 1)) { + return; # if multiple unused hf_..._pid + } + + if (${$code_ref} !~ m{ llc_add_oui | ieee802a_add_oui }xmso) { + return; + } + + # hf_...pid unused var && a call to ..._add_oui(); delete entry from unused + # XXX: maybe hf_..._pid should really be added to hfUsed ? + delete @$unused_href{@hfvars}; + + return; +} + +# --------------------------------------------------------------------- +# action: Add to hash an entry for each +# 'static? expert_field ei_...' definition (including array names) +# in the input string. +# The entry value will be 0 for 'static' definitions and 1 for 'global' definitions; +# Remove each definition found from the input string. +# args: code_ref, filename, hf_defs_href +# returns: ref to the hash + +sub find_remove_ei_defs { + my ($code_ref, $filename, $ei_defs_eiref) = @_; + + # Build pattern to match any of the following + # static? expert_field ei_foo = -1; + # static? expert_field ei_foo[xxx]; + # static? expert_field ei_foo[xxx] = { + + # p1: 'static? expert_field ei_foo' + my $p1_regex = qr{ + ^ + (static \s+)? + expert_field + \s+ + (ei_[a-zA-Z0-9_]+) # ei_.. + }xmso; + + # p2a: ' = EI_INIT;' + my $p2a_regex = qr{ + \s* = \s* + (?: + EI_INIT + ) + \s* ; + }xmso; + + # p2b: '[xxx];' or '[xxx] = {' + my $p2b_regex = qr/ + \s* \[ [^\]]+ \] \s* + (?: + = \s* [{] | ; + ) + /xmso; + + my $ei_def_regex = qr{ $p1_regex (?: $p2a_regex | $p2b_regex ) }xmso; + + while (${$code_ref} =~ m{ $ei_def_regex }xmsog) { + #print ">%s< >$2<\n", (defined $1) ? $1 ; ""; + $ei_defs_eiref->{$2} = (defined $1) ? 0 : 1; # 'static' if $1 is defined. + } + ($debug == 3) && debug_print_hash("VD: $filename", $ei_defs_eiref); # VariableDefinition + + # remove all + ${$code_ref} =~ s{ $ei_def_regex } {}xmsog; + ($debug == 3) && print "==> After remove ei_defs: code: [$filename]\n${$code_ref}\n===<\n"; + + return; +} + +# --------------------------------------------------------------------- +# action: Add to hash an entry (ei_...) for each ei[] entry. +# Remove each ei[] entries found from the input string. +# args: code_ref, filename, ei_array_entries_href + +sub find_remove_ei_array_entries { + my ($code_ref, $filename, $ei_array_entries_eiref) = @_; + +# ei[] entry regex (to extract an ei_index_name and associated field type) + my $ei_array_entry_regex = qr / + { + \s* + & \s* ( [a-zA-Z0-9_]+ ) # &ei + (?: + \s* [ [^]]+ ] # optional array ref + ) ? + \s* , \s* + { + # \s* "[^"]+" # (filter string has been removed already) + \s* , \s* + PI_[A-Z0-9_]+ # event group + \s* , \s* + PI_[A-Z0-9_]+ # event severity + \s* , + [^,]* # description string (already removed) or NULL + , \s* + EXPFILL + \s* + } + \s* + } + /xs; + + # find all the ei[] entries (searching ${$code_ref}). 
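+ # For reference (an illustrative entry, not from a real dissector), an ei[]
+ # entry in the original source looks roughly like:
+ #   { &ei_foo_bad_len, { "foo.bad_len", PI_MALFORMED, PI_ERROR, "Bad length", EXPFILL }}
+ # (by this point the quoted filter and description strings have already
+ # been removed, as noted above).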
+ while (${$code_ref} =~ m{ $ei_array_entry_regex }xsg) { + ($debug == 98) && print "+++ $1\n"; + $ei_array_entries_eiref->{$1} = undef; + } + + ($debug == 4) && debug_print_hash("AE: $filename", $ei_array_entries_eiref); # ArrayEntry + + # now remove all + ${$code_ref} =~ s{ $ei_array_entry_regex } {}xmsog; + ($debug == 4) && print "==> After remove ei_array_entries: code: [$filename]\n${$code_ref}\n===<\n"; + + return; +} + +# --------------------------------------------------------------------- +# action: Add to hash all ei_... strings remaining in input string. +# arga: code_ref, filename, ei_usage_eiref +# return: ref to ei_usage hash +# +# The hash will include *all* strings of form ei_... +# which are in the input string (even strings which +# aren't actually vars). +# We don't care since we'll be checking only +# known valid vars against these strings. + +sub find_ei_usage { + my ($code_ref, $filename, $ei_usage_eiref) = @_; + + my $ei_usage_regex = qr{ + \b ( ei_[a-zA-Z0-9_]+ ) # ei_... + }xmso; + + while (${$code_ref} =~ m{ $ei_usage_regex }xmsog) { + #print "$1\n"; + $ei_usage_eiref->{$1} += 1; + } + + ($debug == 5) && debug_print_hash("VU: $filename", $ei_usage_eiref); # VariableUsage + + return; +} + +# --------------------------------------------------------------------- +sub debug_print_hash { + my ($title, $href) = @_; + + ##print "==> $title\n"; + for my $k (sort keys %{$href}) { + my $h = defined($href->{$k}) ? $href->{$k} : "undef"; + printf "%-40.40s %5.5s %s\n", $title, $h, $k; + } +} diff --git a/tools/checklicenses.py b/tools/checklicenses.py new file mode 100755 index 0000000..192fecb --- /dev/null +++ b/tools/checklicenses.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python3 +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# +"""Makes sure that all files contain proper licensing information.""" + + +import optparse +import os.path +import subprocess +import sys + + +def PrintUsage(): + print("""Usage: python checklicenses.py [--root ] [tocheck] + --root Specifies the repository root. This defaults to ".." relative + to the script file. This will be correct given the normal location + of the script in "/tools". + + --ignore-suppressions Ignores path-specific allowed license. Useful when + trying to remove a suppression/allowed entry. + + --list-allowed Print a list of allowed licenses and exit. + + tocheck Specifies the directory, relative to root, to check. This defaults + to "." so it checks everything. 
+ +Examples: + python checklicenses.py + python checklicenses.py --root ~/chromium/src third_party""") + + +ALLOWED_LICENSES = [ + 'BSD (1 clause)', + 'BSD (2 clause)', + 'BSD (2 clause) GPL (v2 or later)', + 'BSD (3 clause)', + 'GPL (v2 or later)', + 'GPL (v3 or later) (with Bison parser exception)', + 'ISC', + 'ISC GPL (v2 or later)', + 'LGPL (v2 or later)', + 'LGPL (v2.1 or later)', + 'MIT/X11 (BSD like)', + 'Public domain', + 'Public domain GPL (v2 or later)', + 'Public domain MIT/X11 (BSD like)', + 'zlib/libpng', + 'zlib/libpng GPL (v2 or later)', +] + + +PATH_SPECIFIC_ALLOWED_LICENSES = { + 'caputils/airpcap.h': [ + 'BSD-3-Clause', + ], + 'wsutil/strnatcmp.c': [ + 'Zlib', + ], + 'wsutil/strnatcmp.h': [ + 'Zlib', + ], + 'resources/protocols/dtds': [ + 'UNKNOWN', + ], + 'resources/protocols/diameter/dictionary.dtd': [ + 'UNKNOWN', + ], + 'resources/protocols/wimaxasncp/dictionary.dtd': [ + 'UNKNOWN', + ], + 'doc/': [ + 'UNKNOWN', + ], + 'docbook/custom_layer_chm.xsl': [ + 'UNKNOWN', + ], + 'docbook/custom_layer_single_html.xsl': [ + 'UNKNOWN', + ], + 'docbook/ws.css' : [ + 'UNKNOWN' + ], + 'fix': [ + 'UNKNOWN', + ], + 'wsutil/g711.c': [ + 'UNKNOWN', + ], + 'packaging/macosx': [ + 'UNKNOWN', + ], + 'epan/except.c': [ + 'UNKNOWN', + ], + 'epan/except.h': [ + 'UNKNOWN', + ], + # Generated header files by lex/lemon/whatever + 'epan/dtd_grammar.h': [ + 'UNKNOWN', + ], + 'epan/dfilter/grammar.h': [ + 'UNKNOWN', + ], + 'epan/dfilter/grammar.c': [ + 'UNKNOWN', + ], + 'epan/dissectors/packet-ieee80211-radiotap-iter.': [ # Using ISC license only + 'ISC GPL (v2)' + ], + # Mentions BSD-3-clause twice due to embedding of code: + 'epan/dissectors/packet-communityid.c': [ + 'BSD (3 clause) BSD (3 clause)', + ], + 'plugins/mate/mate_grammar.h': [ + 'UNKNOWN', + ], + 'vcs_version.h': [ + 'UNKNOWN', + ], + # Special IDL license that appears to be compatible as far as I (not a + # lawyer) can tell. See + # https://www.wireshark.org/lists/wireshark-dev/201310/msg00234.html + 'epan/dissectors/pidl/idl_types.h': [ + 'UNKNOWN', + ], + # The following tools are under incompatible licenses (mostly GPLv3 or + # GPLv3+), but this is OK since they are not actually linked into Wireshark + 'tools/pidl': [ + 'UNKNOWN', + ], + 'tools/lemon': [ + 'UNKNOWN', + ], + 'tools/licensecheck.pl': [ + 'GPL (v2)' + ], + '.gitlab/': [ + 'UNKNOWN', + ], + 'wsutil/safe-math.h': [ # Public domain (CC0) + 'UNKNOWN', + ], +} + +def check_licenses(options, args): + if options.list_allowed: + print('\n'.join(ALLOWED_LICENSES)) + sys.exit(0) + + # Figure out which directory we have to check. + if len(args) == 0: + # No directory to check specified, use the repository root. + start_dir = options.base_directory + elif len(args) == 1: + # Directory specified. Start here. It's supposed to be relative to the + # base directory. + start_dir = os.path.abspath(os.path.join(options.base_directory, args[0])) + else: + # More than one argument, we don't handle this. 
+ PrintUsage()
+ return 1
+
+ print("Using base directory: %s" % options.base_directory)
+ print("Checking: %s" % start_dir)
+ print("")
+
+ licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
+ 'tools',
+ 'licensecheck.pl'))
+
+ licensecheck = subprocess.Popen([licensecheck_path,
+ '-l', '150',
+ '-r', start_dir],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = licensecheck.communicate()
+ stdout = stdout.decode('utf-8')
+ stderr = stderr.decode('utf-8')
+ if options.verbose:
+ print('----------- licensecheck stdout -----------')
+ print(stdout)
+ print('--------- end licensecheck stdout ---------')
+ if licensecheck.returncode != 0 or stderr:
+ print('----------- licensecheck stderr -----------')
+ print(stderr)
+ print('--------- end licensecheck stderr ---------')
+ print("\nFAILED\n")
+ return 1
+
+ success = True
+ exit_status = 0
+ for line in stdout.splitlines():
+ filename, license = line.split(':', 1)
+ filename = os.path.relpath(filename.strip(), options.base_directory)
+
+ # All files in the build output directory are generated one way or another.
+ # There's no need to check them.
+ if os.path.dirname(filename).startswith('build'):
+ continue
+
+ # For now we're just interested in the license.
+ license = license.replace('*No copyright*', '').strip()
+
+ # Skip generated files.
+ if 'GENERATED FILE' in license:
+ continue
+
+ # Support files which provide a choice between licenses.
+ if any(item in ALLOWED_LICENSES for item in license.split(';')):
+ continue
+
+ if not options.ignore_suppressions:
+ found_path_specific = False
+ for prefix in PATH_SPECIFIC_ALLOWED_LICENSES:
+ if (filename.startswith(prefix) and
+ license in PATH_SPECIFIC_ALLOWED_LICENSES[prefix]):
+ found_path_specific = True
+ break
+ if found_path_specific:
+ continue
+
+ reason = "License '%s' for '%s' is not allowed." % (license, filename)
+ success = False
+ print(reason)
+ exit_status = 1
+
+ if success:
+ print("\nSUCCESS\n")
+ return 0
+ else:
+ print("\nFAILED\n")
+ return exit_status
+
+
+def main():
+ default_root = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..'))
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('--root', default=default_root,
+ dest='base_directory',
+ help='Specifies the repository root. This defaults '
+ 'to ".." relative to the script file, which '
+ 'will normally be the repository root.')
+ option_parser.add_option('-v', '--verbose', action='store_true',
+ default=False, help='Print debug logging')
+ option_parser.add_option('--list-allowed',
+ action='store_true',
+ default=False,
+ help='Print a list of allowed licenses and exit.')
+ option_parser.add_option('--ignore-suppressions',
+ action='store_true',
+ default=False,
+ help='Ignore path-specific allowed license.')
+ options, args = option_parser.parse_args()
+ return check_licenses(options, args)
+
+
+if '__main__' == __name__:
+ sys.exit(main())
diff --git a/tools/colorfilters2js.py b/tools/colorfilters2js.py
new file mode 100644
index 0000000..49b8a42
--- /dev/null
+++ b/tools/colorfilters2js.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+#
+# Copyright 2022 by Moshe Kaplan
+# Based on colorfilter2js.pl by Dirk Jagdmann
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+# Python script to convert a Wireshark color scheme to javascript
+# code. The javascript function should then be inserted into the
+# pdml2html.xsl file.
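+#
+# For instance, given the sample colorfilters line used below,
+#   @Errors@ct.error@[4626,10023,11822][63479,34695,34695]
+# the first generated branch looks roughly like this (illustrative only;
+# each 16-bit color component is divided by 256 and printed as two hex
+# digits, and the first entry carries no "else " prefix):
+#   if (colorname == 'Errors') {
+#       bg='#12272e';
+#       fg='#f78787';
+#   }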
+#
+# run this as: python tools/colorfilters2js.py colorfilters
+
+
+import argparse
+import io
+import re
+import sys
+
+js_prologue = """\
+function set_node_color(node, colorname)
+{
+ if (dojo.isString(node))
+ node = dojo.byId(node);
+ if (!node) return;
+ var fg;
+ var bg;
+"""
+
+js_color_entry = """\
+ {7}if (colorname == '{0}') {{
+ bg='#{1:02x}{2:02x}{3:02x}';
+ fg='#{4:02x}{5:02x}{6:02x}';
+ }}\
+"""
+
+js_epilogue = """
+ if (fg.length > 0)
+ node.style.color = fg;
+ if (bg.length > 0)
+ node.style.background = bg;
+}
+"""
+
+
+def generate_javascript(colorlines):
+ output = [js_prologue]
+ else_text = ""
+ for colorline in colorlines:
+ colorvalues = colorline[0], int(colorline[1])//256, int(colorline[2])//256, int(colorline[3])//256, int(colorline[4])//256, int(colorline[5])//256, int(colorline[6])//256, else_text
+ output += [js_color_entry.format(*colorvalues)]
+ else_text = "else "
+ output += [js_epilogue]
+ return "\n".join(output)
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Convert a Wireshark color scheme to javascript code.")
+ parser.add_argument("files", metavar='files', nargs='+', help="paths to colorfiles")
+ parsed_args = parser.parse_args()
+
+ COLORLINE_PATTERN = r"\@(.+?)\@.+\[(\d+),(\d+),(\d+)\]\[(\d+),(\d+),(\d+)\]"
+ colorlines = []
+
+ # Sample line:
+ # @Errors@ct.error@[4626,10023,11822][63479,34695,34695]
+
+ # Read the lines from all files:
+ for filename in parsed_args.files:
+ with open(filename, encoding='utf-8') as fh:
+ file_content = fh.read()
+ colorlines += re.findall(COLORLINE_PATTERN, file_content)
+ javascript_code = generate_javascript(colorlines)
+
+ stdoutu8 = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
+ stdoutu8.write(javascript_code)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/commit-msg b/tools/commit-msg
new file mode 100755
index 0000000..6b3052b
--- /dev/null
+++ b/tools/commit-msg
@@ -0,0 +1,7 @@
+#!/bin/sh
+#
+# Validate the commit message.
+
+./tools/validate-commit.py --commitmsg $1
+
+
diff --git a/tools/compress-pngs.py b/tools/compress-pngs.py
new file mode 100755
index 0000000..ed3e32a
--- /dev/null
+++ b/tools/compress-pngs.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+#
+# compress-pngs.py - Compress PNGs
+#
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+'''\
+convert-glib-types.py - Convert glib types to their C and C99 equivalents.
+'''
+
+# Imports
+
+import argparse
+import glob
+import platform
+import re
+import sys
+
+padded_type_map = {}
+
+type_map = {
+ 'gboolean': 'bool',
+ 'gchar': 'char',
+ 'guchar': 'unsigned char',
+ 'gint': 'int',
+ 'guint': 'unsigned', # Matches README.developer
+ 'glong': 'long',
+ 'gulong': 'unsigned long',
+ 'gint8': 'int8_t',
+ 'gint16': 'int16_t',
+ 'gint32': 'int32_t',
+ 'gint64': 'int64_t',
+ 'guint8': 'uint8_t',
+ 'guint16': 'uint16_t',
+ 'guint32': 'uint32_t',
+ 'guint64': 'uint64_t',
+ 'gfloat': 'float',
+ 'gdouble': 'double',
+ 'gpointer ': 'void *', # 'void *foo' instead of 'void * foo'
+ 'gpointer': 'void *',
+ # Is gsize the same as size_t on the platforms we support?
+ # https://gitlab.gnome.org/GNOME/glib/-/issues/2493
+ 'gsize': 'size_t',
+ 'gssize': 'ssize_t',
+}
+
+definition_map = {
+ 'TRUE': 'true',
+ 'FALSE': 'false',
+ 'G_MAXINT8': 'INT8_MAX',
+ 'G_MAXINT16': 'INT16_MAX',
+ 'G_MAXINT32': 'INT32_MAX',
+ 'G_MAXINT64': 'INT64_MAX',
+ 'G_MAXINT': 'INT_MAX',
+ 'G_MAXUINT8': 'UINT8_MAX',
+ 'G_MAXUINT16': 'UINT16_MAX',
+ 'G_MAXUINT32': 'UINT32_MAX',
+ 'G_MAXUINT64': 'UINT64_MAX',
+ 'G_MAXUINT': 'UINT_MAX',
+ 'G_MININT8': 'INT8_MIN',
+ 'G_MININT16': 'INT16_MIN',
+ 'G_MININT32': 'INT32_MIN',
+ 'G_MININT64': 'INT64_MIN',
+ 'G_MININT': 'INT_MIN',
+}
+
+format_spec_map = {
+ 'G_GINT64_FORMAT': 'PRId64',
+ 'G_GUINT64_FORMAT': 'PRIu64',
+}
+
+def convert_file(file):
+ lines = ''
+ try:
+ with open(file, 'r') as f:
+ lines = f.read()
+ for glib_type, c99_type in padded_type_map.items():
+ lines = lines.replace(glib_type, c99_type)
+ for glib_type, c99_type in type_map.items():
+ lines = re.sub(rf'([^"])\b{glib_type}\b([^"])', rf'\1{c99_type}\2', lines, flags=re.MULTILINE)
+ for glib_define, c99_define in definition_map.items():
+ lines = re.sub(rf'\b{glib_define}\b', rf'{c99_define}', lines, flags=re.MULTILINE)
+ for glib_fmt_spec, c99_fmt_spec in format_spec_map.items():
+ lines = re.sub(rf'\b{glib_fmt_spec}\b', rf'{c99_fmt_spec}', lines, flags=re.MULTILINE)
+ except IsADirectoryError:
+ sys.stderr.write(f'{file} is a directory.\n')
+ return
+ except UnicodeDecodeError:
+ sys.stderr.write(f"{file} isn't valid UTF-8.\n")
+ return
+ except:
+ sys.stderr.write(f'Unable to open {file}.\n')
+ return
+
+ with open(file, 'w') as f:
+ f.write(lines)
+ print(f'Converted {file}')
+
+def main():
+ parser = argparse.ArgumentParser(description='Convert glib types to their C and C99 equivalents.')
+ parser.add_argument('files', metavar='FILE', nargs='*')
+ args = parser.parse_args()
+
+ # Build a padded version of type_map which attempts to preserve alignment
+ for glib_type, c99_type in type_map.items():
+ pg_type = glib_type + ' '
+ pc_type = c99_type + ' '
+ pad_len = max(len(pg_type), len(pc_type))
+ padded_type_map[f'{pg_type:{pad_len}s}'] = f'{pc_type:{pad_len}s}'
+
+ files = []
+ if platform.system() == 'Windows':
+ for arg in args.files:
+ files += glob.glob(arg)
+ else:
+ files = args.files
+
+ for file in files:
+ convert_file(file)
+
+# On with the show
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tools/convert_expert_add_info_format.pl b/tools/convert_expert_add_info_format.pl
new file mode 100755
index 0000000..5728936
--- /dev/null
+++ b/tools/convert_expert_add_info_format.pl
@@ -0,0 +1,417 @@
+#!/usr/bin/env perl
+#
+# Copyright 2013 Michael Mann (see AUTHORS file)
+#
+# A program to help convert the "old" expert_add_info_format API calls into filterable "items" that
+# use the other expert API calls. The program requires 2 passes. "Pass 1" (generate) collects
+# the eligible expert_add_info_format calls and outputs the necessary data into a delimited
+# file. "Pass 2" (fix-all) takes the data from the delimited file and replaces the
+# expert_add_info_format calls with filterable "expert info" calls as well as
+# generating a separate file for the ei variable declarations and array data.
+# The ei "file" can be copy/pasted into the dissector where appropriate
+#
+# Note that the output from "Pass 1" won't always be a perfect conversion for "Pass 2", so
+# "human interaction" is needed as an intermediary to verify and update the delimited file
+# before "Pass 2" is done.
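+#
+# An illustrative two-pass run over a single (hypothetical) dissector:
+#   perl tools/convert_expert_add_info_format.pl --action=generate epan/dissectors/packet-foo.c
+#   (verify and update epan/dissectors/packet-foo.c.expert_add_info_input by hand)
+#   perl tools/convert_expert_add_info_format.pl --action=fix-all epan/dissectors/packet-foo.c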
+# +# Delimited file field format: +# <[GROUP]><[SEVERITY]><[FIELDNAME]><[EXPERTABBREV]> +# +# +# convert proto_tree_add_text_call enumerations: +# 1 - expert_add_info +# 2 - expert_add_info_format +# 3 - proto_tree_add_expert +# 4 - proto_tree_add_expert_format +# +# Usage: convert_expert_add_info_format.pl action= +# +# Based off of convert_proto_tree_add_text.pl +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +use strict; +use warnings; + +use Getopt::Long; + +my %EXPERT_SEVERITY = ('PI_COMMENT' => "PI_COMMENT", + 'PI_CHAT' => "PI_CHAT", + 'PI_NOTE' => "PI_NOTE", + 'PI_WARN' => "PI_WARN", + 'PI_ERROR' => "PI_ERROR"); + +my %EXPERT_GROUPS = ('PI_CHECKSUM' => "PI_CHECKSUM", + 'PI_SEQUENCE' => "PI_SEQUENCE", + 'PI_RESPONSE_CODE' => "PI_RESPONSE_CODE", + 'PI_REQUEST_CODE' => "PI_REQUEST_CODE", + 'PI_UNDECODED' => "PI_UNDECODED", + 'PI_REASSEMBLE' => "PI_REASSEMBLE", + 'PI_MALFORMED' => "PI_MALFORMED", + 'PI_DEBUG' => "PI_DEBUG", + 'PI_PROTOCOL' => "PI_PROTOCOL", + 'PI_SECURITY' => "PI_SECURITY", + 'PI_COMMENTS_GROUP' => "PI_COMMENTS_GROUP", + 'PI_DECRYPTION' => "PI_DECRYPTION", + 'PI_ASSUMPTION' => "PI_ASSUMPTION", + 'PI_DEPRECATED' => "PI_DEPRECATED"); + +my @expert_list; +my $protabbrev = ""; + +# Perl trim function to remove whitespace from the start and end of the string +sub trim($) +{ + my $string = shift; + $string =~ s/^\s+//; + $string =~ s/\s+$//; + return $string; +} + +# --------------------------------------------------------------------- +# +# MAIN +# +my $helpFlag = ''; +my $action = 'generate'; +my $register = ''; + +my $result = GetOptions( + 'action=s' => \$action, + 'register' => \$register, + 'help|?' => \$helpFlag + ); + +if (!$result || $helpFlag || !$ARGV[0]) { + usage(); +} + +sub usage { + print "\nUsage: $0 [--action=generate|fix-all|find-all] FILENAME [...]\n\n"; + print " --action = generate (default)\n"; + print " generate - create a delimited file (FILENAME.expert_add_info_input) with\n"; + print " expert_add_info_format fields in FILENAME(s)\n"; + print " fix-all - Use delimited file (FILENAME.expert_add_info_input) to convert\n"; + print " expert_add_info_format to \"filterable\" expert API\n"; + print " Also generates FILENAME.ei to be copy/pasted into\n"; + print " the dissector where appropriate\n\n"; + print " --register = generate ei_register_info and expert register function calls\n\n"; + + exit(1); +} + +# +# XXX Outline general algorithm here +# +my $found_total = 0; +my $protabbrev_index; +my $line_number = 0; + +while (my $fileName = $ARGV[0]) { + shift; + my $fileContents = ''; + + die "No such file: \"$fileName\"\n" if (! -e $fileName); + + # delete leading './' + $fileName =~ s{ ^ \. 
/ } {}xo;
+
+ #determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c
+ $protabbrev_index = rindex($fileName, "packet-");
+ if ($protabbrev_index == -1) {
+ print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
+ next;
+ }
+
+ $protabbrev = substr($fileName, $protabbrev_index+length("packet-"));
+ $protabbrev_index = rindex($protabbrev, ".");
+ if ($protabbrev_index == -1) {
+ print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
+ next;
+ }
+ $protabbrev = lc(substr($protabbrev, 0, $protabbrev_index));
+
+ # Read in the file (ouch, but it's easier that way)
+ open(FCI, "<", $fileName) || die("Couldn't open $fileName");
+ while (<FCI>) {
+ $fileContents .= $_;
+ }
+ close(FCI);
+
+ if ($action eq "generate") {
+ generate_eis(\$fileContents, $fileName);
+ }
+
+ if ($action eq "fix-all") {
+ # Read in the ei "input" file
+ $line_number = 0;
+ my $errors = 0;
+ open(FCI, "<", $fileName . ".expert_add_info_input") || die("Couldn't open $fileName.expert_add_info_input");
+ while (my $line = <FCI>) {
+ my @expert_item = split(/;|\n/, $line);
+
+ $line_number++;
+ $errors += verify_line(@expert_item);
+
+ push(@expert_list, \@expert_item);
+ }
+ close(FCI);
+
+ if ($errors > 0) {
+ print "Aborting conversion.\n";
+ exit(-1);
+ }
+
+ fix_expert_add_info_format(\$fileContents, $fileName);
+
+ # Write out the ei data
+ output_ei_data($fileName);
+
+ # Write out the changed version to a file
+ open(FCO, ">", $fileName . ".expert_add_info_format");
+ print FCO "$fileContents";
+ close(FCO);
+ }
+
+} # while
+
+exit $found_total;
+
+# ---------------------------------------------------------------------
+# Sanity check the data in the .expert_add_info_input file
+sub verify_line {
+ my( @expert_item) = @_;
+ my $errors = 0;
+
+ #do some basic error checking of the file
+ if (($expert_item[0] eq "1") ||
+ ($expert_item[0] eq "2") ||
+ ($expert_item[0] eq "3") ||
+ ($expert_item[0] eq "4")) {
+ #expert info conversions
+ if (!($expert_item[2] =~ /^ei_/)) {
+ print "$line_number: Poorly formed ei_ variable ($expert_item[2])!\n";
+ $errors++;
+ }
+ } else {
+ print "$line_number: Bad conversion value!\n";
+ $errors++;
+ }
+
+ if ($expert_item[1] eq "1") {
+ if (!($expert_item[2] =~ /^ei_/)) {
+ print "$line_number: Poorly formed ei_ variable ($expert_item[2])!\n";
+ $errors++;
+ }
+ if (!exists($EXPERT_SEVERITY{$expert_item[4]})) {
+ print "$line_number: Expert severity value '$expert_item[4]' unknown!\n";
+ $errors++;
+ }
+ if (!exists($EXPERT_GROUPS{$expert_item[3]})) {
+ print "$line_number: Expert group value '$expert_item[3]' unknown!\n";
+ $errors++;
+ }
+
+ } elsif ($expert_item[1] ne "0") {
+ print "$line_number: Bad ei variable generation value!\n";
+ $errors++;
+ }
+
+ return $errors;
+}
+
+sub generate_eis {
+ my( $fileContentsRef, $fileName) = @_;
+ my @args;
+ my $num_items = 0;
+ my @temp;
+ my $str_temp;
+ my $pat;
+
+ $pat = qr /
+ (
+ (?:expert_add_info_format)\s* \(
+ (([^[\,;])*\,){4,}
+ [^;]*
+ \s* \) \s* ;
+ )
+ /xs;
+
+ while ($$fileContentsRef =~ / $pat /xgso) {
+
+ my @expert_item = (1, 1, "ei_name", "GROUP", "SEVERITY", "fieldfullname", "fieldabbrevname",
+ "pinfo", "item", "tvb", "offset", "length", "params");
+ my $arg_loop = 5;
+ my $str = "${1}\n";
+ $str =~ tr/\t\n\r/ /d;
+ $str =~ s/ \s+ / /xg;
+ #print "$fileName: $str\n";
+
+ @args = split(/,/, $str);
+ #printf "ARGS(%d): %s\n", scalar @args, join("# ", @args);
+ $args[0] =~ s/expert_add_info_format\s*\(\s*//;
+
+ $expert_item[7] = $args[0]; #pinfo
+ $expert_item[8] = trim($args[1]); #item
+ $expert_item[3] = trim($args[2]); #GROUP + $expert_item[4] = trim($args[3]); #SEVERITY + $expert_item[5] = trim($args[4]); #fieldfullname + $expert_item[5] =~ s/\"//; + + #XXX - conditional? + $expert_item[5] =~ s/\"\s*\)\s*;$//; + $expert_item[5] =~ s/\"$//; + + #params + $expert_item[12] = ""; + while ($arg_loop < scalar @args) { + $expert_item[12] .= trim($args[$arg_loop]); + if ($arg_loop+1 < scalar @args) { + $expert_item[12] .= ", "; + } + $arg_loop += 1; + } + $expert_item[12] =~ s/\s*\)\s*;$//; + + #ei variable name + $expert_item[2] = sprintf("ei_%s_%s", $protabbrev, lc($expert_item[5])); + $expert_item[2] =~ s/\s+|-|:/_/g; + + #field abbreviated name + $expert_item[6] = sprintf("%s.%s", $protabbrev, lc($expert_item[5])); + $expert_item[6] =~ s/\s+|-|:/_/g; + + push(@expert_list, \@expert_item); + + $num_items += 1; + } + + if ($num_items > 0) { + open(FCO, ">", $fileName . ".expert_add_info_input"); + for my $item (@expert_list) { + print FCO join(";", @{$item}), "\n"; + } + close(FCO); + } +} + +# --------------------------------------------------------------------- +# Find all expert_add_info_format calls and replace them with the data +# found in expert_list +sub fix_expert_add_info_format { + my( $fileContentsRef, $fileName) = @_; + my $found = 0; + my $pat; + + $pat = qr / + ( + (?:expert_add_info_format)\s* \( + (([^[\,;])*\,){4,} + [^;]* + \s* \) \s* ; + ) + /xs; + + $$fileContentsRef =~ s/ $pat /patsub($found, $1)/xges; +} + +# --------------------------------------------------------------------- +# Format expert info functions with expert_list data +sub patsub { + my $item_str; + + #print $expert_list[$_[0]][2] . " = "; + #print $#{$expert_list[$_[0]]}+1; + #print "\n"; + + if ($expert_list[$_[0]][0] eq "1") { + $item_str = sprintf("expert_add_info(%s, %s, &%s);", + $expert_list[$_[0]][7], $expert_list[$_[0]][8], $expert_list[$_[0]][2]); + } elsif ($expert_list[$_[0]][0] eq "2") { + $item_str = sprintf("expert_add_info_format(%s, %s, &%s, \"%s\"", + $expert_list[$_[0]][7], $expert_list[$_[0]][8], + $expert_list[$_[0]][2], $expert_list[$_[0]][5]); + if (($#{$expert_list[$_[0]]}+1 > 12 ) && ($expert_list[$_[0]][12] ne "")) { + $item_str .= ", $expert_list[$_[0]][12]"; + } + $item_str .= ");"; + } elsif ($expert_list[$_[0]][0] eq "3") { + $item_str = sprintf("proto_tree_add_expert(%s, %s, &%s, %s, %s, %s);", + $expert_list[$_[0]][8], $expert_list[$_[0]][7], + $expert_list[$_[0]][2], $expert_list[$_[0]][9], + $expert_list[$_[0]][10], $expert_list[$_[0]][11]); + } elsif ($expert_list[$_[0]][0] eq "4") { + $item_str = sprintf("proto_tree_add_expert_format(%s, %s, &%s, %s, %s, %s, \"%s\"", + $expert_list[$_[0]][8], $expert_list[$_[0]][7], $expert_list[$_[0]][2], + $expert_list[$_[0]][9], $expert_list[$_[0]][10], + $expert_list[$_[0]][11], $expert_list[$_[0]][5]); + if (($#{$expert_list[$_[0]]}+1 > 12) && ($expert_list[$_[0]][12] ne "")) { + $item_str .= ", $expert_list[$_[0]][12]"; + } + $item_str .= ");"; + } + + $_[0] += 1; + + return $item_str; +} + +# --------------------------------------------------------------------- +# Output the ei variable declarations and expert array. For now, write them to a file. 
+# XXX - Eventually find the right place to add it to the modified dissector file
+sub output_ei_data {
+ my( $fileName) = @_;
+ my %eis = ();
+ my $index;
+ my $key;
+
+ #add ei to hash table to prevent against (accidental) duplicates
+ for ($index=0;$index<@expert_list;$index++) {
+ if ($expert_list[$index][1] eq "1") {
+ $eis{$expert_list[$index][2]} = $expert_list[$index][2];
+ }
+ }
+
+ open(FCO, ">", $fileName . ".ei");
+
+ print FCO "/* Generated from convert_expert_add_info_format.pl */\n";
+
+ foreach $key (keys %eis) {
+ print FCO "static expert_field $key = EI_INIT;\n";
+ }
+ print FCO "\n\n";
+
+ if ($register ne "") {
+ print FCO " static ei_register_info ei[] = {\n";
+ }
+
+ %eis = ();
+ for ($index=0;$index<@expert_list;$index++) {
+ if ($expert_list[$index][1] eq "1") {
+ if (exists($eis{$expert_list[$index][2]})) {
+ print "duplicate ei entry '$expert_list[$index][2]' found! Aborting conversion.\n";
+ exit(-1);
+ }
+ $eis{$expert_list[$index][2]} = $expert_list[$index][2];
+
+ print FCO " { &$expert_list[$index][2], { \"$expert_list[$index][6]\", $expert_list[$index][3], ";
+ print FCO "$expert_list[$index][4], \"$expert_list[$index][5]\", EXPFILL }},\r\n";
+ }
+ }
+
+ if ($register ne "") {
+ print FCO " };\n\n\n";
+ print FCO " expert_module_t* expert_$protabbrev;\n\n";
+
+ print FCO " expert_$protabbrev = expert_register_protocol(proto_$protabbrev);\n";
+ print FCO " expert_register_field_array(expert_$protabbrev, ei, array_length(ei));\n\n";
+ }
+
+
+ close(FCO);
+}
diff --git a/tools/convert_proto_tree_add_text.pl b/tools/convert_proto_tree_add_text.pl
new file mode 100755
index 0000000..3576455
--- /dev/null
+++ b/tools/convert_proto_tree_add_text.pl
@@ -0,0 +1,759 @@
+#!/usr/bin/env perl
+#
+# Copyright 2013 Michael Mann (see AUTHORS file)
+#
+# A program to help convert proto_tree_add_text calls into filterable "items" that
+# use proto_tree_add_item. The program requires 2 passes. "Pass 1" (generate) collects
+# the eligible proto_tree_add_text calls and outputs the necessary data into a delimited
+# file. "Pass 2" (fix-all) takes the data from the delimited file and replaces the
+# proto_tree_add_text calls with proto_tree_add_item or "expert info" calls as well as
+# generating separate files for the hf and/or ei variable declarations and hf and/or ei array data.
+# The hf "files" can be copy/pasted into the dissector where appropriate (until such time as
+# it's done automatically)
+#
+# Note that the output from "Pass 1" won't always be a perfect conversion for "Pass 2", so
+# "human interaction" is needed as an intermediary to verify and update the delimited file
+# before "Pass 2" is done.
+# It is also recommended to run checkhf.pl and checkAPIs.pl after "Pass 2" is completed.
+#
+# Delimited file field format:
+# 
+# <[FIELDNAME]><[FIELDTYPE]|[EXPERT_SEVERITY]><[FIELDABBREV]><[FIELDDISPLAY]><[FIELDCONVERT]><[BITMASK]>
+#
+# convert proto_tree_add_text_call enumerations:
+# 0 - no conversions
+# 1 - proto_tree_add_item
+# 10 - expert_add_info
+# 11 - expert_add_info_format
+# 12 - proto_tree_add_expert
+# 13 - proto_tree_add_expert_format
+#
+# Usage: convert_proto_tree_add_text.pl action=
+#
+# Lots of code shamelessly borrowed from fix-encoding-args.pl (Thanks Bill!)
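+#
+# As an illustration only: a hypothetical .proto_tree_input line for a
+# one-byte length field, following the delimited field order consumed by
+# verify_line and generate_hfs below, might look like:
+#   1;1;tree;hf_foo_length;tvb;offset;1;ENC_NA;Length;FT_UINT8;foo.length;BASE_DEC;NULL;0x0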
+# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +use strict; +use warnings; + +use Getopt::Long; + +my %DISPLAY_BASE = ('BASE_NONE' => "BASE_NONE", + 'BASE_DEC' => "BASE_DEC", + 'BASE_HEX' => "BASE_HEX", + 'BASE_OCT' => "BASE_OCT", + 'BASE_DEC_HEX' => "BASE_DEC_HEX", + 'BASE_HEX_DEC' => "BASE_HEX_DEC", + 'BASE_EXT_STRING' => "BASE_EXT_STRING", + 'BASE_RANGE_STRING' => "BASE_RANGE_STRING", + 'ABSOLUTE_TIME_LOCAL' => "ABSOLUTE_TIME_LOCAL", + 'ABSOLUTE_TIME_UTC' => "ABSOLUTE_TIME_UTC", + 'ABSOLUTE_TIME_DOY_UTC' => "ABSOLUTE_TIME_DOY_UTC", + 'BASE_CUSTOM' => "BASE_CUSTOM"); + +my %ENCODINGS = ('ENC_BIG_ENDIAN' => "ENC_BIG_ENDIAN", + 'ENC_LITTLE_ENDIAN' => "ENC_LITTLE_ENDIAN", + 'ENC_TIME_SECS_NSECS' => "ENC_TIME_SECS_NSECS", + 'ENC_TIME_NTP' => "ENC_TIME_NTP", + 'ENC_ASCII' => "ENC_ASCII", + 'ENC_UTF_8' => "ENC_UTF_8", + 'ENC_UTF_16' => "ENC_UTF_16", + 'ENC_UCS_2' => "ENC_UCS_2", + 'ENC_EBCDIC' => "ENC_EBCDIC", + 'ENC_NA' => "ENC_NA"); + +my %FIELD_TYPE = ('FT_NONE' => "FT_NONE", 'FT_PROTOCOL' => "FT_PROTOCOL", 'FT_BOOLEAN' => "FT_BOOLEAN", + 'FT_UINT8' => "FT_UINT8", 'FT_UINT16' => "FT_UINT16", 'FT_UINT24' => "FT_UINT24", 'FT_UINT32' => "FT_UINT32", 'FT_UINT64' => "FT_UINT64", + 'FT_INT8' => "FT_INT8", 'FT_INT16' => "FT_INT16", 'FT_INT24' => "FT_INT24", 'FT_INT32' => "FT_INT32", 'FT_INT64' => "FT_INT64", + 'FT_FLOAT' => "FT_FLOAT", 'FT_DOUBLE' => "FT_DOUBLE", + 'FT_ABSOLUTE_TIME' => "FT_ABSOLUTE_TIME", 'FT_RELATIVE_TIME' => "FT_RELATIVE_TIME", + 'FT_STRING' => "FT_STRING", 'FT_STRINGZ' => "FT_STRINGZ", 'FT_UINT_STRING' => "FT_UINT_STRING", + 'FT_ETHER' => "FT_ETHER", 'FT_BYTES' => "FT_BYTES", 'FT_UINT_BYTES' => "FT_UINT_BYTES", + 'FT_IPv4' => "FT_IPv4", 'FT_IPv6' => "FT_IPv6", 'FT_IPXNET' => "FT_IPXNET", 'FT_AX25' => "FT_AX25", 'FT_VINES' => "FT_VINES", + 'FT_FRAMENUM' => "FT_FRAMENUM", 'FT_GUID' => "FT_GUID", 'FT_OID' => "FT_OID", 'FT_REL_OID' => "FT_REL_OID", 'FT_EUI64' => "FT_EUI64"); + +my %EXPERT_SEVERITY = ('PI_COMMENT' => "PI_COMMENT", + 'PI_CHAT' => "PI_CHAT", + 'PI_NOTE' => "PI_NOTE", + 'PI_WARN' => "PI_WARN", + 'PI_ERROR' => "PI_ERROR"); + +my %EXPERT_GROUPS = ('PI_CHECKSUM' => "PI_CHECKSUM", + 'PI_SEQUENCE' => "PI_SEQUENCE", + 'PI_RESPONSE_CODE' => "PI_RESPONSE_CODE", + 'PI_REQUEST_CODE' => "PI_REQUEST_CODE", + 'PI_UNDECODED' => "PI_UNDECODED", + 'PI_REASSEMBLE' => "PI_REASSEMBLE", + 'PI_MALFORMED' => "PI_MALFORMED", + 'PI_DEBUG' => "PI_DEBUG", + 'PI_PROTOCOL' => "PI_PROTOCOL", + 'PI_SECURITY' => "PI_SECURITY", + 'PI_COMMENTS_GROUP' => "PI_COMMENTS_GROUP", + 'PI_DECRYPTION' => "PI_DECRYPTION", + 'PI_ASSUMPTION' => "PI_ASSUMPTION", + 'PI_DEPRECATED' => "PI_DEPRECATED"); + +my @proto_tree_list; +my @expert_list; +my $protabbrev = ""; + +# Perl trim function to remove whitespace from the start and end of the string +sub trim($) +{ + my $string = shift; + $string =~ s/^\s+//; + $string =~ s/\s+$//; + return $string; +} + +# --------------------------------------------------------------------- +# +# MAIN +# +my $helpFlag = ''; +my $action = 'generate'; +my $encoding = ''; +my $expert = ''; + +my $result = GetOptions( + 'action=s' => \$action, + 'encoding=s' => \$encoding, + 'expert' => \$expert, + 'help|?' 
=> \$helpFlag
+ );
+
+if (!$result || $helpFlag || !$ARGV[0]) {
+ usage();
+}
+
+sub usage {
+ print "\nUsage: $0 [--action=generate|fix-all|find-all] [--encoding=ENC_BIG_ENDIAN|ENC_LITTLE_ENDIAN] FILENAME [...]\n\n";
+ print " --action = generate (default)\n";
+ print " generate - create a delimited file (FILENAME.proto_tree_input) with\n";
+ print " proto_tree_add_text fields in FILENAME(s)\n";
+ print " fix-all - Use delimited file (FILENAME.proto_tree_input) to convert\n";
+ print " proto_tree_add_text to proto_tree_add_item\n";
+ print " Also generates FILENAME.hf and FILENAME.hf_array to be\n";
+ print " copy/pasted into the dissector where appropriate\n";
+ print " find-all - Output the number of eligible proto_tree_add_text calls\n";
+ print " for conversion\n\n";
+ print " --expert (Optional) Includes proto_tree_add_text calls with no printf arguments in\n";
+ print " the .proto_tree_input file as they could be converted to expert info\n";
+ print " (otherwise they are ignored)\n";
+ print " Must be called for 'fix-all' if called on 'generate'\n";
+ print " --encoding (Optional) Default encoding if one can't be determined\n";
+ print " (effective only for generate)\n";
+ print " If not specified, an encoding will not be auto-populated\n";
+ print " if undetermined\n\n";
+
+ exit(1);
+}
+
+#
+# XXX Outline general algorithm here
+#
+my $found_total = 0;
+my $protabbrev_index;
+my $line_number = 0;
+
+while (my $fileName = $ARGV[0]) {
+ shift;
+ my $fileContents = '';
+
+ die "No such file: \"$fileName\"\n" if (! -e $fileName);
+
+ # delete leading './'
+ $fileName =~ s{ ^ \. / } {}xo;
+
+ #determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c
+ $protabbrev_index = rindex($fileName, "packet-");
+ if ($protabbrev_index == -1) {
+ print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
+ next;
+ }
+
+ $protabbrev = substr($fileName, $protabbrev_index+length("packet-"));
+ $protabbrev_index = rindex($protabbrev, ".");
+ if ($protabbrev_index == -1) {
+ print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
+ next;
+ }
+ $protabbrev = lc(substr($protabbrev, 0, $protabbrev_index));
+
+ # Read in the file (ouch, but it's easier that way)
+ open(FCI, "<", $fileName) || die("Couldn't open $fileName");
+ while (<FCI>) {
+ $fileContents .= $_;
+ }
+ close(FCI);
+
+ if ($action eq "generate") {
+ generate_hfs(\$fileContents, $fileName);
+ }
+
+ if ($action eq "fix-all") {
+ # Read in the hf "input" file
+ $line_number = 0;
+ my $errors = 0;
+ open(FCI, "<", $fileName . ".proto_tree_input") || die("Couldn't open $fileName.proto_tree_input");
+ while (my $line = <FCI>) {
+ my @proto_tree_item = split(/;|\n/, $line);
+
+ $line_number++;
+ $errors += verify_line(@proto_tree_item);
+
+ push(@proto_tree_list, \@proto_tree_item);
+ if ($proto_tree_item[1] eq "2") {
+ push(@expert_list, \@proto_tree_item);
+ }
+ }
+ close(FCI);
+
+ if ($errors > 0) {
+ print "Aborting conversion.\n";
+ exit(-1);
+ }
+
+ fix_proto_tree_add_text(\$fileContents, $fileName);
+
+ # Write out the hf data
+ output_hf_array($fileName);
+ output_hf($fileName);
+
+ # Write out the changed version to a file
+ open(FCO, ">", $fileName .
".proto_tree_add_text"); + print FCO "$fileContents"; + close(FCO); + } + + if ($action eq "find-all") { + # Find all proto_tree_add_text() statements eligible for conversion + $found_total += find_all(\$fileContents, $fileName); + } + +} # while + +exit $found_total; + +# --------------------------------------------------------------------- +# Sanity check the data in the .proto_tree_input file +sub verify_line { + my( @proto_tree_item) = @_; + my $errors = 0; + + #do some basic error checking of the file + if ($proto_tree_item[0] eq "1") { + if (!($proto_tree_item[3] =~ /^hf_/)) { + print "$line_number: Poorly formed hf_ variable ($proto_tree_item[3])!\n"; + $errors++; + } + + foreach (split(/\|/, $proto_tree_item[7])) { + if (!exists($ENCODINGS{$_})) { + print "$line_number: Encoding value '$_' unknown!\n"; + $errors++; + } + } + } elsif (($proto_tree_item[0] eq "10") || + ($proto_tree_item[0] eq "11") || + ($proto_tree_item[0] eq "12") || + ($proto_tree_item[0] eq "13")) { + #expert info conversions + if (!($proto_tree_item[3] =~ /^ei_/)) { + print "$line_number: Poorly formed ei_ variable ($proto_tree_item[3])!\n"; + $errors++; + } + } elsif ($proto_tree_item[0] ne "0") { + print "Bad conversion value! Aborting conversion.\n"; + $errors++; + } + + if ($proto_tree_item[1] eq "1") { + if (!($proto_tree_item[3] =~ /^hf_/)) { + print "$line_number: Poorly formed hf_ variable ($proto_tree_item[3])!\n"; + $errors++; + } + if (!exists($FIELD_TYPE{$proto_tree_item[9]})) { + print "$line_number: Field type '$proto_tree_item[9]' unknown!\n"; + $errors++; + } + foreach (split(/\|/, $proto_tree_item[11])) { + if ((!exists($DISPLAY_BASE{$_})) && + (!($proto_tree_item[11] =~ /\d+/))) { + print "$line_number: Display base '$proto_tree_item[11]' unknown!\n"; + $errors++; + } + } + if (($proto_tree_item[9] eq "FT_UINT8") || + ($proto_tree_item[9] eq "FT_UINT16") || + ($proto_tree_item[9] eq "FT_UINT24") || + ($proto_tree_item[9] eq "FT_UINT32") || + ($proto_tree_item[9] eq "FT_UINT64") || + ($proto_tree_item[9] eq "FT_INT8") || + ($proto_tree_item[9] eq "FT_INT16") || + ($proto_tree_item[9] eq "FT_INT24") || + ($proto_tree_item[9] eq "FT_INT32") || + ($proto_tree_item[9] eq "FT_INT64")) { + if ($proto_tree_item[11] eq "BASE_NONE") { + print "$line_number: Interger type should not be BASE_NONE!\n"; + $errors++; + } + } + + } elsif ($proto_tree_item[1] eq "2") { + if (!($proto_tree_item[3] =~ /^ei_/)) { + print "$line_number: Poorly formed ei_ variable ($proto_tree_item[3])!\n"; + $errors++; + } + if (!exists($EXPERT_SEVERITY{$proto_tree_item[9]})) { + print "$line_number: Expert severity value '$proto_tree_item[9]' unknown!\n"; + $errors++; + } + if (!exists($EXPERT_GROUPS{$proto_tree_item[7]})) { + print "$line_number: Expert group value '$proto_tree_item[7]' unknown!\n"; + $errors++; + } + + } elsif ($proto_tree_item[1] ne "0") { + print "$line_number: Bad hf/ei variable generation value!\n"; + $errors++; + } + + return $errors; +} + +sub generate_hfs { + my( $fileContentsRef, $fileName) = @_; + my @args; + my $num_items = 0; + my @temp; + my $str_temp; + my $pat; + + if ($expert ne "") { + $pat = qr / + ( + (?:proto_tree_add_text)\s* \( + (([^[\,;])*\,){4,} + [^;]* + \s* \) \s* ; + ) + /xs; + } else { + $pat = qr / + ( + (?:proto_tree_add_text)\s* \( + (([^[\,;])*\,){5,} + [^;]* + \s* \) \s* ; + ) + /xs; + } + + while ($$fileContentsRef =~ / $pat /xgso) { + my @proto_tree_item = (1, 1, "tree", "hf_name", "tvb", "offset", "length", "encoding", + "fieldfullname", "fieldtype", "fieldabbrevname", 
"BASE_NONE", "NULL", "0x0"); + my $str = "${1}\n"; + $str =~ tr/\t\n\r/ /d; + $str =~ s/ \s+ / /xg; + #print "$fileName: $str\n"; + + @args = split(/,/, $str); + #printf "ARGS(%d): %s\n", scalar @args, join("# ", @args); + $args[0] =~ s/proto_tree_add_text\s*\(\s*//; + $proto_tree_item[2] = $args[0]; #tree + $proto_tree_item[4] = trim($args[1]); #tvb + $proto_tree_item[5] = trim($args[2]); #offset + $proto_tree_item[6] = trim($args[3]); #length + if (scalar @args == 5) { + #remove the "); at the end + $args[4] =~ s/\"\s*\)\s*;$//; + } + + #encoding + if (scalar @args > 5) { + if (($proto_tree_item[6] eq "1") || + ($args[5] =~ /tvb_get_guint8/) || + ($args[5] =~ /tvb_bytes_to_str/) || + ($args[5] =~ /tvb_ether_to_str/)) { + $proto_tree_item[7] = "ENC_NA"; + } elsif ($args[5] =~ /tvb_get_ntoh/) { + $proto_tree_item[7] = "ENC_BIG_ENDIAN"; + } elsif ($args[5] =~ /tvb_get_letoh/) { + $proto_tree_item[7] = "ENC_LITTLE_ENDIAN"; + } elsif (($args[5] =~ /tvb_get_ephemeral_string/) || + ($args[5] =~ /tvb_format_text/)){ + $proto_tree_item[7] = "ENC_NA|ENC_ASCII"; + } elsif ($encoding ne "") { + $proto_tree_item[7] = $encoding; + } + } + + #field full name + if (($expert ne "") || (scalar @args > 5)) { + my @arg_temp = split(/=|:/, $args[4]); + $proto_tree_item[8] = $arg_temp[0]; + } else { + $proto_tree_item[8] = $args[4]; + } + $proto_tree_item[8] =~ s/\"//; + $proto_tree_item[8] = trim($proto_tree_item[8]); + + if ($proto_tree_item[8] eq "%s\"") { + #assume proto_tree_add_text will not be converted + $proto_tree_item[0] = 0; + $proto_tree_item[1] = 0; + $proto_tree_item[3] = sprintf("hf_%s_", $protabbrev); + $proto_tree_item[10] = sprintf("%s.", $protabbrev); + } else { + #hf variable name + $proto_tree_item[3] = sprintf("hf_%s_%s", $protabbrev, lc($proto_tree_item[8])); + $proto_tree_item[3] =~ s/\s+|-|:/_/g; + + #field abbreviated name + $proto_tree_item[10] = sprintf("%s.%s", $protabbrev, lc($proto_tree_item[8])); + $proto_tree_item[10] =~ s/\s+|-|:/_/g; + } + + #VALS + if ($str =~ /val_to_str(_const)?\(\s*tvb_get_[^\(]*\([^\,]*,[^\)]*\)\s*\,\s*([^\,]*)\s*\,\s*([^\)]*)\)/) { + $proto_tree_item[12] = sprintf("VALS(%s)", trim($2)); + } elsif ($str =~ /val_to_str(_const)?\([^\,]*\,([^\,]*)\,/) { + $proto_tree_item[12] = sprintf("VALS(%s)", trim($2)); + } elsif ($str =~ /val_to_str_ext(_const)?\(\s*tvb_get_[^\(]*\([^\,]*,[^\)]*\)\s*\,\s*([^\,]*)\s*\,\s*([^\)]*)\)/) { + $proto_tree_item[12] = trim($2); + } elsif ($str =~ /val_to_str_ext(_const)?\([^\,]*\,([^\,]*)\,/) { + $proto_tree_item[12] = trim($2); + } + + #field type + if (scalar @args > 5) { + if ($args[5] =~ /tvb_get_guint8/) { + if ($args[4] =~ /%[0-9]*[i]/) { + $proto_tree_item[9] = "FT_INT8"; + } else { + $proto_tree_item[9] = "FT_UINT8"; + } + } elsif ($args[5] =~ /tvb_get_(n|"le")tohs/) { + if ($args[4] =~ /%[0-9]*[i]/) { + $proto_tree_item[9] = "FT_INT16"; + } else { + $proto_tree_item[9] = "FT_UINT16"; + } + } elsif ($args[5] =~ /tvb_get_(n|"le")toh24/) { + if ($args[4] =~ /%[0-9]*[i]/) { + $proto_tree_item[9] = "FT_INT24"; + } else { + $proto_tree_item[9] = "FT_UINT24"; + } + } elsif ($args[5] =~ /tvb_get_(n|"le")tohl/) { + if ($args[4] =~ /%[0-9]*[i]/) { + $proto_tree_item[9] = "FT_INT32"; + } else { + $proto_tree_item[9] = "FT_UINT32"; + } + } elsif ($args[5] =~ /tvb_get_(n|"le")toh("40"|"48"|"56"|"64")/) { + if ($args[4] =~ /%[0-9]*[i]/) { + $proto_tree_item[9] = "FT_INT64"; + } else { + $proto_tree_item[9] = "FT_UINT64"; + } + } elsif (($args[5] =~ /tvb_get_(n|"le")tohieee_float/) || + ($args[4] =~ /%[0-9\.]*[fFeEgG]/)) { + 
$proto_tree_item[9] = "FT_FLOAT"; + } elsif ($args[5] =~ /tvb_get_(n|"le")tohieee_double/) { + $proto_tree_item[9] = "FT_DOUBLE"; + } elsif (($args[5] =~ /tvb_get_ipv4/) || + ($args[5] =~ /tvb_ip_to_str/)) { + $proto_tree_item[9] = "FT_IPv4"; + } elsif (($args[5] =~ /tvb_get_ipv6/) || + ($args[5] =~ /tvb_ip6_to_str/)) { + $proto_tree_item[9] = "FT_IPv6"; + } elsif ($args[5] =~ /tvb_get_(n|"le")tohguid/) { + $proto_tree_item[9] = "FT_GUID"; + } elsif ($args[5] =~ /tvb_get_ephemeral_stringz/) { + $proto_tree_item[9] = "FT_STRINGZ"; + } elsif (($args[5] =~ /tvb_get_ephemeral_string/) || + ($args[5] =~ /tvb_format_text/)){ + $proto_tree_item[9] = "FT_STRING"; + } elsif (($args[5] =~ /tvb_bytes_to_str/)) { + $proto_tree_item[9] = "FT_BYTES"; + } elsif ($args[5] =~ /tvb_ether_to_str/) { + $proto_tree_item[9] = "FT_ETHER"; + } + + #if we still can't determine type, assume a constant length + #value means we have an unsigned value + if ($proto_tree_item[9] eq "fieldtype") { + my $len_str = trim($args[3]); + if ($len_str eq "1") { + $proto_tree_item[9] = "FT_UINT8"; + } elsif ($len_str eq "2") { + $proto_tree_item[9] = "FT_UINT16"; + } elsif ($len_str eq "3") { + $proto_tree_item[9] = "FT_UINT24"; + } elsif ($len_str eq "4") { + $proto_tree_item[9] = "FT_UINT32"; + } elsif ($len_str eq "8") { + $proto_tree_item[9] = "FT_UINT64"; + } + } + } + + #display base + if ($args[4] =~ /%[0-9]*[xX]/) { + $proto_tree_item[11] = "BASE_HEX"; + } elsif ($args[4] =~ /%[0-9]*[uld]/) { + $proto_tree_item[11] = "BASE_DEC"; + } elsif ($args[4] =~ /%[0-9]*o/) { + $proto_tree_item[11] = "BASE_OCT"; + } + if ($str =~ /val_to_str_ext(_const)?\([^\,]*\,([^\,]*)\,/) { + $proto_tree_item[11] .= "|BASE_EXT_STRING"; + } + + if (($proto_tree_item[7] eq "encoding") && ($proto_tree_item[9] eq "FT_BYTES")) { + $proto_tree_item[7] = "ENC_NA"; + } + + push(@proto_tree_list, \@proto_tree_item); + + $num_items += 1; + } + + if ($num_items > 0) { + open(FCO, ">", $fileName . 
".proto_tree_input"); + for my $item (@proto_tree_list) { + print FCO join(";", @{$item}), "\n"; + } + close(FCO); + } +} + +# --------------------------------------------------------------------- +# Find all proto_tree_add_text calls and replace them with the data +# found in proto_tree_list +sub fix_proto_tree_add_text { + my( $fileContentsRef, $fileName) = @_; + my $found = 0; + my $pat; + + if ($expert ne "") { + $pat = qr / + ( + (?:proto_tree_add_text)\s* \( + (([^[\,;])*\,){4,} + [^;]* + \s* \) \s* ; + ) + /xs; + } else { + $pat = qr / + ( + (?:proto_tree_add_text)\s* \( + (([^[\,;])*\,){5,} + [^;]* + \s* \) \s* ; + ) + /xs; + } + + $$fileContentsRef =~ s/ $pat /patsub($found, $1)/xges; +} + +# --------------------------------------------------------------------- +# Format proto_tree_add_item or expert info functions with proto_tree_list data +sub patsub { + my $item_str; + if ($proto_tree_list[$_[0]][0] eq "1") { + $item_str = sprintf("proto_tree_add_item(%s, %s, %s, %s, %s, %s);", + $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3], + $proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5], + $proto_tree_list[$_[0]][6], $proto_tree_list[$_[0]][7]); + } elsif ($proto_tree_list[$_[0]][0] eq "10") { + $item_str = sprintf("expert_add_info(pinfo, %s, &%s);", + $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3]); + } elsif ($proto_tree_list[$_[0]][0] eq "11") { + $item_str = sprintf("expert_add_info_format(pinfo, %s, &%s, \"%s\"", + $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3], + $proto_tree_list[$_[0]][8]); + if ($proto_tree_list[$_[0]][11] ne "") { + $item_str .= ", $proto_tree_list[$_[0]][11]"; + } + $item_str .= ");"; + } elsif ($proto_tree_list[$_[0]][0] eq "12") { + $item_str = sprintf("proto_tree_add_expert(%s, pinfo, &%s, %s, %s, %s);", + $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3], + $proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5], + $proto_tree_list[$_[0]][6]); + } elsif ($proto_tree_list[$_[0]][0] eq "13") { + $item_str = sprintf("proto_tree_add_expert_format(%s, pinfo, &%s, %s, %s, %s, \"%s\"", + $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3], + $proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5], + $proto_tree_list[$_[0]][6], $proto_tree_list[$_[0]][8]); + if ($proto_tree_list[$_[0]][11] ne "") { + $item_str .= ", $proto_tree_list[$_[0]][11]"; + } + $item_str .= ");"; + } else { + $item_str = $1; + } + + $_[0] += 1; + + return $item_str; +} + +# --------------------------------------------------------------------- +# Output the hf variable declarations. For now, write them to a file. +# XXX - Eventually find the right place to add it to the modified dissector file +sub output_hf { + my( $fileName) = @_; + my %hfs = (); + my %eis = (); + my $index; + my $key; + + open(FCO, ">", $fileName . 
".hf"); + + print FCO "/* Generated from convert_proto_tree_add_text.pl */\n"; + + #add hfs to hash table to prevent against (accidental) duplicates + for ($index=0;$index<@proto_tree_list;$index++) { + if ($proto_tree_list[$index][1] eq "1") { + $hfs{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3]; + print FCO "static int $proto_tree_list[$index][3] = -1;\n"; + } elsif ($proto_tree_list[$index][1] eq "2") { + $eis{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3]; + } + } + + if (scalar keys %hfs > 0) { + print FCO "\n\n"; + } + + print FCO "/* Generated from convert_proto_tree_add_text.pl */\n"; + + foreach $key (keys %eis) { + print FCO "static expert_field $key = EI_INIT;\n"; + } + close(FCO); + +} + +# --------------------------------------------------------------------- +# Output the hf array items. For now, write them to a file. +# XXX - Eventually find the right place to add it to the modified dissector file +# (bonus points if formatting of hf array in dissector file is kept) +sub output_hf_array { + my( $fileName) = @_; + my $index; + my %hfs = (); + my %eis = (); + + open(FCO, ">", $fileName . ".hf_array"); + + print FCO " /* Generated from convert_proto_tree_add_text.pl */\n"; + + for ($index=0;$index<@proto_tree_list;$index++) { + if ($proto_tree_list[$index][1] eq "1") { + if (exists($hfs{$proto_tree_list[$index][3]})) { + print "duplicate hf entry '$proto_tree_list[$index][3]' found! Aborting conversion.\n"; + exit(-1); + } + $hfs{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3]; + print FCO " { &$proto_tree_list[$index][3], { \"$proto_tree_list[$index][8]\", \"$proto_tree_list[$index][10]\", "; + print FCO "$proto_tree_list[$index][9], $proto_tree_list[$index][11], $proto_tree_list[$index][12], $proto_tree_list[$index][13], NULL, HFILL }},\r\n"; + } + } + + if ($index > 0) { + print FCO "\n\n"; + } + + print FCO " /* Generated from convert_proto_tree_add_text.pl */\n"; + for ($index=0;$index<@expert_list;$index++) { + if (exists($eis{$expert_list[$index][3]})) { + print "duplicate ei entry '$expert_list[$index][3]' found! 
Aborting conversion.\n";
+ exit(-1);
+ }
+ $eis{$expert_list[$index][3]} = $expert_list[$index][3];
+
+ print FCO " { &$expert_list[$index][3], { \"$expert_list[$index][10]\", $expert_list[$index][7], ";
+ print FCO "$expert_list[$index][9], \"$expert_list[$index][8]\", EXPFILL }},\r\n";
+ }
+
+ close(FCO);
+}
+
+# ---------------------------------------------------------------------
+# Find all proto_tree_add_text calls that have parameters passed in them
+# and output number found
+
+sub find_all {
+ my( $fileContentsRef, $fileName) = @_;
+
+ my $found = 0;
+ my $tvb_found = 0;
+ my $pat;
+ my $tvb_percent;
+
+ if ($expert ne "") {
+ $pat = qr /
+ (
+ (?:proto_tree_add_text)\s* \(
+ (([^[\,;])*\,){4,}
+ [^;]*
+ \s* \) \s* ;
+ )
+ /xs;
+ } else {
+ $pat = qr /
+ (
+ (?:proto_tree_add_text)\s* \(
+ (([^[\,;])*\,){5,}
+ [^;]*
+ \s* \) \s* ;
+ )
+ /xs;
+ }
+
+ while ($$fileContentsRef =~ / $pat /xgso) {
+ my $str = "${1}\n";
+ my @args = split(/,/, ${1});
+
+ #cleanup whitespace to show proto_tree_add_text in single line (easier for seeing grep results)
+ $str =~ tr/\t\n\r/ /d;
+ $str =~ s/ \s+ / /xg;
+ #print "$fileName: $str\n";
+
+ #find all instances where proto_tree_add_text has a tvb_get (or similar) call, because
+ #convert_proto_tree_add_text.pl has an easier time determining hf_ field values with it
+ if (scalar @args > 5) {
+ my $tvb = trim($args[5]);
+ if ($tvb =~ /^tvb_/) {
+ $tvb_found += 1;
+ }
+ }
+
+ $found += 1;
+ }
+
+ if ($found > 0) {
+ if ($tvb_found > 0) {
+ $tvb_percent = 100*$tvb_found/$found;
+
+ printf "%s: Found %d proto_tree_add_text calls eligible for conversion, %d contain a \"tvb get\" call (%.2f%%).\n",
+ $fileName, $found, $tvb_found, $tvb_percent;
+ } else {
+ print "$fileName: Found $found proto_tree_add_text calls eligible for conversion, 0 \"tvb get\" calls.\n";
+ }
+ }
+ return $found;
+}
diff --git a/tools/cppcheck/cppcheck.sh b/tools/cppcheck/cppcheck.sh
new file mode 100755
index 0000000..780fbbc
--- /dev/null
+++ b/tools/cppcheck/cppcheck.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+
+#
+# cppcheck.sh
+# Script to run CppCheck Static Analyzer.
+# http://cppcheck.sourceforge.net/
+#
+# Usage: tools/cppcheck/cppcheck.sh [options] [file]
+# Where options can be:
+# -a disable suppression list (see $CPPCHECK_DIR/suppressions)
+# -c colorize html output
+# -h html output (default is gcc)
+# -x xml output (default is gcc)
+# -j n threads (default: 4)
+# -l n check files from the last [n] commits
+# -o check modified files
+# -v quiet mode
+# If the file argument is omitted, all files in the current directory are checked.
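+#
+# Illustrative invocations (the file name is an example only):
+#   tools/cppcheck/cppcheck.sh -o
+#   tools/cppcheck/cppcheck.sh -l 3
+#   tools/cppcheck/cppcheck.sh -h -c epan/dissectors/packet-tcp.c > cppcheck.html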
+# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 2012 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +CPPCHECK=$(type -p cppcheck) +CPPCHECK_DIR=$(dirname "$0") + +if [ -z "$CPPCHECK" ] ; then + echo "cppcheck not found" + exit 1 +fi + +THREADS=4 +LAST_COMMITS=0 +TARGET="" +QUIET="--quiet" +SUPPRESSIONS="--suppressions-list=$CPPCHECK_DIR/suppressions" +INCLUDES="--includes-file=$CPPCHECK_DIR/includes" +MODE="gcc" +COLORIZE_HTML_MODE="no" +OPEN_FILES="no" +XML_ARG="" + +colorize_worker() +{ + # always uses stdin/stdout + [ "$COLORIZE_HTML_MODE" = "yes" ] && \ + sed -e '/warning<\/td>/s/^//' \ + -e '/error<\/td>/s/^//' \ + || sed '' +} + +# switcher +colorize() +{ + [ -z "$1" ] && colorize_worker || colorize_worker <<< "$1" +} + +exit_cleanup() { + if [ "$MODE" = "html" ]; then + echo "" + fi + if [ -n "$1" ] ; then + exit "$1" + fi +} + +while getopts "achxj:l:ov" OPTCHAR ; do + case $OPTCHAR in + a) SUPPRESSIONS=" " ;; + c) COLORIZE_HTML_MODE="yes" ;; + h) MODE="html" ;; + x) MODE="xml" ;; + j) THREADS="$OPTARG" ;; + l) LAST_COMMITS="$OPTARG" ;; + o) OPEN_FILES="yes" ;; + v) QUIET=" " ;; + *) printf "Unknown option %s" "$OPTCHAR" + esac +done +shift $(( OPTIND - 1 )) + +if [ "$MODE" = "gcc" ]; then + TEMPLATE="gcc" +elif [ "$MODE" = "html" ]; then + echo "" + echo "" + echo "" + TEMPLATE="" +fi + +# Ensure that the COLORIZE_HTML_MODE option is used only with HTML-mode and not with GCC-mode. +[ "$MODE" = "html" ] && [ "$COLORIZE_HTML_MODE" = "yes" ] || COLORIZE_HTML_MODE="no" + +if [ "$LAST_COMMITS" -gt 0 ] ; then + TARGET=$( git diff --name-only --diff-filter=d HEAD~"$LAST_COMMITS".. | grep -E '\.(c|cpp)$' ) + if [ -z "${TARGET//[[:space:]]/}" ] ; then + >&2 echo "No C or C++ files found in the last $LAST_COMMITS commit(s)." + exit_cleanup 0 + fi +fi + +if [ "$OPEN_FILES" = "yes" ] ; then + TARGET=$(git diff --name-only | grep -E '\.(c|cpp)$' ) + TARGET="$TARGET $(git diff --staged --name-only | grep -E '\.(c|cpp)$' )" + if [ -z "${TARGET//[[:space:]]/}" ] ; then + >&2 echo "No C or C++ files are currently opened (modified or added for next commit)." + exit_cleanup 0 + fi +fi + +if [ $# -gt 0 ]; then + TARGET="$TARGET $*" +fi + +if [ -z "$TARGET" ] ; then + TARGET=. +fi + +if [ "$MODE" = "xml" ]; then + XML_ARG="--xml" +fi + +# Use a little-documented feature of the shell to pass SIGINTs only to the +# child process (cppcheck in this case). That way the final 'echo' still +# runs and we aren't left with broken HTML. +trap : INT + +if [ "$QUIET" = " " ]; then + echo "Examining:" + echo $TARGET + echo +fi + +# shellcheck disable=SC2086 +$CPPCHECK --force --enable=style $QUIET \ + $SUPPRESSIONS $INCLUDES \ + -i doc/ \ + -i epan/dissectors/asn1/ \ + --std=c11 --template=$TEMPLATE \ + -j $THREADS $TARGET $XML_ARG 2>&1 | colorize + +exit_cleanup + +# +# Editor modelines - https://www.wireshark.org/tools/modelines.html +# +# Local variables: +# c-basic-offset: 4 +# tab-width: 8 +# indent-tabs-mode: nil +# End: +# +# vi: set shiftwidth=4 tabstop=8 expandtab: +# :indentSize=4:tabSize=8:noTabs=true: +# diff --git a/tools/cppcheck/includes b/tools/cppcheck/includes new file mode 100644 index 0000000..896651e --- /dev/null +++ b/tools/cppcheck/includes @@ -0,0 +1,7 @@ +./epan/ +./epan/dissectors/ +./epan/wslua/ +./tools/lemon/ +./ui/ +./wiretap/ +. 
diff --git a/tools/cppcheck/suppressions b/tools/cppcheck/suppressions
new file mode 100644
index 0000000..734cd5a
--- /dev/null
+++ b/tools/cppcheck/suppressions
@@ -0,0 +1,7 @@
+variableScope
+duplicateExpression
+invalidscanf
+noConstructor
+internalAstError
+syntaxError
+
diff --git a/tools/debian-nightly-package.sh b/tools/debian-nightly-package.sh
new file mode 100755
index 0000000..c07185a
--- /dev/null
+++ b/tools/debian-nightly-package.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+set -e
+
+if test -z $1; then
+ echo "Usage:"
+ echo " $0 <dist>"
+ echo " e.g: $0 xenial"
+ exit 1
+fi
+
+DIST=$1
+VERSION=$(git describe --tags | sed 's/v//;s/-/~/g;s/rc/~rc/')
+ln --symbolic --no-dereference --force packaging/debian ./debian
+rm packaging/debian/changelog || true
+EDITOR=touch dch -p --package wireshark --create --force-distribution -v${VERSION}~${DIST}1 -D $DIST
+sed -i 's/\* Initial release.*/* Nightly build for '${DIST^}'/' packaging/debian/changelog
+dpkg-buildpackage -S -d
diff --git a/tools/debian-setup.sh b/tools/debian-setup.sh
new file mode 100755
index 0000000..9b68879
--- /dev/null
+++ b/tools/debian-setup.sh
@@ -0,0 +1,300 @@
+#!/bin/bash
+# Setup development environment on Debian and derivatives such as Ubuntu
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+
+set -e -u -o pipefail
+
+function print_usage() {
+ printf "\\nUtility to setup a debian-based system for Wireshark Development.\\n"
+ printf "The basic usage installs the needed software\\n\\n"
+ printf "Usage: %s [--install-optional] [--install-deb-deps] [...other options...]\\n" "$0"
+ printf "\\t--install-optional: install optional software as well\\n"
+ printf "\\t--install-deb-deps: install packages required to build the .deb file\\n"
+ printf "\\t--install-test-deps: install packages required to run all tests\\n"
+ printf "\\t--install-qt5-deps: force installation of packages required to use Qt5\\n"
+ printf "\\t--install-qt6-deps: force installation of packages required to use Qt6\\n"
+ printf "\\t--install-all: install everything\\n"
+ printf "\\t[other]: other options are passed as-is to apt\\n"
+}
+
+ADDITIONAL=0
+DEBDEPS=0
+TESTDEPS=0
+ADD_QT5=0
+ADD_QT6=0
+HAVE_ADD_QT=0
+OPTIONS=
+for arg; do
+ case $arg in
+ --help)
+ print_usage
+ exit 0
+ ;;
+ --install-optional)
+ ADDITIONAL=1
+ ;;
+ --install-deb-deps)
+ DEBDEPS=1
+ ;;
+ --install-test-deps)
+ TESTDEPS=1
+ ;;
+ --install-qt5-deps)
+ ADD_QT5=1
+ ;;
+ --install-qt6-deps)
+ ADD_QT6=1
+ ;;
+ --install-all)
+ ADDITIONAL=1
+ DEBDEPS=1
+ TESTDEPS=1
+ ADD_QT5=1
+ ADD_QT6=1
+ HAVE_ADD_QT=1
+ ;;
+ *)
+ OPTIONS="$OPTIONS $arg"
+ ;;
+ esac
+done
+
+# Check if the user is root
+if [ "$(id -u)" -ne 0 ]
+then
+ echo "You must be root."
+ exit 1 +fi + +BASIC_LIST="gcc \ + g++\ + libglib2.0-dev \ + libc-ares-dev \ + libpcap-dev \ + libpcre2-dev \ + flex \ + make \ + python3 \ + libgcrypt-dev \ + libspeexdsp-dev" + +QT5_LIST="qttools5-dev \ + qttools5-dev-tools \ + libqt5svg5-dev \ + qtmultimedia5-dev \ + qtbase5-dev \ + qtchooser \ + qt5-qmake \ + qtbase5-dev-tools" + +QT6_LIST="qt6-base-dev \ + qt6-multimedia-dev \ + qt6-tools-dev \ + qt6-tools-dev-tools \ + qt6-l10n-tools \ + libqt6core5compat6-dev \ + freeglut3-dev \ + libvulkan-dev \ + libxkbcommon-dev" + +if [ $ADD_QT5 -ne 0 ] +then + BASIC_LIST="$BASIC_LIST $QT5_LIST" + HAVE_ADD_QT=1 +fi + +if [ $ADD_QT6 -ne 0 ] +then + BASIC_LIST="$BASIC_LIST $QT6_LIST" + HAVE_ADD_QT=1 +fi + +if [ $HAVE_ADD_QT -eq 0 ] +then + # Try to select Qt version from distro + test -e /etc/os-release && os_release='/etc/os-release' || os_release='/usr/lib/os-release' + # shellcheck disable=SC1090 + . "${os_release}" + + # Ubuntu 22.04 (jammy) or later + MAJOR=$(echo "$VERSION_ID" | cut -f1 -d.) + if [ "${ID:-linux}" = "ubuntu" ] && [ "${MAJOR:-0}" -ge "22" ]; then + echo "Installing Qt6." + BASIC_LIST="$BASIC_LIST $QT6_LIST" + else + echo "Installing Qt5." + BASIC_LIST="$BASIC_LIST $QT5_LIST" + fi +fi + +ADDITIONAL_LIST="libnl-3-dev \ + libkrb5-dev \ + libsmi2-dev \ + libsbc-dev \ + liblua5.2-dev \ + libnl-cli-3-dev \ + libparse-yapp-perl \ + libcap-dev \ + liblz4-dev \ + libsnappy-dev \ + libzstd-dev \ + libspandsp-dev \ + libxml2-dev \ + libminizip-dev \ + git \ + ninja-build \ + perl \ + xsltproc \ + ccache \ + doxygen" + +# Uncomment to add PNG compression utilities used by compress-pngs: +# ADDITIONAL_LIST="$ADDITIONAL_LIST \ +# advancecomp \ +# optipng \ +# pngcrush" + +DEBDEPS_LIST="debhelper \ + dh-python \ + asciidoctor \ + docbook-xml \ + docbook-xsl \ + libxml2-utils \ + lintian \ + lsb-release \ + po-debconf \ + python3-ply \ + quilt" + +TESTDEPS_LIST="python3-pytest \ + python3-pytest-xdist" + +# Adds package $2 to list variable $1 if the package is found. +# If $3 is given, then this version requirement must be satisfied. +add_package() { + local list="$1" pkgname="$2" versionreq="${3:-}" version + + version=$(apt-cache show "$pkgname" 2>/dev/null | + awk '/^Version:/{ print $2; exit}') + # fail if the package is not known + if [ -z "$version" ]; then + return 1 + elif [ -n "$versionreq" ]; then + # Require minimum version or fail. 
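+ # dpkg --compare-versions exits 0 when the relation holds and non-zero
+ # otherwise; e.g. comparing 2.2.0 against '>= 2.3.0' fails, so the
+ # caller falls through to its fallback package or message.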
+ # shellcheck disable=SC2086
+ dpkg --compare-versions $version $versionreq || return 1
+ fi
+
+ # package is found, append it to list
+ eval "${list}=\"\${${list}} \${pkgname}\""
+}
+
+# apt-get update must be called before calling add_package
+# otherwise available packages appear as unavailable
+apt-get update || exit 2
+
+# cmake3 3.5.1: Ubuntu 14.04
+# cmake >= 3.5: Debian >= jessie-backports, Ubuntu >= 16.04
+add_package BASIC_LIST cmake3 ||
+BASIC_LIST="$BASIC_LIST cmake"
+
+# Debian >= wheezy-backports, Ubuntu >= 16.04
+add_package ADDITIONAL_LIST libnghttp2-dev ||
+echo "libnghttp2-dev is unavailable" >&2
+
+# Debian >= bookworm, Ubuntu >= 22.04
+add_package ADDITIONAL_LIST libnghttp3-dev ||
+echo "libnghttp3-dev is unavailable" >&2
+
+# libssh-gcrypt-dev: Debian >= jessie, Ubuntu >= 16.04
+# libssh-dev (>= 0.6): Debian >= jessie, Ubuntu >= 14.04
+add_package ADDITIONAL_LIST libssh-gcrypt-dev ||
+add_package ADDITIONAL_LIST libssh-dev ||
+echo "libssh-gcrypt-dev and libssh-dev are unavailable" >&2
+
+# libgnutls28-dev: Debian >= wheezy-backports, Ubuntu >= 12.04
+add_package ADDITIONAL_LIST libgnutls28-dev ||
+echo "libgnutls28-dev is unavailable" >&2
+
+# Debian >= jessie-backports, Ubuntu >= 16.04
+add_package ADDITIONAL_LIST libmaxminddb-dev ||
+echo "libmaxminddb-dev is unavailable" >&2
+
+# Debian >= stretch-backports, Ubuntu >= 16.04
+add_package ADDITIONAL_LIST libbrotli-dev ||
+echo "libbrotli-dev is unavailable" >&2
+
+# libsystemd-journal-dev: Ubuntu 14.04
+# libsystemd-dev: Ubuntu >= 16.04
+add_package ADDITIONAL_LIST libsystemd-dev ||
+add_package ADDITIONAL_LIST libsystemd-journal-dev ||
+echo "libsystemd-dev is unavailable"
+
+# ilbc library from http://www.deb-multimedia.org
+add_package ADDITIONAL_LIST libilbc-dev ||
+echo "libilbc-dev is unavailable"
+
+# opus library libopus-dev
+add_package ADDITIONAL_LIST libopus-dev ||
+ echo "libopus-dev is unavailable"
+
+# bcg729 library libbcg729-dev
+add_package ADDITIONAL_LIST libbcg729-dev ||
+ echo "libbcg729-dev is unavailable"
+
+# softhsm2 2.0.0: Ubuntu 16.04
+# softhsm2 2.2.0: Debian >= jessie-backports, Ubuntu 18.04
+# softhsm2 >= 2.4.0: Debian >= buster, Ubuntu >= 18.10
+if ! add_package TESTDEPS_LIST softhsm2 '>= 2.3.0'; then
+ if add_package TESTDEPS_LIST softhsm2; then
+ # If SoftHSM 2.3.0 is unavailable, install p11tool.
+ TESTDEPS_LIST="$TESTDEPS_LIST gnutls-bin"
+ else
+ echo "softhsm2 is unavailable" >&2
+ fi
+fi
+
+ACTUAL_LIST=$BASIC_LIST
+
+# Now arrange for optional support libraries
+if [ $ADDITIONAL -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
+fi
+
+if [ $DEBDEPS -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $DEBDEPS_LIST"
+fi
+
+if [ $TESTDEPS -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST"
+fi
+
+# shellcheck disable=SC2086
+apt-get install $ACTUAL_LIST $OPTIONS || exit 2
+
+if [ $ADDITIONAL -eq 0 ]
+then
+ printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
+fi
+
+if [ $DEBDEPS -eq 0 ]
+then
+ printf "\n*** Debian packages build deps not installed. Rerun with --install-deb-deps to have them.\n"
+fi
+
+if [ $TESTDEPS -eq 0 ]
+then
+ printf "\n*** Test deps not installed.
Rerun with --install-test-deps to have them.\n" +fi diff --git a/tools/debug-alloc.env b/tools/debug-alloc.env new file mode 100644 index 0000000..d6d454c --- /dev/null +++ b/tools/debug-alloc.env @@ -0,0 +1,33 @@ +############################################################################## +### Set up environment variables for testing ### +############################################################################## + +# Use the Wmem strict allocator which does canaries and scrubbing etc. +export WIRESHARK_DEBUG_WMEM_OVERRIDE=strict +# Abort if a dissector adds too many items to the tree +export WIRESHARK_ABORT_ON_TOO_MANY_ITEMS= + +# Turn on GLib memory debugging (since 2.13) +export G_SLICE=debug-blocks + +# Cause glibc (Linux) to abort() if some memory errors are found +export MALLOC_CHECK_=3 + +# Cause FreeBSD (and other BSDs) to abort() on allocator warnings and +# initialize allocated memory (to 0xa5) and freed memory (to 0x5a). see: +# https://www.freebsd.org/cgi/man.cgi?query=malloc&apropos=0&sektion=0&manpath=FreeBSD+8.2-RELEASE&format=html +export MALLOC_OPTIONS=AJ + +# macOS options; see https://developer.apple.com/library/archive/documentation/Performance/Conceptual/ManagingMemory/Articles/MallocDebug.html +# Initialize allocated memory to 0xAA and freed memory to 0x55 +export MallocPreScribble=1 +export MallocScribble=1 +# Add guard pages before and after large allocations +export MallocGuardEdges=1 +# Call abort() if heap corruption is detected. Heap is checked every 1000 +# allocations (may need to be tuned!) +export MallocCheckHeapStart=1000 +export MallocCheckHeapEach=1000 +export MallocCheckHeapAbort=1 +# Call abort() if an illegal free() call is made +export MallocBadFreeAbort=1 diff --git a/tools/delete_includes.py b/tools/delete_includes.py new file mode 100755 index 0000000..cc804e0 --- /dev/null +++ b/tools/delete_includes.py @@ -0,0 +1,427 @@ +#!/usr/bin/python3 + +# Martin Mathieson +# Look for and removes unnecessary includes in .cpp or .c files +# Run from wireshark source folder as e.g., +# ./tools/delete_includes.py --build-folder ~/wireshark-build/ --folder epan/dissectors/ +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +import subprocess +import os +import sys +import shutil +import argparse +import signal +import re +from pathlib import Path + + +# Try to exit soon after Ctrl-C is pressed. +should_exit = False + +def signal_handler(sig, frame): + global should_exit + should_exit = True + print('You pressed Ctrl+C - exiting') + +signal.signal(signal.SIGINT, signal_handler) + +# For text colouring/highlighting. +class bcolors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + ADDED = '\033[45m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + + +# command-line args +# +# Controls which dissector files should be checked. If no args given, will just +# scan whole epan/dissectors folder. 
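+# For example (an illustrative invocation, with a hypothetical build folder;
+# the flags are defined just below):
+#   ./tools/delete_includes.py --build-folder ~/wireshark-build \
+#       --file epan/dissectors/packet-tcp.c
+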
+parser = argparse.ArgumentParser(description='Look for and remove unnecessary includes')
+# required
+parser.add_argument('--build-folder', action='store', required=True,
+                    help='specify folder where Wireshark is built')
+parser.add_argument('--file', action='append',
+                    help='specify individual dissector file to test')
+parser.add_argument('--folder', action='store', default=os.path.join('epan', 'dissectors'),
+                    help='specify folder to test, relative to current/wireshark folder')
+parser.add_argument('--commits', action='store',
+                    help='last N commits to check')
+parser.add_argument('--open', action='store_true',
+                    help='check open files')
+parser.add_argument('--first-file', action='store',
+                    help='first file in folder to test')
+parser.add_argument('--last-file', action='store',
+                    help='last file in folder to test')
+args = parser.parse_args()
+
+
+test_folder = os.path.join(os.getcwd(), args.folder)
+
+
+# Usually only building one module, so no -j benefit?
+make_command = ['cmake', '--build', args.build_folder]
+if sys.platform.startswith('win'):
+    make_command += ['--config', 'RelWithDebInfo']
+
+
+
+# A list of header files that it is not safe to uninclude, as doing so
+# has been seen to cause link failures against implemented functions...
+# TODO: some of these could probably be removed on more permissive platforms.
+includes_to_keep = {
+    'config.h',
+    'epan/packet.h',
+    'stdlib.h',
+    'math.h',
+    'errno.h',
+    'string.h',
+    'prefs.h',
+    # These are probably mostly redundant in that they are now covered by the check
+    # for 'self-includes'...
+    'x11-keysym.h',
+    'packet-atm.h',
+    'packet-atalk.h',
+    'packet-ppp.h',
+    'packet-scsi-mmc.h',
+    'packet-tls.h'
+}
+
+
+# Build stats.
+class BuildStats:
+    def __init__(self):
+        self.files_examined = 0
+        self.includes_tested = 0
+        self.includes_deleted = 0
+        self.files_not_built_list = []
+        self.generated_files_ignored = []
+        self.includes_to_keep_kept = 0
+
+    def showSummary(self):
+        print('\n\n')
+        print('Summary')
+        print('=========')
+        print('files examined: %d' % self.files_examined)
+        print('includes tested: %d' % self.includes_tested)
+        print('includes deleted: %d' % self.includes_deleted)
+        print('files not built: %d' % len(self.files_not_built_list))
+        for abandoned_file in self.files_not_built_list:
+            print(' %s' % abandoned_file)
+        print('generated files not tested: %d' % len(self.generated_files_ignored))
+        for generated_file in self.generated_files_ignored:
+            print(' %s' % generated_file)
+        print('includes kept as not safe to remove: %d' % self.includes_to_keep_kept)
+
+stats = BuildStats()
+
+
+# We want to confirm that this file is actually built as part of the build.
+# To do this, add some nonsense to the front of the file and confirm that the
+# build then fails. If it doesn't, we won't want to remove #includes from that file!
+def test_file_is_built(filename):
+    print('test_file_is_built(', filename, ')')
+    temp_filename = filename + '.tmp'
+
+    f_read = open(filename, 'r')
+    write_filename = filename + '.new'
+    f_write = open(write_filename, 'w')
+    # Write the file with nonsense at start.
+    f_write.write('NO WAY THIS FILE BUILDS!!!!!')
+    # Copy remaining lines as-is.
+    for line in f_read:
+        f_write.write(line)
+    f_read.close()
+    f_write.close()
+    # Backup file, and do this build with the one we wrote.
+    shutil.copy(filename, temp_filename)
+    shutil.copy(write_filename, filename)
+
+    # Try the build.
+    result = subprocess.call(make_command)
+    # Restore proper file & delete temp files
+    shutil.copy(temp_filename, filename)
+    os.remove(temp_filename)
+    os.remove(write_filename)
+
+    if result == 0:
+        # Build succeeded so this file wasn't in it
+        return False
+    else:
+        # Build failed so this file *is* part of it
+        return True
+
+
+# Function to test removal of each #include from a file in turn.
+# At the end, only those that appear to be needed will be left.
+def test_file(filename):
+    global stats
+
+    print('\n------------------------------')
+    print(bcolors.OKBLUE, bcolors.BOLD, 'Testing', filename, bcolors.ENDC)
+
+    temp_filename = filename + '.tmp'
+
+    # Test if file seems to be part of the build.
+    is_built = test_file_is_built(filename)
+    if not is_built:
+        print(bcolors.WARNING, '***** File not used in build, so ignore!!!!', bcolors.ENDC)
+        # TODO: should os.path.join with root before adding?
+        stats.files_not_built_list.append(filename)
+        return
+    else:
+        print('This file is part of the build')
+
+    # OK, we are going to test removing includes from this file.
+    tested_line_number = 0
+
+    # Don't want to delete 'self-includes', so prepare filename.
+    module_name = Path(filename).stem
+    extension = Path(filename).suffix
+
+    module_header = module_name + '.h'
+
+    # Loop around, finding all possible include lines to comment out
+    while (True):
+        if should_exit:
+            exit(1)
+
+        have_deleted_line = False
+        result = 0
+
+        # Open read & write files
+        f_read = open(filename, 'r')
+        write_filename = filename + '.new'
+        f_write = open(write_filename, 'w')
+
+        # Walk the file again looking for another place to comment out an include
+        this_line_number = 1
+        hash_if_level = 0
+
+        for line in f_read:
+            this_line_deleted = False
+
+            # Maintain view of how many #if or #ifdefs we are in.
+            # Don't want to remove any includes that may not be active in this build.
+            if line.startswith('#if'):
+                hash_if_level = hash_if_level + 1
+
+            if line.startswith('#endif'):
+                # Guard against unbalanced #endifs taking the level negative.
+                if hash_if_level > 0:
+                    hash_if_level = hash_if_level - 1
+
+            # Consider deleting this line if we haven't already reached it.
+            if (not have_deleted_line and (tested_line_number < this_line_number)):
+
+                # Test line for starting with #include, and eligible for deletion.
+                if line.startswith('#include ') and hash_if_level == 0 and line.find(module_header) == -1:
+                    # Check that this isn't a header file that is known to be unsafe to uninclude.
+                    allowed_to_delete = True
+                    for entry in includes_to_keep:
+                        if line.find(entry) != -1:
+                            allowed_to_delete = False
+                            stats.includes_to_keep_kept += 1
+                            break
+
+                    if allowed_to_delete:
+                        # OK, actually doing it.
+                        have_deleted_line = True
+                        this_line_deleted = True
+                        tested_line_number = this_line_number
+
+            # Write line to output file, unless this very one was deleted.
+            if not this_line_deleted:
+                f_write.write(line)
+                this_line_number = this_line_number + 1
+
+        # Close both files.
+        f_read.close()
+        f_write.close()
+
+        # If we commented out a line, try to build file without it.
+        if (have_deleted_line):
+            # Test a build. 0 means success, others are failures.
+            shutil.copy(filename, temp_filename)
+            shutil.copy(write_filename, filename)
+
+            # Try build
+            result = subprocess.call(make_command)
+            if result == 0:
+                print(bcolors.OKGREEN + bcolors.BOLD + 'Good build' + bcolors.ENDC)
+                # Line was eliminated so decrement line counter
+                tested_line_number = tested_line_number - 1
+                # Inc successes counter
+                stats.includes_deleted += 1
+                # Good - promote this version by leaving it here!
+
+                # Occasionally fails so delete this file each time.
+                # TODO: this is very particular to dissector target...
+                if sys.argv[1] == 'dissectors':
+                    os.remove(os.path.join(args.build_folder, 'vc100.pdb'))
+            else:
+                print(bcolors.FAIL + bcolors.BOLD + 'Bad build' + bcolors.ENDC)
+                # Never mind, go back to previous building version
+                shutil.copy(temp_filename, filename)
+
+            # Inc counter of tried
+            stats.includes_tested += 1
+
+        else:
+            # Reached the end of the file without making changes, so nothing doing.
+            # Delete temporary files
+            if os.path.isfile(temp_filename):
+                os.remove(temp_filename)
+            if os.path.isfile(write_filename):
+                os.remove(write_filename)
+            return
+
+# Test for whether the given file is under source control
+def under_version_control(filename):
+    # TODO: git command to see if under version control. Check retcode of 'git log <file>' ?
+    return True
+
+# Test for whether the given file was automatically generated.
+def generated_file(filename):
+    # Special known case.
+    if filename == 'register.c':
+        return True
+
+    # Open file
+    f_read = open(filename, 'r')
+    lines_tested = 0
+    for line in f_read:
+        # The comment to say that it's generated is near the top, so give up once
+        # we get a few lines down.
+        if lines_tested > 10:
+            f_read.close()
+            return False
+        if (line.find('Generated automatically') != -1 or
+            line.find('Generated Automatically') != -1 or
+            line.find('Autogenerated from') != -1 or
+            line.find('is autogenerated') != -1 or
+            line.find('automatically generated by Pidl') != -1 or
+            line.find('Created by: The Qt Meta Object Compiler') != -1 or
+            line.find('This file was generated') != -1 or
+            line.find('This filter was automatically generated') != -1 or
+            line.find('This file is auto generated, do not edit!') != -1):
+
+            f_read.close()
+            return True
+        lines_tested = lines_tested + 1
+
+    # OK, looks like a hand-written file!
+    f_read.close()
+    return False
+
+def isBuildableFile(filename):
+    return filename.endswith('.c') or filename.endswith('.cpp')
+
+
+def findFilesInFolder(folder, recursive=False):
+    dissector_files = []
+
+    if recursive:
+        for root, subfolders, files in os.walk(folder):
+            for f in files:
+                if should_exit:
+                    return
+                f = os.path.join(root, f)
+                dissector_files.append(f)
+    else:
+        for f in sorted(os.listdir(folder)):
+            if should_exit:
+                return
+            filename = os.path.join(folder, f)
+            dissector_files.append(filename)
+
+    return [x for x in filter(isBuildableFile, dissector_files)]
+
+
+######################################################################################
+# MAIN PROGRAM STARTS HERE
+######################################################################################
+
+# Work out which files we want to look at.
+files = []
+if args.file:
+    # Add specified file(s)
+    for f in args.file:
+        if not os.path.isfile(f):
+            print('Chosen file', f, 'does not exist.')
+            exit(1)
+        else:
+            files.append(f)
+elif args.folder:
+    # Add all files from a given folder.
+    folder = args.folder
+    if not os.path.isdir(folder):
+        print('Folder', folder, 'not found!')
+        exit(1)
+    # Find files from folder.
+    print('Looking for files in', folder)
+    files = findFilesInFolder(folder, recursive=False)
+
+
+# If first-file/last-file are given, will need to trim files accordingly
+# NB: list.index() raises ValueError rather than returning -1, so test membership first.
+if args.first_file:
+    if args.first_file not in files:
+        print('first-file entry', args.first_file, 'not in list of files to be checked')
+        exit(1)
+    else:
+        files = files[files.index(args.first_file):]
+
+if args.last_file:
+    if args.last_file not in files:
+        print('last-file entry', args.last_file, 'not in list of files to be checked')
+        exit(1)
+    else:
+        files = files[:files.index(args.last_file)+1]
+
+
+# Confirm that the build is currently passing, if not give up now.
+print(bcolors.OKBLUE, bcolors.BOLD,
+      'Doing an initial build to check we have a stable base.',
+      bcolors.ENDC)
+result = subprocess.call(make_command)
+if result != 0:
+    print(bcolors.FAIL, bcolors.BOLD, 'Initial build failed - give up now!!!!', bcolors.ENDC)
+    exit(-1)
+
+
+
+# Test each file.
+for filename in files:
+
+    # Want to filter out generated files that are not checked in.
+    if not generated_file(filename) and under_version_control(filename):
+        # OK, try this file
+        test_file(filename)
+
+        # Inc counter
+        stats.files_examined += 1
+    else:
+        if generated_file(filename):
+            reason = 'generated file...'
+        if not under_version_control(filename):
+            reason = 'not under source control'
+        print('Ignoring %s: %s' % (filename, reason))
+
+
+
+# Show summary stats of run
+stats.showSummary()
diff --git a/tools/detect_bad_alloc_patterns.py b/tools/detect_bad_alloc_patterns.py
new file mode 100644
index 0000000..a89ceb6
--- /dev/null
+++ b/tools/detect_bad_alloc_patterns.py
@@ -0,0 +1,120 @@
+"""
+Detect and replace instances of g_malloc() and wmem_alloc() with
+g_new() and wmem_new(), to improve the readability of Wireshark's code.
+ +Also detect and replace instances of +g_malloc(sizeof(struct myobj) * foo) +with: +g_new(struct myobj, foo) +to better prevent integer overflows + +SPDX-License-Identifier: MIT +""" + +import os +import re +import sys + +print_replacement_info = True + +patterns = [ +# Replace (myobj *)g_malloc(sizeof(myobj)) with g_new(myobj, 1) +# Replace (struct myobj *)g_malloc(sizeof(struct myobj)) with g_new(struct myobj, 1) +(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'g_new\2(\1, 1)'), + +# Replace (myobj *)g_malloc(sizeof(myobj) * foo) with g_new(myobj, foo) +# Replace (struct myobj *)g_malloc(sizeof(struct myobj) * foo) with g_new(struct myobj, foo) +(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*sizeof\s*\(\s*\1\s*\)\s*\*\s*([^\s]+)\s*\)'), r'g_new\2(\1, \3)'), + +# Replace (myobj *)g_malloc(foo * sizeof(myobj)) with g_new(myobj, foo) +# Replace (struct myobj *)g_malloc(foo * sizeof(struct myobj)) with g_new(struct myobj, foo) +(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*([^\s]+)\s*\*\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'g_new\2(\1, \3)'), + +# Replace (myobj *)wmem_alloc(wmem_file_scope(), sizeof(myobj)) with wmem_new(wmem_file_scope(), myobj) +# Replace (struct myobj *)wmem_alloc(wmem_file_scope(), sizeof(struct myobj)) with wmem_new(wmem_file_scope(), struct myobj) +(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*wmem_alloc(0?)\s*\(\s*([_a-z\(\)->]+),\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'wmem_new\2(\3, \1)'), +] + +def replace_file(fpath): + with open(fpath, 'r') as fh: + fdata_orig = fh.read() + fdata = fdata_orig + for pattern, replacewith in patterns: + fdata_out = pattern.sub(replacewith, fdata) + if print_replacement_info and fdata != fdata_out: + for match in re.finditer(pattern, fdata): + replacement = re.sub(pattern, replacewith, match.group(0)) + print("Bad malloc pattern in %s: Replace '%s' with '%s'" % (fpath, match.group(0), replacement)) + fdata = fdata_out + if fdata_out != fdata_orig: + with open(fpath, 'w') as fh: + fh.write(fdata_out) + return fdata_out + +def run_specific_files(fpaths): + for fpath in fpaths: + if not (fpath.endswith('.c') or fpath.endswith('.cpp')): + continue + replace_file(fpath) + +def run_recursive(root_dir): + for root, dirs, files in os.walk(root_dir): + fpaths = [] + for fname in files: + fpath = os.path.join(root, fname) + fpaths.append(fpath) + run_specific_files(fpaths) + +def test_replacements(): + test_string = """\ +(if_info_t*) g_malloc0(sizeof(if_info_t)) +(oui_info_t *)g_malloc(sizeof (oui_info_t)) +(guint8 *)g_malloc(16 * sizeof(guint8)) +(guint32 *)g_malloc(sizeof(guint32)*2) +(struct imf_field *)g_malloc (sizeof (struct imf_field)) +(rtspstat_t *)g_malloc( sizeof(rtspstat_t) ) +(proto_data_t *)wmem_alloc(scope, sizeof(proto_data_t)) +(giop_sub_handle_t *)wmem_alloc(wmem_epan_scope(), sizeof (giop_sub_handle_t)) +(mtp3_addr_pc_t *)wmem_alloc0(pinfo->pool, sizeof(mtp3_addr_pc_t)) +(dcerpc_bind_value *)wmem_alloc(wmem_file_scope(), sizeof (dcerpc_bind_value)) +(dcerpc_matched_key *)wmem_alloc(wmem_file_scope(), sizeof (dcerpc_matched_key)); +(struct smtp_session_state *)wmem_alloc0(wmem_file_scope(), sizeof(struct smtp_session_state)) +(struct batman_packet_v5 *)wmem_alloc(pinfo->pool, sizeof(struct batman_packet_v5)) +(struct knx_keyring_mca_keys*) wmem_alloc( wmem_epan_scope(), sizeof( struct knx_keyring_mca_keys ) ) +""" + expected_output = """\ +g_new0(if_info_t, 1) +g_new(oui_info_t, 1) +g_new(guint8, 
16)
+g_new(guint32, 2)
+g_new(struct imf_field, 1)
+g_new(rtspstat_t, 1)
+wmem_new(scope, proto_data_t)
+wmem_new(wmem_epan_scope(), giop_sub_handle_t)
+wmem_new0(pinfo->pool, mtp3_addr_pc_t)
+wmem_new(wmem_file_scope(), dcerpc_bind_value)
+wmem_new(wmem_file_scope(), dcerpc_matched_key);
+wmem_new0(wmem_file_scope(), struct smtp_session_state)
+wmem_new(pinfo->pool, struct batman_packet_v5)
+wmem_new(wmem_epan_scope(), struct knx_keyring_mca_keys)
+"""
+    output = test_string
+    for pattern, replacewith in patterns:
+        output = pattern.sub(replacewith, output)
+    assert(output == expected_output)
+
+def main():
+    test_replacements()
+    if len(sys.argv) == 2:
+        root_dir = sys.argv[1]
+        run_recursive(root_dir)
+    else:
+        fpaths = []
+        for line in sys.stdin:
+            line = line.strip()
+            if line:
+                fpaths.append(line)
+        run_specific_files(fpaths)
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/eti2wireshark.py b/tools/eti2wireshark.py
new file mode 100755
index 0000000..98fb291
--- /dev/null
+++ b/tools/eti2wireshark.py
@@ -0,0 +1,1166 @@
+#!/usr/bin/env python3
+
+# Generate Wireshark Dissectors for electronic trading/market data
+# protocols such as ETI/EOBI.
+#
+# Targets Wireshark 3.5 or later.
+#
+# SPDX-FileCopyrightText: © 2021 Georg Sauthoff
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+import argparse
+import itertools
+import re
+import sys
+import xml.etree.ElementTree as ET
+
+
+# inlined from upstream's etimodel.py
+
+import itertools
+
+def get_max_sizes(st, dt):
+    h = {}
+    for name, e in dt.items():
+        v = e.get('size', '0')
+        h[name] = int(v)
+    for name, e in itertools.chain((i for i in st.items() if i[1].get('type') != 'Message'),
+                                   (i for i in st.items() if i[1].get('type') == 'Message')):
+        s = 0
+        for m in e:
+            x = h.get(m.get('type'), 0)
+            s += x * int(m.get('cardinality'))
+        h[name] = s
+    return h
+
+def get_min_sizes(st, dt):
+    h = {}
+    for name, e in dt.items():
+        v = e.get('size', '0')
+        if e.get('variableSize') is None:
+            h[name] = int(v)
+        else:
+            h[name] = 0
+    for name, e in itertools.chain((i for i in st.items() if i[1].get('type') != 'Message'),
+                                   (i for i in st.items() if i[1].get('type') == 'Message')):
+        s = 0
+        for m in e:
+            x = h.get(m.get('type'), 0)
+            s += x * int(m.get('minCardinality', '1'))
+        h[name] = s
+    return h
+
+# end # inlined from upstream's etimodel.py
+
+
+def get_used_types(st):
+    xs = set(y.get('type') for _, x in st.items() for y in x)
+    return xs
+
+def get_data_types(d):
+    r = d.getroot()
+    x = r.find('DataTypes')
+    h = {}
+    for e in x:
+        h[e.get('name')] = e
+    return h
+
+def get_structs(d):
+    r = d.getroot()
+    x = r.find('Structures')
+    h = {}
+    for e in x:
+        h[e.get('name')] = e
+    return h
+
+def get_templates(st):
+    ts = []
+    for k, v in st.items():
+        if v.get('type') == 'Message':
+            ts.append((int(v.get('numericID')), k))
+    ts.sort()
+    return ts
+
+
+def gen_header(proto, desc, o=sys.stdout):
+    if proto.startswith('eti') or proto.startswith('xti'):
+        ph = '#include "packet-tcp.h" // tcp_dissect_pdus()'
+    else:
+        ph = '#include "packet-udp.h" // udp_dissect_pdus()'
+    print(f'''// auto-generated by Georg Sauthoff's eti2wireshark.py
+
+/* packet-eti.c
+ * Routines for {proto.upper()} dissection
+ * Copyright 2021, Georg Sauthoff
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+/*
+ * The {desc} ({proto.upper()}) is an electronic trading protocol
+ * that is used by a few exchanges (Eurex, Xetra, ...).
+ *
+ * It's a Length-Tag based protocol consisting of mostly fixed-size
+ * request/response messages.
+ *
+ * Links:
+ * https://en.wikipedia.org/wiki/List_of_electronic_trading_protocols#Europe
+ * https://github.com/gsauthof/python-eti#protocol-descriptions
+ * https://github.com/gsauthof/python-eti#protocol-introduction
+ *
+ */
+
+#include <config.h>
+
+
+#include <epan/packet.h>   // Should be first Wireshark include (other than config.h)
+{ph}
+#include <epan/expert.h>   // expert info
+
+#include <inttypes.h>
+#include <stdio.h>         // snprintf()
+
+
+/* Prototypes */
+/* (Required to prevent [-Wmissing-prototypes] warnings) */
+void proto_reg_handoff_{proto}(void);
+void proto_register_{proto}(void);
+''', file=o)
+
+
+def name2ident(name):
+    ll = True
+    xs = []
+    for i, c in enumerate(name):
+        if c.isupper():
+            if i > 0 and ll:
+                xs.append('_')
+            xs.append(c.lower())
+            ll = False
+        else:
+            xs.append(c)
+            ll = True
+    return ''.join(xs)
+
+def gen_enums(dt, ts, o=sys.stdout):
+    print('static const value_string template_id_vals[] = { // TemplateID', file=o)
+    min_tid, max_tid = ts[0][0], ts[-1][0]
+    xs = [None] * (max_tid - min_tid + 1)
+    for tid, name in ts:
+        xs[tid-min_tid] = name
+    for i, name in enumerate(xs):
+        if name is None:
+            print(f' {{ {min_tid + i}, "Unknown" }},', file=o)
+        else:
+            print(f' {{ {min_tid + i}, "{name}" }},', file=o)
+    print(''' { 0, NULL }
+};
+static value_string_ext template_id_vals_ext = VALUE_STRING_EXT_INIT(template_id_vals);''', file=o)
+    name2access = { 'TemplateID': '&template_id_vals_ext' }
+
+    dedup = {}
+    for name, e in dt.items():
+        vs = [ (x.get('value'), x.get('name')) for x in e.findall('ValidValue') ]
+        if not vs:
+            continue
+        if e.get('rootType') == 'String' and e.get('size') != '1':
+            continue
+
+        ident = name2ident(name)
+
+        nv = e.get('noValue')
+        ws = [ v[0] for v in vs ]
+        if nv not in ws:
+            if nv.startswith('0x0') and e.get('rootType') == 'String':
+                nv = '\0'
+            vs.append( (nv, 'NO_VALUE') )
+
+        if e.get('type') == 'int':
+            vs.sort(key = lambda x : int(x[0], 0))
+        else:
+            vs.sort(key = lambda x : ord(x[0]))
+        s = '-'.join(f'{v[0]}:{v[1]}' for v in vs)
+        x = dedup.get(s)
+        if x is None:
+            dedup[s] = name
+        else:
+            name2access[name] = name2access[x]
+            print(f'// {name} aliased by {x}', file=o)
+            continue
+
+        print(f'static const value_string {ident}_vals[] = {{ // {name}', file=o)
+        for i, v in enumerate(vs):
+            if e.get('rootType') == 'String':
+                k = f"'{v[0]}'" if ord(v[0]) != 0 else '0'
+                print(f''' {{ {k}, "{v[1]}" }},''', file=o)
+            else:
+                print(f' {{ {v[0]}, "{v[1]}" }},', file=o)
+        print(''' { 0, NULL }
+};''', file=o)
+
+        if len(vs) > 7:
+            print(f'static value_string_ext {ident}_vals_ext = VALUE_STRING_EXT_INIT({ident}_vals);', file=o)
+            name2access[name] = f'&{ident}_vals_ext'
+        else:
+            name2access[name] = f'VALS({ident}_vals)'
+
+    return name2access
+
+
+def get_fields(st, dt):
+    seen = {}
+    for name, e in st.items():
+        for m in e:
+            t = dt.get(m.get('type'))
+            if is_padding(t):
+                continue
+            if not (is_int(t) or is_fixed_string(t) or is_var_string(t)):
+                continue
+            name = m.get('name')
+            if name in seen:
+                if seen[name] != t:
+                    raise RuntimeError(f'Mismatching type for: {name}')
+            else:
+                seen[name] = t
+    vs = list(seen.items())
+    vs.sort()
+    return vs
+
+def gen_field_handles(st, dt, proto, o=sys.stdout):
+    print(f'''static expert_field ei_{proto}_counter_overflow = EI_INIT;
+static expert_field ei_{proto}_invalid_template = EI_INIT;
+static expert_field ei_{proto}_invalid_length = EI_INIT;''', file=o)
+    if not proto.startswith('eobi'):
+        print(f'static expert_field ei_{proto}_unaligned = 
EI_INIT;', file=o) + print(f'''static expert_field ei_{proto}_missing = EI_INIT; +static expert_field ei_{proto}_overused = EI_INIT; +''', file=o) + + vs = get_fields(st, dt) + s = ', '.join('-1' for i in range(len(vs))) + print(f'static int hf_{proto}[] = {{ {s} }};', file=o) + print(f'''static int hf_{proto}_dscp_exec_summary = -1; +static int hf_{proto}_dscp_improved = -1; +static int hf_{proto}_dscp_widened = -1;''', file=o) + print('enum Field_Handle_Index {', file=o) + for i, (name, _) in enumerate(vs): + c = ' ' if i == 0 else ',' + print(f' {c} {name.upper()}_FH_IDX', file=o) + print('};', file=o) + +def type2ft(t): + if is_timestamp_ns(t): + return 'FT_ABSOLUTE_TIME' + if is_dscp(t): + return 'FT_UINT8' + if is_int(t): + if t.get('rootType') == 'String': + return 'FT_CHAR' + u = 'U' if is_unsigned(t) else '' + if t.get('size') is None: + raise RuntimeError(f'None size: {t.get("name")}') + size = int(t.get('size')) * 8 + return f'FT_{u}INT{size}' + if is_fixed_string(t) or is_var_string(t): + # NB: technically, ETI fixed-strings are blank-padded, + # unless they are marked NO_VALUE, in that case + # the first byte is zero, followed by unspecified content. + # Also, some fixed-strings are zero-terminated, where again + # the bytes following the terminator are unspecified. + return 'FT_STRINGZTRUNC' + raise RuntimeError('unexpected type') + +def type2enc(t): + if is_timestamp_ns(t): + return 'ABSOLUTE_TIME_UTC' + if is_dscp(t): + return 'BASE_HEX' + if is_int(t): + if t.get('rootType') == 'String': + # NB: basically only used when enum and value is unknown + return 'BASE_HEX' + else: + return 'BASE_DEC' + if is_fixed_string(t) or is_var_string(t): + # previously 'STR_ASCII', which was removed upstream + # cf. 19dcb725b61e384f665ad4b955f3b78f63e626d9 + return 'BASE_NONE' + raise RuntimeError('unexpected type') + +def gen_field_info(st, dt, n2enum, proto='eti', o=sys.stdout): + print(' static hf_register_info hf[] ={', file=o) + vs = get_fields(st, dt) + for i, (name, t) in enumerate(vs): + c = ' ' if i == 0 else ',' + ft = type2ft(t) + enc = type2enc(t) + if is_enum(t) and not is_dscp(t): + vals = n2enum[t.get('name')] + if vals.startswith('&'): + extra_enc = '| BASE_EXT_STRING' + else: + extra_enc = '' + else: + vals = 'NULL' + extra_enc = '' + print(f''' {c} {{ &hf_{proto}[{name.upper()}_FH_IDX], + {{ "{name}", "{proto}.{name.lower()}", + {ft}, {enc}{extra_enc}, {vals}, 0x0, + NULL, HFILL }} + }}''', file=o) + print(f''' , {{ &hf_{proto}_dscp_exec_summary, + {{ "DSCP_ExecSummary", "{proto}.dscp_execsummary", + FT_BOOLEAN, 8, NULL, 0x10, + NULL, HFILL }} + }} + , {{ &hf_{proto}_dscp_improved, + {{ "DSCP_Improved", "{proto}.dscp_improved", + FT_BOOLEAN, 8, NULL, 0x20, + NULL, HFILL }} + }} + , {{ &hf_{proto}_dscp_widened, + {{ "DSCP_Widened", "{proto}.dscp_widened", + FT_BOOLEAN, 8, NULL, 0x40, + NULL, HFILL }} + }}''', file=o) + print(' };', file=o) + + +def gen_subtree_handles(st, proto='eti', o=sys.stdout): + ns = [ name for name, e in st.items() if e.get('type') != 'Message' ] + ns.sort() + s = ', '.join('-1' for i in range(len(ns) + 1)) + h = dict( (n, i) for i, n in enumerate(ns, 1) ) + print(f'static gint ett_{proto}[] = {{ {s} }};', file=o) + print(f'static gint ett_{proto}_dscp = -1;', file=o) + return h + + +def gen_subtree_array(st, proto='eti', o=sys.stdout): + n = sum(1 for name, e in st.items() if e.get('type') != 'Message') + n += 1 + s = ', '.join(f'&ett_{proto}[{i}]' for i in range(n)) + print(f' static gint * const ett[] = {{ {s}, &ett_{proto}_dscp }};', file=o) + + 
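+# A minimal, self-contained sketch (not called by the generator) of the string
+# packing that gen_fields_table() below relies on: all struct names are written
+# into one C string literal, separated by '\0', and the generated dissector then
+# addresses each name as &struct_names[offset]. The names used here are
+# hypothetical examples, not taken from a real ETI schema.
+def _demo_struct_name_table(names=('MessageHeaderComp', 'OrderDetailsComp')):
+    name2off = {}
+    short_names = []
+    off = 0
+    for name in names:
+        s = name[:-4]               # strip the 'Comp' suffix, as gen_fields_table() does
+        name2off[name] = off        # byte offset of this name in the packed table
+        off += len(s) + 1           # + 1 for the '\0' separator
+        short_names.append(s)
+    packed = '\\0'.join(short_names)   # text of the C literal, e.g. "MessageHeader\0OrderDetails"
+    return packed, name2off            # offsets here: {'MessageHeaderComp': 0, 'OrderDetailsComp': 14}
+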
+def gen_fields_table(st, dt, sh, o=sys.stdout): + name2off = {} + off = 0 + names = [] + for name, e in st.items(): + if e.get('type') == 'Message': + continue + if name.endswith('Comp'): + s = name[:-4] + name2off[name] = off + off += len(s) + 1 + names.append(s) + s = '\\0'.join(names) + print(f' static const char struct_names[] = "{s}";', file=o) + + xs = [ x for x in st.items() if x[1].get('type') != 'Message' ] + xs += [ x for x in st.items() if x[1].get('type') == 'Message' ] + print(' static const struct ETI_Field fields[] = {', file=o) + i = 0 + fields2idx = {} + for name, e in xs: + fields2idx[name] = i + print(f' // {name}@{i}', file=o) + counters = {} + cnt = 0 + for m in e: + t = dt.get(m.get('type')) + c = ' ' if i == 0 else ',' + typ = '' + size = int(t.get('size')) if t is not None else 0 + rep = '' + fh = f'{m.get("name").upper()}_FH_IDX' + sub = '' + if is_padding(t): + print(f' {c} {{ ETI_PADDING, 0, {size}, 0, 0 }}', file=o) + elif is_fixed_point(t): + if size != 8: + raise RuntimeError('only supporting 8 byte fixed point') + fraction = int(t.get('precision')) + if fraction > 16: + raise RuntimeError('unusual high precisio in fixed point') + print(f' {c} {{ ETI_FIXED_POINT, {fraction}, {size}, {fh}, 0 }}', file=o) + elif is_timestamp_ns(t): + if size != 8: + raise RuntimeError('only supporting timestamps') + print(f' {c} {{ ETI_TIMESTAMP_NS, 0, {size}, {fh}, 0 }}', file=o) + elif is_dscp(t): + print(f' {c} {{ ETI_DSCP, 0, {size}, {fh}, 0 }}', file=o) + elif is_int(t): + u = 'U' if is_unsigned(t) else '' + if t.get('rootType') == 'String': + typ = 'ETI_CHAR' + else: + typ = f'ETI_{u}INT' + if is_enum(t): + typ += '_ENUM' + if t.get('type') == 'Counter': + counters[m.get('name')] = cnt + suf = f' // <- counter@{cnt}' + if cnt > 7: + raise RuntimeError(f'too many counters in message: {name}') + rep = cnt + cnt += 1 + if typ != 'ETI_UINT': + raise RuntimeError('only unsigned counters supported') + if size > 2: + raise RuntimeError('only smaller counters supported') + typ = 'ETI_COUNTER' + ett_idx = t.get('maxValue') + else: + rep = 0 + suf = '' + ett_idx = 0 + print(f' {c} {{ {typ}, {rep}, {size}, {fh}, {ett_idx} }}{suf}', file=o) + elif is_fixed_string(t): + print(f' {c} {{ ETI_STRING, 0, {size}, {fh}, 0 }}', file=o) + elif is_var_string(t): + k = m.get('counter') + x = counters[k] + print(f' {c} {{ ETI_VAR_STRING, {x}, {size}, {fh}, 0 }}', file=o) + else: + a = m.get('type') + fields_idx = fields2idx[a] + k = m.get('counter') + if k: + counter_off = counters[k] + typ = 'ETI_VAR_STRUCT' + else: + counter_off = 0 + typ = 'ETI_STRUCT' + names_off = name2off[m.get('type')] + ett_idx = sh[a] + print(f' {c} {{ {typ}, {counter_off}, {names_off}, {fields_idx}, {ett_idx} }} // {m.get("name")}', file=o) + i += 1 + print(' , { ETI_EOF, 0, 0, 0, 0 }', file=o) + i += 1 + print(' };', file=o) + return fields2idx + +def gen_template_table(min_templateid, n, ts, fields2idx, o=sys.stdout): + xs = [ '-1' ] * n + for tid, name in ts: + xs[tid - min_templateid] = f'{fields2idx[name]} /* {name} */' + s = '\n , '.join(xs) + print(f' static const int16_t tid2fidx[] = {{\n {s}\n }};', file=o) + +def gen_sizes_table(min_templateid, n, st, dt, ts, proto, o=sys.stdout): + is_eobi = proto.startswith('eobi') + xs = [ '0' if is_eobi else '{ 0, 0}' ] * n + min_s = get_min_sizes(st, dt) + max_s = get_max_sizes(st, dt) + if is_eobi: + for tid, name in ts: + xs[tid - min_templateid] = f'{max_s[name]} /* {name} */' + else: + for tid, name in ts: + xs[tid - min_templateid] = f'{{ {min_s[name]}, 
{max_s[name]} }} /* {name} */' + s = '\n , '.join(xs) + if is_eobi: + print(f' static const uint32_t tid2size[] = {{\n {s}\n }};', file=o) + else: + print(f' static const uint32_t tid2size[{n}][2] = {{\n {s}\n }};', file=o) + + +# yes, usage attribute of single fields depends on the context +# otherwise, we could just put the information into the fields table +# Example: EOBI.PacketHeader.MessageHeader.MsgSeqNum is unused whereas +# it's required in the EOBI ExecutionSummary and other messages +def gen_usage_table(min_templateid, n, ts, ams, o=sys.stdout): + def map_usage(m): + x = m.get('usage') + if x == 'mandatory': + return 0 + elif x == 'optional': + return 1 + elif x == 'unused': + return 2 + else: + raise RuntimeError(f'unknown usage value: {x}') + + h = {} + i = 0 + print(' static const unsigned char usages[] = {', file=o) + for am in ams: + name = am.get("name") + tid = int(am.get('numericID')) + print(f' // {name}', file=o) + h[tid] = i + for e in am: + if e.tag == 'Group': + print(f' //// {e.get("type")}', file=o) + for m in e: + if m.get('hidden') == 'true' or pad_re.match(m.get('name')): + continue + k = ' ' if i == 0 else ',' + print(f' {k} {map_usage(m)} // {m.get("name")}#{i}', file=o) + i += 1 + print(' ///', file=o) + else: + if e.get('hidden') == 'true' or pad_re.match(e.get('name')): + continue + k = ' ' if i == 0 else ',' + print(f' {k} {map_usage(e)} // {e.get("name")}#{i}', file=o) + i += 1 + + # NB: the last element is a filler to simplify the out-of-bounds check + # (cf. the uidx DISSECTOR_ASSER_CMPUINIT() before the switch statement) + # when the ETI_EOF of the message whose usage information comes last + # is reached + print(f' , 0 // filler', file=o) + print(' };', file=o) + xs = [ '-1' ] * n + t2n = dict(ts) + for tid, uidx in h.items(): + name = t2n[tid] + xs[tid - min_templateid] = f'{uidx} /* {name} */' + s = '\n , '.join(xs) + print(f' static const int16_t tid2uidx[] = {{\n {s}\n }};', file=o) + + +def gen_dscp_table(proto, o=sys.stdout): + print(f''' static int * const dscp_bits[] = {{ + &hf_{proto}_dscp_exec_summary, + &hf_{proto}_dscp_improved, + &hf_{proto}_dscp_widened, + NULL + }};''', file=o) + + +def mk_int_case(size, signed, proto): + signed_str = 'i' if signed else '' + unsigned_str = '' if signed else 'u' + fmt_str = 'i' if signed else 'u' + if size == 2: + size_str = 's' + elif size == 4: + size_str = 'l' + elif size == 8: + size_str = '64' + type_str = f'g{unsigned_str}int{size * 8}' + no_value_str = f'INT{size * 8}_MIN' if signed else f'UINT{size * 8}_MAX' + pt_size = '64' if size == 8 else '' + if signed: + hex_str = '0x80' + '00' * (size - 1) + else: + hex_str = '0x' + 'ff' * size + if size == 1: + fn = f'tvb_get_g{unsigned_str}int8' + else: + fn = f'tvb_get_letoh{signed_str}{size_str}' + s = f'''case {size}: + {{ + {type_str} x = {fn}(tvb, off); + if (x == {no_value_str}) {{ + proto_item *e = proto_tree_add_{unsigned_str}int{pt_size}_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE ({hex_str})"); + if (!usages[uidx]) + expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing"); + }} else {{ + proto_item *e = proto_tree_add_{unsigned_str}int{pt_size}_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRI{fmt_str}{size * 8}, x); + if (usages[uidx] == 2) + expert_add_info_format(pinfo, e, &ei_{proto}_overused, "unused value is set"); + }} + }} + break;''' + return s + + +def gen_dissect_structs(o=sys.stdout): + print(''' 
+enum ETI_Type { + ETI_EOF, + ETI_PADDING, + ETI_UINT, + ETI_INT, + ETI_UINT_ENUM, + ETI_INT_ENUM, + ETI_COUNTER, + ETI_FIXED_POINT, + ETI_TIMESTAMP_NS, + ETI_CHAR, + ETI_STRING, + ETI_VAR_STRING, + ETI_STRUCT, + ETI_VAR_STRUCT, + ETI_DSCP +}; + +struct ETI_Field { + uint8_t type; + uint8_t counter_off; // offset into counter array + // if ETI_COUNTER => storage + // if ETI_VAR_STRING or ETI_VAR_STRUCT => load + // to get length or repeat count + // if ETI_FIXED_POINT: #fractional digits + uint16_t size; // or offset into struct_names if ETI_STRUCT/ETI_VAR_STRUCT + uint16_t field_handle_idx; // or index into fields array if ETI_STRUCT/ETI_VAR_STRUT + uint16_t ett_idx; // index into ett array if ETI_STRUCT/ETI_VAR_STRUCT + // or max value if ETI_COUNTER +}; +''', file=o) + +def gen_dissect_fn(st, dt, ts, sh, ams, proto, o=sys.stdout): + if proto.startswith('eti') or proto.startswith('xti'): + bl_fn = 'tvb_get_letohl' + template_off = 4 + else: + bl_fn = 'tvb_get_letohs' + template_off = 2 + print(f'''/* This method dissects fully reassembled messages */ +static int +dissect_{proto}_message(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_) +{{ + col_set_str(pinfo->cinfo, COL_PROTOCOL, "{proto.upper()}"); + col_clear(pinfo->cinfo, COL_INFO); + guint16 templateid = tvb_get_letohs(tvb, {template_off}); + const char *template_str = val_to_str_ext(templateid, &template_id_vals_ext, "Unknown {proto.upper()} template: 0x%04x"); + col_add_fstr(pinfo->cinfo, COL_INFO, "%s", template_str); + + /* create display subtree for the protocol */ + proto_item *ti = proto_tree_add_item(tree, proto_{proto}, tvb, 0, -1, ENC_NA); + guint32 bodylen= {bl_fn}(tvb, 0); + proto_item_append_text(ti, ", %s (%" PRIu16 "), BodyLen: %u", template_str, templateid, bodylen); + proto_tree *root = proto_item_add_subtree(ti, ett_{proto}[0]); +''', file=o) + + min_templateid = ts[0][0] + max_templateid = ts[-1][0] + n = max_templateid - min_templateid + 1 + + fields2idx = gen_fields_table(st, dt, sh, o) + gen_template_table(min_templateid, n, ts, fields2idx, o) + gen_sizes_table(min_templateid, n, st, dt, ts, proto, o) + gen_usage_table(min_templateid, n, ts, ams, o) + gen_dscp_table(proto, o) + + print(f''' if (templateid < {min_templateid} || templateid > {max_templateid}) {{ + proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_template, tvb, {template_off}, 4, + "Template ID out of range: %" PRIu16, templateid); + return tvb_captured_length(tvb); + }} + int fidx = tid2fidx[templateid - {min_templateid}]; + if (fidx == -1) {{ + proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_template, tvb, {template_off}, 4, + "Unallocated Template ID: %" PRIu16, templateid); + return tvb_captured_length(tvb); + }}''', file=o) + + if proto.startswith('eobi'): + print(f''' if (bodylen != tid2size[templateid - {min_templateid}]) {{ + proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off}, + "Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32, bodylen, tid2size[templateid - {min_templateid}]); + }}''', file=o) + else: + print(f''' if (bodylen < tid2size[templateid - {min_templateid}][0] || bodylen > tid2size[templateid - {min_templateid}][1]) {{ + if (tid2size[templateid - {min_templateid}][0] != tid2size[templateid - {min_templateid}][1]) + proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off}, + "Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32 "..%" PRIu32, bodylen, tid2size[templateid - 
{min_templateid}][0], tid2size[templateid - {min_templateid}][1]); + else + proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off}, + "Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32, bodylen, tid2size[templateid - {min_templateid}][0]); + }} + if (bodylen % 8) + proto_tree_add_expert_format(root, pinfo, &ei_{proto}_unaligned, tvb, 0, {template_off}, + "BodyLen value of %" PRIu32 " is not divisible by 8", bodylen); +''', file=o) + + print(f''' int uidx = tid2uidx[templateid - {min_templateid}]; + DISSECTOR_ASSERT_CMPINT(uidx, >=, 0); + DISSECTOR_ASSERT_CMPUINT(((size_t)uidx), <, (sizeof usages / sizeof usages[0])); +''', file=o) + + print(f''' int old_fidx = 0; + int old_uidx = 0; + unsigned top = 1; + unsigned counter[8] = {{0}}; + unsigned off = 0; + unsigned struct_off = 0; + unsigned repeats = 0; + proto_tree *t = root; + while (top) {{ + DISSECTOR_ASSERT_CMPINT(fidx, >=, 0); + DISSECTOR_ASSERT_CMPUINT(((size_t)fidx), <, (sizeof fields / sizeof fields[0])); + DISSECTOR_ASSERT_CMPINT(uidx, >=, 0); + DISSECTOR_ASSERT_CMPUINT(((size_t)uidx), <, (sizeof usages / sizeof usages[0])); + + switch (fields[fidx].type) {{ + case ETI_EOF: + DISSECTOR_ASSERT_CMPUINT(top, >=, 1); + DISSECTOR_ASSERT_CMPUINT(top, <=, 2); + if (t != root) + proto_item_set_len(t, off - struct_off); + if (repeats) {{ + --repeats; + fidx = fields[old_fidx].field_handle_idx; + uidx = old_uidx; + t = proto_tree_add_subtree(root, tvb, off, -1, ett_{proto}[fields[old_fidx].ett_idx], NULL, &struct_names[fields[old_fidx].size]); + struct_off = off; + }} else {{ + fidx = old_fidx + 1; + t = root; + --top; + }} + break; + case ETI_VAR_STRUCT: + case ETI_STRUCT: + DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]); + repeats = fields[fidx].type == ETI_VAR_STRUCT ? 
counter[fields[fidx].counter_off] : 1; + if (repeats) {{ + --repeats; + t = proto_tree_add_subtree(root, tvb, off, -1, ett_{proto}[fields[fidx].ett_idx], NULL, &struct_names[fields[fidx].size]); + struct_off = off; + old_fidx = fidx; + old_uidx = uidx; + fidx = fields[fidx].field_handle_idx; + DISSECTOR_ASSERT_CMPUINT(top, ==, 1); + ++top; + }} else {{ + ++fidx; + }} + break; + case ETI_PADDING: + off += fields[fidx].size; + ++fidx; + break; + case ETI_CHAR: + proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_ASCII); + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_STRING: + {{ + guint8 c = tvb_get_guint8(tvb, off); + if (c) + proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_ASCII); + else {{ + proto_item *e = proto_tree_add_string(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, "NO_VALUE ('0x00...')"); + if (!usages[uidx]) + expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing"); + }} + }} + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_VAR_STRING: + DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]); + proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, counter[fields[fidx].counter_off], ENC_ASCII); + off += counter[fields[fidx].counter_off]; + ++fidx; + ++uidx; + break; + case ETI_COUNTER: + DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]); + DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, <=, 2); + {{ + switch (fields[fidx].size) {{ + case 1: + {{ + guint8 x = tvb_get_guint8(tvb, off); + if (x == UINT8_MAX) {{ + proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0xff)"); + counter[fields[fidx].counter_off] = 0; + }} else {{ + proto_item *e = proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRIu8, x); + if (x > fields[fidx].ett_idx) {{ + counter[fields[fidx].counter_off] = fields[fidx].ett_idx; + expert_add_info_format(pinfo, e, &ei_{proto}_counter_overflow, "Counter overflow: %" PRIu8 " > %" PRIu16, x, fields[fidx].ett_idx); + }} else {{ + counter[fields[fidx].counter_off] = x; + }} + }} + }} + break; + case 2: + {{ + guint16 x = tvb_get_letohs(tvb, off); + if (x == UINT16_MAX) {{ + proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0xffff)"); + counter[fields[fidx].counter_off] = 0; + }} else {{ + proto_item *e = proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRIu16, x); + if (x > fields[fidx].ett_idx) {{ + counter[fields[fidx].counter_off] = fields[fidx].ett_idx; + expert_add_info_format(pinfo, e, &ei_{proto}_counter_overflow, "Counter overflow: %" PRIu16 " > %" PRIu16, x, fields[fidx].ett_idx); + }} else {{ + counter[fields[fidx].counter_off] = x; + }} + }} + }} + break; + }} + }} + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_UINT: + switch (fields[fidx].size) {{ + {mk_int_case(1, False, proto)} + {mk_int_case(2, False, proto)} + {mk_int_case(4, False, proto)} + {mk_int_case(8, False, proto)} + }} + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_INT: + switch (fields[fidx].size) {{ + {mk_int_case(1, True, proto)} + {mk_int_case(2, True, proto)} + {mk_int_case(4, True, proto)} + 
{mk_int_case(8, True, proto)} + }} + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_UINT_ENUM: + case ETI_INT_ENUM: + proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_LITTLE_ENDIAN); + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_FIXED_POINT: + DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 8); + DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, >, 0); + DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <=, 16); + {{ + gint64 x = tvb_get_letohi64(tvb, off); + if (x == INT64_MIN) {{ + proto_item *e = proto_tree_add_int64_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0x8000000000000000)"); + if (!usages[uidx]) + expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing"); + }} else {{ + unsigned slack = fields[fidx].counter_off + 1; + if (x < 0) + slack += 1; + char s[21]; + int n = snprintf(s, sizeof s, "%0*" PRIi64, slack, x); + DISSECTOR_ASSERT_CMPUINT(n, >, 0); + unsigned k = n - fields[fidx].counter_off; + proto_tree_add_int64_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%.*s.%s", k, s, s + k); + }} + }} + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_TIMESTAMP_NS: + DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 8); + proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_LITTLE_ENDIAN | ENC_TIME_NSECS); + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + case ETI_DSCP: + DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 1); + proto_tree_add_bitmask(t, tvb, off, hf_{proto}[fields[fidx].field_handle_idx], ett_{proto}_dscp, dscp_bits, ENC_LITTLE_ENDIAN); + off += fields[fidx].size; + ++fidx; + ++uidx; + break; + }} + }} +''', file=o) + + print(''' return tvb_captured_length(tvb); +} +''', file=o) + + print(f'''/* determine PDU length of protocol {proto.upper()} */ +static guint +get_{proto}_message_len(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data _U_) +{{ + return (guint){bl_fn}(tvb, offset); +}} +''', file=o) + + if proto.startswith('eobi'): + print(f'''static int +dissect_{proto}(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, + void *data) +{{ + return udp_dissect_pdus(tvb, pinfo, tree, 4, NULL, + get_{proto}_message_len, dissect_{proto}_message, data); +}} +''', file=o) + else: + print(f'''static int +dissect_{proto}(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, + void *data) +{{ + tcp_dissect_pdus(tvb, pinfo, tree, TRUE, 4 /* bytes to read for bodylen */, + get_{proto}_message_len, dissect_{proto}_message, data); + return tvb_captured_length(tvb); +}} +''', file=o) + +def gen_register_fn(st, dt, n2enum, proto, desc, o=sys.stdout): + print(f'''void +proto_register_{proto}(void) +{{''', file=o) + gen_field_info(st, dt, n2enum, proto, o) + + print(f''' static ei_register_info ei[] = {{ + {{ + &ei_{proto}_counter_overflow, + {{ "{proto}.counter_overflow", PI_PROTOCOL, PI_WARN, "Counter Overflow", EXPFILL }} + }}, + {{ + &ei_{proto}_invalid_template, + {{ "{proto}.invalid_template", PI_PROTOCOL, PI_ERROR, "Invalid Template ID", EXPFILL }} + }}, + {{ + &ei_{proto}_invalid_length, + {{ "{proto}.invalid_length", PI_PROTOCOL, PI_ERROR, "Invalid Body Length", EXPFILL }} + }},''', file=o) + if not proto.startswith('eobi'): + print(f''' {{ + &ei_{proto}_unaligned, + {{ "{proto}.unaligned", PI_PROTOCOL, PI_ERROR, "A Body Length not divisible by 8 leads to unaligned followup 
messages", EXPFILL }} + }},''', file=o) + print(f''' {{ + &ei_{proto}_missing, + {{ "{proto}.missing", PI_PROTOCOL, PI_WARN, "A required value is missing", EXPFILL }} + }}, + {{ + &ei_{proto}_overused, + {{ "{proto}.overused", PI_PROTOCOL, PI_WARN, "An unused value is set", EXPFILL }} + }} + }};''', file=o) + + print(f''' proto_{proto} = proto_register_protocol("{desc}", + "{proto.upper()}", "{proto}");''', file=o) + + print(f''' expert_module_t *expert_{proto} = expert_register_protocol(proto_{proto}); + expert_register_field_array(expert_{proto}, ei, array_length(ei));''', file=o) + + print(f' proto_register_field_array(proto_{proto}, hf, array_length(hf));', + file=o) + gen_subtree_array(st, proto, o) + print(' proto_register_subtree_array(ett, array_length(ett));', file=o) + if proto.startswith('eobi'): + print(f' proto_disable_by_default(proto_{proto});', file=o) + print('}\n', file=o) + + +def gen_handoff_fn(proto, o=sys.stdout): + print(f'''void +proto_reg_handoff_{proto}(void) +{{ + dissector_handle_t {proto}_handle = create_dissector_handle(dissect_{proto}, + proto_{proto}); + + // cf. N7 Network Access Guide, e.g. + // https://www.xetra.com/xetra-en/technology/t7/system-documentation/release10-0/Release-10.0-2692700?frag=2692724 + // https://www.xetra.com/resource/blob/2762078/388b727972b5122945eedf0e63c36920/data/N7-Network-Access-Guide-v2.0.59.pdf + +''', file=o) + if proto.startswith('eti'): + print(f''' // NB: can only be called once for a port/handle pair ... + // dissector_add_uint_with_preference("tcp.port", 19006 /* LF PROD */, eti_handle); + + dissector_add_uint("tcp.port", 19006 /* LF PROD */, {proto}_handle); + dissector_add_uint("tcp.port", 19043 /* PS PROD */, {proto}_handle); + dissector_add_uint("tcp.port", 19506 /* LF SIMU */, {proto}_handle); + dissector_add_uint("tcp.port", 19543 /* PS SIMU */, {proto}_handle);''', file=o) + elif proto.startswith('xti'): + print(f''' // NB: unfortunately, Cash-ETI shares the same ports as Derivatives-ETI ... + // We thus can't really add a well-know port for XTI. + // Use Wireshark's `Decode As...` or tshark's `-d tcp.port=19043,xti` feature + // to switch from ETI to XTI dissection. 
+ dissector_add_uint_with_preference("tcp.port", 19042 /* dummy */, {proto}_handle);''', file=o) + else: + print(f''' static const int ports[] = {{ + 59000, // Snapshot EUREX US-allowed PROD + 59001, // Incremental EUREX US-allowed PROD + 59032, // Snapshot EUREX US-restricted PROD + 59033, // Incremental EUREX US-restricted PROD + 59500, // Snapshot EUREX US-allowed SIMU + 59501, // Incremental EUREX US-allowed SIMU + 59532, // Snapshot EUREX US-restricted SIMU + 59533, // Incremental EUREX US-restricted SIMU + + 57000, // Snapshot FX US-allowed PROD + 57001, // Incremental FX US-allowed PROD + 57032, // Snapshot FX US-restricted PROD + 57033, // Incremental FX US-restricted PROD + 57500, // Snapshot FX US-allowed SIMU + 57501, // Incremental FX US-allowed SIMU + 57532, // Snapshot FX US-restricted SIMU + 57533, // Incremental FX US-restricted SIMU + + 59000, // Snapshot Xetra PROD + 59001, // Incremental Xetra PROD + 59500, // Snapshot Xetra SIMU + 59501, // Incremental Xetra SIMU + + 56000, // Snapshot Boerse Frankfurt PROD + 56001, // Incremental Boerse Frankfurt PROD + 56500, // Snapshot Boerse Frankfurt SIMU + 56501 // Incremental Boerse Frankfurt SIMU + }}; + for (unsigned i = 0; i < sizeof ports / sizeof ports[0]; ++i) + dissector_add_uint("udp.port", ports[i], {proto}_handle);''', file=o) + print('}', file=o) + +def is_int(t): + if t is not None: + r = t.get('rootType') + return r in ('int', 'floatDecimal') or (r == 'String' and t.get('size') == '1') + return False + +def is_enum(t): + if t is not None: + r = t.get('rootType') + if r == 'int' or (r == 'String' and t.get('size') == '1'): + return t.find('ValidValue') is not None + return False + +def is_fixed_point(t): + return t is not None and t.get('rootType') == 'floatDecimal' + +def is_timestamp_ns(t): + return t is not None and t.get('type') == 'UTCTimestamp' + +def is_dscp(t): + return t is not None and t.get('name') == 'DSCP' + +pad_re = re.compile('Pad[1-9]') + +def is_padding(t): + if t is not None: + return t.get('rootType') == 'String' and pad_re.match(t.get('name')) + return False + +def is_fixed_string(t): + if t is not None: + return t.get('rootType') in ('String', 'data') and not t.get('variableSize') + return False + +def is_var_string(t): + if t is not None: + return t.get('rootType') in ('String', 'data') and t.get('variableSize') is not None + return False + +def is_unsigned(t): + v = t.get('minValue') + return v is not None and not v.startswith('-') + +def is_counter(t): + return t.get('type') == 'Counter' + +def type_to_fmt(t): + if is_padding(t): + return f'{t.get("size")}x' + elif is_int(t): + n = int(t.get('size')) + if n == 1: + return 'B' + else: + if n == 2: + c = 'h' + elif n == 4: + c = 'i' + elif n == 8: + c = 'q' + else: + raise ValueError(f'unknown int size {n}') + if is_unsigned(t): + c = c.upper() + return c + elif is_fixed_string(t): + return f'{t.get("size")}s' + else: + return '?' 
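+
+# A small, self-contained illustration (not used by the generator) of what the
+# struct-module format characters produced by type_to_fmt() below are good for:
+# concatenated per message, they can decode a fixed-size little-endian record
+# with the standard library. The field layout here is hypothetical, not a real
+# ETI message.
+def _demo_struct_unpack():
+    import struct
+    fmt = '<' + 'H' + '2x' + 'q'   # uint16 template id, 2 padding bytes, int64 price
+    data = b'\x39\x30' + b'\x00\x00' + b'\xd2\x04\x00\x00\x00\x00\x00\x00'
+    return struct.unpack(fmt, data)   # -> (12345, 1234)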
+
+def pp_int_type(t):
+    if not is_int(t):
+        return None
+    s = 'i'
+    if is_unsigned(t):
+        s = 'u'
+    n = int(t.get('size'))
+    s += str(n)
+    return s
+
+def is_elementary(t):
+    return t is not None and t.get('counter') is None
+
+def group_members(e, dt):
+    xs = []
+    ms = []
+    for m in e:
+        t = dt.get(m.get('type'))
+        if is_elementary(t):
+            ms.append(m)
+        else:
+            if ms:
+                xs.append(ms)
+                ms = []
+            xs.append([m])
+    if ms:
+        xs.append(ms)
+    return xs
+
+
+
+def parse_args():
+    p = argparse.ArgumentParser(description='Generate Wireshark Dissector for ETI/EOBI style protocol specifications')
+    p.add_argument('filename', help='protocol description XML file')
+    p.add_argument('--proto', default='eti',
+                   help='short protocol name (default: %(default)s)')
+    p.add_argument('--desc', '-d',
+                   default='Enhanced Trading Interface',
+                   help='protocol description (default: %(default)s)')
+    p.add_argument('--output', '-o', default='-',
+                   help='output filename (default: stdout)')
+    args = p.parse_args()
+    return args
+
+def main():
+    args = parse_args()
+    filename = args.filename
+    d = ET.parse(filename)
+    o = sys.stdout if args.output == '-' else open(args.output, 'w')
+    proto = args.proto
+
+    version = (d.getroot().get('version'), d.getroot().get('subVersion'))
+    desc = f'{args.desc} {version[0]}'
+
+    dt = get_data_types(d)
+    st = get_structs(d)
+    used = get_used_types(st)
+    for k in list(dt.keys()):
+        if k not in used:
+            del dt[k]
+    ts = get_templates(st)
+    ams = d.getroot().find('ApplicationMessages')
+
+    gen_header(proto, desc, o)
+    print(f'static int proto_{proto} = -1;', file=o)
+    gen_field_handles(st, dt, proto, o)
+    n2enum = gen_enums(dt, ts, o)
+    gen_dissect_structs(o)
+    sh = gen_subtree_handles(st, proto, o)
+    gen_dissect_fn(st, dt, ts, sh, ams, proto, o)
+    gen_register_fn(st, dt, n2enum, proto, desc, o)
+    gen_handoff_fn(proto, o)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/tools/extract_asn1_from_spec.pl b/tools/extract_asn1_from_spec.pl
new file mode 100755
index 0000000..f542632
--- /dev/null
+++ b/tools/extract_asn1_from_spec.pl
@@ -0,0 +1,125 @@
+#!/usr/bin/perl
+#
+# This script extracts the ASN1 definition from TS 36.331/36.355/25.331/38.331/37.355/36.413/38.413/36.423/38.423
+# /38.463/38.473, and generates asn files that can be processed by asn2wrs
+# First download the specification from 3gpp.org as a word document and open it
+# Then in "view" menu, select normal, draft or web layout (any kind that removes page header and footers)
+# Finally save the document as a text file
+# Example with TS 36.331: "perl extract_asn1_from_spec.pl 36331-xxx.txt"
+# It should generate: EUTRA-RRC-Definitions.asn, EUTRA-UE-Variables.asn and EUTRA-InterNodeDefinitions.asn
+#
+# Copyright 2011 Vincent Helfre and Erwan Yvin
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+use warnings;
+$input_file = $ARGV[0];
+$version = 0;
+
+sub extract_spec_version;
+sub extract_asn1;
+
+open(INPUT_FILE, "< $input_file") or die "Can not open file $input_file";
+
+extract_spec_version();
+
+extract_asn1();
+
+close(INPUT_FILE);
+
+# This subroutine extracts the version of the specification
+sub extract_spec_version {
+    my $line;
+    while($line = <INPUT_FILE>){
+        if($line =~ m/3GPP TS ((25|36|38)\.331|(36|37)\.355|(36|38)\.413|(36|38)\.423|36\.(443|444)|(36|38)\.455|38\.463|38\.473|37\.483) V/){
+            $version = $line;
+            return;
+        }
+    }
+}
+
+# This subroutine extracts the text delimited by -- ASN1START and -- ASN1STOP in INPUT_FILE
+# and copies it into OUTPUT_FILE.
+# The OUTPUT_FILE is opened on encounter of the keyword "DEFINITIONS AUTOMATIC TAGS"
+# and closed on encounter of the keyword "END"
+sub extract_asn1 {
+    my $line;
+    my $prev_line;
+    my $is_asn1 = 0;
+    my $output_file_name = 0;
+    my $file_name_found = 0;
+
+    while($line = <INPUT_FILE>){
+        if ($line =~ m/-- ASN1STOP/) {
+            $is_asn1 = 0;
+        }
+
+        if(($file_name_found == 0) && ($line =~ m/^LPP-PDU-Definitions/)){
+            $output_file_name = "LPP-PDU-Definitions.asn";
+            print "generating $output_file_name\n";
+            open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+            $file_name_found = 1;
+            syswrite OUTPUT_FILE,"-- "."$version"."\n";
+        }
+
+        if(($file_name_found == 0) && ($line =~ m/^LPP-Broadcast-Definitions/)){
+            $output_file_name = "LPP-Broadcast-Definitions.asn";
+            print "generating $output_file_name\n";
+            open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+            $file_name_found = 1;
+            syswrite OUTPUT_FILE,"-- "."$version"."\n";
+        }
+
+        if(($file_name_found == 0) && ($line =~ m/SonTransfer-IEs/)){
+            $output_file_name = "S1AP-SonTransfer-IEs.asn";
+            print "generating $output_file_name\n";
+            open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+            $is_asn1 = 1;
+            $file_name_found = 1;
+            syswrite OUTPUT_FILE,"-- "."$version"."\n";
+        }
+
+        if(($file_name_found == 0) && ($line =~ m/itu-t \(0\) identified-organization \(4\) etsi \(0\) mobileDomain \(0\)/)){
+            ($output_file_name) = ($prev_line =~ m/^([a-zA-Z0-9\-]+)\s/);
+            $output_file_name = "$output_file_name".".asn";
+            print "generating $output_file_name\n";
+            open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+            $is_asn1 = 1;
+            $file_name_found = 1;
+            syswrite OUTPUT_FILE,"-- "."$version"."\n";
+            syswrite OUTPUT_FILE,"$prev_line";
+        }
+
+        if(($file_name_found == 0) && ($line =~ m/DEFINITIONS AUTOMATIC TAGS ::=/)){
+            ($output_file_name) = ($line =~ m/^([a-zA-Z0-9\-]+)\s+DEFINITIONS AUTOMATIC TAGS ::=/);
+            $output_file_name = "$output_file_name".".asn";
+            print "generating $output_file_name\n";
+            open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+            $is_asn1 = 1;
+            $file_name_found = 1;
+            syswrite OUTPUT_FILE,"-- "."$version"."\n";
+        }
+
+        if (($line =~ /^END[\r\n]/) && (defined fileno OUTPUT_FILE)){
+            syswrite OUTPUT_FILE,"$line";
+            close(OUTPUT_FILE);
+            $is_asn1 = 0;
+            $file_name_found = 0;
+        }
+
+        if (($is_asn1 == 1) && (defined fileno OUTPUT_FILE)){
+            syswrite OUTPUT_FILE,"$line";
+        }
+
+        if ($line =~ m/-- ASN1START/) {
+            $is_asn1 = 1;
+        }
+
+        $prev_line = $line;
+    }
+}
+
diff --git a/tools/fix-encoding-args.pl b/tools/fix-encoding-args.pl
new file mode 100755
index 0000000..04151a2
--- /dev/null
+++ b/tools/fix-encoding-args.pl
@@ -0,0 +1,698 @@
+#!/usr/bin/env perl
+#
+# Copyright 2011, William Meier
+#
+# A program to fix encoding args for certain Wireshark API function calls
+#   from TRUE/FALSE to ENC_?? as appropriate (and possible)
+#   - proto_tree_add_item
+#   - proto_tree_add_bits_item
+#   - proto_tree_add_bits_ret_val
+#   - proto_tree_add_bitmask
+#   - proto_tree_add_bitmask_text !! ToDo: encoding arg not last arg
+#   - tvb_get_bits
+#   - tvb_get_bits16
+#   - tvb_get_bits24
+#   - tvb_get_bits32
+#   - tvb_get_bits64
+#   - ptvcursor_add
+#   - ptvcursor_add_no_advance
+#   - ptvcursor_add_with_subtree !!
ToDo: encoding arg not last arg +# +# ToDo: Rework program so that it can better be used to *validate* encoding-args +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +use strict; +use warnings; + +use Getopt::Long; + +# Conversion "Requests" + +# Standard conversions +my $searchReplaceFalseTrueHRef = + { + "FALSE" => "ENC_BIG_ENDIAN", + "0" => "ENC_BIG_ENDIAN", + "TRUE" => "ENC_LITTLE_ENDIAN", + "1" => "ENC_LITTLE_ENDIAN" + }; + +my $searchReplaceEncNAHRef = + { + "FALSE" => "ENC_NA", + "0" => "ENC_NA", + "TRUE" => "ENC_NA", + "1" => "ENC_NA", + "ENC_LITTLE_ENDIAN" => "ENC_NA", + "ENC_BIG_ENDIAN" => "ENC_NA", + "ENC_ASCII|ENC_NA" => "ENC_NA", + "ENC_ASCII | ENC_NA" => "ENC_NA" + }; + +my $searchReplaceDissectorTable = + { + "FALSE" => "STRING_CASE_SENSITIVE", + "0" => "STRING_CASE_SENSITIVE", + "BASE_NONE" => "STRING_CASE_SENSITIVE", + "TRUE" => "STRING_CASE_INSENSITIVE", + "1" => "STRING_CASE_INSENSITIVE" + }; + +# --------------------------------------------------------------------- +# Conversion "request" structure +# ( +# [ ], +# { } +# } + +my @types_NA = + ( + [ qw (FT_NONE FT_BYTES FT_ETHER FT_IPv6 FT_IPXNET FT_OID FT_REL_OID)], + $searchReplaceEncNAHRef + ); + +my @types_INT = + ( + [ qw (FT_UINT8 FT_UINT16 FT_UINT24 FT_UINT32 FT_UINT64 FT_INT8 + FT_INT16 FT_INT24 FT_INT32 FT_INT64 FT_FLOAT FT_DOUBLE)], + $searchReplaceFalseTrueHRef + ); + +my @types_MISC = + ( + [ qw (FT_BOOLEAN FT_IPv4 FT_GUID FT_EUI64)], + $searchReplaceFalseTrueHRef + ); + +my @types_STRING = + ( + [qw (FT_STRING FT_STRINGZ)], + { + "FALSE" => "ENC_ASCII", + "0" => "ENC_ASCII", + "TRUE" => "ENC_ASCII", + "1" => "ENC_ASCII", + "ENC_LITTLE_ENDIAN" => "ENC_ASCII", + "ENC_BIG_ENDIAN" => "ENC_ASCII", + "ENC_NA" => "ENC_ASCII", + + "ENC_ASCII|ENC_LITTLE_ENDIAN" => "ENC_ASCII", + "ENC_ASCII|ENC_BIG_ENDIAN" => "ENC_ASCII", + + "ENC_UTF_8|ENC_LITTLE_ENDIAN" => "ENC_UTF_8", + "ENC_UTF_8|ENC_BIG_ENDIAN" => "ENC_UTF_8", + + "ENC_EBCDIC|ENC_LITTLE_ENDIAN" => "ENC_EBCDIC", + "ENC_EBCDIC|ENC_BIG_ENDIAN" => "ENC_EBCDIC", + } + ); + +my @types_UINT_STRING = + ( + [qw (FT_UINT_STRING)], + { + "FALSE" => "ENC_ASCII|ENC_BIG_ENDIAN", + "0" => "ENC_ASCII|ENC_BIG_ENDIAN", + "TRUE" => "ENC_ASCII|ENC_LITTLE_ENDIAN", + "1" => "ENC_ASCII|ENC_LITTLE_ENDIAN", + "ENC_BIG_ENDIAN" => "ENC_ASCII|ENC_BIG_ENDIAN", + "ENC_LITTLE_ENDIAN" => "ENC_ASCII|ENC_LITTLE_ENDIAN", + "ENC_ASCII|ENC_NA" => "ENC_ASCII|ENC_BIG_ENDIAN", + "ENC_ASCII" => "ENC_ASCII|ENC_BIG_ENDIAN", + "ENC_NA" => "ENC_ASCII|ENC_BIG_ENDIAN" + } + ); + +my @types_REG_PROTO = + ( + [ qw (REG_PROTO)], + $searchReplaceEncNAHRef + ); + +# --------------------------------------------------------------------- + +my @findAllFunctionList = +## proto_tree_add_bitmask_text !! ToDo: encoding arg not last arg +## ptvcursor_add_with_subtree !! ToDo: encoding Arg not last arg + qw ( + proto_tree_add_item + proto_tree_add_bits_item + proto_tree_add_bits_ret_val + proto_tree_add_bitmask + proto_tree_add_bitmask_with_flags + tvb_get_bits + tvb_get_bits16 + tvb_get_bits24 + tvb_get_bits32 + tvb_get_bits64 + ptvcursor_add + ptvcursor_add_no_advance + register_dissector_table + ); + +# --------------------------------------------------------------------- +# +# MAIN +# +my $writeFlag = ''; +my $helpFlag = ''; +my $action = 'fix-all'; + +my $result = GetOptions( + 'action=s' => \$action, + 'write' => \$writeFlag, + 'help|?' 
=> \$helpFlag
+    );
+
+if (!$result || $helpFlag || !$ARGV[0]) {
+    usage();
+}
+
+if (($action ne 'fix-all') && ($action ne 'find-all')) {
+    usage();
+}
+
+sub usage {
+    print "\nUsage: $0 [--action=fix-all|find-all] [--write] FILENAME [...]\n\n";
+    print "  --action = fix-all (default)\n";
+    print "     Fix <fcn_name>() encoding arg when possible in FILENAME(s)\n";
+    print "     Fixes (if any) are listed on stdout\n\n";
+    print "  --write     create FILENAME.encoding-arg-fixes (original file with fixes)\n";
+    print "              (effective only for fix-all)\n";
+    print "\n";
+    print "  --action = find-all\n";
+    print "     Find all occurrences of <fcn_name>() statements\n";
+    print "     highlighting the 'encoding' arg\n";
+    exit(1);
+}
+
+# Read through the files; fix up encoding parameter of proto_tree_add_item() calls
+# Essentially:
+#  For each file {
+#  .  Create a hash of the hf_index_names & associated field types from the entries in hf[]
+#  .  For each requested "conversion request" {
+#  .  .  For each hf[] entry hf_index_name with a field type in a set of specified field types {
+#  .  .  .  For each proto_tree_add_item() statement
+#  .  .  .  .  - replace encoding arg in proto_tree_add_item(..., hf_index_name, ..., 'encoding-arg')
+#                   specific values with new values
+#  .  .  .  .  - print the statement showing the change
+#  .  .  .  }
+#  .  .  }
+#  .  }
+#  .  If requested and if replacements done: write new file "orig-filename.encoding-arg-fixes"
+#  }
+#
+# Note: The proto_tree_add_item() encoding arg will be converted only if
+#       the hf_index_name referenced is in one of the entries in hf[] in the same file
+
+my $found_total = 0;
+
+while (my $fileName = $ARGV[0]) {
+    shift;
+    my $fileContents = '';
+
+    die "No such file: \"$fileName\"\n" if (! -e $fileName);
+
+    # delete leading './'
+    $fileName =~ s{ ^ \. / } {}xo;
+    ##print "$fileName\n";
+
+    # Read in the file (ouch, but it's easier that way)
+    open(FCI, "<", $fileName) || die("Couldn't open $fileName");
+    while (<FCI>) {
+        $fileContents .= $_;
+    }
+    close(FCI);
+
+    # Create a hash of the hf[] entries (name_index_name=>field_type)
+    my $hfArrayEntryFieldTypeHRef = find_hf_array_entries(\$fileContents, $fileName);
+
+    if ($action eq "fix-all") {
+
+        # Find and replace: <fcn_name>() encoding arg in $fileContents for:
+        #  - hf[] entries with specified field types;
+        #  - 'proto' as returned from proto_register_protocol()
+        my $fcn_name = "(?:proto_tree_add_item|ptvcursor_add(?:_no_advance)?)";
+        my $found = 0;
+        $found += fix_encoding_args_by_hf_type(1, \@types_NA,          $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+        $found += fix_encoding_args_by_hf_type(1, \@types_INT,         $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+        $found += fix_encoding_args_by_hf_type(1, \@types_MISC,        $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+        $found += fix_encoding_args_by_hf_type(1, \@types_STRING,      $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+        $found += fix_encoding_args_by_hf_type(1, \@types_UINT_STRING, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+        $found += fix_encoding_args_by_hf_type(1, \@types_REG_PROTO,   $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+
+        # Find and replace: alters <fcn_name>() encoding arg in $fileContents
+        $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bits_(?:item|ret_val)", \$fileContents, $fileName);
+        $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bitmask", \$fileContents, $fileName);
+        $found += fix_encoding_args(1,
$searchReplaceFalseTrueHRef, "proto_tree_add_bitmask_with_flags", \$fileContents, $fileName); + $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "tvb_get_bits(?:16|24|32|64)?", \$fileContents, $fileName); + $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "tvb_get_(?:ephemeral_)?unicode_string[z]?", \$fileContents, $fileName); + + $found += fix_dissector_table_args(1, $searchReplaceDissectorTable, "register_dissector_table", \$fileContents, $fileName); + + # If desired and if any changes, write out the changed version to a file + if (($writeFlag) && ($found > 0)) { + open(FCO, ">", $fileName . ".encoding-arg-fixes"); +# open(FCO, ">", $fileName ); + print FCO "$fileContents"; + close(FCO); + } + $found_total += $found; + } + + if ($action eq "find-all") { + # Find all proto_tree_add_item() statements + # and output same highlighting the encoding arg + $found_total += find_all(\@findAllFunctionList, \$fileContents, $fileName); + } + +} # while + +exit $found_total; + +# --------------------------------------------------------------------- +# Create a hash containing an entry (hf_index_name => field_type) for each hf[]entry. +# also: create an entry in the hash for the 'protocol name' variable (proto... => FT_PROTOCOL) +# returns: ref to the hash + +sub find_hf_array_entries { + my ($fileContentsRef, $fileName) = @_; + + # The below Regexp is based on one from: + # https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811 + # It is in the public domain. + # A complicated regex which matches C-style comments. + my $CCommentRegEx = qr{ / [*] [^*]* [*]+ (?: [^/*] [^*]* [*]+ )* / }xo; + + # hf[] entry regex (to extract an hf_index_name and associated field type) + my $hfArrayFieldTypeRegEx = qr { + \{ + \s* + &\s*([A-Z0-9_\[\]-]+) # &hf + \s*,\s* + \{\s* + .+? # (a bit dangerous) + \s*,\s* + (FT_[A-Z0-9_]+) # field type + \s*,\s* + .+? + \s*,\s* + HFILL # HFILL + }xios; + + # create a copy of $fileContents with comments removed + my $fileContentsWithoutComments = $$fileContentsRef; + $fileContentsWithoutComments =~ s {$CCommentRegEx} []xg; + + # find all the hf[] entries (searching $fileContentsWithoutComments). + # Create a hash keyed by the hf_index_name with the associated value being the field_type + my %hfArrayEntryFieldType; + while ($fileContentsWithoutComments =~ m{ $hfArrayFieldTypeRegEx }xgis) { +# print "$1 $2\n"; + if (exists $hfArrayEntryFieldType{$1}) { + printf "%-35.35s: ? duplicate hf[] entry: no fixes done for: $1; manual action may be req'd\n", $fileName; + $hfArrayEntryFieldType{$1} = "???"; # prevent any substitutions for this hf_index_name + } else { + $hfArrayEntryFieldType{$1} = $2; + } + } + + # pre-process contents to fold multiple lines and speed up matching. + $fileContentsWithoutComments =~ s/\s*=\s*/=/gs; + $fileContentsWithoutComments =~ s/^\s+//g; + + # RegEx to get "proto" variable name + my $protoRegEx = qr / + ^ # note m modifier below + ( + [a-zA-Z0-9_]+ + ) + = + proto_register_protocol\b + /xom; + + # Find all registered protocols + while ($fileContentsWithoutComments =~ m { $protoRegEx }xgom ) { + ##print "$1\n"; + if (exists $hfArrayEntryFieldType{$1}) { + printf "%-35.35s: ? 
duplicate 'proto': no fixes done for: $1; manual action may be req'd\n", $fileName; + $hfArrayEntryFieldType{$1} = "???"; # prevent any substitutions for this protocol + } else { + $hfArrayEntryFieldType{$1} = "REG_PROTO"; + } + } + + return \%hfArrayEntryFieldType; +} + +# --------------------------------------------------------------------- +# fix_encoding_args +# Substitute new values for the specified () encoding arg values +# when the encoding arg is the *last* arg of the call to fcn_name +# args: +# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash); +# ref to hash containing search (keys) and replacement (values) for encoding arg +# fcn_name string +# ref to string containing file contents +# filename string +# +{ # block begin + + # shared variables + my $fileName; + my $searchReplaceHRef; + my $found; + + sub fix_encoding_args { + (my $subFlag, $searchReplaceHRef, my $fcn_name, my $fileContentsRef, $fileName) = @_; + + my $encArgPat; + + if ($subFlag == 1) { + # just match for () statements which have an encoding arg matching one of the + # keys in the searchReplace hash. + # Escape any "|" characters in the keys + # and then create "alternatives" string containing all the resulting key strings. Ex: "(A|B|C\|D|..." + $encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef; + } elsif ($subFlag == 3) { + # match for () statements for any value of the encoding parameter + # IOW: find all the statements + $encArgPat = qr / [^,)]+? /x; + } + + # build the complete pattern + my $patRegEx = qr / + # part 1: $1 + ( + (?:^|=) # don't try to handle fcn_name call when arg of another fcn call + \s* + $fcn_name \s* \( + [^;]+? # a bit dangerous + ,\s* + ) + + # part 2: $2 + # exact match of pattern (including spaces) + ((?-x)$encArgPat) + + # part 3: $3 + ( + \s* \) + \s* ; + ) + /xms; # m for ^ above + + ##print "$patRegEx\n"; + + ## Match and substitute as specified + $found = 0; + + $$fileContentsRef =~ s/ $patRegEx /patsubx($1,$2,$3)/xges; + + return $found; + } + + # Called from fix_encoding_args to determine replacement string when a regex match is encountered + # $_[0]: part 1 + # $_[1]: part 2: encoding arg + # $_[2]: part 3 + # lookup the desired replacement value for the encoding arg + # print match string showing and highlighting the encoding arg replacement + # return "replacement" string + sub patsubx { + $found += 1; + my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???"; + my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]); + $str =~ tr/\t\n\r/ /d; + printf "%s: $str\n", $fileName; + return $_[0] . $substr . 
$_[2]; + } +} # block end + +# --------------------------------------------------------------------- +# fix_encoding_args_by_hf_type +# +# Substitute new values for certain proto_tree_add_item() encoding arg +# values (for specified hf field types) +# Variants: search for and display for "exceptions" to allowed encoding arg values; +# search for and display all encoding arg values +# args: +# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash); +# 2: search for "exceptions" to allowed encoding arg values (values in search hash); +# 3: search for all encoding arg values +# ref to array containing two elements: +# - ref to array containing hf[] types to be processed (FT_STRING, etc) +# - ref to hash containing search (keys) and replacement (values) for encoding arg +# fcn_name string +# ref to string containing file contents +# ref to hfArrayEntries hash (key: hf name; value: field type) +# filename string + +{ # block begin + +# shared variables + my $fileName; + my $searchReplaceHRef; + my $found; + my $hf_field_type; + + sub fix_encoding_args_by_hf_type { + + (my $subFlag, my $mapArg, my $fcn_name, my $fileContentsRef, my $hfArrayEntryFieldTypeHRef, $fileName) = @_; + + my $hf_index_name; + my $hfTypesARef; + my $encArgPat; + + $hfTypesARef = $$mapArg[0]; + $searchReplaceHRef = $$mapArg[1]; + + my %hfTypes; + @hfTypes{@$hfTypesARef}=(); + + # set up the encoding arg match pattern + if ($subFlag == 1) { + # just match for () statements which have an encoding arg matching one of the + # keys in the searchReplace hash. + # Escape any "|" characters in the keys + # and then create "alternatives" string containing all the resulting key strings. Ex: "A|B|C\|D|..." + $encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef; + } elsif ($subFlag == 2) { + # Find all the () statements wherein the encoding arg is a value other than + # one of the "replace" values. + # Uses zero-length negative-lookahead to find () statements for which the encoding + # arg is something other than one of the provided replace values. + # Escape any "|" characters in the values to be matched + # and then create "alternatives" string containing all the value strings. Ex: "A|B|C\|D|..." + my $match_str = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } values %$searchReplaceHRef; + $encArgPat = qr / + (?! # negative zero-length look-ahead + \s* + (?: $match_str ) # alternatives we don't want to match + \s* + ) + [^,)]+? # OK: enoding arg is other than one of the alternatives: + # match to end of the arg + /x; + } elsif ($subFlag == 3) { + # match for () statements for any value of the encoding parameter + # IOW: find all the proto_tree_add_item statements with an hf entry of the desired types + $encArgPat = qr / [^,)]+? /x; + } + + my @hf_index_names; + + # For each hf[] entry which matches a type in %hfTypes do replacements + $found = 0; + foreach my $key (keys %$hfArrayEntryFieldTypeHRef) { + $hf_index_name = $key; + $hf_field_type = $$hfArrayEntryFieldTypeHRef{$key}; + ##printf "--> %-35.35s: %s\n", $hf_index_name, $hf_field_type; + + next unless exists $hfTypes{$hf_field_type}; # Do we want to process for this hf[] entry type ? 
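+        # Illustrative sketch only (hf_foo_text and the call are invented for
+        # this comment): if hf[] declares hf_foo_text with type FT_STRING, a
+        # statement such as
+        #     proto_tree_add_item(tree, hf_foo_text, tvb, 0, 10, TRUE);
+        # matches the pattern built below and, per the @types_STRING table,
+        # is rewritten to
+        #     proto_tree_add_item(tree, hf_foo_text, tvb, 0, 10, ENC_ASCII);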
+ + ##print "\n$hf_index_name $hf_field_type\n"; + push @hf_index_names, $hf_index_name; + } + + if (@hf_index_names) { + # build the complete pattern + my $hf_index_names_re = join('|', @hf_index_names); + $hf_index_names_re =~ s/\[|\]/\\$&/g; # escape any "[" or "]" characters + my $patRegEx = qr / + # part 1: $1 + ( + $fcn_name \s* \( + [^;]+? + ,\s* + (?:$hf_index_names_re) + \s*, + [^;]+ + ,\s* + ) + + # part 2: $2 + # exact match of pattern (including spaces) + ((?-x)$encArgPat) + + # part 3: $3 + ( + \s* \) + \s* ; + ) + /xs; + + ##print "\n$patRegEx\n"; + + ## Match and substitute as specified + $$fileContentsRef =~ s/ $patRegEx /patsub($1,$2,$3)/xges; + + } + + return $found; + } + + # Called from fix_encoding_args to determine replacement string when a regex match is encountered + # $_[0]: part 1 + # $_[1]: part 2: encoding arg + # $_[2]: part 3 + # lookup the desired replacement value for the encoding arg + # print match string showing and highlighting the encoding arg replacement + # return "replacement" string + sub patsub { + $found += 1; + my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???"; + my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]); + $str =~ tr/\t\n\r/ /d; + printf "%s: %-17.17s $str\n", $fileName, $hf_field_type . ":"; + return $_[0] . $substr . $_[2]; + } +} # block end + +# --------------------------------------------------------------------- +# fix_dissector_table_args +# Substitute new values for the specified () encoding arg values +# when the encoding arg is the *last* arg of the call to fcn_name +# args: +# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash); +# ref to hash containing search (keys) and replacement (values) for encoding arg +# fcn_name string +# ref to string containing file contents +# filename string +# +{ # block begin + + # shared variables + my $fileName; + my $searchReplaceHRef; + my $found; + + sub fix_dissector_table_args { + (my $subFlag, $searchReplaceHRef, my $fcn_name, my $fileContentsRef, $fileName) = @_; + + my $encArgPat; + + if ($subFlag == 1) { + # just match for () statements which have an encoding arg matching one of the + # keys in the searchReplace hash. + # Escape any "|" characters in the keys + # and then create "alternatives" string containing all the resulting key strings. Ex: "(A|B|C\|D|..." + $encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef; + } elsif ($subFlag == 3) { + # match for () statements for any value of the encoding parameter + # IOW: find all the statements + $encArgPat = qr / [^,)]+? /x; + } + + # build the complete pattern + my $patRegEx = qr / + # part 1: $1 + ( + (?:^|=) # don't try to handle fcn_name call when arg of another fcn call + \s* + $fcn_name \s* \( + [^;]+? 
# a bit dangerous + ,\s* + FT_STRING[A-Z]* + ,\s* + ) + + # part 2: $2 + # exact match of pattern (including spaces) + ((?-x)$encArgPat) + + # part 3: $3 + ( + \s* \) + \s* ; + ) + /xms; # m for ^ above + + ##print "$patRegEx\n"; + + ## Match and substitute as specified + $found = 0; + + $$fileContentsRef =~ s/ $patRegEx /patsuby($1,$2,$3)/xges; + + return $found; + } + + # Called from fix_encoding_args to determine replacement string when a regex match is encountered + # $_[0]: part 1 + # $_[1]: part 2: encoding arg + # $_[2]: part 3 + # lookup the desired replacement value for the encoding arg + # print match string showing and highlighting the encoding arg replacement + # return "replacement" string + sub patsuby { + $found += 1; + my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???"; + my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]); + $str =~ tr/\t\n\r/ /d; + printf "%s: $str\n", $fileName; + return $_[0] . $substr . $_[2]; + } +} # block end + +# --------------------------------------------------------------------- +# Find all statements +# and output same highlighting the encoding arg +# Currently: encoding arg is matched as the *last* arg of the function call + +sub find_all { + my( $fcnListARef, $fileContentsRef, $fileName) = @_; + + my $found = 0; + my $fcnListPat = join "|", @$fcnListARef; + my $pat = qr / + ( + (?:$fcnListPat) \s* \( + [^;]+ + , \s* + ) + ( + [^ \t,)]+? + ) + ( + \s* \) + \s* ; + ) + /xs; + + while ($$fileContentsRef =~ / $pat /xgso) { + my $str = "${1}[[${2}]]${3}\n"; + $str =~ tr/\t\n\r/ /d; + $str =~ s/ \s+ / /xg; + print "$fileName: $str\n"; + $found += 1; + } + return $found; +} + diff --git a/tools/fuzz-test.sh b/tools/fuzz-test.sh new file mode 100755 index 0000000..b63f647 --- /dev/null +++ b/tools/fuzz-test.sh @@ -0,0 +1,317 @@ +#!/bin/bash +# +# Fuzz-testing script for TShark +# +# This script uses Editcap to add random errors ("fuzz") to a set of +# capture files specified on the command line. It runs TShark on +# each fuzzed file and checks for errors. The files are processed +# repeatedly until an error is found. +# +# Copyright 2013 Gerald Combs +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +TEST_TYPE="fuzz" +# shellcheck source=tools/test-common.sh +. "$( dirname "$0" )"/test-common.sh || exit 1 + +# Sanity check to make sure we can find our plugins. Zero or less disables. +MIN_PLUGINS=0 + +# Did we catch a signal or time out? +DONE=false + +# Currently running children +RUNNER_PIDS= + +# Perform a two-pass analysis on the capture file? +TWO_PASS= + +# Specific config profile ? +CONFIG_PROFILE= + +# Run under valgrind ? +VALGRIND=0 + +# Abort on UTF-8 encoding errors +CHECK_UTF_8="--log-fatal-domains=UTF-8 " + +# Run under AddressSanitizer ? +ASAN=$CONFIGURED_WITH_ASAN + +# Don't skip any byte from being changed +CHANGE_OFFSET=0 + +# The maximum permitted amount of memory leaked. Eventually this should be +# worked down to zero, but right now that would fail on every single capture. +# Only has effect when running under valgrind. +MAX_LEAK=$(( 1024 * 100 )) + +# Our maximum run time. 
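+# $SECONDS is bash's builtin count of seconds since the script started, so the
+# default budget below stops the run roughly 24 hours in; "-t" (parsed below)
+# overrides it. A hypothetical one-hour invocation:
+#   tools/fuzz-test.sh -t 3600 captures/*.pcapng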
+RUN_START_SECONDS=$SECONDS
+RUN_MAX_SECONDS=$(( RUN_START_SECONDS + 86400 ))
+
+# To do: add options for file names and limits
+while getopts "2b:C:d:e:agp:P:o:t:U" OPTCHAR ; do
+    case $OPTCHAR in
+        a) ASAN=1 ;;
+        2) TWO_PASS="-2 " ;;
+        b) WIRESHARK_BIN_DIR=$OPTARG ;;
+        C) CONFIG_PROFILE="-C $OPTARG " ;;
+        d) TMP_DIR=$OPTARG ;;
+        e) ERR_PROB=$OPTARG ;;
+        g) VALGRIND=1 ;;
+        p) MAX_PASSES=$OPTARG ;;
+        P) MIN_PLUGINS=$OPTARG ;;
+        o) CHANGE_OFFSET=$OPTARG ;;
+        t) RUN_MAX_SECONDS=$(( RUN_START_SECONDS + OPTARG )) ;;
+        U) CHECK_UTF_8= ;; # disable
+        *) printf "Unknown option %s\n" "$OPTCHAR"
+    esac
+done
+shift $((OPTIND - 1))
+
+### usually you won't have to change anything below this line ###
+
+ws_bind_exec_paths
+ws_check_exec "$TSHARK" "$EDITCAP" "$CAPINFOS" "$DATE" "$TMP_DIR"
+
+COMMON_ARGS="${CONFIG_PROFILE}${TWO_PASS}${CHECK_UTF_8}"
+KEEP=
+PACKET_RANGE=
+if [ $VALGRIND -eq 1 ]; then
+    RUNNER=$( dirname "$0" )"/valgrind-wireshark.sh"
+    COMMON_ARGS="-b $WIRESHARK_BIN_DIR $COMMON_ARGS"
+    declare -a RUNNER_ARGS=("" "-T")
+    # Valgrind requires more resources, so permit 1.5x memory and 3x time
+    # (1.5x time is too small for a few large captures in the menagerie)
+    MAX_CPU_TIME=$(( 3 * MAX_CPU_TIME ))
+    MAX_VMEM=$(( 3 * MAX_VMEM / 2 ))
+    # Valgrind is slow. Trim captures to the first 10k packets so that
+    # we don't time out.
+    KEEP=-r
+    PACKET_RANGE=1-10000
+else
+    # Not using valgrind, use regular tshark.
+    # TShark arguments (you won't have to change these)
+    # n Disable network object name resolution
+    # V Print a view of the details of the packet rather than a one-line summary of the packet
+    # x Cause TShark to print a hex and ASCII dump of the packet data after printing the summary or details
+    # r Read packet data from the following infile
+    RUNNER="$TSHARK"
+    declare -a RUNNER_ARGS=("-nVxr" "-nr")
+    # Running with a read filter but without generating the tree exposes some
+    # "More than 100000 items in tree" bugs.
+    # Not sure if we want to add even more cycles to the fuzz bot's work load...
+    #declare -a RUNNER_ARGS=("${CONFIG_PROFILE}${TWO_PASS}-nVxr" "${CONFIG_PROFILE}${TWO_PASS}-nr" "-Yframe ${CONFIG_PROFILE}${TWO_PASS}-nr")
+fi
+
+
+# Make sure we have a valid test set
+FOUND=0
+for CF in "$@" ; do
+    if [ "$OSTYPE" == "cygwin" ] ; then
+        CF=$( cygpath --windows "$CF" )
+    fi
+    "$CAPINFOS" "$CF" > /dev/null 2>&1 && FOUND=1
+    if [ $FOUND -eq 1 ] ; then break ; fi
+done
+
+if [ $FOUND -eq 0 ] ; then
+    cat <<FIN
+Error: No valid capture files found.
+FIN
+    exit 1
+fi
+
+# Clean up on <Ctrl>C, etc
+trap_all() {
+    printf '\n\nCaught signal. Exiting.\n'
+    rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"*
+    exit 0
+}
+
+trap_abrt() {
+    for RUNNER_PID in $RUNNER_PIDS ; do
+        kill -ABRT "$RUNNER_PID"
+    done
+    trap_all
+}
+
+trap trap_all HUP INT TERM
+trap trap_abrt ABRT
+
+# Iterate over our capture files.
+PASS=0
+while { [ $PASS -lt "$MAX_PASSES" ] || [ "$MAX_PASSES" -lt 1 ]; } && ! $DONE ; do
+    PASS=$(( PASS+1 ))
+    echo "Pass $PASS:"
+    RUN=0
+
+    for CF in "$@" ; do
+        if $DONE; then
+            break # We caught a signal or timed out
+        fi
+        RUN=$(( RUN + 1 ))
+        if [ $(( RUN % 50 )) -eq 0 ] ; then
+            echo "    [Pass $PASS]"
+        fi
+        if [ "$OSTYPE" == "cygwin" ] ; then
+            CF=$( cygpath --windows "$CF" )
+        fi
+
+        "$CAPINFOS" "$CF" > /dev/null 2> "$TMP_DIR/$ERR_FILE"
+        RETVAL=$?
+        if [ $RETVAL -eq 1 ] || [ $RETVAL -eq 2 ] ; then
+            echo "Not a valid capture file"
+            rm -f "$TMP_DIR/$ERR_FILE"
+            continue
+        elif [ $RETVAL -ne 0 ] && ! $DONE ; then
+            # Some other error
+            ws_exit_error
+        fi
+
+        # Choose a random subset of large captures.
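+        # Windowing sketch (illustrative numbers; MAX_FUZZ_PACKETS is expected
+        # to come from test-common.sh): for a 500000-packet capture and a
+        # 10000-packet cap, shuf picks START_PACKET somewhere in 1-490000 and
+        # editcap below trims the capture to that window before fuzzing it.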
+ KEEP= + PACKET_RANGE= + CF_PACKETS=$( "$CAPINFOS" -T -r -c "$CF" | cut -f2 ) + if [[ CF_PACKETS -gt $MAX_FUZZ_PACKETS ]] ; then + START_PACKET=$(( CF_PACKETS - MAX_FUZZ_PACKETS )) + START_PACKET=$( shuf --input-range=1-$START_PACKET --head-count=1 ) + END_PACKET=$(( START_PACKET + MAX_FUZZ_PACKETS )) + KEEP=-r + PACKET_RANGE="$START_PACKET-$END_PACKET" + printf " Fuzzing packets %d-%d of %d\n" "$START_PACKET" "$END_PACKET" "$CF_PACKETS" + fi + + DISSECTOR_BUG=0 + VG_ERR_CNT=0 + + printf " %s: " "$( basename "$CF" )" + # shellcheck disable=SC2086 + "$EDITCAP" -E "$ERR_PROB" -o "$CHANGE_OFFSET" $KEEP "$CF" "$TMP_DIR/$TMP_FILE" $PACKET_RANGE > /dev/null 2>&1 + RETVAL=$? + if [ $RETVAL -ne 0 ] ; then + # shellcheck disable=SC2086 + "$EDITCAP" -E "$ERR_PROB" -o "$CHANGE_OFFSET" $KEEP -T ether "$CF" "$TMP_DIR/$TMP_FILE" $PACKET_RANGE \ + > /dev/null 2>&1 + RETVAL=$? + if [ $RETVAL -ne 0 ] ; then + echo "Invalid format for editcap" + continue + fi + fi + + FILE_START_SECONDS=$SECONDS + RUNNER_PIDS= + RUNNER_ERR_FILES= + for ARGS in "${RUNNER_ARGS[@]}" ; do + if $DONE; then + break # We caught a signal + fi + echo -n "($ARGS) " + + # Run in a child process with limits. + ( + # Set some limits to the child processes, e.g. stop it if + # it's running longer than MAX_CPU_TIME seconds. (ulimit + # is not supported well on cygwin - it shows some warnings - + # and the features we use may not all be supported on some + # UN*X platforms.) + ulimit -S -t "$MAX_CPU_TIME" -s "$MAX_STACK" + + # Allow core files to be generated + ulimit -c unlimited + + # Don't enable ulimit -v when using ASAN. See + # https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v + if [ $ASAN -eq 0 ]; then + ulimit -S -v "$MAX_VMEM" + fi + + # shellcheck disable=SC2016 + SUBSHELL_PID=$($SHELL -c 'echo $PPID') + + printf 'Command and args: %s %s %s\n' "$RUNNER" "$COMMON_ARGS" "$ARGS" > "$TMP_DIR/$ERR_FILE.$SUBSHELL_PID" + # shellcheck disable=SC2086 + "$RUNNER" $COMMON_ARGS $ARGS "$TMP_DIR/$TMP_FILE" \ + > /dev/null 2>> "$TMP_DIR/$ERR_FILE.$SUBSHELL_PID" + ) & + RUNNER_PID=$! + RUNNER_PIDS="$RUNNER_PIDS $RUNNER_PID" + RUNNER_ERR_FILES="$RUNNER_ERR_FILES $TMP_DIR/$ERR_FILE.$RUNNER_PID" + + if [ $SECONDS -ge $RUN_MAX_SECONDS ] ; then + printf "\nStopping after %d seconds.\n" $(( SECONDS - RUN_START_SECONDS )) + DONE=true + fi + done + + for RUNNER_PID in $RUNNER_PIDS ; do + wait "$RUNNER_PID" + RUNNER_RETVAL=$? + mv "$TMP_DIR/$ERR_FILE.$RUNNER_PID" "$TMP_DIR/$ERR_FILE" + + # Uncomment the next two lines to enable dissector bug + # checking. + #grep -i "dissector bug" $TMP_DIR/$ERR_FILE \ + # > /dev/null 2>&1 && DISSECTOR_BUG=1 + + if [ $VALGRIND -eq 1 ] && ! $DONE; then + VG_ERR_CNT=$( grep "ERROR SUMMARY:" "$TMP_DIR/$ERR_FILE" | cut -f4 -d' ' ) + VG_DEF_LEAKED=$( grep "definitely lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , ) + VG_IND_LEAKED=$( grep "indirectly lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , ) + VG_TOTAL_LEAKED=$(( VG_DEF_LEAKED + VG_IND_LEAKED )) + if [ $RUNNER_RETVAL -ne 0 ] ; then + echo "General Valgrind failure." + VG_ERR_CNT=1 + elif [ "$VG_TOTAL_LEAKED" -gt "$MAX_LEAK" ] ; then + echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)." + echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)." >> "$TMP_DIR/$ERR_FILE" + VG_ERR_CNT=1 + fi + if grep -q "Valgrind cannot continue" "$TMP_DIR/$ERR_FILE" ; then + echo "Valgrind unable to continue." + VG_ERR_CNT=-1 + fi + fi + + if ! 
$DONE && { [ $RUNNER_RETVAL -ne 0 ] || [ $DISSECTOR_BUG -ne 0 ] || [ $VG_ERR_CNT -ne 0 ]; } ; then
+                # shellcheck disable=SC2086
+                rm -f $RUNNER_ERR_FILES
+                ws_exit_error
+            fi
+        done
+
+        printf " OK (%s seconds)\\n" $(( SECONDS - FILE_START_SECONDS ))
+        rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"
+    done
+done
diff --git a/tools/gen-bugnote b/tools/gen-bugnote
new file mode 100755
index 0000000..786886e
--- /dev/null
+++ b/tools/gen-bugnote
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Given a Wireshark issue ID, fetch its title and prepare an entry suitable
+# for pasting into the release notes. Requires curl and jq.
+#
+# Usage: gen-bugnote <issue number>
+#
+# Copyright 2013 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+gitlab_issue_url_pfx="https://gitlab.com/api/v4/projects/wireshark%2Fwireshark/issues"
+issue_id="${1#\#}" # Strip leading "#"
+
+case "$OSTYPE" in
+    darwin*)
+        clipboard_cmd="pbcopy -Pascii"
+        ;;
+    cygwin*)
+        clipboard_cmd="cat > /dev/clipboard"
+        ;;
+    linux*)
+        clipboard_cmd="xsel --clipboard"
+        ;;
+    *)
+        echo "Unable to copy to clipboard"
+        clipboard_cmd="cat > /dev/null"
+        ;;
+esac
+
+if [ -z "$issue_id" ] ; then
+    echo "Usage: $( basename "$0" ) <issue number>"
+    exit 1
+fi
+
+issue_title=$(
+    curl -s -o - "${gitlab_issue_url_pfx}/$issue_id" \
+    | jq '.title'
+    )
+
+# We can escape backslashes in jq's --raw-output or we can trim quotes off
+# its plain output.
+issue_title="${issue_title%\"}"
+issue_title="${issue_title#\"}"
+trailing_period=""
+if [[ ! ${issue_title: -1} =~ [[:punct:]] ]] ; then
+    trailing_period="."
+fi
+
+printf "* %s%s wsbuglink:${issue_id}[].\\n" "$issue_title" "$trailing_period" \
+    | $clipboard_cmd
+
+echo "Copied $issue_id: $issue_title"
diff --git a/tools/generate-bacnet-vendors.py b/tools/generate-bacnet-vendors.py
new file mode 100755
index 0000000..14fc530
--- /dev/null
+++ b/tools/generate-bacnet-vendors.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+'''
+    Copyright 2023 Jaap Keuter
+    based on work by Anish Bhatt
+
+SPDX-License-Identifier: GPL-2.0-or-later
+'''
+
+import sys
+import urllib.request, urllib.error, urllib.parse
+from bs4 import BeautifulSoup
+
+def exit_msg(msg, status=1):
+    # Minimal fatal-error helper; the except clauses below rely on it.
+    sys.stderr.write(msg + '\n')
+    sys.exit(status)
+
+url = "https://bacnet.org/assigned-vendor-ids/"
+req_headers = { 'User-Agent': 'Wireshark generate-bacnet-vendors' }
+try:
+    req = urllib.request.Request(url, headers=req_headers)
+    response = urllib.request.urlopen(req)
+    lines = response.read().decode()
+    response.close()
+except urllib.error.HTTPError as err:
+    exit_msg("HTTP error fetching {0}: {1}".format(url, err.reason))
+except urllib.error.URLError as err:
+    exit_msg("URL error fetching {0}: {1}".format(url, err.reason))
+except OSError as err:
+    exit_msg("OS error fetching {0}: {1}".format(url, err.strerror))
+except Exception:
+    exit_msg("Unexpected error: {0}".format(sys.exc_info()[0]))
+
+soup = BeautifulSoup(lines, "html.parser")
+table = soup.find('table')
+rows = table.findAll('tr')
+
+entry = "static const value_string\nBACnetVendorIdentifiers [] = {"
+print(entry)
+
+for tr in rows:
+    cols = tr.findAll('td')
+    for index,td in enumerate(cols[0:2]):
+        text = ''.join(td.find(string=True))
+        if index == 0:
+            entry = " { %4s" % text
+        else:
+            entry += ", \"%s\" }," % text.rstrip()
+            print(entry)
+
+entry = " { 0, NULL }\n};"
+print(entry)
+
diff --git a/tools/generate-dissector.py b/tools/generate-dissector.py
new file mode 100755
index 0000000..4d8ab37
--- /dev/null
+++ b/tools/generate-dissector.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019, Dario Lombardo
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later +# +# This script generates a Wireshark skeleton dissector, based on the example in the doc/ directory. +# +# Example usage: +# +# generate-dissector.py --name "My Self" --email "myself@example.com" --protoname "The dumb protocol" +# --protoshortname DUMB --protoabbrev dumb --license GPL-2.0-or-later --years "2019-2020" +# + +import argparse +from datetime import datetime +import os + + +parser = argparse.ArgumentParser(description='The Wireshark Dissector Generator') +parser.add_argument("--name", help="The author of the dissector", required=True) +parser.add_argument("--email", help="The email address of the author", required=True) +parser.add_argument("--protoname", help="The name of the protocol", required=True) +parser.add_argument("--protoshortname", help="The protocol short name", required=True) +parser.add_argument("--protoabbrev", help="The protocol abbreviation", required=True) +parser.add_argument("--license", help="The license for this dissector (please use a SPDX-License-Identifier). If omitted, %(default)s will be used", default="GPL-2.0-or-later") +parser.add_argument("--years", help="Years of validity for the license. If omitted, the current year will be used", default=str(datetime.now().year)) +parser.add_argument("-f", "--force", action='store_true', help="Force overwriting the dissector file if it already exists") +parser.add_argument("-p", "--plugin", action='store_true', help="Create as a plugin. Default is to create in epan") + + +def wsdir(): + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + + +def output_dir(args): + if args.plugin: + os.makedirs(os.path.join(wsdir(), "plugins/epan/" + args.protoabbrev), exist_ok=True) + return os.path.join(wsdir(), "plugins/epan/" + args.protoabbrev) + return os.path.join(wsdir(), "epan/dissectors") + + +def output_file(args): + return os.path.join(output_dir(args), "packet-" + args.protoabbrev + ".c") + + +def read_skeleton(filename): + skeletonfile = os.path.join(wsdir(), "doc/" + filename) + print("Reading skeleton file: " + skeletonfile) + return open(skeletonfile).read() + + +def replace_fields(buffer, args): + print("Replacing fields in skeleton") + output = buffer\ + .replace("YOUR_NAME", args.name)\ + .replace("YOUR_EMAIL_ADDRESS", args.email)\ + .replace("PROTONAME", args.protoname)\ + .replace("PROTOSHORTNAME", args.protoshortname)\ + .replace("PROTOABBREV", args.protoabbrev)\ + .replace("FIELDNAME", "Sample Field")\ + .replace("FIELDABBREV", "sample_field")\ + .replace("FT_FIELDTYPE", "FT_STRING")\ + .replace("FIELDDISPLAY", "BASE_NONE")\ + .replace("FIELDCONVERT", "NULL")\ + .replace("BITMASK", "0x0")\ + .replace("FIELDDESCR", "NULL")\ + .replace("MAX_NEEDED_FOR_HEURISTICS", "1")\ + .replace("TEST_HEURISTICS_FAIL", "0")\ + .replace("ENC_xxx", "ENC_NA")\ + .replace("EXPERTABBREV", "expert")\ + .replace("PI_GROUP", "PI_PROTOCOL")\ + .replace("PI_SEVERITY", "PI_ERROR")\ + .replace("TEST_EXPERT_condition", "0")\ + .replace("const char *subtree", "\"\"")\ + .replace("LICENSE", args.license)\ + .replace("YEARS", args.years) + + return output + + +def write_dissector(buffer, args): + ofile = output_file(args) + if os.path.isfile(ofile) and not args.force: + raise Exception("The file " + ofile + " already exists. 
You're likely overwriting an existing dissector.") + print("Writing output file: " + ofile) + return open(ofile, "w").write(buffer) + + +def patch_makefile(args): + if args.plugin: + cmakefile = os.path.join(wsdir(), "CMakeLists.txt") + patchline = "\t\tplugins/epan/" + args.protoabbrev + groupstart = "set(PLUGIN_SRC_DIRS" + else: + cmakefile = os.path.join(wsdir(), "epan/dissectors/CMakeLists.txt") + patchline = "\t${CMAKE_CURRENT_SOURCE_DIR}/packet-" + args.protoabbrev + ".c" + groupstart = "set(DISSECTOR_SRC" + print("Patching makefile: " + cmakefile) + output = "" + in_group = False + patched = False + for line in open(cmakefile): + line_strip = line.strip() + if in_group and line_strip == ")": + in_group = False + if in_group and not patched and line_strip > patchline: + output += patchline + "\n" + patched = True + if line_strip == groupstart: + in_group = True + if line_strip != patchline: + output += line + open(cmakefile, "w").write(output) + + +def write_plugin_makefile(args): + if not args.plugin: + return True + buffer = replace_fields(read_skeleton("CMakeLists-PROTOABBREV.txt"), args) + ofile = os.path.join(output_dir(args), "CMakeLists.txt") + print("Writing output file: " + ofile) + return open(ofile, "w").write(buffer) + + +def print_header(): + print("") + print("**************************************************") + print("* Wireshark skeleton dissector generator *") + print("* *") + print("* Generate a new dissector for your protocol *") + print("* starting from the skeleton provided in the *") + print("* doc directory. *") + print("* *") + print("* Copyright 2019 Dario Lombardo *") + print("**************************************************") + print("") + + +def print_trailer(args): + print("") + print("The skeleton for the dissector of the " + args.protoshortname + " protocol has been generated.") + print("Please review/extend it to match your specific criterias.") + print("") + + +if __name__ == '__main__': + print_header() + args = parser.parse_args() + buffer = replace_fields(read_skeleton("packet-PROTOABBREV.c"), args) + write_dissector(buffer, args) + patch_makefile(args) + write_plugin_makefile(args) + print_trailer(args) diff --git a/tools/generate-nl80211-fields.py b/tools/generate-nl80211-fields.py new file mode 100755 index 0000000..dfa8faa --- /dev/null +++ b/tools/generate-nl80211-fields.py @@ -0,0 +1,373 @@ +#!/usr/bin/env python3 +# Parses the nl80211.h interface and generate appropriate enums and fields +# (value_string) for packet-netlink-nl80211.c +# +# Copyright (c) 2017, Peter Wu +# Copyright (c) 2018, Mikael Kanstrup +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# +# To update the dissector source file, run this from the source directory: +# +# python tools/generate-nl80211-fields.py --update +# + +import argparse +import re +import requests +import sys + +# Begin of comment, followed by the actual array definition +HEADER = "/* Definitions from linux/nl80211.h {{{ */\n" +FOOTER = "/* }}} */\n" +# Enums to extract from the header file +EXPORT_ENUMS = { + # 'enum_name': ('field_name', field_type', 'field_blurb') + 'nl80211_commands': ('Command', 'FT_UINT8', '"Generic Netlink Command"'), + 'nl80211_attrs': (None, None, None), + 'nl80211_iftype': (None, None, None), + 'nl80211_sta_flags': (None, None, None), + 'nl80211_sta_p2p_ps_status': ('Attribute Value', 'FT_UINT8', None), + 'nl80211_he_gi': (None, None, None), + 'nl80211_he_ru_alloc': (None, None, 
None), + 'nl80211_rate_info': (None, None, None), + 'nl80211_sta_bss_param': (None, None, None), + 'nl80211_sta_info': (None, None, None), + 'nl80211_tid_stats': (None, None, None), + 'nl80211_txq_stats': (None, None, None), + 'nl80211_mpath_flags': (None, None, None), + 'nl80211_mpath_info': (None, None, None), + 'nl80211_band_iftype_attr': (None, None, None), + 'nl80211_band_attr': (None, None, None), + 'nl80211_wmm_rule': (None, None, None), + 'nl80211_frequency_attr': (None, None, None), + 'nl80211_bitrate_attr': (None, None, None), + 'nl80211_reg_initiator': ('Attribute Value', 'FT_UINT8', None), + 'nl80211_reg_type': ('Attribute Value', 'FT_UINT8', None), + 'nl80211_reg_rule_attr': (None, None, None), + 'nl80211_sched_scan_match_attr': (None, None, None), + 'nl80211_reg_rule_flags': (None, None, None), + 'nl80211_dfs_regions': ('Attribute Value', 'FT_UINT8', None), + 'nl80211_user_reg_hint_type': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_survey_info': (None, None, None), + 'nl80211_mntr_flags': (None, None, None), + 'nl80211_mesh_power_mode': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_meshconf_params': (None, None, None), + 'nl80211_mesh_setup_params': (None, None, None), + 'nl80211_txq_attr': (None, None, None), + 'nl80211_ac': (None, None, None), + 'nl80211_channel_type': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_key_mode': (None, None, None), + 'nl80211_chan_width': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_bss_scan_width': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_bss': (None, None, None), + 'nl80211_bss_status': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_auth_type': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_key_type': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_mfp': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_wpa_versions': (None, None, None), + 'nl80211_key_default_types': (None, None, None), + 'nl80211_key_attributes': (None, None, None), + 'nl80211_tx_rate_attributes': (None, None, None), + 'nl80211_txrate_gi': (None, None, None), + 'nl80211_band': (None, None, None), + 'nl80211_ps_state': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_attr_cqm': (None, None, None), + 'nl80211_cqm_rssi_threshold_event': (None, None, None), + 'nl80211_tx_power_setting': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_packet_pattern_attr': (None, None, None), + 'nl80211_wowlan_triggers': (None, None, None), + 'nl80211_wowlan_tcp_attrs': (None, None, None), + 'nl80211_attr_coalesce_rule': (None, None, None), + 'nl80211_coalesce_condition': (None, None, None), + 'nl80211_iface_limit_attrs': (None, None, None), + 'nl80211_if_combination_attrs': (None, None, None), + 'nl80211_plink_state': ('Attribute Value', 'FT_UINT8', None), + 'plink_actions': ('Attribute Value', 'FT_UINT8', None), + 'nl80211_rekey_data': (None, None, None), + 'nl80211_hidden_ssid': (None, None, None), + 'nl80211_sta_wme_attr': (None, None, None), + 'nl80211_pmksa_candidate_attr': (None, None, None), + 'nl80211_tdls_operation': ('Attribute Value', 'FT_UINT8', None), + #Reserved for future use 'nl80211_ap_sme_features': (None, None, None), + 'nl80211_feature_flags': (None, None, None), + 'nl80211_ext_feature_index': (None, None, None), + 'nl80211_probe_resp_offload_support_attr': (None, None, None), + 'nl80211_connect_failed_reason': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_timeout_reason': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_scan_flags': (None, None, None), + 'nl80211_acl_policy': ('Attribute Value', 'FT_UINT32', None), + 
'nl80211_smps_mode': ('Attribute Value', 'FT_UINT8', None), + 'nl80211_radar_event': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_dfs_state': (None, None, None), + 'nl80211_protocol_features': (None, None, None), + 'nl80211_crit_proto_id': ('Attribute Value', 'FT_UINT16', None), + 'nl80211_rxmgmt_flags': (None, None, None), + 'nl80211_tdls_peer_capability': (None, None, None), + 'nl80211_sched_scan_plan': (None, None, None), + 'nl80211_bss_select_attr': (None, None, None), + 'nl80211_nan_function_type': (None, None, None), + 'nl80211_nan_publish_type': (None, None, None), + 'nl80211_nan_func_term_reason': (None, None, None), + 'nl80211_nan_func_attributes': (None, None, None), + 'nl80211_nan_srf_attributes': (None, None, None), + 'nl80211_nan_match_attributes': (None, None, None), + 'nl80211_external_auth_action': ('Attribute Value', 'FT_UINT32', None), + 'nl80211_ftm_responder_attributes': (None, None, None), + 'nl80211_ftm_responder_stats': (None, None, None), + 'nl80211_preamble': (None, None, None), + 'nl80211_peer_measurement_type': (None, None, None), + 'nl80211_peer_measurement_status': (None, None, None), + 'nl80211_peer_measurement_req': (None, None, None), + 'nl80211_peer_measurement_resp': (None, None, None), + 'nl80211_peer_measurement_peer_attrs': (None, None, None), + 'nl80211_peer_measurement_attrs': (None, None, None), + 'nl80211_peer_measurement_ftm_capa': (None, None, None), + 'nl80211_peer_measurement_ftm_req': (None, None, None), + 'nl80211_peer_measurement_ftm_failure_reasons': (None, None, None), + 'nl80211_peer_measurement_ftm_resp': (None, None, None), + 'nl80211_obss_pd_attributes': (None, None, None), +} +# File to be patched +SOURCE_FILE = "epan/dissectors/packet-netlink-nl80211.c" +# URL where the latest version can be found +URL = "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/include/uapi/linux/nl80211.h" + +def make_enum(name, values, expressions, indent): + code = 'enum ws_%s {\n' % name + for value, expression in zip(values, expressions): + if expression and 'NL80211' in expression: + expression = 'WS_%s' % expression + if expression: + code += '%sWS_%s = %s,\n' % (indent, value, expression) + else: + code += '%sWS_%s,\n' % (indent, value) + + code += '};\n' + return code + +def make_value_string(name, values, indent,): + code = 'static const value_string ws_%s_vals[] = {\n' % name + align = 40 + for value in values: + code += indent + ('{ WS_%s,' % value).ljust(align - 1) + ' ' + code += '"%s" },\n' % value + code += '%s{ 0, NULL }\n' % indent + code += '};\n' + code += 'static value_string_ext ws_%s_vals_ext =' % name + code += ' VALUE_STRING_EXT_INIT(ws_%s_vals);\n' % name + return code + +def remove_prefix(prefix, text): + if text.startswith(prefix): + return text[len(prefix):] + return text + +def make_hf_defs(name, indent): + code = 'static gint hf_%s = -1;' % name + return code + +def make_hf(name, indent): + (field_name, field_type, field_blurb) = EXPORT_ENUMS.get(name) + field_abbrev = name + + # Fill in default values + if not field_name: + field_name = 'Attribute Type' + if not field_type: + field_type = 'FT_UINT16' + if not field_blurb: + field_blurb = 'NULL' + + # Special treatment of already existing field names + rename_fields = { + 'nl80211_attrs': 'nl80211_attr_type', + 'nl80211_commands': 'nl80211_cmd' + } + if rename_fields.get(name): + field_abbrev = rename_fields[name] + field_abbrev = remove_prefix('nl80211_', field_abbrev) + + code = indent + indent + '{ &hf_%s,\n' % name + code += indent*3 + '{ "%s", 
"nl80211.%s",\n' % (field_name, field_abbrev) + code += indent*3 + ' %s, BASE_DEC | BASE_EXT_STRING,\n' % (field_type) + code += indent*3 + ' VALS_EXT_PTR(&ws_%s_vals_ext), 0x00,\n' % (name) + code += indent*3 + ' %s, HFILL },\n' % (field_blurb) + code += indent + indent + '},' + return code + +def make_ett_defs(name, indent): + code = 'static gint ett_%s = -1;' % name + return code + +def make_ett(name, indent): + code = indent + indent + '&ett_%s,' % name + return code + +class EnumStore(object): + __RE_ENUM_VALUE = re.compile( + r'\s+?(?P\w+)(?:\ /\*.*?\*\/)?(?:\s*=\s*(?P.*?))?(?:\s*,|$)', + re.MULTILINE | re.DOTALL) + + def __init__(self, name, values): + self.name = name + self.values = [] + self.expressions = [] + self.active = True + self.parse_values(values) + + + def parse_values(self, values): + for m in self.__RE_ENUM_VALUE.finditer(values): + value, expression = m.groups() + if value.startswith('NUM_'): + break + if value.endswith('_AFTER_LAST'): + break + if value.endswith('_LAST'): + break + if value.startswith('__') and value.endswith('_NUM'): + break + if expression and expression in self.values: + # Skip aliases + continue + self.values.append(value) + self.expressions.append(expression) + + def finish(self): + return self.name, self.values, self.expressions + +RE_ENUM = re.compile( + r'enum\s+?(?P\w+)\s+?\{(?P.*?)\}\;', + re.MULTILINE | re.DOTALL) +RE_COMMENT = re.compile(r'/\*.*?\*/', re.MULTILINE | re.DOTALL) + +def parse_header(content): + # Strip comments + content = re.sub(RE_COMMENT, '', content) + + enums = [] + for m in RE_ENUM.finditer(content): + enum = m.group('enum') + values = m.group('values') + if enum in EXPORT_ENUMS: + enums.append(EnumStore(enum, values).finish()) + + return enums + +def parse_source(): + """ + Reads the source file and tries to split it in the parts before, inside and + after the block. 
+ """ + begin, block, end = '', '', '' + parts = [] + # Stages: 1 (before block), 2 (in block, skip), 3 (after block) + stage = 1 + with open(SOURCE_FILE) as f: + for line in f: + if line == FOOTER and stage == 2: + stage = 3 # End of block + if stage == 1: + begin += line + elif stage == 2: + block += line + elif stage == 3: + end += line + if line == HEADER and stage == 1: + stage = 2 # Begin of block + if line == HEADER and stage == 3: + stage = 2 # Begin of next code block + parts.append((begin, block, end)) + begin, block, end = '', '', '' + + parts.append((begin, block, end)) + if stage != 3 or len(parts) != 3: + raise RuntimeError("Could not parse file (in stage %d) (parts %d)" % (stage, len(parts))) + return parts + +parser = argparse.ArgumentParser() +parser.add_argument("--update", action="store_true", + help="Update %s as needed instead of writing to stdout" % SOURCE_FILE) +parser.add_argument("--indent", default=" " * 4, + help="indentation (use \\t for tabs, default 4 spaces)") +parser.add_argument("header_file", nargs="?", default=URL, + help="nl80211.h header file (use - for stdin or a HTTP(S) URL, " + "default %(default)s)") + +def main(): + args = parser.parse_args() + + indent = args.indent.replace("\\t", "\t") + + if any(args.header_file.startswith(proto) for proto in ('http:', 'https')): + r = requests.get(args.header_file) + r.raise_for_status() + enums = parse_header(r.text) + elif args.header_file == "-": + enums = parse_header(sys.stdin.read()) + else: + with open(args.header_file) as f: + enums = parse_header(f.read()) + + assert len(enums) == len(EXPORT_ENUMS), \ + "Could not parse data, found %d/%d results" % \ + (len(enums), len(EXPORT_ENUMS)) + + code_enums, code_vals, code_hf_defs, code_ett_defs, code_hf, code_ett = '', '', '', '', '', '' + for enum_name, enum_values, expressions in enums: + code_enums += make_enum(enum_name, enum_values, expressions, indent) + '\n' + code_vals += make_value_string(enum_name, enum_values, indent) + '\n' + code_hf_defs += make_hf_defs(enum_name, indent) + '\n' + code_ett_defs += make_ett_defs(enum_name, indent) + '\n' + code_hf += make_hf(enum_name, indent) + '\n' + code_ett += make_ett(enum_name, indent) + '\n' + + code_top = code_enums + code_vals + code_hf_defs + '\n' + code_ett_defs + code_top = code_top.rstrip("\n") + "\n" + + code = [code_top, code_hf, code_ett] + + update = False + if args.update: + parts = parse_source() + + # Check if file needs update + for (begin, old_code, end), new_code in zip(parts, code): + if old_code != new_code: + update = True + break + if not update: + print("File is up-to-date") + return + # Update file + with open(SOURCE_FILE, "w") as f: + for (begin, old_code, end), new_code in zip(parts, code): + f.write(begin) + f.write(new_code) + f.write(end) + print("Updated %s" % SOURCE_FILE) + else: + for new_code in code: + print(new_code) + +if __name__ == '__main__': + main() + +# +# Editor modelines - https://www.wireshark.org/tools/modelines.html +# +# Local variables: +# c-basic-offset: 4 +# tab-width: 8 +# indent-tabs-mode: nil +# End: +# +# vi: set shiftwidth=4 tabstop=8 expandtab: +# :indentSize=4:tabSize=8:noTabs=true: +# diff --git a/tools/generate-sysdig-event.py b/tools/generate-sysdig-event.py new file mode 100755 index 0000000..67419c8 --- /dev/null +++ b/tools/generate-sysdig-event.py @@ -0,0 +1,412 @@ +#!/usr/bin/env python3 +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +'''\ +Generate 
Sysdig event dissector sections from the sysdig sources. + +Reads driver/event_table.c and driver/ppm_events_public.h and generates +corresponding dissection code in packet-sysdig-event.c. Updates are +performed in-place in the dissector code. + +Requires an Internet connection. Assets are loaded from GitHub over HTTPS, from falcosecurity/libs master. +''' + +import logging +import os +import os.path +import re +import urllib.request, urllib.error, urllib.parse +import sys + +sysdig_repo_pfx = 'https://raw.githubusercontent.com/falcosecurity/libs/master/' + +def exit_msg(msg=None, status=1): + if msg is not None: + sys.stderr.write(msg + '\n\n') + sys.stderr.write(__doc__ + '\n') + sys.exit(status) + +def get_url_lines(url): + '''Open a URL. + Returns the URL body as a list of lines. + ''' + req_headers = { 'User-Agent': 'Wireshark generate-sysdig-event' } + try: + req = urllib.request.Request(url, headers=req_headers) + response = urllib.request.urlopen(req) + lines = response.read().decode().splitlines() + response.close() + except urllib.error.HTTPError as err: + exit_msg("HTTP error fetching {0}: {1}".format(url, err.reason)) + except urllib.error.URLError as err: + exit_msg("URL error fetching {0}: {1}".format(url, err.reason)) + except OSError as err: + exit_msg("OS error fetching {0}".format(url, err.strerror)) + except Exception: + exit_msg("Unexpected error:", sys.exc_info()[0]) + + return lines + + +ppm_ev_pub_lines = get_url_lines(sysdig_repo_pfx + 'driver/ppm_events_public.h') + +ppme_re = re.compile('^\s+PPME_([A-Z0-9_]+_[EX])\s*=\s*([0-9]+)\s*,') +ppm_sc_x_re = re.compile('^\s+PPM_SC_X\s*\(\s*(\S+)\s*,\s*(\d+)\s*\)') + +event_info_d = {} + +def get_event_defines(): + event_d = {} + for line in ppm_ev_pub_lines: + m = ppme_re.match(line) + if m: + event_d[int(m.group(2))] = m.group(1) + return event_d + +def get_syscall_code_defines(): + sc_d = {} + for line in ppm_ev_pub_lines: + m = ppm_sc_x_re.match(line) + if m: + sc_d[int(m.group(2))] = m.group(1) + return sc_d + +ppm_ev_table_lines = get_url_lines(sysdig_repo_pfx + 'driver/event_table.c') + +hf_d = {} + +event_info_re = re.compile('^\s+\[\s*PPME_.*\]\s*=\s*{\s*"([A-Za-z0-9_]+)"\s*,[^,]+,[^,]+,\s*([0-9]+)\s*[,{}]') +event_param_re = re.compile('{\s*"([A-Za-z0-9_ ]+)"\s*,\s*PT_([A-Z0-9_]+)\s*,\s*PF_([A-Z0-9_]+)\s*[,}]') + +def get_event_names(): + '''Return a contiguous list of event names. Names are lower case.''' + event_name_l = [] + for line in ppm_ev_table_lines: + ei = event_info_re.match(line) + if ei: + event_name_l.append(ei.group(1)) + return event_name_l + +# PT_xxx to FT_xxx +pt_to_ft = { + 'BYTEBUF': 'BYTES', + 'CHARBUF': 'STRING', + 'ERRNO': 'INT64', + 'FD': 'INT64', + 'FLAGS8': 'INT8', + 'FLAGS16': 'INT16', + 'FLAGS32': 'INT32', + 'FSPATH': 'STRING', + 'FSRELPATH': 'STRING', + 'GID': 'INT32', + 'MODE': 'INT32', + 'PID': 'INT64', + 'UID': 'INT32', + 'SYSCALLID': 'UINT16', +} + +# FT_xxx to BASE_xxx +force_param_formats = { + 'STRING': 'NONE', + 'INT.*': 'DEC', +} + +def get_event_params(): + '''Return a list of dictionaries containing event names and parameter info.''' + event_param_l = [] + event_num = 0 + force_string_l = ['args', 'env'] + for line in ppm_ev_table_lines: + ei = event_info_re.match(line) + ep = event_param_re.findall(line) + if ei and ep: + event_name = ei.group(1) + src_param_count = int(ei.group(2)) + if len(ep) != src_param_count: + err_msg = '{}: found {} parameters. Expected {}. 
Params: {}'.format( + event_name, len(ep), src_param_count, repr(ep)) + if len(ep) > src_param_count: + logging.warning(err_msg) + del ep[src_param_count:] + else: + raise NameError(err_msg) + for p in ep: + if p[0] in force_string_l: + param_type = 'STRING' + elif p[1] in pt_to_ft: + param_type = pt_to_ft[p[1]] + elif p[0] == 'flags' and p[1].startswith('INT') and 'HEX' in p[2]: + param_type = 'U' + p[1] + elif 'INT' in p[1]: + # Ints + param_type = p[1] + else: + print(f"p fallback {p}") + # Fall back to bytes + param_type = 'BYTES' + + if p[2] == 'NA': + if 'INT' in param_type: + param_format = 'DEC' + else: + param_format = 'NONE' + elif param_type == 'BYTES': + param_format = 'NONE' + else: + param_format = p[2] + + for pt_pat, force_pf in force_param_formats.items(): + if re.match(pt_pat, param_type) and param_format != force_pf: + err_msg = 'Forcing {} {} format to {}. Params: {}'.format( + event_name, param_type, force_pf, repr(ep)) + logging.warning(err_msg) + param_format = force_pf + + param_d = { + 'event_name': event_name, + 'event_num': event_num, + # use replace() to account for "plugin ID" param name (ie: param names with space) + 'param_name': p[0].replace(" ", "_"), + 'param_type': param_type, + 'param_format': param_format, + } + event_param_l.append(param_d) + if ei: + event_num += 1 + return event_param_l + +def param_to_hf_name(param): + return 'hf_param_{}_{}'.format(param['param_name'], param['param_type'].lower()) + +def param_to_value_string_name(param): + return '{}_{}_vals'.format(param['param_name'], param['param_type'].lower()) + +def get_param_desc(param): + # Try to coerce event names and parameters into human-friendly + # strings. + # XXX This could use some work. + + # Specific descriptions. Event name + parameter name. + param_descs = { + 'accept.queuepct': 'Accept queue per connection', + 'execve.args': 'Program arguments', + 'execve.comm': 'Command', + 'execve.cwd': 'Current working directory', + } + # General descriptions. Event name only. 
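+    # (A clarifying note on lookup order, with hypothetical inputs: the
+    # specific 'event.param' key wins, then the event-level table below,
+    # then the bare parameter name; e.g. 'execve.cwd' maps to 'Current
+    # working directory', while 'ioctl.fd', absent from the specific
+    # table, becomes 'I/O control: fd'.)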
+ event_descs = { + 'ioctl': 'I/O control', + } + + event_name = param['event_name'] + param_id = '{}.{}'.format(event_name, param['param_name']) + if param_id in param_descs: + param_desc = param_descs[param_id] + elif event_name in event_descs: + param_desc = '{}: {}'.format(event_descs[event_name], param['param_name']) + else: + param_desc = param['param_name'] + return param_desc + +def main(): + logging.basicConfig(format='%(levelname)s: %(message)s') + + # Event list + event_d = get_event_defines() + event_nums = list(event_d.keys()) + event_nums.sort() + + event_name_l = get_event_names() + event_param_l = get_event_params() + + hf_d = {} + for param in event_param_l: + hf_name = param_to_hf_name(param) + hf_d[hf_name] = param + + idx_id_to_name = { '': 'no' } + parameter_index_l = [] + + for en in range (0, len(event_nums)): + param_id = '' + param_l = [] + event_var = event_d[en].lower() + for param in event_param_l: + if param['event_num'] == en: + hf_name = param_to_hf_name(param) + param_l.append(hf_name) + param_id += ':' + param['param_name'] + '_' + param['param_type'] + + ei_str = '' + if param_id not in idx_id_to_name: + idx_id_to_name[param_id] = event_var + ei_str = 'static int * const {}_indexes[] = {{ &{}, NULL }};'.format( + event_var, + ', &'.join(param_l) + ) + else: + ei_str = '#define {}_indexes {}_indexes'.format(event_var, idx_id_to_name[param_id]) + + parameter_index_l.append(ei_str) + + dissector_path = os.path.join(os.path.dirname(__file__), + '..', 'epan', 'dissectors', 'packet-sysdig-event.c') + dissector_f = open(dissector_path, 'r') + dissector_lines = list(dissector_f) + dissector_f = open(dissector_path, 'w+') + + # Strip out old content + strip_re_l = [] + strip_re_l.append(re.compile('^static\s+int\s+hf_param_.*;')) + strip_re_l.append(re.compile('^#define\s+EVT_STR_[A-Z0-9_]+\s+"[A-Za-z0-9_]+"')) + strip_re_l.append(re.compile('^#define\s+EVT_[A-Z0-9_]+\s+[0-9]+')) + strip_re_l.append(re.compile('^\s*{\s*EVT_[A-Z0-9_]+\s*,\s*EVT_STR_[A-Z0-9_]+\s*}')) + strip_re_l.append(re.compile('^static\s+const\s+int\s+\*\s*[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;')) + strip_re_l.append(re.compile('^static\s+int\s*\*\s+const\s+[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;')) + strip_re_l.append(re.compile('^\s*#define\s+[a-z0-9_]+_[ex]_indexes\s+[a-z0-9_]+_indexes')) + strip_re_l.append(re.compile('^\s*\{\s*EVT_[A-Z0-9_]+_[EX]\s*,\s*[a-z0-9_]+_[ex]_indexes\s*}\s*,')) + strip_re_l.append(re.compile('^\s*\{\s*\d+\s*,\s*"\S+"\s*}\s*,\s*//\s*PPM_SC_\S+')) + strip_re_l.append(re.compile('^\s*{\s*&hf_param_.*},')) # Must all be on one line + + for strip_re in strip_re_l: + dissector_lines = [l for l in dissector_lines if not strip_re.search(l)] + + # Find our value strings + value_string_re = re.compile('static\s+const\s+value_string\s+([A-Za-z0-9_]+_vals)') + value_string_l = [] + for line in dissector_lines: + vs = value_string_re.match(line) + if vs: + value_string_l.append(vs.group(1)) + + # Add in new content after comments. 
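+    # (A sketch of the update mechanism: each '/* <section name>' marker
+    # line matched below is rewritten as, e.g.,
+    #   /* Header fields. Automatically generated by tools/generate-sysdig-event.py */
+    # followed by the freshly generated lines, such as (hypothetical field)
+    #   static int hf_param_addr_bytes = -1;
+    # The strip regexes above already removed the previous generation's
+    # output, so each run replaces rather than accumulates.)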
+ + header_fields_c = 'Header fields' + header_fields_re = re.compile('/\*\s+' + header_fields_c, flags = re.IGNORECASE) + header_fields_l = [] + for hf_name in sorted(hf_d.keys()): + header_fields_l.append('static int {} = -1;'.format(hf_name)) + + event_names_c = 'Event names' + event_names_re = re.compile('/\*\s+' + event_names_c, flags = re.IGNORECASE) + event_names_l = [] + event_str_l = list(set(event_name_l)) + event_str_l.sort() + for evt_str in event_str_l: + event_names_l.append('#define EVT_STR_{0:24s} "{1:s}"'.format(evt_str.upper(), evt_str)) + + event_definitions_c = 'Event definitions' + event_definitions_re = re.compile('/\*\s+' + event_definitions_c, flags = re.IGNORECASE) + event_definitions_l = [] + for evt in event_nums: + event_definitions_l.append('#define EVT_{0:24s} {1:3d}'.format(event_d[evt], evt)) + + value_strings_c = 'Value strings' + value_strings_re = re.compile('/\*\s+' + value_strings_c, flags = re.IGNORECASE) + value_strings_l = [] + for evt in event_nums: + evt_num = 'EVT_{},'.format(event_d[evt]) + evt_str = 'EVT_STR_' + event_name_l[evt].upper() + value_strings_l.append(' {{ {0:<32s} {1:s} }},'.format(evt_num, evt_str)) + + parameter_index_c = 'Parameter indexes' + parameter_index_re = re.compile('/\*\s+' + parameter_index_c, flags = re.IGNORECASE) + # parameter_index_l defined above. + + event_tree_c = 'Event tree' + event_tree_re = re.compile('/\*\s+' + event_tree_c, flags = re.IGNORECASE) + event_tree_l = [] + for evt in event_nums: + evt_num = 'EVT_{}'.format(event_d[evt]) + evt_idx = '{}_indexes'.format(event_d[evt].lower()) + event_tree_l.append(' {{ {}, {} }},'.format(evt_num, evt_idx)) + + # Syscall codes + syscall_code_d = get_syscall_code_defines() + syscall_code_c = 'Syscall codes' + syscall_code_re = re.compile('/\*\s+' + syscall_code_c, flags = re.IGNORECASE) + syscall_code_l = [] + for sc_num in syscall_code_d: + syscall_code_l.append(f' {{ {sc_num:3}, "{syscall_code_d[sc_num].lower()}" }}, // PPM_SC_{syscall_code_d[sc_num]}') + + header_field_reg_c = 'Header field registration' + header_field_reg_re = re.compile('/\*\s+' + header_field_reg_c, flags = re.IGNORECASE) + header_field_reg_l = [] + for hf_name in sorted(hf_d.keys()): + param = hf_d[hf_name] + event_name = param['event_name'] + param_desc = get_param_desc(param) + param_name = param['param_name'] + param_type = param['param_type'] + param_format = param['param_format'] + fieldconvert = 'NULL' + vs_name = param_to_value_string_name(param) + if vs_name in value_string_l and 'INT' in param_type: + fieldconvert = 'VALS({})'.format(vs_name) + header_field_reg_l.append(' {{ &{}, {{ "{}", "sysdig.param.{}.{}", FT_{}, BASE_{}, {}, 0, NULL, HFILL }} }},'.format( + hf_name, + param_desc, + event_name, + param_name, + param_type, + param_format, + fieldconvert + )) + + for line in dissector_lines: + fill_comment = None + fill_l = [] + + if header_fields_re.match(line): + fill_comment = header_fields_c + fill_l = header_fields_l + elif event_names_re.match(line): + fill_comment = event_names_c + fill_l = event_names_l + elif event_definitions_re.match(line): + fill_comment = event_definitions_c + fill_l = event_definitions_l + elif value_strings_re.match(line): + fill_comment = value_strings_c + fill_l = value_strings_l + elif parameter_index_re.match(line): + fill_comment = parameter_index_c + fill_l = parameter_index_l + elif event_tree_re.match(line): + fill_comment = event_tree_c + fill_l = event_tree_l + elif syscall_code_re.match(line): + fill_comment = syscall_code_c + fill_l = 
syscall_code_l + elif header_field_reg_re.match(line): + fill_comment = header_field_reg_c + fill_l = header_field_reg_l + + if fill_comment is not None: + # Write our comment followed by the content + print(('Generating {}, {:d} lines'.format(fill_comment, len(fill_l)))) + dissector_f.write('/* {}. Automatically generated by tools/{} */\n'.format( + fill_comment, + os.path.basename(__file__) + )) + for line in fill_l: + dissector_f.write('{}\n'.format(line)) + # Fill each section only once + del fill_l[:] + else: + # Existing content + dissector_f.write(line) + + dissector_f.close() + +# +# On with the show +# + +if __name__ == "__main__": + sys.exit(main()) diff --git a/tools/generate_authors.py b/tools/generate_authors.py new file mode 100755 index 0000000..a74ef1c --- /dev/null +++ b/tools/generate_authors.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 + +# +# Generate the AUTHORS file combining existing AUTHORS file with +# git commit log. +# +# Usage: generate_authors.py AUTHORS.src + +# Copyright 2022 Moshe Kaplan +# Based on generate_authors.pl by Michael Mann +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import argparse +import io +import re +import subprocess +import sys + + +def get_git_authors(): + ''' + Sample line: + # 4321 Navin R. Johnson + ''' + GIT_LINE_REGEX = r"^\s*\d+\s+([^<]*)\s*<([^>]*)>" + cmd = "git --no-pager shortlog --email --summary HEAD".split(' ') + # check_output is used for Python 3.4 compatability + git_cmd_output = subprocess.check_output(cmd, universal_newlines=True, encoding='utf-8') + + git_authors = [] + for line in git_cmd_output.splitlines(): + # Check if this is needed: + line = line.strip() + match = re.match(GIT_LINE_REGEX, line) + name = match.group(1).strip() + email = match.group(2).strip() + # Try to lower how much spam people get: + email = email.replace('@', '[AT]') + git_authors.append((name, email)) + return git_authors + + +def extract_contributors(authors_content): + # Extract names and email addresses from the AUTHORS file Contributors + contributors_content = authors_content.split("= Contributors =", 1)[1] + CONTRIBUTOR_LINE_REGEX = r"^([\w\.\-\'\x80-\xff]+(\s*[\w+\.\-\'\x80-\xff])*)\s+<([^>]*)>" + contributors = [] + state = "" + for line in contributors_content.splitlines(): + contributor_match = re.match(CONTRIBUTOR_LINE_REGEX, line) + if re.search(r'([^\{]*)\{', line): + if contributor_match: + name = contributor_match.group(1) + email = contributor_match.group(3) + contributors.append((name, email)) + state = "s_in_bracket" + elif state == "s_in_bracket": + if re.search(r'([^\}]*)\}', line): + state = "" + elif re.search('<', line): + if contributor_match: + name = contributor_match.group(1) + email = contributor_match.group(3) + contributors.append((name, email)) + elif re.search(r"(e-mail address removed at contributor's request)", line): + if contributor_match: + name = contributor_match.group(1) + email = contributor_match.group(3) + contributors.append((name, email)) + else: + pass + return contributors + + +def generate_git_contributors_text(contributors_emails, git_authors_emails): + # Track the email addresses seen to avoid including the same email address twice + emails_addresses_seen = set() + for name, email in contributors_emails: + emails_addresses_seen.add(email.lower()) + + output_lines = [] + for name, email in git_authors_emails: + if email.lower() in emails_addresses_seen: + continue + + # Skip Gerald, since he's part 
of the header: + if email == "gerald[AT]wireshark.org": + continue + + ntab = 3 + if len(name) >= 8*ntab: + line = "{name} <{email}>".format(name=name, email=email) + else: + ntab -= len(name)/8 + if len(name) % 8: + ntab += 1 + tabs = '\t'*int(ntab) + line = "{name}{tabs}<{email}>".format(name=name, tabs=tabs, email=email) + + emails_addresses_seen.add(email.lower()) + output_lines += [line] + return "\n".join(output_lines) + + +# Read authos file until we find gitlog entries, then stop +def read_authors(parsed_args): + lines = [] + with open(parsed_args.authors[0], 'r', encoding='utf-8') as fh: + for line in fh.readlines(): + if '= From git log =' in line: + break + lines.append(line) + return ''.join(lines) + + +def main(): + parser = argparse.ArgumentParser(description="Generate the AUTHORS file combining existing AUTHORS file with git commit log.") + parser.add_argument("authors", metavar='authors', nargs=1, help="path to AUTHORS file") + parsed_args = parser.parse_args() + + author_content = read_authors(parsed_args) + + # Collect the listed contributors emails so that we don't duplicate them + # in the listing of git contributors + contributors_emails = extract_contributors(author_content) + git_authors_emails = get_git_authors() + # Then generate the text output for git contributors + git_contributors_text = generate_git_contributors_text(contributors_emails, git_authors_emails) + + # Now we can write our output: + git_contributor_header = '= From git log =\n\n' + output = author_content + git_contributor_header + git_contributors_text + '\n' + + with open(parsed_args.authors[0], 'w', encoding='utf-8') as fh: + fh.write(output) + + +if __name__ == '__main__': + main() diff --git a/tools/generate_cbor_pcap.py b/tools/generate_cbor_pcap.py new file mode 100755 index 0000000..545b985 --- /dev/null +++ b/tools/generate_cbor_pcap.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +''' +Convert a CBOR diagnostic notation file into an HTTP request +for the encoded cbor. +This allows straightforward test and debugging of simple pcap files. 
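+
+Example usage, with hypothetical file names (the default input type runs
+the external "diag2cbor.rb" converter, which must be on PATH):
+
+    echo '{"temp": 22}' | ./generate_cbor_pcap.py --outfile http-cbor.pcap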
+ + Copyright 2021 Brian Sipos + +SPDX-License-Identifier: LGPL-2.1-or-later +''' + +from argparse import ArgumentParser +from io import BytesIO +import scapy +from scapy.layers.l2 import Ether +from scapy.layers.inet import IP, TCP +from scapy.layers.http import HTTP, HTTPRequest +from scapy.packet import Raw +from scapy.utils import wrpcap +from subprocess import check_output +import sys + + +def main(): + parser = ArgumentParser() + parser.add_argument('--content-type', default='application/cbor', + help='The request content-type header') + parser.add_argument('--infile', default='-', + help='The diagnostic text input file, or "-" for stdin') + parser.add_argument('--outfile', default='-', + help='The PCAP output file, or "-" for stdout') + parser.add_argument('--intype', default='cbordiag', + choices=['cbordiag', 'raw'], + help='The input data type.') + args = parser.parse_args() + + # First get the CBOR data itself + infile_name = args.infile.strip() + if infile_name != '-': + infile = open(infile_name, 'rb') + else: + infile = sys.stdin.buffer + + if args.intype == 'raw': + cbordata = infile.read() + elif args.intype == 'cbordiag': + cbordata = check_output('diag2cbor.rb', stdin=infile) + + # Now synthesize an HTTP request with that body + req = HTTPRequest( + Method='POST', + Host='example.com', + User_Agent='scapy', + Content_Type=args.content_type, + Content_Length=str(len(cbordata)), + ) / Raw(cbordata) + + # Write the request directly into pcap + outfile_name = args.outfile.strip() + if outfile_name != '-': + outfile = open(outfile_name, 'wb') + else: + outfile = sys.stdout.buffer + + pkt = Ether()/IP()/TCP()/HTTP()/req + wrpcap(outfile, pkt) + +if __name__ == '__main__': + sys.exit(main()) diff --git a/tools/html2text.py b/tools/html2text.py new file mode 100755 index 0000000..da290b1 --- /dev/null +++ b/tools/html2text.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +# +# html2text.py - converts HTML to text +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from __future__ import unicode_literals + +__author__ = "Peter Wu " +__copyright__ = "Copyright 2015, Peter Wu" +__license__ = "GPL (v2 or later)" + +# TODO: +# multiple list indentation levels (modify bullets?) +# maybe allow for ascii output instead of utf-8? + +import sys +from textwrap import TextWrapper +try: + from HTMLParser import HTMLParser + from htmlentitydefs import name2codepoint +except ImportError: # Python 3 + from html.parser import HTMLParser + from html.entities import name2codepoint + unichr = chr # for html entity handling + +class TextHTMLParser(HTMLParser): + """Converts a HTML document to text.""" + def __init__(self): + try: + # Python 3.4 + HTMLParser. __init__(self, convert_charrefs=True) + except Exception: + HTMLParser. 
__init__(self) + # All text, concatenated + self.output_buffer = '' + # The current text block which is being constructed + self.text_block = '' + # Whether the previous element was terminated with whitespace + self.need_space = False + # Whether to prevent word-wrapping the contents (for "pre" tag) + self.skip_wrap = False + # Quoting + self.need_quote = False + self.quote_stack = [] + # Suffixes + self.need_suffix = False + self.suffix_stack = [] + # track list items + self.list_item_prefix = None + self.ordered_list_index = None + self.stack_list_item_prefix = [] + self.stack_ordered_list_index = [] + self.list_indent_level = 0 + self.list_item_indent = "" + # Indentation (for heading and paragraphs) + self.indent_levels = [0, 0] + # Don't dump CSS, scripts, etc. + self.ignore_tags = ('head', 'style', 'script') + self.ignore_level = 0 + # href footnotes. + self.footnotes = [] + self.href = None + + def _wrap_text(self, text): + """Wraps text, but additionally indent list items.""" + initial_indent = indent = sum(self.indent_levels) * ' ' + if self.list_item_prefix: + initial_indent += self.list_item_prefix + indent += ' ' + kwargs = { + 'width': 72, + 'initial_indent': initial_indent, + 'subsequent_indent': indent + } + kwargs['break_on_hyphens'] = False + wrapper = TextWrapper(**kwargs) + return '\n'.join(wrapper.wrap(text)) + + def _commit_block(self, newline='\n\n'): + text = self.text_block + if text: + if not self.skip_wrap: + text = self._wrap_text(text) + self.output_buffer += text + newline + self.text_block = '' + self.need_space = False + + def handle_starttag(self, tag, attrs): + # end a block of text on
, but also flush list items which are not + # terminated. + if tag == 'br' or tag == 'li': + self._commit_block('\n') + if tag == 'code': + self.need_quote = True + self.quote_stack.append('`') + if tag == 'pre': + self.skip_wrap = True + if tag in ('ol', 'ul'): + self.list_indent_level += 1 + self.list_item_indent = " " * (self.list_indent_level - 1) + self.stack_ordered_list_index.append(self.ordered_list_index) + self.stack_list_item_prefix.append(self.list_item_prefix) + # Following list items are numbered. + if tag == 'ol': + self.ordered_list_index = 1 + if tag == 'ul': + self.list_item_prefix = self.list_item_indent + ' • ' + if tag == 'li' and self.ordered_list_index: + self.list_item_prefix = self.list_item_indent + ' %d. ' % (self.ordered_list_index) + self.ordered_list_index += 1 + if tag[0] == 'h' and len(tag) == 2 and \ + (tag[1] >= '1' and tag[1] <= '6'): + self.indent_levels = [int(tag[1]) - 1, 0] + if tag == 'p': + self.indent_levels[1] = 1 + if tag == 'a': + try: + href = [attr[1] for attr in attrs if attr[0] == 'href'][0] + if '://' in href: # Skip relative URLs and links. + self.href = href + except IndexError: + self.href = None + if tag == 'span': + try: + el_class = [attr[1] for attr in attrs if attr[0] == 'class'][0] + if 'menuseq' in el_class: + self.need_quote = True + self.quote_stack.append('"') + except IndexError: + pass + if tag == 'div': + try: + el_class = [attr[1] for attr in attrs if attr[0] == 'class'][0] + if 'title' in el_class.split(' '): + self.need_suffix = True + self.suffix_stack.append(':') + except IndexError: + pass + if tag in self.ignore_tags: + self.ignore_level += 1 + + def handle_data(self, data): + quote = '' + if self.need_quote: + quote = self.quote_stack[-1] + suffix = '' + if self.need_suffix: + suffix = self.suffix_stack.pop() + if self.ignore_level > 0: + return + elif self.skip_wrap: + block = data + else: + if self.href and data == self.href: + # This is a self link. Don't create a footnote. + self.href = None + + # For normal text, fold multiple whitespace and strip + # leading and trailing spaces for the whole block (but + # keep spaces in the middle). + block = quote + if data.strip() and data[:1].isspace(): + # Keep spaces in the middle + self.need_space = True + if self.need_space and data.strip() and self.text_block: + block = ' ' + quote + block += ' '.join(data.split()) + suffix + self.need_space = data[-1:].isspace() + self.text_block += block + self.need_quote = False + self.need_suffix = False + + def handle_endtag(self, tag): + block_elements = 'p li ul pre ol h1 h2 h3 h4 h5 h6 tr' + #block_elements += ' dl dd dt' + if tag in block_elements.split(): + self._commit_block() + if tag in ('code', 'span'): + # XXX This span isn't guaranteed to match its opening. 
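+            # (A clarifying note: handle_starttag() pushes a quote character
+            # for every <code>, but for <span> only when its class contains
+            # "menuseq"; a plain </span> therefore pops a quote it never
+            # pushed, which is the mismatch the XXX above warns about.)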
+ self.text_block += self.quote_stack.pop() + if tag in ('ol', 'ul'): + self.list_indent_level -= 1 + self.list_item_indent = " " * (self.list_indent_level - 1) + self.ordered_list_index = self.stack_ordered_list_index.pop() + self.list_item_prefix = self.stack_list_item_prefix.pop() + if tag == 'pre': + self.skip_wrap = False + if tag == 'a' and self.href: + self.footnotes.append(self.href) + self.text_block += '[{0}]'.format(len(self.footnotes)) + if tag in self.ignore_tags: + self.ignore_level -= 1 + + def handle_charref(self, name): + self.handle_data(unichr(int(name))) + + def handle_entityref(self, name): + self.handle_data(unichr(name2codepoint[name])) + + def close(self): + HTMLParser.close(self) + self._commit_block() + + if len(self.footnotes) > 0: + self.list_item_prefix = None + self.indent_levels = [1, 0] + self.text_block = 'References' + self._commit_block() + self.indent_levels = [1, 1] + footnote_num = 1 + for href in self.footnotes: + self.text_block += '{0:>2}. {1}\n'.format(footnote_num, href) + footnote_num += 1 + self._commit_block('\n') + + + byte_output = self.output_buffer.encode('utf-8') + if hasattr(sys.stdout, 'buffer'): + sys.stdout.buffer.write(byte_output) + else: + sys.stdout.write(byte_output) + + +def main(): + htmlparser = TextHTMLParser() + if len(sys.argv) > 1 and sys.argv[1] != '-': + filename = sys.argv[1] + f = open(filename, 'rb') + else: + filename = None + f = sys.stdin + try: + if hasattr(f, 'buffer'): + # Access raw (byte) buffer in Python 3 instead of decoded one + f = f.buffer + # Read stdin as a Unicode string + htmlparser.feed(f.read().decode('utf-8')) + finally: + if filename is not None: + f.close() + htmlparser.close() + +if __name__ == '__main__': + sys.exit(main()) diff --git a/tools/idl2deb b/tools/idl2deb new file mode 100755 index 0000000..18f1b05 --- /dev/null +++ b/tools/idl2deb @@ -0,0 +1,141 @@ +#!/usr/bin/env python3 + +# idl2deb - quick hack by W. Martin Borgert to create +# Debian GNU/Linux packages from idl2wrs modules for Wireshark. +# Copyright 2003, 2008, W. Martin Borgert + +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs + +# SPDX-License-Identifier: GPL-2.0-or-later + +import optparse +import os +import string +import sys +import time + +scriptinfo = """idl2deb version 2008-03-10 +Copyright 2003, 2008, W. 
Martin Borgert +Free software, released under the terms of the GPL.""" + +def create_file(preserve, filename, content, mode = None): + """Create a file with given content.""" + if preserve and os.path.isfile(filename): + return + f = open(filename, 'w') + f.write(content) + f.close() + if mode: + os.chmod(filename, mode) + +def create_files(version, deb, email, idl, name, preserve, iso, rfc): + """Create all files for the .deb build process.""" + base = os.path.basename(idl.lower().split(".idl")[0]) + + if not os.path.isdir("packaging/debian"): + os.mkdir("packaging/debian") + + create_file(preserve, "packaging/debian/rules", """#!/usr/bin/make -f + +include /usr/share/cdbs/1/rules/debhelper.mk +include /usr/share/cdbs/1/class/autotools.mk + +PREFIX=`pwd`/packaging/debian/wireshark-giop-%s + +binary-post-install/wireshark-giop-%s:: + rm -f $(PREFIX)/usr/lib/wireshark/plugins/%s/*.a +""" % (base, base, version), 0o755) + + create_file(preserve, "packaging/debian/control", """Source: wireshark-giop-%s +Section: net +Priority: optional +Maintainer: %s <%s> +Standards-Version: 3.6.1.0 +Build-Depends: wireshark-dev, autotools-dev, debhelper, cdbs + +Package: wireshark-giop-%s +Architecture: any +Depends: wireshark (= %s), ${shlibs:Depends} +Description: GIOP dissector for CORBA interface %s + This package provides a dissector for GIOP (General Inter-ORB + Protocol) for the Wireshark protocol analyser. It decodes the CORBA + (Common Object Request Broker Architecture) interfaces described + in the IDL (Interface Definition Language) file '%s.idl'. +""" % (base, name, email, base, deb, base, base)) + + create_file(preserve, "packaging/debian/changelog", + """wireshark-giop-%s (0.0.1-1) unstable; urgency=low + + * Automatically created package. + + -- %s <%s> %s +""" % (base, name, email, rfc)) + + create_file(preserve, "packaging/debian/copyright", + """This package has been created automatically by idl2deb on +%s for Debian GNU/Linux. + +Wireshark: https://www.wireshark.org/ + +Copyright: + +GPL, as evidenced by existence of GPL license file \"COPYING\". 
+(the GNU GPL may be viewed on Debian systems in +/usr/share/common-licenses/GPL) +""" % (iso)) + +def get_wrs_version(): + """Detect version of wireshark-dev package.""" + deb = os.popen( + "dpkg-query -W --showformat='${Version}' wireshark-dev").read() + debv = string.find(deb, "-") + if debv == -1: debv = len(deb) + version = deb[string.find(deb, ":")+1:debv] + return version, deb + +def get_time(): + """Detect current time and return ISO and RFC time string.""" + currenttime = time.gmtime() + return time.strftime("%Y-%m-%d %H:%M:%S +0000", currenttime), \ + time.strftime("%a, %d %b %Y %H:%M:%S +0000", currenttime) + +def main(): + opts = process_opts(sys.argv) + iso, rfc = get_time() + version, deb = get_wrs_version() + create_files(version, deb, + opts.email, opts.idl, opts.name, opts.preserve, + iso, rfc) + os.system("dpkg-buildpackage " + opts.dbopts) + +def process_opts(argv): + """Process command line options.""" + parser = optparse.OptionParser( + version=scriptinfo, + description="""Example: +%prog -e me@foo.net -i bar.idl -n \"My Name\" -d \"-rfakeroot -uc -us\"""") + parser.add_option("-d", "--dbopts", + default="", metavar="opts", + help="options for dpkg-buildpackage") + parser.add_option("-e", "--email", metavar="address", + default="invalid@invalid.invalid", + help="use e-mail address") + parser.add_option("-i", "--idl", metavar="idlfile", + help="IDL file to use (mandatory)") + parser.add_option("-n", "--name", default="No Name", + help="use user name", metavar="name") + parser.add_option("-p", "--preserve", action="store_true", + help="do not overwrite files") + opts, args = parser.parse_args() + if not opts.idl: + print("mandatory IDL file parameter missing") + sys.exit(1) + if not os.access(opts.idl, os.R_OK): + print("IDL file not accessible") + sys.exit(1) + return opts + +if __name__ == '__main__': + main() diff --git a/tools/idl2wrs b/tools/idl2wrs new file mode 100755 index 0000000..7a51f4b --- /dev/null +++ b/tools/idl2wrs @@ -0,0 +1,114 @@ +#!/bin/sh +# +# File : idl2wrs +# +# Author : Frank Singleton (frank.singleton@ericsson.com) +# +# Copyright (C) 2001 Frank Singleton, Ericsson Inc. +# +# This file is a simple shell script wrapper for the IDL to +# Wireshark dissector code. +# +# ie: wireshark_be.py and wireshark_gen.py +# +# This file is used to generate "Wireshark" dissectors from IDL descriptions. +# The output language generated is "C". It will generate code to use the +# GIOP/IIOP get_CDR_XXX API. +# +# Please see packet-giop.h in Wireshark distro for API description. +# Wireshark is available at https://www.wireshark.org/ +# +# Omniidl is part of the OmniOrb distribution, and is available at +# http://omniorb.sourceforge.net/ +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA +# 02111-1307, USA. +# + + +# Must at least supply an IDL file + +if [ $# -lt 1 ]; then + echo "idl2wrs Error: no IDL file specified." 
+ echo "Usage: idl2wrs idl_file_name" + exit 1; +fi + +# Check the file name for valid characters. +# Implementation based on Dave Taylor's validalnum shell script from his book, +# "Wicked Cool Shell Scripts", as well as Mark Rushakoff's answer he provided +# to the question posted at stackoverflow.com entitled, "How can I use the +# UNIX shell to count the number of times a letter appears in a text file?" +file=$(basename $1) +compressed="$(echo $file | sed 's/[^[:alnum:]._]//g')" +if [ "$compressed" != "$file" ]; then + echo "idl2wrs Error: Invalid file name: $file" + exit 1; +fi + +# Only allow one '.' at most. +count=$(echo $compressed | awk -F. '{c += NF - 1} END {print c}') +if [ $count -gt 1 ] ; then + echo "idl2wrs Error: Invalid file name: $file" + exit 1; +fi + +# +# Run wireshark backend, looking for wireshark_be.py and wireshark_gen.py +# in pythons's "site-packages" directory. If cannot find that, then +# try looking in current directory. If still cannot, then exit with +# error. + +if [ -f $PYTHONPATH/site-packages/wireshark_be.py ] && [ -f $PYTHONPATH/site-packages/wireshark_gen.py ]; then + exec omniidl -p $PYTHONPATH/site-packages -b wireshark_be $@ + /* not reached */ +fi + +# Try current directory. + +if [ -f ./wireshark_be.py ] && [ -f ./wireshark_gen.py ]; then + exec omniidl -p ./ -b wireshark_be $@ + /* not reached */ +fi + +# Could not find both wireshark_be.py AND wireshark_gen.py +# So let's just try to run it without -p, hoping that the installation +# set up a valid path. + +exec omniidl -b wireshark_be $@ + +old code: not reached + +echo "idl2wrs Error: Could not find both wireshark_be.py AND wireshark_gen.py." +echo "Please ensure you have the PYTHONPATH variable set, or that wireshark_be.py " +echo "and wireshark_gen.py exist in the current directory. " +echo +echo "On this system, PYTHONPATH is : $PYTHONPATH" +echo + +exit 2 + + +# +# Editor modelines - https://www.wireshark.org/tools/modelines.html +# +# Local variables: +# c-basic-offset: 4 +# indent-tabs-mode: nil +# End: +# +# vi: set shiftwidth=4 expandtab: +# :indentSize=4:noTabs=true: +# diff --git a/tools/indexcap.py b/tools/indexcap.py new file mode 100755 index 0000000..d18e76f --- /dev/null +++ b/tools/indexcap.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 +# +# Tool to index protocols that appears in the given capture files +# +# The script list_protos_in_cap.sh does the same thing. 
+# +# Copyright 2009, Kovarththanan Rajaratnam +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +from optparse import OptionParser +import multiprocessing +import sys +import os +import subprocess +import re +import pickle +import tempfile +import filecmp +import random + +def extract_protos_from_file_proces(tshark, file): + try: + cmd = [tshark, "-Tfields", "-e", "frame.protocols", "-r", file] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout, stderr) = p.communicate() + stdout = stdout.decode('utf-8') + if p.returncode != 0: + return (file, {}) + + proto_hash = {} + for line in stdout.splitlines(): + if not re.match(r'^[\w:-]+$', line): + continue + + for proto in line.split(':'): + proto_hash[proto] = 1 + proto_hash.setdefault(proto, 0) + + return (file, proto_hash) + except KeyboardInterrupt: + return None + +def extract_protos_from_file(tshark, num_procs, max_files, cap_files, cap_hash, index_file_name): + pool = multiprocessing.Pool(num_procs) + results = [pool.apply_async(extract_protos_from_file_proces, [tshark, file]) for file in cap_files] + try: + for (cur_item_idx,result_async) in enumerate(results): + file_result = result_async.get() + action = "SKIPPED" if file_result[1] is {} else "PROCESSED" + print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result[0], os.path.getsize(file_result[0]))) + cap_hash.update(dict([file_result])) + except KeyboardInterrupt: + print("%s was interrupted by user" % (sys.argv[0])) + pool.terminate() + exit(1) + + index_file = open(index_file_name, "wb") + pickle.dump(cap_hash, index_file) + index_file.close() + exit(0) + +def dissect_file_process(tshark, tmpdir, file): + try: + (handle_o, tmpfile_o) = tempfile.mkstemp(suffix='_stdout', dir=tmpdir) + (handle_e, tmpfile_e) = tempfile.mkstemp(suffix='_stderr', dir=tmpdir) + cmd = [tshark, "-nxVr", file] + p = subprocess.Popen(cmd, stdout=handle_o, stderr=handle_e) + (stdout, stderr) = p.communicate() + if p.returncode == 0: + return (file, True, tmpfile_o, tmpfile_e) + else: + return (file, False, tmpfile_o, tmpfile_e) + + except KeyboardInterrupt: + return False + + finally: + os.close(handle_o) + os.close(handle_e) + +def dissect_files(tshark, tmpdir, num_procs, max_files, cap_files): + pool = multiprocessing.Pool(num_procs) + results = [pool.apply_async(dissect_file_process, [tshark, tmpdir, file]) for file in cap_files] + try: + for (cur_item_idx,result_async) in enumerate(results): + file_result = result_async.get() + action = "FAILED" if file_result[1] is False else "PASSED" + print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result[0], os.path.getsize(file_result[0]))) + except KeyboardInterrupt: + print("%s was interrupted by user" % (sys.argv[0])) + pool.terminate() + exit(1) + +def compare_files(tshark_bin, tmpdir, tshark_cmp, num_procs, max_files, cap_files): + pool = multiprocessing.Pool(num_procs) + results_bin = [pool.apply_async(dissect_file_process, [tshark_bin, tmpdir, file]) for file in cap_files] + results_cmp = [pool.apply_async(dissect_file_process, [tshark_cmp, tmpdir, file]) for file in cap_files] + try: + for (cur_item_idx,(result_async_bin, result_async_cmp)) in enumerate(zip(results_bin, results_cmp)): + file_result_bin = result_async_bin.get() + file_result_cmp = result_async_cmp.get() + if file_result_cmp[1] is False or file_result_bin[1] is False: + action = "FAILED (exitcode)" + if not 
filecmp.cmp(file_result_bin[2], file_result_cmp[2]): + action = "FAILED (stdout)" + if not filecmp.cmp(file_result_bin[3], file_result_cmp[3]): + action = "FAILED (stderr)" + else: + action = "PASSED" + os.remove(file_result_bin[2]) + os.remove(file_result_cmp[2]) + os.remove(file_result_bin[3]) + os.remove(file_result_cmp[3]) + + print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_bin[0], os.path.getsize(file_result_bin[0]))) + print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_cmp[0], os.path.getsize(file_result_cmp[0]))) + except KeyboardInterrupt: + print("%s was interrupted by user" % (sys.argv[0])) + pool.terminate() + exit(1) + +def list_all_proto(cap_hash): + proto_hash = {} + for files_hash in cap_hash.values(): + for proto,count in files_hash.items(): + proto_hash[proto] = count + proto_hash.setdefault(proto, 0) + + return proto_hash + +def list_all_files(cap_hash): + files = list(cap_hash.keys()) + files.sort() + + return files + +def list_all_proto_files(cap_hash, proto_comma_delit): + protos = [ x.strip() for x in proto_comma_delit.split(',') ] + files = [] + for (file, files_hash) in cap_hash.items(): + for proto in files_hash.keys(): + if proto in protos: + files.append(file) + break + + return files + +def index_file_action(options): + return options.list_all_proto or \ + options.list_all_files or \ + options.list_all_proto_files or \ + options.dissect_files + +def find_capture_files(paths, cap_hash): + cap_files = [] + for path in paths: + if os.path.isdir(path): + path = os.path.normpath(path) + for root, dirs, files in os.walk(path): + cap_files += [os.path.join(root, name) for name in files if os.path.join(root, name) not in cap_hash] + elif path not in cap_hash: + cap_files.append(path) + return cap_files + +def find_tshark_executable(bin_dir): + for file in ["tshark.exe", "tshark"]: + tshark = os.path.join(bin_dir, file) + if os.access(tshark, os.X_OK): + return tshark + + return None + +def main(): + parser = OptionParser(usage="usage: %prog [options] index_file [file_1|dir_1 [.. file_n|dir_n]]") + parser.add_option("-d", "--dissect-files", dest="dissect_files", default=False, action="store_true", + help="Dissect all matching files") + parser.add_option("-m", "--max-files", dest="max_files", default=sys.maxsize, type="int", + help="Max number of files to process") + parser.add_option("-b", "--binary-dir", dest="bin_dir", default=os.getcwd(), + help="Directory containing tshark executable") + parser.add_option("-c", "--compare-dir", dest="compare_dir", default=None, + help="Directory containing tshark executable which is used for comparison") + parser.add_option("-j", dest="num_procs", default=multiprocessing.cpu_count(), type=int, + help="Max number of processes to spawn") + parser.add_option("-r", "--randomize", default=False, action="store_true", + help="Randomize the file list order") + parser.add_option("", "--list-all-proto", dest="list_all_proto", default=False, action="store_true", + help="List all protocols in index file") + parser.add_option("", "--list-all-files", dest="list_all_files", default=False, action="store_true", + help="List all files in index file") + parser.add_option("", "--list-all-proto-files", dest="list_all_proto_files", default=False, + metavar="PROTO_1[, .. 
PROTO_N]", + help="List all files in index file containing the given protocol") + + (options, args) = parser.parse_args() + + if len(args) == 0: + parser.error("index_file is a required argument") + + if len(args) == 1 and not index_file_action(options): + parser.error("one capture file/directory must be specified") + + if options.dissect_files and not options.list_all_files and not options.list_all_proto_files: + parser.error("--list-all-files or --list-all-proto-files must be specified") + + if options.dissect_files and not options.compare_dir is None: + parser.error("--dissect-files and --compare-dir cannot be specified at the same time") + + index_file_name = args.pop(0) + paths = args + cap_hash = {} + try: + index_file = open(index_file_name, "rb") + print("index file: %s [OPENED]" % index_file.name) + cap_hash = pickle.load(index_file) + index_file.close() + print("%d files" % len(cap_hash)) + except IOError: + print("index file: %s [NEW]" % index_file_name) + + if options.list_all_proto: + print(list_all_proto(cap_hash)) + exit(0) + + indexed_files = [] + if options.list_all_files: + indexed_files = list_all_files(cap_hash) + print(indexed_files) + + if options.list_all_proto_files: + indexed_files = list_all_proto_files(cap_hash, options.list_all_proto_files) + print(indexed_files) + + tshark_bin = find_tshark_executable(options.bin_dir) + if not tshark_bin is None: + print("tshark: %s [FOUND]" % tshark_bin) + else: + print("tshark: %s [MISSING]" % tshark_bin) + exit(1) + + if not options.compare_dir is None: + tshark_cmp = find_tshark_executable(options.compare_dir) + if not tshark_cmp is None: + print("tshark: %s [FOUND]" % tshark_cmp) + else: + print("tshark: %s [MISSING]" % tshark_cmp) + exit(1) + + if options.dissect_files or options.compare_dir: + cap_files = indexed_files + elif options.list_all_proto_files or options.list_all_files: + exit(0) + else: + cap_files = find_capture_files(paths, cap_hash) + + if options.randomize: + random.shuffle(cap_files) + else: + cap_files.sort() + + options.max_files = min(options.max_files, len(cap_files)) + print("%u total files, %u working files" % (len(cap_files), options.max_files)) + cap_files = cap_files[:options.max_files] + if options.compare_dir or options.dissect_files: + tmpdir = tempfile.mkdtemp() + print("Temporary working dir: %s" % tmpdir) + try: + if options.compare_dir: + compare_files(tshark_bin, tmpdir, tshark_cmp, options.num_procs, options.max_files, cap_files) + elif options.dissect_files: + dissect_files(tshark_bin, tmpdir, options.num_procs, options.max_files, cap_files) + else: + extract_protos_from_file(tshark_bin, options.num_procs, options.max_files, cap_files, cap_hash, index_file_name) + finally: + # Dissection may result in a non-empty directory. 
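+        # (A clarifying note: compare_files() removes its temporary stdout
+        # and stderr dumps as comparisons pass, while dissect_files() keeps
+        # them for inspection, so the directory is only expected to be
+        # removable after a compare run.)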
+ if options.compare_dir: + os.rmdir(tmpdir) +if __name__ == "__main__": + main() diff --git a/tools/json2pcap/json2pcap.py b/tools/json2pcap/json2pcap.py new file mode 100755 index 0000000..2a059ad --- /dev/null +++ b/tools/json2pcap/json2pcap.py @@ -0,0 +1,686 @@ +#!/usr/bin/env python3 + +# +# Copyright 2020, Martin Kacer and contributors +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import sys +import ijson +import operator +import copy +import binascii +import array +import argparse +import string +import random +import math +import hashlib +import re +from collections import OrderedDict +from scapy import all as scapy + +# Field anonymization class +class AnonymizedField: + ''' + The Anonymization field object specifying anonymization + :filed arg: field name + :type arg: anonymization type [0 masking 0xff, 1 anonymization shake_256] + :start arg: If specified, the anonymization starts at given byte number + :end arg: If specified, the anonymization ends at given byte number + ''' + def __init__(self, field, type): + self.field = field + self.type = type + self.start = None + self.end = None + + match = re.search(r'(\S+)\[(-?\d+)?:(-?\d+)?\]', field) + if match: + self.field = match.group(1) + self.start = match.group(2) + if self.start is not None: + self.start = int(self.start) + self.end = match.group(3) + if self.end is not None: + self.end = int(self.end) + + # Returns the new field value after anonymization + def anonymize_field_shake256(self, field, type, salt): + shake = hashlib.shake_256(str(field + ':' + salt).encode('utf-8')) + + # String type, output should be ASCII + if type in [26, 27, 28]: + length = math.ceil(len(field)/4) + shake_hash = shake.hexdigest(length) + ret_string = array.array('B', str.encode(shake_hash)) + ret_string = ''.join('{:02x}'.format(x) for x in ret_string) + # Other types, output could be HEX + else: + length = math.ceil(len(field)/2) + shake_hash = shake.hexdigest(length) + ret_string = shake_hash + + # Correct the string length + if (len(ret_string) < len(field)): + ret_string = ret_string.ljust(len(field)) + if (len(ret_string) > len(field)): + ret_string = ret_string[:len(field)] + + return ret_string + + def anonymize_field(self, _h, _t, salt): + s = 0 + e = None + if self.start: + s = self.start + if self.end: + e = self.end + if e < 0: + e = len(_h) + e + else: + e = len(_h) + h = _h[s:e] + if self.type == 0: + h = 'f' * len(h) + elif self.type == 1: + h = self.anonymize_field_shake256(h, _t, salt) + + h_mask = '0' * len(_h[0:s]) + 'f' * len(h) + '0' * len(_h[e:]) + h = _h[0:s] + h + _h[e:] + return [h, h_mask] + +def make_unique(key, dct): + counter = 0 + unique_key = key + + while unique_key in dct: + counter += 1 + unique_key = '{}_{}'.format(key, counter) + return unique_key + + +def parse_object_pairs(pairs): + dct = OrderedDict() + for key, value in pairs: + if key in dct: + key = make_unique(key, dct) + dct[key] = value + + return dct + +# +# ********* PY TEMPLATES ********* +# +def read_py_function(name): + s = '' + record = False + indent = 0 + + file = open(__file__) + for line in file: + + ind = len(line) - len(line.lstrip()) + + if line.find("def " + name) != -1: + record = True + indent = ind + elif record and indent == ind and len(line) > 1: + record = False + + if record: + s = s + line + + file.close() + return s + +py_header = """#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# File generated by json2pcap.py +# json2pcap.py 
created by Martin Kacer, 2020 + +import os +import binascii +import array +import sys +import subprocess +from collections import OrderedDict +from scapy import all as scapy + +# ***************************************************** +# * PACKET PAYLOAD GENERATED FROM INPUT PCAP * +# * Modify this function to edit the packet * +# ***************************************************** +def main(): + d = OrderedDict() +""" + +py_footer = """ generate_pcap(d) + +# ***************************************************** +# * FUNCTIONS from TEMPLATE * +# * Do not edit these functions if not required * +# ***************************************************** + +""" +py_footer = py_footer + read_py_function("to_bytes") +py_footer = py_footer + read_py_function("lsb") +py_footer = py_footer + read_py_function("multiply_strings") +py_footer = py_footer + read_py_function("rewrite_frame") +py_footer = py_footer + read_py_function("assemble_frame") +py_footer = py_footer + read_py_function("generate_pcap") + +py_footer = py_footer + """ + +if __name__ == '__main__': + main() +""" +# +# ***** End of PY TEMPLATES ****** +# + + + +# +# ********** FUNCTIONS *********** +# + +def raw_flat_collector(dict): + if hasattr(dict, 'items'): + for k, v in dict.items(): + if k.endswith("_raw"): + yield k, v + else: + for val in raw_flat_collector(v): + yield val + + +# d - input dictionary, parsed from json +# r - result dictionary +# frame_name - parent protocol name +# frame_position - parent protocol position +def py_generator(d, r, frame_name='frame_raw', frame_position=0): + if (d is None or d is None): + return + + if hasattr(d, 'items'): + for k, v in d.items(): + + # no recursion + if k.endswith("_raw") or "_raw_" in k: + if isinstance(v[1], (list, tuple)) or isinstance(v[2], (list, tuple)): + #i = 1; + for _v in v: + h = _v[0] + p = _v[1] + l = _v[2] * 2 + b = _v[3] + t = _v[4] + if (len(h) != l): + l = len(h) + + p = p - frame_position + + # Add into result dictionary + key = str(k).replace('.', '_') + key = make_unique(key, r) + + fn = frame_name.replace('.', '_') + if (fn == key): + fn = None + value = [fn, h, p, l, b, t] + + r[key] = value + + else: + h = v[0] + p = v[1] + l = v[2] * 2 + b = v[3] + t = v[4] + if (len(h) != l): + l = len(h) + + p = p - frame_position + + # Add into result dictionary + key = str(k).replace('.', '_') + key = make_unique(key, r) + + fn = frame_name.replace('.', '_') + if (fn == key): + fn = None + value = [fn , h, p, l, b, t] + + r[key] = value + + # recursion + else: + if isinstance(v, dict): + fn = frame_name + fp = frame_position + + # if there is also preceding raw protocol frame use it + # remove tree suffix + key = k + if (key.endswith("_tree") or ("_tree_" in key)): + key = key.replace('_tree', '') + + raw_key = key + "_raw" + if (raw_key in d): + # f = d[raw_key][0] + fn = raw_key + fp = d[raw_key][1] + + + py_generator(v, r, fn, fp) + + elif isinstance(v, (list, tuple)): + + fn = frame_name + fp = frame_position + + # if there is also preceding raw protocol frame use it + # remove tree suffix + key = k + if (key.endswith("_tree") or ("_tree_" in key)): + key = key.replace('_tree', '') + + raw_key = key + "_raw" + if (raw_key in d): + fn = raw_key + fp = d[raw_key][1] + + for _v in v: + py_generator(_v, r, frame_name, frame_position) + +# To emulate Python 3.2 +def to_bytes(n, length, endianess='big'): + h = '%x' % n + s = bytearray.fromhex(('0' * (len(h) % 2) + h).zfill(length * 2)) + return s if endianess == 'big' else s[::-1] + +# Returns the index, counting from 
0, of the least significant set bit in x +def lsb(x): + return (x & -x).bit_length() - 1 + +# Replace parts of original_string by new_string, only if mask in the byte is not ff +def multiply_strings(original_string, new_string, mask): + + ret_string = new_string + if mask is None: + return ret_string + for i in range(0, min(len(original_string), len(new_string), len(mask)), 2): + if mask[i:i + 2] == 'ff': + #print("ff") + ret_string = ret_string[:i] + original_string[i:i + 2] + ret_string[i + 2:] + + return ret_string + +# Rewrite frame +# h - hex bytes +# p - position +# l - length +# b - bitmask +# t - type +# frame_amask - optional, anonymization mask (00 - not anonymized byte, ff - anonymized byte) +def rewrite_frame(frame_raw, h, p, l, b, t, frame_amask=None): + if p < 0 or l < 0 or h is None: + return frame_raw + + # no bitmask + if(b == 0): + if (len(h) != l): + l = len(h) + frame_raw_new = frame_raw[:p] + h + frame_raw[p + l:] + return multiply_strings(frame_raw, frame_raw_new, frame_amask) + # bitmask + else: + # get hex string from frame which will be replaced + _h = frame_raw[p:p + l] + + # add 0 padding to have correct length + if (len(_h) % 2 == 1): + _h = '0' + _h + if (len(h) % 2 == 1): + h = '0' + h + + # Only replace bits defined by mask + # new_hex = (old_hex & !mask) | (new_hex & mask) + _H = bytearray.fromhex(_h) + _H = array.array('B', _H) + + M = to_bytes(b, len(_H)) + M = array.array('B', M) + # shift mask aligned to position + for i in range(len(M)): + if (i + p / 2) < len(M): + M[i] = M[i + int(p / 2)] + else: + M[i] = 0x00 + + H = bytearray.fromhex(h) + H = array.array('B', H) + + # for i in range(len(_H)): + # print "{0:08b}".format(_H[i]), + # print + # for i in range(len(M)): + # print "{0:08b}".format(M[i]), + # print + + j = 0 + for i in range(len(_H)): + if (M[i] != 0): + v = H[j] << lsb(M[i]) + # print "Debug: {0:08b}".format(v), + _H[i] = (_H[i] & ~M[i]) | (v & M[i]) + # print "Debug: " + str(_H[i]), + j = j + 1 + + # for i in range(len(_H)): + # print "{0:08b}".format(_H[i]), + # print + + masked_h = binascii.hexlify(_H) + masked_h = masked_h.decode('ascii') + + frame_raw_new = frame_raw[:p] + str(masked_h) + frame_raw[p + l:] + return multiply_strings(frame_raw, frame_raw_new, frame_amask) + + +def assemble_frame(d, frame_time): + input = d['frame_raw'][1] + isFlat = False + linux_cooked_header = False + while not isFlat: + isFlat = True + _d = d.copy() + for key, val in _d.items(): + h = str(val[1]) # hex + p = val[2] * 2 # position + l = val[3] * 2 # length + b = val[4] # bitmask + t = val[5] # type + + if (key == "sll_raw"): + linux_cooked_header = True + + # only if the node is not parent + isParent = False + for k, v in d.items(): + if (v[0] == key): + isParent = True + isFlat = False + break + + if not isParent and val[0] is not None: + d[val[0]][1] = rewrite_frame(d[val[0]][1], h, p, l, b, t) + del d[key] + + output = d['frame_raw'][1] + + # for Linux cooked header replace dest MAC and remove two bytes to reconstruct normal frame + if (linux_cooked_header): + output = "000000000000" + output[6*2:] # replce dest MAC + output = output[:12*2] + "" + output[14*2:] # remove two bytes before Protocol + + return output + +def generate_pcap(d): + # 1. Assemble frame + input = d['frame_raw'][1] + output = assemble_frame(d, None) + print(input) + print(output) + # 2. 
Testing: compare input and output for not modified json + if (input != output): + print("Modified frames: ") + s1 = input + s2 = output + print(s1) + print(s2) + if (len(s1) == len(s2)): + d = [i for i in range(len(s1)) if s1[i] != s2[i]] + print(d) + # 3. Generate pcap + outfile = sys.argv[0] + ".pcap" + pcap_out = scapy.PcapWriter(outfile, append=False, sync=False) + new_packet = scapy.Packet(bytearray.fromhex(output)) + pcap_out.write(new_packet) + print("Generated " + outfile) + +# +# ************ MAIN ************** +# +VERSION = "1.1" + +parser = argparse.ArgumentParser(description=""" +json2pcap {version} + +Utility to generate pcap from json format. + +Packet modification: +In input json it is possible to modify the raw values of decoded fields. +The output pcap will include the modified values. The algorithm of +generating the output pcap is to get all raw hex fields from input json and +then assembling them by layering from longest (less decoded fields) to +shortest (more decoded fields). It means if the modified raw field is +shorter field (more decoded field) it takes precedence against modification +in longer field (less decoded field). If the json includes duplicated raw +fields with same position and length, the behavior is not deterministic. +For manual packet editing it is always possible to remove any not required +raw fields from json, only frame_raw is field mandatory for reconstruction. + +Packet modification with -p switch: +The python script is generated instead of pcap. This python script when +executed will generate the pcap of 1st packet from input json. The +generated code includes the decoded fields and the function to assembly the +packet. This enables to modify the script and programmatically edit or +encode the packet variables. The assembling algorithm is different, because +the decoded packet fields are relative and points to parent node with their +position (compared to input json which has absolute positions). + +Pcap masking and anonymization with -m and -a switch: +The script allows to mask or anonymize the selected json raw fields. If the +The fields are selected and located on lower protocol layers, they are not +The overwritten by upper fields which are not marked by these switches. +The pcap masking and anonymization can be performed in the following way: + +tshark -r orig.pcap -T json -x | \ python json2pcap.py -m "ip.src_raw" +-a "ip.dst_raw" -o anonymized.pcap +In this example the ip.src_raw field is masked with ffffffff by byte values +and ip.dst_raw is hashed by randomly generated salt. + +Additionally the following syntax is valid to anonymize portion of field +tshark -r orig.pcap -T json -x | \ python json2pcap.py -m "ip.src_raw[2:]" +-a "ip.dst_raw[:-2]" -o anonymized.pcap +Where the src_ip first byte is preserved and dst_ip last byte is preserved. +And the same can be achieved by +tshark -r orig.pcap -T json -x | \ python json2pcap.py -m "ip.src_raw[2:8]" +-a "ip.dst_raw[0:6]" -o anonymized.pcap + +Masking and anonymization limitations are mainly the following: +- In case the tshark is performing reassembling from multiple frames, the +backward pcap reconstruction is not properly performed and can result in +malformed frames. +- The new values in the fields could violate the field format, as the +json2pcap is no performing correct protocol encoding with respect to +allowed values of the target field and field encoding. 
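+
+A fixed salt can also be supplied with the -s switch, making the shake_256
+anonymization repeatable across captures and runs, for example
+tshark -r orig.pcap -T json -x | \
+python json2pcap.py -a "ip.src_raw" -s examplesalt -o anonymized.pcap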
+ +""".format(version=VERSION), formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION) +parser.add_argument('-i', '--infile', nargs='?', help='json generated by tshark -T json -x\nor by tshark -T jsonraw (not preserving frame timestamps).\nIf no inpout file is specified script reads from stdin.') +parser.add_argument('-o', '--outfile', required=True, help='output pcap filename') +parser.add_argument('-p', '--python', help='generate python payload instead of pcap (only 1st packet)', default=False, action='store_true') +parser.add_argument('-m', '--mask', help='mask the specific raw field (e.g. -m "ip.src_raw" -m "ip.dst_raw[2:6]")', action='append', metavar='MASKED_FIELD') +parser.add_argument('-a', '--anonymize', help='anonymize the specific raw field (e.g. -a "ip.src_raw[2:]" -a "ip.dst_raw[:-2]")', action='append', metavar='ANONYMIZED_FIELD') +parser.add_argument('-s', '--salt', help='salt use for anonymization. If no value is provided it is randomized.', default=None) +parser.add_argument('-v', '--verbose', help='verbose output', default=False, action='store_true') +args = parser.parse_args() + +# read JSON +infile = args.infile +outfile = args.outfile + +# Read from input file +if infile: + data_file = open(infile) +# Read from pipe +else: + data_file = sys.stdin + +# Parse anonymization fields +anonymize = {} +if args.mask: + for m in args.mask: + if '_raw' not in m: + print("Error: The specified fields by -m switch should be raw fields. " + m + " does not have _raw suffix") + sys.exit() + af = AnonymizedField(m, 0) + anonymize[af.field] = af +if args.anonymize: + for a in args.anonymize: + if '_raw' not in a: + print("Error: The specified fields by -a switch should be raw fields. 
" + a + " does not have _raw suffix") + sys.exit() + af = AnonymizedField(a, 1) + anonymize[af.field] = af + +input_frame_raw = '' +frame_raw = '' +frame_time = None + +salt = args.salt +if salt is None: + # generate random salt if no salt was provided + salt = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10)) + +# Generate pcap +if args.python is False: + pcap_out = scapy.PcapWriter(outfile, append=False, sync=False) + + # Iterate over packets in JSON + for packet in ijson.items(data_file, "item", buf_size=200000): + _list = [] + linux_cooked_header = False + + # get flat raw fields into _list + for raw in raw_flat_collector(packet['_source']['layers']): + if len(raw) >= 2: + if (raw[0] == "frame_raw"): + frame_raw = raw[1][0] + frame_amask = "0"*len(frame_raw) # initialize anonymization mask + input_frame_raw = copy.copy(frame_raw) + frame_time = None + if 'frame.time_epoch' in packet['_source']['layers']['frame']: + frame_time = packet['_source']['layers']['frame']['frame.time_epoch'] + else: + # add into value list into raw[5] the field name + if isinstance(raw[1], list): + raw[1].append(raw[0]) + _list.append(raw[1]) + if (raw[0] == "sll_raw"): + linux_cooked_header = True + + # sort _list + sorted_list = sorted(_list, key=operator.itemgetter(1), reverse=False) + sorted_list = sorted(sorted_list, key=operator.itemgetter(2), reverse=True) + # print("Debug: " + str(sorted_list)) + + # rewrite frame + for raw in sorted_list: + if len(raw) >= 6: + h = str(raw[0]) # hex + p = raw[1] * 2 # position + l = raw[2] * 2 # length + b = raw[3] # bitmask + t = raw[4] # type + # raw[5] # field_name (added by script) + h_mask = h # hex for anonymization mask + + # anonymize fields + if (raw[5] in anonymize): + [h, h_mask] = anonymize[raw[5]].anonymize_field(h, t, salt) + + if (isinstance(p, (list, tuple)) or isinstance(l, (list, tuple))): + for r in raw: + _h = str(r[0]) # hex + _p = r[1] * 2 # position + _l = r[2] * 2 # length + _b = r[3] # bitmask + _t = r[4] # type + # raw[5] # field_name (added by script) + _h_mask = _h # hex for anonymization mask + + # anonymize fields + if (raw[5] in anonymize): + [_h, _h_mask] = anonymize[raw[5]].anonymize_field(_h, _t, salt) + + # print("Debug: " + str(raw)) + frame_raw = rewrite_frame(frame_raw, _h, _p, _l, _b, _t, frame_amask) + + # update anonymization mask + if (raw[5] in anonymize): + frame_amask = rewrite_frame(frame_amask, _h_mask, _p, _l, _b, _t) + + else: + # print("Debug: " + str(raw)) + frame_raw = rewrite_frame(frame_raw, h, p, l, b, t, frame_amask) + + # update anonymization mask + if (raw[5] in anonymize): + frame_amask = rewrite_frame(frame_amask, h_mask, p, l, b, t) + + # for Linux cooked header replace dest MAC and remove two bytes to reconstruct normal frame using text2pcap + if (linux_cooked_header): + frame_raw = "000000000000" + frame_raw[6 * 2:] # replce dest MAC + frame_raw = frame_raw[:12 * 2] + "" + frame_raw[14 * 2:] # remove two bytes before Protocol + + # Testing: remove comment to compare input and output for not modified json + if (args.verbose and input_frame_raw != frame_raw): + print("Modified frames: ") + s1 = input_frame_raw + s2 = frame_raw + print(s1) + print(s2) + if (len(s1) == len(s2)): + d = [i for i in range(len(s1)) if s1[i] != s2[i]] + print(d) + + new_packet = scapy.Packet(bytearray.fromhex(frame_raw)) + if frame_time: + new_packet.time = float(frame_time) + pcap_out.write(new_packet) + +# Generate python payload only for first packet +else: + py_outfile = outfile + 
+    py_outfile = outfile + '.py'
+    f = open(py_outfile, 'w')
+
+    # for packet in json:
+    for packet in ijson.items(data_file, "item", buf_size=200000):
+        f.write(py_header)
+
+        r = OrderedDict({})
+
+        # print("packet = " + str(packet['_source']['layers']))
+        py_generator(packet['_source']['layers'], r)
+
+        for key, value in r.items():
+            f.write("    d['" + key + "'] =",)
+            f.write(" " + str(value) + "\n")
+
+        f.write(py_footer)
+
+        # Currently only the first packet is used from the pcap
+        f.close()
+
+        print("Generated " + py_outfile)
+
+        break
diff --git a/tools/lemon/CMakeLists.txt b/tools/lemon/CMakeLists.txt
new file mode 100644
index 0000000..529eeae
--- /dev/null
+++ b/tools/lemon/CMakeLists.txt
@@ -0,0 +1,46 @@
+# CMakeLists.txt
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+add_executable(lemon lemon.c)
+
+if(DEFINED LEMON_C_COMPILER)
+	set(CMAKE_C_COMPILER "${LEMON_C_COMPILER}")
+	set(CMAKE_C_FLAGS "")
+endif()
+
+# To keep lemon.c as close to upstream as possible disable all warnings
+if(CMAKE_C_COMPILER_ID MATCHES "MSVC")
+	target_compile_options(lemon PRIVATE /w)
+else()
+	target_compile_options(lemon PRIVATE -w)
+endif()
+if(CMAKE_C_COMPILER_ID MATCHES "Clang")
+	# Disable static analysis for lemon source code. These issues don't
+	# affect Wireshark at runtime.
+	target_compile_options(lemon PRIVATE -Xclang -analyzer-disable-all-checks)
+endif()
+if(DEFINED NO_SANITIZE_CFLAGS)
+	target_compile_options(lemon PRIVATE ${NO_SANITIZE_CFLAGS})
+endif()
+if(DEFINED NO_SANITIZE_LDFLAGS)
+	target_link_options(lemon PRIVATE ${NO_SANITIZE_LDFLAGS})
+endif()
+
+#
+# Editor modelines  -  https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 8
+# tab-width: 8
+# indent-tabs-mode: t
+# End:
+#
+# vi: set shiftwidth=8 tabstop=8 noexpandtab:
+# :indentSize=8:tabSize=8:noTabs=false:
+#
diff --git a/tools/lemon/README b/tools/lemon/README
new file mode 100644
index 0000000..59ed343
--- /dev/null
+++ b/tools/lemon/README
@@ -0,0 +1,52 @@
+The Lemon Parser Generator's home page is: https://www.hwaci.com/sw/lemon/
+Lemon now seems to be maintained at: https://sqlite.org/lemon.html
+
+Documentation is available at: https://sqlite.org/src/doc/trunk/doc/lemon.html
+Git mirror of the upstream Fossil repository: https://github.com/mackyle/sqlite
+
+The lempar.c and lemon.c files are taken from sqlite and are modified as
+little as possible to make it easier to synchronize changes. Last updated at:
+
+    commit a913f942cf6b32b85de6428fd542b39458df2a88
+    Author: D. Richard Hipp
+    Date:   Wed Dec 28 14:03:47 2022 +0000
+
+        Version 3.40.1
+
+To check for changes (adjust "previous commit" accordingly):
+
+    git clone --depth=1000 https://github.com/sqlite/sqlite
+    cd sqlite/tools
+    git log -p 273ee15121.. lemon.c lempar.c
+
+To create a Wireshark version (steps 1-3) and validate the result (steps 4-5):
+1. Copy the two files.
+2. Run ./apply-patches.sh to apply local patches.
+3. Update the commit in this README (to ensure the base is known).
+4. Check for CSA warnings: clang-check -analyze lemon.c --
+5. Build and run lemon: ninja epan/dfilter/grammar.c
+
+To keep the lemon source as pristine as possible from upstream, all warnings
+are disabled when building lemon itself. Only patch the lemon source code as
+a last resort.
+
+In practice, warnings for lemon generated code are few with -Wall -Wextra.
+These are preferably selectively disabled in the Wireshark build.
+
+The patches to lemon to silence compiler warnings and static analysis reports
+(for edge cases that cannot occur) are not proposed upstream because that
+process is difficult. From :
+
+    SQLite is open-source, meaning that you can make as many copies of it as you
+    want and do whatever you want with those copies, without limitation. But
+    SQLite is not open-contribution. In order to keep SQLite in the public
+    domain and ensure that the code does not become contaminated with
+    proprietary or licensed content, the project does not accept patches from
+    unknown persons.
+
+A note about the Lemon patches: we have no intention of forking Lemon and
+maintaining it. These patches are written to address static analyzer warnings
+without actually modifying the functionality. If upstream is willing to accept
+patches, that would be great, and the intention is to make it as easy as
+possible. The lemon and lempar patches are dedicated to the public domain, as
+set forward in Creative Commons Zero v1.0 Universal (IANAL, but I hope this is
+sufficient).
diff --git a/tools/lemon/apply-patches.sh b/tools/lemon/apply-patches.sh
new file mode 100755
index 0000000..e445c87
--- /dev/null
+++ b/tools/lemon/apply-patches.sh
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Patch lemon.c and lempar.c to silence static analyzer warnings.
+# See also tools/lemon/README
+
+# Strip trailing whitespace
+sed -e 's/ \+$//' -i lemon.c lempar.c
+
+# Other patches
+if [ -d "patches" ]; then
+    for i in patches/*.patch; do
+        echo "Applying $i"
+        patch --silent -p1 -i "$i"
+    done
+fi
+
+echo DONE
diff --git a/tools/lemon/lemon.c b/tools/lemon/lemon.c
new file mode 100644
index 0000000..869ac58
--- /dev/null
+++ b/tools/lemon/lemon.c
@@ -0,0 +1,5893 @@
+/*
+** This file contains all sources (including headers) to the LEMON
+** LALR(1) parser generator. The sources have been combined into a
+** single file to make it easy to include LEMON in the source tree
+** and Makefile of another program.
+**
+** The author of this program disclaims copyright.
+*/
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define ISSPACE(X) isspace((unsigned char)(X))
+#define ISDIGIT(X) isdigit((unsigned char)(X))
+#define ISALNUM(X) isalnum((unsigned char)(X))
+#define ISALPHA(X) isalpha((unsigned char)(X))
+#define ISUPPER(X) isupper((unsigned char)(X))
+#define ISLOWER(X) islower((unsigned char)(X))
+
+
+#ifndef __WIN32__
+#   if defined(_WIN32) || defined(WIN32)
+#       define __WIN32__
+#   endif
+#endif
+
+#ifdef __WIN32__
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int access(const char *path, int mode);
+#ifdef __cplusplus
+}
+#endif
+#else
+#include <unistd.h>
+#endif
+
+/* #define PRIVATE static */
+#define PRIVATE
+
+#ifdef TEST
+#define MAXRHS 5       /* Set low to exercise exception code */
+#else
+#define MAXRHS 1000
+#endif
+
+extern void memory_error();
+static int showPrecedenceConflict = 0;
+static char *msort(char*,char**,int(*)(const char*,const char*));
+
+/*
+** Compilers are getting increasingly pedantic about type conversions
+** as C evolves ever closer to Ada....  To work around the latest problems
+** we have to define the following variant of strlen().
+*/
+#define lemonStrlen(X)   ((int)strlen(X))
+
+/*
+** Compilers are starting to complain about the use of sprintf() and strcpy(),
+** saying they are unsafe.  So we define our own versions of those routines too.
+**
+** There are three routines here: lemon_sprintf(), lemon_vsprintf(), and
+** lemon_addtext(). The first two are replacements for sprintf() and vsprintf().
+** The third is a helper routine for vsnprintf() that adds texts to the end of a +** buffer, making sure the buffer is always zero-terminated. +** +** The string formatter is a minimal subset of stdlib sprintf() supporting only +** a few simply conversions: +** +** %d +** %s +** %.*s +** +*/ +static void lemon_addtext( + char *zBuf, /* The buffer to which text is added */ + int *pnUsed, /* Slots of the buffer used so far */ + const char *zIn, /* Text to add */ + int nIn, /* Bytes of text to add. -1 to use strlen() */ + int iWidth /* Field width. Negative to left justify */ +){ + if( nIn<0 ) for(nIn=0; zIn[nIn]; nIn++){} + while( iWidth>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth--; } + if( nIn==0 ) return; + memcpy(&zBuf[*pnUsed], zIn, nIn); + *pnUsed += nIn; + while( (-iWidth)>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth++; } + zBuf[*pnUsed] = 0; +} +static int lemon_vsprintf(char *str, const char *zFormat, va_list ap){ + int i, j, k, c; + int nUsed = 0; + const char *z; + char zTemp[50]; + str[0] = 0; + for(i=j=0; (c = zFormat[i])!=0; i++){ + if( c=='%' ){ + int iWidth = 0; + lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0); + c = zFormat[++i]; + if( ISDIGIT(c) || (c=='-' && ISDIGIT(zFormat[i+1])) ){ + if( c=='-' ) i++; + while( ISDIGIT(zFormat[i]) ) iWidth = iWidth*10 + zFormat[i++] - '0'; + if( c=='-' ) iWidth = -iWidth; + c = zFormat[i]; + } + if( c=='d' ){ + int v = va_arg(ap, int); + if( v<0 ){ + lemon_addtext(str, &nUsed, "-", 1, iWidth); + v = -v; + }else if( v==0 ){ + lemon_addtext(str, &nUsed, "0", 1, iWidth); + } + k = 0; + while( v>0 ){ + k++; + zTemp[sizeof(zTemp)-k] = (v%10) + '0'; + v /= 10; + } + lemon_addtext(str, &nUsed, &zTemp[sizeof(zTemp)-k], k, iWidth); + }else if( c=='s' ){ + z = va_arg(ap, const char*); + lemon_addtext(str, &nUsed, z, -1, iWidth); + }else if( c=='.' && memcmp(&zFormat[i], ".*s", 3)==0 ){ + i += 2; + k = va_arg(ap, int); + z = va_arg(ap, const char*); + lemon_addtext(str, &nUsed, z, k, iWidth); + }else if( c=='%' ){ + lemon_addtext(str, &nUsed, "%", 1, 0); + }else{ + fprintf(stderr, "illegal format\n"); + exit(1); + } + j = i+1; + } + } + lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0); + return nUsed; +} +static int lemon_sprintf(char *str, const char *format, ...){ + va_list ap; + int rc; + va_start(ap, format); + rc = lemon_vsprintf(str, format, ap); + va_end(ap); + return rc; +} +static void lemon_strcpy(char *dest, const char *src){ + while( (*(dest++) = *(src++))!=0 ){} +} +static void lemon_strcat(char *dest, const char *src){ + while( *dest ) dest++; + lemon_strcpy(dest, src); +} + + +/* a few forward declarations... 
*/ +struct rule; +struct lemon; +struct action; + +static struct action *Action_new(void); +static struct action *Action_sort(struct action *); + +/********** From the file "build.h" ************************************/ +void FindRulePrecedences(struct lemon*); +void FindFirstSets(struct lemon*); +void FindStates(struct lemon*); +void FindLinks(struct lemon*); +void FindFollowSets(struct lemon*); +void FindActions(struct lemon*); + +/********* From the file "configlist.h" *********************************/ +void Configlist_init(void); +struct config *Configlist_add(struct rule *, int); +struct config *Configlist_addbasis(struct rule *, int); +void Configlist_closure(struct lemon *); +void Configlist_sort(void); +void Configlist_sortbasis(void); +struct config *Configlist_return(void); +struct config *Configlist_basis(void); +void Configlist_eat(struct config *); +void Configlist_reset(void); + +/********* From the file "error.h" ***************************************/ +void ErrorMsg(const char *, int,const char *, ...); + +/****** From the file "option.h" ******************************************/ +enum option_type { OPT_FLAG=1, OPT_INT, OPT_DBL, OPT_STR, + OPT_FFLAG, OPT_FINT, OPT_FDBL, OPT_FSTR}; +struct s_options { + enum option_type type; + const char *label; + char *arg; + const char *message; +}; +int OptInit(char**,struct s_options*,FILE*); +int OptNArgs(void); +char *OptArg(int); +void OptErr(int); +void OptPrint(void); + +/******** From the file "parse.h" *****************************************/ +void Parse(struct lemon *lemp); + +/********* From the file "plink.h" ***************************************/ +struct plink *Plink_new(void); +void Plink_add(struct plink **, struct config *); +void Plink_copy(struct plink **, struct plink *); +void Plink_delete(struct plink *); + +/********** From the file "report.h" *************************************/ +void Reprint(struct lemon *); +void ReportOutput(struct lemon *); +void ReportTable(struct lemon *, int, int); +void ReportHeader(struct lemon *); +void CompressTables(struct lemon *); +void ResortStates(struct lemon *); + +/********** From the file "set.h" ****************************************/ +void SetSize(int); /* All sets will be of size N */ +char *SetNew(void); /* A new set for element 0..N */ +void SetFree(char*); /* Deallocate a set */ +int SetAdd(char*,int); /* Add element to a set */ +int SetUnion(char *,char *); /* A <- A U B, thru element N */ +#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */ + +/********** From the file "struct.h" *************************************/ +/* +** Principal data structures for the LEMON parser generator. 
+*/ + +typedef enum {LEMON_FALSE=0, LEMON_TRUE} Boolean; + +/* Symbols (terminals and nonterminals) of the grammar are stored +** in the following: */ +enum symbol_type { + TERMINAL, + NONTERMINAL, + MULTITERMINAL +}; +enum e_assoc { + LEFT, + RIGHT, + NONE, + UNK +}; +struct symbol { + const char *name; /* Name of the symbol */ + int index; /* Index number for this symbol */ + enum symbol_type type; /* Symbols are all either TERMINALS or NTs */ + struct rule *rule; /* Linked list of rules of this (if an NT) */ + struct symbol *fallback; /* fallback token in case this token doesn't parse */ + int prec; /* Precedence if defined (-1 otherwise) */ + enum e_assoc assoc; /* Associativity if precedence is defined */ + char *firstset; /* First-set for all rules of this symbol */ + Boolean lambda; /* True if NT and can generate an empty string */ + int useCnt; /* Number of times used */ + char *destructor; /* Code which executes whenever this symbol is + ** popped from the stack during error processing */ + int destLineno; /* Line number for start of destructor. Set to + ** -1 for duplicate destructors. */ + char *datatype; /* The data type of information held by this + ** object. Only used if type==NONTERMINAL */ + int dtnum; /* The data type number. In the parser, the value + ** stack is a union. The .yy%d element of this + ** union is the correct data type for this object */ + int bContent; /* True if this symbol ever carries content - if + ** it is ever more than just syntax */ + /* The following fields are used by MULTITERMINALs only */ + int nsubsym; /* Number of constituent symbols in the MULTI */ + struct symbol **subsym; /* Array of constituent symbols */ +}; + +/* Each production rule in the grammar is stored in the following +** structure. */ +struct rule { + struct symbol *lhs; /* Left-hand side of the rule */ + const char *lhsalias; /* Alias for the LHS (NULL if none) */ + int lhsStart; /* True if left-hand side is the start symbol */ + int ruleline; /* Line number for the rule */ + int nrhs; /* Number of RHS symbols */ + struct symbol **rhs; /* The RHS symbols */ + const char **rhsalias; /* An alias for each RHS symbol (NULL if none) */ + int line; /* Line number at which code begins */ + const char *code; /* The code executed when this rule is reduced */ + const char *codePrefix; /* Setup code before code[] above */ + const char *codeSuffix; /* Breakdown code after code[] above */ + struct symbol *precsym; /* Precedence symbol for this rule */ + int index; /* An index number for this rule */ + int iRule; /* Rule number as used in the generated tables */ + Boolean noCode; /* True if this rule has no associated C code */ + Boolean codeEmitted; /* True if the code has been emitted already */ + Boolean canReduce; /* True if this rule is ever reduced */ + Boolean doesReduce; /* Reduce actions occur after optimization */ + Boolean neverReduce; /* Reduce is theoretically possible, but prevented + ** by actions or other outside implementation */ + struct rule *nextlhs; /* Next rule with the same LHS */ + struct rule *next; /* Next rule in the global list */ +}; + +/* A configuration is a production rule of the grammar together with +** a mark (dot) showing how much of that rule has been processed so far. +** Configurations also contain a follow-set which is a list of terminal +** symbols which are allowed to immediately follow the end of the rule. 
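+** For example (illustrative only): for a rule "expr ::= expr PLUS term",
+** the configuration "expr ::= expr . PLUS term" has its dot after the
+** first RHS symbol, and its follow-set lists the terminal symbols that
+** may appear once the whole rule has been matched.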
+** Every configuration is recorded as an instance of the following: */ +enum cfgstatus { + COMPLETE, + INCOMPLETE +}; +struct config { + struct rule *rp; /* The rule upon which the configuration is based */ + int dot; /* The parse point */ + char *fws; /* Follow-set for this configuration only */ + struct plink *fplp; /* Follow-set forward propagation links */ + struct plink *bplp; /* Follow-set backwards propagation links */ + struct state *stp; /* Pointer to state which contains this */ + enum cfgstatus status; /* used during followset and shift computations */ + struct config *next; /* Next configuration in the state */ + struct config *bp; /* The next basis configuration */ +}; + +enum e_action { + SHIFT, + ACCEPT, + REDUCE, + ERROR, + SSCONFLICT, /* A shift/shift conflict */ + SRCONFLICT, /* Was a reduce, but part of a conflict */ + RRCONFLICT, /* Was a reduce, but part of a conflict */ + SH_RESOLVED, /* Was a shift. Precedence resolved conflict */ + RD_RESOLVED, /* Was reduce. Precedence resolved conflict */ + NOT_USED, /* Deleted by compression */ + SHIFTREDUCE /* Shift first, then reduce */ +}; + +/* Every shift or reduce operation is stored as one of the following */ +struct action { + struct symbol *sp; /* The look-ahead symbol */ + enum e_action type; + union { + struct state *stp; /* The new state, if a shift */ + struct rule *rp; /* The rule, if a reduce */ + } x; + struct symbol *spOpt; /* SHIFTREDUCE optimization to this symbol */ + struct action *next; /* Next action for this state */ + struct action *collide; /* Next action with the same hash */ +}; + +/* Each state of the generated parser's finite state machine +** is encoded as an instance of the following structure. */ +struct state { + struct config *bp; /* The basis configurations for this state */ + struct config *cfp; /* All configurations in this set */ + int statenum; /* Sequential number for this state */ + struct action *ap; /* List of actions for this state */ + int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */ + int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */ + int iDfltReduce; /* Default action is to REDUCE by this rule */ + struct rule *pDfltReduce;/* The default REDUCE rule. */ + int autoReduce; /* True if this is an auto-reduce state */ +}; +#define NO_OFFSET (-2147483647) + +/* A followset propagation link indicates that the contents of one +** configuration followset should be propagated to another whenever +** the first changes. */ +struct plink { + struct config *cfp; /* The configuration to which linked */ + struct plink *next; /* The next propagate link */ +}; + +/* The state vector for the entire parser generator is recorded as +** follows. (LEMON uses no global variables and makes little use of +** static variables. Fields in the following structure can be thought +** of as begin global variables in the program.) 
*/ +struct lemon { + struct state **sorted; /* Table of states sorted by state number */ + struct rule *rule; /* List of all rules */ + struct rule *startRule; /* First rule */ + int nstate; /* Number of states */ + int nxstate; /* nstate with tail degenerate states removed */ + int nrule; /* Number of rules */ + int nruleWithAction; /* Number of rules with actions */ + int nsymbol; /* Number of terminal and nonterminal symbols */ + int nterminal; /* Number of terminal symbols */ + int minShiftReduce; /* Minimum shift-reduce action value */ + int errAction; /* Error action value */ + int accAction; /* Accept action value */ + int noAction; /* No-op action value */ + int minReduce; /* Minimum reduce action */ + int maxAction; /* Maximum action value of any kind */ + struct symbol **symbols; /* Sorted array of pointers to symbols */ + int errorcnt; /* Number of errors */ + struct symbol *errsym; /* The error symbol */ + struct symbol *wildcard; /* Token that matches anything */ + char *name; /* Name of the generated parser */ + char *arg; /* Declaration of the 3rd argument to parser */ + char *ctx; /* Declaration of 2nd argument to constructor */ + char *tokentype; /* Type of terminal symbols in the parser stack */ + char *vartype; /* The default type of non-terminal symbols */ + char *start; /* Name of the start symbol for the grammar */ + char *stacksize; /* Size of the parser stack */ + char *include; /* Code to put at the start of the C file */ + char *error; /* Code to execute when an error is seen */ + char *overflow; /* Code to execute on a stack overflow */ + char *failure; /* Code to execute on parser failure */ + char *accept; /* Code to execute when the parser excepts */ + char *extracode; /* Code appended to the generated file */ + char *tokendest; /* Code to execute to destroy token data */ + char *vardest; /* Code for the default non-terminal destructor */ + char *filename; /* Name of the input file */ + char *outname; /* Name of the current output file */ + char *tokenprefix; /* A prefix added to token names in the .h file */ + int nconflict; /* Number of parsing conflicts */ + int nactiontab; /* Number of entries in the yy_action[] table */ + int nlookaheadtab; /* Number of entries in yy_lookahead[] */ + int tablesize; /* Total table size of all tables in bytes */ + int basisflag; /* Print only basis configurations */ + int printPreprocessed; /* Show preprocessor output on stdout */ + int has_fallback; /* True if any %fallback is seen in the grammar */ + int nolinenosflag; /* True if #line statements should not be printed */ + char *argv0; /* Name of the program */ +}; + +#define MemoryCheck(X) if((X)==0){ \ + extern void memory_error(); \ + memory_error(); \ +} + +/**************** From the file "table.h" *********************************/ +/* +** All code in this file has been automatically generated +** from a specification in the file +** "table.q" +** by the associative array code building program "aagen". +** Do not edit this file! Instead, edit the specification +** file, then rerun aagen. +*/ +/* +** Code for processing tables in the LEMON parser generator. 
+*/
+/* Routines for handling strings */
+
+const char *Strsafe(const char *);
+
+void Strsafe_init(void);
+int Strsafe_insert(const char *);
+const char *Strsafe_find(const char *);
+
+/* Routines for handling symbols of the grammar */
+
+struct symbol *Symbol_new(const char *);
+int Symbolcmpp(const void *, const void *);
+void Symbol_init(void);
+int Symbol_insert(struct symbol *, const char *);
+struct symbol *Symbol_find(const char *);
+struct symbol *Symbol_Nth(int);
+int Symbol_count(void);
+struct symbol **Symbol_arrayof(void);
+
+/* Routines to manage the state table */
+
+int Configcmp(const char *, const char *);
+struct state *State_new(void);
+void State_init(void);
+int State_insert(struct state *, struct config *);
+struct state *State_find(struct config *);
+struct state **State_arrayof(void);
+
+/* Routines used for efficiency in Configlist_add */
+
+void Configtable_init(void);
+int Configtable_insert(struct config *);
+struct config *Configtable_find(struct config *);
+void Configtable_clear(int(*)(struct config *));
+
+/****************** From the file "action.c" *******************************/
+/*
+** Routines processing parser actions in the LEMON parser generator.
+*/
+
+/* Allocate a new parser action */
+static struct action *Action_new(void){
+  static struct action *actionfreelist = 0;
+  struct action *newaction;
+
+  if( actionfreelist==0 ){
+    int i;
+    int amt = 100;
+    actionfreelist = (struct action *)calloc(amt, sizeof(struct action));
+    if( actionfreelist==0 ){
+      fprintf(stderr,"Unable to allocate memory for a new parser action.");
+      exit(1);
+    }
+    for(i=0; i<amt-1; i++) actionfreelist[i].next = &actionfreelist[i+1];
+    actionfreelist[amt-1].next = 0;
+  }
+  newaction = actionfreelist;
+  actionfreelist = actionfreelist->next;
+  return newaction;
+}
+
+/* Compare two actions for sorting purposes.  Return negative, zero, or
+** positive if the first action is less than, equal to, or greater than
+** the second.
+*/
+static int actioncmp(
+  struct action *ap1,
+  struct action *ap2
+){
+  int rc;
+  rc = ap1->sp->index - ap2->sp->index;
+  if( rc==0 ){
+    rc = (int)ap1->type - (int)ap2->type;
+  }
+  if( rc==0 && (ap1->type==REDUCE || ap1->type==SHIFTREDUCE) ){
+    rc = ap1->x.rp->index - ap2->x.rp->index;
+  }
+  if( rc==0 ){
+    rc = (int) (ap2 - ap1);
+  }
+  return rc;
+}
+
+/* Sort parser actions */
+static struct action *Action_sort(
+  struct action *ap
+){
+  ap = (struct action *)msort((char *)ap,(char **)&ap->next,
+                              (int(*)(const char*,const char*))actioncmp);
+  return ap;
+}
+
+void Action_add(
+  struct action **app,
+  enum e_action type,
+  struct symbol *sp,
+  char *arg
+){
+  struct action *newaction;
+  newaction = Action_new();
+  newaction->next = *app;
+  *app = newaction;
+  newaction->type = type;
+  newaction->sp = sp;
+  newaction->spOpt = 0;
+  if( type==SHIFT ){
+    newaction->x.stp = (struct state *)arg;
+  }else{
+    newaction->x.rp = (struct rule *)arg;
+  }
+}
+/********************** New code to implement the "acttab" module ***********/
+/*
+** This module implements routines used to construct the yy_action[] table.
+*/
+
+/*
+** The state of the yy_action table under construction is an instance of
+** the following structure.
+**
+** The yy_action table maps the pair (state_number, lookahead) into an
+** action_number.  The table is an array of integer pairs.  The state_number
+** determines an initial offset into the yy_action array.  The lookahead
+** value is then added to this initial offset to get an index X into the
+** yy_action array.  If the aAction[X].lookahead equals the value of the
+** lookahead input, then the value of the action_number output is
+** aAction[X].action.  If the lookaheads do not match then the
+** default action for the state_number is returned.
+**
+** All actions associated with a single state_number are first entered
+** into aLookahead[] using multiple calls to acttab_action().  Then the
+** actions for that single state_number are placed into the aAction[]
+** array with a single call to acttab_insert().  The acttab_insert() call
+** also resets the aLookahead[] array in preparation for the next
+** state number.
+*/
+struct lookahead_action {
+  int lookahead;             /* Value of the lookahead token */
+  int action;                /* Action to take on the given lookahead */
+};
+typedef struct acttab acttab;
+struct acttab {
+  int nAction;                 /* Number of used slots in aAction[] */
+  int nActionAlloc;            /* Slots allocated for aAction[] */
+  struct lookahead_action
+    *aAction,                  /* The yy_action[] table under construction */
+    *aLookahead;               /* A single new transaction set */
+  int mnLookahead;             /* Minimum aLookahead[].lookahead */
+  int mnAction;                /* Action associated with mnLookahead */
+  int mxLookahead;             /* Maximum aLookahead[].lookahead */
+  int nLookahead;              /* Used slots in aLookahead[] */
+  int nLookaheadAlloc;         /* Slots allocated in aLookahead[] */
+  int nterminal;               /* Number of terminal symbols */
+  int nsymbol;                 /* total number of symbols */
+};
+
+/* Return the number of entries in the yy_action table */
+#define acttab_lookahead_size(X) ((X)->nAction)
+
+/* The value for the N-th entry in yy_action */
+#define acttab_yyaction(X,N)  ((X)->aAction[N].action)
+
+/* The value for the N-th entry in yy_lookahead */
+#define acttab_yylookahead(X,N)  ((X)->aAction[N].lookahead)
+
+/* Free all memory associated with the given acttab */
+void acttab_free(acttab *p){
+  free( p->aAction );
+  free( p->aLookahead );
+  free( p );
+}
+
+/* Allocate a new acttab structure */
+acttab *acttab_alloc(int nsymbol, int nterminal){
+  acttab *p = (acttab *) calloc( 1, sizeof(*p) );
+  if( p==0 ){
+    fprintf(stderr,"Unable to allocate memory for a new acttab.");
+    exit(1);
+  }
+  memset(p, 0, sizeof(*p));
+  p->nsymbol = nsymbol;
+  p->nterminal = nterminal;
+  return p;
+}
+
+/* Add a new action to the current transaction set.
+**
+** This routine is called once for each lookahead for a particular
+** state.
+*/
+void acttab_action(acttab *p, int lookahead, int action){
+  if( p->nLookahead>=p->nLookaheadAlloc ){
+    p->nLookaheadAlloc += 25;
+    p->aLookahead = (struct lookahead_action *) realloc( p->aLookahead,
+                             sizeof(p->aLookahead[0])*p->nLookaheadAlloc );
+    if( p->aLookahead==0 ){
+      fprintf(stderr,"malloc failed\n");
+      exit(1);
+    }
+  }
+  if( p->nLookahead==0 ){
+    p->mxLookahead = lookahead;
+    p->mnLookahead = lookahead;
+    p->mnAction = action;
+  }else{
+    if( p->mxLookahead<lookahead ) p->mxLookahead = lookahead;
+    if( p->mnLookahead>lookahead ){
+      p->mnLookahead = lookahead;
+      p->mnAction = action;
+    }
+  }
+  p->aLookahead[p->nLookahead].lookahead = lookahead;
+  p->aLookahead[p->nLookahead].action = action;
+  p->nLookahead++;
+}
+
+/*
+** Add the transaction set built up with prior calls to acttab_action()
+** into the current action table.  Then reset the transaction set back
+** to an empty set in preparation for a new round of acttab_action() calls.
+**
+** Return the offset into the action table of the new transaction.
+**
+** If the makeItSafe parameter is true, then the offset is chosen so that
+** it is impossible to overread the yy_lookaside[] table regardless of
+** the lookaside token.  This is done for the terminal symbols, as they
+** come from external inputs and can contain syntax errors.  When makeItSafe
+** is false, there is more flexibility in selecting offsets, resulting in
+** a smaller table.  For non-terminal symbols, which are never syntax errors,
+** makeItSafe can be false.
+*/
+int acttab_insert(acttab *p, int makeItSafe){
+  int i, j, k, n, end;
+  assert( p->nLookahead>0 );
+
+  /* Make sure we have enough space to hold the expanded action table
+  ** in the worst case.  The worst case occurs if the transaction set
+  ** must be appended to the current action table
+  */
+  n = p->nsymbol + 1;
+  if( p->nAction + n >= p->nActionAlloc ){
+    int oldAlloc = p->nActionAlloc;
+    p->nActionAlloc = p->nAction + n + p->nActionAlloc + 20;
+    p->aAction = (struct lookahead_action *) realloc( p->aAction,
+                          sizeof(p->aAction[0])*p->nActionAlloc);
+    if( p->aAction==0 ){
+      fprintf(stderr,"malloc failed\n");
+      exit(1);
+    }
+    for(i=oldAlloc; i<p->nActionAlloc; i++){
+      p->aAction[i].lookahead = -1;
+      p->aAction[i].action = -1;
+    }
+  }
+
+  /* Scan the existing action table looking for an offset that is a
+  ** duplicate of the current transaction set.  Fall out of the loop
+  ** if and when the duplicate is found.
+  **
+  ** i is the index in p->aAction[] where p->mnLookahead is inserted.
+  */
+  end = makeItSafe ? p->mnLookahead : 0;
+  for(i=p->nAction-1; i>=end; i--){
+    if( p->aAction[i].lookahead==p->mnLookahead ){
+      /* All lookaheads and actions in the aLookahead[] transaction
+      ** must match against the candidate aAction[i] entry. */
+      if( p->aAction[i].action!=p->mnAction ) continue;
+      for(j=0; j<p->nLookahead; j++){
+        k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+        if( k<0 || k>=p->nAction ) break;
+        if( p->aLookahead[j].lookahead!=p->aAction[k].lookahead ) break;
+        if( p->aLookahead[j].action!=p->aAction[k].action ) break;
+      }
+      if( j<p->nLookahead ) continue;
+
+      /* No possible lookahead value that is not in the aLookahead[]
+      ** transaction is allowed to match aAction[i] */
+      n = 0;
+      for(j=0; j<p->nAction; j++){
+        if( p->aAction[j].lookahead<0 ) continue;
+        if( p->aAction[j].lookahead==j+p->mnLookahead-i ) n++;
+      }
+      if( n==p->nLookahead ){
+        break;  /* An exact match is found at offset i */
+      }
+    }
+  }
+
+  /* If no existing offsets exactly match the current transaction, find
+  ** an empty offset in the aAction[] table in which we can add the
+  ** aLookahead[] transaction.
+  */
+  if( i<end ){
+    /* Look for holes in the aAction[] table that fit the current
+    ** aLookahead[] transaction.  Leave i set to the offset of the hole.
+    ** If no holes are found, i is left at p->nAction, which means the
+    ** transaction will be appended. */
+    i = makeItSafe ? p->mnLookahead : 0;
+    for(; i<p->nActionAlloc - p->mxLookahead; i++){
+      if( p->aAction[i].lookahead<0 ){
+        for(j=0; j<p->nLookahead; j++){
+          k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+          if( k<0 ) break;
+          if( p->aAction[k].lookahead>=0 ) break;
+        }
+        if( j<p->nLookahead ) continue;
+        for(j=0; j<p->nAction; j++){
+          if( p->aAction[j].lookahead==j+p->mnLookahead-i ) break;
+        }
+        if( j==p->nAction ){
+          break;  /* Fits in empty slots */
+        }
+      }
+    }
+  }
+  /* Insert transaction set at index i. */
+#if 0
+  printf("Acttab:");
+  for(j=0; j<p->nLookahead; j++){
+    printf(" %d", p->aLookahead[j].lookahead);
+  }
+  printf(" inserted at %d\n", i);
+#endif
+  for(j=0; j<p->nLookahead; j++){
+    k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+    p->aAction[k] = p->aLookahead[j];
+    if( k>=p->nAction ) p->nAction = k+1;
+  }
+  if( makeItSafe && i+p->nterminal>=p->nAction ) p->nAction = i+p->nterminal+1;
+  p->nLookahead = 0;
+
+  /* Return the offset that is added to the lookahead in order to get the
+  ** index into yy_action of the action */
+  return i - p->mnLookahead;
+}
+
+/*
+** Return the size of the action table without the trailing syntax error
+** entries.
+*/
+int acttab_action_size(acttab *p){
+  int n = p->nAction;
+  while( n>0 && p->aAction[n-1].lookahead<0 ){ n--; }
+  return n;
+}
+
+/********************** From the file "build.c" *****************************/
+/*
+** Routines to construct the finite state machine for the LEMON
+** parser generator.
+*/
+
+/* Find a precedence symbol of every rule in the grammar.
+**
+** Those rules which have a precedence symbol coded in the input
+** grammar using the "[symbol]" construct will already have the
+** rp->precsym field filled.  Other rules take as their precedence
+** symbol the first RHS symbol with a defined precedence.  If there
+** are no RHS symbols with a defined precedence, the precedence
+** symbol field is left blank.
+*/
+void FindRulePrecedences(struct lemon *xp)
+{
+  struct rule *rp;
+  for(rp=xp->rule; rp; rp=rp->next){
+    if( rp->precsym==0 ){
+      int i, j;
+      for(i=0; i<rp->nrhs && rp->precsym==0; i++){
+        struct symbol *sp = rp->rhs[i];
+        if( sp->type==MULTITERMINAL ){
+          for(j=0; j<sp->nsubsym; j++){
+            if( sp->subsym[j]->prec>=0 ){
+              rp->precsym = sp->subsym[j];
+              break;
+            }
+          }
+        }else if( sp->prec>=0 ){
+          rp->precsym = rp->rhs[i];
+        }
+      }
+    }
+  }
+  return;
+}
+
+/* Find all nonterminals which will generate the empty string.
+** Then go back and compute the first sets of every nonterminal.
+** The first set is the set of all terminal symbols which can begin
+** a string generated by that nonterminal.
+*/
+void FindFirstSets(struct lemon *lemp)
+{
+  int i, j;
+  struct rule *rp;
+  int progress;
+
+  for(i=0; i<lemp->nsymbol; i++){
+    lemp->symbols[i]->lambda = LEMON_FALSE;
+  }
+  for(i=lemp->nterminal; i<lemp->nsymbol; i++){
+    lemp->symbols[i]->firstset = SetNew();
+  }
+
+  /* First compute all lambdas */
+  do{
+    progress = 0;
+    for(rp=lemp->rule; rp; rp=rp->next){
+      if( rp->lhs->lambda ) continue;
+      for(i=0; i<rp->nrhs; i++){
+        struct symbol *sp = rp->rhs[i];
+        assert( sp->type==NONTERMINAL || sp->lambda==LEMON_FALSE );
+        if( sp->lambda==LEMON_FALSE ) break;
+      }
+      if( i==rp->nrhs ){
+        rp->lhs->lambda = LEMON_TRUE;
+        progress = 1;
+      }
+    }
+  }while( progress );
+
+  /* Now compute all first sets */
+  do{
+    struct symbol *s1, *s2;
+    progress = 0;
+    for(rp=lemp->rule; rp; rp=rp->next){
+      s1 = rp->lhs;
+      for(i=0; i<rp->nrhs; i++){
+        s2 = rp->rhs[i];
+        if( s2->type==TERMINAL ){
+          progress += SetAdd(s1->firstset,s2->index);
+          break;
+        }else if( s2->type==MULTITERMINAL ){
+          for(j=0; j<s2->nsubsym; j++){
+            progress += SetAdd(s1->firstset,s2->subsym[j]->index);
+          }
+          break;
+        }else if( s1==s2 ){
+          if( s1->lambda==LEMON_FALSE ) break;
+        }else{
+          progress += SetUnion(s1->firstset,s2->firstset);
+          if( s2->lambda==LEMON_FALSE ) break;
+        }
+      }
+    }
+  }while( progress );
+  return;
+}
+
+/* Compute all LR(0) states for the grammar.  Links
+** are added between some states so that the LR(1) follow sets
+** can be computed later.
+*/
+PRIVATE struct state *getstate(struct lemon *);  /* forward reference */
+void FindStates(struct lemon *lemp)
+{
+  struct symbol *sp;
+  struct rule *rp;
+
+  Configlist_init();
+
+  /* Find the start symbol */
+  if( lemp->start ){
+    sp = Symbol_find(lemp->start);
+    if( sp==0 ){
+      ErrorMsg(lemp->filename,0,
+        "The specified start symbol \"%s\" is not "
\"%s\" will be used as the start " + "symbol instead.",lemp->start,lemp->startRule->lhs->name); + lemp->errorcnt++; + sp = lemp->startRule->lhs; + } + }else if( lemp->startRule ){ + sp = lemp->startRule->lhs; + }else{ + ErrorMsg(lemp->filename,0,"Internal error - no start rule\n"); + exit(1); + } + + /* Make sure the start symbol doesn't occur on the right-hand side of + ** any rule. Report an error if it does. (YACC would generate a new + ** start symbol in this case.) */ + for(rp=lemp->rule; rp; rp=rp->next){ + int i; + for(i=0; inrhs; i++){ + if( rp->rhs[i]==sp ){ /* FIX ME: Deal with multiterminals */ + ErrorMsg(lemp->filename,0, + "The start symbol \"%s\" occurs on the " + "right-hand side of a rule. This will result in a parser which " + "does not work properly.",sp->name); + lemp->errorcnt++; + } + } + } + + /* The basis configuration set for the first state + ** is all rules which have the start symbol as their + ** left-hand side */ + for(rp=sp->rule; rp; rp=rp->nextlhs){ + struct config *newcfp; + rp->lhsStart = 1; + newcfp = Configlist_addbasis(rp,0); + SetAdd(newcfp->fws,0); + } + + /* Compute the first state. All other states will be + ** computed automatically during the computation of the first one. + ** The returned pointer to the first state is not used. */ + (void)getstate(lemp); + return; +} + +/* Return a pointer to a state which is described by the configuration +** list which has been built from calls to Configlist_add. +*/ +PRIVATE void buildshifts(struct lemon *, struct state *); /* Forwd ref */ +PRIVATE struct state *getstate(struct lemon *lemp) +{ + struct config *cfp, *bp; + struct state *stp; + + /* Extract the sorted basis of the new state. The basis was constructed + ** by prior calls to "Configlist_addbasis()". */ + Configlist_sortbasis(); + bp = Configlist_basis(); + + /* Get a state with the same basis */ + stp = State_find(bp); + if( stp ){ + /* A state with the same basis already exists! Copy all the follow-set + ** propagation links from the state under construction into the + ** preexisting state, then return a pointer to the preexisting state */ + struct config *x, *y; + for(x=bp, y=stp->bp; x && y; x=x->bp, y=y->bp){ + Plink_copy(&y->bplp,x->bplp); + Plink_delete(x->fplp); + x->fplp = x->bplp = 0; + } + cfp = Configlist_return(); + Configlist_eat(cfp); + }else{ + /* This really is a new state. Construct all the details */ + Configlist_closure(lemp); /* Compute the configuration closure */ + Configlist_sort(); /* Sort the configuration closure */ + cfp = Configlist_return(); /* Get a pointer to the config list */ + stp = State_new(); /* A new state structure */ + MemoryCheck(stp); + stp->bp = bp; /* Remember the configuration basis */ + stp->cfp = cfp; /* Remember the configuration closure */ + stp->statenum = lemp->nstate++; /* Every state gets a sequence number */ + stp->ap = 0; /* No actions, yet. */ + State_insert(stp,stp->bp); /* Add to the state table */ + buildshifts(lemp,stp); /* Recursively compute successor states */ + } + return stp; +} + +/* +** Return true if two symbols are the same. +*/ +int same_symbol(struct symbol *a, struct symbol *b) +{ + int i; + if( a==b ) return 1; + if( a->type!=MULTITERMINAL ) return 0; + if( b->type!=MULTITERMINAL ) return 0; + if( a->nsubsym!=b->nsubsym ) return 0; + for(i=0; insubsym; i++){ + if( a->subsym[i]!=b->subsym[i] ) return 0; + } + return 1; +} + +/* Construct all successor states to the given state. A "successor" +** state is any state which can be reached by a shift action. 
+*/
+PRIVATE void buildshifts(struct lemon *lemp, struct state *stp)
+{
+  struct config *cfp;  /* For looping thru the config closure of "stp" */
+  struct config *bcfp; /* For the inner loop on config closure of "stp" */
+  struct config *newcfg;  /* */
+  struct symbol *sp;   /* Symbol following the dot in configuration "cfp" */
+  struct symbol *bsp;  /* Symbol following the dot in configuration "bcfp" */
+  struct state *newstp; /* A pointer to a successor state */
+
+  /* Each configuration becomes complete after it contributes to a successor
+  ** state.  Initially, all configurations are incomplete */
+  for(cfp=stp->cfp; cfp; cfp=cfp->next) cfp->status = INCOMPLETE;
+
+  /* Loop through all configurations of the state "stp" */
+  for(cfp=stp->cfp; cfp; cfp=cfp->next){
+    if( cfp->status==COMPLETE ) continue;    /* Already used by inner loop */
+    if( cfp->dot>=cfp->rp->nrhs ) continue;  /* Can't shift this config */
+    Configlist_reset();                      /* Reset the new config set */
+    sp = cfp->rp->rhs[cfp->dot];             /* Symbol after the dot */
+
+    /* For every configuration in the state "stp" which has the symbol "sp"
+    ** following its dot, add the same configuration to the basis set under
+    ** construction but with the dot shifted one symbol to the right. */
+    for(bcfp=cfp; bcfp; bcfp=bcfp->next){
+      if( bcfp->status==COMPLETE ) continue;    /* Already used */
+      if( bcfp->dot>=bcfp->rp->nrhs ) continue; /* Can't shift this one */
+      bsp = bcfp->rp->rhs[bcfp->dot];           /* Get symbol after dot */
+      if( !same_symbol(bsp,sp) ) continue;      /* Must be same as for "cfp" */
+      bcfp->status = COMPLETE;                  /* Mark this config as used */
+      newcfg = Configlist_addbasis(bcfp->rp,bcfp->dot+1);
+      Plink_add(&newcfg->bplp,bcfp);
+    }
+
+    /* Get a pointer to the state described by the basis configuration set
+    ** constructed in the preceding loop */
+    newstp = getstate(lemp);
+
+    /* The state "newstp" is reached from the state "stp" by a shift action
+    ** on the symbol "sp" */
+    if( sp->type==MULTITERMINAL ){
+      int i;
+      for(i=0; i<sp->nsubsym; i++){
+        Action_add(&stp->ap,SHIFT,sp->subsym[i],(char*)newstp);
+      }
+    }else{
+      Action_add(&stp->ap,SHIFT,sp,(char *)newstp);
+    }
+  }
+}
+
+/*
+** Construct the propagation links
+*/
+void FindLinks(struct lemon *lemp)
+{
+  int i;
+  struct config *cfp, *other;
+  struct state *stp;
+  struct plink *plp;
+
+  /* Housekeeping detail:
+  ** Add to every propagate link a pointer back to the state to
+  ** which the link is attached. */
+  for(i=0; i<lemp->nstate; i++){
+    stp = lemp->sorted[i];
+    for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){
+      cfp->stp = stp;
+    }
+  }
+
+  /* Convert all backlinks into forward links.  Only the forward
+  ** links are used in the follow-set computation. */
+  for(i=0; i<lemp->nstate; i++){
+    stp = lemp->sorted[i];
+    for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){
+      for(plp=cfp->bplp; plp; plp=plp->next){
+        other = plp->cfp;
+        Plink_add(&other->fplp,cfp);
+      }
+    }
+  }
+}
+
+/* Compute all followsets.
+**
+** A followset is the set of all symbols which can come immediately
+** after a configuration.
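+** For example (illustrative only): if a configuration "A ::= a b ." has
+** the followset {PLUS, EOF}, then the state holding it gets a REDUCE by
+** that rule for the lookaheads PLUS and EOF (see FindActions below).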
+*/
+void FindFollowSets(struct lemon *lemp)
+{
+  int i;
+  struct config *cfp;
+  struct plink *plp;
+  int progress;
+  int change;
+
+  for(i=0; i<lemp->nstate; i++){
+    assert( lemp->sorted[i]!=0 );
+    for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+      cfp->status = INCOMPLETE;
+    }
+  }
+
+  do{
+    progress = 0;
+    for(i=0; i<lemp->nstate; i++){
+      assert( lemp->sorted[i]!=0 );
+      for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+        if( cfp->status==COMPLETE ) continue;
+        for(plp=cfp->fplp; plp; plp=plp->next){
+          change = SetUnion(plp->cfp->fws,cfp->fws);
+          if( change ){
+            plp->cfp->status = INCOMPLETE;
+            progress = 1;
+          }
+        }
+        cfp->status = COMPLETE;
+      }
+    }
+  }while( progress );
+}
+
+static int resolve_conflict(struct action *,struct action *);
+
+/* Compute the reduce actions, and resolve conflicts.
+*/
+void FindActions(struct lemon *lemp)
+{
+  int i,j;
+  struct config *cfp;
+  struct state *stp;
+  struct symbol *sp;
+  struct rule *rp;
+
+  /* Add all of the reduce actions
+  ** A reduce action is added for each element of the followset of
+  ** a configuration which has its dot at the extreme right.
+  */
+  for(i=0; i<lemp->nstate; i++){   /* Loop over all states */
+    stp = lemp->sorted[i];
+    for(cfp=stp->cfp; cfp; cfp=cfp->next){  /* Loop over all configurations */
+      if( cfp->rp->nrhs==cfp->dot ){        /* Is dot at extreme right? */
+        for(j=0; j<lemp->nterminal; j++){
+          if( SetFind(cfp->fws,j) ){
+            /* Add a reduce action to the state "stp" which will reduce by the
+            ** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */
+            Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp);
+          }
+        }
+      }
+    }
+  }
+
+  /* Add the accepting token */
+  if( lemp->start ){
+    sp = Symbol_find(lemp->start);
+    if( sp==0 ){
+      if( lemp->startRule==0 ){
+        fprintf(stderr, "internal error on source line %d: no start rule\n",
+                __LINE__);
+        exit(1);
+      }
+      sp = lemp->startRule->lhs;
+    }
+  }else{
+    sp = lemp->startRule->lhs;
+  }
+  /* Add to the first state (which is always the starting state of the
+  ** finite state machine) an action to ACCEPT if the lookahead is the
+  ** start nonterminal.  */
+  Action_add(&lemp->sorted[0]->ap,ACCEPT,sp,0);
+
+  /* Resolve conflicts */
+  for(i=0; i<lemp->nstate; i++){
+    struct action *ap, *nap;
+    stp = lemp->sorted[i];
+    /* assert( stp->ap ); */
+    stp->ap = Action_sort(stp->ap);
+    for(ap=stp->ap; ap && ap->next; ap=ap->next){
+      for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){
+         /* The two actions "ap" and "nap" have the same lookahead.
+         ** Figure out which one should be used */
+         lemp->nconflict += resolve_conflict(ap,nap);
+      }
+    }
+  }
+
+  /* Report an error for each rule that can never be reduced. */
+  for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = LEMON_FALSE;
+  for(i=0; i<lemp->nstate; i++){
+    struct action *ap;
+    for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
+      if( ap->type==REDUCE ) ap->x.rp->canReduce = LEMON_TRUE;
+    }
+  }
+  for(rp=lemp->rule; rp; rp=rp->next){
+    if( rp->canReduce ) continue;
+    ErrorMsg(lemp->filename,rp->ruleline,"This rule can not be reduced.\n");
+    lemp->errorcnt++;
+  }
+}
+
+/* Resolve a conflict between the two given actions.  If the
+** conflict can't be resolved, return non-zero.
+**
+** NO LONGER TRUE:
+**   To resolve a conflict, first look to see if either action
+**   is on an error rule.  In that case, take the action which
+**   is not associated with the error rule.  If neither or both
+**   actions are associated with an error rule, then try to
+**   use precedence to resolve the conflict.
+**
+** If either action is a SHIFT, then it must be apx.  This
+** function won't work if apx->type==REDUCE and apy->type==SHIFT.
+*/
+static int resolve_conflict(
+  struct action *apx,
+  struct action *apy
+){
+  struct symbol *spx, *spy;
+  int errcnt = 0;
+  assert( apx->sp==apy->sp );  /* Otherwise there would be no conflict */
+  if( apx->type==SHIFT && apy->type==SHIFT ){
+    apy->type = SSCONFLICT;
+    errcnt++;
+  }
+  if( apx->type==SHIFT && apy->type==REDUCE ){
+    spx = apx->sp;
+    spy = apy->x.rp->precsym;
+    if( spy==0 || spx->prec<0 || spy->prec<0 ){
+      /* Not enough precedence information. */
+      apy->type = SRCONFLICT;
+      errcnt++;
+    }else if( spx->prec>spy->prec ){    /* higher precedence wins */
+      apy->type = RD_RESOLVED;
+    }else if( spx->prec<spy->prec ){
+      apx->type = SH_RESOLVED;
+    }else if( spx->prec==spy->prec && spx->assoc==RIGHT ){ /* Use operator */
+      apy->type = RD_RESOLVED;                             /* associativity */
+    }else if( spx->prec==spy->prec && spx->assoc==LEFT ){  /* to break tie */
+      apx->type = SH_RESOLVED;
+    }else{
+      assert( spx->prec==spy->prec && spx->assoc==NONE );
+      apx->type = ERROR;
+    }
+  }else if( apx->type==REDUCE && apy->type==REDUCE ){
+    spx = apx->x.rp->precsym;
+    spy = apy->x.rp->precsym;
+    if( spx==0 || spy==0 || spx->prec<0 ||
+        spy->prec<0 || spx->prec==spy->prec ){
+      apy->type = RRCONFLICT;
+      errcnt++;
+    }else if( spx->prec>spy->prec ){
+      apy->type = RD_RESOLVED;
+    }else if( spx->prec<spy->prec ){
+      apx->type = RD_RESOLVED;
+    }
+  }else{
+    assert(
+      apx->type==SH_RESOLVED ||
+      apx->type==RD_RESOLVED ||
+      apx->type==SSCONFLICT ||
+      apx->type==SRCONFLICT ||
+      apx->type==RRCONFLICT ||
+      apy->type==SH_RESOLVED ||
+      apy->type==RD_RESOLVED ||
+      apy->type==SSCONFLICT ||
+      apy->type==SRCONFLICT ||
+      apy->type==RRCONFLICT
+    );
+    /* The REDUCE/SHIFT case cannot happen because SHIFTs come before
+    ** REDUCEs on the list.  If we reach this point it must be because
+    ** the parser conflict had already been resolved. */
+  }
+  return errcnt;
+}
+/********************* From the file "configlist.c" *************************/
+/*
+** Routines for processing a configuration list and building a state
+** in the LEMON parser generator.
+*/
+
+static struct config *freelist = 0;      /* List of free configurations */
+static struct config *current = 0;       /* Top of list of configurations */
+static struct config **currentend = 0;   /* Last on list of configs */
+static struct config *basis = 0;         /* Top of list of basis configs */
+static struct config **basisend = 0;     /* End of list of basis configs */
+
+/* Return a pointer to a new configuration */
+PRIVATE struct config *newconfig(void){
+  return (struct config*)calloc(1, sizeof(struct config));
+}
+
+/* The configuration "old" is no longer used */
+PRIVATE void deleteconfig(struct config *old)
+{
+  old->next = freelist;
+  freelist = old;
+}
+
+/* Initialize the configuration list builder */
+void Configlist_init(void){
+  current = 0;
+  currentend = &current;
+  basis = 0;
+  basisend = &basis;
+  Configtable_init();
+  return;
+}
+
+/* Reinitialize the configuration list builder */
+void Configlist_reset(void){
+  current = 0;
+  currentend = &current;
+  basis = 0;
+  basisend = &basis;
+  Configtable_clear(0);
+  return;
+}
+
+/* Add another configuration to the configuration list */
+struct config *Configlist_add(
+  struct rule *rp,    /* The rule */
+  int dot             /* Index into the RHS of the rule where the dot goes */
+){
+  struct config *cfp, model;
+
+  assert( currentend!=0 );
+  model.rp = rp;
+  model.dot = dot;
+  cfp = Configtable_find(&model);
+  if( cfp==0 ){
+    cfp = newconfig();
+    cfp->rp = rp;
+    cfp->dot = dot;
+    cfp->fws = SetNew();
+    cfp->stp = 0;
+    cfp->fplp = cfp->bplp = 0;
+    cfp->next = 0;
+    cfp->bp = 0;
+    *currentend = cfp;
+    currentend = &cfp->next;
+    Configtable_insert(cfp);
+  }
+  return cfp;
+}
+
+/* Add a basis configuration to the configuration list */
+struct config *Configlist_addbasis(struct rule *rp, int dot)
+{
+  struct config *cfp, model;
+
+  assert( basisend!=0 );
+  assert( currentend!=0 );
+  model.rp = rp;
+  model.dot = dot;
+  cfp = Configtable_find(&model);
+  if( cfp==0 ){
+    cfp = newconfig();
+    cfp->rp = rp;
+    cfp->dot = dot;
+    cfp->fws = SetNew();
+    cfp->stp = 0;
+    cfp->fplp = cfp->bplp = 0;
+    cfp->next = 0;
+    cfp->bp = 0;
+    *currentend = cfp;
+    currentend = &cfp->next;
+    *basisend = cfp;
+    basisend = &cfp->bp;
+    Configtable_insert(cfp);
+  }
+  return cfp;
+}
+
+/* Compute the closure of the configuration list */
+void Configlist_closure(struct lemon *lemp)
+{
+  struct config *cfp, *newcfp;
+  struct rule *rp, *newrp;
+  struct symbol *sp, *xsp;
+  int i, dot;
+
+  assert( currentend!=0 );
+  for(cfp=current; cfp; cfp=cfp->next){
+    rp = cfp->rp;
+    dot = cfp->dot;
+    if( dot>=rp->nrhs ) continue;
+    sp = rp->rhs[dot];
+    if( sp->type==NONTERMINAL ){
+      if( sp->rule==0 && sp!=lemp->errsym ){
+        ErrorMsg(lemp->filename,rp->line,"Nonterminal \"%s\" has no rules.",
+                 sp->name);
+        lemp->errorcnt++;
+      }
+      for(newrp=sp->rule; newrp; newrp=newrp->nextlhs){
+        newcfp = Configlist_add(newrp,0);
+        for(i=dot+1; i<rp->nrhs; i++){
+          xsp = rp->rhs[i];
+          if( xsp->type==TERMINAL ){
+            SetAdd(newcfp->fws,xsp->index);
+            break;
+          }else if( xsp->type==MULTITERMINAL ){
+            int k;
+            for(k=0; k<xsp->nsubsym; k++){
+              SetAdd(newcfp->fws, xsp->subsym[k]->index);
+            }
+            break;
+          }else{
+            SetUnion(newcfp->fws,xsp->firstset);
+            if( xsp->lambda==LEMON_FALSE ) break;
+          }
+        }
+        if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp);
+      }
+    }
+  }
+  return;
+}
+
+/* Sort the configuration list */
+void Configlist_sort(void){
+  current = (struct config*)msort((char*)current,(char**)&(current->next),
+                                  Configcmp);
+  currentend = 0;
+  return;
+}
+
+/* Sort the basis configuration list */
+void Configlist_sortbasis(void){
+  basis = (struct
config*)msort((char*)current,(char**)&(current->bp), + Configcmp); + basisend = 0; + return; +} + +/* Return a pointer to the head of the configuration list and +** reset the list */ +struct config *Configlist_return(void){ + struct config *old; + old = current; + current = 0; + currentend = 0; + return old; +} + +/* Return a pointer to the head of the configuration list and +** reset the list */ +struct config *Configlist_basis(void){ + struct config *old; + old = basis; + basis = 0; + basisend = 0; + return old; +} + +/* Free all elements of the given configuration list */ +void Configlist_eat(struct config *cfp) +{ + struct config *nextcfp; + for(; cfp; cfp=nextcfp){ + nextcfp = cfp->next; + assert( cfp->fplp==0 ); + assert( cfp->bplp==0 ); + if( cfp->fws ) SetFree(cfp->fws); + deleteconfig(cfp); + } + return; +} +/***************** From the file "error.c" *********************************/ +/* +** Code for printing error message. +*/ + +void ErrorMsg(const char *filename, int lineno, const char *format, ...){ + va_list ap; + fprintf(stderr, "%s:%d: ", filename, lineno); + va_start(ap, format); + vfprintf(stderr,format,ap); + va_end(ap); + fprintf(stderr, "\n"); +} +/**************** From the file "main.c" ************************************/ +/* +** Main program file for the LEMON parser generator. +*/ + +/* Report an out-of-memory condition and abort. This function +** is used mostly by the "MemoryCheck" macro in struct.h +*/ +void memory_error(void){ + fprintf(stderr,"Out of memory. Aborting...\n"); + exit(1); +} + +static int nDefine = 0; /* Number of -D options on the command line */ +static char **azDefine = 0; /* Name of the -D macros */ + +/* This routine is called with the argument to each -D command-line option. +** Add the macro defined to the azDefine array. 
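+** For example (hypothetical invocation): "lemon -DYYDEBUG grammar.y" adds
+** "YYDEBUG" to azDefine, and for "-Dfoo=1" only the part before the "="
+** (here "foo") is recorded, since the code below truncates the copy at
+** the first "=".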
+*/
+static void handle_D_option(char *z){
+  char **paz;
+  nDefine++;
+  azDefine = (char **) realloc(azDefine, sizeof(azDefine[0])*nDefine);
+  if( azDefine==0 ){
+    fprintf(stderr,"out of memory\n");
+    exit(1);
+  }
+  paz = &azDefine[nDefine-1];
+  *paz = (char *) malloc( lemonStrlen(z)+1 );
+  if( *paz==0 ){
+    fprintf(stderr,"out of memory\n");
+    exit(1);
+  }
+  lemon_strcpy(*paz, z);
+  for(z=*paz; *z && *z!='='; z++){}
+  *z = 0;
+}
+
+/* Remember the name of the output directory
+*/
+static char *outputDir = NULL;
+static void handle_d_option(char *z){
+  outputDir = (char *) malloc( lemonStrlen(z)+1 );
+  if( outputDir==0 ){
+    fprintf(stderr,"out of memory\n");
+    exit(1);
+  }
+  lemon_strcpy(outputDir, z);
+}
+
+static char *user_templatename = NULL;
+static void handle_T_option(char *z){
+  user_templatename = (char *) malloc( lemonStrlen(z)+1 );
+  if( user_templatename==0 ){
+    memory_error();
+  }
+  lemon_strcpy(user_templatename, z);
+}
+
+/* Merge together two lists of rules ordered by rule.iRule */
+static struct rule *Rule_merge(struct rule *pA, struct rule *pB){
+  struct rule *pFirst = 0;
+  struct rule **ppPrev = &pFirst;
+  while( pA && pB ){
+    if( pA->iRule<pB->iRule ){
+      *ppPrev = pA;
+      ppPrev = &pA->next;
+      pA = pA->next;
+    }else{
+      *ppPrev = pB;
+      ppPrev = &pB->next;
+      pB = pB->next;
+    }
+  }
+  if( pA ){
+    *ppPrev = pA;
+  }else{
+    *ppPrev = pB;
+  }
+  return pFirst;
+}
+
+/*
+** Sort a list of rules in order of increasing iRule value
+*/
+static struct rule *Rule_sort(struct rule *rp){
+  unsigned int i;
+  struct rule *pNext;
+  struct rule *x[32];
+  memset(x, 0, sizeof(x));
+  while( rp ){
+    pNext = rp->next;
+    rp->next = 0;
+    for(i=0; i<sizeof(x)/sizeof(x[0]) && x[i]; i++){
+      rp = Rule_merge(x[i], rp);
+      x[i] = 0;
+    }
+    x[i] = rp;
+    rp = pNext;
+  }
+  rp = 0;
+  for(i=0; i<sizeof(x)/sizeof(x[0]); i++){
+    rp = Rule_merge(x[i], rp);
+  }
+  return rp;
+}
+  for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
+  qsort(lem.symbols,lem.nsymbol,sizeof(struct symbol*), Symbolcmpp);
+  for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
+  while( lem.symbols[i-1]->type==MULTITERMINAL ){ i--; }
+  assert( strcmp(lem.symbols[i-1]->name,"{default}")==0 );
+  lem.nsymbol = i - 1;
+  for(i=1; ISUPPER(lem.symbols[i]->name[0]); i++);
+  lem.nterminal = i;
+
+  /* Assign sequential rule numbers.  Start with 0.  Put rules that have no
+  ** reduce action C-code associated with them last, so that the switch()
+  ** statement that selects reduction actions will have a smaller jump table.
+  */
+  for(i=0, rp=lem.rule; rp; rp=rp->next){
+    rp->iRule = rp->code ? i++ : -1;
+  }
+  lem.nruleWithAction = i;
+  for(rp=lem.rule; rp; rp=rp->next){
+    if( rp->iRule<0 ) rp->iRule = i++;
+  }
+  lem.startRule = lem.rule;
+  lem.rule = Rule_sort(lem.rule);
+
+  /* Generate a reprint of the grammar, if requested on the command line */
+  if( rpflag ){
+    Reprint(&lem);
+  }else{
+    /* Initialize the size for all follow and first sets */
+    SetSize(lem.nterminal+1);
+
+    /* Find the precedence for every production rule (that has one) */
+    FindRulePrecedences(&lem);
+
+    /* Compute the lambda-nonterminals and the first-sets for every
+    ** nonterminal */
+    FindFirstSets(&lem);
+
+    /* Compute all LR(0) states.  Also record follow-set propagation
+    ** links so that the follow-set can be computed later */
+    lem.nstate = 0;
+    FindStates(&lem);
+    lem.sorted = State_arrayof();
+
+    /* Tie up loose ends on the propagation links */
+    FindLinks(&lem);
+
+    /* Compute the follow set of every reducible configuration */
+    FindFollowSets(&lem);
+
+    /* Compute the action tables */
+    FindActions(&lem);
+
+    /* Compress the action tables */
+    if( compress==0 ) CompressTables(&lem);
+
+    /* Reorder and renumber the states so that states with fewer choices
+    ** occur at the end.  This is an optimization that helps make the
+    ** generated parser tables smaller.
*/ + if( noResort==0 ) ResortStates(&lem); + + /* Generate a report of the parser generated. (the "y.output" file) */ + if( !quiet ) ReportOutput(&lem); + + /* Generate the source code for the parser */ + ReportTable(&lem, mhflag, sqlFlag); + + /* Produce a header file for use by the scanner. (This step is + ** omitted if the "-m" option is used because makeheaders will + ** generate the file for us.) */ + if( !mhflag ) ReportHeader(&lem); + } + if( statistics ){ + printf("Parser statistics:\n"); + stats_line("terminal symbols", lem.nterminal); + stats_line("non-terminal symbols", lem.nsymbol - lem.nterminal); + stats_line("total symbols", lem.nsymbol); + stats_line("rules", lem.nrule); + stats_line("states", lem.nxstate); + stats_line("conflicts", lem.nconflict); + stats_line("action table entries", lem.nactiontab); + stats_line("lookahead table entries", lem.nlookaheadtab); + stats_line("total table size (bytes)", lem.tablesize); + } + if( lem.nconflict > 0 ){ + fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict); + } + + /* return 0 on success, 1 on failure. */ + exitcode = ((lem.errorcnt > 0) || (lem.nconflict > 0)) ? 1 : 0; + exit(exitcode); + return (exitcode); +} +/******************** From the file "msort.c" *******************************/ +/* +** A generic merge-sort program. +** +** USAGE: +** Let "ptr" be a pointer to some structure which is at the head of +** a null-terminated list. Then to sort the list call: +** +** ptr = msort(ptr,&(ptr->next),cmpfnc); +** +** In the above, "cmpfnc" is a pointer to a function which compares +** two instances of the structure and returns an integer, as in +** strcmp. The second argument is a pointer to the pointer to the +** second element of the linked list. This address is used to compute +** the offset to the "next" field within the structure. The offset to +** the "next" field must be constant for all structures in the list. +** +** The function returns a new pointer which is the head of the list +** after sorting. +** +** ALGORITHM: +** Merge-sort. +*/ + +/* +** Return a pointer to the next structure in the linked list. +*/ +#define NEXT(A) (*(char**)(((char*)A)+offset)) + +/* +** Inputs: +** a: A sorted, null-terminated linked list. (May be null). +** b: A sorted, null-terminated linked list. (May be null). +** cmp: A pointer to the comparison function. +** offset: Offset in the structure to the "next" field. +** +** Return Value: +** A pointer to the head of a sorted list containing the elements +** of both a and b. +** +** Side effects: +** The "next" pointers for elements in the lists a and b are +** changed. +*/ +static char *merge( + char *a, + char *b, + int (*cmp)(const char*,const char*), + int offset +){ + char *ptr, *head; + + if( a==0 ){ + head = b; + }else if( b==0 ){ + head = a; + }else{ + if( (*cmp)(a,b)<=0 ){ + ptr = a; + a = NEXT(a); + }else{ + ptr = b; + b = NEXT(b); + } + head = ptr; + while( a && b ){ + if( (*cmp)(a,b)<=0 ){ + NEXT(ptr) = a; + ptr = a; + a = NEXT(a); + }else{ + NEXT(ptr) = b; + ptr = b; + b = NEXT(b); + } + } + if( a ) NEXT(ptr) = a; + else NEXT(ptr) = b; + } + return head; +} + +/* +** Inputs: +** list: Pointer to a singly-linked list of structures. +** next: Pointer to pointer to the second element of the list. +** cmp: A comparison function. +** +** Return Value: +** A pointer to the head of a sorted list containing the elements +** originally in list. +** +** Side effects: +** The "next" pointers for elements in list are changed. 
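+**
+** Usage sketch (illustrative only, not part of the upstream source):
+** given
+**     struct item { int v; struct item *next; };
+**     static int itemcmp(const char *a, const char *b){
+**       return ((const struct item*)a)->v - ((const struct item*)b)->v;
+**     }
+** a list of items could be sorted with
+**     items = (struct item*)msort((char*)items,(char**)&items->next,itemcmp);
+** The &items->next argument is only used to compute the byte offset of
+** the "next" field; it is never dereferenced.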
+*/
+#define LISTSIZE 30
+static char *msort(
+  char *list,
+  char **next,
+  int (*cmp)(const char*,const char*)
+){
+  unsigned long offset;
+  char *ep;
+  char *set[LISTSIZE];
+  int i;
+  offset = (unsigned long)((char*)next - (char*)list);
+  for(i=0; i<LISTSIZE; i++) set[i] = 0;
+  while( list ){
+    ep = list;
+    list = NEXT(list);
+    NEXT(ep) = 0;
+    for(i=0; i<LISTSIZE-1 && set[i]!=0; i++){
+      ep = merge(ep,set[i],cmp,offset);
+      set[i] = 0;
+    }
+    set[i] = ep;
+  }
+  ep = 0;
+  for(i=0; i<LISTSIZE; i++) if( set[i] ) ep = merge(set[i],ep,cmp,offset);
+  return ep;
+}
+
+/* Parse a single token */
+static void parseonetoken(struct pstate *psp)
+{
+  const char *x;
+  x = Strsafe(psp->tokenstart);     /* Save the token permanently */
+  switch( psp->state ){
+    case INITIALIZE:
+      psp->prevrule = 0;
+      psp->preccounter = 0;
+      psp->firstrule = psp->lastrule = 0;
+      psp->gp->nrule = 0;
+      /* fall through */
+    case WAITING_FOR_DECL_OR_RULE:
+      if( x[0]=='%' ){
+        psp->state = WAITING_FOR_DECL_KEYWORD;
+      }else if( ISLOWER(x[0]) ){
+        psp->lhs = Symbol_new(x);
+        psp->nrhs = 0;
+        psp->lhsalias = 0;
+        psp->state = WAITING_FOR_ARROW;
+      }else if( x[0]=='{' ){
+        if( psp->prevrule==0 ){
+          ErrorMsg(psp->filename,psp->tokenlineno,
+            "There is no prior rule upon which to attach the code "
+            "fragment which begins on this line.");
+          psp->errorcnt++;
+        }else if( psp->prevrule->code!=0 ){
+          ErrorMsg(psp->filename,psp->tokenlineno,
+            "Code fragment beginning on this line is not the first "
+            "to follow the previous rule.");
+          psp->errorcnt++;
+        }else if( strcmp(x, "{NEVER-REDUCE")==0 ){
+          psp->prevrule->neverReduce = 1;
+        }else{
+          psp->prevrule->line = psp->tokenlineno;
+          psp->prevrule->code = &x[1];
+          psp->prevrule->noCode = 0;
+        }
+      }else if( x[0]=='[' ){
+        psp->state = PRECEDENCE_MARK_1;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Token \"%s\" should be either \"%%\" or a nonterminal name.",
+          x);
+        psp->errorcnt++;
+      }
+      break;
+    case PRECEDENCE_MARK_1:
+      if( !ISUPPER(x[0]) ){
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "The precedence symbol must be a terminal.");
+        psp->errorcnt++;
+      }else if( psp->prevrule==0 ){
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "There is no prior rule to assign precedence \"[%s]\".",x);
+        psp->errorcnt++;
+      }else if( psp->prevrule->precsym!=0 ){
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Precedence mark on this line is not the first "
+          "to follow the previous rule.");
+        psp->errorcnt++;
+      }else{
+        psp->prevrule->precsym = Symbol_new(x);
+      }
+      psp->state = PRECEDENCE_MARK_2;
+      break;
+    case PRECEDENCE_MARK_2:
+      if( x[0]!=']' ){
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Missing \"]\" on precedence mark.");
+        psp->errorcnt++;
+      }
+      psp->state = WAITING_FOR_DECL_OR_RULE;
+      break;
+    case WAITING_FOR_ARROW:
+      if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+        psp->state = IN_RHS;
+      }else if( x[0]=='(' ){
+        psp->state = LHS_ALIAS_1;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Expected to see a \":\" following the LHS symbol \"%s\".",
+          psp->lhs->name);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_RULE_ERROR;
+      }
+      break;
+    case LHS_ALIAS_1:
+      if( ISALPHA(x[0]) ){
+        psp->lhsalias = x;
+        psp->state = LHS_ALIAS_2;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "\"%s\" is not a valid alias for the LHS \"%s\"\n",
+          x,psp->lhs->name);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_RULE_ERROR;
+      }
+      break;
+    case LHS_ALIAS_2:
+      if( x[0]==')' ){
+        psp->state = LHS_ALIAS_3;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_RULE_ERROR;
+      }
+      break;
+    case LHS_ALIAS_3:
+      if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+        psp->state = IN_RHS;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Missing \"->\" following: \"%s(%s)\".",
+          psp->lhs->name,psp->lhsalias);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_RULE_ERROR;
+      }
+      break;
+    case IN_RHS:
+      if( x[0]=='.'
+      ){
+        struct rule *rp;
+        rp = (struct rule *)calloc( sizeof(struct rule) +
+             sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs, 1);
+        if( rp==0 ){
+          ErrorMsg(psp->filename,psp->tokenlineno,
+            "Can't allocate enough memory for this rule.");
+          psp->errorcnt++;
+          psp->prevrule = 0;
+        }else{
+          int i;
+          rp->ruleline = psp->tokenlineno;
+          rp->rhs = (struct symbol**)&rp[1];
+          rp->rhsalias = (const char**)&(rp->rhs[psp->nrhs]);
+          for(i=0; i<psp->nrhs; i++){
+            rp->rhs[i] = psp->rhs[i];
+            rp->rhsalias[i] = psp->alias[i];
+            if( rp->rhsalias[i]!=0 ){ rp->rhs[i]->bContent = 1; }
+          }
+          rp->lhs = psp->lhs;
+          rp->lhsalias = psp->lhsalias;
+          rp->nrhs = psp->nrhs;
+          rp->code = 0;
+          rp->noCode = 1;
+          rp->precsym = 0;
+          rp->index = psp->gp->nrule++;
+          rp->nextlhs = rp->lhs->rule;
+          rp->lhs->rule = rp;
+          rp->next = 0;
+          if( psp->firstrule==0 ){
+            psp->firstrule = psp->lastrule = rp;
+          }else{
+            psp->lastrule->next = rp;
+            psp->lastrule = rp;
+          }
+          psp->prevrule = rp;
+        }
+        psp->state = WAITING_FOR_DECL_OR_RULE;
+      }else if( ISALPHA(x[0]) ){
+        if( psp->nrhs>=MAXRHS ){
+          ErrorMsg(psp->filename,psp->tokenlineno,
+            "Too many symbols on RHS of rule beginning at \"%s\".",
+            x);
+          psp->errorcnt++;
+          psp->state = RESYNC_AFTER_RULE_ERROR;
+        }else{
+          psp->rhs[psp->nrhs] = Symbol_new(x);
+          psp->alias[psp->nrhs] = 0;
+          psp->nrhs++;
+        }
+      }else if( (x[0]=='|' || x[0]=='/') && psp->nrhs>0 && ISUPPER(x[1]) ){
+        struct symbol *msp = psp->rhs[psp->nrhs-1];
+        if( msp->type!=MULTITERMINAL ){
+          struct symbol *origsp = msp;
+          msp = (struct symbol *) calloc(1,sizeof(*msp));
+          memset(msp, 0, sizeof(*msp));
+          msp->type = MULTITERMINAL;
+          msp->nsubsym = 1;
+          msp->subsym = (struct symbol **) calloc(1,sizeof(struct symbol*));
+          msp->subsym[0] = origsp;
+          msp->name = origsp->name;
+          psp->rhs[psp->nrhs-1] = msp;
+        }
+        msp->nsubsym++;
+        msp->subsym = (struct symbol **) realloc(msp->subsym,
+          sizeof(struct symbol*)*msp->nsubsym);
+        msp->subsym[msp->nsubsym-1] = Symbol_new(&x[1]);
+        if( ISLOWER(x[1]) || ISLOWER(msp->subsym[0]->name[0]) ){
+          ErrorMsg(psp->filename,psp->tokenlineno,
+            "Cannot form a compound containing a non-terminal");
+          psp->errorcnt++;
+        }
+      }else if( x[0]=='(' && psp->nrhs>0 ){
+        psp->state = RHS_ALIAS_1;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Illegal character on RHS of rule: \"%s\".",x);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_RULE_ERROR;
+      }
+      break;
+    case RHS_ALIAS_1:
+      if( ISALPHA(x[0]) ){
+        psp->alias[psp->nrhs-1] = x;
+        psp->state = RHS_ALIAS_2;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "\"%s\" is not a valid alias for the RHS symbol \"%s\"\n",
+          x,psp->rhs[psp->nrhs-1]->name);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_RULE_ERROR;
+      }
+      break;
+    case RHS_ALIAS_2:
+      if( x[0]==')' ){
+        psp->state = IN_RHS;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_RULE_ERROR;
+      }
+      break;
+    case WAITING_FOR_DECL_KEYWORD:
+      if( ISALPHA(x[0]) ){
+        psp->declkeyword = x;
+        psp->declargslot = 0;
+        psp->decllinenoslot = 0;
+        psp->insertLineMacro = 1;
+        psp->state = WAITING_FOR_DECL_ARG;
+        if( strcmp(x,"name")==0 ){
+          psp->declargslot = &(psp->gp->name);
+          psp->insertLineMacro = 0;
+        }else if( strcmp(x,"include")==0 ){
+          psp->declargslot = &(psp->gp->include);
+        }else if( strcmp(x,"code")==0 ){
+          psp->declargslot = &(psp->gp->extracode);
+        }else if( strcmp(x,"token_destructor")==0 ){
+          psp->declargslot = &psp->gp->tokendest;
+        }else if( strcmp(x,"default_destructor")==0 ){
psp->declargslot = &psp->gp->vardest; + }else if( strcmp(x,"token_prefix")==0 ){ + psp->declargslot = &psp->gp->tokenprefix; + psp->insertLineMacro = 0; + }else if( strcmp(x,"syntax_error")==0 ){ + psp->declargslot = &(psp->gp->error); + }else if( strcmp(x,"parse_accept")==0 ){ + psp->declargslot = &(psp->gp->accept); + }else if( strcmp(x,"parse_failure")==0 ){ + psp->declargslot = &(psp->gp->failure); + }else if( strcmp(x,"stack_overflow")==0 ){ + psp->declargslot = &(psp->gp->overflow); + }else if( strcmp(x,"extra_argument")==0 ){ + psp->declargslot = &(psp->gp->arg); + psp->insertLineMacro = 0; + }else if( strcmp(x,"extra_context")==0 ){ + psp->declargslot = &(psp->gp->ctx); + psp->insertLineMacro = 0; + }else if( strcmp(x,"token_type")==0 ){ + psp->declargslot = &(psp->gp->tokentype); + psp->insertLineMacro = 0; + }else if( strcmp(x,"default_type")==0 ){ + psp->declargslot = &(psp->gp->vartype); + psp->insertLineMacro = 0; + }else if( strcmp(x,"stack_size")==0 ){ + psp->declargslot = &(psp->gp->stacksize); + psp->insertLineMacro = 0; + }else if( strcmp(x,"start_symbol")==0 ){ + psp->declargslot = &(psp->gp->start); + psp->insertLineMacro = 0; + }else if( strcmp(x,"left")==0 ){ + psp->preccounter++; + psp->declassoc = LEFT; + psp->state = WAITING_FOR_PRECEDENCE_SYMBOL; + }else if( strcmp(x,"right")==0 ){ + psp->preccounter++; + psp->declassoc = RIGHT; + psp->state = WAITING_FOR_PRECEDENCE_SYMBOL; + }else if( strcmp(x,"nonassoc")==0 ){ + psp->preccounter++; + psp->declassoc = NONE; + psp->state = WAITING_FOR_PRECEDENCE_SYMBOL; + }else if( strcmp(x,"destructor")==0 ){ + psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL; + }else if( strcmp(x,"type")==0 ){ + psp->state = WAITING_FOR_DATATYPE_SYMBOL; + }else if( strcmp(x,"fallback")==0 ){ + psp->fallback = 0; + psp->state = WAITING_FOR_FALLBACK_ID; + }else if( strcmp(x,"token")==0 ){ + psp->state = WAITING_FOR_TOKEN_NAME; + }else if( strcmp(x,"wildcard")==0 ){ + psp->state = WAITING_FOR_WILDCARD_ID; + }else if( strcmp(x,"token_class")==0 ){ + psp->state = WAITING_FOR_CLASS_ID; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Unknown declaration keyword: \"%%%s\".",x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + } + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Illegal declaration keyword: \"%s\".",x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + } + break; + case WAITING_FOR_DESTRUCTOR_SYMBOL: + if( !ISALPHA(x[0]) ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Symbol name missing after %%destructor keyword"); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else{ + struct symbol *sp = Symbol_new(x); + psp->declargslot = &sp->destructor; + psp->decllinenoslot = &sp->destLineno; + psp->insertLineMacro = 1; + psp->state = WAITING_FOR_DECL_ARG; + } + break; + case WAITING_FOR_DATATYPE_SYMBOL: + if( !ISALPHA(x[0]) ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Symbol name missing after %%type keyword"); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else{ + struct symbol *sp = Symbol_find(x); + if((sp) && (sp->datatype)){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Symbol %%type \"%s\" already defined", x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else{ + if (!sp){ + sp = Symbol_new(x); + } + psp->declargslot = &sp->datatype; + psp->insertLineMacro = 0; + psp->state = WAITING_FOR_DECL_ARG; + } + } + break; + case WAITING_FOR_PRECEDENCE_SYMBOL: + if( x[0]=='.' 
+      ){
+        psp->state = WAITING_FOR_DECL_OR_RULE;
+      }else if( ISUPPER(x[0]) ){
+        struct symbol *sp;
+        sp = Symbol_new(x);
+        if( sp->prec>=0 ){
+          ErrorMsg(psp->filename,psp->tokenlineno,
+            "Symbol \"%s\" has already been given a precedence.",x);
+          psp->errorcnt++;
+        }else{
+          sp->prec = psp->preccounter;
+          sp->assoc = psp->declassoc;
+        }
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Can't assign a precedence to \"%s\".",x);
+        psp->errorcnt++;
+      }
+      break;
+    case WAITING_FOR_DECL_ARG:
+      if( x[0]=='{' || x[0]=='\"' || ISALNUM(x[0]) ){
+        const char *zOld, *zNew;
+        char *zBuf, *z;
+        int nOld, n, nLine = 0, nNew, nBack;
+        int addLineMacro;
+        char zLine[50];
+        zNew = x;
+        if( zNew[0]=='"' || zNew[0]=='{' ) zNew++;
+        nNew = lemonStrlen(zNew);
+        if( *psp->declargslot ){
+          zOld = *psp->declargslot;
+        }else{
+          zOld = "";
+        }
+        nOld = lemonStrlen(zOld);
+        n = nOld + nNew + 20;
+        addLineMacro = !psp->gp->nolinenosflag
+                       && psp->insertLineMacro
+                       && psp->tokenlineno>1
+                       && (psp->decllinenoslot==0 || psp->decllinenoslot[0]!=0);
+        if( addLineMacro ){
+          for(z=psp->filename, nBack=0; *z; z++){
+            if( *z=='\\' ) nBack++;
+          }
+          lemon_sprintf(zLine, "#line %d ", psp->tokenlineno);
+          nLine = lemonStrlen(zLine);
+          n += nLine + lemonStrlen(psp->filename) + nBack;
+        }
+        *psp->declargslot = (char *) realloc(*psp->declargslot, n);
+        zBuf = *psp->declargslot + nOld;
+        if( addLineMacro ){
+          if( nOld && zBuf[-1]!='\n' ){
+            *(zBuf++) = '\n';
+          }
+          memcpy(zBuf, zLine, nLine);
+          zBuf += nLine;
+          *(zBuf++) = '"';
+          for(z=psp->filename; *z; z++){
+            if( *z=='\\' ){
+              *(zBuf++) = '\\';
+            }
+            *(zBuf++) = *z;
+          }
+          *(zBuf++) = '"';
+          *(zBuf++) = '\n';
+        }
+        if( psp->decllinenoslot && psp->decllinenoslot[0]==0 ){
+          psp->decllinenoslot[0] = psp->tokenlineno;
+        }
+        memcpy(zBuf, zNew, nNew);
+        zBuf += nNew;
+        *zBuf = 0;
+        psp->state = WAITING_FOR_DECL_OR_RULE;
+      }else{
+        ErrorMsg(psp->filename,psp->tokenlineno,
+          "Illegal argument to %%%s: %s",psp->declkeyword,x);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_DECL_ERROR;
+      }
+      break;
+    case WAITING_FOR_FALLBACK_ID:
+      if( x[0]=='.' ){
+        psp->state = WAITING_FOR_DECL_OR_RULE;
+      }else if( !ISUPPER(x[0]) ){
+        ErrorMsg(psp->filename, psp->tokenlineno,
+          "%%fallback argument \"%s\" should be a token", x);
+        psp->errorcnt++;
+      }else{
+        struct symbol *sp = Symbol_new(x);
+        if( psp->fallback==0 ){
+          psp->fallback = sp;
+        }else if( sp->fallback ){
+          ErrorMsg(psp->filename, psp->tokenlineno,
+            "More than one fallback assigned to token %s", x);
+          psp->errorcnt++;
+        }else{
+          sp->fallback = psp->fallback;
+          psp->gp->has_fallback = 1;
+        }
+      }
+      break;
+    case WAITING_FOR_TOKEN_NAME:
+      /* Tokens do not have to be declared before use.  But they can be
+      ** in order to control their assigned integer number.  The number for
+      ** each token is assigned when it is first seen.  So by including
+      **
+      **     %token ONE TWO THREE.
+      **
+      ** early in the grammar file, that assigns small consecutive values
+      ** to each of the tokens ONE TWO and THREE.
+      */
+      if( x[0]=='.' ){
+        psp->state = WAITING_FOR_DECL_OR_RULE;
+      }else if( !ISUPPER(x[0]) ){
+        ErrorMsg(psp->filename, psp->tokenlineno,
+          "%%token argument \"%s\" should be a token", x);
+        psp->errorcnt++;
+      }else{
+        (void)Symbol_new(x);
+      }
+      break;
+    case WAITING_FOR_WILDCARD_ID:
+      if( x[0]=='.'
+      ){
+        psp->state = WAITING_FOR_DECL_OR_RULE;
+      }else if( !ISUPPER(x[0]) ){
+        ErrorMsg(psp->filename, psp->tokenlineno,
+          "%%wildcard argument \"%s\" should be a token", x);
+        psp->errorcnt++;
+      }else{
+        struct symbol *sp = Symbol_new(x);
+        if( psp->gp->wildcard==0 ){
+          psp->gp->wildcard = sp;
+        }else{
+          ErrorMsg(psp->filename, psp->tokenlineno,
+            "Extra wildcard to token: %s", x);
+          psp->errorcnt++;
+        }
+      }
+      break;
+    case WAITING_FOR_CLASS_ID:
+      if( !ISLOWER(x[0]) ){
+        ErrorMsg(psp->filename, psp->tokenlineno,
+          "%%token_class must be followed by an identifier: %s", x);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_DECL_ERROR;
+      }else if( Symbol_find(x) ){
+        ErrorMsg(psp->filename, psp->tokenlineno,
+          "Symbol \"%s\" already used", x);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_DECL_ERROR;
+      }else{
+        psp->tkclass = Symbol_new(x);
+        psp->tkclass->type = MULTITERMINAL;
+        psp->state = WAITING_FOR_CLASS_TOKEN;
+      }
+      break;
+    case WAITING_FOR_CLASS_TOKEN:
+      if( x[0]=='.' ){
+        psp->state = WAITING_FOR_DECL_OR_RULE;
+      }else if( ISUPPER(x[0]) || ((x[0]=='|' || x[0]=='/') && ISUPPER(x[1])) ){
+        struct symbol *msp = psp->tkclass;
+        msp->nsubsym++;
+        msp->subsym = (struct symbol **) realloc(msp->subsym,
+          sizeof(struct symbol*)*msp->nsubsym);
+        if( !ISUPPER(x[0]) ) x++;
+        msp->subsym[msp->nsubsym-1] = Symbol_new(x);
+      }else{
+        ErrorMsg(psp->filename, psp->tokenlineno,
+          "%%token_class argument \"%s\" should be a token", x);
+        psp->errorcnt++;
+        psp->state = RESYNC_AFTER_DECL_ERROR;
+      }
+      break;
+    case RESYNC_AFTER_RULE_ERROR:
+/*      if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+**      break; */
+    case RESYNC_AFTER_DECL_ERROR:
+      if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+      if( x[0]=='%' ) psp->state = WAITING_FOR_DECL_KEYWORD;
+      break;
+  }
+}
+
+/* The text in the input is part of the argument to an %ifdef or %ifndef.
+** Evaluate the text as a boolean expression.  Return true or false.
+*/
+static int eval_preprocessor_boolean(char *z, int lineno){
+  int neg = 0;
+  int res = 0;
+  int okTerm = 1;
+  int i;
+  for(i=0; z[i]!=0; i++){
+    if( ISSPACE(z[i]) ) continue;
+    if( z[i]=='!' ){
+      if( !okTerm ) goto pp_syntax_error;
+      neg = !neg;
+      continue;
+    }
+    if( z[i]=='|' && z[i+1]=='|' ){
+      if( okTerm ) goto pp_syntax_error;
+      if( res ) return 1;
+      i++;
+      okTerm = 1;
+      continue;
+    }
+    if( z[i]=='&' && z[i+1]=='&' ){
+      if( okTerm ) goto pp_syntax_error;
+      if( !res ) return 0;
+      i++;
+      okTerm = 1;
+      continue;
+    }
+    if( z[i]=='(' ){
+      int k;
+      int n = 1;
+      if( !okTerm ) goto pp_syntax_error;
+      for(k=i+1; z[k]; k++){
+        if( z[k]==')' ){
+          n--;
+          if( n==0 ){
+            z[k] = 0;
+            res = eval_preprocessor_boolean(&z[i+1], -1);
+            z[k] = ')';
+            if( res<0 ){
+              i = i-res;
+              goto pp_syntax_error;
+            }
+            i = k;
+            break;
+          }
+        }else if( z[k]=='(' ){
+          n++;
+        }else if( z[k]==0 ){
+          i = k;
+          goto pp_syntax_error;
+        }
+      }
+      if( neg ){
+        res = !res;
+        neg = 0;
+      }
+      okTerm = 0;
+      continue;
+    }
+    if( ISALPHA(z[i]) ){
+      int j, k, n;
+      if( !okTerm ) goto pp_syntax_error;
+      for(k=i+1; ISALNUM(z[k]) || z[k]=='_'; k++){}
+      n = k - i;
+      res = 0;
+      for(j=0; j<nDefine; j++){
+        if( strncmp(azDefine[j],&z[i],n)==0 && azDefine[j][n]==0 ){
+          res = 1;
+          break;
+        }
+      }
+      i = k-1;
+      if( neg ){
+        res = !res;
+        neg = 0;
+      }
+      okTerm = 0;
+      continue;
+    }
+    goto pp_syntax_error;
+  }
+  return res;
+
+pp_syntax_error:
+  if( lineno>0 ){
+    fprintf(stderr, "%%if syntax error on line %d.\n", lineno);
+    fprintf(stderr, "  %.*s <-- syntax error here\n", i+1, z);
+    exit(1);
+  }else{
+    return -(i+1);
+  }
+}
+
+/* Run the preprocessor over the input file text.  The global variables
+** azDefine[0] through azDefine[nDefine-1] contain the names of all defined
+** macros.  This routine looks for "%ifdef" and "%ifndef" and "%endif" and
+** comments them out.  Text in between is also commented out as appropriate.
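+**
+** For example (illustrative, not part of the upstream source), when the
+** command line contains -DDEBUG, the input
+**
+**     %ifdef DEBUG
+**     stmt ::= TRACE.
+**     %endif
+**
+** is passed through with only the directive lines blanked; without
+** -DDEBUG the enclosed lines are blanked as well.  Blanking replaces
+** every character except '\n' with a space, so line numbers in later
+** error messages stay correct.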
+*/
+static void preprocess_input(char *z){
+  int i, j, k;
+  int exclude = 0;
+  int start = 0;
+  int lineno = 1;
+  int start_lineno = 1;
+  for(i=0; z[i]; i++){
+    if( z[i]=='\n' ) lineno++;
+    if( z[i]!='%' || (i>0 && z[i-1]!='\n') ) continue;
+    if( strncmp(&z[i],"%endif",6)==0 && ISSPACE(z[i+6]) ){
+      if( exclude ){
+        exclude--;
+        if( exclude==0 ){
+          for(j=start; j<i; j++) if( z[j]!='\n' ) z[j] = ' ';
+        }
+      }
+      for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
+
+/* In spite of its name, this function is really a scanner.  It reads
+** in the entire input file (all at once) then tokenizes it.  Each
+** token is passed to the function "parseonetoken" which builds all
+** the appropriate data structures in the global state vector "gp".
+*/
+void Parse(struct lemon *gp)
+{
+  struct pstate ps;
+  FILE *fp;
+  char *filebuf;
+  unsigned int filesize;
+  int lineno;
+  int c;
+  char *cp, *nextcp;
+  int startline = 0;
+
+  memset(&ps, '\0', sizeof(ps));
+  ps.gp = gp;
+  ps.filename = gp->filename;
+  ps.errorcnt = 0;
+  ps.state = INITIALIZE;
+
+  /* Begin by reading the input file */
+  fp = fopen(ps.filename,"rb");
+  if( fp==0 ){
+    ErrorMsg(ps.filename,0,"Can't open this file for reading.");
+    gp->errorcnt++;
+    return;
+  }
+  fseek(fp,0,2);
+  filesize = ftell(fp);
+  rewind(fp);
+  filebuf = (char *)malloc( filesize+1 );
+  if( filesize>100000000 || filebuf==0 ){
+    ErrorMsg(ps.filename,0,"Input file too large.");
+    free(filebuf);
+    gp->errorcnt++;
+    fclose(fp);
+    return;
+  }
+  if( fread(filebuf,1,filesize,fp)!=filesize ){
+    ErrorMsg(ps.filename,0,"Can't read in all %d bytes of this file.",
+      filesize);
+    free(filebuf);
+    gp->errorcnt++;
+    fclose(fp);
+    return;
+  }
+  fclose(fp);
+  filebuf[filesize] = 0;
+
+  /* Make an initial pass through the file to handle %ifdef and %ifndef */
+  preprocess_input(filebuf);
+  if( gp->printPreprocessed ){
+    printf("%s\n", filebuf);
+    return;
+  }
+
+  /* Now scan the text of the input file */
+  lineno = 1;
+  for(cp=filebuf; (c= *cp)!=0; ){
+    if( c=='\n' ) lineno++;              /* Keep track of the line number */
+    if( ISSPACE(c) ){ cp++; continue; }  /* Skip all white space */
+    if( c=='/' && cp[1]=='/' ){          /* Skip C++ style comments */
+      cp+=2;
+      while( (c= *cp)!=0 && c!='\n' ) cp++;
+      continue;
+    }
+    if( c=='/' && cp[1]=='*' ){          /* Skip C style comments */
+      cp+=2;
+      if( (*cp)=='/' ) cp++;
+      while( (c= *cp)!=0 && (c!='/' || cp[-1]!='*') ){
+        if( c=='\n' ) lineno++;
+        cp++;
+      }
+      if( c ) cp++;
+      continue;
+    }
+    ps.tokenstart = cp;                /* Mark the beginning of the token */
+    ps.tokenlineno = lineno;           /* Linenumber on which token begins */
+    if( c=='\"' ){                     /* String literals */
+      cp++;
+      while( (c= *cp)!=0 && c!='\"' ){
+        if( c=='\n' ) lineno++;
+        cp++;
+      }
+      if( c==0 ){
+        ErrorMsg(ps.filename,startline,
+          "String starting on this line is not terminated before "
+          "the end of the file.");
+        ps.errorcnt++;
+        nextcp = cp;
+      }else{
+        nextcp = cp+1;
+      }
+    }else if( c=='{' ){               /* A block of C code */
+      int level;
+      cp++;
+      for(level=1; (c= *cp)!=0 && (level>1 || c!='}'); cp++){
+        if( c=='\n' ) lineno++;
+        else if( c=='{' ) level++;
+        else if( c=='}' ) level--;
+        else if( c=='/' && cp[1]=='*' ){  /* Skip comments */
+          int prevc;
+          cp = &cp[2];
+          prevc = 0;
+          while( (c= *cp)!=0 && (c!='/' || prevc!='*') ){
+            if( c=='\n' ) lineno++;
+            prevc = c;
+            cp++;
+          }
+        }else if( c=='/' && cp[1]=='/' ){  /* Skip C++ style comments too */
+          cp = &cp[2];
+          while( (c= *cp)!=0 && c!='\n' ) cp++;
+          if( c ) lineno++;
+        }else if( c=='\'' || c=='\"' ){    /* String and character literals */
+          int startchar, prevc;
+          startchar = c;
+          prevc = 0;
+          for(cp++; (c= *cp)!=0 && (c!=startchar || prevc=='\\'); cp++){
+            if( c=='\n' ) lineno++;
+            if( prevc=='\\' ) prevc = 0;
+            else prevc = c;
+          }
+        }
+      }
+      if( c==0 ){
+        ErrorMsg(ps.filename,ps.tokenlineno,
+          "C code starting on this line is not terminated before "
+          "the end of the file.");
+        ps.errorcnt++;
+        nextcp = cp;
+      }else{
+        nextcp = cp+1;
+      }
+    }else if( ISALNUM(c) ){          /* Identifiers */
+      while( (c= *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++;
+      nextcp = cp;
+    }else if( c==':' && cp[1]==':' && cp[2]=='=' ){ /* The operator "::=" */
+      cp += 3;
+      nextcp = cp;
+    }else if( (c=='/' || c=='|') &&
+              ISALPHA(cp[1]) ){
+      cp += 2;
+      while( (c = *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++;
+      nextcp = cp;
+    }else{                          /* All other (one character) operators */
+      cp++;
+      nextcp = cp;
+    }
+    c = *cp;
+    *cp = 0;                        /* Null terminate the token */
+    parseonetoken(&ps);             /* Parse the token */
+    *cp = (char)c;                  /* Restore the buffer */
+    cp = nextcp;
+  }
+  free(filebuf);                    /* Release the buffer after parsing */
+  gp->rule = ps.firstrule;
+  gp->errorcnt = ps.errorcnt;
+}
+/*************************** From the file "plink.c" *********************/
+/*
+** Routines processing configuration follow-set propagation links
+** in the LEMON parser generator.
+*/
+static struct plink *plink_freelist = 0;
+
+/* Allocate a new plink */
+struct plink *Plink_new(void){
+  struct plink *newlink;
+
+  if( plink_freelist==0 ){
+    int i;
+    int amt = 100;
+    plink_freelist = (struct plink *)calloc( amt, sizeof(struct plink) );
+    if( plink_freelist==0 ){
+      fprintf(stderr,
+      "Unable to allocate memory for a new follow-set propagation link.\n");
+      exit(1);
+    }
+    for(i=0; i<amt-1; i++) plink_freelist[i].next = &plink_freelist[i+1];
+    plink_freelist[amt-1].next = 0;
+  }
+  newlink = plink_freelist;
+  plink_freelist = plink_freelist->next;
+  return newlink;
+}
+
+/* Add a plink to a plink list */
+void Plink_add(struct plink **plpp, struct config *cfp)
+{
+  struct plink *newlink;
+  newlink = Plink_new();
+  newlink->next = *plpp;
+  *plpp = newlink;
+  newlink->cfp = cfp;
+}
+
+/* Transfer every plink on the list "from" to the list "to" */
+void Plink_copy(struct plink **to, struct plink *from)
+{
+  struct plink *nextpl;
+  while( from ){
+    nextpl = from->next;
+    from->next = *to;
+    *to = from;
+    from = nextpl;
+  }
+}
+
+/* Delete every plink on the list */
+void Plink_delete(struct plink *plp)
+{
+  struct plink *nextpl;
+
+  while( plp ){
+    nextpl = plp->next;
+    plp->next = plink_freelist;
+    plink_freelist = plp;
+    plp = nextpl;
+  }
+}
+/*********************** From the file "report.c" **************************/
+/*
+** Procedures for generating reports and tables in the LEMON parser generator.
+*/
+
+/* Generate a filename with the given suffix.  Space to hold the
+** name comes from malloc() and must be freed by the calling
+** function.
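+**
+** For example (illustrative, not part of the upstream source): for the
+** input file "gram.y" and suffix ".c" this returns "gram.c"; with
+** "-d./build" on the command line it returns "./build/gram.c", since
+** outputDir is prepended and the old extension is replaced by the
+** suffix.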
+*/
+PRIVATE char *file_makename(struct lemon *lemp, const char *suffix)
+{
+  char *name;
+  char *cp;
+  char *filename = lemp->filename;
+  int sz;
+
+  if( outputDir ){
+    cp = strrchr(filename, '/');
+    if( cp ) filename = cp + 1;
+  }
+  sz = lemonStrlen(filename);
+  sz += lemonStrlen(suffix);
+  if( outputDir ) sz += lemonStrlen(outputDir) + 1;
+  sz += 5;
+  name = (char*)malloc( sz );
+  if( name==0 ){
+    fprintf(stderr,"Can't allocate space for a filename.\n");
+    exit(1);
+  }
+  name[0] = 0;
+  if( outputDir ){
+    lemon_strcpy(name, outputDir);
+    lemon_strcat(name, "/");
+  }
+  lemon_strcat(name,filename);
+  cp = strrchr(name,'.');
+  if( cp ) *cp = 0;
+  lemon_strcat(name,suffix);
+  return name;
+}
+
+/* Open a file with a name based on the name of the input file,
+** but with a different (specified) suffix, and return a pointer
+** to the stream */
+PRIVATE FILE *file_open(
+  struct lemon *lemp,
+  const char *suffix,
+  const char *mode
+){
+  FILE *fp;
+
+  if( lemp->outname ) free(lemp->outname);
+  lemp->outname = file_makename(lemp, suffix);
+  fp = fopen(lemp->outname,mode);
+  if( fp==0 && *mode=='w' ){
+    fprintf(stderr,"Can't open file \"%s\".\n",lemp->outname);
+    lemp->errorcnt++;
+    return 0;
+  }
+  return fp;
+}
+
+/* Print the text of a rule
+*/
+void rule_print(FILE *out, struct rule *rp){
+  int i, j;
+  fprintf(out, "%s",rp->lhs->name);
+  /*    if( rp->lhsalias ) fprintf(out,"(%s)",rp->lhsalias); */
+  fprintf(out," ::=");
+  for(i=0; i<rp->nrhs; i++){
+    struct symbol *sp = rp->rhs[i];
+    if( sp->type==MULTITERMINAL ){
+      fprintf(out," %s", sp->subsym[0]->name);
+      for(j=1; j<sp->nsubsym; j++){
+        fprintf(out,"|%s", sp->subsym[j]->name);
+      }
+    }else{
+      fprintf(out," %s", sp->name);
+    }
+    /* if( rp->rhsalias[i] ) fprintf(out,"(%s)",rp->rhsalias[i]); */
+  }
+}
+
+/* Duplicate the input file without comments and without actions
+** on rules */
+void Reprint(struct lemon *lemp)
+{
+  struct rule *rp;
+  struct symbol *sp;
+  int i, j, maxlen, len, ncolumns, skip;
+  printf("// Reprint of input file \"%s\".\n// Symbols:\n",lemp->filename);
+  maxlen = 10;
+  for(i=0; i<lemp->nsymbol; i++){
+    sp = lemp->symbols[i];
+    len = lemonStrlen(sp->name);
+    if( len>maxlen ) maxlen = len;
+  }
+  ncolumns = 76/(maxlen+5);
+  if( ncolumns<1 ) ncolumns = 1;
+  skip = (lemp->nsymbol + ncolumns - 1)/ncolumns;
+  for(i=0; i<skip; i++){
+    printf("//");
+    for(j=i; j<lemp->nsymbol; j+=skip){
+      sp = lemp->symbols[j];
+      assert( sp->index==j );
+      printf(" %3d %-*.*s",j,maxlen,maxlen,sp->name);
+    }
+    printf("\n");
+  }
+  for(rp=lemp->rule; rp; rp=rp->next){
+    rule_print(stdout, rp);
+    printf(".");
+    if( rp->precsym ) printf(" [%s]",rp->precsym->name);
+    /* if( rp->code ) printf("\n    %s",rp->code); */
+    printf("\n");
+  }
+}
+
+/* Print a single rule.
+*/
+void RulePrint(FILE *fp, struct rule *rp, int iCursor){
+  struct symbol *sp;
+  int i, j;
+  fprintf(fp,"%s ::=",rp->lhs->name);
+  for(i=0; i<=rp->nrhs; i++){
+    if( i==iCursor ) fprintf(fp," *");
+    if( i==rp->nrhs ) break;
+    sp = rp->rhs[i];
+    if( sp->type==MULTITERMINAL ){
+      fprintf(fp," %s", sp->subsym[0]->name);
+      for(j=1; j<sp->nsubsym; j++){
+        fprintf(fp,"|%s",sp->subsym[j]->name);
+      }
+    }else{
+      fprintf(fp," %s", sp->name);
+    }
+  }
+}
+
+/* Print the rule for a configuration.
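+** For example (illustrative, not part of the upstream source), the
+** configuration for the rule "expr ::= expr PLUS expr" with the dot at
+** position 2 prints as
+**
+**     expr ::= expr PLUS * expr
+**
+** where "*" marks how much of the rule has been recognized so far.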
+*/
+void ConfigPrint(FILE *fp, struct config *cfp){
+  RulePrint(fp, cfp->rp, cfp->dot);
+}
+
+/* #define TEST */
+#if 0
+/* Print a set */
+PRIVATE void SetPrint(out,set,lemp)
+FILE *out;
+char *set;
+struct lemon *lemp;
+{
+  int i;
+  char *spacer;
+  spacer = "";
+  fprintf(out,"%12s[","");
+  for(i=0; i<lemp->nterminal; i++){
+    if( SetFind(set,i) ){
+      fprintf(out,"%s%s",spacer,lemp->symbols[i]->name);
+      spacer = " ";
+    }
+  }
+  fprintf(out,"]\n");
+}
+
+/* Print a plink chain */
+PRIVATE void PlinkPrint(out,plp,tag)
+FILE *out;
+struct plink *plp;
+char *tag;
+{
+  while( plp ){
+    fprintf(out,"%12s%s (state %2d) ","",tag,plp->cfp->stp->statenum);
+    ConfigPrint(out,plp->cfp);
+    fprintf(out,"\n");
+    plp = plp->next;
+  }
+}
+#endif
+
+/* Print an action to the given file descriptor.  Return FALSE if
+** nothing was actually printed.
+*/
+int PrintAction(
+  struct action *ap,          /* The action to print */
+  FILE *fp,                   /* Print the action here */
+  int indent                  /* Indent by this amount */
+){
+  int result = 1;
+  switch( ap->type ){
+    case SHIFT: {
+      struct state *stp = ap->x.stp;
+      fprintf(fp,"%*s shift        %-7d",indent,ap->sp->name,stp->statenum);
+      break;
+    }
+    case REDUCE: {
+      struct rule *rp = ap->x.rp;
+      fprintf(fp,"%*s reduce       %-7d",indent,ap->sp->name,rp->iRule);
+      RulePrint(fp, rp, -1);
+      break;
+    }
+    case SHIFTREDUCE: {
+      struct rule *rp = ap->x.rp;
+      fprintf(fp,"%*s shift-reduce %-7d",indent,ap->sp->name,rp->iRule);
+      RulePrint(fp, rp, -1);
+      break;
+    }
+    case ACCEPT:
+      fprintf(fp,"%*s accept",indent,ap->sp->name);
+      break;
+    case ERROR:
+      fprintf(fp,"%*s error",indent,ap->sp->name);
+      break;
+    case SRCONFLICT:
+    case RRCONFLICT:
+      fprintf(fp,"%*s reduce       %-7d ** Parsing conflict **",
+        indent,ap->sp->name,ap->x.rp->iRule);
+      break;
+    case SSCONFLICT:
+      fprintf(fp,"%*s shift        %-7d ** Parsing conflict **",
+        indent,ap->sp->name,ap->x.stp->statenum);
+      break;
+    case SH_RESOLVED:
+      if( showPrecedenceConflict ){
+        fprintf(fp,"%*s shift        %-7d -- dropped by precedence",
+          indent,ap->sp->name,ap->x.stp->statenum);
+      }else{
+        result = 0;
+      }
+      break;
+    case RD_RESOLVED:
+      if( showPrecedenceConflict ){
+        fprintf(fp,"%*s reduce       %-7d -- dropped by precedence",
+          indent,ap->sp->name,ap->x.rp->iRule);
+      }else{
+        result = 0;
+      }
+      break;
+    case NOT_USED:
+      result = 0;
+      break;
+  }
+  if( result && ap->spOpt ){
+    fprintf(fp,"  /* because %s==%s */", ap->sp->name, ap->spOpt->name);
+  }
+  return result;
+}
+
+/* Generate the "*.out" log file */
+void ReportOutput(struct lemon *lemp)
+{
+  int i, n;
+  struct state *stp;
+  struct config *cfp;
+  struct action *ap;
+  struct rule *rp;
+  FILE *fp;
+
+  fp = file_open(lemp,".out","wb");
+  if( fp==0 ) return;
+  for(i=0; i<lemp->nxstate; i++){
+    stp = lemp->sorted[i];
+    fprintf(fp,"State %d:\n",stp->statenum);
+    if( lemp->basisflag ) cfp=stp->bp;
+    else                  cfp=stp->cfp;
+    while( cfp ){
+      char buf[20];
+      if( cfp->dot==cfp->rp->nrhs ){
+        lemon_sprintf(buf,"(%d)",cfp->rp->iRule);
+        fprintf(fp,"    %5s ",buf);
+      }else{
+        fprintf(fp,"          ");
+      }
+      ConfigPrint(fp,cfp);
+      fprintf(fp,"\n");
+#if 0
+      SetPrint(fp,cfp->fws,lemp);
+      PlinkPrint(fp,cfp->fplp,"To  ");
+      PlinkPrint(fp,cfp->bplp,"From");
+#endif
+      if( lemp->basisflag ) cfp=cfp->bp;
+      else                  cfp=cfp->next;
+    }
+    fprintf(fp,"\n");
+    for(ap=stp->ap; ap; ap=ap->next){
+      if( PrintAction(ap,fp,30) ) fprintf(fp,"\n");
+    }
+    fprintf(fp,"\n");
+  }
+  fprintf(fp, "----------------------------------------------------\n");
+  fprintf(fp, "Symbols:\n");
+  fprintf(fp, "The first-set of non-terminals is shown after the name.\n\n");
+  for(i=0; i<lemp->nsymbol; i++){
+    int j;
+    struct symbol *sp;
+
+    sp = lemp->symbols[i];
+    fprintf(fp, "  %3d: %s", i, sp->name);
+    if( sp->type==NONTERMINAL ){
+      fprintf(fp, ":");
+      if( sp->lambda ){
+        fprintf(fp, " <lambda>");
+      }
+      for(j=0; j<lemp->nterminal; j++){
+        if( sp->firstset && SetFind(sp->firstset, j) ){
+          fprintf(fp, " %s", lemp->symbols[j]->name);
+        }
+      }
+    }
+    if( sp->prec>=0 ) fprintf(fp," (precedence=%d)", sp->prec);
+    fprintf(fp, "\n");
+  }
+  fprintf(fp, "----------------------------------------------------\n");
+  fprintf(fp, "Syntax-only Symbols:\n");
+  fprintf(fp, "The following symbols never carry semantic content.\n\n");
+  for(i=n=0; i<lemp->nsymbol; i++){
+    int w;
+    struct symbol *sp = lemp->symbols[i];
+    if( sp->bContent ) continue;
+    w = (int)strlen(sp->name);
+    if( n>0 && n+w>75 ){
+      fprintf(fp,"\n");
+      n = 0;
+    }
+    if( n>0 ){
+      fprintf(fp, " ");
+      n++;
+    }
+    fprintf(fp, "%s", sp->name);
+    n += w;
+  }
+  if( n>0 ) fprintf(fp, "\n");
+  fprintf(fp, "----------------------------------------------------\n");
+  fprintf(fp, "Rules:\n");
+  for(rp=lemp->rule; rp; rp=rp->next){
+    fprintf(fp, "%4d: ", rp->iRule);
+    rule_print(fp, rp);
+    fprintf(fp,".");
+    if( rp->precsym ){
+      fprintf(fp," [%s precedence=%d]",
+              rp->precsym->name, rp->precsym->prec);
+    }
+    fprintf(fp,"\n");
+  }
+  fclose(fp);
+  return;
+}
+
+/* Search for the file "name" which is in the same directory as
+** the executable */
+PRIVATE char *pathsearch(char *argv0, char *name, int modemask)
+{
+  const char *pathlist;
+  char *pathbufptr = 0;
+  char *pathbuf = 0;
+  char *path,*cp;
+  char c;
+
+#ifdef __WIN32__
+  cp = strrchr(argv0,'\\');
+#else
+  cp = strrchr(argv0,'/');
+#endif
+  if( cp ){
+    c = *cp;
+    *cp = 0;
+    path = (char *)malloc( lemonStrlen(argv0) + lemonStrlen(name) + 2 );
+    if( path ) lemon_sprintf(path,"%s/%s",argv0,name);
+    *cp = c;
+  }else{
+    pathlist = getenv("PATH");
+    if( pathlist==0 ) pathlist = ".:/bin:/usr/bin";
+    pathbuf = (char *) malloc( lemonStrlen(pathlist) + 1 );
+    path = (char *)malloc( lemonStrlen(pathlist)+lemonStrlen(name)+2 );
+    if( (pathbuf != 0) && (path!=0) ){
+      pathbufptr = pathbuf;
+      lemon_strcpy(pathbuf, pathlist);
+      while( *pathbuf ){
+        cp = strchr(pathbuf,':');
+        if( cp==0 ) cp = &pathbuf[lemonStrlen(pathbuf)];
+        c = *cp;
+        *cp = 0;
+        lemon_sprintf(path,"%s/%s",pathbuf,name);
+        *cp = c;
+        if( c==0 ) pathbuf[0] = 0;
+        else pathbuf = &cp[1];
+        if( access(path,modemask)==0 ) break;
+      }
+    }
+    free(pathbufptr);
+  }
+  return path;
+}
+
+/* Given an action, compute the integer value for that action
+** which is to be put in the action table of the generated machine.
+** Return negative if no action should be generated.
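+**
+** The numbering scheme is the one set up at the top of ReportTable().
+** As an illustrative layout (assuming nstate==100 and nrule==50, numbers
+** chosen here for the example only): SHIFT actions use the state number
+** 0..99, SHIFTREDUCE actions use minShiftReduce+iRule = 100..149, the
+** error action is 150, the accept action is 151, 152 is reserved as the
+** no-action marker, and REDUCE actions use minReduce+iRule = 153..202.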
+*/
+PRIVATE int compute_action(struct lemon *lemp, struct action *ap)
+{
+  int act;
+  switch( ap->type ){
+    case SHIFT:  act = ap->x.stp->statenum;                        break;
+    case SHIFTREDUCE: {
+      /* Since a SHIFT is inherent after a prior REDUCE, convert any
+      ** SHIFTREDUCE action with a nonterminal on the LHS into a simple
+      ** REDUCE action: */
+      if( ap->sp->index>=lemp->nterminal
+       && (lemp->errsym==0 || ap->sp->index!=lemp->errsym->index)
+      ){
+        act = lemp->minReduce + ap->x.rp->iRule;
+      }else{
+        act = lemp->minShiftReduce + ap->x.rp->iRule;
+      }
+      break;
+    }
+    case REDUCE: act = lemp->minReduce + ap->x.rp->iRule;          break;
+    case ERROR:  act = lemp->errAction;                            break;
+    case ACCEPT: act = lemp->accAction;                            break;
+    default:     act = -1;                                         break;
+  }
+  return act;
+}
+
+#define LINESIZE 1000
+/* The next cluster of routines is for reading the template file
+** and writing the results to the generated parser */
+/* The first function transfers data from "in" to "out" until
+** a line is seen which begins with "%%".  The line number is
+** tracked.
+**
+** if name!=0, then any word that begins with "Parse" is changed to
+** begin with *name instead.
+*/
+PRIVATE void tplt_xfer(char *name, FILE *in, FILE *out, int *lineno)
+{
+  int i, iStart;
+  char line[LINESIZE];
+  while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
+    (*lineno)++;
+    iStart = 0;
+    if( name ){
+      for(i=0; line[i]; i++){
+        if( line[i]=='P' && strncmp(&line[i],"Parse",5)==0
+          && (i==0 || !ISALPHA(line[i-1]))
+        ){
+          if( i>iStart ) fprintf(out,"%.*s",i-iStart,&line[iStart]);
+          fprintf(out,"%s",name);
+          i += 4;
+          iStart = i+1;
+        }
+      }
+    }
+    fprintf(out,"%s",&line[iStart]);
+  }
+}
+
+/* Skip forward past the header of the template file to the first "%%"
+*/
+PRIVATE void tplt_skip_header(FILE *in, int *lineno)
+{
+  char line[LINESIZE];
+  while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
+    (*lineno)++;
+  }
+}
+
+/* The next function finds the template file and opens it, returning
+** a pointer to the opened file. */
+PRIVATE FILE *tplt_open(struct lemon *lemp)
+{
+  static char templatename[] = "lempar.c";
+  char buf[1000];
+  FILE *in;
+  char *tpltname;
+  char *toFree = 0;
+  char *cp;
+
+  /* first, see if user specified a template filename on the command line. */
+  if (user_templatename != 0) {
+    if( access(user_templatename,004)==-1 ){
+      fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
+        user_templatename);
+      lemp->errorcnt++;
+      return 0;
+    }
+    in = fopen(user_templatename,"rb");
+    if( in==0 ){
+      fprintf(stderr,"Can't open the template file \"%s\".\n",
+        user_templatename);
+      lemp->errorcnt++;
+      return 0;
+    }
+    return in;
+  }
+
+  cp = strrchr(lemp->filename,'.');
+  if( cp ){
+    lemon_sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
+  }else{
+    lemon_sprintf(buf,"%s.lt",lemp->filename);
+  }
+  if( access(buf,004)==0 ){
+    tpltname = buf;
+  }else if( access(templatename,004)==0 ){
+    tpltname = templatename;
+  }else{
+    toFree = tpltname = pathsearch(lemp->argv0,templatename,0);
+  }
+  if( tpltname==0 ){
+    fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
+    templatename);
+    lemp->errorcnt++;
+    return 0;
+  }
+  in = fopen(tpltname,"rb");
+  if( in==0 ){
+    fprintf(stderr,"Can't open the template file \"%s\".\n",tpltname);
+    lemp->errorcnt++;
+  }
+  free(toFree);
+  return in;
+}
+
+/* Print a #line directive line to the output file.
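+** For example (illustrative, not part of the upstream source), a call
+** with lineno 42 and a Windows-style filename emits
+**     #line 42 "sub\\dir\\gram.y"
+** doubling each backslash in the name so that the C compiler reading
+** the generated file sees the intended path.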
*/ +PRIVATE void tplt_linedir(FILE *out, int lineno, char *filename) +{ + fprintf(out,"#line %d \"",lineno); + while( *filename ){ + if( *filename == '\\' ) putc('\\',out); + putc(*filename,out); + filename++; + } + fprintf(out,"\"\n"); +} + +/* Print a string to the file and keep the linenumber up to date */ +PRIVATE void tplt_print(FILE *out, struct lemon *lemp, char *str, int *lineno) +{ + if( str==0 ) return; + while( *str ){ + putc(*str,out); + if( *str=='\n' ) (*lineno)++; + str++; + } + if( str[-1]!='\n' ){ + putc('\n',out); + (*lineno)++; + } + if (!lemp->nolinenosflag) { + (*lineno)++; tplt_linedir(out,*lineno,lemp->outname); + } + return; +} + +/* +** The following routine emits code for the destructor for the +** symbol sp +*/ +void emit_destructor_code( + FILE *out, + struct symbol *sp, + struct lemon *lemp, + int *lineno +){ + char *cp = 0; + + if( sp->type==TERMINAL ){ + cp = lemp->tokendest; + if( cp==0 ) return; + fprintf(out,"{\n"); (*lineno)++; + }else if( sp->destructor ){ + cp = sp->destructor; + fprintf(out,"{\n"); (*lineno)++; + if( !lemp->nolinenosflag ){ + (*lineno)++; + tplt_linedir(out,sp->destLineno,lemp->filename); + } + }else if( lemp->vardest ){ + cp = lemp->vardest; + if( cp==0 ) return; + fprintf(out,"{\n"); (*lineno)++; + }else{ + assert( 0 ); /* Cannot happen */ + } + for(; *cp; cp++){ + if( *cp=='$' && cp[1]=='$' ){ + fprintf(out,"(yypminor->yy%d)",sp->dtnum); + cp++; + continue; + } + if( *cp=='\n' ) (*lineno)++; + fputc(*cp,out); + } + fprintf(out,"\n"); (*lineno)++; + if (!lemp->nolinenosflag) { + (*lineno)++; tplt_linedir(out,*lineno,lemp->outname); + } + fprintf(out,"}\n"); (*lineno)++; + return; +} + +/* +** Return TRUE (non-zero) if the given symbol has a destructor. +*/ +int has_destructor(struct symbol *sp, struct lemon *lemp) +{ + int ret; + if( sp->type==TERMINAL ){ + ret = lemp->tokendest!=0; + }else{ + ret = lemp->vardest!=0 || sp->destructor!=0; + } + return ret; +} + +/* +** Append text to a dynamically allocated string. If zText is 0 then +** reset the string to be empty again. Always return the complete text +** of the string (which is overwritten with each call). +** +** n bytes of zText are stored. If n==0 then all of zText up to the first +** \000 terminator is stored. zText can contain up to two instances of +** %d. The values of p1 and p2 are written into the first and second +** %d. +** +** If n==-1, then the previous character is overwritten. +*/ +PRIVATE char *append_str(const char *zText, int n, int p1, int p2){ + static char empty[1] = { 0 }; + static char *z = 0; + static int alloced = 0; + static int used = 0; + int c; + char zInt[40]; + if( zText==0 ){ + if( used==0 && z!=0 ) z[0] = 0; + used = 0; + return z; + } + if( n<=0 ){ + if( n<0 ){ + used += n; + assert( used>=0 ); + } + n = lemonStrlen(zText); + } + if( (int) (n+sizeof(zInt)*2+used) >= alloced ){ + alloced = n + sizeof(zInt)*2 + used + 200; + z = (char *) realloc(z, alloced); + } + if( z==0 ) return empty; + while( n-- > 0 ){ + c = *(zText++); + if( c=='%' && n>0 && zText[0]=='d' ){ + lemon_sprintf(zInt, "%d", p1); + p1 = p2; + lemon_strcpy(&z[used], zInt); + used += lemonStrlen(&z[used]); + zText++; + n--; + }else{ + z[used++] = (char)c; + } + } + z[used] = 0; + return z; +} + +/* +** Write and transform the rp->code string so that symbols are expanded. +** Populate the rp->codePrefix and rp->codeSuffix strings, as appropriate. +** +** Return 1 if the expanded code requires that "yylhsminor" local variable +** to be defined. 
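+**
+** For example (illustrative, not part of the upstream source), for the
+** rule "expr(A) ::= expr(B) PLUS term(C). { A = B + C; }" the labels in
+** the action are rewritten roughly as
+**     yylhsminor.yyN = yymsp[-2].minor.yyN + yymsp[0].minor.yyN;
+** (where yyN stands for the symbol's dtnum field), and a suffix
+** "yymsp[-2].minor.yyN = yylhsminor.yyN;" is generated, because the LHS
+** label A does not reuse the left-most RHS label B.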
+*/
+PRIVATE int translate_code(struct lemon *lemp, struct rule *rp){
+  char *cp, *xp;
+  int i;
+  int rc = 0;            /* True if yylhsminor is used */
+  int dontUseRhs0 = 0;   /* If true, use of left-most RHS label is illegal */
+  const char *zSkip = 0; /* The zOvwrt comment within rp->code, or NULL */
+  char lhsused = 0;      /* True if the LHS element has been used */
+  char lhsdirect;        /* True if LHS writes directly into stack */
+  char used[MAXRHS];     /* True for each RHS element which is used */
+  char zLhs[50];         /* Convert the LHS symbol into this string */
+  char zOvwrt[900];      /* Comment that to allow LHS to overwrite RHS */
+
+  for(i=0; i<rp->nrhs; i++) used[i] = 0;
+  lhsused = 0;
+
+  if( rp->code==0 ){
+    static char newlinestr[2] = { '\n', '\0' };
+    rp->code = newlinestr;
+    rp->line = rp->ruleline;
+    rp->noCode = 1;
+  }else{
+    rp->noCode = 0;
+  }
+
+
+  if( rp->nrhs==0 ){
+    /* If there are no RHS symbols, then writing directly to the LHS is ok */
+    lhsdirect = 1;
+  }else if( rp->rhsalias[0]==0 ){
+    /* The left-most RHS symbol has no value.  LHS direct is ok.  But
+    ** we have to call the distructor on the RHS symbol first. */
+    lhsdirect = 1;
+    if( has_destructor(rp->rhs[0],lemp) ){
+      append_str(0,0,0,0);
+      append_str("  yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0,
+                 rp->rhs[0]->index,1-rp->nrhs);
+      rp->codePrefix = Strsafe(append_str(0,0,0,0));
+      rp->noCode = 0;
+    }
+  }else if( rp->lhsalias==0 ){
+    /* There is no LHS value symbol. */
+    lhsdirect = 1;
+  }else if( strcmp(rp->lhsalias,rp->rhsalias[0])==0 ){
+    /* The LHS symbol and the left-most RHS symbol are the same, so
+    ** direct writing is allowed */
+    lhsdirect = 1;
+    lhsused = 1;
+    used[0] = 1;
+    if( rp->lhs->dtnum!=rp->rhs[0]->dtnum ){
+      ErrorMsg(lemp->filename,rp->ruleline,
+        "%s(%s) and %s(%s) share the same label but have "
+        "different datatypes.",
+        rp->lhs->name, rp->lhsalias, rp->rhs[0]->name, rp->rhsalias[0]);
+      lemp->errorcnt++;
+    }
+  }else{
+    lemon_sprintf(zOvwrt, "/*%s-overwrites-%s*/",
+                  rp->lhsalias, rp->rhsalias[0]);
+    zSkip = strstr(rp->code, zOvwrt);
+    if( zSkip!=0 ){
+      /* The code contains a special comment that indicates that it is safe
+      ** for the LHS label to overwrite left-most RHS label. */
+      lhsdirect = 1;
+    }else{
+      lhsdirect = 0;
+    }
+  }
+  if( lhsdirect ){
+    sprintf(zLhs, "yymsp[%d].minor.yy%d",1-rp->nrhs,rp->lhs->dtnum);
+  }else{
+    rc = 1;
+    sprintf(zLhs, "yylhsminor.yy%d",rp->lhs->dtnum);
+  }
+
+  append_str(0,0,0,0);
+
+  /* This const cast is wrong but harmless, if we're careful.
+*/
+  for(cp=(char *)rp->code; *cp; cp++){
+    if( cp==zSkip ){
+      append_str(zOvwrt,0,0,0);
+      cp += lemonStrlen(zOvwrt)-1;
+      dontUseRhs0 = 1;
+      continue;
+    }
+    if( ISALPHA(*cp) && (cp==rp->code || (!ISALNUM(cp[-1]) && cp[-1]!='_')) ){
+      char saved;
+      for(xp= &cp[1]; ISALNUM(*xp) || *xp=='_'; xp++);
+      saved = *xp;
+      *xp = 0;
+      if( rp->lhsalias && strcmp(cp,rp->lhsalias)==0 ){
+        append_str(zLhs,0,0,0);
+        cp = xp;
+        lhsused = 1;
+      }else{
+        for(i=0; i<rp->nrhs; i++){
+          if( rp->rhsalias[i] && strcmp(cp,rp->rhsalias[i])==0 ){
+            if( i==0 && dontUseRhs0 ){
+              ErrorMsg(lemp->filename,rp->ruleline,
+                 "Label %s used after '%s'.",
+                 rp->rhsalias[0], zOvwrt);
+              lemp->errorcnt++;
+            }else if( cp!=rp->code && cp[-1]=='@' ){
+              /* If the argument is of the form @X then substitute
+              ** the token number of X, not the value of X */
+              append_str("yymsp[%d].major",-1,i-rp->nrhs+1,0);
+            }else{
+              struct symbol *sp = rp->rhs[i];
+              int dtnum;
+              if( sp->type==MULTITERMINAL ){
+                dtnum = sp->subsym[0]->dtnum;
+              }else{
+                dtnum = sp->dtnum;
+              }
+              append_str("yymsp[%d].minor.yy%d",0,i-rp->nrhs+1, dtnum);
+            }
+            cp = xp;
+            used[i] = 1;
+            break;
+          }
+        }
+      }
+      *xp = saved;
+    }
+    append_str(cp, 1, 0, 0);
+  } /* End loop */
+
+  /* Main code generation completed */
+  cp = append_str(0,0,0,0);
+  if( cp && cp[0] ) rp->code = Strsafe(cp);
+  append_str(0,0,0,0);
+
+  /* Check to make sure the LHS has been used */
+  if( rp->lhsalias && !lhsused ){
+    ErrorMsg(lemp->filename,rp->ruleline,
+      "Label \"%s\" for \"%s(%s)\" is never used.",
+        rp->lhsalias,rp->lhs->name,rp->lhsalias);
+    lemp->errorcnt++;
+  }
+
+  /* Generate destructor code for RHS minor values which are not referenced.
+  ** Generate error messages for unused labels and duplicate labels.
+  */
+  for(i=0; i<rp->nrhs; i++){
+    if( rp->rhsalias[i] ){
+      if( i>0 ){
+        int j;
+        if( rp->lhsalias && strcmp(rp->lhsalias,rp->rhsalias[i])==0 ){
+          ErrorMsg(lemp->filename,rp->ruleline,
+            "%s(%s) has the same label as the LHS but is not the left-most "
+            "symbol on the RHS.",
+            rp->rhs[i]->name, rp->rhsalias[i]);
+          lemp->errorcnt++;
+        }
+        for(j=0; j<i; j++){
+          if( rp->rhsalias[j] && strcmp(rp->rhsalias[j],rp->rhsalias[i])==0 ){
+            ErrorMsg(lemp->filename,rp->ruleline,
+              "Label %s used for multiple symbols on the RHS of a rule.",
+              rp->rhsalias[i]);
+            lemp->errorcnt++;
+            break;
+          }
+        }
+      }
+      if( !used[i] ){
+        ErrorMsg(lemp->filename,rp->ruleline,
+          "Label %s for \"%s(%s)\" is never used.",
+          rp->rhsalias[i],rp->rhs[i]->name,rp->rhsalias[i]);
+        lemp->errorcnt++;
+      }
+    }else if( i>0 && has_destructor(rp->rhs[i],lemp) ){
+      append_str("  yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0,
+         rp->rhs[i]->index,i-rp->nrhs+1);
+    }
+  }
+
+  /* If unable to write LHS values directly into the stack, write the
+  ** saved LHS value now. */
+  if( lhsdirect==0 ){
+    append_str("  yymsp[%d].minor.yy%d = ", 0, 1-rp->nrhs, rp->lhs->dtnum);
+    append_str(zLhs, 0, 0, 0);
+    append_str(";\n", 0, 0, 0);
+  }
+
+  /* Suffix code generation complete */
+  cp = append_str(0,0,0,0);
+  if( cp && cp[0] ){
+    rp->codeSuffix = Strsafe(cp);
+    rp->noCode = 0;
+  }
+
+  return rc;
+}
+
+/*
+** Generate code which executes when the rule "rp" is reduced.  Write
+** the code to "out".  Make sure lineno stays up-to-date.
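+**
+** The emitted shape is roughly (an illustration only; line numbers and
+** file names below are placeholders):
+**     {   ...codePrefix...
+**     #line 42 "gram.y"
+**     {   ...user action code... }
+**     #line 1057 "gram.c"
+**     ...codeSuffix...  }
+** assuming the #line directives have not been disabled.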
+*/
+PRIVATE void emit_code(
+  FILE *out,
+  struct rule *rp,
+  struct lemon *lemp,
+  int *lineno
+){
+  const char *cp;
+
+  /* Setup code prior to the #line directive */
+  if( rp->codePrefix && rp->codePrefix[0] ){
+    fprintf(out, "{%s", rp->codePrefix);
+    for(cp=rp->codePrefix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
+  }
+
+  /* Generate code to do the reduce action */
+  if( rp->code ){
+    if( !lemp->nolinenosflag ){
+      (*lineno)++;
+      tplt_linedir(out,rp->line,lemp->filename);
+    }
+    fprintf(out,"{%s",rp->code);
+    for(cp=rp->code; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
+    fprintf(out,"}\n"); (*lineno)++;
+    if( !lemp->nolinenosflag ){
+      (*lineno)++;
+      tplt_linedir(out,*lineno,lemp->outname);
+    }
+  }
+
+  /* Generate breakdown code that occurs after the #line directive */
+  if( rp->codeSuffix && rp->codeSuffix[0] ){
+    fprintf(out, "%s", rp->codeSuffix);
+    for(cp=rp->codeSuffix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
+  }
+
+  if( rp->codePrefix ){
+    fprintf(out, "}\n"); (*lineno)++;
+  }
+
+  return;
+}
+
+/*
+** Print the definition of the union used for the parser's data stack.
+** This union contains fields for every possible data type for tokens
+** and nonterminals.  In the process of computing and printing this
+** union, also set the ".dtnum" field of every terminal and nonterminal
+** symbol.
+*/
+void print_stack_union(
+  FILE *out,                  /* The output stream */
+  struct lemon *lemp,         /* The main info structure for this parser */
+  int *plineno,               /* Pointer to the line number */
+  int mhflag                  /* True if generating makeheaders output */
+){
+  int lineno;               /* The line number of the output */
+  char **types;             /* A hash table of datatypes */
+  int arraysize;            /* Size of the "types" array */
+  int maxdtlength;          /* Maximum length of any ".datatype" field. */
+  char *stddt;              /* Standardized name for a datatype */
+  int i,j;                  /* Loop counters */
+  unsigned hash;            /* For hashing the name of a type */
+  const char *name;         /* Name of the parser */
+
+  /* Allocate and initialize types[] and allocate stddt[] */
+  arraysize = lemp->nsymbol * 2;
+  types = (char**)calloc( arraysize, sizeof(char*) );
+  if( types==0 ){
+    fprintf(stderr,"Out of memory.\n");
+    exit(1);
+  }
+  for(i=0; i<arraysize; i++) types[i] = 0;
+  maxdtlength = 0;
+  if( lemp->vartype ){
+    maxdtlength = lemonStrlen(lemp->vartype);
+  }
+  for(i=0; i<lemp->nsymbol; i++){
+    int len;
+    struct symbol *sp = lemp->symbols[i];
+    if( sp->datatype==0 ) continue;
+    len = lemonStrlen(sp->datatype);
+    if( len>maxdtlength ) maxdtlength = len;
+  }
+  stddt = (char*)malloc( maxdtlength*2 + 1 );
+  if( stddt==0 ){
+    fprintf(stderr,"Out of memory.\n");
+    exit(1);
+  }
+
+  /* Build a hash table of datatypes. The ".dtnum" field of each symbol
+  ** is filled in with the hash index plus 1.  A ".dtnum" value of 0 is
+  ** used for terminal symbols.  If there is no %default_type defined then
+  ** 0 is also used as the .dtnum value for nonterminals which do not specify
+  ** a datatype using the %type directive.
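+**
+** For example (illustrative, not part of the upstream source), if the
+** grammar declares
+**     %type expr {int}
+**     %type id   {char*}
+** then YYMINORTYPE gains one "int" field and one "char*" field, and
+** every nonterminal whose normalized %type string matches an existing
+** entry shares the same yyN member.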
+  */
+  for(i=0; i<lemp->nsymbol; i++){
+    struct symbol *sp = lemp->symbols[i];
+    char *cp;
+    if( sp==lemp->errsym ){
+      sp->dtnum = arraysize+1;
+      continue;
+    }
+    if( sp->type!=NONTERMINAL || (sp->datatype==0 && lemp->vartype==0) ){
+      sp->dtnum = 0;
+      continue;
+    }
+    cp = sp->datatype;
+    if( cp==0 ) cp = lemp->vartype;
+    j = 0;
+    while( ISSPACE(*cp) ) cp++;
+    while( *cp ) stddt[j++] = *cp++;
+    while( j>0 && ISSPACE(stddt[j-1]) ) j--;
+    stddt[j] = 0;
+    if( lemp->tokentype && strcmp(stddt, lemp->tokentype)==0 ){
+      sp->dtnum = 0;
+      continue;
+    }
+    hash = 0;
+    for(j=0; stddt[j]; j++){
+      hash = hash*53 + stddt[j];
+    }
+    hash = (hash & 0x7fffffff)%arraysize;
+    while( types[hash] ){
+      if( strcmp(types[hash],stddt)==0 ){
+        sp->dtnum = hash + 1;
+        break;
+      }
+      hash++;
+      if( hash>=(unsigned)arraysize ) hash = 0;
+    }
+    if( types[hash]==0 ){
+      sp->dtnum = hash + 1;
+      types[hash] = (char*)malloc( lemonStrlen(stddt)+1 );
+      if( types[hash]==0 ){
+        fprintf(stderr,"Out of memory.\n");
+        exit(1);
+      }
+      lemon_strcpy(types[hash],stddt);
+    }
+  }
+
+  /* Print out the definition of YYTOKENTYPE and YYMINORTYPE */
+  name = lemp->name ? lemp->name : "Parse";
+  lineno = *plineno;
+  if( mhflag ){ fprintf(out,"#if INTERFACE\n"); lineno++; }
+  fprintf(out,"#define %sTOKENTYPE %s\n",name,
+    lemp->tokentype?lemp->tokentype:"void*");  lineno++;
+  if( mhflag ){ fprintf(out,"#endif\n"); lineno++; }
+  fprintf(out,"typedef union {\n"); lineno++;
+  fprintf(out,"  int yyinit;\n"); lineno++;
+  fprintf(out,"  %sTOKENTYPE yy0;\n",name); lineno++;
+  for(i=0; i<arraysize; i++){
+    if( types[i]==0 ) continue;
+    fprintf(out,"  %s yy%d;\n",types[i],i+1); lineno++;
+    free(types[i]);
+  }
+  if( lemp->errsym && lemp->errsym->useCnt ){
+    fprintf(out,"  int yy%d;\n",lemp->errsym->dtnum); lineno++;
+  }
+  free(stddt);
+  free(types);
+  fprintf(out,"} YYMINORTYPE;\n"); lineno++;
+  *plineno = lineno;
+}
+
+/*
+** Return the name of a C datatype able to represent values between
+** lwr and upr, inclusive.  If pnByte!=NULL then also write the sizeof
+** for that type (1, 2, or 4) into *pnByte.
+*/
+static const char *minimum_size_type(int lwr, int upr, int *pnByte){
+  const char *zType = "int";
+  int nByte = 4;
+  if( lwr>=0 ){
+    if( upr<=255 ){
+      zType = "unsigned char";
+      nByte = 1;
+    }else if( upr<65535 ){
+      zType = "unsigned short int";
+      nByte = 2;
+    }else{
+      zType = "unsigned int";
+      nByte = 4;
+    }
+  }else if( lwr>=-127 && upr<=127 ){
+    zType = "signed char";
+    nByte = 1;
+  }else if( lwr>=-32767 && upr<32767 ){
+    zType = "short";
+    nByte = 2;
+  }
+  if( pnByte ) *pnByte = nByte;
+  return zType;
+}
+
+/*
+** Each state contains a set of token transactions and a set of
+** nonterminal transactions.  Each of these sets makes an instance
+** of the following structure.  An array of these structures is used
+** to order the creation of entries in the yy_action[] table.
+*/
+struct axset {
+  struct state *stp;   /* A pointer to a state */
+  int isTkn;           /* True to use tokens.  False for non-terminals */
+  int nAction;         /* Number of actions */
+  int iOrder;          /* Original order of action sets */
+};
+
+/*
+** Compare two axset structures for sorting purposes
+*/
+static int axset_compare(const void *a, const void *b){
+  struct axset *p1 = (struct axset*)a;
+  struct axset *p2 = (struct axset*)b;
+  int c;
+  c = p2->nAction - p1->nAction;
+  if( c==0 ){
+    c = p1->iOrder - p2->iOrder;
+  }
+  assert( c!=0 || p1==p2 );
+  return c;
+}
+
+/*
+** Write text on "out" that describes the rule "rp".
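+** For example (illustrative, not part of the upstream source), a rule
+** whose second RHS symbol is the token class built from ID and STRING
+** is written as
+**     stmt ::= PRINT ID|STRING
+** with the subsymbols of a MULTITERMINAL joined by '|'.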
+*/
+static void writeRuleText(FILE *out, struct rule *rp){
+  int j;
+  fprintf(out,"%s ::=", rp->lhs->name);
+  for(j=0; j<rp->nrhs; j++){
+    struct symbol *sp = rp->rhs[j];
+    if( sp->type!=MULTITERMINAL ){
+      fprintf(out," %s", sp->name);
+    }else{
+      int k;
+      fprintf(out," %s", sp->subsym[0]->name);
+      for(k=1; k<sp->nsubsym; k++){
+        fprintf(out,"|%s",sp->subsym[k]->name);
+      }
+    }
+  }
+}
+
+
+/* Generate C source code for the parser */
+void ReportTable(
+  struct lemon *lemp,
+  int mhflag,     /* Output in makeheaders format if true */
+  int sqlFlag     /* Generate the *.sql file too */
+){
+  FILE *out, *in, *sql;
+  int lineno;
+  struct state *stp;
+  struct action *ap;
+  struct rule *rp;
+  struct acttab *pActtab;
+  int i, j, n, sz;
+  int nLookAhead;
+  int szActionType;     /* sizeof(YYACTIONTYPE) */
+  int szCodeType;       /* sizeof(YYCODETYPE)   */
+  const char *name;
+  int mnTknOfst, mxTknOfst;
+  int mnNtOfst, mxNtOfst;
+  struct axset *ax;
+  char *prefix;
+
+  lemp->minShiftReduce = lemp->nstate;
+  lemp->errAction = lemp->minShiftReduce + lemp->nrule;
+  lemp->accAction = lemp->errAction + 1;
+  lemp->noAction = lemp->accAction + 1;
+  lemp->minReduce = lemp->noAction + 1;
+  lemp->maxAction = lemp->minReduce + lemp->nrule;
+
+  in = tplt_open(lemp);
+  if( in==0 ) return;
+  out = file_open(lemp,".c","wb");
+  if( out==0 ){
+    fclose(in);
+    return;
+  }
+  if( sqlFlag==0 ){
+    sql = 0;
+  }else{
+    sql = file_open(lemp, ".sql", "wb");
+    if( sql==0 ){
+      fclose(in);
+      fclose(out);
+      return;
+    }
+    fprintf(sql,
+       "BEGIN;\n"
+       "CREATE TABLE symbol(\n"
+       "  id INTEGER PRIMARY KEY,\n"
+       "  name TEXT NOT NULL,\n"
+       "  isTerminal BOOLEAN NOT NULL,\n"
+       "  fallback INTEGER REFERENCES symbol"
+               " DEFERRABLE INITIALLY DEFERRED\n"
+       ");\n"
+    );
+    for(i=0; i<lemp->nsymbol; i++){
+      fprintf(sql,
+         "INSERT INTO symbol(id,name,isTerminal,fallback)"
+         "VALUES(%d,'%s',%s",
+         i, lemp->symbols[i]->name,
+         i<lemp->nterminal ? "TRUE" : "FALSE"
+      );
+      if( lemp->symbols[i]->fallback ){
+        fprintf(sql, ",%d);\n", lemp->symbols[i]->fallback->index);
+      }else{
+        fprintf(sql, ",NULL);\n");
+      }
+    }
+    fprintf(sql,
+      "CREATE TABLE rule(\n"
+      "  ruleid INTEGER PRIMARY KEY,\n"
+      "  lhs INTEGER REFERENCES symbol(id),\n"
+      "  txt TEXT\n"
+      ");\n"
+      "CREATE TABLE rulerhs(\n"
+      "  ruleid INTEGER REFERENCES rule(ruleid),\n"
+      "  pos INTEGER,\n"
+      "  sym INTEGER REFERENCES symbol(id)\n"
+      ");\n"
+    );
+    for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
+      assert( i==rp->iRule );
+      fprintf(sql,
+        "INSERT INTO rule(ruleid,lhs,txt)VALUES(%d,%d,'",
+        rp->iRule, rp->lhs->index
+      );
+      writeRuleText(sql, rp);
+      fprintf(sql,"');\n");
+      for(j=0; j<rp->nrhs; j++){
+        struct symbol *sp = rp->rhs[j];
+        if( sp->type!=MULTITERMINAL ){
+          fprintf(sql,
+            "INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n",
+            i,j,sp->index
+          );
+        }else{
+          int k;
+          for(k=0; k<sp->nsubsym; k++){
+            fprintf(sql,
+              "INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n",
+              i,j,sp->subsym[k]->index
+            );
+          }
+        }
+      }
+    }
+    fprintf(sql, "COMMIT;\n");
+  }
+  lineno = 1;
+
+  fprintf(out,
+     "/* This file is automatically generated by Lemon from input grammar\n"
*/\n", lemp->filename); lineno += 2; + + /* The first %include directive begins with a C-language comment, + ** then skip over the header comment of the template file + */ + if( lemp->include==0 ) lemp->include = ""; + for(i=0; ISSPACE(lemp->include[i]); i++){ + if( lemp->include[i]=='\n' ){ + lemp->include += i+1; + i = -1; + } + } + if( lemp->include[0]=='/' ){ + tplt_skip_header(in,&lineno); + }else{ + tplt_xfer(lemp->name,in,out,&lineno); + } + + /* Generate the include code, if any */ + tplt_print(out,lemp,lemp->include,&lineno); + if( mhflag ){ + char *incName = file_makename(lemp, ".h"); + fprintf(out,"#include \"%s\"\n", incName); lineno++; + free(incName); + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate #defines for all tokens */ + if( lemp->tokenprefix ) prefix = lemp->tokenprefix; + else prefix = ""; + if( mhflag ){ + fprintf(out,"#if INTERFACE\n"); lineno++; + }else{ + fprintf(out,"#ifndef %s%s\n", prefix, lemp->symbols[1]->name); + } + for(i=1; interminal; i++){ + fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i); + lineno++; + } + fprintf(out,"#endif\n"); lineno++; + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate the defines */ + fprintf(out,"#define YYCODETYPE %s\n", + minimum_size_type(0, lemp->nsymbol, &szCodeType)); lineno++; + fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol); lineno++; + fprintf(out,"#define YYACTIONTYPE %s\n", + minimum_size_type(0,lemp->maxAction,&szActionType)); lineno++; + if( lemp->wildcard ){ + fprintf(out,"#define YYWILDCARD %d\n", + lemp->wildcard->index); lineno++; + } + print_stack_union(out,lemp,&lineno,mhflag); + fprintf(out, "#ifndef YYSTACKDEPTH\n"); lineno++; + if( lemp->stacksize ){ + fprintf(out,"#define YYSTACKDEPTH %s\n",lemp->stacksize); lineno++; + }else{ + fprintf(out,"#define YYSTACKDEPTH 100\n"); lineno++; + } + fprintf(out, "#endif\n"); lineno++; + if( mhflag ){ + fprintf(out,"#if INTERFACE\n"); lineno++; + } + name = lemp->name ? 
lemp->name : "Parse"; + if( lemp->arg && lemp->arg[0] ){ + i = lemonStrlen(lemp->arg); + while( i>=1 && ISSPACE(lemp->arg[i-1]) ) i--; + while( i>=1 && (ISALNUM(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--; + fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++; + fprintf(out,"#define %sARG_PDECL ,%s\n",name,lemp->arg); lineno++; + fprintf(out,"#define %sARG_PARAM ,%s\n",name,&lemp->arg[i]); lineno++; + fprintf(out,"#define %sARG_FETCH %s=yypParser->%s;\n", + name,lemp->arg,&lemp->arg[i]); lineno++; + fprintf(out,"#define %sARG_STORE yypParser->%s=%s;\n", + name,&lemp->arg[i],&lemp->arg[i]); lineno++; + }else{ + fprintf(out,"#define %sARG_SDECL\n",name); lineno++; + fprintf(out,"#define %sARG_PDECL\n",name); lineno++; + fprintf(out,"#define %sARG_PARAM\n",name); lineno++; + fprintf(out,"#define %sARG_FETCH\n",name); lineno++; + fprintf(out,"#define %sARG_STORE\n",name); lineno++; + } + if( lemp->ctx && lemp->ctx[0] ){ + i = lemonStrlen(lemp->ctx); + while( i>=1 && ISSPACE(lemp->ctx[i-1]) ) i--; + while( i>=1 && (ISALNUM(lemp->ctx[i-1]) || lemp->ctx[i-1]=='_') ) i--; + fprintf(out,"#define %sCTX_SDECL %s;\n",name,lemp->ctx); lineno++; + fprintf(out,"#define %sCTX_PDECL ,%s\n",name,lemp->ctx); lineno++; + fprintf(out,"#define %sCTX_PARAM ,%s\n",name,&lemp->ctx[i]); lineno++; + fprintf(out,"#define %sCTX_FETCH %s=yypParser->%s;\n", + name,lemp->ctx,&lemp->ctx[i]); lineno++; + fprintf(out,"#define %sCTX_STORE yypParser->%s=%s;\n", + name,&lemp->ctx[i],&lemp->ctx[i]); lineno++; + }else{ + fprintf(out,"#define %sCTX_SDECL\n",name); lineno++; + fprintf(out,"#define %sCTX_PDECL\n",name); lineno++; + fprintf(out,"#define %sCTX_PARAM\n",name); lineno++; + fprintf(out,"#define %sCTX_FETCH\n",name); lineno++; + fprintf(out,"#define %sCTX_STORE\n",name); lineno++; + } + if( mhflag ){ + fprintf(out,"#endif\n"); lineno++; + } + if( lemp->errsym && lemp->errsym->useCnt ){ + fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++; + fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++; + } + if( lemp->has_fallback ){ + fprintf(out,"#define YYFALLBACK 1\n"); lineno++; + } + + /* Compute the action table, but do not output it yet. The action + ** table must be computed before generating the YYNSTATE macro because + ** we need to know how many states can be eliminated. 
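+**
+** Editor's note, a worked example of the encoding set up above: with
+** nstate==120 and nrule==50 the action codes partition as
+**
+**     0..119     shift and go to state N
+**     120..169   shift-reduce by rule N-120    (minShiftReduce)
+**     170        syntax error                  (errAction)
+**     171        accept                        (accAction)
+**     172        no-op                         (noAction)
+**     173..222   reduce by rule N-173          (minReduce)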
+ */ + ax = (struct axset *) calloc(lemp->nxstate*2, sizeof(ax[0])); + if( ax==0 ){ + fprintf(stderr,"malloc failed\n"); + exit(1); + } + for(i=0; inxstate; i++){ + stp = lemp->sorted[i]; + ax[i*2].stp = stp; + ax[i*2].isTkn = 1; + ax[i*2].nAction = stp->nTknAct; + ax[i*2+1].stp = stp; + ax[i*2+1].isTkn = 0; + ax[i*2+1].nAction = stp->nNtAct; + } + mxTknOfst = mnTknOfst = 0; + mxNtOfst = mnNtOfst = 0; + /* In an effort to minimize the action table size, use the heuristic + ** of placing the largest action sets first */ + for(i=0; inxstate*2; i++) ax[i].iOrder = i; + qsort(ax, lemp->nxstate*2, sizeof(ax[0]), axset_compare); + pActtab = acttab_alloc(lemp->nsymbol, lemp->nterminal); + for(i=0; inxstate*2 && ax[i].nAction>0; i++){ + stp = ax[i].stp; + if( ax[i].isTkn ){ + for(ap=stp->ap; ap; ap=ap->next){ + int action; + if( ap->sp->index>=lemp->nterminal ) continue; + action = compute_action(lemp, ap); + if( action<0 ) continue; + acttab_action(pActtab, ap->sp->index, action); + } + stp->iTknOfst = acttab_insert(pActtab, 1); + if( stp->iTknOfstiTknOfst; + if( stp->iTknOfst>mxTknOfst ) mxTknOfst = stp->iTknOfst; + }else{ + for(ap=stp->ap; ap; ap=ap->next){ + int action; + if( ap->sp->indexnterminal ) continue; + if( ap->sp->index==lemp->nsymbol ) continue; + action = compute_action(lemp, ap); + if( action<0 ) continue; + acttab_action(pActtab, ap->sp->index, action); + } + stp->iNtOfst = acttab_insert(pActtab, 0); + if( stp->iNtOfstiNtOfst; + if( stp->iNtOfst>mxNtOfst ) mxNtOfst = stp->iNtOfst; + } +#if 0 /* Uncomment for a trace of how the yy_action[] table fills out */ + { int jj, nn; + for(jj=nn=0; jjnAction; jj++){ + if( pActtab->aAction[jj].action<0 ) nn++; + } + printf("%4d: State %3d %s n: %2d size: %5d freespace: %d\n", + i, stp->statenum, ax[i].isTkn ? "Token" : "Var ", + ax[i].nAction, pActtab->nAction, nn); + } +#endif + } + free(ax); + + /* Mark rules that are actually used for reduce actions after all + ** optimizations have been applied + */ + for(rp=lemp->rule; rp; rp=rp->next) rp->doesReduce = LEMON_FALSE; + for(i=0; inxstate; i++){ + for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){ + if( ap->type==REDUCE || ap->type==SHIFTREDUCE ){ + ap->x.rp->doesReduce = 1; + } + } + } + + /* Finish rendering the constants now that the action table has + ** been computed */ + fprintf(out,"#define YYNSTATE %d\n",lemp->nxstate); lineno++; + fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++; + fprintf(out,"#define YYNRULE_WITH_ACTION %d\n",lemp->nruleWithAction); + lineno++; + fprintf(out,"#define YYNTOKEN %d\n",lemp->nterminal); lineno++; + fprintf(out,"#define YY_MAX_SHIFT %d\n",lemp->nxstate-1); lineno++; + i = lemp->minShiftReduce; + fprintf(out,"#define YY_MIN_SHIFTREDUCE %d\n",i); lineno++; + i += lemp->nrule; + fprintf(out,"#define YY_MAX_SHIFTREDUCE %d\n", i-1); lineno++; + fprintf(out,"#define YY_ERROR_ACTION %d\n", lemp->errAction); lineno++; + fprintf(out,"#define YY_ACCEPT_ACTION %d\n", lemp->accAction); lineno++; + fprintf(out,"#define YY_NO_ACTION %d\n", lemp->noAction); lineno++; + fprintf(out,"#define YY_MIN_REDUCE %d\n", lemp->minReduce); lineno++; + i = lemp->minReduce + lemp->nrule; + fprintf(out,"#define YY_MAX_REDUCE %d\n", i-1); lineno++; + tplt_xfer(lemp->name,in,out,&lineno); + + /* Now output the action table and its associates: + ** + ** yy_action[] A single table containing all actions. + ** yy_lookahead[] A table containing the lookahead for each entry in + ** yy_action. Used to detect hash collisions. 
+ ** yy_shift_ofst[] For each state, the offset into yy_action for + ** shifting terminals. + ** yy_reduce_ofst[] For each state, the offset into yy_action for + ** shifting non-terminals after a reduce. + ** yy_default[] Default action for each state. + */ + + /* Output the yy_action table */ + lemp->nactiontab = n = acttab_action_size(pActtab); + lemp->tablesize += n*szActionType; + fprintf(out,"#define YY_ACTTAB_COUNT (%d)\n", n); lineno++; + fprintf(out,"static const YYACTIONTYPE yy_action[] = {\n"); lineno++; + for(i=j=0; inoAction; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", action); + if( j==9 || i==n-1 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + + /* Output the yy_lookahead table */ + lemp->nlookaheadtab = n = acttab_lookahead_size(pActtab); + lemp->tablesize += n*szCodeType; + fprintf(out,"static const YYCODETYPE yy_lookahead[] = {\n"); lineno++; + for(i=j=0; insymbol; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", la); + if( j==9 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + /* Add extra entries to the end of the yy_lookahead[] table so that + ** yy_shift_ofst[]+iToken will always be a valid index into the array, + ** even for the largest possible value of yy_shift_ofst[] and iToken. */ + nLookAhead = lemp->nterminal + lemp->nactiontab; + while( interminal); + if( j==9 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + i++; + } + if( j>0 ){ fprintf(out, "\n"); lineno++; } + fprintf(out, "};\n"); lineno++; + + /* Output the yy_shift_ofst[] table */ + n = lemp->nxstate; + while( n>0 && lemp->sorted[n-1]->iTknOfst==NO_OFFSET ) n--; + fprintf(out, "#define YY_SHIFT_COUNT (%d)\n", n-1); lineno++; + fprintf(out, "#define YY_SHIFT_MIN (%d)\n", mnTknOfst); lineno++; + fprintf(out, "#define YY_SHIFT_MAX (%d)\n", mxTknOfst); lineno++; + fprintf(out, "static const %s yy_shift_ofst[] = {\n", + minimum_size_type(mnTknOfst, lemp->nterminal+lemp->nactiontab, &sz)); + lineno++; + lemp->tablesize += n*sz; + for(i=j=0; isorted[i]; + ofst = stp->iTknOfst; + if( ofst==NO_OFFSET ) ofst = lemp->nactiontab; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", ofst); + if( j==9 || i==n-1 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + + /* Output the yy_reduce_ofst[] table */ + n = lemp->nxstate; + while( n>0 && lemp->sorted[n-1]->iNtOfst==NO_OFFSET ) n--; + fprintf(out, "#define YY_REDUCE_COUNT (%d)\n", n-1); lineno++; + fprintf(out, "#define YY_REDUCE_MIN (%d)\n", mnNtOfst); lineno++; + fprintf(out, "#define YY_REDUCE_MAX (%d)\n", mxNtOfst); lineno++; + fprintf(out, "static const %s yy_reduce_ofst[] = {\n", + minimum_size_type(mnNtOfst-1, mxNtOfst, &sz)); lineno++; + lemp->tablesize += n*sz; + for(i=j=0; isorted[i]; + ofst = stp->iNtOfst; + if( ofst==NO_OFFSET ) ofst = mnNtOfst - 1; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", ofst); + if( j==9 || i==n-1 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + + /* Output the default action table */ + fprintf(out, "static const YYACTIONTYPE yy_default[] = {\n"); lineno++; + n = lemp->nxstate; + lemp->tablesize += n*szActionType; + for(i=j=0; isorted[i]; + if( j==0 ) fprintf(out," /* %5d */ ", i); + if( stp->iDfltReduce<0 ){ + fprintf(out, " %4d,", lemp->errAction); + }else{ + fprintf(out, " %4d,", stp->iDfltReduce + lemp->minReduce); + } + if( j==9 || i==n-1 ){ + 
fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate the table of fallback tokens. + */ + if( lemp->has_fallback ){ + int mx = lemp->nterminal - 1; + /* 2019-08-28: Generate fallback entries for every token to avoid + ** having to do a range check on the index */ + /* while( mx>0 && lemp->symbols[mx]->fallback==0 ){ mx--; } */ + lemp->tablesize += (mx+1)*szCodeType; + for(i=0; i<=mx; i++){ + struct symbol *p = lemp->symbols[i]; + if( p->fallback==0 ){ + fprintf(out, " 0, /* %10s => nothing */\n", p->name); + }else{ + fprintf(out, " %3d, /* %10s => %s */\n", p->fallback->index, + p->name, p->fallback->name); + } + lineno++; + } + } + tplt_xfer(lemp->name, in, out, &lineno); + + /* Generate a table containing the symbolic name of every symbol + */ + for(i=0; insymbol; i++){ + fprintf(out," /* %4d */ \"%s\",\n",i, lemp->symbols[i]->name); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate a table containing a text string that describes every + ** rule in the rule set of the grammar. This information is used + ** when tracing REDUCE actions. + */ + for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ + assert( rp->iRule==i ); + fprintf(out," /* %3d */ \"", i); + writeRuleText(out, rp); + fprintf(out,"\",\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes every time a symbol is popped from + ** the stack while processing errors or while destroying the parser. + ** (In other words, generate the %destructor actions) + */ + if( lemp->tokendest ){ + int once = 1; + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + if( sp==0 || sp->type!=TERMINAL ) continue; + if( once ){ + fprintf(out, " /* TERMINAL Destructor */\n"); lineno++; + once = 0; + } + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; + } + for(i=0; insymbol && lemp->symbols[i]->type!=TERMINAL; i++); + if( insymbol ){ + emit_destructor_code(out,lemp->symbols[i],lemp,&lineno); + fprintf(out," break;\n"); lineno++; + } + } + if( lemp->vardest ){ + struct symbol *dflt_sp = 0; + int once = 1; + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + if( sp==0 || sp->type==TERMINAL || + sp->index<=0 || sp->destructor!=0 ) continue; + if( once ){ + fprintf(out, " /* Default NON-TERMINAL Destructor */\n");lineno++; + once = 0; + } + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; + dflt_sp = sp; + } + if( dflt_sp!=0 ){ + emit_destructor_code(out,dflt_sp,lemp,&lineno); + } + fprintf(out," break;\n"); lineno++; + } + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue; + if( sp->destLineno<0 ) continue; /* Already emitted */ + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; + + /* Combine duplicate destructors into a single case */ + for(j=i+1; jnsymbol; j++){ + struct symbol *sp2 = lemp->symbols[j]; + if( sp2 && sp2->type!=TERMINAL && sp2->destructor + && sp2->dtnum==sp->dtnum + && strcmp(sp->destructor,sp2->destructor)==0 ){ + fprintf(out," case %d: /* %s */\n", + sp2->index, sp2->name); lineno++; + sp2->destLineno = -1; /* Avoid emitting this destructor again */ + } + } + + emit_destructor_code(out,lemp->symbols[i],lemp,&lineno); + fprintf(out," break;\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes whenever the parser stack overflows */ + 
tplt_print(out,lemp,lemp->overflow,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate the tables of rule information. yyRuleInfoLhs[] and + ** yyRuleInfoNRhs[]. + ** + ** Note: This code depends on the fact that rules are number + ** sequentially beginning with 0. + */ + for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ + fprintf(out," %4d, /* (%d) ", rp->lhs->index, i); + rule_print(out, rp); + fprintf(out," */\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ + fprintf(out," %3d, /* (%d) ", -rp->nrhs, i); + rule_print(out, rp); + fprintf(out," */\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which execution during each REDUCE action */ + i = 0; + for(rp=lemp->rule; rp; rp=rp->next){ + i += translate_code(lemp, rp); + } + if( i ){ + fprintf(out," YYMINORTYPE yylhsminor;\n"); lineno++; + } + /* First output rules other than the default: rule */ + for(rp=lemp->rule; rp; rp=rp->next){ + struct rule *rp2; /* Other rules with the same action */ + if( rp->codeEmitted ) continue; + if( rp->noCode ){ + /* No C code actions, so this will be part of the "default:" rule */ + continue; + } + fprintf(out," case %d: /* ", rp->iRule); + writeRuleText(out, rp); + fprintf(out, " */\n"); lineno++; + for(rp2=rp->next; rp2; rp2=rp2->next){ + if( rp2->code==rp->code && rp2->codePrefix==rp->codePrefix + && rp2->codeSuffix==rp->codeSuffix ){ + fprintf(out," case %d: /* ", rp2->iRule); + writeRuleText(out, rp2); + fprintf(out," */ yytestcase(yyruleno==%d);\n", rp2->iRule); lineno++; + rp2->codeEmitted = 1; + } + } + emit_code(out,rp,lemp,&lineno); + fprintf(out," break;\n"); lineno++; + rp->codeEmitted = 1; + } + /* Finally, output the default: rule. We choose as the default: all + ** empty actions. 
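+** Editor's note: every rule with no C code shares the single
+** "default:" arm emitted just below; each contributes only a line of
+** the form
+**
+**     yytestcase(yyruleno==7);
+**
+** preceded by its rule text as a comment, so coverage tooling can
+** still see which rules fired.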
*/ + fprintf(out," default:\n"); lineno++; + for(rp=lemp->rule; rp; rp=rp->next){ + if( rp->codeEmitted ) continue; + assert( rp->noCode ); + fprintf(out," /* (%d) ", rp->iRule); + writeRuleText(out, rp); + if( rp->neverReduce ){ + fprintf(out, " (NEVER REDUCES) */ assert(yyruleno!=%d);\n", + rp->iRule); lineno++; + }else if( rp->doesReduce ){ + fprintf(out, " */ yytestcase(yyruleno==%d);\n", rp->iRule); lineno++; + }else{ + fprintf(out, " (OPTIMIZED OUT) */ assert(yyruleno!=%d);\n", + rp->iRule); lineno++; + } + } + fprintf(out," break;\n"); lineno++; + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes if a parse fails */ + tplt_print(out,lemp,lemp->failure,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes when a syntax error occurs */ + tplt_print(out,lemp,lemp->error,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes when the parser accepts its input */ + tplt_print(out,lemp,lemp->accept,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Append any addition code the user desires */ + tplt_print(out,lemp,lemp->extracode,&lineno); + + acttab_free(pActtab); + fclose(in); + fclose(out); + if( sql ) fclose(sql); + return; +} + +/* Generate a header file for the parser */ +void ReportHeader(struct lemon *lemp) +{ + FILE *out, *in; + const char *prefix; + char line[LINESIZE]; + char pattern[LINESIZE]; + int i; + + if( lemp->tokenprefix ) prefix = lemp->tokenprefix; + else prefix = ""; + in = file_open(lemp,".h","rb"); + if( in ){ + int nextChar; + for(i=1; interminal && fgets(line,LINESIZE,in); i++){ + lemon_sprintf(pattern,"#define %s%-30s %3d\n", + prefix,lemp->symbols[i]->name,i); + if( strcmp(line,pattern) ) break; + } + nextChar = fgetc(in); + fclose(in); + if( i==lemp->nterminal && nextChar==EOF ){ + /* No change in the file. Don't rewrite it. */ + return; + } + } + out = file_open(lemp,".h","wb"); + if( out ){ + for(i=1; interminal; i++){ + fprintf(out,"#define %s%-30s %3d\n",prefix,lemp->symbols[i]->name,i); + } + fclose(out); + } + return; +} + +/* Reduce the size of the action tables, if possible, by making use +** of defaults. +** +** In this version, we take the most frequent REDUCE action and make +** it the default. Except, there is no default if the wildcard token +** is a possible look-ahead. +*/ +void CompressTables(struct lemon *lemp) +{ + struct state *stp; + struct action *ap, *ap2, *nextap; + struct rule *rp, *rp2, *rbest; + int nbest, n; + int i; + int usesWildcard; + + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + nbest = 0; + rbest = 0; + usesWildcard = 0; + + for(ap=stp->ap; ap; ap=ap->next){ + if( ap->type==SHIFT && ap->sp==lemp->wildcard ){ + usesWildcard = 1; + } + if( ap->type!=REDUCE ) continue; + rp = ap->x.rp; + if( rp->lhsStart ) continue; + if( rp==rbest ) continue; + n = 1; + for(ap2=ap->next; ap2; ap2=ap2->next){ + if( ap2->type!=REDUCE ) continue; + rp2 = ap2->x.rp; + if( rp2==rbest ) continue; + if( rp2==rp ) n++; + } + if( n>nbest ){ + nbest = n; + rbest = rp; + } + } + + /* Do not make a default if the number of rules to default + ** is not at least 1 or if the wildcard token is a possible + ** lookahead. 
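+** Editor's note: the wildcard guard is needed because a default
+** REDUCE applies to every token that has no explicit action, which
+** would silently pre-empt the SHIFT that the wildcard token is meant
+** to match in this state.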
+ */ + if( nbest<1 || usesWildcard ) continue; + + + /* Combine matching REDUCE actions into a single default */ + for(ap=stp->ap; ap; ap=ap->next){ + if( ap->type==REDUCE && ap->x.rp==rbest ) break; + } + assert( ap ); + ap->sp = Symbol_new("{default}"); + for(ap=ap->next; ap; ap=ap->next){ + if( ap->type==REDUCE && ap->x.rp==rbest ) ap->type = NOT_USED; + } + stp->ap = Action_sort(stp->ap); + + for(ap=stp->ap; ap; ap=ap->next){ + if( ap->type==SHIFT ) break; + if( ap->type==REDUCE && ap->x.rp!=rbest ) break; + } + if( ap==0 ){ + stp->autoReduce = 1; + stp->pDfltReduce = rbest; + } + } + + /* Make a second pass over all states and actions. Convert + ** every action that is a SHIFT to an autoReduce state into + ** a SHIFTREDUCE action. + */ + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + for(ap=stp->ap; ap; ap=ap->next){ + struct state *pNextState; + if( ap->type!=SHIFT ) continue; + pNextState = ap->x.stp; + if( pNextState->autoReduce && pNextState->pDfltReduce!=0 ){ + ap->type = SHIFTREDUCE; + ap->x.rp = pNextState->pDfltReduce; + } + } + } + + /* If a SHIFTREDUCE action specifies a rule that has a single RHS term + ** (meaning that the SHIFTREDUCE will land back in the state where it + ** started) and if there is no C-code associated with the reduce action, + ** then we can go ahead and convert the action to be the same as the + ** action for the RHS of the rule. + */ + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + for(ap=stp->ap; ap; ap=nextap){ + nextap = ap->next; + if( ap->type!=SHIFTREDUCE ) continue; + rp = ap->x.rp; + if( rp->noCode==0 ) continue; + if( rp->nrhs!=1 ) continue; +#if 1 + /* Only apply this optimization to non-terminals. It would be OK to + ** apply it to terminal symbols too, but that makes the parser tables + ** larger. */ + if( ap->sp->indexnterminal ) continue; +#endif + /* If we reach this point, it means the optimization can be applied */ + nextap = ap; + for(ap2=stp->ap; ap2 && (ap2==ap || ap2->sp!=rp->lhs); ap2=ap2->next){} + assert( ap2!=0 ); + ap->spOpt = ap2->sp; + ap->type = ap2->type; + ap->x = ap2->x; + } + } +} + + +/* +** Compare two states for sorting purposes. The smaller state is the +** one with the most non-terminal actions. If they have the same number +** of non-terminal actions, then the smaller is the one with the most +** token actions. +*/ +static int stateResortCompare(const void *a, const void *b){ + const struct state *pA = *(const struct state**)a; + const struct state *pB = *(const struct state**)b; + int n; + + n = pB->nNtAct - pA->nNtAct; + if( n==0 ){ + n = pB->nTknAct - pA->nTknAct; + if( n==0 ){ + n = pB->statenum - pA->statenum; + } + } + assert( n!=0 ); + return n; +} + + +/* +** Renumber and resort states so that states with fewer choices +** occur at the end. Except, keep state 0 as the first state. 
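+** Editor's note: this ordering pays off below and in ReportTable():
+** trailing states whose only action is the automatic default reduce
+** (autoReduce) are trimmed from nxstate, so they need no rows in
+** yy_shift_ofst[], yy_reduce_ofst[] or yy_default[].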
+*/ +void ResortStates(struct lemon *lemp) +{ + int i; + struct state *stp; + struct action *ap; + + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + stp->nTknAct = stp->nNtAct = 0; + stp->iDfltReduce = -1; /* Init dflt action to "syntax error" */ + stp->iTknOfst = NO_OFFSET; + stp->iNtOfst = NO_OFFSET; + for(ap=stp->ap; ap; ap=ap->next){ + int iAction = compute_action(lemp,ap); + if( iAction>=0 ){ + if( ap->sp->indexnterminal ){ + stp->nTknAct++; + }else if( ap->sp->indexnsymbol ){ + stp->nNtAct++; + }else{ + assert( stp->autoReduce==0 || stp->pDfltReduce==ap->x.rp ); + stp->iDfltReduce = iAction; + } + } + } + } + qsort(&lemp->sorted[1], lemp->nstate-1, sizeof(lemp->sorted[0]), + stateResortCompare); + for(i=0; instate; i++){ + lemp->sorted[i]->statenum = i; + } + lemp->nxstate = lemp->nstate; + while( lemp->nxstate>1 && lemp->sorted[lemp->nxstate-1]->autoReduce ){ + lemp->nxstate--; + } +} + + +/***************** From the file "set.c" ************************************/ +/* +** Set manipulation routines for the LEMON parser generator. +*/ + +static int size = 0; + +/* Set the set size */ +void SetSize(int n) +{ + size = n+1; +} + +/* Allocate a new set */ +char *SetNew(void){ + char *s; + s = (char*)calloc( size, 1); + if( s==0 ){ + memory_error(); + } + return s; +} + +/* Deallocate a set */ +void SetFree(char *s) +{ + free(s); +} + +/* Add a new element to the set. Return TRUE if the element was added +** and FALSE if it was already there. */ +int SetAdd(char *s, int e) +{ + int rv; + assert( e>=0 && esize = 1024; + x1a->count = 0; + x1a->tbl = (x1node*)calloc(1024, sizeof(x1node) + sizeof(x1node*)); + if( x1a->tbl==0 ){ + free(x1a); + x1a = 0; + }else{ + int i; + x1a->ht = (x1node**)&(x1a->tbl[1024]); + for(i=0; i<1024; i++) x1a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. +** Prior data with the same key is NOT overwritten */ +int Strsafe_insert(const char *data) +{ + x1node *np; + unsigned h; + unsigned ph; + + if( x1a==0 ) return 0; + ph = strhash(data); + h = ph & (x1a->size-1); + np = x1a->ht[h]; + while( np ){ + if( strcmp(np->data,data)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. */ + return 0; + } + np = np->next; + } + if( x1a->count>=x1a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x1 array; + array.size = arrSize = x1a->size*2; + array.count = x1a->count; + array.tbl = (x1node*)calloc(arrSize, sizeof(x1node) + sizeof(x1node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x1node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x1node *oldnp, *newnp; + oldnp = &(x1a->tbl[i]); + h = strhash(oldnp->data) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + /* free(x1a->tbl); // This program was originally for 16-bit machines. + ** Don't worry about freeing memory on modern platforms. */ + *x1a = array; + } + /* Insert the new data */ + h = ph & (x1a->size-1); + np = &(x1a->tbl[x1a->count++]); + np->data = data; + if( x1a->ht[h] ) x1a->ht[h]->from = &(np->next); + np->next = x1a->ht[h]; + x1a->ht[h] = np; + np->from = &(x1a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. 
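+**
+** Editor's note: the x1a table searched here is one of four nearly
+** identical open-hash tables (x1a..x4a) in this file.  A minimal
+** sketch of the shared idiom, using hypothetical names, follows.
+*/
+#if 0 /* Editor's sketch of the x1..x4 hashing idiom; not part of lemon */
+struct demo_node {
+  const char *data;         /* Payload */
+  struct demo_node *next;   /* Next entry with the same hash */
+  struct demo_node **from;  /* The link that points at this node */
+};
+#define DEMO_SIZE 1024      /* Must be a power of two */
+static struct demo_node *demo_ht[DEMO_SIZE];
+static void demo_link(struct demo_node *np, unsigned h){
+  h &= DEMO_SIZE-1;         /* Power-of-two size: mask instead of "%" */
+  if( demo_ht[h] ) demo_ht[h]->from = &np->next; /* Old head now reached via np->next */
+  np->next = demo_ht[h];
+  np->from = &demo_ht[h];
+  demo_ht[h] = np;  /* The "from" links let the rehash loops splice without walking chains */
+}
+#endif
+/*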
*/ +const char *Strsafe_find(const char *key) +{ + unsigned h; + x1node *np; + + if( x1a==0 ) return 0; + h = strhash(key) & (x1a->size-1); + np = x1a->ht[h]; + while( np ){ + if( strcmp(np->data,key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Return a pointer to the (terminal or nonterminal) symbol "x". +** Create a new symbol if this is the first time "x" has been seen. +*/ +struct symbol *Symbol_new(const char *x) +{ + struct symbol *sp; + + sp = Symbol_find(x); + if( sp==0 ){ + sp = (struct symbol *)calloc(1, sizeof(struct symbol) ); + MemoryCheck(sp); + sp->name = Strsafe(x); + sp->type = ISUPPER(*x) ? TERMINAL : NONTERMINAL; + sp->rule = 0; + sp->fallback = 0; + sp->prec = -1; + sp->assoc = UNK; + sp->firstset = 0; + sp->lambda = LEMON_FALSE; + sp->destructor = 0; + sp->destLineno = 0; + sp->datatype = 0; + sp->useCnt = 0; + Symbol_insert(sp,sp->name); + } + sp->useCnt++; + return sp; +} + +/* Compare two symbols for sorting purposes. Return negative, +** zero, or positive if a is less then, equal to, or greater +** than b. +** +** Symbols that begin with upper case letters (terminals or tokens) +** must sort before symbols that begin with lower case letters +** (non-terminals). And MULTITERMINAL symbols (created using the +** %token_class directive) must sort at the very end. Other than +** that, the order does not matter. +** +** We find experimentally that leaving the symbols in their original +** order (the order they appeared in the grammar file) gives the +** smallest parser tables in SQLite. +*/ +int Symbolcmpp(const void *_a, const void *_b) +{ + const struct symbol *a = *(const struct symbol **) _a; + const struct symbol *b = *(const struct symbol **) _b; + int i1 = a->type==MULTITERMINAL ? 3 : a->name[0]>'Z' ? 2 : 1; + int i2 = b->type==MULTITERMINAL ? 3 : b->name[0]>'Z' ? 2 : 1; + return i1==i2 ? a->index - b->index : i1 - i2; +} + +/* There is one instance of the following structure for each +** associative array of type "x2". +*/ +struct s_x2 { + int size; /* The number of available slots. */ + /* Must be a power of 2 greater than or */ + /* equal to 1 */ + int count; /* Number of currently slots filled */ + struct s_x2node *tbl; /* The data stored here */ + struct s_x2node **ht; /* Hash table for lookups */ +}; + +/* There is one instance of this structure for every data element +** in an associative array of type "x2". +*/ +typedef struct s_x2node { + struct symbol *data; /* The data */ + const char *key; /* The key */ + struct s_x2node *next; /* Next entry with the same hash */ + struct s_x2node **from; /* Previous link */ +} x2node; + +/* There is only one instance of the array, which is the following */ +static struct s_x2 *x2a; + +/* Allocate a new associative array */ +void Symbol_init(void){ + if( x2a ) return; + x2a = (struct s_x2*)malloc( sizeof(struct s_x2) ); + if( x2a ){ + x2a->size = 128; + x2a->count = 0; + x2a->tbl = (x2node*)calloc(128, sizeof(x2node) + sizeof(x2node*)); + if( x2a->tbl==0 ){ + free(x2a); + x2a = 0; + }else{ + int i; + x2a->ht = (x2node**)&(x2a->tbl[128]); + for(i=0; i<128; i++) x2a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. 
+** Prior data with the same key is NOT overwritten */ +int Symbol_insert(struct symbol *data, const char *key) +{ + x2node *np; + unsigned h; + unsigned ph; + + if( x2a==0 ) return 0; + ph = strhash(key); + h = ph & (x2a->size-1); + np = x2a->ht[h]; + while( np ){ + if( strcmp(np->key,key)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. */ + return 0; + } + np = np->next; + } + if( x2a->count>=x2a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x2 array; + array.size = arrSize = x2a->size*2; + array.count = x2a->count; + array.tbl = (x2node*)calloc(arrSize, sizeof(x2node) + sizeof(x2node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x2node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x2node *oldnp, *newnp; + oldnp = &(x2a->tbl[i]); + h = strhash(oldnp->key) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->key = oldnp->key; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + /* free(x2a->tbl); // This program was originally written for 16-bit + ** machines. Don't worry about freeing this trivial amount of memory + ** on modern platforms. Just leak it. */ + *x2a = array; + } + /* Insert the new data */ + h = ph & (x2a->size-1); + np = &(x2a->tbl[x2a->count++]); + np->key = key; + np->data = data; + if( x2a->ht[h] ) x2a->ht[h]->from = &(np->next); + np->next = x2a->ht[h]; + x2a->ht[h] = np; + np->from = &(x2a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. */ +struct symbol *Symbol_find(const char *key) +{ + unsigned h; + x2node *np; + + if( x2a==0 ) return 0; + h = strhash(key) & (x2a->size-1); + np = x2a->ht[h]; + while( np ){ + if( strcmp(np->key,key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Return the n-th data. Return NULL if n is out of range. */ +struct symbol *Symbol_Nth(int n) +{ + struct symbol *data; + if( x2a && n>0 && n<=x2a->count ){ + data = x2a->tbl[n-1].data; + }else{ + data = 0; + } + return data; +} + +/* Return the size of the array */ +int Symbol_count() +{ + return x2a ? x2a->count : 0; +} + +/* Return an array of pointers to all data in the table. +** The array is obtained from malloc. Return NULL if memory allocation +** problems, or if the array is empty. 
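+**
+** Editor's note: a typical (hypothetical) use of the snapshot is to
+** sort it with Symbolcmpp, as sketched here.
+*/
+#if 0 /* Editor's illustration; not part of lemon */
+static void demo_sorted_symbols(void){
+  struct symbol **all = Symbol_arrayof();   /* calloc'd snapshot */
+  if( all ){
+    qsort(all, Symbol_count(), sizeof(all[0]), Symbolcmpp);
+    free(all);
+  }
+}
+#endif
+/*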
*/ +struct symbol **Symbol_arrayof() +{ + struct symbol **array; + int i,arrSize; + if( x2a==0 ) return 0; + arrSize = x2a->count; + array = (struct symbol **)calloc(arrSize, sizeof(struct symbol *)); + if( array ){ + for(i=0; itbl[i].data; + } + return array; +} + +/* Compare two configurations */ +int Configcmp(const char *_a,const char *_b) +{ + const struct config *a = (struct config *) _a; + const struct config *b = (struct config *) _b; + int x; + x = a->rp->index - b->rp->index; + if( x==0 ) x = a->dot - b->dot; + return x; +} + +/* Compare two states */ +PRIVATE int statecmp(struct config *a, struct config *b) +{ + int rc; + for(rc=0; rc==0 && a && b; a=a->bp, b=b->bp){ + rc = a->rp->index - b->rp->index; + if( rc==0 ) rc = a->dot - b->dot; + } + if( rc==0 ){ + if( a ) rc = 1; + if( b ) rc = -1; + } + return rc; +} + +/* Hash a state */ +PRIVATE unsigned statehash(struct config *a) +{ + unsigned h=0; + while( a ){ + h = h*571 + a->rp->index*37 + a->dot; + a = a->bp; + } + return h; +} + +/* Allocate a new state structure */ +struct state *State_new() +{ + struct state *newstate; + newstate = (struct state *)calloc(1, sizeof(struct state) ); + MemoryCheck(newstate); + return newstate; +} + +/* There is one instance of the following structure for each +** associative array of type "x3". +*/ +struct s_x3 { + int size; /* The number of available slots. */ + /* Must be a power of 2 greater than or */ + /* equal to 1 */ + int count; /* Number of currently slots filled */ + struct s_x3node *tbl; /* The data stored here */ + struct s_x3node **ht; /* Hash table for lookups */ +}; + +/* There is one instance of this structure for every data element +** in an associative array of type "x3". +*/ +typedef struct s_x3node { + struct state *data; /* The data */ + struct config *key; /* The key */ + struct s_x3node *next; /* Next entry with the same hash */ + struct s_x3node **from; /* Previous link */ +} x3node; + +/* There is only one instance of the array, which is the following */ +static struct s_x3 *x3a; + +/* Allocate a new associative array */ +void State_init(void){ + if( x3a ) return; + x3a = (struct s_x3*)malloc( sizeof(struct s_x3) ); + if( x3a ){ + x3a->size = 128; + x3a->count = 0; + x3a->tbl = (x3node*)calloc(128, sizeof(x3node) + sizeof(x3node*)); + if( x3a->tbl==0 ){ + free(x3a); + x3a = 0; + }else{ + int i; + x3a->ht = (x3node**)&(x3a->tbl[128]); + for(i=0; i<128; i++) x3a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. +** Prior data with the same key is NOT overwritten */ +int State_insert(struct state *data, struct config *key) +{ + x3node *np; + unsigned h; + unsigned ph; + + if( x3a==0 ) return 0; + ph = statehash(key); + h = ph & (x3a->size-1); + np = x3a->ht[h]; + while( np ){ + if( statecmp(np->key,key)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. 
*/ + return 0; + } + np = np->next; + } + if( x3a->count>=x3a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x3 array; + array.size = arrSize = x3a->size*2; + array.count = x3a->count; + array.tbl = (x3node*)calloc(arrSize, sizeof(x3node) + sizeof(x3node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x3node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x3node *oldnp, *newnp; + oldnp = &(x3a->tbl[i]); + h = statehash(oldnp->key) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->key = oldnp->key; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + free(x3a->tbl); + *x3a = array; + } + /* Insert the new data */ + h = ph & (x3a->size-1); + np = &(x3a->tbl[x3a->count++]); + np->key = key; + np->data = data; + if( x3a->ht[h] ) x3a->ht[h]->from = &(np->next); + np->next = x3a->ht[h]; + x3a->ht[h] = np; + np->from = &(x3a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. */ +struct state *State_find(struct config *key) +{ + unsigned h; + x3node *np; + + if( x3a==0 ) return 0; + h = statehash(key) & (x3a->size-1); + np = x3a->ht[h]; + while( np ){ + if( statecmp(np->key,key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Return an array of pointers to all data in the table. +** The array is obtained from malloc. Return NULL if memory allocation +** problems, or if the array is empty. */ +struct state **State_arrayof(void) +{ + struct state **array; + int i,arrSize; + if( x3a==0 ) return 0; + arrSize = x3a->count; + array = (struct state **)calloc(arrSize, sizeof(struct state *)); + if( array ){ + for(i=0; itbl[i].data; + } + return array; +} + +/* Hash a configuration */ +PRIVATE unsigned confighash(struct config *a) +{ + unsigned h=0; + h = h*571 + a->rp->index*37 + a->dot; + return h; +} + +/* There is one instance of the following structure for each +** associative array of type "x4". +*/ +struct s_x4 { + int size; /* The number of available slots. */ + /* Must be a power of 2 greater than or */ + /* equal to 1 */ + int count; /* Number of currently slots filled */ + struct s_x4node *tbl; /* The data stored here */ + struct s_x4node **ht; /* Hash table for lookups */ +}; + +/* There is one instance of this structure for every data element +** in an associative array of type "x4". +*/ +typedef struct s_x4node { + struct config *data; /* The data */ + struct s_x4node *next; /* Next entry with the same hash */ + struct s_x4node **from; /* Previous link */ +} x4node; + +/* There is only one instance of the array, which is the following */ +static struct s_x4 *x4a; + +/* Allocate a new associative array */ +void Configtable_init(void){ + if( x4a ) return; + x4a = (struct s_x4*)malloc( sizeof(struct s_x4) ); + if( x4a ){ + x4a->size = 64; + x4a->count = 0; + x4a->tbl = (x4node*)calloc(64, sizeof(x4node) + sizeof(x4node*)); + if( x4a->tbl==0 ){ + free(x4a); + x4a = 0; + }else{ + int i; + x4a->ht = (x4node**)&(x4a->tbl[64]); + for(i=0; i<64; i++) x4a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. 
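+** Editor's note: the key here is a single configuration, so
+** confighash() above folds just one (rule index, dot) pair, unlike
+** statehash(), which folds a state's entire basis list.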
+** Prior data with the same key is NOT overwritten */ +int Configtable_insert(struct config *data) +{ + x4node *np; + unsigned h; + unsigned ph; + + if( x4a==0 ) return 0; + ph = confighash(data); + h = ph & (x4a->size-1); + np = x4a->ht[h]; + while( np ){ + if( Configcmp((const char *) np->data,(const char *) data)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. */ + return 0; + } + np = np->next; + } + if( x4a->count>=x4a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x4 array; + array.size = arrSize = x4a->size*2; + array.count = x4a->count; + array.tbl = (x4node*)calloc(arrSize, sizeof(x4node) + sizeof(x4node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x4node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x4node *oldnp, *newnp; + oldnp = &(x4a->tbl[i]); + h = confighash(oldnp->data) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + /* free(x4a->tbl); // This code was originall written for 16-bit machines. + ** on modern machines, don't worry about freeing this trival amount of + ** memory. */ + *x4a = array; + } + /* Insert the new data */ + h = ph & (x4a->size-1); + np = &(x4a->tbl[x4a->count++]); + np->data = data; + if( x4a->ht[h] ) x4a->ht[h]->from = &(np->next); + np->next = x4a->ht[h]; + x4a->ht[h] = np; + np->from = &(x4a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. */ +struct config *Configtable_find(struct config *key) +{ + int h; + x4node *np; + + if( x4a==0 ) return 0; + h = confighash(key) & (x4a->size-1); + np = x4a->ht[h]; + while( np ){ + if( Configcmp((const char *) np->data,(const char *) key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Remove all data from the table. Pass each data to the function "f" +** as it is removed. ("f" may be null to avoid this step.) */ +void Configtable_clear(int(*f)(struct config *)) +{ + int i; + if( x4a==0 || x4a->count==0 ) return; + if( f ) for(i=0; icount; i++) (*f)(x4a->tbl[i].data); + for(i=0; isize; i++) x4a->ht[i] = 0; + x4a->count = 0; + return; +} diff --git a/tools/lemon/lempar.c b/tools/lemon/lempar.c new file mode 100644 index 0000000..fcb72b8 --- /dev/null +++ b/tools/lemon/lempar.c @@ -0,0 +1,1068 @@ +/* +** 2000-05-29 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Driver template for the LEMON parser generator. +** +** The "lemon" program processes an LALR(1) input grammar file, then uses +** this template to construct a parser. The "lemon" program inserts text +** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the +** interstitial "-" characters) contained in this template is changed into +** the value of the %name directive from the grammar. Otherwise, the content +** of this template is copied straight through into the generate parser +** source file. 
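+**
+** Editor's note: for example, "%name Foo" in the grammar replaces the
+** "Parse" prefix throughout this template, so ParseAlloc(), ParseFree()
+** and Parse() are emitted as FooAlloc(), FooFree() and Foo().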
+** +** The following is the concatenation of all %include directives from the +** input grammar file: +*/ +/************ Begin %include sections from the grammar ************************/ +%% +/**************** End of %include directives **********************************/ +/* These constants specify the various numeric values for terminal symbols. +***************** Begin token definitions *************************************/ +%% +/**************** End token definitions ***************************************/ + +/* The next sections is a series of control #defines. +** various aspects of the generated parser. +** YYCODETYPE is the data type used to store the integer codes +** that represent terminal and non-terminal symbols. +** "unsigned char" is used if there are fewer than +** 256 symbols. Larger types otherwise. +** YYNOCODE is a number of type YYCODETYPE that is not used for +** any terminal or nonterminal symbol. +** YYFALLBACK If defined, this indicates that one or more tokens +** (also known as: "terminal symbols") have fall-back +** values which should be used if the original symbol +** would not parse. This permits keywords to sometimes +** be used as identifiers, for example. +** YYACTIONTYPE is the data type used for "action codes" - numbers +** that indicate what to do in response to the next +** token. +** ParseTOKENTYPE is the data type used for minor type for terminal +** symbols. Background: A "minor type" is a semantic +** value associated with a terminal or non-terminal +** symbols. For example, for an "ID" terminal symbol, +** the minor type might be the name of the identifier. +** Each non-terminal can have a different minor type. +** Terminal symbols all have the same minor type, though. +** This macros defines the minor type for terminal +** symbols. +** YYMINORTYPE is the data type used for all minor types. +** This is typically a union of many types, one of +** which is ParseTOKENTYPE. The entry in the union +** for terminal symbols is called "yy0". +** YYSTACKDEPTH is the maximum depth of the parser's stack. If +** zero the stack is dynamically sized using realloc() +** ParseARG_SDECL A static variable declaration for the %extra_argument +** ParseARG_PDECL A parameter declaration for the %extra_argument +** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter +** ParseARG_STORE Code to store %extra_argument into yypParser +** ParseARG_FETCH Code to extract %extra_argument from yypParser +** ParseCTX_* As ParseARG_ except for %extra_context +** YYERRORSYMBOL is the code number of the error symbol. If not +** defined, then do no error processing. +** YYNSTATE the combined number of states. 
+** YYNRULE the number of rules in the grammar +** YYNTOKEN Number of terminal symbols +** YY_MAX_SHIFT Maximum value for shift actions +** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions +** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions +** YY_ERROR_ACTION The yy_action[] code for syntax error +** YY_ACCEPT_ACTION The yy_action[] code for accept +** YY_NO_ACTION The yy_action[] code for no-op +** YY_MIN_REDUCE Minimum value for reduce actions +** YY_MAX_REDUCE Maximum value for reduce actions +*/ +#ifndef INTERFACE +# define INTERFACE 1 +#endif +/************* Begin control #defines *****************************************/ +%% +/************* End control #defines *******************************************/ +#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) + +/* Define the yytestcase() macro to be a no-op if is not already defined +** otherwise. +** +** Applications can choose to define yytestcase() in the %include section +** to a macro that can assist in verifying code coverage. For production +** code the yytestcase() macro should be turned off. But it is useful +** for testing. +*/ +#ifndef yytestcase +# define yytestcase(X) +#endif + + +/* Next are the tables used to determine what action to take based on the +** current state and lookahead token. These tables are used to implement +** functions that take a state number and lookahead value and return an +** action integer. +** +** Suppose the action integer is N. Then the action is determined as +** follows +** +** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead +** token onto the stack and goto state N. +** +** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then +** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE. +** +** N == YY_ERROR_ACTION A syntax error has occurred. +** +** N == YY_ACCEPT_ACTION The parser accepts its input. +** +** N == YY_NO_ACTION No such action. Denotes unused +** slots in the yy_action[] table. +** +** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE +** and YY_MAX_REDUCE +** +** The action table is constructed as a single large table named yy_action[]. +** Given state S and lookahead X, the action is computed as either: +** +** (A) N = yy_action[ yy_shift_ofst[S] + X ] +** (B) N = yy_default[S] +** +** The (A) formula is preferred. The B formula is used instead if +** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X. +** +** The formulas above are for computing the action when the lookahead is +** a terminal symbol. If the lookahead is a non-terminal (as occurs after +** a reduce action) then the yy_reduce_ofst[] array is used in place of +** the yy_shift_ofst[] array. +** +** The following are the tables generated in this section: +** +** yy_action[] A single table containing all actions. +** yy_lookahead[] A table containing the lookahead for each entry in +** yy_action. Used to detect hash collisions. +** yy_shift_ofst[] For each state, the offset into yy_action for +** shifting terminals. +** yy_reduce_ofst[] For each state, the offset into yy_action for +** shifting non-terminals after a reduce. +** yy_default[] Default action for each state. +** +*********** Begin parsing tables **********************************************/ +%% +/********** End of lemon-generated parsing tables *****************************/ + +/* The next table maps tokens (terminal symbols) into fallback tokens. +** If a construct like the following: +** +** %fallback ID X Y Z. 
+** +** appears in the grammar, then ID becomes a fallback token for X, Y, +** and Z. Whenever one of the tokens X, Y, or Z is input to the parser +** but it does not parse, the type of the token is changed to ID and +** the parse is retried before an error is thrown. +** +** This feature can be used, for example, to cause some keywords in a language +** to revert to identifiers if they keyword does not apply in the context where +** it appears. +*/ +#ifdef YYFALLBACK +static const YYCODETYPE yyFallback[] = { +%% +}; +#endif /* YYFALLBACK */ + +/* The following structure represents a single element of the +** parser's stack. Information stored includes: +** +** + The state number for the parser at this level of the stack. +** +** + The value of the token stored at this level of the stack. +** (In other words, the "major" token.) +** +** + The semantic value stored at this level of the stack. This is +** the information used by the action routines in the grammar. +** It is sometimes called the "minor" token. +** +** After the "shift" half of a SHIFTREDUCE action, the stateno field +** actually contains the reduce action for the second half of the +** SHIFTREDUCE. +*/ +struct yyStackEntry { + YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */ + YYCODETYPE major; /* The major token value. This is the code + ** number for the token at this stack level */ + YYMINORTYPE minor; /* The user-supplied minor token value. This + ** is the value of the token */ +}; +typedef struct yyStackEntry yyStackEntry; + +/* The state of the parser is completely contained in an instance of +** the following structure */ +struct yyParser { + yyStackEntry *yytos; /* Pointer to top element of the stack */ +#ifdef YYTRACKMAXSTACKDEPTH + int yyhwm; /* High-water mark of the stack */ +#endif +#ifndef YYNOERRORRECOVERY + int yyerrcnt; /* Shifts left before out of the error */ +#endif + ParseARG_SDECL /* A place to hold %extra_argument */ + ParseCTX_SDECL /* A place to hold %extra_context */ +#if YYSTACKDEPTH<=0 + int yystksz; /* Current side of the stack */ + yyStackEntry *yystack; /* The parser's stack */ + yyStackEntry yystk0; /* First stack entry */ +#else + yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ + yyStackEntry *yystackEnd; /* Last entry in the stack */ +#endif +}; +typedef struct yyParser yyParser; + +#include +#ifndef NDEBUG +#include +static FILE *yyTraceFILE = 0; +static char *yyTracePrompt = 0; +#endif /* NDEBUG */ + +#ifndef NDEBUG +/* +** Turn parser tracing on by giving a stream to which to write the trace +** and a prompt to preface each trace message. Tracing is turned off +** by making either argument NULL +** +** Inputs: +**
    +**
+** <ul>
+** <li> A FILE* to which trace output should be written.
+**      If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+**      line of trace output.  If NULL, then tracing is
+**      turned off.
+** </ul>
+** +** Outputs: +** None. +*/ +void ParseTrace(FILE *TraceFILE, char *zTracePrompt){ + yyTraceFILE = TraceFILE; + yyTracePrompt = zTracePrompt; + if( yyTraceFILE==0 ) yyTracePrompt = 0; + else if( yyTracePrompt==0 ) yyTraceFILE = 0; +} +#endif /* NDEBUG */ + +#if defined(YYCOVERAGE) || !defined(NDEBUG) +/* For tracing shifts, the names of all terminals and nonterminals +** are required. The following table supplies these names */ +static const char *const yyTokenName[] = { +%% +}; +#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ + +#ifndef NDEBUG +/* For tracing reduce actions, the names of all rules are required. +*/ +static const char *const yyRuleName[] = { +%% +}; +#endif /* NDEBUG */ + + +#if YYSTACKDEPTH<=0 +/* +** Try to increase the size of the parser stack. Return the number +** of errors. Return 0 on success. +*/ +static int yyGrowStack(yyParser *p){ + int newSize; + int idx; + yyStackEntry *pNew; + + newSize = p->yystksz*2 + 100; + idx = p->yytos ? (int)(p->yytos - p->yystack) : 0; + if( p->yystack==&p->yystk0 ){ + pNew = malloc(newSize*sizeof(pNew[0])); + if( pNew ) pNew[0] = p->yystk0; + }else{ + pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + } + if( pNew ){ + p->yystack = pNew; + p->yytos = &p->yystack[idx]; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, p->yystksz, newSize); + } +#endif + p->yystksz = newSize; + } + return pNew==0; +} +#endif + +/* Datatype of the argument to the memory allocated passed as the +** second argument to ParseAlloc() below. This can be changed by +** putting an appropriate #define in the %include section of the input +** grammar. +*/ +#ifndef YYMALLOCARGTYPE +# define YYMALLOCARGTYPE size_t +#endif + +/* Initialize a new parser that has already been allocated. +*/ +void ParseInit(void *yypRawParser ParseCTX_PDECL){ + yyParser *yypParser = (yyParser*)yypRawParser; + ParseCTX_STORE +#ifdef YYTRACKMAXSTACKDEPTH + yypParser->yyhwm = 0; +#endif +#if YYSTACKDEPTH<=0 + yypParser->yytos = NULL; + yypParser->yystack = NULL; + yypParser->yystksz = 0; + if( yyGrowStack(yypParser) ){ + yypParser->yystack = &yypParser->yystk0; + yypParser->yystksz = 1; + } +#endif +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + yypParser->yytos = yypParser->yystack; + yypParser->yystack[0].stateno = 0; + yypParser->yystack[0].major = 0; +#if YYSTACKDEPTH>0 + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; +#endif +} + +#ifndef Parse_ENGINEALWAYSONSTACK +/* +** This function allocates a new parser. +** The only argument is a pointer to a function which works like +** malloc. +** +** Inputs: +** A pointer to the function used to allocate memory. +** +** Outputs: +** A pointer to a parser. This pointer is used in subsequent calls +** to Parse and ParseFree. +*/ +void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){ + yyParser *yypParser; + yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( yypParser ){ + ParseCTX_STORE + ParseInit(yypParser ParseCTX_PARAM); + } + return (void*)yypParser; +} +#endif /* Parse_ENGINEALWAYSONSTACK */ + + +/* The following function deletes the "minor type" or semantic value +** associated with a symbol. The symbol can be either a terminal +** or nonterminal. "yymajor" is the symbol code, and "yypminor" is +** a pointer to the value to be deleted. The code used to do the +** deletions is derived from the %destructor and/or %token_destructor +** directives of the input grammar. 
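+**
+** Editor's note: a typical grammar-side declaration (illustration):
+**
+**     %destructor expr { freeExprTree($$); }
+**
+** with freeExprTree() a hypothetical cleanup routine; it is invoked
+** here for any "expr" value popped without being consumed by a
+** reduce action.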
+*/ +static void yy_destructor( + yyParser *yypParser, /* The parser */ + YYCODETYPE yymajor, /* Type code for object to destroy */ + YYMINORTYPE *yypminor /* The object to be destroyed */ +){ + ParseARG_FETCH + ParseCTX_FETCH + switch( yymajor ){ + /* Here is inserted the actions which take place when a + ** terminal or non-terminal is destroyed. This can happen + ** when the symbol is popped from the stack during a + ** reduce or during error processing or when a parser is + ** being destroyed before it is finished parsing. + ** + ** Note: during a reduce, the only symbols destroyed are those + ** which appear on the RHS of the rule, but which are *not* used + ** inside the C code. + */ +/********* Begin destructor definitions ***************************************/ +%% +/********* End destructor definitions *****************************************/ + default: break; /* If no destructor action specified: do nothing */ + } +} + +/* +** Pop the parser's stack once. +** +** If there is a destructor routine associated with the token which +** is popped from the stack, then call it. +*/ +static void yy_pop_parser_stack(yyParser *pParser){ + yyStackEntry *yytos; + assert( pParser->yytos!=0 ); + assert( pParser->yytos > pParser->yystack ); + yytos = pParser->yytos--; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sPopping %s\n", + yyTracePrompt, + yyTokenName[yytos->major]); + } +#endif + yy_destructor(pParser, yytos->major, &yytos->minor); +} + +/* +** Clear all secondary memory allocations from the parser +*/ +void ParseFinalize(void *p){ + yyParser *pParser = (yyParser*)p; + while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); +#if YYSTACKDEPTH<=0 + if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); +#endif +} + +#ifndef Parse_ENGINEALWAYSONSTACK +/* +** Deallocate and destroy a parser. Destructors are called for +** all stack elements before shutting the parser down. +** +** If the YYPARSEFREENEVERNULL macro exists (for example because it +** is defined in a %include section of the input grammar) then it is +** assumed that the input pointer is never NULL. +*/ +void ParseFree( + void *p, /* The parser to be deleted */ + void (*freeProc)(void*) /* Function used to reclaim memory */ +){ +#ifndef YYPARSEFREENEVERNULL + if( p==0 ) return; +#endif + ParseFinalize(p); + (*freeProc)(p); +} +#endif /* Parse_ENGINEALWAYSONSTACK */ + +/* +** Return the peak depth of the stack for a parser. +*/ +#ifdef YYTRACKMAXSTACKDEPTH +int ParseStackPeak(void *p){ + yyParser *pParser = (yyParser*)p; + return pParser->yyhwm; +} +#endif + +/* This array of booleans keeps track of the parser statement +** coverage. The element yycoverage[X][Y] is set when the parser +** is in state X and has a lookahead token Y. In a well-tested +** systems, every element of this matrix should end up being set. +*/ +#if defined(YYCOVERAGE) +static unsigned char yycoverage[YYNSTATE][YYNTOKEN]; +#endif + +/* +** Write into out a description of every state/lookahead combination that +** +** (1) has not been used by the parser, and +** (2) is not a syntax error. +** +** Return the number of missed state/lookahead combinations. 
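+**
+** Editor's usage sketch (assuming the parser was compiled with
+** -DYYCOVERAGE): after a test run, a call such as
+**
+**     int nMissed = ParseCoverage(stdout);
+**
+** lists every state/lookahead pair that never occurred and returns
+** the number of misses.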
+*/
+#if defined(YYCOVERAGE)
+int ParseCoverage(FILE *out){
+ int stateno, iLookAhead, i;
+ int nMissed = 0;
+ for(stateno=0; stateno<YYNSTATE; stateno++){
+ i = yy_shift_ofst[stateno];
+ for(iLookAhead=0; iLookAhead<YYNTOKEN; iLookAhead++){
+ if( yy_lookahead[i+iLookAhead]!=iLookAhead ) continue;
+ if( yycoverage[stateno][iLookAhead]==0 ) nMissed++;
+ if( out ){
+ fprintf(out,"State %d lookahead %s %s\n", stateno,
+ yyTokenName[iLookAhead],
+ yycoverage[stateno][iLookAhead] ? "ok" : "missed");
+ }
+ }
+ }
+ return nMissed;
+}
+#endif /* defined(YYCOVERAGE) */
+
+/*
+** Find the appropriate action for a parser given the terminal
+** look-ahead token iLookAhead.
+*/
+static YYACTIONTYPE yy_find_shift_action(
+ YYCODETYPE iLookAhead, /* The look-ahead token */
+ YYACTIONTYPE stateno /* Current state number */
+){
+ int i;
+
+ if( stateno>YY_MAX_SHIFT ) return stateno;
+ assert( stateno <= YY_SHIFT_COUNT );
+#if defined(YYCOVERAGE)
+ yycoverage[stateno][iLookAhead] = 1;
+#endif
+ do{
+ i = yy_shift_ofst[stateno];
+ assert( i>=0 );
+ assert( i<=YY_ACTTAB_COUNT );
+ assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD );
+ assert( iLookAhead!=YYNOCODE );
+ assert( iLookAhead < YYNTOKEN );
+ i += iLookAhead;
+ assert( i<(int)YY_NLOOKAHEAD );
+ if( yy_lookahead[i]!=iLookAhead ){
+#ifdef YYFALLBACK
+ YYCODETYPE iFallback; /* Fallback token */
+ assert( iLookAhead<SZ_YYFALLBACK );
+ iFallback = yyFallback[iLookAhead];
+ if( iFallback!=0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
+ }
+#endif
+ assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+ iLookAhead = iFallback;
+ continue;
+ }
+#endif
+#ifdef YYWILDCARD
+ {
+ int j = i - iLookAhead + YYWILDCARD;
+ assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) );
+ if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead],
+ yyTokenName[YYWILDCARD]);
+ }
+#endif /* NDEBUG */
+ return yy_action[j];
+ }
+ }
+#endif /* YYWILDCARD */
+ return yy_default[stateno];
+ }else{
+ assert( i>=0 && i<(int)(sizeof(yy_action)/sizeof(yy_action[0])) );
+ return yy_action[i];
+ }
+ }while(1);
+}
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+*/
+static YYACTIONTYPE yy_find_reduce_action(
+ YYACTIONTYPE stateno, /* Current state number */
+ YYCODETYPE iLookAhead /* The look-ahead token */
+){
+ int i;
+#ifdef YYERRORSYMBOL
+ if( stateno>YY_REDUCE_COUNT ){
+ return yy_default[stateno];
+ }
+#else
+ assert( stateno<=YY_REDUCE_COUNT );
+#endif
+ i = yy_reduce_ofst[stateno];
+ assert( iLookAhead!=YYNOCODE );
+ i += iLookAhead;
+#ifdef YYERRORSYMBOL
+ if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
+ return yy_default[stateno];
+ }
+#else
+ assert( i>=0 && i<YY_ACTTAB_COUNT );
+ assert( yy_lookahead[i]==iLookAhead );
+#endif
+ return yy_action[i];
+}
+
+/*
+** The following routine is called if the stack overflows.
+*/
+static void yyStackOverflow(yyParser *yypParser){
+ ParseARG_FETCH
+ ParseCTX_FETCH
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will execute if the parser
+ ** stack ever overflows */
+/******** Begin %stack_overflow code ******************************************/
+%%
+/******** End %stack_overflow code ********************************************/
+ ParseARG_STORE /* Suppress warning about unused %extra_argument var */
+ ParseCTX_STORE
+}
+
+/*
+** Print tracing information for a SHIFT action
+*/
+#ifndef NDEBUG
+static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){
+ if( yyTraceFILE ){
+ if( yyNewState<YYNSTATE ){
+ fprintf(yyTraceFILE,"%s%s '%s', go to state %d\n",
+ yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
+ yyNewState);
+ }else{
+ fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n",
+ yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
+ yyNewState - YY_MIN_REDUCE);
+ }
+ }
+}
+#else
+# define yyTraceShift(X,Y,Z)
+#endif
+
+/*
+** Perform a shift action.
+*/
+static void yy_shift(
+ yyParser *yypParser, /* The parser to be shifted */
+ YYACTIONTYPE yyNewState, /* The new state to shift in */
+ YYCODETYPE yyMajor, /* The major token to shift in */
+ ParseTOKENTYPE yyMinor /* The minor token to shift in */
+){
+ yyStackEntry *yytos;
+ yypParser->yytos++;
+#ifdef YYTRACKMAXSTACKDEPTH
+ if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+ yypParser->yyhwm++;
+ assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
+ }
+#endif
+#if YYSTACKDEPTH>0
+ if( yypParser->yytos>yypParser->yystackEnd ){
+ yypParser->yytos--;
+ yyStackOverflow(yypParser);
+ return;
+ }
+#else
+ if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
+ if( yyGrowStack(yypParser) ){
+ yypParser->yytos--;
+ yyStackOverflow(yypParser);
+ return;
+ }
+ }
+#endif
+ if( yyNewState > YY_MAX_SHIFT ){
+ yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
+ }
+ yytos = yypParser->yytos;
+ yytos->stateno = yyNewState;
+ yytos->major = yyMajor;
+ yytos->minor.yy0 = yyMinor;
+ yyTraceShift(yypParser, yyNewState, "Shift");
+}
+
+/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side
+** of that rule */
+static const YYCODETYPE yyRuleInfoLhs[] = {
+%%
+};
+
+/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
+** of symbols on the right-hand side of that rule. */
+static const signed char yyRuleInfoNRhs[] = {
+%%
+};
+
+static void yy_accept(yyParser*); /* Forward Declaration */
+
+/*
+** Perform a reduce action and the shift that must immediately
+** follow the reduce.
+**
+** The yyLookahead and yyLookaheadToken parameters provide reduce actions
+** access to the lookahead token (if any). The yyLookahead will be YYNOCODE
+** if the lookahead token has already been consumed. As this procedure is
+** only called from one place, optimizing compilers will in-line it, which
+** means that the extra parameters have no performance impact.
+*/
+static YYACTIONTYPE yy_reduce(
+ yyParser *yypParser, /* The parser */
+ unsigned int yyruleno, /* Number of the rule by which to reduce */
+ int yyLookahead, /* Lookahead token, or YYNOCODE if none */
+ ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */
+ ParseCTX_PDECL /* %extra_context */
+){
+ int yygoto; /* The next state */
+ YYACTIONTYPE yyact; /* The next action */
+ yyStackEntry *yymsp; /* The top of the parser's stack */
+ int yysize; /* Amount to pop the stack */
+ ParseARG_FETCH
+ (void)yyLookahead;
+ (void)yyLookaheadToken;
+ yymsp = yypParser->yytos;
+
+ switch( yyruleno ){
+ /* Beginning here are the reduction cases. A typical example
+ ** follows:
+ ** case 0:
+ ** #line <lineno> <grammarfile>
+ ** { ... } // User supplied code
+ ** #line <lineno> <thisfile>
+ ** break;
+ */
+/********** Begin reduce actions **********************************************/
+%%
+/********** End reduce actions ************************************************/
+ };
+ assert( yyruleno<sizeof(yyRuleInfoLhs)/sizeof(yyRuleInfoLhs[0]) );
+ yygoto = yyRuleInfoLhs[yyruleno];
+ yysize = yyRuleInfoNRhs[yyruleno];
+ yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto);
+
+ /* There are no SHIFTREDUCE actions on nonterminals because the table
+ ** generator has simplified them to pure REDUCE actions. */
+ assert( !(yyact>YY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) );
+
+ /* It is not possible for a REDUCE to be followed by an error */
+ assert( yyact!=YY_ERROR_ACTION );
+
+ yymsp += yysize+1;
+ yypParser->yytos = yymsp;
+ yymsp->stateno = (YYACTIONTYPE)yyact;
+ yymsp->major = (YYCODETYPE)yygoto;
+ yyTraceShift(yypParser, yyact, "...
then shift"); + return yyact; +} + +/* +** The following code executes when the parse fails +*/ +#ifndef YYNOERRORRECOVERY +static void yy_parse_failed( + yyParser *yypParser /* The parser */ +){ + ParseARG_FETCH + ParseCTX_FETCH +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); + } +#endif + while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); + /* Here code is inserted which will be executed whenever the + ** parser fails */ +/************ Begin %parse_failure code ***************************************/ +%% +/************ End %parse_failure code *****************************************/ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE +} +#endif /* YYNOERRORRECOVERY */ + +/* +** The following code executes when a syntax error first occurs. +*/ +static void yy_syntax_error( + yyParser *yypParser, /* The parser */ + int yymajor, /* The major type of the error token */ + ParseTOKENTYPE yyminor /* The minor type of the error token */ +){ + ParseARG_FETCH + ParseCTX_FETCH +#define TOKEN yyminor +/************ Begin %syntax_error code ****************************************/ +%% +/************ End %syntax_error code ******************************************/ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE +} + +/* +** The following is executed when the parser accepts +*/ +static void yy_accept( + yyParser *yypParser /* The parser */ +){ + ParseARG_FETCH + ParseCTX_FETCH +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); + } +#endif +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + assert( yypParser->yytos==yypParser->yystack ); + /* Here code is inserted which will be executed whenever the + ** parser accepts */ +/*********** Begin %parse_accept code *****************************************/ +%% +/*********** End %parse_accept code *******************************************/ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE +} + +/* The main parser program. +** The first argument is a pointer to a structure obtained from +** "ParseAlloc" which describes the current state of the parser. +** The second argument is the major token number. The third is +** the minor token. The fourth optional argument is whatever the +** user wants (and specified in the grammar) and is available for +** use by the action routines. +** +** Inputs: +**
    +**
  • A pointer to the parser (an opaque structure.) +**
  • The major token number. +**
  • The minor token number. +**
  • An optional argument of a grammar-specified type.
+**
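+**
+** (Editorial sketch of a typical driver loop; GetNextToken() and its
+** out-parameter stand in for a real tokenizer and are hypothetical:
+**
+**     void *pParser = ParseAlloc(malloc);
+**     while( (hTokenId = GetNextToken(&sToken))!=0 ){
+**       Parse(pParser, hTokenId, sToken);
+**     }
+**     Parse(pParser, 0, sToken);   /* major token 0 marks end of input */
+**     ParseFree(pParser, free);
+**
+** as described in the calling sequence above.)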
+**
+** Outputs:
+** None.
+*/
+void Parse(
+ void *yyp, /* The parser */
+ int yymajor, /* The major token code number */
+ ParseTOKENTYPE yyminor /* The value for the token */
+ ParseARG_PDECL /* Optional %extra_argument parameter */
+){
+ YYMINORTYPE yyminorunion;
+ YYACTIONTYPE yyact; /* The parser action. */
+#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
+ int yyendofinput; /* True if we are at the end of input */
+#endif
+#ifdef YYERRORSYMBOL
+ int yyerrorhit = 0; /* True if yymajor has invoked an error */
+#endif
+ yyParser *yypParser = (yyParser*)yyp; /* The parser */
+ ParseCTX_FETCH
+ ParseARG_STORE
+
+ assert( yypParser->yytos!=0 );
+#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
+ yyendofinput = (yymajor==0);
+#endif
+
+ yyact = yypParser->yytos->stateno;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ if( yyact < YY_MIN_REDUCE ){
+ fprintf(yyTraceFILE,"%sInput '%s' in state %d\n",
+ yyTracePrompt,yyTokenName[yymajor],yyact);
+ }else{
+ fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n",
+ yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE);
+ }
+ }
+#endif
+
+ while(1){ /* Exit by "break" */
+ assert( yypParser->yytos>=yypParser->yystack );
+ assert( yyact==yypParser->yytos->stateno );
+ yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact);
+ if( yyact >= YY_MIN_REDUCE ){
+ unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */
+#ifndef NDEBUG
+ assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) );
+ if( yyTraceFILE ){
+ int yysize = yyRuleInfoNRhs[yyruleno];
+ if( yysize ){
+ fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
+ yyTracePrompt,
+ yyruleno, yyRuleName[yyruleno],
+ yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action",
+ yypParser->yytos[yysize].stateno);
+ }else{
+ fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n",
+ yyTracePrompt, yyruleno, yyRuleName[yyruleno],
+ yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action");
+ }
+ }
+#endif /* NDEBUG */
+
+ /* Check that the stack is large enough to grow by a single entry
+ ** if the rule will reduce to a nonterminal of length zero. */
+ if( yyRuleInfoNRhs[yyruleno]==0 ){
+#ifdef YYTRACKMAXSTACKDEPTH
+ if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+ yypParser->yyhwm++;
+ assert( yypParser->yyhwm ==
+ (int)(yypParser->yytos - yypParser->yystack));
+ }
+#endif
+#if YYSTACKDEPTH>0
+ if( yypParser->yytos>=yypParser->yystackEnd ){
+ yyStackOverflow(yypParser);
+ break;
+ }
+#else
+ if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
+ if( yyGrowStack(yypParser) ){
+ yyStackOverflow(yypParser);
+ break;
+ }
+ }
+#endif
+ }
+ yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor ParseCTX_PARAM);
+ }else if( yyact <= YY_MAX_SHIFTREDUCE ){
+ yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt--;
+#endif
+ break;
+ }else if( yyact==YY_ACCEPT_ACTION ){
+ yypParser->yytos--;
+ yy_accept(yypParser);
+ return;
+ }else{
+ assert( yyact == YY_ERROR_ACTION );
+ yyminorunion.yy0 = yyminor;
+#ifdef YYERRORSYMBOL
+ int yymx;
+#endif
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
+ }
+#endif
+#ifdef YYERRORSYMBOL
+ /* A syntax error has occurred.
+ ** The response to an error depends upon whether or not the
+ ** grammar defines an error token "ERROR".
+ **
+ ** This is what we do if the grammar does define ERROR:
+ **
+ ** * Call the %syntax_error function.
+ **
+ ** * Begin popping the stack until we enter a state where
+ ** it is legal to shift the error symbol, then shift
+ ** the error symbol.
+ **
+ ** * Set the error count to three.
+ **
+ ** * Begin accepting and shifting new tokens. No new error
+ ** processing will occur until three tokens have been
+ ** shifted successfully.
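+ **
+ ** (Editorial note: the %syntax_error handler invoked below is supplied
+ ** by the grammar, for example
+ **
+ **     %syntax_error {
+ **       fprintf(stderr, "syntax error\n"); /* grammar-specific report */
+ **     }
+ **
+ ** with the offending token reachable through the TOKEN macro defined
+ ** in yy_syntax_error() above.)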
+ ** + */ + if( yypParser->yyerrcnt<0 ){ + yy_syntax_error(yypParser,yymajor,yyminor); + } + yymx = yypParser->yytos->major; + if( yymx==YYERRORSYMBOL || yyerrorhit ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sDiscard input token %s\n", + yyTracePrompt,yyTokenName[yymajor]); + } +#endif + yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); + yymajor = YYNOCODE; + }else{ + while( yypParser->yytos > yypParser->yystack ){ + yyact = yy_find_reduce_action(yypParser->yytos->stateno, + YYERRORSYMBOL); + if( yyact<=YY_MAX_SHIFTREDUCE ) break; + yy_pop_parser_stack(yypParser); + } + if( yypParser->yytos <= yypParser->yystack || yymajor==0 ){ + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + yymajor = YYNOCODE; + }else if( yymx!=YYERRORSYMBOL ){ + yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor); + } + } + yypParser->yyerrcnt = 3; + yyerrorhit = 1; + if( yymajor==YYNOCODE ) break; + yyact = yypParser->yytos->stateno; +#elif defined(YYNOERRORRECOVERY) + /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to + ** do any kind of error recovery. Instead, simply invoke the syntax + ** error routine and continue going as if nothing had happened. + ** + ** Applications can set this macro (for example inside %include) if + ** they intend to abandon the parse upon the first syntax error seen. + */ + yy_syntax_error(yypParser,yymajor, yyminor); + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + break; +#else /* YYERRORSYMBOL is not defined */ + /* This is what we do if the grammar does not define ERROR: + ** + ** * Report an error message, and throw away the input token. + ** + ** * If the input token is $, then fail the parse. + ** + ** As before, subsequent error messages are suppressed until + ** three input tokens have been successfully shifted. + */ + if( yypParser->yyerrcnt<=0 ){ + yy_syntax_error(yypParser,yymajor, yyminor); + } + yypParser->yyerrcnt = 3; + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + if( yyendofinput ){ + yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + } + break; +#endif + } + } +#ifndef NDEBUG + if( yyTraceFILE ){ + yyStackEntry *i; + char cDiv = '['; + fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt); + for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){ + fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]); + cDiv = ' '; + } + fprintf(yyTraceFILE,"]\n"); + } +#endif + return; +} + +/* +** Return the fallback token corresponding to canonical token iToken, or +** 0 if iToken has no fallback. +*/ +int ParseFallback(int iToken){ +#ifdef YYFALLBACK + assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) ); + return yyFallback[iToken]; +#else + (void)iToken; + return 0; +#endif +} diff --git a/tools/lex.py b/tools/lex.py new file mode 100644 index 0000000..103cf96 --- /dev/null +++ b/tools/lex.py @@ -0,0 +1,1074 @@ +# ----------------------------------------------------------------------------- +# ply: lex.py +# +# Copyright (C) 2001-2015, +# David M. Beazley (Dabeaz LLC) +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# ----------------------------------------------------------------------------- + +__version__ = '3.8' +__tabversion__ = '3.8' + +import re +import sys +import types +import copy +import os +import inspect + +# This tuple contains known string types +try: + # Python 2.6 + StringTypes = (types.StringType, types.UnicodeType) +except AttributeError: + # Python 3.0 + StringTypes = (str, bytes) + +# This regular expression is used to match valid token names +_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') + +# Exception thrown when invalid token encountered and no default error +# handler is defined. +class LexError(Exception): + def __init__(self, message, s): + self.args = (message,) + self.text = s + + +# Token class. This class is used to represent the tokens produced. +class LexToken(object): + def __str__(self): + return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) + + def __repr__(self): + return str(self) + + +# This object is a stand-in for a logging object created by the +# logging module. + +class PlyLogger(object): + def __init__(self, f): + self.f = f + + def critical(self, msg, *args, **kwargs): + self.f.write((msg % args) + '\n') + + def warning(self, msg, *args, **kwargs): + self.f.write('WARNING: ' + (msg % args) + '\n') + + def error(self, msg, *args, **kwargs): + self.f.write('ERROR: ' + (msg % args) + '\n') + + info = critical + debug = critical + + +# Null logger is used when no output is generated. Does nothing. +class NullLogger(object): + def __getattribute__(self, name): + return self + + def __call__(self, *args, **kwargs): + return self + + +# ----------------------------------------------------------------------------- +# === Lexing Engine === +# +# The following Lexer class implements the lexer runtime. There are only +# a few public methods and attributes: +# +# input() - Store a new string in the lexer +# token() - Get the next token +# clone() - Clone the lexer +# +# lineno - Current line number +# lexpos - Current position in the input string +# ----------------------------------------------------------------------------- + +class Lexer: + def __init__(self): + self.lexre = None # Master regular expression. 
This is a list of + # tuples (re, findex) where re is a compiled + # regular expression and findex is a list + # mapping regex group numbers to rules + self.lexretext = None # Current regular expression strings + self.lexstatere = {} # Dictionary mapping lexer states to master regexs + self.lexstateretext = {} # Dictionary mapping lexer states to regex strings + self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names + self.lexstate = 'INITIAL' # Current lexer state + self.lexstatestack = [] # Stack of lexer states + self.lexstateinfo = None # State information + self.lexstateignore = {} # Dictionary of ignored characters for each state + self.lexstateerrorf = {} # Dictionary of error functions for each state + self.lexstateeoff = {} # Dictionary of eof functions for each state + self.lexreflags = 0 # Optional re compile flags + self.lexdata = None # Actual input data (as a string) + self.lexpos = 0 # Current position in input text + self.lexlen = 0 # Length of the input text + self.lexerrorf = None # Error rule (if any) + self.lexeoff = None # EOF rule (if any) + self.lextokens = None # List of valid tokens + self.lexignore = '' # Ignored characters + self.lexliterals = '' # Literal characters that can be passed through + self.lexmodule = None # Module + self.lineno = 1 # Current line number + self.lexoptimize = False # Optimized mode + + def clone(self, object=None): + c = copy.copy(self) + + # If the object parameter has been supplied, it means we are attaching the + # lexer to a new object. In this case, we have to rebind all methods in + # the lexstatere and lexstateerrorf tables. + + if object: + newtab = {} + for key, ritem in self.lexstatere.items(): + newre = [] + for cre, findex in ritem: + newfindex = [] + for f in findex: + if not f or not f[0]: + newfindex.append(f) + continue + newfindex.append((getattr(object, f[0].__name__), f[1])) + newre.append((cre, newfindex)) + newtab[key] = newre + c.lexstatere = newtab + c.lexstateerrorf = {} + for key, ef in self.lexstateerrorf.items(): + c.lexstateerrorf[key] = getattr(object, ef.__name__) + c.lexmodule = object + return c + + # ------------------------------------------------------------ + # writetab() - Write lexer information to a table file + # ------------------------------------------------------------ + def writetab(self, lextab, outputdir=''): + if isinstance(lextab, types.ModuleType): + raise IOError("Won't overwrite existing lextab module") + basetabmodule = lextab.split('.')[-1] + filename = os.path.join(outputdir, basetabmodule) + '.py' + with open(filename, 'w') as tf: + tf.write('# %s.py. This file automatically created by PLY (version %s). 
Don\'t edit!\n' % (basetabmodule, __version__)) + tf.write('_tabversion = %s\n' % repr(__tabversion__)) + tf.write('_lextokens = %s\n' % repr(self.lextokens)) + tf.write('_lexreflags = %s\n' % repr(self.lexreflags)) + tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) + tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) + + # Rewrite the lexstatere table, replacing function objects with function names + tabre = {} + for statename, lre in self.lexstatere.items(): + titem = [] + for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): + titem.append((retext, _funcs_to_names(func, renames))) + tabre[statename] = titem + + tf.write('_lexstatere = %s\n' % repr(tabre)) + tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) + + taberr = {} + for statename, ef in self.lexstateerrorf.items(): + taberr[statename] = ef.__name__ if ef else None + tf.write('_lexstateerrorf = %s\n' % repr(taberr)) + + tabeof = {} + for statename, ef in self.lexstateeoff.items(): + tabeof[statename] = ef.__name__ if ef else None + tf.write('_lexstateeoff = %s\n' % repr(tabeof)) + + # ------------------------------------------------------------ + # readtab() - Read lexer information from a tab file + # ------------------------------------------------------------ + def readtab(self, tabfile, fdict): + if isinstance(tabfile, types.ModuleType): + lextab = tabfile + else: + exec('import %s' % tabfile) + lextab = sys.modules[tabfile] + + if getattr(lextab, '_tabversion', '0.0') != __tabversion__: + raise ImportError('Inconsistent PLY version') + + self.lextokens = lextab._lextokens + self.lexreflags = lextab._lexreflags + self.lexliterals = lextab._lexliterals + self.lextokens_all = self.lextokens | set(self.lexliterals) + self.lexstateinfo = lextab._lexstateinfo + self.lexstateignore = lextab._lexstateignore + self.lexstatere = {} + self.lexstateretext = {} + for statename, lre in lextab._lexstatere.items(): + titem = [] + txtitem = [] + for pat, func_name in lre: + titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict))) + + self.lexstatere[statename] = titem + self.lexstateretext[statename] = txtitem + + self.lexstateerrorf = {} + for statename, ef in lextab._lexstateerrorf.items(): + self.lexstateerrorf[statename] = fdict[ef] + + self.lexstateeoff = {} + for statename, ef in lextab._lexstateeoff.items(): + self.lexstateeoff[statename] = fdict[ef] + + self.begin('INITIAL') + + # ------------------------------------------------------------ + # input() - Push a new string into the lexer + # ------------------------------------------------------------ + def input(self, s): + # Pull off the first character to see if s looks like a string + c = s[:1] + if not isinstance(c, StringTypes): + raise ValueError('Expected a string') + self.lexdata = s + self.lexpos = 0 + self.lexlen = len(s) + + # ------------------------------------------------------------ + # begin() - Changes the lexing state + # ------------------------------------------------------------ + def begin(self, state): + if state not in self.lexstatere: + raise ValueError('Undefined state') + self.lexre = self.lexstatere[state] + self.lexretext = self.lexstateretext[state] + self.lexignore = self.lexstateignore.get(state, '') + self.lexerrorf = self.lexstateerrorf.get(state, None) + self.lexeoff = self.lexstateeoff.get(state, None) + self.lexstate = state + + # ------------------------------------------------------------ + # push_state() - Changes the 
lexing state and saves old on stack + # ------------------------------------------------------------ + def push_state(self, state): + self.lexstatestack.append(self.lexstate) + self.begin(state) + + # ------------------------------------------------------------ + # pop_state() - Restores the previous state + # ------------------------------------------------------------ + def pop_state(self): + self.begin(self.lexstatestack.pop()) + + # ------------------------------------------------------------ + # current_state() - Returns the current lexing state + # ------------------------------------------------------------ + def current_state(self): + return self.lexstate + + # ------------------------------------------------------------ + # skip() - Skip ahead n characters + # ------------------------------------------------------------ + def skip(self, n): + self.lexpos += n + + # ------------------------------------------------------------ + # opttoken() - Return the next token from the Lexer + # + # Note: This function has been carefully implemented to be as fast + # as possible. Don't make changes unless you really know what + # you are doing + # ------------------------------------------------------------ + def token(self): + # Make local copies of frequently referenced attributes + lexpos = self.lexpos + lexlen = self.lexlen + lexignore = self.lexignore + lexdata = self.lexdata + + while lexpos < lexlen: + # This code provides some short-circuit code for whitespace, tabs, and other ignored characters + if lexdata[lexpos] in lexignore: + lexpos += 1 + continue + + # Look for a regular expression match + for lexre, lexindexfunc in self.lexre: + m = lexre.match(lexdata, lexpos) + if not m: + continue + + # Create a token for return + tok = LexToken() + tok.value = m.group() + tok.lineno = self.lineno + tok.lexpos = lexpos + + i = m.lastindex + func, tok.type = lexindexfunc[i] + + if not func: + # If no token type was set, it's an ignored token + if tok.type: + self.lexpos = m.end() + return tok + else: + lexpos = m.end() + break + + lexpos = m.end() + + # If token is processed by a function, call it + + tok.lexer = self # Set additional attributes useful in token rules + self.lexmatch = m + self.lexpos = lexpos + + newtok = func(tok) + + # Every function must return a token, if nothing, we just move to next token + if not newtok: + lexpos = self.lexpos # This is here in case user has updated lexpos. + lexignore = self.lexignore # This is here in case there was a state change + break + + # Verify type of the token. If not in the token map, raise an error + if not self.lexoptimize: + if newtok.type not in self.lextokens_all: + raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( + func.__code__.co_filename, func.__code__.co_firstlineno, + func.__name__, newtok.type), lexdata[lexpos:]) + + return newtok + else: + # No match, see if in literals + if lexdata[lexpos] in self.lexliterals: + tok = LexToken() + tok.value = lexdata[lexpos] + tok.lineno = self.lineno + tok.type = tok.value + tok.lexpos = lexpos + self.lexpos = lexpos + 1 + return tok + + # No match. Call t_error() if defined. + if self.lexerrorf: + tok = LexToken() + tok.value = self.lexdata[lexpos:] + tok.lineno = self.lineno + tok.type = 'error' + tok.lexer = self + tok.lexpos = lexpos + self.lexpos = lexpos + newtok = self.lexerrorf(tok) + if lexpos == self.lexpos: + # Error method didn't change text position at all. This is an error. + raise LexError("Scanning error. 
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) + lexpos = self.lexpos + if not newtok: + continue + return newtok + + self.lexpos = lexpos + raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) + + if self.lexeoff: + tok = LexToken() + tok.type = 'eof' + tok.value = '' + tok.lineno = self.lineno + tok.lexpos = lexpos + tok.lexer = self + self.lexpos = lexpos + newtok = self.lexeoff(tok) + return newtok + + self.lexpos = lexpos + 1 + if self.lexdata is None: + raise RuntimeError('No input string given with input()') + return None + + # Iterator interface + def __iter__(self): + return self + + def next(self): + t = self.token() + if t is None: + raise StopIteration + return t + + __next__ = next + +# ----------------------------------------------------------------------------- +# ==== Lex Builder === +# +# The functions and classes below are used to collect lexing information +# and build a Lexer object from it. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# _get_regex(func) +# +# Returns the regular expression assigned to a function either as a doc string +# or as a .regex attribute attached by the @TOKEN decorator. +# ----------------------------------------------------------------------------- +def _get_regex(func): + return getattr(func, 'regex', func.__doc__) + +# ----------------------------------------------------------------------------- +# get_caller_module_dict() +# +# This function returns a dictionary containing all of the symbols defined within +# a caller further down the call stack. This is used to get the environment +# associated with the yacc() call if none was provided. +# ----------------------------------------------------------------------------- +def get_caller_module_dict(levels): + f = sys._getframe(levels) + ldict = f.f_globals.copy() + if f.f_globals != f.f_locals: + ldict.update(f.f_locals) + return ldict + +# ----------------------------------------------------------------------------- +# _funcs_to_names() +# +# Given a list of regular expression functions, this converts it to a list +# suitable for output to a table file +# ----------------------------------------------------------------------------- +def _funcs_to_names(funclist, namelist): + result = [] + for f, name in zip(funclist, namelist): + if f and f[0]: + result.append((name, f[1])) + else: + result.append(f) + return result + +# ----------------------------------------------------------------------------- +# _names_to_funcs() +# +# Given a list of regular expression function names, this converts it back to +# functions. +# ----------------------------------------------------------------------------- +def _names_to_funcs(namelist, fdict): + result = [] + for n in namelist: + if n and n[0]: + result.append((fdict[n[0]], n[1])) + else: + result.append(n) + return result + +# ----------------------------------------------------------------------------- +# _form_master_re() +# +# This function takes a list of all of the regex components and attempts to +# form the master regular expression. Given limitations in the Python re +# module, it may be necessary to break the master regex into separate expressions. 
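+#
+# (Editorial note: the limitation referred to is the re module's historical
+# cap of roughly 100 groups per compiled pattern; every rule contributes a
+# named group, so when re.compile() rejects the combined pattern the list
+# is split in half and each half is compiled recursively.)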
+# ----------------------------------------------------------------------------- +def _form_master_re(relist, reflags, ldict, toknames): + if not relist: + return [] + regex = '|'.join(relist) + try: + lexre = re.compile(regex, re.VERBOSE | reflags) + + # Build the index to function map for the matching engine + lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) + lexindexnames = lexindexfunc[:] + + for f, i in lexre.groupindex.items(): + handle = ldict.get(f, None) + if type(handle) in (types.FunctionType, types.MethodType): + lexindexfunc[i] = (handle, toknames[f]) + lexindexnames[i] = f + elif handle is not None: + lexindexnames[i] = f + if f.find('ignore_') > 0: + lexindexfunc[i] = (None, None) + else: + lexindexfunc[i] = (None, toknames[f]) + + return [(lexre, lexindexfunc)], [regex], [lexindexnames] + except Exception: + m = int(len(relist)/2) + if m == 0: + m = 1 + llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) + rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) + return (llist+rlist), (lre+rre), (lnames+rnames) + +# ----------------------------------------------------------------------------- +# def _statetoken(s,names) +# +# Given a declaration name s of the form "t_" and a dictionary whose keys are +# state names, this function returns a tuple (states,tokenname) where states +# is a tuple of state names and tokenname is the name of the token. For example, +# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') +# ----------------------------------------------------------------------------- +def _statetoken(s, names): + nonstate = 1 + parts = s.split('_') + for i, part in enumerate(parts[1:], 1): + if part not in names and part != 'ANY': + break + + if i > 1: + states = tuple(parts[1:i]) + else: + states = ('INITIAL',) + + if 'ANY' in states: + states = tuple(names) + + tokenname = '_'.join(parts[i:]) + return (states, tokenname) + + +# ----------------------------------------------------------------------------- +# LexerReflect() +# +# This class represents information needed to build a lexer as extracted from a +# user's input file. 
+# ----------------------------------------------------------------------------- +class LexerReflect(object): + def __init__(self, ldict, log=None, reflags=0): + self.ldict = ldict + self.error_func = None + self.tokens = [] + self.reflags = reflags + self.stateinfo = {'INITIAL': 'inclusive'} + self.modules = set() + self.error = False + self.log = PlyLogger(sys.stderr) if log is None else log + + # Get all of the basic information + def get_all(self): + self.get_tokens() + self.get_literals() + self.get_states() + self.get_rules() + + # Validate all of the information + def validate_all(self): + self.validate_tokens() + self.validate_literals() + self.validate_rules() + return self.error + + # Get the tokens map + def get_tokens(self): + tokens = self.ldict.get('tokens', None) + if not tokens: + self.log.error('No token list is defined') + self.error = True + return + + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.error = True + return + + self.tokens = tokens + + # Validate the tokens + def validate_tokens(self): + terminals = {} + for n in self.tokens: + if not _is_identifier.match(n): + self.log.error("Bad token name '%s'", n) + self.error = True + if n in terminals: + self.log.warning("Token '%s' multiply defined", n) + terminals[n] = 1 + + # Get the literals specifier + def get_literals(self): + self.literals = self.ldict.get('literals', '') + if not self.literals: + self.literals = '' + + # Validate literals + def validate_literals(self): + try: + for c in self.literals: + if not isinstance(c, StringTypes) or len(c) > 1: + self.log.error('Invalid literal %s. Must be a single character', repr(c)) + self.error = True + + except TypeError: + self.log.error('Invalid literals specification. literals must be a sequence of characters') + self.error = True + + def get_states(self): + self.states = self.ldict.get('states', None) + # Build statemap + if self.states: + if not isinstance(self.states, (tuple, list)): + self.log.error('states must be defined as a tuple or list') + self.error = True + else: + for s in self.states: + if not isinstance(s, tuple) or len(s) != 2: + self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')", repr(s)) + self.error = True + continue + name, statetype = s + if not isinstance(name, StringTypes): + self.log.error('State name %s must be a string', repr(name)) + self.error = True + continue + if not (statetype == 'inclusive' or statetype == 'exclusive'): + self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) + self.error = True + continue + if name in self.stateinfo: + self.log.error("State '%s' already defined", name) + self.error = True + continue + self.stateinfo[name] = statetype + + # Get all of the symbols with a t_ prefix and sort them into various + # categories (functions, strings, error functions, and ignore characters) + + def get_rules(self): + tsymbols = [f for f in self.ldict if f[:2] == 't_'] + + # Now build up a list of functions and a list of strings + self.toknames = {} # Mapping of symbols to token names + self.funcsym = {} # Symbols defined as functions + self.strsym = {} # Symbols defined as strings + self.ignore = {} # Ignore strings by state + self.errorf = {} # Error functions by state + self.eoff = {} # EOF functions by state + + for s in self.stateinfo: + self.funcsym[s] = [] + self.strsym[s] = [] + + if len(tsymbols) == 0: + self.log.error('No rules of the form t_rulename are defined') + self.error = True + return + + for f in tsymbols: + t = self.ldict[f] + states, tokname = _statetoken(f, self.stateinfo) + self.toknames[f] = tokname + + if hasattr(t, '__call__'): + if tokname == 'error': + for s in states: + self.errorf[s] = t + elif tokname == 'eof': + for s in states: + self.eoff[s] = t + elif tokname == 'ignore': + line = t.__code__.co_firstlineno + file = t.__code__.co_filename + self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) + self.error = True + else: + for s in states: + self.funcsym[s].append((f, t)) + elif isinstance(t, StringTypes): + if tokname == 'ignore': + for s in states: + self.ignore[s] = t + if '\\' in t: + self.log.warning("%s contains a literal backslash '\\'", f) + + elif tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", f) + self.error = True + else: + for s in states: + self.strsym[s].append((f, t)) + else: + self.log.error('%s not defined as a function or string', f) + self.error = True + + # Sort the functions by line number + for f in self.funcsym.values(): + f.sort(key=lambda x: x[1].__code__.co_firstlineno) + + # Sort the strings by regular expression length + for s in self.strsym.values(): + s.sort(key=lambda x: len(x[1]), reverse=True) + + # Validate all of the t_rules collected + def validate_rules(self): + for state in self.stateinfo: + # Validate all rules defined by functions + + for fname, f in self.funcsym[state]: + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + tokname = self.toknames[fname] + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + continue + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + continue + + if not _get_regex(f): + self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | 
self.reflags) + if c.match(''): + self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) + self.error = True + except re.error as e: + self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e) + if '#' in _get_regex(f): + self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) + self.error = True + + # Validate all rules defined by strings + for name, r in self.strsym[state]: + tokname = self.toknames[name] + if tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", name) + self.error = True + continue + + if tokname not in self.tokens and tokname.find('ignore_') < 0: + self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags) + if (c.match('')): + self.log.error("Regular expression for rule '%s' matches empty string", name) + self.error = True + except re.error as e: + self.log.error("Invalid regular expression for rule '%s'. %s", name, e) + if '#' in r: + self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) + self.error = True + + if not self.funcsym[state] and not self.strsym[state]: + self.log.error("No rules defined for state '%s'", state) + self.error = True + + # Validate the error function + efunc = self.errorf.get(state, None) + if efunc: + f = efunc + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + + for module in self.modules: + self.validate_module(module) + + # ----------------------------------------------------------------------------- + # validate_module() + # + # This checks to see if there are duplicated t_rulename() functions or strings + # in the parser input file. This is done using a simple regular expression + # match on each line in the source code of the given module. + # ----------------------------------------------------------------------------- + + def validate_module(self, module): + lines, linen = inspect.getsourcelines(module) + + fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') + sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') + + counthash = {} + linen += 1 + for line in lines: + m = fre.match(line) + if not m: + m = sre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if not prev: + counthash[name] = linen + else: + filename = inspect.getsourcefile(module) + self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) + self.error = True + linen += 1 + +# ----------------------------------------------------------------------------- +# lex(module) +# +# Build all of the regular expression rules from definitions in the supplied module +# ----------------------------------------------------------------------------- +def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', + reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None): + + if lextab is None: + lextab = 'lextab' + + global lexer + + ldict = None + stateinfo = {'INITIAL': 'inclusive'} + lexobj = Lexer() + lexobj.lexoptimize = optimize + global token, input + + if errorlog is None: + errorlog = PlyLogger(sys.stderr) + + if debug: + if debuglog is None: + debuglog = PlyLogger(sys.stderr) + + # Get the module dictionary used for the lexer + if object: + module = object + + # Get the module dictionary used for the parser + if module: + _items = [(k, getattr(module, k)) for k in dir(module)] + ldict = dict(_items) + # If no __file__ attribute is available, try to obtain it from the __module__ instead + if '__file__' not in ldict: + ldict['__file__'] = sys.modules[ldict['__module__']].__file__ + else: + ldict = get_caller_module_dict(2) + + # Determine if the module is package of a package or not. + # If so, fix the tabmodule setting so that tables load correctly + pkg = ldict.get('__package__') + if pkg and isinstance(lextab, str): + if '.' not in lextab: + lextab = pkg + '.' + lextab + + # Collect parser information from the dictionary + linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) + linfo.get_all() + if not optimize: + if linfo.validate_all(): + raise SyntaxError("Can't build lexer") + + if optimize and lextab: + try: + lexobj.readtab(lextab, ldict) + token = lexobj.token + input = lexobj.input + lexer = lexobj + return lexobj + + except ImportError: + pass + + # Dump some basic debugging information + if debug: + debuglog.info('lex: tokens = %r', linfo.tokens) + debuglog.info('lex: literals = %r', linfo.literals) + debuglog.info('lex: states = %r', linfo.stateinfo) + + # Build a dictionary of valid token names + lexobj.lextokens = set() + for n in linfo.tokens: + lexobj.lextokens.add(n) + + # Get literals specification + if isinstance(linfo.literals, (list, tuple)): + lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) + else: + lexobj.lexliterals = linfo.literals + + lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) + + # Get the stateinfo dictionary + stateinfo = linfo.stateinfo + + regexs = {} + # Build the master regular expressions + for state in stateinfo: + regex_list = [] + + # Add rules defined by functions first + for fname, f in linfo.funcsym[state]: + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) + if debug: + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) + + # Now add all of the simple rules + for name, r in linfo.strsym[state]: + regex_list.append('(?P<%s>%s)' % (name, r)) + if debug: + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) + + regexs[state] = regex_list + + # Build the master regular expressions + + if debug: + debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') + + for state in regexs: + lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) + lexobj.lexstatere[state] = lexre + 
lexobj.lexstateretext[state] = re_text + lexobj.lexstaterenames[state] = re_names + if debug: + for i, text in enumerate(re_text): + debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) + + # For inclusive states, we need to add the regular expressions from the INITIAL state + for state, stype in stateinfo.items(): + if state != 'INITIAL' and stype == 'inclusive': + lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) + lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) + lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) + + lexobj.lexstateinfo = stateinfo + lexobj.lexre = lexobj.lexstatere['INITIAL'] + lexobj.lexretext = lexobj.lexstateretext['INITIAL'] + lexobj.lexreflags = reflags + + # Set up ignore variables + lexobj.lexstateignore = linfo.ignore + lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') + + # Set up error functions + lexobj.lexstateerrorf = linfo.errorf + lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) + if not lexobj.lexerrorf: + errorlog.warning('No t_error rule is defined') + + # Set up eof functions + lexobj.lexstateeoff = linfo.eoff + lexobj.lexeoff = linfo.eoff.get('INITIAL', None) + + # Check state information for ignore and error rules + for s, stype in stateinfo.items(): + if stype == 'exclusive': + if s not in linfo.errorf: + errorlog.warning("No error rule is defined for exclusive state '%s'", s) + if s not in linfo.ignore and lexobj.lexignore: + errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) + elif stype == 'inclusive': + if s not in linfo.errorf: + linfo.errorf[s] = linfo.errorf.get('INITIAL', None) + if s not in linfo.ignore: + linfo.ignore[s] = linfo.ignore.get('INITIAL', '') + + # Create global versions of the token() and input() functions + token = lexobj.token + input = lexobj.input + lexer = lexobj + + # If in optimize mode, we write the lextab + if lextab and optimize: + if outputdir is None: + # If no output directory is set, the location of the output files + # is determined according to the following rules: + # - If lextab specifies a package, files go into that package directory + # - Otherwise, files go in the same directory as the specifying module + if isinstance(lextab, types.ModuleType): + srcfile = lextab.__file__ + else: + if '.' not in lextab: + srcfile = ldict['__file__'] + else: + parts = lextab.split('.') + pkgname = '.'.join(parts[:-1]) + exec('import %s' % pkgname) + srcfile = getattr(sys.modules[pkgname], '__file__', '') + outputdir = os.path.dirname(srcfile) + try: + lexobj.writetab(lextab, outputdir) + except IOError as e: + errorlog.warning("Couldn't write lextab module %r. 
%s" % (lextab, e)) + + return lexobj + +# ----------------------------------------------------------------------------- +# runmain() +# +# This runs the lexer as a main program +# ----------------------------------------------------------------------------- + +def runmain(lexer=None, data=None): + if not data: + try: + filename = sys.argv[1] + f = open(filename) + data = f.read() + f.close() + except IndexError: + sys.stdout.write('Reading from standard input (type EOF to end):\n') + data = sys.stdin.read() + + if lexer: + _input = lexer.input + else: + _input = input + _input(data) + if lexer: + _token = lexer.token + else: + _token = token + + while True: + tok = _token() + if not tok: + break + sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) + +# ----------------------------------------------------------------------------- +# @TOKEN(regex) +# +# This decorator function can be used to set the regex expression on a function +# when its docstring might need to be set in an alternative way +# ----------------------------------------------------------------------------- + +def TOKEN(r): + def set_regex(f): + if hasattr(r, '__call__'): + f.regex = _get_regex(r) + else: + f.regex = r + return f + return set_regex + +# Alternative spelling of the TOKEN decorator +Token = TOKEN + diff --git a/tools/licensecheck.pl b/tools/licensecheck.pl new file mode 100755 index 0000000..0778153 --- /dev/null +++ b/tools/licensecheck.pl @@ -0,0 +1,874 @@ +#!/usr/bin/perl +# -*- tab-width: 8; indent-tabs-mode: t; cperl-indent-level: 4 -*- +# This script was originally based on the script of the same name from +# the KDE SDK (by dfaure@kde.org) +# +# This version is +# Copyright (C) 2007, 2008 Adam D. Barratt +# Copyright (C) 2012 Francesco Poli +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +# Originally copied from Debian's devscripts. A more modern version of +# this can be found at +# https://anonscm.debian.org/git/pkg-perl/packages/licensecheck.git/ + +=head1 NAME + +licensecheck - simple license checker for source files + +=head1 SYNOPSIS + +B B<--help>|B<--version> + +B [B<--no-conf>] [B<--verbose>] [B<--copyright>] +[B<-l>|B<--lines=>I] [B<-i>|B<--ignore=>I] [B<-c>|B<--check=>I] +[B<-m>|B<--machine>] [B<-r>|B<--recursive>] [B<-e>|B<--encoding=>I<...>] +I + +=head1 DESCRIPTION + +B attempts to determine the license that applies to each file +passed to it, by searching the start of the file for text belonging to +various licenses. + +If any of the arguments passed are directories, B will add +the files contained within to the list of files to process. + +=head1 OPTIONS + +=over 4 + +=item B<--verbose>, B<--no-verbose> + +Specify whether to output the text being processed from each file before +the corresponding license information. + +Default is to be quiet. + +=item B<-l=>I, B<--lines=>I + +Specify the number of lines of each file's header which should be parsed +for license information. (Default is 60). 
+
+=item B<--tail=>I<N>
+
+By default, the last 5k bytes of each file are parsed to get license
+information. You may use this option to set the size of this parsed chunk.
+You may set this value to 0 to avoid parsing the end of the file.
+
+=item B<-i=>I<regex>, B<--ignore=>I<regex>
+
+When processing the list of files and directories, the regular
+expression specified by this option will be used to indicate those which
+should not be considered (e.g. backup files, VCS metadata).
+
+=item B<-r>, B<--recursive>
+
+Specify that the contents of directories should be added
+recursively.
+
+=item B<-c=>I<regex>, B<--check=>I<regex>
+
+Specify a pattern against which filenames will be matched in order to
+decide which files to check the license of.
+
+The default includes common source files.
+
+=item B<-s>, B<--skipped>
+
+Specify whether to show skipped files, i.e. files found which do not
+match the check regexp (see C<--check> option). Default is to not show
+skipped files.
+
+Note that ignored files (like C<.git> or C<.svn>) are not shown even when
+this option is used.
+
+=item B<--copyright>
+
+Also display copyright text found within the file
+
+=item B<-e> B<--encoding>
+
+Specifies input encoding of source files. By default, input files are
+not decoded. When encoding is specified, license and copyright
+information are printed on STDOUT as utf8, or garbage if you got the
+encoding wrong.
+
+=item B<-m>, B<--machine>
+
+Display the information in a machine readable way, i.e. in the form
+<file><tab><license>[<tab><copyright>] so that it can be easily sorted
+and/or filtered, e.g. with the B<awk> and B<sort> commands.
+Note that using the B<--verbose> option will kill the readability.
+
+=item B<--no-conf>, B<--noconf>
+
+Do not read any configuration files. This can only be used as the first
+option given on the command line.
+
+=back
+
+=head1 CONFIGURATION VARIABLES
+
+The two configuration files F</etc/devscripts.conf> and
+F<~/.devscripts> are sourced by a shell in that order to set
+configuration variables. Command line options can be used to override
+configuration file settings. Environment variable settings are
+ignored for this purpose. The currently recognised variables are:
+
+=over 4
+
+=item B<LICENSECHECK_VERBOSE>
+
+If this is set to I<yes>, then it is the same as the B<--verbose> command
+line parameter being used. The default is I<no>.
+
+=item B<LICENSECHECK_PARSELINES>
+
+If this is set to a positive number then the specified number of lines
+at the start of each file will be read whilst attempting to determine
+the license(s) in use. This is equivalent to the B<--lines> command line
+option.
+
+=back
+
+=head1 LICENSE
+
+This code is copyright by Adam D. Barratt <I<adam@adam-barratt.org.uk>>,
+all rights reserved; based on a script of the same name from the KDE
+SDK, which is copyright by <I<dfaure@kde.org>>.
+This program comes with ABSOLUTELY NO WARRANTY.
+You are free to redistribute this code under the terms of the GNU
+General Public License, version 2 or later.
+
+=head1 AUTHOR
+
+Adam D. Barratt <adam@adam-barratt.org.uk>
+
+=cut
+
+# see https://stackoverflow.com/questions/6162484/why-does-modern-perl-avoid-utf-8-by-default/6163129#6163129
+use v5.14;
+use utf8;
+
+use strict;
+use autodie;
+use warnings;
+use warnings qw< FATAL utf8 >;
+
+use Getopt::Long qw(:config gnu_getopt);
+use File::Basename;
+use File::stat;
+use IO::File;
+use Fcntl qw/:seek/;
+
+binmode STDOUT, ':utf8';
+
+my $progname = basename($0);
+
+# From dpkg-source
+my $default_ignore_regex = qr!
+# Ignore general backup files
+~$|
+# Ignore emacs recovery files
+(?:^|/)\.[#]|
+# Ignore vi swap files
+(?:^|/)\..*\.swp$|
+# Ignore baz-style junk files or directories
+(?:^|/),,.*(?:$|/.*$)|
+# File-names that should be ignored (never directories)
+(?:^|/)(?:DEADJOE|\.cvsignore|\.arch-inventory|\.bzrignore|\.gitignore)$|
+# File or directory names that should be ignored
+(?:^|/)(?:CVS|RCS|\.pc|\.deps|\{arch\}|\.arch-ids|\.svn|\.hg|_darcs|\.git|
+\.shelf|_MTN|\.bzr(?:\.backup|tags)?)(?:$|/.*$)
+!x;
+
+# The original Debian version checks Markdown (.md and .markdown) files.
+# If we add those extensions back, we should add Asciidoctor (.adoc) as
+# well, and add SPDX IDs to all of those files.
+my $default_check_regex =
+    qr!
+    \.(                 # search for file suffix
+        c(c|pp|xx)?     # c and c++
+        |h(h|pp|xx)?    # header files for c and c++
+        |S
+        |css|less       # HTML css and similar
+        |f(77|90)?
+        |go
+        |groovy
+        |lisp
+        |scala
+        |clj
+        |p(l|m)?6?|t|xs|pod6?  # perl5 or perl6
+        |sh
+        |php
+        |py(|x)
+        |rb
+        |java
+        |js
+        |vala
+        |el
+        |sc(i|e)
+        |cs
+        |pas
+        |inc
+        |dtd|xsl
+        |mod
+        |m
+        |tex
+        |mli?
+        |(c|l)?hs
+    )
+    $
+    !x;
+
+# Also used for cleanup below
+my $copyright_indicator_regex
+    = qr!
+        (?:copyright    # The full word
+        |copr\.         # Legally-valid abbreviation
+        |\xc2\xa9       # The copyright sign as a UTF-8 byte sequence
+        |\x{00a9}       # Unicode character COPYRIGHT SIGN
+        #|©             # Unicode character COPYRIGHT SIGN
+        |\(c\)          # Legally-null representation of sign
+        )
+    !lix;
+
+my $copyright_indicator_regex_with_capture = qr!$copyright_indicator_regex(?::\s*|\s+)(\S.*)$!lix;
+
+# Avoid ditching lines that merely talk about copyright
+my $copyright_disindicator_regex
+    = qr{
+        \b(?:info(?:rmation)?(?!@)          # Discussing copyright information
+        |(notice|statement|claim|string)s?  # Discussing the notice
+        |is|in|to                           # Part of a sentence
+        |(holder|owner)s?                   # Part of a sentence
+        |ownership                          # Part of a sentence
+        )\b
+    }ix;
+
+my $copyright_predisindicator_regex
+    = qr!(
+        ^[#]define\s+.*\(c\)    # #define foo(c) -- not copyright
+    )!ix;
+
+my $modified_conf_msg;
+
+my %OPT = (
+    verbose   => '',
+    lines     => '',
+    noconf    => '',
+    ignore    => '',
+    check     => '',
+    recursive => 0,
+    copyright => 0,
+    machine   => 0,
+    text      => 0,
+    skipped   => 0,
+);
+
+my $def_lines = 60;
+my $def_tail  = 5000; # roughly 60 lines of 80 chars
+
+# Read configuration files and then command line
+# This is boilerplate
+
+if (@ARGV and $ARGV[0] =~ /^--no-?conf$/) {
+    $modified_conf_msg = "  (no configuration files read)";
+    shift;
+} else {
+    my @config_files = ('/etc/devscripts.conf', '~/.devscripts');
+    my %config_vars = (
+        'LICENSECHECK_VERBOSE' => 'no',
+        'LICENSECHECK_PARSELINES' => $def_lines,
+    );
+    my %config_default = %config_vars;
+
+    my $shell_cmd;
+    # Set defaults
+    foreach my $var (keys %config_vars) {
+        $shell_cmd .= qq[$var="$config_vars{$var}";\n];
+    }
+    $shell_cmd .= 'for file in ' . join(" ", @config_files) . "; do\n";
+    $shell_cmd .= '[ -f $file ] && . $file; done;' . "\n";
+    # Read back values
+    foreach my $var (keys %config_vars) { $shell_cmd .= "echo \$$var;\n" }
+    my $shell_out = `/bin/bash -c '$shell_cmd'`;
+    @config_vars{keys %config_vars} = split /\n/, $shell_out, -1;
+
+    # Check validity
+    $config_vars{'LICENSECHECK_VERBOSE'} =~ /^(yes|no)$/
+        or $config_vars{'LICENSECHECK_VERBOSE'} = 'no';
+    $config_vars{'LICENSECHECK_PARSELINES'} =~ /^[1-9][0-9]*$/
+        or $config_vars{'LICENSECHECK_PARSELINES'} = $def_lines;
+
+    foreach my $var (sort keys %config_vars) {
+        if ($config_vars{$var} ne $config_default{$var}) {
+            $modified_conf_msg .= "  $var=$config_vars{$var}\n";
+        }
+    }
+    $modified_conf_msg ||= "  (none)\n";
+    chomp $modified_conf_msg;
+
+    $OPT{'verbose'} = $config_vars{'LICENSECHECK_VERBOSE'} eq 'yes' ? 1 : 0;
+    $OPT{'lines'} = $config_vars{'LICENSECHECK_PARSELINES'};
+}
+
+GetOptions(\%OPT,
+    "help|h",
+    "check|c=s",
+    "copyright",
+    "encoding|e=s",
+    "ignore|i=s",
+    "lines|l=i",
+    "machine|m",
+    "noconf|no-conf",
+    "recursive|r",
+    "skipped|s",
+    "tail=i",   # --tail takes a byte count, as documented above
+    "text|t",
+    "verbose!",
+    "version|v",
+) or die "Usage: $progname [options] filelist\nRun $progname --help for more details\n";
+
+$OPT{'lines'} = $def_lines if $OPT{'lines'} !~ /^[1-9][0-9]*$/;
+my $ignore_regex = length($OPT{ignore}) ? qr/$OPT{ignore}/ : $default_ignore_regex;
+
+my $check_regex = $default_check_regex;
+$check_regex = qr/$OPT{check}/ if length $OPT{check};
+
+if ($OPT{'noconf'}) {
+    fatal("--no-conf is only acceptable as the first command-line option!");
+}
+if ($OPT{'help'}) { help(); exit 0; }
+if ($OPT{'version'}) { version(); exit 0; }
+
+if ($OPT{text}) {
+    warn "$0 warning: option -text is deprecated\n"; # remove -text end 2015
+}
+
+die "Usage: $progname [options] filelist\nRun $progname --help for more details\n" unless @ARGV;
+
+$OPT{'lines'} = $def_lines if not defined $OPT{'lines'};
+
+my @files = ();
+my @find_args = ();
+my $files_count = @ARGV;
+
+push @find_args, qw(-maxdepth 1) unless $OPT{'recursive'};
+push @find_args, qw(-follow -type f -print);
+
+while (@ARGV) {
+    my $file = shift @ARGV;
+
+    if (-d $file) {
+        open my $FIND, '-|', 'find', $file, @find_args
+            or die "$progname: couldn't exec find: $!\n";
+
+        while (my $found = <$FIND>) {
+            chomp($found);
+            # Silently skip empty files or ignored files
+            next if -z $found or $found =~ $ignore_regex;
+            if ( not $check_regex or $found =~ $check_regex ) {
+                push @files, $found;
+            }
+            else {
+                warn "skipped file $found\n" if $OPT{skipped};
+            }
+        }
+        close $FIND;
+    }
+    elsif ($file =~ $ignore_regex) {
+        # Silently skip ignored files
+        next;
+    }
+    elsif ( $files_count == 1 or not $check_regex or $file =~ $check_regex ) {
+        push @files, $file;
+    }
+    else {
+        warn "skipped file $file\n" if $OPT{skipped};
+    }
+}
+
+while (@files) {
+    my $file = shift @files;
+    my $content = '';
+    my $copyright_match;
+    my $copyright = '';
+
+    my $st = stat $file;
+
+    my $enc = $OPT{encoding};
+    # We need to use "<" when the encoding is unknown, otherwise we
+    # break compatibility.
+    my $mode = $enc ? "<:encoding($enc)" : '<';
+    my $fh = IO::File->new($file, $mode) or die "Unable to access $file\n";
+
+    while ( my $line = $fh->getline ) {
+        last if ($fh->input_line_number > $OPT{'lines'});
+        $content .= $line;
+    }
+
+    my %copyrights = extract_copyright($content);
+
+    print qq(----- $file header -----\n$content----- end header -----\n\n)
+        if $OPT{'verbose'};
+
+    my $license = parselicense(clean_cruft_and_spaces(clean_comments($content)));
+    $copyright = join(" / ", reverse sort values %copyrights);
+
+    if ( not $copyright and $license eq 'UNKNOWN') {
+        my $position = $fh->tell; # See IO::Seekable
+        my $tail_size = $OPT{tail} // $def_tail;
+        my $jump = $st->size - $tail_size;
+        $jump = $position if $jump < $position;
+
+        my $tail = '';
+        if ( $tail_size and $jump < $st->size) {
+            $fh->seek($jump, SEEK_SET); # also IO::Seekable
+            $tail .= join('', $fh->getlines);
+        }
+
+        print qq(----- $file tail -----\n$tail----- end tail -----\n\n)
+            if $OPT{'verbose'};
+
+        %copyrights = extract_copyright($tail);
+        $license = parselicense(clean_cruft_and_spaces(clean_comments($tail)));
+        $copyright = join(" / ", reverse sort values %copyrights);
+    }
+
+    $fh->close;
+
+    if ($OPT{'machine'}) {
+        print "$file\t$license";
+        print "\t" . ($copyright or "*No copyright*") if $OPT{'copyright'};
+        print "\n";
+    } else {
+        print "$file: ";
+        print "*No copyright* " unless $copyright;
+        print $license . "\n";
+        print "  [Copyright: " . $copyright . "]\n"
+            if $copyright and $OPT{'copyright'};
+        print "\n" if $OPT{'copyright'};
+    }
+}
+
+sub extract_copyright {
+    my $content = shift;
+    my @c = split /\n/, clean_comments($content);
+
+    my %copyrights;
+    my $lines_after_copyright_block = 0;
+
+    my $in_copyright_block = 0;
+    while (@c) {
+        my $line = shift @c;
+        my $copyright_match = parse_copyright($line, \$in_copyright_block);
+        if ($copyright_match) {
+            while (@c and $copyright_match =~ /\d[,.]?\s*$/) {
+                # Looks like the copyright ends with a year; assume the
+                # owner is on the next line(s).
+                $copyright_match .= ' ' . shift @c;
+            }
+            $copyright_match =~ s/\s+/ /g;
+            $copyright_match =~ s/\s*$//;
+            $copyrights{lc("$copyright_match")} = "$copyright_match";
+        }
+        elsif (scalar keys %copyrights) {
+            # Skip remaining lines if a copyright block was found more
+            # than 5 lines ago, so a copyright block may contain up to
+            # 5 blank lines, but no more.
+            last if $lines_after_copyright_block++ > 5;
+        }
+    }
+    return %copyrights;
+}
+
+sub parse_copyright {
+    my $data = shift;
+    my $in_copyright_block_ref = shift;
+    my $copyright = '';
+    my $match;
+
+    if ( $data !~ $copyright_predisindicator_regex) {
+        #print "match against ->$data<-\n";
+        if ($data =~ $copyright_indicator_regex_with_capture) {
+            $match = $1;
+            $$in_copyright_block_ref = 1;
+            # Ignore lines matching "see foo for copyright information" etc.
+ if ($match !~ $copyright_disindicator_regex) { + # De-cruft + $match =~ s/$copyright_indicator_regex//igx; + $match =~ s/^\s+//; + $match =~ s/\s*\bby\b\s*/ /; + $match =~ s/([,.])?\s*$//; + $match =~ s/\s{2,}/ /g; + $match =~ s/\\//g; # de-cruft nroff files + $match =~ s/\s*[*#]\s*$//; + $copyright = $match; + } + } + elsif ($$in_copyright_block_ref and $data =~ /^\d{2,}[,\s]+/) { + # following lines beginning with a year are supposed to be + # continued copyright blocks + $copyright = $data; + } + else { + $$in_copyright_block_ref = 0; + } + } + + return $copyright; +} + +sub clean_comments { + local $_ = shift or return q{}; + + # Remove generic comments: look for 4 or more lines beginning with + # regular comment pattern and trim it. Fall back to old algorithm + # if no such pattern found. + my @matches = m/^\s*((?:[^a-zA-Z0-9\s]{1,3}|\bREM\b))\s\w/mg; + if (@matches >= 4) { + my $comment_re = qr/\s*[\Q$matches[0]\E]{1,3}\s*/; + s/^$comment_re//mg; + } + + # Remove Fortran comments + s/^[cC] //gm; + + # Remove C / C++ comments + s#(\*/|/[/*])##g; + + return $_; +} + +sub clean_cruft_and_spaces { + local $_ = shift or return q{}; + + tr/\t\r\n/ /; + + # this also removes quotes + tr% A-Za-z.+,@:;0-9\(\)/-%%cd; + tr/ //s; + + return $_; +} + +sub help { + print <<"EOF"; +Usage: $progname [options] filename [filename ...] +Valid options are: + --help, -h Display this message + --version, -v Display version and copyright info + --no-conf, --noconf Don't read devscripts config files; must be + the first option given + --verbose Display the header of each file before its + license information + --skipped, -s Show skipped files + --lines, -l Specify how many lines of the file header + should be parsed for license information + (Default: $def_lines) + --tail Specify how many bytes to parse at end of file + (Default: $def_tail) + --check, -c Specify a pattern indicating which files should + be checked + (Default: '$default_check_regex') + --machine, -m Display in a machine readable way (good for awk) + --recursive, -r Add the contents of directories recursively + --copyright Also display the file's copyright + --ignore, -i Specify that files / directories matching the + regular expression should be ignored when + checking files + (Default: '$default_ignore_regex') + +Default settings modified by devscripts configuration files: +$modified_conf_msg +EOF +} + +sub version { + print <<"EOF"; +This is $progname, from the Debian devscripts package, version 2.16.2 +Copyright (C) 2007, 2008 by Adam D. Barratt ; based +on a script of the same name from the KDE SDK by . + +This program comes with ABSOLUTELY NO WARRANTY. +You are free to redistribute this code under the terms of the +GNU General Public License, version 2, or (at your option) any +later version. +EOF +} + +sub parselicense { + my ($licensetext) = @_; + + my $gplver = ""; + my $extrainfo = ""; + my $license = ""; + + if ($licensetext =~ /version ([^ ]+)(?: of the License)?,? or(?: \(at your option\))? version (\d(?:[.-]\d+)*)/) { + $gplver = " (v$1 or v$2)"; + } elsif ($licensetext =~ /version ([^, ]+?)[.,]? (?:\(?only\)?.? )?(?:of the GNU (Affero )?(Lesser |Library )?General Public License )?(as )?published by the Free Software Foundation/i or + $licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License (?:as )?published by the Free Software Foundation[;,] version ([^, ]+?)[.,]? 
/i) { + + $gplver = " (v$1)"; + } elsif ($licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License\s*(?:[(),GPL]+)\s*version (\d+(?:\.\d+)?)[ \.]/i) { + $gplver = " (v$1)"; + } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or (?:\(at your option\) )?any later version/) { + $gplver = " (v$1 or later)"; + } elsif ($licensetext =~ /GPL\sas\spublished\sby\sthe\sFree\sSoftware\sFoundation,\sversion\s([\d.]+)/i ) { + $gplver = " (v$1)"; + } elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0-or-later/i ){ + $gplver = " (v$1 or later)"; + } elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0[^+]/i ) { + $gplver = " (v$1)"; + } elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0\+/i ) { + $gplver = " (v$1 or later)"; + } elsif ($licensetext =~ /SPDX-License-Identifier:\s+LGPL-([1-9])\.[0-1]\-or-later/i ) { + $gplver = " (v$1 or later)"; + } + + if ($licensetext =~ /(?:675 Mass Ave|59 Temple Place|51 Franklin Steet|02139|02111-1307)/i) { + $extrainfo = " (with incorrect FSF address)$extrainfo"; + } + + if ($licensetext =~ /permission (?:is (also granted|given))? to link (the code of )?this program with (any edition of )?(Qt|the Qt library)/i) { + $extrainfo = " (with Qt exception)$extrainfo" + } + + if ($licensetext =~ /As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice/) { + $extrainfo = " (with Bison parser exception)$extrainfo"; + } + + # exclude blurb found in boost license text + if ($licensetext =~ /(All changes made in this file will be lost|DO NOT (EDIT|delete this file)|Generated (automatically|by|from)|generated.*file)/i + and $licensetext !~ /unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor/) { + $license = "GENERATED FILE"; + } + + if ($licensetext =~ /(are made available|(is free software.? )?you can redistribute (it|them) and(?:\/|\s+)or modify (it|them)|is licensed) under the terms of (version [^ ]+ of )?the (GNU (Library |Lesser )General Public License|LGPL)/i) { + $license = "LGPL$gplver$extrainfo $license"; + } + # For Perl modules handled by Dist::Zilla + elsif ($licensetext =~ /this is free software,? licensed under:? (?:the )?(?:GNU (?:Library |Lesser )General Public License|LGPL),? version ([\d\.]+)/i) { + $license = "LGPL (v$1) $license"; + } + + if ($licensetext =~ /is free software.? you can redistribute (it|them) and(?:\/|\s+)or modify (it|them) under the terms of the (GNU Affero General Public License|AGPL)/i) { + $license = "AGPL$gplver$extrainfo $license"; + } + + if ($licensetext =~ /(is free software.? )?you (can|may) redistribute (it|them) and(?:\/|\s+)or modify (it|them) under the terms of (?:version [^ ]+ (?:\(?only\)? 
)?of )?the GNU General Public License/i) { + $license = "GPL$gplver$extrainfo $license"; + } + + if ($licensetext =~ /is distributed under the terms of the GNU General Public License,/ + and length $gplver) { + $license = "GPL$gplver$extrainfo $license"; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+GPL/i and length $gplver) { + $license = "GPL$gplver$extrainfo $license"; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+GPL-2.0-or-later/i and length $gplver) { + $license = "GPL$gplver$extrainfo"; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+LGPL/i and length $gplver) { + $license = "LGPL$gplver$extrainfo $license"; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+Zlib/i) { + $license = "zlib/libpng $license"; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-3-Clause/i) { + $license = 'BSD (3 clause)'; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-2-Clause/i) { + $license = 'BSD (2 clause)'; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-1-Clause/i) { + $license = 'BSD (1 clause)'; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+MIT/i) { + $license = 'MIT/X11 (BSD like)'; + } + + if ($licensetext =~ /SPDX-License-Identifier:\s+ISC/i) { + $license = 'ISC'; + } + + if ($licensetext =~ /(?:is|may be)\s(?:(?:distributed|used).*?terms|being\s+released).*?\b(L?GPL)\b/) { + my $v = $gplver || ' (unversioned/unknown version)'; + $license = "$1$v $license"; + } + + if ($licensetext =~ /the rights to distribute and use this software as governed by the terms of the Lisp Lesser General Public License|\bLLGPL\b/ ) { + $license = "LLGPL $license"; + } + + if ($licensetext =~ /This file is part of the .*Qt GUI Toolkit. This file may be distributed under the terms of the Q Public License as defined/) { + $license = "QPL (part of Qt) $license"; + } elsif ($licensetext =~ /may (be distributed|redistribute it) under the terms of the Q Public License/) { + $license = "QPL $license"; + } + + if ($licensetext =~ /opensource\.org\/licenses\/mit-license\.php/) { + $license = "MIT/X11 (BSD like) $license"; + } elsif ($licensetext =~ /Permission is hereby granted, free of charge, to any person obtaining a copy of this software and(\/or)? associated documentation files \(the (Software|Materials)\), to deal in the (Software|Materials)/) { + $license = "MIT/X11 (BSD like) $license"; + } elsif ($licensetext =~ /Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy, modify, and distribute this software and its documentation for any purpose/) { + $license = "MIT/X11 (BSD like) $license"; + } + + if ($licensetext =~ /Permission to use, copy, modify, and(\/or)? distribute this software for any purpose with or without fee is hereby granted, provided.*copyright notice.*permission notice.*all copies/) { + $license = "ISC $license"; + } + + if ($licensetext =~ /THIS SOFTWARE IS PROVIDED .*AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY/) { + if ($licensetext =~ /All advertising materials mentioning features or use of this software must display the following acknowledge?ment.*This product includes software developed by/i) { + $license = "BSD (4 clause) $license"; + } elsif ($licensetext =~ /(The name(?:\(s\))? .*? may not|Neither the (names? .*?|authors?) nor the names of( (its|their|other|any))? 
contributors may) be used to endorse or promote products derived from this software/i) { + $license = "BSD (3 clause) $license"; + } elsif ($licensetext =~ /Redistributions in binary form must reproduce the above copyright notice/i) { + $license = "BSD (2 clause) $license"; + } else { + $license = "BSD $license"; + } + } + + if ($licensetext =~ /Mozilla Public License,? (?:(?:Version|v\.)\s+)?(\d+(?:\.\d+)?)/) { + $license = "MPL (v$1) $license"; + } + elsif ($licensetext =~ /Mozilla Public License,? \((?:Version|v\.) (\d+(?:\.\d+)?)\)/) { + $license = "MPL (v$1) $license"; + } + + # match when either: + # - the text *begins* with "The Artistic license v2.0" which is (hopefully) the actual artistic license v2.0 text. + # - a license grant is found. i.e something like "this is free software, licensed under the artistic license v2.0" + if ($licensetext =~ /(?:^\s*|(?:This is free software, licensed|Released|be used|use and modify this (?:module|software)) under (?:the terms of )?)[Tt]he Artistic License ([v\d.]*\d)/) { + $license = "Artistic (v$1) $license"; + } + + if ($licensetext =~ /is free software under the Artistic [Ll]icense/) { + $license = "Artistic $license"; + } + + if ($licensetext =~ /This program is free software; you can redistribute it and\/or modify it under the same terms as Perl itself/) { + $license = "Perl $license"; + } + + if ($licensetext =~ /under the Apache License, Version ([^ ]+)/) { + $license = "Apache (v$1) $license"; + } + + if ($licensetext =~ /(THE BEER-WARE LICENSE)/i) { + $license = "Beerware $license"; + } + + if ($licensetext =~ /distributed under the terms of the FreeType project/i) { + $license = "FreeType $license"; # aka FTL see https://www.freetype.org/license.html + } + + if ($licensetext =~ /This source file is subject to version ([^ ]+) of the PHP license/) { + $license = "PHP (v$1) $license"; + } + + if ($licensetext =~ /under the terms of the CeCILL /) { + $license = "CeCILL $license"; + } + + if ($licensetext =~ /under the terms of the CeCILL-([^ ]+) /) { + $license = "CeCILL-$1 $license"; + } + + if ($licensetext =~ /under the SGI Free Software License B/) { + $license = "SGI Free Software License B $license"; + } + + if ($licensetext =~ /is in the public domain/i) { + $license = "Public domain $license"; + } + + if ($licensetext =~ /terms of the Common Development and Distribution License(, Version ([^(]+))? \(the License\)/) { + $license = "CDDL " . ($1 ? "(v$2) " : '') . $license; + } + + if ($licensetext =~ /Microsoft Permissive License \(Ms-PL\)/) { + $license = "Ms-PL $license"; + } + + if ($licensetext =~ /Licensed under the Academic Free License version ([\d.]+)/) { + $license = $1 ? "AFL-$1" : "AFL"; + } + + if ($licensetext =~ /This program and the accompanying materials are made available under the terms of the Eclipse Public License v?([\d.]+)/) { + $license = $1 ? "EPL-$1" : "EPL"; + } + + # quotes were removed by clean_comments function + if ($licensetext =~ /Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license \(the Software\)/ or + $licensetext =~ /Boost Software License([ ,-]+Version ([^ ]+)?(\.))/i) { + $license = "BSL " . ($1 ? "(v$2) " : '') . $license; + } + + if ($licensetext =~ /PYTHON SOFTWARE FOUNDATION LICENSE (VERSION ([^ ]+))/i) { + $license = "PSF " . ($1 ? "(v$2) " : '') . 
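+    # For example, a (hypothetical) header line such as
+    #     SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+    # is split by the "(X OR Y)" handling further below into two
+    # identifiers that are parsed recursively, yielding
+    #     GPL (v2 or later);MIT/X11 (BSD like)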
$license;
+    }
+
+    if ($licensetext =~ /The origin of this software must not be misrepresented.*Altered source versions must be plainly marked as such.*This notice may not be removed or altered from any source distribution/ or
+        $licensetext =~ /see copyright notice in zlib\.h/) {
+        $license = "zlib/libpng $license";
+    } elsif ($licensetext =~ /This code is released under the libpng license/) {
+        $license = "libpng $license";
+    }
+
+    if ($licensetext =~ /Do What The Fuck You Want To Public License, Version ([^, ]+)/i) {
+        $license = "WTFPL (v$1) $license";
+    }
+
+    if ($licensetext =~ /Do what The Fuck You Want To Public License/i) {
+        $license = "WTFPL $license";
+    }
+
+    if ($licensetext =~ /(License WTFPL|Under (the|a) WTFPL)/i) {
+        $license = "WTFPL $license";
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+\(([a-zA-Z0-9-\.]+)\s+OR\s+([a-zA-Z0-9-\.]+)\)/i) {
+        my $license1 = $1;
+        my $license2 = $2;
+        $license = parselicense("SPDX-License-Identifier: $license1") . ";" . parselicense("SPDX-License-Identifier: $license2");
+    }
+
+    $license = "UNKNOWN" if (!length($license));
+
+    # Remove trailing spaces.
+    $license =~ s/\s+$//;
+
+    return $license;
+}
+
+sub fatal {
+    my ($pack, $file, $line) = caller();
+    (my $msg = "$progname: fatal error at line $line:\n@_\n") =~ tr/\0//d;
+    $msg =~ s/\n\n$/\n/;
+    die $msg;
+}
diff --git a/tools/list_protos_in_cap.sh b/tools/list_protos_in_cap.sh
new file mode 100755
index 0000000..0ddfdd1
--- /dev/null
+++ b/tools/list_protos_in_cap.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+# List the protocols (dissectors) used in capture file(s)
+#
+# The Python script indexcap.py does the same thing.
+#
+# This script extracts the protocol names contained in a given capture file.
+# This is useful for generating a "database" (flat file :-)) of in what file
+# a given protocol can be found.
+#
+# Output consists of the file name followed by the protocols, for example:
+# /path/to/the/file.pcap eth ip sctp
+#
+# Copyright 2012 Jeff Morriss
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later

+# Directory containing binaries. Default current directory.
+WS_BIN_PATH=${WS_BIN_PATH:-.}
+
+# Tweak the following to your liking.
+TSHARK="$WS_BIN_PATH/tshark"
+CAPINFOS="$WS_BIN_PATH/capinfos"
+
+if [ "$WS_BIN_PATH" = "." ]; then
+    export WIRESHARK_RUN_FROM_BUILD_DIRECTORY=
+fi
+
+NOTFOUND=0
+for i in "$TSHARK" "$CAPINFOS"
+do
+    if [ ! -x "$i" ]
+    then
+        echo "Couldn't find $i" 1>&2
+        NOTFOUND=1
+    fi
+done
+if [ $NOTFOUND -eq 1 ]
+then
+    exit 1
+fi
+
+# Make sure we have at least one file
+FOUND=0
+for CF in "$@"
+do
+    if [ "$OSTYPE" == "cygwin" ]
+    then
+        CF=`cygpath --windows "$CF"`
+    fi
+    "$CAPINFOS" "$CF" > /dev/null 2>&1 && FOUND=1
+    if [ $FOUND -eq 1 ]
+    then
+        break
+    fi
+done
+
+if [ $FOUND -eq 0 ] ; then
+    cat << _EOF_ 1>&2
+Usage: $0 capture_file [capture_file ...]
+
+No valid capture file was found.
+_EOF_
+    exit 1
+fi
+
+for CF in "$@"
+do
+    if [ "$OSTYPE" == "cygwin" ]
+    then
+        CF=`cygpath --windows "$CF"`
+    fi
+
+    if [ ! -f "$CF" ] ; then
+        echo "$CF is not a regular file" 1>&2
+        continue
+    fi
+
+    "$CAPINFOS" "$CF" > /dev/null
+    RETVAL=$?
+    if [ $RETVAL -ne 0 ] ; then
+        echo "Not a valid capture file (or some other problem)" 1>&2
+        continue
+    fi
+
+    printf "%s: " "$CF"
+
+    # Extract the protocol names.
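+    # A sketch of what the pipeline below produces: frame.protocols is a
+    # colon-separated chain per frame, e.g. "eth:ethertype:ip:tcp", so for
+    # a hypothetical example.pcap
+    #     tshark -T fields -e frame.protocols -nr example.pcap | \
+    #         tr ':\r' '\n' | sort -u | tr '\n\r' ' '
+    # would print "eth ethertype ip tcp ".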
+    $TSHARK -T fields -e frame.protocols -nr "$CF" 2>/dev/null | \
+        tr ':\r' '\n' | sort -u | tr '\n\r' ' '
+
+    printf "\n"
+done
+
diff --git a/tools/macos-setup-brew.sh b/tools/macos-setup-brew.sh
new file mode 100755
index 0000000..17c92ce
--- /dev/null
+++ b/tools/macos-setup-brew.sh
@@ -0,0 +1,173 @@
+#!/bin/bash
+# Copyright 2014, Evan Huus (See AUTHORS file)
+#
+# Enhanced (2016) by Alexis La Goutte (for use with Travis CI)
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+set -e -u -o pipefail
+
+eval "$(brew shellenv)"
+
+HOMEBREW_NO_AUTO_UPDATE=${HOMEBREW_NO_AUTO_UPDATE:-}
+
+function print_usage() {
+    printf "\\nUtility to set up a macOS system for Wireshark development using Homebrew.\\n"
+    printf "The basic usage installs the needed software\\n\\n"
+    printf "Usage: %s [--install-optional] [--install-dmg-deps] [...other options...]\\n" "$0"
+    printf "\\t--install-optional: install optional software as well\\n"
+    printf "\\t--install-doc-deps: install packages required to build the documentation\\n"
+    printf "\\t--install-dmg-deps: install packages required to build the .dmg file\\n"
+    printf "\\t--install-sparkle-deps: install the Sparkle automatic updater\\n"
+    printf "\\t--install-test-deps: install packages required to run the test suite\\n"
+    printf "\\t--install-all: install everything\\n"
+    printf "\\t[other]: other options are passed as-is to brew\\n"
+}
+
+INSTALLED_FORMULAE=$( brew list --formulae )
+function install_formulae() {
+    INSTALL_LIST=()
+    for FORMULA in "$@" ; do
+        if ! grep --word-regexp "$FORMULA" > /dev/null 2>&1 <<<"$INSTALLED_FORMULAE" ; then
+            INSTALL_LIST+=( "$FORMULA" )
+        fi
+    done
+    if (( ${#INSTALL_LIST[@]} != 0 )); then
+        brew install "${INSTALL_LIST[@]}"
+    else
+        printf "Nothing to install.\n"
+    fi
+}
+
+INSTALL_OPTIONAL=0
+INSTALL_DOC_DEPS=0
+INSTALL_DMG_DEPS=0
+INSTALL_SPARKLE_DEPS=0
+INSTALL_TEST_DEPS=0
+OPTIONS=()
+for arg; do
+    case $arg in
+        --help)
+            print_usage
+            exit 0
+            ;;
+        --install-optional)
+            INSTALL_OPTIONAL=1
+            ;;
+        --install-doc-deps)
+            INSTALL_DOC_DEPS=1
+            ;;
+        --install-dmg-deps)
+            INSTALL_DMG_DEPS=1
+            ;;
+        --install-sparkle-deps)
+            INSTALL_SPARKLE_DEPS=1
+            ;;
+        --install-test-deps)
+            INSTALL_TEST_DEPS=1
+            ;;
+        --install-all)
+            INSTALL_OPTIONAL=1
+            INSTALL_DOC_DEPS=1
+            INSTALL_DMG_DEPS=1
+            INSTALL_SPARKLE_DEPS=1
+            INSTALL_TEST_DEPS=1
+            ;;
+        *)
+            OPTIONS+=("$arg")
+            ;;
+    esac
+done
+
+BUILD_LIST=(
+    ccache
+    cmake
+    ninja
+)
+
+# Qt isn't technically required, but...
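+# (The lists below are fed to install_formulae above, which skips
+# anything already reported by "brew list --formulae"; e.g.,
+# hypothetically, rerunning this script right after a successful run
+# just prints "Nothing to install.")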
+REQUIRED_LIST=( + c-ares + glib + libgcrypt + pcre2 + qt6 + speexdsp +) + +ADDITIONAL_LIST=( + brotli + gettext + gnutls + libilbc + libmaxminddb + libnghttp2 + libnghttp3 + libsmi + libssh + libxml2 + lua@5.1 + lz4 + minizip + opus + snappy + spandsp + zstd +) + +DOC_DEPS_LIST=( + asciidoctor + docbook + docbook-xsl +) + +ACTUAL_LIST=( "${BUILD_LIST[@]}" "${REQUIRED_LIST[@]}" ) + +# Now arrange for optional support libraries +if [ $INSTALL_OPTIONAL -ne 0 ] ; then + ACTUAL_LIST+=( "${ADDITIONAL_LIST[@]}" ) +fi + +if [ $INSTALL_DOC_DEPS -ne 0 ] ; then + ACTUAL_LIST+=( "${DOC_DEPS_LIST[@]}" ) +fi + +if (( ${#OPTIONS[@]} != 0 )); then + ACTUAL_LIST+=( "${OPTIONS[@]}" ) +fi + +install_formulae "${ACTUAL_LIST[@]}" + +if [ $INSTALL_DMG_DEPS -ne 0 ] ; then + pip3 install dmgbuild +fi + +if [ $INSTALL_SPARKLE_DEPS -ne 0 ] ; then + brew cask install sparkle +fi + +if [ $INSTALL_TEST_DEPS -ne 0 ] ; then + pip3 install pytest pytest-xdist +fi + +# Uncomment to add PNG compression utilities used by compress-pngs: +# brew install advancecomp optipng oxipng pngcrush + +# Uncomment to enable generation of documentation +# brew install asciidoctor + +exit 0 +# +# Editor modelines +# +# Local Variables: +# c-basic-offset: 4 +# tab-width: 8 +# indent-tabs-mode: nil +# End: +# +# ex: set shiftwidth=4 tabstop=8 expandtab: +# :indentSize=4:tabSize=8:noTabs=true: +# diff --git a/tools/macos-setup.sh b/tools/macos-setup.sh new file mode 100755 index 0000000..ec25bf7 --- /dev/null +++ b/tools/macos-setup.sh @@ -0,0 +1,3865 @@ +#!/bin/bash +# Setup development environment on macOS (tested with 10.6.8 and Xcode +# 3.2.6 and with 10.12.4 and Xcode 8.3). +# +# Copyright 2011 Michael Tuexen, Joerg Mayer, Guy Harris (see AUTHORS file) +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +shopt -s extglob + +# +# Get the major version of Darwin, so we can check the major macOS +# version. +# +DARWIN_MAJOR_VERSION=`uname -r | sed 's/\([0-9]*\).*/\1/'` + +# +# The minimum supported version of Qt is 5.9, so the minimum supported version +# of macOS is OS X 10.10 (Yosemite), aka Darwin 14.0. +# +if [[ $DARWIN_MAJOR_VERSION -lt 14 ]]; then + echo "This script does not support any versions of macOS before Yosemite" 1>&2 + exit 1 +fi + +# +# Get the processor architecture of Darwin. Currently supported: arm, i386 +# +DARWIN_PROCESSOR_ARCH=`uname -p` + +if [ "$DARWIN_PROCESSOR_ARCH" != "arm" -a "$DARWIN_PROCESSOR_ARCH" != "i386" ]; then + echo "This script does not support this processor architecture" 1>&2 + exit 1 +fi + +# +# Versions of packages to download and install. +# + +# +# We use curl, but older versions of curl in older macOS releases can't +# handle some sites - including the xz site. +# +# If the version of curl in the system is older than 7.54.0, download +# curl and install it. +# +current_curl_version=`curl --version | sed -n 's/curl \([0-9.]*\) .*/\1/p'` +current_curl_major_version="`expr $current_curl_version : '\([0-9][0-9]*\).*'`" +current_curl_minor_version="`expr $current_curl_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`" +if [[ $current_curl_major_version -lt 7 || + ($current_curl_major_version -eq 7 && + $current_curl_minor_version -lt 54) ]]; then + CURL_VERSION=${CURL_VERSION-7.60.0} +fi + +# +# Some packages need xz to unpack their current source. +# XXX: tar, since macOS 10.9, can uncompress xz'ed tarballs, +# so perhaps we could get rid of this now? 
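+# (For reference, the version parsing above relies on "expr STRING : REGEX"
+# printing the first capture group; e.g.
+#     expr 7.60.0 : '\([0-9][0-9]*\).*'
+# prints "7", and the minor-version pattern prints "60".)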
+# +XZ_VERSION=5.2.5 + +# +# Some packages need lzip to unpack their current source. +# +LZIP_VERSION=1.21 + +# +# The version of libPCRE on Catalina is insufficient to build glib due to +# missing UTF-8 support. +# +PCRE_VERSION=8.45 + +# +# CMake is required to do the build - and to build some of the +# dependencies. +# +CMAKE_VERSION=${CMAKE_VERSION-3.21.4} + +# +# Ninja isn't required, as make is provided with Xcode, but it is +# claimed to build faster than make. +# Comment it out if you don't want it. +# +NINJA_VERSION=${NINJA_VERSION-1.10.2} + +# +# The following libraries and tools are required even to build only TShark. +# +GETTEXT_VERSION=0.21 +GLIB_VERSION=2.76.6 +if [ "$GLIB_VERSION" ]; then + GLIB_MAJOR_VERSION="`expr $GLIB_VERSION : '\([0-9][0-9]*\).*'`" + GLIB_MINOR_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + GLIB_DOTDOT_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + GLIB_MAJOR_MINOR_VERSION=$GLIB_MAJOR_VERSION.$GLIB_MINOR_VERSION + GLIB_MAJOR_MINOR_DOTDOT_VERSION=$GLIB_MAJOR_VERSION.$GLIB_MINOR_VERSION.$GLIB_DOTDOT_VERSION +fi +PKG_CONFIG_VERSION=0.29.2 +# +# libgpg-error is required for libgcrypt. +# +LIBGPG_ERROR_VERSION=1.39 +# +# libgcrypt is required. +# +LIBGCRYPT_VERSION=1.8.7 +# +# libpcre2 is required. +# +PCRE2_VERSION=10.39 + +# +# One or more of the following libraries are required to build Wireshark. +# +# To override the version of Qt call the script with some of the variables +# set to the new values. Setting the variable to empty will disable building +# the toolkit and will uninstall # any version previously installed by the +# script, e.g. +# "QT_VERSION=5.10.1 ./macos-setup.sh" +# will build and install with QT 5.10.1. +# +QT_VERSION=${QT_VERSION-6.2.4} + +if [ "$QT_VERSION" ]; then + QT_MAJOR_VERSION="`expr $QT_VERSION : '\([0-9][0-9]*\).*'`" + QT_MINOR_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + QT_DOTDOT_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + QT_MAJOR_MINOR_VERSION=$QT_MAJOR_VERSION.$QT_MINOR_VERSION + QT_MAJOR_MINOR_DOTDOT_VERSION=$QT_MAJOR_VERSION.$QT_MINOR_VERSION.$QT_DOTDOT_VERSION +fi + +# +# The following libraries are optional. +# Comment them out if you don't want them, but note that some of +# the optional libraries are required by other optional libraries. +# +LIBSMI_VERSION=0.4.8 +GNUTLS_VERSION=3.7.8 +if [ "$GNUTLS_VERSION" ]; then + # + # We'll be building GnuTLS, so we may need some additional libraries. + # We assume GnuTLS can work with Nettle; newer versions *only* use + # Nettle, not libgcrypt. + # + GNUTLS_MAJOR_VERSION="`expr $GNUTLS_VERSION : '\([0-9][0-9]*\).*'`" + GNUTLS_MINOR_VERSION="`expr $GNUTLS_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + NETTLE_VERSION=3.9.1 + + # + # And, in turn, Nettle requires GMP. 
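+    # (So the chain pulled in by setting GNUTLS_VERSION is
+    #  GnuTLS -> Nettle -> GMP, plus p11-kit -> libtasn1, all set below.)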
+ # + GMP_VERSION=6.3.0 + + # + # And p11-kit + P11KIT_VERSION=0.25.0 + + # Which requires libtasn1 + LIBTASN1_VERSION=4.19.0 +fi +# Use 5.2.4, not 5.3, for now; lua_bitop.c hasn't been ported to 5.3 +# yet, and we need to check for compatibility issues (we'd want Lua +# scripts to work with 5.1, 5.2, and 5.3, as long as they only use Lua +# features present in all three versions) +LUA_VERSION=5.2.4 +SNAPPY_VERSION=1.1.10 +ZSTD_VERSION=1.5.5 +LIBXML2_VERSION=2.11.5 +LZ4_VERSION=1.9.4 +SBC_VERSION=2.0 +CARES_VERSION=1.19.1 +LIBSSH_VERSION=0.10.5 +# mmdbresolve +MAXMINDDB_VERSION=1.4.3 +NGHTTP2_VERSION=1.56.0 +NGHTTP3_VERSION=0.15.0 +SPANDSP_VERSION=0.0.6 +SPEEXDSP_VERSION=1.2.1 +if [ "$SPANDSP_VERSION" ]; then + # + # SpanDSP depends on libtiff. + # + LIBTIFF_VERSION=3.8.1 +fi +BCG729_VERSION=1.1.1 +# libilbc 3.0.0 & later link with absiel, which is released under Apache 2.0 +ILBC_VERSION=2.0.2 +OPUS_VERSION=1.4 + +# +# Is /usr/bin/python3 a working version of Python? It may be, as it +# might be a wrapper that runs the Python 3 that's part of Xcode. +# +if /usr/bin/python3 --version >/dev/null 2>&1 +then + # + # Yes - don't bother installing Python 3 from elsewhere + # + : +else + # + # No - install a Python package. + # + PYTHON3_VERSION=3.9.5 +fi +BROTLI_VERSION=1.0.9 +# minizip +ZLIB_VERSION=1.3 +# Uncomment to enable automatic updates using Sparkle +#SPARKLE_VERSION=2.1.0 + +# +# Asciidoctor is required to build the documentation. +# +ASCIIDOCTOR_VERSION=${ASCIIDOCTOR_VERSION-2.0.16} +ASCIIDOCTORPDF_VERSION=${ASCIIDOCTORPDF_VERSION-1.6.1} + +# +# GNU autotools. They're not supplied with the macOS versions we +# support, and we currently use them for minizip. +# +AUTOCONF_VERSION=2.71 +AUTOMAKE_VERSION=1.16.5 +LIBTOOL_VERSION=2.4.6 + +install_curl() { + if [ "$CURL_VERSION" -a ! -f curl-$CURL_VERSION-done ] ; then + echo "Downloading, building, and installing curl:" + [ -f curl-$CURL_VERSION.tar.bz2 ] || curl -L -O https://curl.haxx.se/download/curl-$CURL_VERSION.tar.bz2 || exit 1 + $no_build && echo "Skipping installation" && return + bzcat curl-$CURL_VERSION.tar.bz2 | tar xf - || exit 1 + cd curl-$CURL_VERSION + ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch curl-$CURL_VERSION-done + fi +} + +uninstall_curl() { + if [ ! -z "$installed_curl_version" ] ; then + echo "Uninstalling curl:" + cd curl-$installed_curl_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm curl-$installed_curl_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf curl-$installed_curl_version + rm -rf curl-$installed_curl_version.tar.bz2 + fi + + installed_curl_version="" + fi +} + +install_xz() { + if [ "$XZ_VERSION" -a ! -f xz-$XZ_VERSION-done ] ; then + echo "Downloading, building, and installing xz:" + [ -f xz-$XZ_VERSION.tar.bz2 ] || curl -L -O https://tukaani.org/xz/xz-$XZ_VERSION.tar.bz2 || exit 1 + $no_build && echo "Skipping installation" && return + bzcat xz-$XZ_VERSION.tar.bz2 | tar xf - || exit 1 + cd xz-$XZ_VERSION + # + # This builds and installs liblzma, which libxml2 uses, and + # Wireshark uses liblzma, so we need to build this with + # all the minimum-deployment-version and SDK stuff. + # + CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. 
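+        # (The "<name>-<version>-done" stamp files, e.g. xz-5.2.5-done
+        # here, are what make these install_* functions idempotent;
+        # deleting a stamp file forces that package to be rebuilt on
+        # the next run.)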
+ touch xz-$XZ_VERSION-done + fi +} + +uninstall_xz() { + if [ ! -z "$installed_xz_version" ] ; then + echo "Uninstalling xz:" + cd xz-$installed_xz_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm xz-$installed_xz_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf xz-$installed_xz_version + rm -rf xz-$installed_xz_version.tar.bz2 + fi + + installed_xz_version="" + fi +} + +install_lzip() { + if [ "$LZIP_VERSION" -a ! -f lzip-$LZIP_VERSION-done ] ; then + echo "Downloading, building, and installing lzip:" + [ -f lzip-$LZIP_VERSION.tar.gz ] || curl -L -O https://download.savannah.gnu.org/releases/lzip/lzip-$LZIP_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat lzip-$LZIP_VERSION.tar.gz | tar xf - || exit 1 + cd lzip-$LZIP_VERSION + ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch lzip-$LZIP_VERSION-done + fi +} + +uninstall_lzip() { + if [ ! -z "$installed_lzip_version" ] ; then + echo "Uninstalling lzip:" + cd lzip-$installed_lzip_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm lzip-$installed_lzip_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf lzip-$installed_lzip_version + rm -rf lzip-$installed_lzip_version.tar.gz + fi + + installed_lzip_version="" + fi +} + +install_pcre() { + if [ "$PCRE_VERSION" -a ! -f pcre-$PCRE_VERSION-done ] ; then + echo "Downloading, building, and installing pcre:" + [ -f pcre-$PCRE_VERSION.tar.bz2 ] || curl -L -O https://sourceforge.net/projects/pcre/files/pcre/$PCRE_VERSION/pcre-$PCRE_VERSION.tar.bz2 || exit 1 + $no_build && echo "Skipping installation" && return + bzcat pcre-$PCRE_VERSION.tar.bz2 | tar xf - || exit 1 + cd pcre-$PCRE_VERSION + ./configure --enable-unicode-properties || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch pcre-$PCRE_VERSION-done + fi +} + +uninstall_pcre() { + if [ ! -z "$installed_pcre_version" ] ; then + echo "Uninstalling pcre:" + cd pcre-$installed_pcre_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm pcre-$installed_pcre_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf pcre-$installed_pcre_version + rm -rf pcre-$installed_pcre_version.tar.bz2 + fi + + installed_pcre_version="" + fi +} + +install_pcre2() { + if [ "$PCRE2_VERSION" -a ! -f "pcre2-$PCRE2_VERSION-done" ] ; then + echo "Downloading, building, and installing pcre2:" + [ -f "pcre2-$PCRE2_VERSION.tar.bz2" ] || curl -L -O "https://github.com/PhilipHazel/pcre2/releases/download/pcre2-$PCRE2_VERSION/pcre2-10.39.tar.bz2" || exit 1 + $no_build && echo "Skipping installation" && return + bzcat "pcre2-$PCRE2_VERSION.tar.bz2" | tar xf - || exit 1 + cd "pcre2-$PCRE2_VERSION" + mkdir build_dir + cd build_dir + # https://github.com/Homebrew/homebrew-core/blob/master/Formula/pcre2.rb + # https://github.com/microsoft/vcpkg/blob/master/ports/pcre2/portfile.cmake + MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" \ + $DO_CMAKE -DBUILD_STATIC_LIBS=OFF -DBUILD_SHARED_LIBS=ON -DPCRE2_SUPPORT_JIT=ON -DPCRE2_SUPPORT_UNICODE=ON .. || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd ../.. 
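+        # (pcre2 is built out of tree in build_dir/, so the manifest that
+        # CMake writes - build_dir/install_manifest.txt - is what
+        # uninstall_pcre2 below replays, one $DO_RM per installed file.)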
+ touch "pcre2-$PCRE2_VERSION-done" + fi +} + +uninstall_pcre2() { + if [ -n "$installed_pcre2_version" ] && [ -s "pcre2-$installed_pcre2_version/build_dir/install_manifest.txt" ] ; then + echo "Uninstalling pcre2:" + # PCRE2 10.39 installs pcre2unicode.3 twice, so this will return an error. + while read -r ; do $DO_RM -v "$REPLY" ; done < <(cat "pcre2-$installed_pcre2_version/build_dir/install_manifest.txt"; echo) + rm "pcre2-$installed_pcre2_version-done" + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf "pcre2-$installed_pcre2_version" + rm -rf "pcre2-$installed_pcre2_version.tar.bz2" + fi + + installed_pcre2_version="" + fi +} + +install_autoconf() { + if [ "$AUTOCONF_VERSION" -a ! -f autoconf-$AUTOCONF_VERSION-done ] ; then + echo "Downloading, building and installing GNU autoconf..." + [ -f autoconf-$AUTOCONF_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/autoconf/autoconf-$AUTOCONF_VERSION.tar.xz || exit 1 + $no_build && echo "Skipping installation" && return + xzcat autoconf-$AUTOCONF_VERSION.tar.xz | tar xf - || exit 1 + cd autoconf-$AUTOCONF_VERSION + ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch autoconf-$AUTOCONF_VERSION-done + fi +} + +uninstall_autoconf() { + if [ ! -z "$installed_autoconf_version" ] ; then + # + # automake and libtool depend on this, so uninstall them. + # + uninstall_libtool "$@" + uninstall_automake "$@" + + echo "Uninstalling GNU autoconf:" + cd autoconf-$installed_autoconf_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm autoconf-$installed_autoconf_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf autoconf-$installed_autoconf_version + rm -rf autoconf-$installed_autoconf_version.tar.xz + fi + + installed_autoconf_version="" + fi +} + +install_automake() { + if [ "$AUTOMAKE_VERSION" -a ! -f automake-$AUTOMAKE_VERSION-done ] ; then + echo "Downloading, building and installing GNU automake..." + [ -f automake-$AUTOMAKE_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/automake/automake-$AUTOMAKE_VERSION.tar.xz || exit 1 + $no_build && echo "Skipping installation" && return + xzcat automake-$AUTOMAKE_VERSION.tar.xz | tar xf - || exit 1 + cd automake-$AUTOMAKE_VERSION + ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch automake-$AUTOMAKE_VERSION-done + fi +} + +uninstall_automake() { + if [ ! -z "$installed_automake_version" ] ; then + # + # libtool depends on this(?), so uninstall it. + # + uninstall_libtool "$@" + + echo "Uninstalling GNU automake:" + cd automake-$installed_automake_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm automake-$installed_automake_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf automake-$installed_automake_version + rm -rf automake-$installed_automake_version.tar.xz + fi + + installed_automake_version="" + fi +} + +install_libtool() { + if [ "$LIBTOOL_VERSION" -a ! -f libtool-$LIBTOOL_VERSION-done ] ; then + echo "Downloading, building and installing GNU libtool..." 
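+        # (The configure call below uses --program-prefix=g, so this
+        # installs as glibtool/glibtoolize and leaves the system libtool
+        # alone; uninstall_libtool reverses that renaming with $DO_MV.)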
+ [ -f libtool-$LIBTOOL_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/libtool/libtool-$LIBTOOL_VERSION.tar.xz || exit 1 + $no_build && echo "Skipping installation" && return + xzcat libtool-$LIBTOOL_VERSION.tar.xz | tar xf - || exit 1 + cd libtool-$LIBTOOL_VERSION + ./configure --program-prefix=g || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch libtool-$LIBTOOL_VERSION-done + fi +} + +uninstall_libtool() { + if [ ! -z "$installed_libtool_version" ] ; then + echo "Uninstalling GNU libtool:" + cd libtool-$installed_libtool_version + $DO_MV /usr/local/bin/glibtool /usr/local/bin/libtool + $DO_MV /usr/local/bin/glibtoolize /usr/local/bin/libtoolize + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm libtool-$installed_libtool_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf libtool-$installed_libtool_version + rm -rf libtool-$installed_libtool_version.tar.xz + fi + + installed_libtool_version="" + fi +} + +install_ninja() { + if [ "$NINJA_VERSION" -a ! -f ninja-$NINJA_VERSION-done ] ; then + echo "Downloading and installing Ninja:" + # + # Download the zipball, unpack it, and move the binary to + # /usr/local/bin. + # + [ -f ninja-mac-v$NINJA_VERSION.zip ] || curl -L -o ninja-mac-v$NINJA_VERSION.zip https://github.com/ninja-build/ninja/releases/download/v$NINJA_VERSION/ninja-mac.zip || exit 1 + $no_build && echo "Skipping installation" && return + unzip ninja-mac-v$NINJA_VERSION.zip + sudo mv ninja /usr/local/bin + touch ninja-$NINJA_VERSION-done + fi +} + +uninstall_ninja() { + if [ ! -z "$installed_ninja_version" ]; then + echo "Uninstalling Ninja:" + sudo rm /usr/local/bin/ninja + rm ninja-$installed_ninja_version-done + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + rm -f ninja-mac-v$installed_ninja_version.zip + fi + + installed_ninja_version="" + fi +} + +install_asciidoctor() { + if [ ! -f asciidoctor-${ASCIIDOCTOR_VERSION}-done ]; then + echo "Downloading and installing Asciidoctor:" + sudo gem install -V asciidoctor --version "=${ASCIIDOCTOR_VERSION}" + touch asciidoctor-${ASCIIDOCTOR_VERSION}-done + fi +} + +uninstall_asciidoctor() { + if [ ! -z "$installed_asciidoctor_version" ]; then + echo "Uninstalling Asciidoctor:" + sudo gem uninstall -V asciidoctor --version "=${installed_asciidoctor_version}" + rm asciidoctor-$installed_asciidoctor_version-done + + ##if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version, + # whatever it might happen to be called. + # + ## rm -f asciidoctor-$installed_asciidoctor_version + ##fi + installed_asciidoctor_version="" + fi +} + +install_asciidoctorpdf() { + if [ ! -f asciidoctorpdf-${ASCIIDOCTORPDF_VERSION}-done ]; then + ## XXX gem does not track dependencies that are installed for asciidoctor-pdf + ## record them for uninstallation + ## ttfunk, pdf-core, prawn, prawn-table, Ascii85, ruby-rc4, hashery, afm, pdf-reader, prawn-templates, public_suffix, addressable, css_parser, prawn-svg, prawn-icon, safe_yaml, thread_safe, polyglot, treetop, asciidoctor-pdf + echo "Downloading and installing Asciidoctor-pdf:" + sudo gem install -V asciidoctor-pdf --version "=${ASCIIDOCTORPDF_VERSION}" + touch asciidoctorpdf-${ASCIIDOCTORPDF_VERSION}-done + fi +} + +uninstall_asciidoctorpdf() { + if [ ! 
-z "$installed_asciidoctorpdf_version" ]; then
+        echo "Uninstalling Asciidoctor-pdf:"
+        sudo gem uninstall -V asciidoctor-pdf --version "=${installed_asciidoctorpdf_version}"
+        ## XXX uninstall dependencies
+        rm asciidoctorpdf-$installed_asciidoctorpdf_version-done
+
+        ##if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version,
+            # whatever it might happen to be called.
+            #
+        ##    rm -f asciidoctorpdf-$installed_asciidoctorpdf_version
+        ##fi
+        installed_asciidoctorpdf_version=""
+    fi
+}
+
+install_cmake() {
+    if [ ! -f cmake-$CMAKE_VERSION-done ]; then
+        echo "Downloading and installing CMake:"
+        CMAKE_MAJOR_VERSION="`expr $CMAKE_VERSION : '\([0-9][0-9]*\).*'`"
+        CMAKE_MINOR_VERSION="`expr $CMAKE_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        CMAKE_MAJOR_MINOR_VERSION=$CMAKE_MAJOR_VERSION.$CMAKE_MINOR_VERSION
+
+        #
+        # NOTE: the "64" in "Darwin64" doesn't mean "64-bit-only"; the
+        # package in question supports both 32-bit and 64-bit x86.
+        #
+        case "$CMAKE_MAJOR_VERSION" in
+
+        0|1|2)
+            echo "CMake $CMAKE_VERSION is too old" 1>&2
+            ;;
+
+        3)
+            #
+            # Download the DMG and do a drag install, where "drag" means
+            # "mv".
+            #
+            # 3.1.1 to 3.19.1 have a Darwin-x86_64 DMG.
+            # 3.19.2 has a macos-universal DMG for 10.10 and later
+            # 3.19.3 and later have a macos-universal DMG for 10.13 and later,
+            # and a macos10.10-universal DMG for 10.10 and later.
+            #
+            if [ "$CMAKE_MINOR_VERSION" -lt 5 ]; then
+                echo "CMake $CMAKE_VERSION is too old" 1>&2
+            elif [ "$CMAKE_MINOR_VERSION" -lt 19 -o \
+                   "$CMAKE_VERSION" = 3.19.0 -o \
+                   "$CMAKE_VERSION" = 3.19.1 ]; then
+                type="Darwin-x86_64"
+            elif [ "$CMAKE_VERSION" = 3.19.2 -o \
+                   "$DARWIN_MAJOR_VERSION" -ge 17 ]; then
+                type="macos-universal"
+            else
+                type="macos10.10-universal"
+            fi
+            [ -f cmake-$CMAKE_VERSION-$type.dmg ] || curl -L -O https://cmake.org/files/v$CMAKE_MAJOR_MINOR_VERSION/cmake-$CMAKE_VERSION-$type.dmg || exit 1
+            $no_build && echo "Skipping installation" && return
+            sudo hdiutil attach cmake-$CMAKE_VERSION-$type.dmg || exit 1
+            sudo ditto /Volumes/cmake-$CMAKE_VERSION-$type/CMake.app /Applications/CMake.app || exit 1
+
+            #
+            # Plant the appropriate symbolic links in /usr/local/bin.
+            # It's a drag-install, so there's no installer to make them,
+            # and the CMake code to put them in place is lame, as
+            #
+            # 1) it defaults to /usr/bin, not /usr/local/bin;
+            # 2) it doesn't request the necessary root privileges;
+            # 3) it can't be run from the command line;
+            #
+            # so we do it ourselves.
+            #
+            for i in ccmake cmake cmake-gui cmakexbuild cpack ctest
+            do
+                sudo ln -s /Applications/CMake.app/Contents/bin/$i /usr/local/bin/$i
+            done
+            sudo hdiutil detach /Volumes/cmake-$CMAKE_VERSION-$type
+            ;;
+
+        *)
+            ;;
+        esac
+        touch cmake-$CMAKE_VERSION-done
+    fi
+}
+
+uninstall_cmake() {
+    if [ ! -z "$installed_cmake_version" ]; then
+        echo "Uninstalling CMake:"
+        installed_cmake_major_version="`expr $installed_cmake_version : '\([0-9][0-9]*\).*'`"
+        case "$installed_cmake_major_version" in
+
+        0|1|2)
+            echo "CMake $installed_cmake_version is too old" 1>&2
+            ;;
+
+        3)
+            sudo rm -rf /Applications/CMake.app
+            for i in ccmake cmake cmake-gui cmakexbuild cpack ctest
+            do
+                sudo rm -f /usr/local/bin/$i
+            done
+            rm cmake-$installed_cmake_version-done
+            ;;
+        esac
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version,
+            # whatever it might happen to be called.
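+            # (Hence the three rm -f patterns below: the DMG name suffix
+            # changed from Darwin-x86_64 to macos-universal and
+            # macos10.10-universal across CMake 3.19.x, as handled in
+            # install_cmake above.)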
+            #
+            rm -f cmake-$installed_cmake_version-Darwin-x86_64.dmg
+            rm -f cmake-$installed_cmake_version-macos-universal.dmg
+            rm -f cmake-$installed_cmake_version-macos10.10-universal.dmg
+        fi
+
+        installed_cmake_version=""
+    fi
+}
+
+install_meson() {
+    #
+    # Install Meson with pip3 if we don't have it already.
+    #
+    if $MESON --version >/dev/null 2>&1
+    then
+        # We have it.
+        :
+    else
+        sudo pip3 install meson
+        touch meson-done
+    fi
+}
+
+uninstall_meson() {
+    #
+    # If we installed Meson, uninstall it with pip3.
+    #
+    if [ -f meson-done ] ; then
+        sudo pip3 uninstall meson
+        rm -f meson-done
+    fi
+}
+
+install_pytest() {
+    #
+    # Install pytest with pip3 if we don't have it already.
+    #
+    if python3 -m pytest --version >/dev/null 2>&1
+    then
+        # We have it.
+        :
+    else
+        sudo pip3 install pytest pytest-xdist
+        touch pytest-done
+    fi
+}
+
+uninstall_pytest() {
+    #
+    # If we installed pytest, uninstall it with pip3.
+    #
+    if [ -f pytest-done ] ; then
+        sudo pip3 uninstall pytest pytest-xdist
+        rm -f pytest-done
+    fi
+}
+
+install_gettext() {
+    if [ ! -f gettext-$GETTEXT_VERSION-done ] ; then
+        echo "Downloading, building, and installing GNU gettext:"
+        [ -f gettext-$GETTEXT_VERSION.tar.gz ] || curl -L -O https://ftp.gnu.org/pub/gnu/gettext/gettext-$GETTEXT_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat gettext-$GETTEXT_VERSION.tar.gz | tar xf - || exit 1
+        cd gettext-$GETTEXT_VERSION
+
+        #
+        # This is annoying.
+        #
+        # GNU gettext's configuration script checks for the presence of an
+        # implementation of iconv(). Not only does it check whether iconv()
+        # is available, *but* it checks for certain behavior *not* specified
+        # by POSIX that the GNU implementation provides, namely that an
+        # attempt to convert the UTF-8 for the EURO SYMBOL character to
+        # ISO 8859-1 results in an error.
+        #
+        # macOS, prior to Sierra, provided the GNU iconv library (as it's
+        # a POSIX API).
+        #
+        # Sierra appears to have picked up an implementation from FreeBSD
+        # (that implementation originated with the CITRUS project:
+        #
+        #    http://citrus.bsdclub.org
+        #
+        # with additional work done to integrate it into NetBSD, and then
+        # adopted by FreeBSD with further work done).
+        #
+        # That implementation does *NOT* return an error in that case; instead,
+        # it transliterates the EURO SYMBOL to "EUR".
+        #
+        # Both behaviors conform to POSIX.
+        #
+        # This causes GNU gettext's configure script to conclude that it
+        # should not say iconv() is available. That, unfortunately, causes
+        # the build to fail with a linking error when trying to build
+        # libtextstyle (a library for which we have no use, that is offered
+        # as a separate library by the GNU project:
+        #
+        #    https://www.gnu.org/software/gettext/libtextstyle/manual/libtextstyle.html
+        #
+        # and that is presumably bundled in GNU gettext because some gettext
+        # tool depends on it). The failure appears to be due to:
+        #
+        #    libtextstyle's exported symbols file is generated from a
+        #    template and a script that passes through only symbols
+        #    that appear in a header file that declares the symbol
+        #    as extern;
+        #
+        #    one such header file declares iconv_ostream_create, but only
+        #    if HAVE_ICONV is defined.
+ # + # the source file that defines iconv_ostream_create does so + # only if HAVE_ICONV is defined; + # + # the aforementioned script pays *NO ATTENTION* to #ifdefs, + # so it will include iconv_ostream_create in the list of + # symbols to export regardless of whether a working iconv() + # was found; + # + # the linker failing because it was told to export a symbol + # that doesn't exist. + # + # This is a collection of multiple messes: + # + # 1) not all versions of iconv() defaulting to "return an error + # if the target character set doesn't have a character that + # corresponds to the source character" and not offering a way + # to force that behavior; + # + # 2) either some parts of GNU gettext - and libraries bundled + # with it, for some mysterious reason - depending on the GNU + # behavior rather than assuming only what POSIX specifies, and + # the configure script checking for the GNU behavior and not + # setting HAVE_ICONV if it's not found; + # + # 3) the process for building the exported symbols file not + # removing symbols that won't exist in the build due to + # a "working" iconv() not being found; + # + # 4) the file that would define iconv_ostream_create() not + # defining as an always-failing stub if HAVE_ICONV isn't + # defined; + # + # 5) macOS's linker failing if a symbol is specified in an + # exported symbols file but not found, while other linkers + # just ignore it? (I add this because I'm a bit surprised + # that this has not been fixed, as I suspect it would fail + # on FreeBSD and possibly NetBSD as well, as I think their + # iconv()s also default to transliterating rather than failing + # if an input character has no corresponding character in + # the output encoding.) + # + # The Homebrew folks are aware of this and have reported it to + # Apple as a "feedback", for what that's worth: + # + # https://github.com/Homebrew/homebrew-core/commit/af3b4da5a096db3d9ee885e99ed29b33dec1f1c4 + # + # We adopt their fix, which is to run the configure script with + # "am_cv_func_iconv_works=y" as one of the arguments if it's + # running on Sonoma; in at least one test, doing so on Ventura + # caused the build to fail. + # + if [[ $DARWIN_MAJOR_VERSION -ge 23 ]]; then + workaround_arg="am_cv_func_iconv_works=y" + else + workaround_arg= + fi + CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure $workaround_arg || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch gettext-$GETTEXT_VERSION-done + fi +} + +uninstall_gettext() { + if [ ! -z "$installed_gettext_version" ] ; then + # + # GLib depends on this, so uninstall it. + # + uninstall_glib "$@" + + echo "Uninstalling GNU gettext:" + cd gettext-$installed_gettext_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm gettext-$installed_gettext_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf gettext-$installed_gettext_version + rm -rf gettext-$installed_gettext_version.tar.gz + fi + + installed_gettext_version="" + fi +} + +install_pkg_config() { + if [ ! 
-f pkg-config-$PKG_CONFIG_VERSION-done ] ; then
+        echo "Downloading, building, and installing pkg-config:"
+        [ -f pkg-config-$PKG_CONFIG_VERSION.tar.gz ] || curl -L -O https://pkgconfig.freedesktop.org/releases/pkg-config-$PKG_CONFIG_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat pkg-config-$PKG_CONFIG_VERSION.tar.gz | tar xf - || exit 1
+        cd pkg-config-$PKG_CONFIG_VERSION
+        ./configure --with-internal-glib || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch pkg-config-$PKG_CONFIG_VERSION-done
+    fi
+}
+
+uninstall_pkg_config() {
+    if [ ! -z "$installed_pkg_config_version" ] ; then
+        echo "Uninstalling pkg-config:"
+        cd pkg-config-$installed_pkg_config_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm pkg-config-$installed_pkg_config_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf pkg-config-$installed_pkg_config_version
+            rm -rf pkg-config-$installed_pkg_config_version.tar.gz
+        fi
+
+        installed_pkg_config_version=""
+    fi
+}
+
+install_glib() {
+    if [ ! -f glib-$GLIB_VERSION-done ] ; then
+        echo "Downloading, building, and installing GLib:"
+        glib_dir=`expr $GLIB_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
+        #
+        # Starting with GLib 2.28.8, xz-compressed tarballs are available.
+        #
+        [ -f glib-$GLIB_VERSION.tar.xz ] || curl -L -O https://download.gnome.org/sources/glib/$glib_dir/glib-$GLIB_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat glib-$GLIB_VERSION.tar.xz | tar xf - || exit 1
+        cd glib-$GLIB_VERSION
+        #
+        # First, determine where the system include files are.
+        # (It's not necessarily /usr/include.) There's a bit of a
+        # greasy hack here; pre-5.x versions of the developer tools
+        # don't support the --show-sdk-path option, and will produce
+        # no output, so includedir will be set to /usr/include
+        # (in those older versions of the developer tools, there is
+        # a /usr/include directory).
+        #
+        # We need this for several things we do later.
+        #
+        includedir=`SDKROOT="$SDKPATH" xcrun --show-sdk-path 2>/dev/null`/usr/include
+        #
+        # GLib's configuration procedure, whether autotools-based or
+        # Meson-based, really likes to use pkg-config to find libraries,
+        # including libffi.
+        #
+        # At least some versions of macOS provide libffi, but, as macOS
+        # doesn't provide pkg-config, they don't provide a .pc file for
+        # it, so the autotools-based configuration needs some trickery
+        # to get it to find the OS-supplied libffi, and the Meson-based
+        # configuration simply won't find it at all.
+        #
+        # So, if we have a system-provided libffi, but pkg-config
+        # doesn't find libffi, we construct a .pc file for that libffi,
+        # and install it in /usr/local/lib/pkgconfig.
+        #
+        # First, check whether pkg-config finds libffi but thinks its
+        # header files are in a non-existent directory. That probably
+        # means that we generated the .pc file when some SDK was the
+        # appropriate choice, but Xcode has been updated since then
+        # and that SDK is no longer present. If so, we remove it,
+        # so that we will regenerate it if necessary, rather than
+        # trying to build with a bogus include directory. (Yes, this
+        # can happen, and has happened, causing mysterious build
+        # failures when "#include <ffi.h>" fails.)
+        #
+        if pkg-config libffi ; then
+            # We have a .pc file for libffi; what does it say the
+            # include directory is?
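+            # (A hypothetical failure mode: after an Xcode update,
+            #     pkg-config --variable=includedir libffi
+            # may still print the old SDK's include path, which no longer
+            # exists; that stale .pc file is what gets removed below.)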
+            incldir=`pkg-config --variable=includedir libffi`
+            if [ ! -z "$incldir" -a ! -d "$incldir" ] ; then
+                # Bogus - remove it, assuming it was generated
+                # against an SDK that's no longer present.
+                $DO_RM /usr/local/lib/pkgconfig/libffi.pc
+            fi
+        fi
+        if pkg-config libffi ; then
+            # It found libffi; no need to install a .pc file, and we
+            # don't want to overwrite what's there already.
+            :
+        elif [ ! -e $includedir/ffi/ffi.h ] ; then
+            # We don't appear to have libffi as part of the system, so
+            # let the configuration process figure out what to do.
+            #
+            # We test for the header file, not the library, because, in
+            # Big Sur and later, there's no guarantee that, for a system
+            # shared library, there's a corresponding dylib file in
+            # /usr/lib.
+            :
+        else
+            #
+            # We have libffi, but pkg-config didn't find it; generate
+            # and install the .pc file.
+            #
+
+            #
+            # Now generate the .pc file.
+            #
+            # We generate the contents of the .pc file by using cat with
+            # a here document containing a template for the file and
+            # piping that to a sed command that replaces @INCLUDEDIR@ in
+            # the template with the include directory we discovered
+            # above, so that the .pc file gives the compiler flags
+            # necessary to find the libffi headers (which are *not*
+            # necessarily in /usr/include, as per the above).
+            #
+            # The EOF marker for the here document is in quotes, to tell
+            # the shell not to do shell expansion, as .pc files use a
+            # syntax to refer to .pc file variables that looks like the
+            # syntax to refer to shell variables.
+            #
+            # The writing of the libffi.pc file is a greasy hack - the
+            # process of generating the contents of the .pc file writes
+            # to the standard output, but running the last process in
+            # the pipeline as root won't allow the shell that's
+            # *running* it to open the .pc file if we don't have write
+            # permission on /usr/local/lib/pkgconfig, so we need a
+            # program that creates a file and then reads from the
+            # standard input and writes to that file. UN*Xes have a
+            # program that does that; it's called "tee". :-)
+            #
+            # However, it *also* writes the file to the standard output,
+            # so we redirect that to /dev/null when we run it.
+            #
+            cat <<"EOF" | sed "s;@INCLUDEDIR@;$includedir;" | $DO_TEE_TO_PC_FILE /usr/local/lib/pkgconfig/libffi.pc >/dev/null
+prefix=/usr
+libdir=${prefix}/lib
+includedir=@INCLUDEDIR@
+
+Name: ffi
+Description: Library supporting Foreign Function Interfaces
+Version: 3.2.9999
+Libs: -L${libdir} -lffi
+Cflags: -I${includedir}/ffi
+EOF
+        fi
+
+        #
+        # GLib 2.59.1 and later use Meson+Ninja as the build system.
+        #
+        case $GLIB_MAJOR_VERSION in
+
+        1)
+            echo "GLib $GLIB_VERSION is too old" 1>&2
+            ;;
+
+        *)
+            case $GLIB_MINOR_VERSION in
+
+            [0-9]|1[0-9]|2[0-9]|3[0-7])
+                echo "GLib $GLIB_VERSION is too old" 1>&2
+                ;;
+
+            3[8-9]|4[0-9]|5[0-8])
+                if [ ! -f ./configure ]; then
+                    LIBTOOLIZE=glibtoolize ./autogen.sh
+                fi
+                #
+                # At least with the version of Xcode that comes with
+                # Leopard, /usr/include/ffi/fficonfig.h doesn't define
+                # MACOSX, which causes the build of GLib to fail for at
+                # least some versions of GLib. If we don't find
+                # "#define.*MACOSX" in /usr/include/ffi/fficonfig.h,
+                # explicitly define it.
+                #
+                # While we're at it, suppress -Wformat-nonliteral to
+                # avoid a case where clang's stricter rules on when not
+                # to complain about non-literal format arguments cause
+                # it to complain about code that's safe but it wasn't
+                # told that.
See my comment #25 in GNOME bug 691608: + # + # https://bugzilla.gnome.org/show_bug.cgi?id=691608#c25 + # + if grep -qs '#define.*MACOSX' $includedir/ffi/fficonfig.h + then + # It's defined, nothing to do + CFLAGS="$CFLAGS -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + else + CFLAGS="$CFLAGS -DMACOSX -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -DMACOSX -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + fi + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + ;; + + 59|[6-9][0-9]|[1-9][0-9][0-9]) + # + # 2.59.0 doesn't require Meson and Ninja, but it + # supports it, and I'm too lazy to add a dot-dot + # version check. + # + # Disable tests to work around + # + # https://gitlab.gnome.org/GNOME/glib/-/issues/2902 + # + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" $MESON -Dtests=false _build || exit 1 + ninja $MAKE_BUILD_OPTS -C _build || exit 1 + $DO_NINJA_INSTALL || exit 1 + ;; + *) + echo "Glib's put out 1000 2.x releases?" 1>&2 + ;; + + esac + esac + cd .. + touch glib-$GLIB_VERSION-done + fi +} + +uninstall_glib() { + if [ ! -z "$installed_glib_version" ] ; then + echo "Uninstalling GLib:" + cd glib-$installed_glib_version + installed_glib_major_version="`expr $installed_glib_version : '\([0-9][0-9]*\).*'`" + installed_glib_minor_version="`expr $installed_glib_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + installed_glib_dotdot_version="`expr $installed_glib_version : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + installed_glib_major_minor_version=$installed_glib_major_version.$installed_glib_minor_version + installed_glib_major_minor_dotdot_version=$installed_glib_major_version.$installed_glib_minor_version.$installed_glib_dotdot_version + # + # GLib 2.59.1 and later use Meson+Ninja as the build system. + # + case $installed_glib_major_version in + + 1) + $DO_MAKE_UNINSTALL || exit 1 + # + # This appears to delete dependencies out from under other + # Makefiles in the tree, causing it to fail. At least until + # that gets fixed, if it ever gets fixed, we just ignore the + # exit status of "make distclean" + # + # make distclean || exit 1 + make distclean || echo "Ignoring make distclean failure" 1>&2 + ;; + + *) + case $installed_glib_minor_version in + + [0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-8]) + $DO_MAKE_UNINSTALL || exit 1 + # + # This appears to delete dependencies out from under other + # Makefiles in the tree, causing it to fail. At least until + # that gets fixed, if it ever gets fixed, we just ignore the + # exit status of "make distclean" + # + # make distclean || exit 1 + make distclean || echo "Ignoring make distclean failure" 1>&2 + ;; + + 59|[6-9][0-9]|[1-9][0-9][0-9]) + # + # 2.59.0 doesn't require Meson and Ninja, but it + # supports it, and I'm too lazy to add a dot-dot + # version check. + # + $DO_NINJA_UNINSTALL || exit 1 + # + # For Meson+Ninja, we do the build in an _build + # subdirectory, so the equivalent of "make distclean" + # is just to remove the directory tree. + # + rm -rf _build + ;; + + *) + echo "Glib's put out 1000 2.x releases?" 1>&2 + ;; + esac + esac + cd .. 
+        rm glib-$installed_glib_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf glib-$installed_glib_version
+            rm -rf glib-$installed_glib_version.tar.xz
+        fi
+
+        installed_glib_version=""
+    fi
+}
+
+install_qt() {
+    if [ "$QT_VERSION" -a ! -f qt-$QT_VERSION-done ]; then
+        echo "Downloading and installing Qt:"
+        #
+        # What you get for this URL might just be a 302 Found reply, so use
+        # -L so we get redirected.
+        #
+        # 5.0 - 5.1:  qt-mac-opensource-{version}-clang-offline.dmg
+        # 5.2.0:      qt-mac-opensource-{version}.dmg
+        # 5.2.1:      qt-opensource-mac-x64-clang-{version}.dmg
+        # 5.3 - 5.8:  qt-opensource-mac-x64-clang-{version}.dmg
+        # 5.9 - 5.14: qt-opensource-mac-x64-{version}.dmg
+        # 5.15 - 6.0: Offline installers no longer provided.
+        # ( https://download.qt.io/archive/qt/5.15/5.15.0/OFFLINE_README.txt )
+        # XXX: We need a different approach for QT >= 5.15. One option would be to
+        # install https://github.com/miurahr/aqtinstall, either permanently or into
+        # a temporary venv.
+        #
+        case $QT_MAJOR_VERSION in
+
+        1|2|3|4)
+            echo "Qt $QT_VERSION is too old" 1>&2
+            ;;
+
+        5)
+            case $QT_MINOR_VERSION in
+
+            0|1|2|3|4|5|6|7|8)
+                echo "Qt $QT_VERSION is too old" 1>&2
+                ;;
+
+            9|10|11|12|13|14)
+                QT_VOLUME=qt-opensource-mac-x64-$QT_VERSION
+                ;;
+            *)
+                echo "The Qt Company no longer provides open source offline installers for Qt $QT_VERSION" 1>&2
+                ;;
+
+            esac
+            [ -f $QT_VOLUME.dmg ] || curl -L -O https://download.qt.io/archive/qt/$QT_MAJOR_MINOR_VERSION/$QT_MAJOR_MINOR_DOTDOT_VERSION/$QT_VOLUME.dmg || exit 1
+            $no_build && echo "Skipping installation" && return
+            sudo hdiutil attach $QT_VOLUME.dmg || exit 1
+
+            #
+            # Run the installer executable directly, so that we wait for
+            # it to finish. Then unmount the volume.
+            #
+            /Volumes/$QT_VOLUME/$QT_VOLUME.app/Contents/MacOS/$QT_VOLUME
+            sudo hdiutil detach /Volumes/$QT_VOLUME
+            touch qt-$QT_VERSION-done
+            ;;
+        *)
+            echo "The Qt Company no longer provides open source offline installers for Qt $QT_VERSION" 1>&2
+            ;;
+        esac
+    fi
+}
+
+uninstall_qt() {
+    if [ ! -z "$installed_qt_version" ] ; then
+        echo "Uninstalling Qt:"
+        rm -rf $HOME/Qt$installed_qt_version
+        rm qt-$installed_qt_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded version.
+            #
+            # 5.0 - 5.1:  qt-mac-opensource-{version}-clang-offline.dmg
+            # 5.2.0:      qt-mac-opensource-{version}.dmg
+            # 5.2.1:      qt-opensource-mac-x64-clang-{version}.dmg
+            # 5.3 - 5.8:  qt-opensource-mac-x64-clang-{version}.dmg
+            # 5.9 - 5.14: qt-opensource-mac-x64-{version}.dmg
+            #
+            installed_qt_major_version="`expr $installed_qt_version : '\([0-9][0-9]*\).*'`"
+            installed_qt_minor_version="`expr $installed_qt_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+            installed_qt_dotdot_version="`expr $installed_qt_version : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+            case $installed_qt_major_version in
+
+            1|2|3|4)
+                echo "Qt $installed_qt_version is too old" 1>&2
+                ;;
+
+            5*)
+                case $installed_qt_minor_version in
+
+                0|1|2|3|4|5)
+                    echo "Qt $installed_qt_version is too old" 1>&2
+                    ;;
+
+                6|7|8)
+                    installed_qt_volume=qt-opensource-mac-x64-clang-$installed_qt_version.dmg
+                    ;;
+
+                9|10|11|12|13|14)
+                    installed_qt_volume=qt-opensource-mac-x64-$installed_qt_version.dmg
+                    ;;
+                esac
+            esac
+            rm -f $installed_qt_volume
+        fi
+
+        installed_qt_version=""
+    fi
+}
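+
+# A note on the `expr STRING : REGEX` idiom used above (and elsewhere
+# in this script): expr matches a POSIX basic regular expression
+# anchored at the start of STRING and, when the expression contains a
+# \(...\) group, prints what the group matched. Illustrative examples
+# (not tied to any particular Qt release):
+#
+#     expr 5.12.3 : '\([0-9][0-9]*\).*'                            # 5
+#     expr 5.12.3 : '[0-9][0-9]*\.\([0-9][0-9]*\).*'               # 12
+#     expr 5.12.3 : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'  # 3
+
+install_libsmi() {
+    if [ "$LIBSMI_VERSION" -a !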
-f libsmi-$LIBSMI_VERSION-done ] ; then + echo "Downloading, building, and installing libsmi:" + [ -f libsmi-$LIBSMI_VERSION.tar.gz ] || curl -L -O https://www.ibr.cs.tu-bs.de/projects/libsmi/download/libsmi-$LIBSMI_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat libsmi-$LIBSMI_VERSION.tar.gz | tar xf - || exit 1 + cd libsmi-$LIBSMI_VERSION + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch libsmi-$LIBSMI_VERSION-done + fi +} + +uninstall_libsmi() { + if [ ! -z "$installed_libsmi_version" ] ; then + echo "Uninstalling libsmi:" + cd libsmi-$installed_libsmi_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm libsmi-$installed_libsmi_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf libsmi-$installed_libsmi_version + rm -rf libsmi-$installed_libsmi_version.tar.gz + fi + + installed_libsmi_version="" + fi +} + +install_libgpg_error() { + if [ "$LIBGPG_ERROR_VERSION" -a ! -f libgpg-error-$LIBGPG_ERROR_VERSION-done ] ; then + echo "Downloading, building, and installing libgpg-error:" + [ -f libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/libgpg-error/libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 || exit 1 + $no_build && echo "Skipping installation" && return + bzcat libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 | tar xf - || exit 1 + cd libgpg-error-$LIBGPG_ERROR_VERSION + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch libgpg-error-$LIBGPG_ERROR_VERSION-done + fi +} + +uninstall_libgpg_error() { + if [ ! -z "$installed_libgpg_error_version" ] ; then + # + # libgcrypt depends on this, so uninstall it. + # + uninstall_libgcrypt "$@" + + echo "Uninstalling libgpg-error:" + cd libgpg-error-$installed_libgpg_error_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm libgpg-error-$installed_libgpg_error_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf libgpg-error-$installed_libgpg_error_version + rm -rf libgpg-error-$installed_libgpg_error_version.tar.bz2 + fi + + installed_libgpg_error_version="" + fi +} + +install_libgcrypt() { + if [ "$LIBGCRYPT_VERSION" -a ! -f libgcrypt-$LIBGCRYPT_VERSION-done ] ; then + # + # libgpg-error is required for libgcrypt. + # + if [ -z $LIBGPG_ERROR_VERSION ] + then + echo "libgcrypt requires libgpg-error, but you didn't install libgpg-error." 1>&2 + exit 1 + fi + + echo "Downloading, building, and installing libgcrypt:" + [ -f libgcrypt-$LIBGCRYPT_VERSION.tar.gz ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/libgcrypt/libgcrypt-$LIBGCRYPT_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat libgcrypt-$LIBGCRYPT_VERSION.tar.gz | tar xf - || exit 1 + cd libgcrypt-$LIBGCRYPT_VERSION + # + # The assembler language code is not compatible with the macOS + # x86 assembler (or is it an x86-64 vs. x86-32 issue?). + # + # libgcrypt expects gnu89, not c99/gnu99, semantics for + # "inline". 
See, for example: + # + # https://lists.freebsd.org/pipermail/freebsd-ports-bugs/2010-October/198809.html + # + CFLAGS="$CFLAGS -std=gnu89 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-asm || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch libgcrypt-$LIBGCRYPT_VERSION-done + fi +} + +uninstall_libgcrypt() { + if [ ! -z "$installed_libgcrypt_version" ] ; then + echo "Uninstalling libgcrypt:" + cd libgcrypt-$installed_libgcrypt_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm libgcrypt-$installed_libgcrypt_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf libgcrypt-$installed_libgcrypt_version + rm -rf libgcrypt-$installed_libgcrypt_version.tar.gz + fi + + installed_libgcrypt_version="" + fi +} + +install_gmp() { + if [ "$GMP_VERSION" -a ! -f gmp-$GMP_VERSION-done ] ; then + echo "Downloading, building, and installing GMP:" + [ -f gmp-$GMP_VERSION.tar.lz ] || curl -L -O https://gmplib.org/download/gmp/gmp-$GMP_VERSION.tar.lz || exit 1 + $no_build && echo "Skipping installation" && return + lzip -c -d gmp-$GMP_VERSION.tar.lz | tar xf - || exit 1 + cd gmp-$GMP_VERSION + # + # Create a fat binary: https://gmplib.org/manual/Notes-for-Package-Builds.html + # + # According to + # + # https://www.mail-archive.com/gmp-bugs@gmplib.org/msg01492.html + # + # and other pages, the Shiny New Linker in Xcode 15 causes this + # build to fail with "ld: branch8 out of range 384833 in + # ___gmpn_add_nc_x86_64"; linking with -ld64 is a workaround. + # + # For now, link with -ld64 on Xcode 15 and later. + # + XCODE_VERSION=`xcodebuild -version | sed -n 's;Xcode \(.*\);\1;p'` + XCODE_MAJOR_VERSION="`expr $XCODE_VERSION : '\([0-9][0-9]*\).*'`" + XCODE_MINOR_VERSION="`expr $XCODE_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + XCODE_DOTDOT_VERSION="`expr $XCODE_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`" + if [ "$XCODE_MAJOR_VERSION" -ge 15 ] + then + LD64_FLAG="-ld64" + else + LD64_FLAG="" + fi + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS $LD64_FLAG" ./configure --enable-fat || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch gmp-$GMP_VERSION-done + fi +} + +uninstall_gmp() { + if [ ! -z "$installed_gmp_version" ] ; then + # + # Nettle depends on this, so uninstall it. + # + uninstall_nettle "$@" + + echo "Uninstalling GMP:" + cd gmp-$installed_gmp_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm gmp-$installed_gmp_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf gmp-$installed_gmp_version + rm -rf gmp-$installed_gmp_version.tar.lz + fi + + installed_gmp_version="" + fi +} + +install_libtasn1() { + if [ "$LIBTASN1_VERSION" -a ! 
-f libtasn1-$LIBTASN1_VERSION-done ] ; then + echo "Downloading, building, and installing libtasn1:" + [ -f libtasn1-$LIBTASN1_VERSION.tar.gz ] || curl -L -O https://ftpmirror.gnu.org/libtasn1/libtasn1-$LIBTASN1_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat libtasn1-$LIBTASN1_VERSION.tar.gz | tar xf - || exit 1 + cd libtasn1-$LIBTASN1_VERSION + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch libtasn1-$LIBTASN1_VERSION-done + fi +} + +uninstall_libtasn1() { + if [ ! -z "$installed_libtasn1_version" ] ; then + # + # p11-kit depends on this, so uninstall it. + # + uninstall_p11_kit "$@" + + echo "Uninstalling libtasn1:" + cd libtasn1-$installed_libtasn1_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm libtasn1-$installed_libtasn1_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf libtasn1-$installed_libtasn1_version + rm -rf libtasn1-$installed_libtasn1_version.tar.gz + fi + + installed_libtasn1_version="" + fi +} + +install_p11_kit() { + if [ "$P11KIT_VERSION" -a ! -f p11-kit-$P11KIT_VERSION-done ] ; then + echo "Downloading, building, and installing p11-kit:" + [ -f p11-kit-$P11KIT_VERSION.tar.xz ] || curl -L -O https://github.com/p11-glue/p11-kit/releases/download/$P11KIT_VERSION/p11-kit-$P11KIT_VERSION.tar.xz || exit 1 + $no_build && echo "Skipping installation" && return + xzcat p11-kit-$P11KIT_VERSION.tar.xz | tar xf - || exit 1 + cd p11-kit-$P11KIT_VERSION + # + # Prior to Catalina, the libffi that's supplied with macOS + # doesn't support ffi_closure_alloc() or ffi_prep_closure_loc(), + # both of which are required by p11-kit if built with libffi. + # + # According to + # + # https://p11-glue.github.io/p11-glue/p11-kit/manual/devel-building.html + # + # libffi is used "for sharing of PKCS#11 modules between + # multiple callers in the same process. It is highly recommended + # that this dependency be treated as a required dependency.", + # but it's not clear that this matters to us, so we just + # configure p11-kit not to use libffi. + # + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -L/usr/local/lib" LIBS=-lintl ./configure --without-libffi --without-trust-paths || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch p11-kit-$P11KIT_VERSION-done + fi +} + +uninstall_p11_kit() { + if [ ! -z "$installed_p11_kit_version" ] ; then + # + # Nettle depends on this, so uninstall it. + # + uninstall_nettle "$@" + + echo "Uninstalling p11-kit:" + cd p11-kit-$installed_p11_kit_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm p11-kit-$installed_p11_kit_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf p11-kit-$installed_p11_kit_version + rm -rf p11-kit-$installed_p11_kit_version.tar.xz + fi + + installed_p11_kit_version="" + fi +} + +install_nettle() { + if [ "$NETTLE_VERSION" -a ! 
-f nettle-$NETTLE_VERSION-done ] ; then
+        echo "Downloading, building, and installing Nettle:"
+        [ -f nettle-$NETTLE_VERSION.tar.gz ] || curl -L -O https://ftp.gnu.org/gnu/nettle/nettle-$NETTLE_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat nettle-$NETTLE_VERSION.tar.gz | tar xf - || exit 1
+        cd nettle-$NETTLE_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -I/usr/local/include" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -L/usr/local/lib" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch nettle-$NETTLE_VERSION-done
+    fi
+}
+
+uninstall_nettle() {
+    if [ ! -z "$installed_nettle_version" ] ; then
+        #
+        # GnuTLS depends on this, so uninstall it.
+        #
+        uninstall_gnutls "$@"
+
+        echo "Uninstalling Nettle:"
+        cd nettle-$installed_nettle_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm nettle-$installed_nettle_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf nettle-$installed_nettle_version
+            rm -rf nettle-$installed_nettle_version.tar.gz
+        fi
+
+        installed_nettle_version=""
+    fi
+}
+
+install_gnutls() {
+    if [ "$GNUTLS_VERSION" -a ! -f gnutls-$GNUTLS_VERSION-done ] ; then
+        #
+        # GnuTLS requires Nettle.
+        #
+        if [ -z $NETTLE_VERSION ]
+        then
+            echo "GnuTLS requires Nettle, but you didn't install Nettle" 1>&2
+            exit 1
+        fi
+
+        echo "Downloading, building, and installing GnuTLS:"
+        if [[ $GNUTLS_MAJOR_VERSION -ge 3 ]]
+        then
+            #
+            # Starting with GnuTLS 3.x, the tarballs are compressed with
+            # xz rather than bzip2.
+            #
+            [ -f gnutls-$GNUTLS_VERSION.tar.xz ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/gnutls/v$GNUTLS_MAJOR_VERSION.$GNUTLS_MINOR_VERSION/gnutls-$GNUTLS_VERSION.tar.xz || exit 1
+            $no_build && echo "Skipping installation" && return
+            xzcat gnutls-$GNUTLS_VERSION.tar.xz | tar xf - || exit 1
+        else
+            [ -f gnutls-$GNUTLS_VERSION.tar.bz2 ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/gnutls/v$GNUTLS_MAJOR_VERSION.$GNUTLS_MINOR_VERSION/gnutls-$GNUTLS_VERSION.tar.bz2 || exit 1
+            $no_build && echo "Skipping installation" && return
+            bzcat gnutls-$GNUTLS_VERSION.tar.bz2 | tar xf - || exit 1
+        fi
+        cd gnutls-$GNUTLS_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -I/usr/local/include" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -I/usr/local/include" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -L/usr/local/lib" ./configure --with-included-unistring --disable-guile || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch gnutls-$GNUTLS_VERSION-done
+    fi
+}
+
+uninstall_gnutls() {
+    if [ ! -z "$installed_gnutls_version" ] ; then
+        echo "Uninstalling GnuTLS:"
+        cd gnutls-$installed_gnutls_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm gnutls-$installed_gnutls_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version;
+            # depending on the GnuTLS version, the tarball is either
+            # .tar.bz2 or .tar.xz, so remove both.
+            #
+            rm -rf gnutls-$installed_gnutls_version
+            rm -rf gnutls-$installed_gnutls_version.tar.bz2
+            rm -rf gnutls-$installed_gnutls_version.tar.xz
+        fi
+
+        installed_gnutls_version=""
+    fi
+}
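+
+# As an illustration of the version-dependent download logic above
+# (the version is an example, not a pin): GNUTLS_VERSION=3.6.16
+# fetches
+#
+#     https://www.gnupg.org/ftp/gcrypt/gnutls/v3.6/gnutls-3.6.16.tar.xz
+#
+# while a 2.x GNUTLS_VERSION would fetch the corresponding .tar.bz2.
+
+install_lua() {
+    if [ "$LUA_VERSION" -a !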
-f lua-$LUA_VERSION-done ] ; then + echo "Downloading, building, and installing Lua:" + [ -f lua-$LUA_VERSION.tar.gz ] || curl -L -O https://www.lua.org/ftp/lua-$LUA_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat lua-$LUA_VERSION.tar.gz | tar xf - || exit 1 + cd lua-$LUA_VERSION + make MYCFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" MYLDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" $MAKE_BUILD_OPTS macosx || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch lua-$LUA_VERSION-done + fi +} + +uninstall_lua() { + if [ ! -z "$installed_lua_version" ] ; then + echo "Uninstalling Lua:" + # + # Lua has no "make uninstall", so just remove stuff manually. + # There's no configure script, so there's no need for + # "make distclean", either; just do "make clean". + # + (cd /usr/local/bin; $DO_RM -f lua luac) + (cd /usr/local/include; $DO_RM -f lua.h luaconf.h lualib.h lauxlib.h lua.hpp) + (cd /usr/local/lib; $DO_RM -f liblua.a) + (cd /usr/local/man/man1; $DO_RM -f lua.1 luac.1) + cd lua-$installed_lua_version + make clean || exit 1 + cd .. + rm lua-$installed_lua_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf lua-$installed_lua_version + rm -rf lua-$installed_lua_version.tar.gz + fi + + installed_lua_version="" + fi +} + +install_snappy() { + if [ "$SNAPPY_VERSION" -a ! -f snappy-$SNAPPY_VERSION-done ] ; then + echo "Downloading, building, and installing snappy:" + [ -f snappy-$SNAPPY_VERSION.tar.gz ] || curl -L -o snappy-$SNAPPY_VERSION.tar.gz https://github.com/google/snappy/archive/$SNAPPY_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat snappy-$SNAPPY_VERSION.tar.gz | tar xf - || exit 1 + cd snappy-$SNAPPY_VERSION + if [ "$SNAPPY_VERSION" = "1.1.10" ] ; then + # This patch corresponds to https://github.com/google/snappy/commit/27f34a580be4a3becf5f8c0cba13433f53c21337 + patch -p0 <${topdir}/macosx-support-lib-patches/snappy-signed.patch || exit 1 + fi + mkdir build_dir + cd build_dir + # + # Build a shared library, because we'll be linking libwireshark, + # which is a C library, with libsnappy, and libsnappy is a C++ + # library and requires the C++ run time; the shared library + # will carry that dependency with it, so linking with it should + # Just Work. + # + MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE -DBUILD_SHARED_LIBS=YES -DSNAPPY_BUILD_BENCHMARKS=NO -DSNAPPY_BUILD_TESTS=NO ../ || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd ../.. + touch snappy-$SNAPPY_VERSION-done + fi +} + +uninstall_snappy() { + if [ ! -z "$installed_snappy_version" ] ; then + echo "Uninstalling snappy:" + cd snappy-$installed_snappy_version + # + # snappy uses cmake and doesn't support "make uninstall"; + # just remove what we know it installs. 
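+        # CMake records each file "make install" copies in
+        # build_dir/install_manifest.txt, one absolute path per line
+        # (historically with no newline after the last entry, hence the
+        # "echo" appended in the loop below), so when that manifest is
+        # present we remove exactly the paths it lists.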
+        #
+        # $DO_MAKE_UNINSTALL || exit 1
+        if [ -s build_dir/install_manifest.txt ] ; then
+            while read -r ; do $DO_RM -v "$REPLY" ; done < <(cat build_dir/install_manifest.txt; echo)
+        else
+            $DO_RM -f /usr/local/lib/libsnappy.1.1.8.dylib \
+                      /usr/local/lib/libsnappy.1.dylib \
+                      /usr/local/lib/libsnappy.dylib \
+                      /usr/local/include/snappy-c.h \
+                      /usr/local/include/snappy-sinksource.h \
+                      /usr/local/include/snappy-stubs-public.h \
+                      /usr/local/include/snappy.h \
+                      /usr/local/lib/cmake/Snappy/SnappyConfig.cmake \
+                      /usr/local/lib/cmake/Snappy/SnappyConfigVersion.cmake \
+                      /usr/local/lib/cmake/Snappy/SnappyTargets-noconfig.cmake \
+                      /usr/local/lib/cmake/Snappy/SnappyTargets.cmake || exit 1
+        fi
+        #
+        # snappy uses cmake and doesn't support "make distclean";
+        # just remove the entire build directory.
+        #
+        # make distclean || exit 1
+        rm -rf build_dir || exit 1
+        cd ..
+        rm snappy-$installed_snappy_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf snappy-$installed_snappy_version
+            rm -rf snappy-$installed_snappy_version.tar.gz
+        fi
+
+        installed_snappy_version=""
+    fi
+}
+
+install_zstd() {
+    if [ "$ZSTD_VERSION" -a ! -f zstd-$ZSTD_VERSION-done ] ; then
+        echo "Downloading, building, and installing zstd:"
+        [ -f zstd-$ZSTD_VERSION.tar.gz ] || curl -L -O https://github.com/facebook/zstd/releases/download/v$ZSTD_VERSION/zstd-$ZSTD_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat zstd-$ZSTD_VERSION.tar.gz | tar xf - || exit 1
+        cd zstd-$ZSTD_VERSION
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch zstd-$ZSTD_VERSION-done
+    fi
+}
+
+uninstall_zstd() {
+    if [ ! -z "$installed_zstd_version" ] ; then
+        echo "Uninstalling zstd:"
+        cd zstd-$installed_zstd_version
+        $DO_MAKE_UNINSTALL || exit 1
+        #
+        # zstd has no configure script, so there's no need for
+        # "make distclean", and the Makefile supplied with it
+        # has no "make distclean" rule; just do "make clean".
+        #
+        make clean || exit 1
+        cd ..
+        rm zstd-$installed_zstd_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf zstd-$installed_zstd_version
+            rm -rf zstd-$installed_zstd_version.tar.gz
+        fi
+
+        installed_zstd_version=""
+    fi
+}
+
+install_libxml2() {
+    if [ "$LIBXML2_VERSION" -a ! -f libxml2-$LIBXML2_VERSION-done ] ; then
+        echo "Downloading, building, and installing libxml2:"
+        LIBXML2_MAJOR_VERSION="`expr $LIBXML2_VERSION : '\([0-9][0-9]*\).*'`"
+        LIBXML2_MINOR_VERSION="`expr $LIBXML2_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        LIBXML2_MAJOR_MINOR_VERSION=$LIBXML2_MAJOR_VERSION.$LIBXML2_MINOR_VERSION
+        [ -f libxml2-$LIBXML2_VERSION.tar.xz ] || curl -L -O https://download.gnome.org/sources/libxml2/$LIBXML2_MAJOR_MINOR_VERSION/libxml2-$LIBXML2_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat libxml2-$LIBXML2_VERSION.tar.xz | tar xf - || exit 1
+        cd libxml2-$LIBXML2_VERSION
+        #
+        # At least on macOS 12.0.1 with Xcode 13.1, when we build
+        # libxml2, the linker complains that we don't have the right
+        # to link with the Python framework, so don't build with
+        # Python.
+        #
+        CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --without-python || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch libxml2-$LIBXML2_VERSION-done
+    fi
+}
+
+uninstall_libxml2() {
+    if [ ! -z "$installed_libxml2_version" ] ; then
+        echo "Uninstalling libxml2:"
+        cd libxml2-$installed_libxml2_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm libxml2-$installed_libxml2_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libxml2-$installed_libxml2_version
+            rm -rf libxml2-$installed_libxml2_version.tar.xz
+        fi
+
+        installed_libxml2_version=""
+    fi
+}
+
+install_lz4() {
+    if [ "$LZ4_VERSION" -a ! -f lz4-$LZ4_VERSION-done ] ; then
+        echo "Downloading, building, and installing lz4:"
+        #
+        # lz4 switched from sequentially numbered releases, named rN,
+        # to vX.Y.Z-numbered releases.
+        #
+        # The old sequentially-numbered releases were in tarballs
+        # at https://github.com/lz4/lz4/archive/rN.tar.gz, which
+        # extract into an lz4-rN directory.
+        #
+        # The new vX.Y.Z-numbered releases are in tarballs at
+        # https://github.com/lz4/lz4/archive/vX.Y.Z.tar.gz, which
+        # extract into an lz4-X.Y.Z directory - no, not lz4-vX.Y.Z,
+        # just lz4-X.Y.Z.
+        #
+        # We expect LZ4_VERSION to be set to rN for the sequentially-
+        # numbered releases and X.Y.Z - not vX.Y.Z - for the vX.Y.Z-
+        # numbered releases. We also tell Curl to download the tarball
+        # with a name that corresponds to the name of the target
+        # directory, so that it begins with "lz4-" and ends with either
+        # "rN" or "X.Y.Z", to match what almost all of the other
+        # support libraries do.
+        #
+        if [[ "$LZ4_VERSION" == r* ]]
+        then
+            [ -f lz4-$LZ4_VERSION.tar.gz ] || curl -L -o lz4-$LZ4_VERSION.tar.gz https://github.com/lz4/lz4/archive/$LZ4_VERSION.tar.gz || exit 1
+        else
+            [ -f lz4-$LZ4_VERSION.tar.gz ] || curl -L -o lz4-$LZ4_VERSION.tar.gz https://github.com/lz4/lz4/archive/v$LZ4_VERSION.tar.gz || exit 1
+        fi
+        $no_build && echo "Skipping installation" && return
+        gzcat lz4-$LZ4_VERSION.tar.gz | tar xf - || exit 1
+        cd lz4-$LZ4_VERSION
+        #
+        # No configure script here, but it appears that if MOREFLAGS is
+        # set, that's added to CFLAGS, and those are combined with LDFLAGS
+        # and CXXFLAGS into FLAGS, which is used when building source
+        # files and libraries.
+        #
+        MOREFLAGS="-D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch lz4-$LZ4_VERSION-done
+    fi
+}
+
+uninstall_lz4() {
+    if [ ! -z "$installed_lz4_version" ] ; then
+        echo "Uninstalling lz4:"
+        cd lz4-$installed_lz4_version
+        $DO_MAKE_UNINSTALL || exit 1
+        #
+        # lz4's Makefile doesn't support "make distclean"; just do
+        # "make clean". Perhaps not using autotools means that
+        # there's no need for "make distclean".
+        #
+        # make distclean || exit 1
+        make clean || exit 1
+        cd ..
+        rm lz4-$installed_lz4_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            # "make install" apparently causes some stuff to be
+            # modified in the build tree, so, as it's done as
+            # root, that leaves stuff owned by root in the build
+            # tree. Therefore, we have to remove the build tree
+            # as root.
+            #
+            sudo rm -rf lz4-$installed_lz4_version
+            rm -rf lz4-$installed_lz4_version.tar.gz
+        fi
+
+        installed_lz4_version=""
+    fi
+}
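+
+# To make the lz4 naming convention above concrete (version numbers
+# are illustrative, not recommendations): LZ4_VERSION=r131 downloads
+# archive/r131.tar.gz as lz4-r131.tar.gz, and LZ4_VERSION=1.9.4
+# downloads archive/v1.9.4.tar.gz as lz4-1.9.4.tar.gz; each tarball
+# then extracts into a directory named lz4-$LZ4_VERSION.
+
+install_sbc() {
+    if [ "$SBC_VERSION" -a !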
-f sbc-$SBC_VERSION-done ] ; then + echo "Downloading, building, and installing sbc:" + [ -f sbc-$SBC_VERSION.tar.gz ] || curl -L -O https://www.kernel.org/pub/linux/bluetooth/sbc-$SBC_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat sbc-$SBC_VERSION.tar.gz | tar xf - || exit 1 + cd sbc-$SBC_VERSION + if [ "$DARWIN_PROCESSOR_ARCH" = "arm" ] ; then + CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS -U__ARM_NEON__" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-tools --disable-tester --disable-shared || exit 1 + else + CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-tools --disable-tester --disable-shared || exit 1 + fi + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch sbc-$SBC_VERSION-done + fi +} + +uninstall_sbc() { + if [ ! -z "$installed_sbc_version" ] ; then + echo "Uninstalling sbc:" + cd sbc-$installed_sbc_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm sbc-$installed_sbc_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf sbc-$installed_sbc_version + rm -rf sbc-$installed_sbc_version.tar.gz + fi + + installed_sbc_version="" + fi +} + +install_maxminddb() { + if [ "$MAXMINDDB_VERSION" -a ! -f maxminddb-$MAXMINDDB_VERSION-done ] ; then + echo "Downloading, building, and installing MaxMindDB API:" + [ -f libmaxminddb-$MAXMINDDB_VERSION.tar.gz ] || curl -L -O https://github.com/maxmind/libmaxminddb/releases/download/$MAXMINDDB_VERSION/libmaxminddb-$MAXMINDDB_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat libmaxminddb-$MAXMINDDB_VERSION.tar.gz | tar xf - || exit 1 + cd libmaxminddb-$MAXMINDDB_VERSION + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch maxminddb-$MAXMINDDB_VERSION-done + fi +} + +uninstall_maxminddb() { + if [ ! -z "$installed_maxminddb_version" ] ; then + echo "Uninstalling MaxMindDB API:" + cd libmaxminddb-$installed_maxminddb_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm maxminddb-$installed_maxminddb_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf libmaxminddb-$installed_maxminddb_version + rm -rf libmaxminddb-$installed_maxminddb_version.tar.gz + fi + + installed_maxminddb_version="" + fi +} + +install_c_ares() { + if [ "$CARES_VERSION" -a ! -f c-ares-$CARES_VERSION-done ] ; then + echo "Downloading, building, and installing C-Ares API:" + [ -f c-ares-$CARES_VERSION.tar.gz ] || curl -L -O https://c-ares.org/download/c-ares-$CARES_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat c-ares-$CARES_VERSION.tar.gz | tar xf - || exit 1 + cd c-ares-$CARES_VERSION + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. 
+        touch c-ares-$CARES_VERSION-done
+    fi
+}
+
+uninstall_c_ares() {
+    if [ ! -z "$installed_cares_version" ] ; then
+        echo "Uninstalling C-Ares API:"
+        cd c-ares-$installed_cares_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm c-ares-$installed_cares_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf c-ares-$installed_cares_version
+            rm -rf c-ares-$installed_cares_version.tar.gz
+        fi
+
+        installed_cares_version=""
+    fi
+}
+
+install_libssh() {
+    if [ "$LIBSSH_VERSION" -a ! -f libssh-$LIBSSH_VERSION-done ] ; then
+        echo "Downloading, building, and installing libssh:"
+        LIBSSH_MAJOR_VERSION="`expr $LIBSSH_VERSION : '\([0-9][0-9]*\).*'`"
+        LIBSSH_MINOR_VERSION="`expr $LIBSSH_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        LIBSSH_MAJOR_MINOR_VERSION=$LIBSSH_MAJOR_VERSION.$LIBSSH_MINOR_VERSION
+        [ -f libssh-$LIBSSH_VERSION.tar.xz ] || curl -L -O https://www.libssh.org/files/$LIBSSH_MAJOR_MINOR_VERSION/libssh-$LIBSSH_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat libssh-$LIBSSH_VERSION.tar.xz | tar xf - || exit 1
+        cd libssh-$LIBSSH_VERSION
+        mkdir build
+        cd build
+        MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE -DWITH_GCRYPT=1 ../ || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../..
+        touch libssh-$LIBSSH_VERSION-done
+    fi
+}
+
+uninstall_libssh() {
+    if [ ! -z "$installed_libssh_version" ] ; then
+        echo "Uninstalling libssh:"
+        cd libssh-$installed_libssh_version
+        #
+        # libssh uses cmake and doesn't support "make uninstall";
+        # just remove what we know it installs.
+        #
+        # $DO_MAKE_UNINSTALL || exit 1
+        $DO_RM -rf /usr/local/lib/libssh* \
+                   /usr/local/include/libssh \
+                   /usr/local/lib/pkgconfig/libssh* \
+                   /usr/local/lib/cmake/libssh || exit 1
+        #
+        # libssh uses cmake and doesn't support "make distclean";
+        # just remove the entire build directory.
+        #
+        # make distclean || exit 1
+        rm -rf build || exit 1
+        cd ..
+        rm libssh-$installed_libssh_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version;
+            # the libssh tarball is .tar.xz, not .tar.gz.
+            #
+            rm -rf libssh-$installed_libssh_version
+            rm -rf libssh-$installed_libssh_version.tar.xz
+        fi
+
+        installed_libssh_version=""
+    fi
+}
+
+install_nghttp2() {
+    if [ "$NGHTTP2_VERSION" -a ! -f nghttp2-$NGHTTP2_VERSION-done ] ; then
+        echo "Downloading, building, and installing nghttp2:"
+        [ -f nghttp2-$NGHTTP2_VERSION.tar.xz ] || curl -L -O https://github.com/nghttp2/nghttp2/releases/download/v$NGHTTP2_VERSION/nghttp2-$NGHTTP2_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat nghttp2-$NGHTTP2_VERSION.tar.xz | tar xf - || exit 1
+        cd nghttp2-$NGHTTP2_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-lib-only || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch nghttp2-$NGHTTP2_VERSION-done
+    fi
+}
+
+uninstall_nghttp2() {
+    if [ ! -z "$installed_nghttp2_version" ] ; then
+        echo "Uninstalling nghttp2:"
+        cd nghttp2-$installed_nghttp2_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm nghttp2-$installed_nghttp2_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+ # + rm -rf nghttp2-$installed_nghttp2_version + rm -rf nghttp2-$installed_nghttp2_version.tar.xz + fi + + installed_nghttp2_version="" + fi +} + +install_nghttp3() { + if [ "$NGHTTP3_VERSION" -a ! -f nghttp3-$NGHTTP3_VERSION-done ] ; then + echo "Downloading, building, and installing nghttp3:" + [ -f nghttp3-$NGHTTP3_VERSION.tar.xz ] || curl -L -O https://github.com/ngtcp2/nghttp3/releases/download/v$NGHTTP3_VERSION/nghttp3-$NGHTTP3_VERSION.tar.xz || exit 1 + $no_build && echo "Skipping installation" && return + xzcat nghttp3-$NGHTTP3_VERSION.tar.xz | tar xf - || exit 1 + cd nghttp3-$NGHTTP3_VERSION + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-lib-only || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch nghttp3-$NGHTTP3_VERSION-done + fi +} + +uninstall_nghttp3() { + if [ ! -z "$installed_nghttp3_version" ] ; then + echo "Uninstalling nghttp3:" + cd nghttp3-$installed_nghttp3_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm nghttp3-$installed_nghttp3_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf nghttp3-$installed_nghttp3_version + rm -rf nghttp3-$installed_nghttp3_version.tar.xz + fi + + installed_nghttp3_version="" + fi +} + +install_libtiff() { + if [ "$LIBTIFF_VERSION" -a ! -f tiff-$LIBTIFF_VERSION-done ] ; then + echo "Downloading, building, and installing libtiff:" + [ -f tiff-$LIBTIFF_VERSION.tar.gz ] || + curl --fail -L -O https://download.osgeo.org/libtiff/tiff-$LIBTIFF_VERSION.tar.gz || + curl --fail -L -O https://download.osgeo.org/libtiff/old/tiff-$LIBTIFF_VERSION.tar.gz || + exit 1 + $no_build && echo "Skipping installation" && return + gzcat tiff-$LIBTIFF_VERSION.tar.gz | tar xf - || exit 1 + cd tiff-$LIBTIFF_VERSION + CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd .. + touch tiff-$LIBTIFF_VERSION-done + fi +} + +uninstall_libtiff() { + if [ ! -z "$installed_libtiff_version" ] ; then + echo "Uninstalling libtiff:" + cd tiff-$installed_libtiff_version + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd .. + rm tiff-$installed_libtiff_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf tiff-$installed_libtiff_version + rm -rf tiff-$installed_libtiff_version.tar.gz + fi + + installed_libtiff_version="" + fi +} + +install_spandsp() { + if [ "$SPANDSP_VERSION" -a ! -f spandsp-$SPANDSP_VERSION-done ] ; then + echo "Downloading, building, and installing SpanDSP:" + [ -f spandsp-$SPANDSP_VERSION.tar.gz ] || curl -L -O https://www.soft-switch.org/downloads/spandsp/spandsp-$SPANDSP_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat spandsp-$SPANDSP_VERSION.tar.gz | tar xf - || exit 1 + cd spandsp-$SPANDSP_VERSION + # + # Don't use -Wunused-but-set-variable, as it's not supported + # by all the gcc versions in the versions of Xcode that we + # support. 
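+        # (For reference: the patch rewrites the generated configure
+        # script in place, and -p0 applies it with path names taken
+        # exactly as they appear in the patch file.)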
+        #
+        patch -p0 <${topdir}/macosx-support-lib-patches/spandsp-configure-patch || exit 1
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch spandsp-$SPANDSP_VERSION-done
+    fi
+}
+
+uninstall_spandsp() {
+    if [ ! -z "$installed_spandsp_version" ] ; then
+        echo "Uninstalling SpanDSP:"
+        cd spandsp-$installed_spandsp_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm spandsp-$installed_spandsp_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf spandsp-$installed_spandsp_version
+            rm -rf spandsp-$installed_spandsp_version.tar.gz
+        fi
+
+        installed_spandsp_version=""
+    fi
+}
+
+install_speexdsp() {
+    if [ "$SPEEXDSP_VERSION" -a ! -f speexdsp-$SPEEXDSP_VERSION-done ] ; then
+        echo "Downloading, building, and installing SpeexDSP:"
+        [ -f speexdsp-$SPEEXDSP_VERSION.tar.gz ] || curl -L -O https://ftp.osuosl.org/pub/xiph/releases/speex/speexdsp-$SPEEXDSP_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat speexdsp-$SPEEXDSP_VERSION.tar.gz | tar xf - || exit 1
+        cd speexdsp-$SPEEXDSP_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch speexdsp-$SPEEXDSP_VERSION-done
+    fi
+}
+
+uninstall_speexdsp() {
+    if [ ! -z "$installed_speexdsp_version" ] ; then
+        echo "Uninstalling SpeexDSP:"
+        cd speexdsp-$installed_speexdsp_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm speexdsp-$installed_speexdsp_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf speexdsp-$installed_speexdsp_version
+            rm -rf speexdsp-$installed_speexdsp_version.tar.gz
+        fi
+
+        installed_speexdsp_version=""
+    fi
+}
+
+install_bcg729() {
+    if [ "$BCG729_VERSION" -a ! -f bcg729-$BCG729_VERSION-done ] ; then
+        echo "Downloading, building, and installing bcg729:"
+        [ -f bcg729-$BCG729_VERSION.tar.gz ] || curl -L -O https://gitlab.linphone.org/BC/public/bcg729/-/archive/$BCG729_VERSION/bcg729-$BCG729_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat bcg729-$BCG729_VERSION.tar.gz | tar xf - || exit 1
+        cd bcg729-$BCG729_VERSION
+        mkdir build_dir
+        cd build_dir
+        MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE ../ || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../..
+        touch bcg729-$BCG729_VERSION-done
+    fi
+}
+
+uninstall_bcg729() {
+    if [ ! -z "$installed_bcg729_version" ] ; then
+        echo "Uninstalling bcg729:"
+        cd bcg729-$installed_bcg729_version
+        #
+        # bcg729 uses cmake on macOS and doesn't support "make uninstall";
+        # just remove what we know it installs.
+        #
+        # $DO_MAKE_UNINSTALL || exit 1
+        $DO_RM -rf /usr/local/share/Bcg729 \
+                   /usr/local/lib/libbcg729* \
+                   /usr/local/include/bcg729 \
+                   /usr/local/lib/pkgconfig/libbcg729* || exit 1
+        #
+        # bcg729 uses cmake on macOS and doesn't support "make distclean";
+        # just remove the entire build directory.
+        #
+        # make distclean || exit 1
+        rm -rf build_dir || exit 1
+        cd ..
+        rm bcg729-$installed_bcg729_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf bcg729-$installed_bcg729_version
+            rm -rf bcg729-$installed_bcg729_version.tar.gz
+        fi
+
+        installed_bcg729_version=""
+    fi
+}
+
+install_ilbc() {
+    if [ -n "$ILBC_VERSION" ] && [ ! -f ilbc-$ILBC_VERSION-done ] ; then
+        echo "Downloading, building, and installing iLBC:"
+        [ -f libilbc-$ILBC_VERSION.tar.bz2 ] || curl --location --remote-name https://github.com/TimothyGu/libilbc/releases/download/v$ILBC_VERSION/libilbc-$ILBC_VERSION.tar.bz2 || exit 1
+        $no_build && echo "Skipping installation" && return
+        bzcat libilbc-$ILBC_VERSION.tar.bz2 | tar xf - || exit 1
+        cd libilbc-$ILBC_VERSION || exit 1
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch ilbc-$ILBC_VERSION-done
+    fi
+}
+
+uninstall_ilbc() {
+    if [ -n "$installed_ilbc_version" ] ; then
+        echo "Uninstalling iLBC:"
+        cd "libilbc-$installed_ilbc_version" || exit 1
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm "ilbc-$installed_ilbc_version-done"
+
+        if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf "libilbc-$installed_ilbc_version"
+            rm -rf "libilbc-$installed_ilbc_version.tar.bz2"
+        fi
+
+        installed_ilbc_version=""
+    fi
+}
+
+install_opus() {
+    if [ "$OPUS_VERSION" -a ! -f opus-$OPUS_VERSION-done ] ; then
+        echo "Downloading, building, and installing opus:"
+        [ -f opus-$OPUS_VERSION.tar.gz ] || curl -L -O https://downloads.xiph.org/releases/opus/opus-$OPUS_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat opus-$OPUS_VERSION.tar.gz | tar xf - || exit 1
+        cd opus-$OPUS_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch opus-$OPUS_VERSION-done
+    fi
+}
+
+uninstall_opus() {
+    if [ ! -z "$installed_opus_version" ] ; then
+        echo "Uninstalling opus:"
+        cd opus-$installed_opus_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm opus-$installed_opus_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf opus-$installed_opus_version
+            rm -rf opus-$installed_opus_version.tar.gz
+        fi
+
+        installed_opus_version=""
+    fi
+}
+
+install_python3() {
+    # The macos11 installer can be deployed to older versions, down to
+    # 10.9 (Mavericks), but is still considered experimental so continue
+    # to use the 64-bit installer (10.9) on earlier releases for now.
+    local macver=x10.9
+    if [[ $DARWIN_MAJOR_VERSION -gt 19 ]]; then
+        # The macos11 installer is required for Arm-based Macs, which require
+        # macOS 11 Big Sur. Note that the package name is "11.0" (no x) for
+        # 3.9.1 but simply "11" for 3.9.2 (and later)
+        if [[ $PYTHON3_VERSION = 3.9.1 ]]; then
+            macver=11.0
+        else
+            macver=11
+        fi
+    fi
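+    # To make the naming rules above concrete (versions are
+    # illustrative): PYTHON3_VERSION=3.9.1 on macOS 11 or later gives
+    # python-3.9.1-macos11.0.pkg, PYTHON3_VERSION=3.9.2 gives
+    # python-3.9.2-macos11.pkg, and on pre-Big Sur systems the default
+    # macver gives python-3.9.2-macosx10.9.pkg.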
+    if [ "$PYTHON3_VERSION" -a ! -f python3-$PYTHON3_VERSION-done ] ; then
+        echo "Downloading and installing python3:"
+        [ -f python-$PYTHON3_VERSION-macos$macver.pkg ] || curl -L -O https://www.python.org/ftp/python/$PYTHON3_VERSION/python-$PYTHON3_VERSION-macos$macver.pkg || exit 1
+        $no_build && echo "Skipping installation" && return
+        sudo installer -target / -pkg python-$PYTHON3_VERSION-macos$macver.pkg || exit 1
+        touch python3-$PYTHON3_VERSION-done
+
+        #
+        # On macOS, the pip3 installed from Python packages appears to
+        # install scripts in
+        # /Library/Frameworks/Python.framework/Versions/M.N/bin,
+        # where M.N is the major and minor version of Python (the dot-dot
+        # release is irrelevant).
+        #
+        # Strip off any dot-dot component in $PYTHON3_VERSION.
+        #
+        python_version=`echo $PYTHON3_VERSION | sed 's/\([1-9][0-9]*\.[1-9][0-9]*\).*/\1/'`
+        #
+        # Now treat Meson as being in the directory in question.
+        #
+        MESON="/Library/Frameworks/Python.framework/Versions/$python_version/bin/meson"
+    else
+        #
+        # We're using the Python 3 that's in /usr/bin, the pip3 for
+        # which installs scripts in /usr/local/bin, so, when we
+        # install Meson, look for it there.
+        #
+        MESON=/usr/local/bin/meson
+    fi
+}
+
+uninstall_python3() {
+    # Major and minor version (e.g. "3.7")
+    local PYTHON_VERSION=${installed_python3_version%.*}
+    if [ ! -z "$installed_python3_version" ] ; then
+        echo "Uninstalling python3:"
+        frameworkdir="/Library/Frameworks/Python.framework/Versions/$PYTHON_VERSION"
+        sudo rm -rf "$frameworkdir"
+        sudo rm -rf "/Applications/Python $PYTHON_VERSION"
+        sudo find /usr/local/bin -maxdepth 1 -lname "*$frameworkdir/bin/*" -delete
+        # Remove three symlinks and empty directories. Removing directories
+        # might fail if for some reason multiple versions are installed.
+        sudo rm /Library/Frameworks/Python.framework/Headers
+        sudo rm /Library/Frameworks/Python.framework/Python
+        sudo rm /Library/Frameworks/Python.framework/Resources
+        sudo rmdir /Library/Frameworks/Python.framework/Versions
+        sudo rmdir /Library/Frameworks/Python.framework
+        sudo pkgutil --forget org.python.Python.PythonApplications-$PYTHON_VERSION
+        sudo pkgutil --forget org.python.Python.PythonDocumentation-$PYTHON_VERSION
+        sudo pkgutil --forget org.python.Python.PythonFramework-$PYTHON_VERSION
+        sudo pkgutil --forget org.python.Python.PythonUnixTools-$PYTHON_VERSION
+        rm python3-$installed_python3_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -f python-$installed_python3_version-macos11.pkg
+            rm -f python-$installed_python3_version-macos11.0.pkg
+            rm -f python-$installed_python3_version-macosx10.9.pkg
+            rm -f python-$installed_python3_version-macosx10.6.pkg
+        fi
+
+        installed_python3_version=""
+    fi
+}
+
+install_brotli() {
+    if [ "$BROTLI_VERSION" -a ! -f brotli-$BROTLI_VERSION-done ] ; then
+        echo "Downloading, building, and installing brotli:"
+        [ -f brotli-$BROTLI_VERSION.tar.gz ] || curl -L -o brotli-$BROTLI_VERSION.tar.gz https://github.com/google/brotli/archive/v$BROTLI_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat brotli-$BROTLI_VERSION.tar.gz | tar xf - || exit 1
+        cd brotli-$BROTLI_VERSION
+        mkdir build_dir
+        cd build_dir
+        MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE ../ || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../..
+        touch brotli-$BROTLI_VERSION-done
+    fi
+}
+
+uninstall_brotli() {
-z "$installed_brotli_version" ] ; then + echo "Uninstalling brotli:" + cd brotli-$installed_brotli_version + # + # brotli uses cmake on macOS and doesn't support "make uninstall"; + # just remove what we know it installs. + # + # $DO_MAKE_UNINSTALL || exit 1 + $DO_RM -rf /usr/local/bin/brotli \ + /usr/local/lib/libbrotli* \ + /usr/local/include/brotli \ + /usr/local/lib/pkgconfig/libbrotli* || exit 1 + # + # brotli uses cmake on macOS and doesn't support "make distclean"; + # just remove the enire build directory. + # + # make distclean || exit 1 + rm -rf build_dir || exit 1 + cd .. + rm brotli-$installed_brotli_version-done + + if [ "$#" -eq 1 -a "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf brotli-$installed_brotli_version + rm -rf brotli-$installed_brotli_version.tar.gz + fi + + installed_brotli_version="" + fi +} + +install_minizip() { + if [ "$ZLIB_VERSION" ] && [ ! -f minizip-$ZLIB_VERSION-done ] ; then + echo "Downloading, building, and installing zlib for minizip:" + [ -f zlib-$ZLIB_VERSION.tar.gz ] || curl -L -o zlib-$ZLIB_VERSION.tar.gz https://zlib.net/zlib-$ZLIB_VERSION.tar.gz || exit 1 + $no_build && echo "Skipping installation" && return + gzcat zlib-$ZLIB_VERSION.tar.gz | tar xf - || exit 1 + # + # minizip ships both with a minimal Makefile that doesn't + # support "make install", "make uninstall", or "make distclean", + # and with a Makefile.am file that, if we do an autoreconf, + # gives us a configure script, and a Makefile.in that, if we run + # the configure script, gives us a Makefile that supports ll of + # those targets, and that installs a pkg-config .pc file for + # minizip. + # + # So that's what we do. + # + cd zlib-$ZLIB_VERSION/contrib/minizip || exit 1 + LIBTOOLIZE=glibtoolize autoreconf --force --install + CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1 + make $MAKE_BUILD_OPTS || exit 1 + $DO_MAKE_INSTALL || exit 1 + cd ../../.. + touch minizip-$ZLIB_VERSION-done + fi +} + +uninstall_minizip() { + if [ -n "$installed_minizip_version" ] ; then + echo "Uninstalling minizip:" + cd zlib-$installed_minizip_version/contrib/minizip + $DO_MAKE_UNINSTALL || exit 1 + make distclean || exit 1 + cd ../../.. + + rm minizip-$installed_minizip_version-done + + if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then + # + # Get rid of the previously downloaded and unpacked version. + # + rm -rf zlib-$installed_minizip_version + rm -rf zlib-$installed_minizip_version.tar.gz + fi + + installed_minizip_version="" + fi +} + +install_sparkle() { + if [ "$SPARKLE_VERSION" ] && [ ! 
-f sparkle-$SPARKLE_VERSION-done ] ; then + echo "Downloading and installing Sparkle:" + # + # Download the tarball and unpack it in /usr/local/Sparkle-x.y.z + # + [ -f Sparkle-$SPARKLE_VERSION.tar.xz ] || curl -L -o Sparkle-$SPARKLE_VERSION.tar.xz https://github.com/sparkle-project/Sparkle/releases/download/$SPARKLE_VERSION/Sparkle-$SPARKLE_VERSION.tar.xz || exit 1 + $no_build && echo "Skipping installation" && return + test -d "/usr/local/Sparkle-$SPARKLE_VERSION" || sudo mkdir "/usr/local/Sparkle-$SPARKLE_VERSION" + sudo tar -C "/usr/local/Sparkle-$SPARKLE_VERSION" -xpof Sparkle-$SPARKLE_VERSION.tar.xz + touch sparkle-$SPARKLE_VERSION-done + fi +} + +uninstall_sparkle() { + if [ -n "$installed_sparkle_version" ]; then + echo "Uninstalling Sparkle:" + sudo rm -rf "/usr/local/Sparkle-$installed_sparkle_version" + if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then + rm -f "Sparkle-$installed_sparkle_version.tar.xz" + fi + + installed_sparkle_version="" + fi +} + +install_all() { + # + # Check whether the versions we have installed are the versions + # requested; if not, uninstall the installed versions. + # + if [ ! -z "$installed_brotli_version" -a \ + "$installed_brotli_version" != "$BROTLI_VERSION" ] ; then + echo "Installed brotli version is $installed_brotli_version" + if [ -z "$BROTLI_VERSION" ] ; then + echo "brotli is not requested" + else + echo "Requested brotli version is $BROTLI_VERSION" + fi + uninstall_brotli -r + fi + + if [ ! -z "$installed_python3_version" -a \ + "$installed_python3_version" != "$PYTHON3_VERSION" ] ; then + echo "Installed python3 version is $installed_python3_version" + if [ -z "$PYTHON3_VERSION" ] ; then + echo "python3 is not requested" + else + echo "Requested python3 version is $PYTHON3_VERSION" + fi + uninstall_python3 -r + fi + + if [ ! -z "$installed_bcg729_version" -a \ + "$installed_bcg729_version" != "$BCG729_VERSION" ] ; then + echo "Installed bcg729 version is $installed_bcg729_version" + if [ -z "$BCG729_VERSION" ] ; then + echo "bcg729 is not requested" + else + echo "Requested bcg729 version is $BCG729_VERSION" + fi + uninstall_bcg729 -r + fi + + if [ -n "$installed_ilbc_version" ] \ + && [ "$installed_ilbc_version" != "$ILBC_VERSION" ] ; then + echo "Installed iLBC version is $installed_ilbc_version" + if [ -z "$ILBC_VERSION" ] ; then + echo "iLBC is not requested" + else + echo "Requested iLBC version is $ILBC_VERSION" + fi + uninstall_ilbc -r + fi + + if [ -n "$installed_opus_version" ] \ + && [ "$installed_opus_version" != "$OPUS_VERSION" ] ; then + echo "Installed opus version is $installed_opus_version" + if [ -z "$OPUS_VERSION" ] ; then + echo "opus is not requested" + else + echo "Requested opus version is $OPUS_VERSION" + fi + uninstall_opus -r + fi + + if [ ! -z "$installed_spandsp_version" -a \ + "$installed_spandsp_version" != "$SPANDSP_VERSION" ] ; then + echo "Installed SpanDSP version is $installed_spandsp_version" + if [ -z "$SPANDSP_VERSION" ] ; then + echo "spandsp is not requested" + else + echo "Requested SpanDSP version is $SPANDSP_VERSION" + fi + uninstall_spandsp -r + fi + + if [ ! -z "$installed_speexdsp_version" -a \ + "$installed_speexdsp_version" != "$SPEEXDSP_VERSION" ] ; then + echo "Installed SpeexDSP version is $installed_speexdsp_version" + if [ -z "$SPEEXDSP_VERSION" ] ; then + echo "speexdsp is not requested" + else + echo "Requested SpeexDSP version is $SPEEXDSP_VERSION" + fi + uninstall_speexdsp -r + fi + + if [ ! 
-z "$installed_libtiff_version" -a \ + "$installed_libtiff_version" != "$LIBTIFF_VERSION" ] ; then + echo "Installed libtiff version is $installed_libtiff_version" + if [ -z "$LIBTIFF_VERSION" ] ; then + echo "libtiff is not requested" + else + echo "Requested libtiff version is $LIBTIFF_VERSION" + fi + uninstall_libtiff -r + fi + + if [ ! -z "$installed_nghttp2_version" -a \ + "$installed_nghttp2_version" != "$NGHTTP2_VERSION" ] ; then + echo "Installed nghttp2 version is $installed_nghttp2_version" + if [ -z "$NGHTTP2_VERSION" ] ; then + echo "nghttp2 is not requested" + else + echo "Requested nghttp2 version is $NGHTTP2_VERSION" + fi + uninstall_nghttp2 -r + fi + + if [ ! -z "$installed_nghttp3_version" -a \ + "$installed_nghttp3_version" != "$NGHTTP3_VERSION" ] ; then + echo "Installed nghttp3 version is $installed_nghttp3_version" + if [ -z "$NGHTTP3_VERSION" ] ; then + echo "nghttp3 is not requested" + else + echo "Requested nghttp3 version is $NGHTTP3_VERSION" + fi + uninstall_nghttp3 -r + fi + + if [ ! -z "$installed_libssh_version" -a \ + "$installed_libssh_version" != "$LIBSSH_VERSION" ] ; then + echo "Installed libssh version is $installed_libssh_version" + if [ -z "$LIBSSH_VERSION" ] ; then + echo "libssh is not requested" + else + echo "Requested libssh version is $LIBSSH_VERSION" + fi + uninstall_libssh -r + fi + + if [ ! -z "$installed_cares_version" -a \ + "$installed_cares_version" != "$CARES_VERSION" ] ; then + echo "Installed C-Ares version is $installed_cares_version" + if [ -z "$CARES_VERSION" ] ; then + echo "C-Ares is not requested" + else + echo "Requested C-Ares version is $CARES_VERSION" + fi + uninstall_c_ares -r + fi + + if [ ! -z "$installed_maxminddb_version" -a \ + "$installed_maxminddb_version" != "$MAXMINDDB_VERSION" ] ; then + echo "Installed MaxMindDB API version is $installed_maxminddb_version" + if [ -z "$MAXMINDDB_VERSION" ] ; then + echo "MaxMindDB is not requested" + else + echo "Requested MaxMindDB version is $MAXMINDDB_VERSION" + fi + uninstall_maxminddb -r + fi + + if [ ! -z "$installed_sbc_version" -a \ + "$installed_sbc_version" != "$SBC_VERSION" ] ; then + echo "Installed SBC version is $installed_sbc_version" + if [ -z "$SBC_VERSION" ] ; then + echo "SBC is not requested" + else + echo "Requested SBC version is $SBC_VERSION" + fi + uninstall_sbc -r + fi + + if [ ! -z "$installed_lz4_version" -a \ + "$installed_lz4_version" != "$LZ4_VERSION" ] ; then + echo "Installed LZ4 version is $installed_lz4_version" + if [ -z "$LZ4_VERSION" ] ; then + echo "LZ4 is not requested" + else + echo "Requested LZ4 version is $LZ4_VERSION" + fi + uninstall_lz4 -r + fi + + if [ ! -z "$installed_libxml2_version" -a \ + "$installed_libxml2_version" != "$LIBXML2_VERSION" ] ; then + echo "Installed libxml2 version is $installed_libxml2_version" + if [ -z "$LIBXML2_VERSION" ] ; then + echo "libxml2 is not requested" + else + echo "Requested libxml2 version is $LIBXML2_VERSION" + fi + uninstall_libxml2 -r + fi + + if [ ! -z "$installed_snappy_version" -a \ + "$installed_snappy_version" != "$SNAPPY_VERSION" ] ; then + echo "Installed SNAPPY version is $installed_snappy_version" + if [ -z "$SNAPPY_VERSION" ] ; then + echo "SNAPPY is not requested" + else + echo "Requested SNAPPY version is $SNAPPY_VERSION" + fi + uninstall_snappy -r + fi + + if [ ! 
-z "$installed_lua_version" -a \ + "$installed_lua_version" != "$LUA_VERSION" ] ; then + echo "Installed Lua version is $installed_lua_version" + if [ -z "$LUA_VERSION" ] ; then + echo "Lua is not requested" + else + echo "Requested Lua version is $LUA_VERSION" + fi + uninstall_lua -r + fi + + if [ ! -z "$installed_gnutls_version" -a \ + "$installed_gnutls_version" != "$GNUTLS_VERSION" ] ; then + echo "Installed GnuTLS version is $installed_gnutls_version" + if [ -z "$GNUTLS_VERSION" ] ; then + echo "GnuTLS is not requested" + else + echo "Requested GnuTLS version is $GNUTLS_VERSION" + fi + uninstall_gnutls -r + fi + + if [ ! -z "$installed_nettle_version" -a \ + "$installed_nettle_version" != "$NETTLE_VERSION" ] ; then + echo "Installed Nettle version is $installed_nettle_version" + if [ -z "$NETTLE_VERSION" ] ; then + echo "Nettle is not requested" + else + echo "Requested Nettle version is $NETTLE_VERSION" + fi + uninstall_nettle -r + fi + + if [ ! -z "$installed_gmp_version" -a \ + "$installed_gmp_version" != "$GMP_VERSION" ] ; then + echo "Installed GMP version is $installed_gmp_version" + if [ -z "$GMP_VERSION" ] ; then + echo "GMP is not requested" + else + echo "Requested GMP version is $GMP_VERSION" + fi + uninstall_gmp -r + fi + + if [ ! -z "$installed_p11_kit_version" -a \ + "$installed_p11_kit_version" != "$P11KIT_VERSION" ] ; then + echo "Installed p11-kit version is $installed_p11_kit_version" + if [ -z "$P11KIT_VERSION" ] ; then + echo "p11-kit is not requested" + else + echo "Requested p11-kit version is $P11KIT_VERSION" + fi + uninstall_p11_kit -r + fi + + if [ ! -z "$installed_libtasn1_version" -a \ + "$installed_libtasn1_version" != "$LIBTASN1_VERSION" ] ; then + echo "Installed libtasn1 version is $installed_libtasn1_version" + if [ -z "$LIBTASN1_VERSION" ] ; then + echo "libtasn1 is not requested" + else + echo "Requested libtasn1 version is $LIBTASN1_VERSION" + fi + uninstall_libtasn1 -r + fi + + if [ ! -z "$installed_libgcrypt_version" -a \ + "$installed_libgcrypt_version" != "$LIBGCRYPT_VERSION" ] ; then + echo "Installed libgcrypt version is $installed_libgcrypt_version" + if [ -z "$LIBGCRYPT_VERSION" ] ; then + echo "libgcrypt is not requested" + else + echo "Requested libgcrypt version is $LIBGCRYPT_VERSION" + fi + uninstall_libgcrypt -r + fi + + if [ ! -z "$installed_libgpg_error_version" -a \ + "$installed_libgpg_error_version" != "$LIBGPG_ERROR_VERSION" ] ; then + echo "Installed libgpg-error version is $installed_libgpg_error_version" + if [ -z "$LIBGPG_ERROR_VERSION" ] ; then + echo "libgpg-error is not requested" + else + echo "Requested libgpg-error version is $LIBGPG_ERROR_VERSION" + fi + uninstall_libgpg_error -r + fi + + if [ ! -z "$installed_libsmi_version" -a \ + "$installed_libsmi_version" != "$LIBSMI_VERSION" ] ; then + echo "Installed libsmi version is $installed_libsmi_version" + if [ -z "$LIBSMI_VERSION" ] ; then + echo "libsmi is not requested" + else + echo "Requested libsmi version is $LIBSMI_VERSION" + fi + uninstall_libsmi -r + fi + + if [ ! -z "$installed_qt_version" -a \ + "$installed_qt_version" != "$QT_VERSION" ] ; then + echo "Installed Qt version is $installed_qt_version" + if [ -z "$QT_VERSION" ] ; then + echo "Qt is not requested" + else + echo "Requested Qt version is $QT_VERSION" + fi + uninstall_qt -r + fi + + if [ ! 
-z "$installed_glib_version" -a \ + "$installed_glib_version" != "$GLIB_VERSION" ] ; then + echo "Installed GLib version is $installed_glib_version" + if [ -z "$GLIB_VERSION" ] ; then + echo "GLib is not requested" + else + echo "Requested GLib version is $GLIB_VERSION" + fi + uninstall_glib -r + fi + + if [ ! -z "$installed_pkg_config_version" -a \ + "$installed_pkg_config_version" != "$PKG_CONFIG_VERSION" ] ; then + echo "Installed pkg-config version is $installed_pkg_config_version" + if [ -z "$PKG_CONFIG_VERSION" ] ; then + echo "pkg-config is not requested" + else + echo "Requested pkg-config version is $PKG_CONFIG_VERSION" + fi + uninstall_pkg_config -r + fi + + if [ ! -z "$installed_gettext_version" -a \ + "$installed_gettext_version" != "$GETTEXT_VERSION" ] ; then + echo "Installed GNU gettext version is $installed_gettext_version" + if [ -z "$GETTEXT_VERSION" ] ; then + echo "GNU gettext is not requested" + else + echo "Requested GNU gettext version is $GETTEXT_VERSION" + fi + uninstall_gettext -r + fi + + if [ ! -z "$installed_ninja_version" -a \ + "$installed_ninja_version" != "$NINJA_VERSION" ] ; then + echo "Installed Ninja version is $installed_ninja_version" + if [ -z "$NINJA_VERSION" ] ; then + echo "Ninja is not requested" + else + echo "Requested Ninja version is $NINJA_VERSION" + fi + uninstall_ninja -r + fi + + if [ ! -z "$installed_asciidoctorpdf_version" -a \ + "$installed_asciidoctorpdf_version" != "$ASCIIDOCTORPDF_VERSION" ] ; then + echo "Installed Asciidoctor-pdf version is $installed_asciidoctorpdf_version" + if [ -z "$ASCIIDOCTORPDF_VERSION" ] ; then + echo "Asciidoctor-pdf is not requested" + else + echo "Requested Asciidoctor-pdf version is $ASCIIDOCTORPDF_VERSION" + fi + # XXX - really remove this? + # Or should we remember it as installed only if this script + # installed it? + # + uninstall_asciidoctorpdf -r + fi + + if [ ! -z "$installed_asciidoctor_version" -a \ + "$installed_asciidoctor_version" != "$ASCIIDOCTOR_VERSION" ] ; then + echo "Installed Asciidoctor version is $installed_asciidoctor_version" + if [ -z "$ASCIIDOCTOR_VERSION" ] ; then + echo "Asciidoctor is not requested" + else + echo "Requested Asciidoctor version is $ASCIIDOCTOR_VERSION" + fi + # XXX - really remove this? + # Or should we remember it as installed only if this script + # installed it? + # + uninstall_asciidoctor -r + fi + + if [ ! -z "$installed_cmake_version" -a \ + "$installed_cmake_version" != "$CMAKE_VERSION" ] ; then + echo "Installed CMake version is $installed_cmake_version" + if [ -z "$CMAKE_VERSION" ] ; then + echo "CMake is not requested" + else + echo "Requested CMake version is $CMAKE_VERSION" + fi + uninstall_cmake -r + fi + + if [ ! -z "$installed_libtool_version" -a \ + "$installed_libtool_version" != "$LIBTOOL_VERSION" ] ; then + echo "Installed GNU libtool version is $installed_libtool_version" + if [ -z "$LIBTOOL_VERSION" ] ; then + echo "GNU libtool is not requested" + else + echo "Requested GNU libtool version is $LIBTOOL_VERSION" + fi + uninstall_libtool -r + fi + + if [ ! -z "$installed_automake_version" -a \ + "$installed_automake_version" != "$AUTOMAKE_VERSION" ] ; then + echo "Installed GNU automake version is $installed_automake_version" + if [ -z "$AUTOMAKE_VERSION" ] ; then + echo "GNU automake is not requested" + else + echo "Requested GNU automake version is $AUTOMAKE_VERSION" + fi + uninstall_automake -r + fi + + if [ ! 
-z "$installed_autoconf_version" -a \ + "$installed_autoconf_version" != "$AUTOCONF_VERSION" ] ; then + echo "Installed GNU autoconf version is $installed_autoconf_version" + if [ -z "$AUTOCONF_VERSION" ] ; then + echo "GNU autoconf is not requested" + else + echo "Requested GNU autoconf version is $AUTOCONF_VERSION" + fi + uninstall_autoconf -r + fi + + if [ ! -z "$installed_pcre_version" -a \ + "$installed_pcre_version" != "$PCRE_VERSION" ] ; then + echo "Installed pcre version is $installed_pcre_version" + if [ -z "$PCRE_VERSION" ] ; then + echo "pcre is not requested" + else + echo "Requested pcre version is $PCRE_VERSION" + fi + uninstall_pcre -r + fi + + if [ -n "$installed_pcre2_version" -a \ + "$installed_pcre2_version" != "$PCRE2_VERSION" ] ; then + echo "Installed pcre2 version is $installed_pcre2_version" + if [ -z "$PCRE2_VERSION" ] ; then + echo "pcre2 is not requested" + else + echo "Requested pcre2 version is $PCRE2_VERSION" + fi + uninstall_pcre2 -r + fi + + if [ ! -z "$installed_lzip_version" -a \ + "$installed_lzip_version" != "$LZIP_VERSION" ] ; then + echo "Installed lzip version is $installed_lzip_version" + if [ -z "$LZIP_VERSION" ] ; then + echo "lzip is not requested" + else + echo "Requested lzip version is $LZIP_VERSION" + fi + uninstall_lzip -r + fi + + if [ ! -z "$installed_xz_version" -a \ + "$installed_xz_version" != "$XZ_VERSION" ] ; then + echo "Installed xz version is $installed_xz_version" + if [ -z "$XZ_VERSION" ] ; then + echo "xz is not requested" + else + echo "Requested xz version is $XZ_VERSION" + fi + uninstall_xz -r + fi + + if [ ! -z "$installed_curl_version" -a \ + "$installed_curl_version" != "$CURL_VERSION" ] ; then + echo "Installed curl version is $installed_curl_version" + if [ -z "$CURL_VERSION" ] ; then + echo "curl is not requested" + else + echo "Requested curl version is $CURL_VERSION" + fi + uninstall_curl -r + fi + + if [ ! -z "$installed_minizip_version" -a \ + "$installed_minizip_version" != "$ZLIB_VERSION" ] ; then + echo "Installed minizip (zlib) version is $installed_minizip_version" + if [ -z "$ZLIB_VERSION" ] ; then + echo "minizip is not requested" + else + echo "Requested minizip (zlib) version is $ZLIB_VERSION" + fi + uninstall_minizip -r + fi + + if [ ! -z "$installed_sparkle_version" -a \ + "$installed_sparkle_version" != "$SPARKLE_VERSION" ] ; then + echo "Installed Sparkle version is $installed_sparkle_version" + if [ -z "$SPARKLE_VERSION" ] ; then + echo "Sparkle is not requested" + else + echo "Requested Sparkle version is $SPARKLE_VERSION" + fi + uninstall_sparkle -r + fi + + # + # Start with curl: we may need it to download and install xz. + # + install_curl + + # + # Now intall xz: it is the sole download format of glib later than 2.31.2. + # + install_xz + + install_lzip + + install_pcre + + install_autoconf + + install_automake + + install_libtool + + install_cmake + + install_pcre2 + + # + # Install Python 3 now; not only is it needed for the Wireshark + # build process, it's also needed for the Meson build system, + # which newer versions of GLib use as their build system. + # + install_python3 + + # + # Now install Meson and pytest. + # + install_meson + + install_pytest + + install_ninja + + install_asciidoctor + + install_asciidoctorpdf + + # + # Start with GNU gettext; GLib requires it, and macOS doesn't have it + # or a BSD-licensed replacement. 
+ # + # At least on Lion with Xcode 4, _FORTIFY_SOURCE gets defined as 2 + # by default, which causes, for example, stpncpy to be defined as + # a hairy macro that collides with the GNU gettext configure script's + # attempts to work around AIX's lack of a declaration for stpncpy, + # with the result being a huge train wreck. Define _FORTIFY_SOURCE + # as 0 in an attempt to keep the trains on separate tracks. + # + install_gettext + + # + # GLib depends on pkg-config. + # By default, pkg-config depends on GLib; we break the dependency cycle + # by configuring pkg-config to use its own internal version of GLib. + # + install_pkg_config + + install_glib + + # + # Now we have reached a point where we can build everything but + # the GUI (Wireshark). + # + install_qt + + # + # Now we have reached a point where we can build everything including + # the GUI (Wireshark), but not with any optional features such as + # SNMP OID resolution, some forms of decryption, Lua scripting, playback + # of audio, or MaxMindDB mapping of IP addresses. + # + # We now conditionally download optional libraries to support them; + # the default is to download them all. + # + + install_libsmi + + install_libgpg_error + + install_libgcrypt + + install_gmp + + install_libtasn1 + + install_p11_kit + + install_nettle + + install_gnutls + + install_lua + + install_snappy + + install_zstd + + install_libxml2 + + install_lz4 + + install_sbc + + install_maxminddb + + install_c_ares + + install_libssh + + install_nghttp2 + + install_nghttp3 + + install_libtiff + + install_spandsp + + install_speexdsp + + install_bcg729 + + install_ilbc + + install_opus + + install_brotli + + install_minizip + + install_sparkle +} + +uninstall_all() { + if [ -d "${MACOSX_SUPPORT_LIBS}" ] + then + cd "${MACOSX_SUPPORT_LIBS}" + + # + # Uninstall items in the reverse order from the order in which they're + # installed. Only uninstall if the download/build/install process + # completed; uninstall the version that appears in the name of + # the -done file. + # + # We also do a "make distclean", so that we don't have leftovers from + # old configurations. + # + uninstall_sparkle + + uninstall_minizip + + uninstall_brotli + + uninstall_opus + + uninstall_ilbc + + uninstall_bcg729 + + uninstall_speexdsp + + uninstall_spandsp + + uninstall_libtiff + + uninstall_nghttp2 + + uninstall_nghttp3 + + uninstall_libssh + + uninstall_c_ares + + uninstall_maxminddb + + uninstall_snappy + + uninstall_zstd + + uninstall_libxml2 + + uninstall_lz4 + + uninstall_sbc + + uninstall_lua + + uninstall_gnutls + + uninstall_nettle + + uninstall_p11_kit + + uninstall_libtasn1 + + uninstall_gmp + + uninstall_libgcrypt + + uninstall_libgpg_error + + uninstall_libsmi + + uninstall_qt + + uninstall_glib + + uninstall_pkg_config + + uninstall_gettext + + uninstall_ninja + + # + # XXX - really remove this? + # Or should we remember it as installed only if this script + # installed it? + # + uninstall_asciidoctorpdf + + uninstall_asciidoctor + + uninstall_pytest + + uninstall_meson + + uninstall_python3 + + uninstall_cmake + + uninstall_libtool + + uninstall_automake + + uninstall_autoconf + + uninstall_pcre + + uninstall_lzip + + uninstall_xz + + uninstall_curl + fi +} + +# +# Do we have permission to write in /usr/local? +# +# If so, assume we have permission to write in its subdirectories. +# (If that's not the case, this test needs to check the subdirectories +# as well.)
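+# +# (For example: on a stock macOS install, /usr/local is owned by root, +# so the test below typically selects the sudo-prefixed commands; on a +# machine where an administrator has made /usr/local writable, such as +# an Intel Homebrew layout, the plain commands are used instead.)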
+# +# If not, do "make install", "make uninstall", "ninja install", +# "ninja uninstall", the removes for dependencies that don't support +# "make uninstall" or "ninja uninstall", the renames of [g]libtool*, +# and the writing of a libffi .pc file with sudo. +# +if [ -w /usr/local ] +then + DO_MAKE_INSTALL="make install" + DO_MAKE_UNINSTALL="make uninstall" + DO_NINJA_INSTALL="ninja -C _build install" + DO_NINJA_UNINSTALL="ninja -C _build uninstall" + DO_TEE_TO_PC_FILE="tee" + DO_RM="rm" + DO_MV="mv" +else + DO_MAKE_INSTALL="sudo make install" + DO_MAKE_UNINSTALL="sudo make uninstall" + DO_NINJA_INSTALL="sudo ninja -C _build install" + DO_NINJA_UNINSTALL="sudo ninja -C _build uninstall" + DO_TEE_TO_PC_FILE="sudo tee" + DO_RM="sudo rm" + DO_MV="sudo mv" +fi + +# +# When building with CMake, don't build libraries with an install path +# that begins with @rpath because that will cause binaries linked with it +# to use that path as the library to look for, and that will cause the +# run-time linker, at least on macOS 14 and later, not to find the library +# in /usr/local/lib unless you explicitly set DYLD_LIBRARY_PATH to include +# /usr/local/lib. That means that you get "didn't find libpcre" errors if +# you try to run binaries from a build unless you set DYLD_LIBRARYPATH to +# include /usr/local/lib. +# +# However, setting CMAKE_MACOSX_RPATH to OFF causes the installed +# library just to have the file name of the library as its install +# name. It needs to be the full installed path of the library in +# order to make running binaries from the build directory work, so +# we set CMAKE_INSTALL_NAME_DIR to /usr/local/lib. +# +# packaging/macosx/osx-app.sh will convert *all* libraries in +# the app bundle to have an @rpath install name, so this won't +# break anything there; it just fixes the ability to run from the +# build directory. +# +DO_CMAKE="cmake -DCMAKE_MACOSX_RPATH=OFF -DCMAKE_INSTALL_NAME_DIR=/usr/local/lib" + +# This script is meant to be run in the source root. The following +# code will attempt to get you there, but is not perfect (particulary +# if someone copies the script). + +topdir=`pwd`/`dirname $0`/.. +cd $topdir + +# Preference of the support libraries directory: +# ${MACOSX_SUPPORT_LIBS} +# ../macosx-support-libs +# ./macosx-support-libs (default if none exists) +if [ ! -d "${MACOSX_SUPPORT_LIBS}" ]; then + unset MACOSX_SUPPORT_LIBS +fi +if [ -d ../macosx-support-libs ]; then + MACOSX_SUPPORT_LIBS=${MACOSX_SUPPORT_LIBS-../macosx-support-libs} +else + MACOSX_SUPPORT_LIBS=${MACOSX_SUPPORT_LIBS-./macosx-support-libs} +fi + +# +# If we have SDKs available, the default target OS is the major version +# of the one we're running; get that and strip off the third component +# if present. +# +for i in /Developer/SDKs \ + /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \ + /Library/Developer/CommandLineTools/SDKs +do + if [ -d "$i" ] + then + min_osx_target=`sw_vers -productVersion | sed 's/\([0-9]*\)\.\([0-9]*\)\.[0-9]*/\1.\2/'` + break + fi +done + +# +# Parse command-line flags: +# +# -h - print help. +# -t - build libraries so that they'll work on the specified +# version of macOS and later versions. +# -u - do an uninstall. +# -n - download all packages, but don't build or install. +# + +no_build=false + +while getopts ht:un name +do + case $name in + u) + do_uninstall=yes + ;; + n) + no_build=true + ;; + t) + min_osx_target="$OPTARG" + ;; + h|?) 
+ echo "Usage: macos-setup.sh [ -t ] [ -u ] [ -n ]" 1>&1 + exit 0 + ;; + esac +done + +# +# Get the version numbers of installed packages, if any. +# +if [ -d "${MACOSX_SUPPORT_LIBS}" ] +then + cd "${MACOSX_SUPPORT_LIBS}" + + installed_xz_version=`ls xz-*-done 2>/dev/null | sed 's/xz-\(.*\)-done/\1/'` + installed_lzip_version=`ls lzip-*-done 2>/dev/null | sed 's/lzip-\(.*\)-done/\1/'` + installed_pcre_version=`ls pcre-*-done 2>/dev/null | sed 's/pcre-\(.*\)-done/\1/'` + installed_pcre2_version=$(ls pcre2-*-done 2>/dev/null | sed 's/pcre2-\(.*\)-done/\1/') + installed_autoconf_version=`ls autoconf-*-done 2>/dev/null | sed 's/autoconf-\(.*\)-done/\1/'` + installed_automake_version=`ls automake-*-done 2>/dev/null | sed 's/automake-\(.*\)-done/\1/'` + installed_libtool_version=`ls libtool-*-done 2>/dev/null | sed 's/libtool-\(.*\)-done/\1/'` + installed_cmake_version=`ls cmake-*-done 2>/dev/null | sed 's/cmake-\(.*\)-done/\1/'` + installed_ninja_version=`ls ninja-*-done 2>/dev/null | sed 's/ninja-\(.*\)-done/\1/'` + installed_asciidoctor_version=`ls asciidoctor-*-done 2>/dev/null | sed 's/asciidoctor-\(.*\)-done/\1/'` + installed_asciidoctorpdf_version=`ls asciidoctorpdf-*-done 2>/dev/null | sed 's/asciidoctorpdf-\(.*\)-done/\1/'` + installed_gettext_version=`ls gettext-*-done 2>/dev/null | sed 's/gettext-\(.*\)-done/\1/'` + installed_pkg_config_version=`ls pkg-config-*-done 2>/dev/null | sed 's/pkg-config-\(.*\)-done/\1/'` + installed_glib_version=`ls glib-*-done 2>/dev/null | sed 's/glib-\(.*\)-done/\1/'` + installed_qt_version=`ls qt-*-done 2>/dev/null | sed 's/qt-\(.*\)-done/\1/'` + installed_libsmi_version=`ls libsmi-*-done 2>/dev/null | sed 's/libsmi-\(.*\)-done/\1/'` + installed_libgpg_error_version=`ls libgpg-error-*-done 2>/dev/null | sed 's/libgpg-error-\(.*\)-done/\1/'` + installed_libgcrypt_version=`ls libgcrypt-*-done 2>/dev/null | sed 's/libgcrypt-\(.*\)-done/\1/'` + installed_gmp_version=`ls gmp-*-done 2>/dev/null | sed 's/gmp-\(.*\)-done/\1/'` + installed_libtasn1_version=`ls libtasn1-*-done 2>/dev/null | sed 's/libtasn1-\(.*\)-done/\1/'` + installed_p11_kit_version=`ls p11-kit-*-done 2>/dev/null | sed 's/p11-kit-\(.*\)-done/\1/'` + installed_nettle_version=`ls nettle-*-done 2>/dev/null | sed 's/nettle-\(.*\)-done/\1/'` + installed_gnutls_version=`ls gnutls-*-done 2>/dev/null | sed 's/gnutls-\(.*\)-done/\1/'` + installed_lua_version=`ls lua-*-done 2>/dev/null | sed 's/lua-\(.*\)-done/\1/'` + installed_snappy_version=`ls snappy-*-done 2>/dev/null | sed 's/snappy-\(.*\)-done/\1/'` + installed_zstd_version=`ls zstd-*-done 2>/dev/null | sed 's/zstd-\(.*\)-done/\1/'` + installed_libxml2_version=`ls libxml2-*-done 2>/dev/null | sed 's/libxml2-\(.*\)-done/\1/'` + installed_lz4_version=`ls lz4-*-done 2>/dev/null | sed 's/lz4-\(.*\)-done/\1/'` + installed_sbc_version=`ls sbc-*-done 2>/dev/null | sed 's/sbc-\(.*\)-done/\1/'` + installed_maxminddb_version=`ls maxminddb-*-done 2>/dev/null | sed 's/maxminddb-\(.*\)-done/\1/'` + installed_cares_version=`ls c-ares-*-done 2>/dev/null | sed 's/c-ares-\(.*\)-done/\1/'` + installed_libssh_version=`ls libssh-*-done 2>/dev/null | sed 's/libssh-\(.*\)-done/\1/'` + installed_nghttp2_version=`ls nghttp2-*-done 2>/dev/null | sed 's/nghttp2-\(.*\)-done/\1/'` + installed_nghttp3_version=`ls nghttp3-*-done 2>/dev/null | sed 's/nghttp3-\(.*\)-done/\1/'` + installed_libtiff_version=`ls tiff-*-done 2>/dev/null | sed 's/tiff-\(.*\)-done/\1/'` + installed_spandsp_version=`ls spandsp-*-done 2>/dev/null | sed 's/spandsp-\(.*\)-done/\1/'` + 
installed_speexdsp_version=`ls speexdsp-*-done 2>/dev/null | sed 's/speexdsp-\(.*\)-done/\1/'` + installed_bcg729_version=`ls bcg729-*-done 2>/dev/null | sed 's/bcg729-\(.*\)-done/\1/'` + installed_ilbc_version=`ls ilbc-*-done 2>/dev/null | sed 's/ilbc-\(.*\)-done/\1/'` + installed_opus_version=`ls opus-*-done 2>/dev/null | sed 's/opus-\(.*\)-done/\1/'` + installed_python3_version=`ls python3-*-done 2>/dev/null | sed 's/python3-\(.*\)-done/\1/'` + installed_brotli_version=`ls brotli-*-done 2>/dev/null | sed 's/brotli-\(.*\)-done/\1/'` + installed_minizip_version=`ls minizip-*-done 2>/dev/null | sed 's/minizip-\(.*\)-done/\1/'` + installed_sparkle_version=`ls sparkle-*-done 2>/dev/null | sed 's/sparkle-\(.*\)-done/\1/'` + + cd $topdir +fi + +if [ "$do_uninstall" = "yes" ] +then + uninstall_all + exit 0 +fi + +# +# Configure scripts tend to set CFLAGS and CXXFLAGS to "-g -O2" if +# invoked without CFLAGS or CXXFLAGS being set in the environment. +# +# However, we *are* setting them in the environment, for our own +# nefarious purposes, so start them out as "-g -O2". +# +CFLAGS="-g -O2" +CXXFLAGS="-g -O2" + +# if no make options are present, set default options +if [ -z "$MAKE_BUILD_OPTS" ] ; then + # by default, use 1.5x the number of cores for a parallel build + MAKE_BUILD_OPTS="-j $(( $(sysctl -n hw.logicalcpu) * 3 / 2))" +fi + +# +# If we have a target release, look for the oldest SDK that's for an +# OS equal to or later than that one, and build libraries against it +# rather than against the headers and, more importantly, libraries +# that come with the OS, so that we don't end up with support libraries +# that only work on the OS version on which we built them, not earlier +# versions of the same release, or earlier releases if the minimum is +# earlier. +# +if [ ! -z "$min_osx_target" ] +then + # + # Get the major and minor version of the target release. + # We assume it'll be a while before there's a macOS 100. :-) + # + case "$min_osx_target" in + + [1-9][0-9].*) + # + # major.minor. + # + min_osx_target_major=`echo "$min_osx_target" | sed -n 's/\([1-9][0-9]*\)\..*/\1/p'` + min_osx_target_minor=`echo "$min_osx_target" | sed -n 's/[1-9][0-9]*\.\(.*\)/\1/p'` + ;; + + [1-9][0-9]) + # + # Just a major version number was specified; make the minor + # version 0. + # + min_osx_target_major="$min_osx_target" + min_osx_target_minor=0 + ;; + + *) + echo "macos-setup.sh: Invalid target release $min_osx_target" 1>&2 + exit 1 + ;; + esac + + # + # Search each directory that might contain SDKs. + # + sdkpath="" + for sdksdir in /Developer/SDKs \ + /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \ + /Library/Developer/CommandLineTools/SDKs + do + # + # Get a list of all the SDKs. + # + if ! test -d "$sdksdir" + then + # + # There is no directory with that name. + # Move on to the next one in the list, if any. + # + continue + fi + + # + # Get a list of all the SDKs in that directory, if any. + # We assume it'll be a while before there's a macOS 100. :-) + # + sdklist=`(cd "$sdksdir"; ls -d MacOSX[1-9][0-9].[0-9]*.sdk 2>/dev/null)` + + for sdk in $sdklist + do + # + # Get the major and minor version for this SDK. + # + sdk_major=`echo "$sdk" | sed -n 's/MacOSX\([1-9][0-9]*\)\..*\.sdk/\1/p'` + sdk_minor=`echo "$sdk" | sed -n 's/MacOSX[1-9][0-9]*\.\(.*\)\.sdk/\1/p'` + + # + # Is it for the deployment target or some later release? + # Starting with major 11, the minor version no longer matters.
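+ # (Worked example: with a minimum target of 10.14, MacOSX10.15.sdk and + # MacOSX11.sdk would both pass this test, MacOSX10.13.sdk would not, + # and the first qualifying SDK in "ls" order is the one used.)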
+ # + if test "$sdk_major" -gt "$min_osx_target_major" -o \ + \( "$sdk_major" -eq "$min_osx_target_major" -a \ + \( "$sdk_major" -ge 11 -o \ + "$sdk_minor" -ge "$min_osx_target_minor" \) \) + then + # + # Yes, use it. + # + sdkpath="$sdksdir/$sdk" + break 2 + fi + done + done + + if [ -z "$sdkpath" ] + then + echo "macos-setup.sh: Couldn't find an SDK for macOS $min_osx_target or later" 1>&2 + exit 1 + fi + + SDKPATH="$sdkpath" + echo "Using the $sdk_major.$sdk_minor SDK" + + # + # Make sure there are links to /usr/local/include and /usr/local/lib + # in the SDK's usr/local. + # + if [ ! -e $SDKPATH/usr/local/include ] + then + if [ ! -d $SDKPATH/usr/local ] + then + sudo mkdir $SDKPATH/usr/local + fi + sudo ln -s /usr/local/include $SDKPATH/usr/local/include + fi + if [ ! -e $SDKPATH/usr/local/lib ] + then + if [ ! -d $SDKPATH/usr/local ] + then + sudo mkdir $SDKPATH/usr/local + fi + sudo ln -s /usr/local/lib $SDKPATH/usr/local/lib + fi + + # + # Set the minimum OS version for which to build to the specified + # minimum target OS version, so we don't, for example, end up using + # linker features supported by the OS verson on which we're building + # but not by the target version. + # + VERSION_MIN_FLAGS="-mmacosx-version-min=$min_osx_target" + + # + # Compile and link against the SDK. + # + SDKFLAGS="-isysroot $SDKPATH" + +fi + +export CFLAGS +export CXXFLAGS + +# +# You need Xcode or the command-line tools installed to get the compilers (xcrun checks both). +# + if [ ! -x /usr/bin/xcrun ]; then + echo "Please install Xcode (app or command line) first (should be available on DVD or from the Mac App Store)." + exit 1 +fi + +if [ "$QT_VERSION" ]; then + # + # We need Xcode, not just the command-line tools, installed to build + # Qt. + # + # At least with Xcode 8, /usr/bin/xcodebuild --help fails if only + # the command-line tools are installed and succeeds if Xcode is + # installed. Unfortunately, it fails *with* Xcode 3, but + # /usr/bin/xcodebuild -version works with that and with Xcode 8. + # Hopefully it fails with only the command-line tools installed. + # + if /usr/bin/xcodebuild -version >/dev/null 2>&1; then + : + elif qmake --version >/dev/null 2>&1; then + : + else + echo "Please install Xcode first (should be available on DVD or from the Mac App Store)." + echo "The command-line build tools are not sufficient to build Qt." + echo "Alternatively build QT according to: https://gist.github.com/shoogle/750a330c851bd1a924dfe1346b0b4a08#:~:text=MacOS%2FQt%5C%20Creator-,Go%20to%20Qt%20Creator%20%3E%20Preferences%20%3E%20Build%20%26%20Run%20%3E%20Kits,for%20both%20compilers%2C%20not%20gcc%20." + exit 1 + fi +fi + +export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig + +# +# Do all the downloads and untarring in a subdirectory, so all that +# stuff can be removed once we've installed the support libraries. + +if [ ! -d "${MACOSX_SUPPORT_LIBS}" ] +then + mkdir "${MACOSX_SUPPORT_LIBS}" || exit 1 +fi +cd "${MACOSX_SUPPORT_LIBS}" + +install_all + +echo "" + +# +# Indicate what paths to use for pkg-config and cmake. +# +pkg_config_path=/usr/local/lib/pkgconfig +if [ "$QT_VERSION" ]; then + qt_base_path=$HOME/Qt$QT_VERSION/$QT_VERSION/clang_64 + pkg_config_path="$pkg_config_path":"$qt_base_path/lib/pkgconfig" + CMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH":"$qt_base_path/lib/cmake" +fi + +if $no_build; then + echo "All required dependencies downloaded. Run without -n to install them." 
+ exit 0 +fi + +if [ "$QT_VERSION" ]; then + if [ -f qt-$QT_VERSION-done ]; then + echo "You are now prepared to build Wireshark." + else + echo "Qt was not installed; you will have to install it in order to build the" + echo "Wireshark application, but you can build all the command-line tools in" + echo "the Wireshark distribution." + echo "" + echo "See section 2.1.1. \"Build environment setup\" of the Wireshark Developer's" + echo "Guide for instructions on how to install Qt." + fi +else + echo "You did not install Qt; you will have to install it in order to build" + echo "the Wireshark application, but you can build all the command-line tools in" + echo "the Wireshark distribution." +fi +echo +echo "To build:" +echo +echo "export PKG_CONFIG_PATH=$pkg_config_path" +echo "export CMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH" +echo "export PATH=$PATH:$qt_base_path/bin" +echo +echo "mkdir build; cd build" +if [ ! -z "$NINJA_VERSION" ]; then + echo "cmake -G Ninja .." + echo "ninja wireshark_app_bundle logray_app_bundle # (Modify as needed)" + echo "ninja install/strip" +else + echo "cmake .." + echo "make $MAKE_BUILD_OPTS wireshark_app_bundle logray_app_bundle # (Modify as needed)" + echo "make install/strip" +fi +echo +echo "Make sure you are allowed capture access to the network devices" +echo "See: https://gitlab.com/wireshark/wireshark/-/wikis/CaptureSetup/CapturePrivileges" +echo + +exit 0 diff --git a/tools/make-authors-csv.py b/tools/make-authors-csv.py new file mode 100755 index 0000000..7652803 --- /dev/null +++ b/tools/make-authors-csv.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# +# Generate the authors.csv file. +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +'''\ +Remove tasks from individual author entries from the AUTHORS file +for use in the "About" dialog. +''' + +import io +import re +import sys + + +def remove_tasks(stdinu8): + in_subinfo = False + all_lines = [] + + # Assume the first line is blank and skip it. make-authors-short.pl + # skipped over the UTF-8 BOM as well. Do we need to do that here? 
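+ # (If it does turn out to be needed, one low-risk option would be to + # construct the wrapper with encoding='utf-8-sig' instead of 'utf8' in + # main() below; that codec transparently strips a leading BOM on read.)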
+ + stdinu8.readline() + + for line in stdinu8: + + sub_m = re.search(r'(.*?)\s*\{', line) + if sub_m: + in_subinfo = True + all_lines.append(sub_m.group(1)) + elif '}' in line: + in_subinfo = False + nextline = next(stdinu8) + if not re.match(r'^\s*$', nextline): + # if '{' in nextline: + # stderru8.write("No blank line after '}', found " + nextline) + all_lines.append(nextline) + elif in_subinfo: + continue + else: + all_lines.append(line) + return all_lines + + +def main(): + stdinu8 = io.TextIOWrapper(sys.stdin.buffer, encoding='utf8') + stdoutu8 = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8') + stderru8 = io.TextIOWrapper(sys.stderr.buffer, encoding='utf8') + + lines = remove_tasks(stdinu8) + patt = re.compile("(.*)[<(]([\\s'a-zA-Z0-9._%+-]+(\\[[Aa][Tt]\\])?[a-zA-Z0-9._%+-]+)[>)]") + + for line in lines: + match = patt.match(line) + if match: + name = match.group(1).strip() + mail = match.group(2).strip().replace("[AT]", "@") + stdoutu8.write("{},{}\n".format(name, mail)) + + +if __name__ == '__main__': + main() diff --git a/tools/make-enterprises.py b/tools/make-enterprises.py new file mode 100755 index 0000000..1b2b2d0 --- /dev/null +++ b/tools/make-enterprises.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +# create the enterprises.c file from +# https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers +# or an offline copy +# +# Copyright 2022 by Moshe Kaplan +# Based on make-sminmpec.pl by Gerald Combs +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 2004 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import argparse +import re +import urllib.request + + +ENTERPRISES_CFILE = os.path.join('epan', 'enterprises.c') + +ENTERPRISE_NUMBERS_URL = "https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers" + +DECIMAL_PATTERN = r"^(\d+)" +# up to three spaces because of formatting errors in the source +ORGANIZATION_PATTERN = r"^ ?(\S.*)" +FORMERLY_PATTERN = r" \(((formerly|previously) .*)\)" + + +LOOKUP_FUNCTION = r""" +const char* global_enterprises_lookup(uint32_t value) +{ + if (value > table.max_idx) { + return NULL; + } + else return table.values[value]; +} +""" + +DUMP_FUNCTION = r""" +void global_enterprises_dump(FILE *fp) +{ + for (size_t idx = 0; idx <= table.max_idx; idx++) { + if (table.values[idx] != NULL) { + fprintf(fp, "%zu\t%s\n", idx, table.values[idx]); + } + } +} +""" + +# This intermediate format is no longer written to a file - returned as string +def generate_enterprise_entries(file_content): + # We only care about the "Decimal" and "Organization", + # not the contact or email + org_lines = [] + last_updated = "" + end_seen = False + for line in file_content.splitlines(): + decimal_match = re.match(DECIMAL_PATTERN, line) + if decimal_match: + decimal = decimal_match.group(0) + elif re.match(ORGANIZATION_PATTERN, line): + organization = line.strip() + if organization.lower() == "unassigned": + continue + organization = re.sub(FORMERLY_PATTERN, r"\t# \1", organization) + org_lines += [decimal + "\t" + organization] + elif "last updated" in line.lower(): + last_updated = line + elif "end of document" in line.lower(): + end_seen = True + + if not end_seen: + raise Exception('"End of Document" not found. 
Truncated source file?') + + last_updated_line = "/* " + last_updated + " */\n\n" + output = "\n".join(org_lines) + "\n" + return (output, last_updated_line) + +class CFile: + def __init__(self, filename, last_updated_line): + self.filename = filename + self.f = open(filename, 'w') + self.mappings = {} + self.highest_num = 0 + + # Write file header + self.f.write('/* ' + os.path.basename(self.filename) + '\n') + self.f.write(' *\n') + self.f.write(' * Wireshark - Network traffic analyzer\n') + self.f.write(' * By Gerald Combs \n') + self.f.write(' * Copyright 1998 Gerald Combs\n') + self.f.write(' *\n') + self.f.write(' * Do not edit - this file is automatically generated\n') + self.f.write(' * SPDX-License-Identifier: GPL-2.0-or-later\n') + self.f.write(' */\n\n') + self.f.write(last_updated_line) + + # Include header files + self.f.write('#include "config.h"\n\n') + self.f.write('#include <stdio.h>\n') + self.f.write('#include "enterprises.h"\n') + self.f.write('\n\n') + + def __del__(self): + self.f.write('typedef struct\n') + self.f.write('{\n') + self.f.write(' uint32_t max_idx;\n') + self.f.write(' const char* values[' + str(self.highest_num+1) + '];\n') + self.f.write('} global_enterprises_table_t;\n\n') + + # Write static table + self.f.write('static global_enterprises_table_t table =\n') + self.f.write('{\n') + # Largest index + self.f.write(' ' + str(self.highest_num) + ',\n') + self.f.write(' {\n') + # Entries (read from dict) + for n in range(0, self.highest_num+1): + if n not in self.mappings: + # There are some gaps, write a NULL entry so can lookup by index + line = ' NULL' + else: + line = ' "' + self.mappings[n] + '"' + # Add comma. + if n < self.highest_num: + line += ',' + # Add number as aligned comment. + line += ' '*(90-len(line)) + '// ' + str(n) + + self.f.write(line+'\n') + + # End of array + self.f.write(' }\n') + # End of struct + self.f.write('};\n') + print('Re-generated', self.filename) + + # Lookup function + self.f.write(LOOKUP_FUNCTION) + + # Dump function + self.f.write(DUMP_FUNCTION) + + # Add an individual mapping to the table + def addMapping(self, num, name): + # Handle some escapings + name = name.replace('\\', '\\\\') + name = name.replace('"', '""') + + # Record. + self.mappings[num] = name + self.highest_num = num if num > self.highest_num else self.highest_num + + + +def main(): + parser = argparse.ArgumentParser(description="Create the {} file.".format(ENTERPRISES_CFILE)) + parser.add_argument('--infile') + parser.add_argument('outfile', nargs='?', default=ENTERPRISES_CFILE) + parsed_args = parser.parse_args() + + # Read data from file or webpage + if parsed_args.infile: + with open(parsed_args.infile, encoding='utf-8') as fh: + data = fh.read() + else: + with urllib.request.urlopen(ENTERPRISE_NUMBERS_URL) as f: + if f.status != 200: + raise Exception("request for " + ENTERPRISE_NUMBERS_URL + " failed with result code " + str(f.status)) + data = f.read().decode('utf-8') + + # Find bits we need and generate enterprise entries + enterprises_content, last_updated_line = generate_enterprise_entries(data) + + # Now write the contents to a C file (which is faster than parsing the global file at runtime).
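+ # (The generated table is a dense array indexed by enterprise number, + # with NULL filling the unassigned gaps, so global_enterprises_lookup() + # above is a single O(1) array access rather than a search.)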
+ c_file = CFile(parsed_args.outfile, last_updated_line) + + mapping_re = re.compile(r'^(\d+)\s+(.*)$') + for line in enterprises_content.splitlines(): + match = mapping_re.match(line) + if match: + num, name = match.group(1), match.group(2) + # Strip any comments and/or trailing whitespace + idx = name.find('#') + if idx != -1: + name = name[0:idx] + name = name.rstrip() + # Add + c_file.addMapping(int(num), name) + + + +if __name__ == "__main__": + main() diff --git a/tools/make-enums.py b/tools/make-enums.py new file mode 100755 index 0000000..b6a2835 --- /dev/null +++ b/tools/make-enums.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# +# Copyright 2021, João Valverde +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# +# Uses pyclibrary to parse C headers for enums and integer macro +# definitions. Exports that data to a C file for the introspection API. +# +# Requires: https://github.com/MatthieuDartiailh/pyclibrary +# + +import os +import sys +import argparse +from pyclibrary import CParser + +def parse_files(infiles, outfile): + + print("Input: {}".format(infiles)) + print("Output: '{}'".format(outfile)) + + parser = CParser(infiles) + + source = """\ +/* + * Wireshark - Network traffic analyzer + * By Gerald Combs + * Copyright 1998 Gerald Combs + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Generated automatically from %s. It can be re-created by running + * "tools/make-enums.py" from the top source directory. + * + * It is fine to edit this file by hand. Particularly if a symbol + * disappears from the API it can just be removed here. There is no + * requirement to re-run the generator script. + * + */ +""" % (os.path.basename(sys.argv[0])) + + for f in infiles: + source += '#include <{}>\n'.format(f) + + source += """ +#define ENUM(arg) { #arg, arg } + +static ws_enum_t all_enums[] = { +""" + + definitions = parser.defs['values'] + symbols = list(definitions.keys()) + symbols.sort() + + for s in symbols: + if isinstance(definitions[s], int): + source += ' ENUM({}),\n'.format(s) + + source += """\ + { NULL, 0 }, +}; +""" + + try: + fh = open(outfile, 'w') + except OSError: + sys.exit('Unable to write ' + outfile + '.\n') + + fh.write(source) + fh.close() + +epan_files = [ + "epan/address.h", + "epan/ipproto.h", + "epan/proto.h", + "epan/ftypes/ftypes.h", + "epan/stat_groups.h", +] +parse_files(epan_files, "epan/introspection-enums.c") + +wtap_files = [ + "wiretap/wtap.h", +] +parse_files(wtap_files, "wiretap/introspection-enums.c") + +# +# Editor modelines - https://www.wireshark.org/tools/modelines.html +# +# Local variables: +# c-basic-offset: 4 +# indent-tabs-mode: nil +# End: +# +# vi: set shiftwidth=4 expandtab: +# :indentSize=4:noTabs=true: +# diff --git a/tools/make-isobus.py b/tools/make-isobus.py new file mode 100644 index 0000000..ce0259c --- /dev/null +++ b/tools/make-isobus.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +'''Update the "packet-isobus-parameters.h" file. +Make-isobus creates a file containing isobus parameters +from the databases at isobus.net. 
+''' + +import csv +import io +import os +import sys +import urllib.request, urllib.error, urllib.parse +import zipfile + +def exit_msg(msg=None, status=1): + if msg is not None: + sys.stderr.write(msg + '\n\n') + sys.stderr.write(__doc__ + '\n') + sys.exit(status) + +def open_url_zipped(url): + '''Open a URL of a zipped file. + + ''' + + url_path = '/'.join(url) + + req_headers = { 'User-Agent': 'Wireshark make-isobus' } + try: + req = urllib.request.Request(url_path, headers=req_headers) + response = urllib.request.urlopen(req) + body = response.read() + except Exception: + exit_msg('Error opening ' + url_path) + + return zipfile.ZipFile(io.BytesIO(body)) + +def main(): + this_dir = os.path.dirname(__file__) + isobus_output_path = os.path.join('epan', 'dissectors', 'packet-isobus-parameters.h') + + isobus_zip_url = [ "https://www.isobus.net/isobus/attachments/", "isoExport_csv.zip"] + + isobus_files = { + 'indust' : 'Industry Groups.csv', + 'glblfcts' : 'Global NAME Functions.csv', + 'igfcts' :'IG Specific NAME Function.csv', + 'manuf' : 'Manufacturer IDs.csv', + 'pgn_spns' : 'SPNs and PGNs.csv' + } + + zipf = open_url_zipped(isobus_zip_url) + + # Industries csv + min_total = 4 # typically 8 + f = zipf.read(isobus_files['indust']) + lines = f.decode('UTF-8', 'replace').splitlines() + + if len(lines) < min_total: + exit_msg("{}: Not enough entries ({})".format(isobus_files['indust'], len(lines))) + + indust_csv = csv.reader(lines) + next(indust_csv) + + # Global Name Functions csv + min_total = 50 # XXX as of 2023-10-18 + f = zipf.read(isobus_files['glblfcts']) + lines = f.decode('UTF-8', 'replace').splitlines() + + if len(lines) < min_total: + exit_msg("{}: Not enough entries ({})".format(isobus_files['glblfcts'], len(lines))) + + glbl_name_functions_csv = csv.reader(lines) + next(glbl_name_functions_csv) + + # Specific Name Functions csv + min_total = 200 # 295 as of 2023-10-18 + f = zipf.read(isobus_files['igfcts']) + lines = f.decode('UTF-8', 'replace').splitlines() + + if len(lines) < min_total: + exit_msg("{}: Not enough entries ({})".format(isobus_files['igfcts'], len(lines))) + + vehicle_system_names = {} + specific_functions = {} + + specific_functions_csv = csv.reader(lines) + next(specific_functions_csv) + for row in specific_functions_csv: + ig_id, vs_id, vs_name, f_id, f_name = row[:5] + new_id = int(ig_id) * 256 + int(vs_id) + if len(vs_name) > 50: + if new_id != 539: # 539: Weeders ... 
+ print(f"shortening {new_id}: {vs_name} -> {vs_name[:36]}") + vs_name = vs_name[:36] + vehicle_system_names[new_id] = vs_name + + #vehicle_system_names.setdefault(ig_id, {}).setdefault(vs_id, vs_name) + new_id2 = 256 * new_id + int(f_id) + specific_functions[new_id2] = f_name + + # Manufacturers csv + min_total = 1000 # 1396 as of 2023-10-18 + f = zipf.read(isobus_files['manuf']) + lines = f.decode('UTF-8', 'replace').splitlines() + + if len(lines) < min_total: + exit_msg("{}: Not enough entries ({})".format(isobus_files['manuf'], len(lines))) + + manuf_csv = csv.reader(lines) + next(manuf_csv) + + # PGN SPN csv + min_total = 20000 # 23756 as of 2023-10-18 + f = zipf.read(isobus_files['pgn_spns']) + lines = f.decode('UTF-8', 'replace').splitlines() + + if len(lines) < min_total: + exit_msg("{}: Not enough entries ({})".format(isobus_files['pgn_spns'], len(lines))) + + pgn_names = {} + + pgn_spn_csv = csv.reader(lines) + next(pgn_spn_csv) + for row in pgn_spn_csv: + try: + pgn_id, pgn_name, = row[:2] + if not pgn_name.startswith("Proprietary B"): + pgn_names[int(pgn_id)] = pgn_name.replace("\"","'") + except: + pass + + # prepare output file + try: + output_fd = io.open(isobus_output_path, 'w', encoding='UTF-8') + except Exception: + exit_msg("Couldn't open ({}) ".format(isobus_output_path)) + + output_fd.write('''/* + * This file was generated by running ./tools/make-isobus.py. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * The ISOBUS public listings available from: + * + * + */ + +#ifndef __PACKET_ISOBUS_PARAMETERS_H__ +#define __PACKET_ISOBUS_PARAMETERS_H__ + +''') + + # Write Industries + output_fd.write("static const value_string _isobus_industry_groups[] = {\n") + + for row in sorted(indust_csv, key=lambda x: int(x[0])): + output_fd.write(f" {{ {row[0]}, \"{row[1]}\" }},\n") + + output_fd.write(" { 0, NULL }\n") + output_fd.write("};\n") + output_fd.write("static value_string_ext isobus_industry_groups_ext = VALUE_STRING_EXT_INIT(_isobus_industry_groups);\n\n"); + + # Write Vehicle System Names + output_fd.write("/* key: 256 * Industry-Group-ID + Vehicle-Group-ID */\n") + output_fd.write("static const value_string _isobus_vehicle_systems[] = {\n") + + for key in sorted(vehicle_system_names): + output_fd.write(f" {{ {hex(key)}, \"{vehicle_system_names[key]}\" }},\n") + + output_fd.write(" { 0, NULL }\n") + output_fd.write("};\n") + output_fd.write("static value_string_ext isobus_vehicle_systems_ext = VALUE_STRING_EXT_INIT(_isobus_vehicle_systems);\n\n"); + + # Write Global Name Functions + output_fd.write("static const value_string _isobus_global_name_functions[] = {\n") + + for row in sorted(glbl_name_functions_csv, key=lambda x: int(x[0])): + output_fd.write(f" {{ {row[0]}, \"{row[1]}\" }},\n") + + output_fd.write(" { 0, NULL }\n") + output_fd.write("};\n") + output_fd.write("static value_string_ext isobus_global_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_global_name_functions);\n\n"); + + # IG Specific Global Name Functions + output_fd.write("/* key: 65536 * Industry-Group-ID + 256 * Vehicle-System-ID + Function-ID */\n") + output_fd.write("static const value_string _isobus_ig_specific_name_functions[] = {\n") + + for key in sorted(specific_functions): + output_fd.write(f" {{ {hex(key)}, \"{specific_functions[key]}\" }},\n") + + output_fd.write(" { 0, NULL }\n") + output_fd.write("};\n") + output_fd.write("static value_string_ext isobus_ig_specific_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_ig_specific_name_functions);\n\n"); + + # Write Manufacturers + 
output_fd.write("static const value_string _isobus_manufacturers[] = {\n") + + for row in sorted(manuf_csv, key=lambda x: int(x[0])): + output_fd.write(f" {{ {row[0]}, \"{row[1]}\" }},\n") + + output_fd.write(" { 0, NULL }\n") + output_fd.write("};\n") + output_fd.write("static value_string_ext isobus_manufacturers_ext = VALUE_STRING_EXT_INIT(_isobus_manufacturers);\n\n"); + + # PGN Names + output_fd.write("static const value_string _isobus_pgn_names[] = {\n") + + for key in sorted(pgn_names): + output_fd.write(f" {{ {key}, \"{pgn_names[key]}\" }},\n") + + output_fd.write(" { 0, NULL }\n") + output_fd.write("};\n") + output_fd.write("static value_string_ext isobus_pgn_names_ext = VALUE_STRING_EXT_INIT(_isobus_pgn_names);\n\n"); + + output_fd.write("#endif /* __PACKET_ISOBUS_PARAMETERS_H__ */") +if __name__ == '__main__': + main() diff --git a/tools/make-manuf.py b/tools/make-manuf.py new file mode 100755 index 0000000..22f3aa0 --- /dev/null +++ b/tools/make-manuf.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python3 +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +'''Update the "manuf" file. + +Make-manuf creates a file containing ethernet OUIs and their company +IDs from the databases at IEEE. +''' + +import csv +import html +import io +import os +import re +import sys +import urllib.request, urllib.error, urllib.parse + +have_icu = False +try: + # Use the grapheme or segments module instead? + import icu + have_icu = True +except ImportError: + pass + +def exit_msg(msg=None, status=1): + if msg is not None: + sys.stderr.write(msg + '\n\n') + sys.stderr.write(__doc__ + '\n') + sys.exit(status) + +def open_url(url): + '''Open a URL. + Returns a tuple containing the body and response dict. The body is a + str in Python 3 and bytes in Python 2 in order to be compatibile with + csv.reader. + ''' + + if len(sys.argv) > 1: + url_path = os.path.join(sys.argv[1], url[1]) + url_fd = open(url_path) + body = url_fd.read() + url_fd.close() + else: + url_path = '/'.join(url) + + req_headers = { 'User-Agent': 'Wireshark make-manuf' } + try: + req = urllib.request.Request(url_path, headers=req_headers) + response = urllib.request.urlopen(req) + body = response.read().decode('UTF-8', 'replace') + except Exception: + exit_msg('Error opening ' + url_path) + + return body + +# These are applied after punctuation has been removed. +# More examples at https://en.wikipedia.org/wiki/Incorporation_(business) +general_terms = '|'.join([ + ' a +s\\b', # A/S and A.S. but not "As" as in "Connect As". + ' ab\\b', # Also follows "Oy", which is covered below. + ' ag\\b', + ' b ?v\\b', + ' closed joint stock company\\b', + ' co\\b', + ' company\\b', + ' corp\\b', + ' corporation\\b', + ' corporate\\b', + ' de c ?v\\b', # Follows "S.A.", which is covered separately below. + ' gmbh\\b', + ' holding\\b', + ' inc\\b', + ' incorporated\\b', + ' jsc\\b', + ' kg\\b', + ' k k\\b', # "K.K." as in "kabushiki kaisha", but not "K+K" as in "K+K Messtechnik". + ' limited\\b', + ' llc\\b', + ' ltd\\b', + ' n ?v\\b', + ' oao\\b', + ' of\\b', + ' open joint stock company\\b', + ' ooo\\b', + ' oü\\b', + ' oy\\b', + ' oyj\\b', + ' plc\\b', + ' pty\\b', + ' pvt\\b', + ' s ?a ?r ?l\\b', + ' s ?a\\b', + ' s ?p ?a\\b', + ' sp ?k\\b', + ' s ?r ?l\\b', + ' systems\\b', + '\\bthe\\b', + ' zao\\b', + ' z ?o ?o\\b' + ]) + +# Chinese company names tend to start with the location, skip it (non-exhaustive list). 
+skip_start = [ + 'shengzen', + 'shenzhen', + 'beijing', + 'shanghai', + 'wuhan', + 'hangzhou', + 'guangxi', + 'guangdong', + 'chengdu', +] + +# Special cases handled directly +special_case = { + "Advanced Micro Devices": "AMD", + "杭州德澜科技有限公司": "DelanTech" # 杭州德澜科技有限公司(HangZhou Delan Technology Co.,Ltd) +} + +def shorten(manuf): + '''Convert a long manufacturer name to abbreviated and short names''' + # Normalize whitespace. + manuf = ' '.join(manuf.split()) + orig_manuf = manuf + # Convert all caps to title case + if manuf.isupper(): + manuf = manuf.title() + # Remove the contents of parentheses as ancillary data + manuf = re.sub(r"\(.*\)", '', manuf) + # Remove the contents of fullwidth parentheses (mostly in Asian names) + manuf = re.sub(r"（.*）", '', manuf) + # Remove "a" before removing punctuation ("Aruba, a Hewlett [...]" etc.) + manuf = manuf.replace(" a ", " ") + # Remove any punctuation + # XXX Use string.punctuation? Note that it includes '-' and '*'. + manuf = re.sub(r"[\"',./:()+-]", ' ', manuf) + # XXX For some reason including the double angle brackets in the above + # regex makes it bomb + manuf = re.sub(r"[«»“”]", ' ', manuf) + # & isn't needed when standalone + manuf = manuf.replace(" & ", " ") + # Remove business types and other general terms ("the", "inc", "plc", etc.) + plain_manuf = re.sub(general_terms, '', manuf, flags=re.IGNORECASE) + # ...but make sure we don't remove everything. + if not all(s == ' ' for s in plain_manuf): + manuf = plain_manuf + + manuf = manuf.strip() + + # Check for special case + if manuf in special_case.keys(): + manuf = special_case[manuf] + + # XXX: Some of the entries have Chinese city or other location + # names written with spaces between each character, like + # Bei jing, Wu Han, Shen Zhen, etc. We should remove that too. + split = manuf.split() + if len(split) > 1 and split[0].lower() in skip_start: + manuf = ' '.join(split[1:]) + + # Remove all spaces + manuf = re.sub(r'\s+', '', manuf) + + if len(manuf) < 1: + sys.stderr.write('Manufacturer "{}" shortened to nothing.\n'.format(orig_manuf)) + sys.exit(1) + + # Truncate names to a reasonable length, say, 12 characters. If + # the string contains UTF-8, this may be substantially more than + # 12 bytes. It might also be less than 12 visible characters. Plain + # Python slices Unicode strings by code point, which is better + # than raw bytes but not as good as grapheme clusters. PyICU + # supports grapheme clusters. https://bugs.python.org/issue30717 + # + + # Truncate by code points + trunc_len = 12 + + if have_icu: + # Truncate by grapheme clusters + bi_ci = icu.BreakIterator.createCharacterInstance(icu.Locale('en_US')) + bi_ci.setText(manuf) + bounds = list(bi_ci) + bounds = bounds[0:trunc_len] + trunc_len = bounds[-1] + + manuf = manuf[:trunc_len] + + if manuf.lower() == orig_manuf.lower(): + # Original manufacturer name was short and simple. + return [manuf, None] + + mixed_manuf = orig_manuf + # At least one entry has whitespace in front of a period.
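+ # (Worked example of the passes above: a hypothetical "FOO NETWORKS, + # INC." is title-cased to "Foo Networks, Inc.", loses its punctuation + # and the "Inc" business term, and is compacted to "FooNetworks".)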
+ mixed_manuf = re.sub(r'\s+\.', '.', mixed_manuf) + #If company is all caps, convert to mixed case (so it doesn't look like we're screaming the company name) + if mixed_manuf.upper() == mixed_manuf: + mixed_manuf = mixed_manuf.title() + + return [manuf, mixed_manuf] + +MA_L = 'MA_L' +MA_M = 'MA_M' +MA_S = 'MA_S' + +def prefix_to_oui(prefix, prefix_map): + pfx_len = int(len(prefix) * 8 / 2) + prefix24 = prefix[:6] + oui24 = ':'.join(hi + lo for hi, lo in zip(prefix24[0::2], prefix24[1::2])) + + if pfx_len == 24: + # 24-bit OUI assignment, no mask + return oui24, MA_L + + # Other lengths which require a mask. + oui = prefix.ljust(12, '0') + oui = ':'.join(hi + lo for hi, lo in zip(oui[0::2], oui[1::2])) + if pfx_len == 28: + kind = MA_M + elif pfx_len == 36: + kind = MA_S + prefix_map[oui24] = kind + + return '{}/{:d}'.format(oui, int(pfx_len)), kind + +def main(): + this_dir = os.path.dirname(__file__) + manuf_path = os.path.join('epan', 'manuf-data.c') + + ieee_d = { + 'OUI': { 'url': ["https://standards-oui.ieee.org/oui/", "oui.csv"], 'min_entries': 1000 }, + 'CID': { 'url': ["https://standards-oui.ieee.org/cid/", "cid.csv"], 'min_entries': 75 }, + 'IAB': { 'url': ["https://standards-oui.ieee.org/iab/", "iab.csv"], 'min_entries': 1000 }, + 'OUI28': { 'url': ["https://standards-oui.ieee.org/oui28/", "mam.csv"], 'min_entries': 1000 }, + 'OUI36': { 'url': ["https://standards-oui.ieee.org/oui36/", "oui36.csv"], 'min_entries': 1000 }, + } + oui_d = { + MA_L: { '00:00:00' : ['00:00:00', 'Officially Xerox, but 0:0:0:0:0:0 is more common'] }, + MA_M: {}, + MA_S: {}, + } + + min_total = 35000; # 35830 as of 2018-09-05 + total_added = 0 + + # Add IEEE entries from each of their databases + ieee_db_l = ['OUI', 'OUI28', 'OUI36', 'CID', 'IAB'] + + # map a 24-bit prefix to MA-M/MA-S or none (MA-L by default) + prefix_map = {} + + for db in ieee_db_l: + db_url = ieee_d[db]['url'] + ieee_d[db]['skipped'] = 0 + ieee_d[db]['added'] = 0 + ieee_d[db]['total'] = 0 + print('Merging {} data from {}'.format(db, db_url)) + body = open_url(db_url) + ieee_csv = csv.reader(body.splitlines()) + + # Pop the title row. + next(ieee_csv) + for ieee_row in ieee_csv: + #Registry,Assignment,Organization Name,Organization Address + #IAB,0050C2DD6,Transas Marine Limited,Datavagen 37 Askim Vastra Gotaland SE 436 32 + oui, kind = prefix_to_oui(ieee_row[1].upper(), prefix_map) + manuf = ieee_row[2].strip() + # The Organization Name field occasionally contains HTML entities. Undo them. + manuf = html.unescape(manuf) + # "Watts A\S" + manuf = manuf.replace('\\', '/') + if manuf == 'IEEE Registration Authority': + continue + if manuf == 'Private': + continue + if oui in oui_d[kind]: + action = 'Skipping' + print('{} - {} IEEE "{}" in favor of "{}"'.format(oui, action, manuf, oui_d[kind][oui])) + ieee_d[db]['skipped'] += 1 + else: + oui_d[kind][oui] = shorten(manuf) + ieee_d[db]['added'] += 1 + ieee_d[db]['total'] += 1 + + if ieee_d[db]['total'] < ieee_d[db]['min_entries']: + exit_msg("Too few {} entries. Got {}, wanted {}".format(db, ieee_d[db]['total'], ieee_d[db]['min_entries'])) + total_added += ieee_d[db]['total'] + + if total_added < min_total: + exit_msg("Too few total entries ({})".format(total_added)) + + try: + manuf_fd = io.open(manuf_path, 'w', encoding='UTF-8') + except Exception: + exit_msg("Couldn't open manuf file for reading ({}) ".format(manuf_path)) + + manuf_fd.write('''/* + * This file was generated by running ./tools/make-manuf.py. 
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * The data below has been assembled from the following sources:
+ *
+ * The IEEE public OUI listings available from:
+ * <https://standards-oui.ieee.org/oui/oui.csv>
+ * <https://standards-oui.ieee.org/cid/cid.csv>
+ * <https://standards-oui.ieee.org/iab/iab.csv>
+ * <https://standards-oui.ieee.org/oui28/mam.csv>
+ * <https://standards-oui.ieee.org/oui36/oui36.csv>
+ *
+ */
+
+''')
+
+    # Write the prefix map
+    manuf_fd.write("static const manuf_registry_t ieee_registry_table[] = {\n")
+    keys = list(prefix_map.keys())
+    keys.sort()
+    for oui in keys:
+        manuf_fd.write("    {{ {{ 0x{}, 0x{}, 0x{} }}, {} }},\n".format(oui[0:2], oui[3:5], oui[6:8], prefix_map[oui]))
+    manuf_fd.write("};\n\n")
+
+    # Write the MA-L table
+    manuf_fd.write("static const manuf_oui24_t global_manuf_oui24_table[] = {\n")
+    keys = list(oui_d[MA_L].keys())
+    keys.sort()
+    for oui in keys:
+        short = oui_d[MA_L][oui][0]
+        if oui_d[MA_L][oui][1]:
+            long = oui_d[MA_L][oui][1]
+        else:
+            long = short
+        line = "    {{ {{ 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], short)
+        sep = 44 - len(line)
+        if sep <= 0:
+            sep = 0
+        line += sep * ' '
+        line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
+        manuf_fd.write(line)
+    manuf_fd.write("};\n\n")
+
+    # Write the MA-M table
+    manuf_fd.write("static const manuf_oui28_t global_manuf_oui28_table[] = {\n")
+    keys = list(oui_d[MA_M].keys())
+    keys.sort()
+    for oui in keys:
+        short = oui_d[MA_M][oui][0]
+        if oui_d[MA_M][oui][1]:
+            long = oui_d[MA_M][oui][1]
+        else:
+            long = short
+        line = "    {{ {{ 0x{}, 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], oui[9:11], short)
+        sep = 50 - len(line)
+        if sep <= 0:
+            sep = 0
+        line += sep * ' '
+        line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
+        manuf_fd.write(line)
+    manuf_fd.write("};\n\n")
+
+    # Write the MA-S table
+    manuf_fd.write("static const manuf_oui36_t global_manuf_oui36_table[] = {\n")
+    keys = list(oui_d[MA_S].keys())
+    keys.sort()
+    for oui in keys:
+        short = oui_d[MA_S][oui][0]
+        if oui_d[MA_S][oui][1]:
+            long = oui_d[MA_S][oui][1]
+        else:
+            long = short
+        line = "    {{ {{ 0x{}, 0x{}, 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], oui[9:11], oui[12:14], short)
+        sep = 56 - len(line)
+        if sep <= 0:
+            sep = 0
+        line += sep * ' '
+        line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
+        manuf_fd.write(line)
+    manuf_fd.write("};\n")
+
+    manuf_fd.close()
+
+    for db in ieee_d:
+        print('{:<20}: {}'.format('IEEE ' + db + ' added', ieee_d[db]['added']))
+    print('{:<20}: {}'.format('Total added', total_added))
+
+    print()
+    for db in ieee_d:
+        print('{:<20}: {}'.format('IEEE ' + db + ' total', ieee_d[db]['total']))
+
+    print()
+    for db in ieee_d:
+        print('{:<20}: {}'.format('IEEE ' + db + ' skipped', ieee_d[db]['skipped']))
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/make-no-reassembly-profile.py b/tools/make-no-reassembly-profile.py
new file mode 100755
index 0000000..cd68155
--- /dev/null
+++ b/tools/make-no-reassembly-profile.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+#
+# Generate preferences for a "No Reassembly" profile.
+# By Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''Generate preferences for a "No Reassembly" profile.'''
+
+import argparse
+import os.path
+import re
+import subprocess
+import sys
+
+MIN_PLUGINS = 10
+
+def main():
+    parser = argparse.ArgumentParser(description='No reassembly profile generator')
+    parser.add_argument('-p', '--program-path', default=os.path.curdir, help='Path to TShark.')
+    parser.add_argument('-v', '--verbose', action='store_const', const=True, default=False, help='Verbose output.')
+    args = parser.parse_args()
+
+    this_dir = os.path.dirname(__file__)
+    profile_path = os.path.join(this_dir, '..', 'resources', 'share', 'wireshark', 'profiles', 'No Reassembly', 'preferences')
+
+    tshark_path = os.path.join(args.program_path, 'tshark')
+    if not os.path.isfile(tshark_path):
+        print('tshark not found at {}\n'.format(tshark_path))
+        parser.print_usage()
+        sys.exit(1)
+
+    # Make sure plugin prefs are present.
+    cp = subprocess.run([tshark_path, '-G', 'plugins'], stdout=subprocess.PIPE, check=True, encoding='utf-8')
+    plugin_lines = cp.stdout.splitlines()
+    dissector_count = len(tuple(filter(lambda p: re.search(r'\sdissector\s', p), plugin_lines)))
+    if dissector_count < MIN_PLUGINS:
+        print('Found {} plugins but require {}.'.format(dissector_count, MIN_PLUGINS))
+        sys.exit(1)
+
+    rd_pref_re = re.compile(r'^#\s*(.*(reassembl|desegment)\S*):\s*TRUE')
+    out_prefs = [
+        '# Generated by ' + os.path.basename(__file__), '',
+        '####### Protocols ########', '',
+    ]
+    cp = subprocess.run([tshark_path, '-G', 'defaultprefs'], stdout=subprocess.PIPE, check=True, encoding='utf-8')
+    pref_lines = cp.stdout.splitlines()
+    for pref_line in pref_lines:
+        m = rd_pref_re.search(pref_line)
+        if m:
+            rd_pref = m.group(1) + ': FALSE'
+            if args.verbose:
+                print(rd_pref)
+            out_prefs.append(rd_pref)
+
+    if len(pref_lines) < 5000:
+        print("Too few preference lines.")
+        sys.exit(1)
+
+    if len(out_prefs) < 150:
+        print("Too few changed preferences.")
+        sys.exit(1)
+
+    with open(profile_path, 'w') as profile_f:
+        for pref_line in out_prefs:
+            profile_f.write(pref_line + '\n')
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/make-packet-dcm.py b/tools/make-packet-dcm.py
new file mode 100755
index 0000000..028bde4
--- /dev/null
+++ b/tools/make-packet-dcm.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python3
+import os.path
+import sys
+import itertools
+import lxml.etree
+
+# This utility scrapes the DICOM standard document in DocBook format, finds the appropriate tables,
+# and extracts the data needed to build the lists of DICOM attributes, UIDs and value representations.
+
+# If the files part05.xml, part06.xml and part07.xml exist in the current directory, use them.
+# Otherwise, download the current release from the current DICOM official sources.
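+# The three near-identical blocks below could equally be written as a loop; a
+# sketch, not part of the original script:
+#
+#     def load_part(name):
+#         if os.path.exists(name + ".xml"):
+#             print(f"Using local {name} docbook.", file=sys.stderr)
+#             return lxml.etree.parse(name + ".xml")
+#         print(f"Downloading {name} docbook...", file=sys.stderr)
+#         return lxml.etree.parse(
+#             f"http://dicom.nema.org/medical/dicom/current/source/docbook/{name}/{name}.xml")
+#
+#     part05, part06, part07 = (load_part(p) for p in ("part05", "part06", "part07"))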
+if os.path.exists("part05.xml"): + print("Using local part05 docbook.", file=sys.stderr) + part05 = lxml.etree.parse("part05.xml") +else: + print("Downloading part05 docbook...", file=sys.stderr) + part05 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part05/part05.xml") +if os.path.exists("part06.xml"): + print("Using local part06 docbook.", file=sys.stderr) + part06 = lxml.etree.parse("part06.xml") +else: + print("Downloading part06 docbook...", file=sys.stderr) + part06 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part06/part06.xml") +if os.path.exists("part07.xml"): + print("Using local part07 docbook.", file=sys.stderr) + part07 = lxml.etree.parse("part07.xml") +else: + print("Downloading part07 docbook...", file=sys.stderr) + part07 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part07/part07.xml") +dbns = {'db':'http://docbook.org/ns/docbook', 'xml':'http://www.w3.org/XML/1998/namespace'} + +# When displaying the dissected packets, some attributes are nice to include in the description of their parent. +include_in_parent = {"Patient Position", + "ROI Number", + "ROI Name", + "Contour Geometric Type", + "Observation Number", + "ROI Observation Label", + "RT ROI Interpreted Type", + "Dose Reference Structure Type", + "Dose Reference Description", + "Dose Reference Type", + "Target Prescription Dose", + "Tolerance Table Label", + "Beam Limiting Device Position Tolerance", + "Number of Fractions Planned", + "Treatment Machine Name", + "RT Beam Limiting Device Type", + "Beam Number", + "Beam Name", + "Beam Type", + "Radiation Type", + "Wedge Type", + "Wedge ID", + "Wedge Angle", + "Material ID", + "Block Tray ID", + "Block Name", + "Applicator ID", + "Applicator Type", + "Control Point Index", + "Nominal Beam Energy", + "Cumulative Meterset Weight", + "Patient Setup Number"} + +# Data elements are listed in three tables in Part 6: +# * Table 6-1. Registry of DICOM Data Elements +# * Table 7-1. Registry of DICOM File Meta Elements +# * Table 8-1. Registry of DICOM Directory Structuring Elements +# All three tables are in the same format and can be merged for processing. + +# The Command data elements (used only in networking), are listed in two tables in Part 7: +# * Table E.1-1. Command Fields +# * Table E.2-1. Retired Command Fields +# The Retired Command Fields are missing the last column. For processing here, +# we just add a last column with "RET", and they can be parsed with the same +# as for the Data elements. 
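+# Illustration (hypothetical cell values): a retired command row parsed as
+#     [tag, name, keyword, VR, VM]
+# is padded below to
+#     [tag, name, keyword, VR, VM, "RET"]
+# so that text_for_row() can read row[5] uniformly for both command tables.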
+ +data_element_tables=["table_6-1", "table_7-1", "table_8-1"] + +def get_trs(document, table_id): + return document.findall(f"//db:table[@xml:id='{table_id}']/db:tbody/db:tr", + namespaces=dbns) + +data_trs = sum((get_trs(part06, table_id) for table_id in data_element_tables), []) +cmd_trs = get_trs(part07, "table_E.1-1") +retired_cmd_trs = get_trs(part07, "table_E.2-1") + +def get_texts_in_row(tr): + tds = tr.findall("db:td", namespaces=dbns) + texts = [" ".join(x.replace('\u200b', '').replace('\u00b5', 'u').strip() for x in td.itertext() if x.strip() != '') for td in tds] + return texts + +data_rows = [get_texts_in_row(x) for x in data_trs] +retired_cmd_rows = [get_texts_in_row(x) for x in retired_cmd_trs] +cmd_rows = ([get_texts_in_row(x) for x in cmd_trs] + + [x + ["RET"] for x in retired_cmd_rows]) + +def parse_tag(tag): + # To handle some old cases where "x" is included as part of the tag number + tag = tag.replace("x", "0") + return f"0x{tag[1:5]}{tag[6:10]}" +def parse_ret(ret): + if ret.startswith("RET"): + return -1 + else: + return 0 +def include_in_parent_bit(name): + if name in include_in_parent: + return -1 + else: + return 0 +def text_for_row(row): + return f' {{ {parse_tag(row[0])}, "{row[1]}", "{row[3]}", "{row[4]}", {parse_ret(row[5])}, {include_in_parent_bit(row[1])}}},' + +def text_for_rows(rows): + return "\n".join(text_for_row(row) for row in rows) + +vrs = {i+1: get_texts_in_row(x)[0].split(maxsplit=1) for i,x in enumerate(get_trs(part05, "table_6.2-1"))} + + +# Table A-1. UID Values +uid_trs = get_trs(part06, "table_A-1") +uid_rows = [get_texts_in_row(x) for x in uid_trs] + +def uid_define_name(uid): + if uid[1] == "(Retired)": + return f'"{uid[0]}"' + uid_type = uid[3] + uid_name = uid[1] + uid_name = re.sub(":.*", "", uid[1]) + if uid_name.endswith(uid_type): + uid_name = uid_name[:-len(uid_type)].strip() + return f"DCM_UID_{definify(uid_type)}_{definify(uid_name)}" + +import re +def definify(s): + return re.sub('[^A-Z0-9]+', '_', re.sub(' +', ' ', re.sub('[^-A-Z0-9 ]+', '', s.upper()))) + +uid_rows = sorted(uid_rows, key=lambda uid_row: [int(i) for i in uid_row[0].split(".")]) +packet_dcm_h = """/* packet-dcm.h + * Definitions for DICOM dissection + * Copyright 2003, Rich Coe + * Copyright 2008-2018, David Aggeler + * + * DICOM communication protocol: https://www.dicomstandard.org/current/ + * + * Generated automatically by """ + os.path.basename(sys.argv[0]) + """ from the following sources: + * + * """ + part05.find("./db:subtitle", namespaces=dbns).text + """ + * """ + part06.find("./db:subtitle", namespaces=dbns).text + """ + * """ + part07.find("./db:subtitle", namespaces=dbns).text + """ + * + * Wireshark - Network traffic analyzer + * By Gerald Combs + * Copyright 1998 Gerald Combs + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef __PACKET_DCM_H__ +#define __PACKET_DCM_H__ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +""" + "\n".join(f"#define DCM_VR_{vr[0]} {i:2d} /* {vr[1]:25s} */" for i,vr in vrs.items()) + """ + +/* Following must be in the same order as the definitions above */ +static const gchar* dcm_tag_vr_lookup[] = { + " ", + """ + ",\n ".join(",".join(f'"{x[1][0]}"' for x in j[1]) for j in itertools.groupby(vrs.items(), lambda i: (i[0]-1)//8)) + """ +}; + + +/* --------------------------------------------------------------------- + * DICOM Tag Definitions + * + * Some Tags can have different VRs + * + * Group 1000 is not supported, multiple tags with same description (retired anyhow) + * Group 7Fxx is not 
supported, multiple tags with same description (retired anyhow) + * + * Tags (0020,3100 to 0020, 31FF) not supported, multiple tags with same description (retired anyhow) + * + * Repeating groups (50xx & 60xx) are manually added. Declared as 5000 & 6000 + */ + +typedef struct dcm_tag { + const guint32 tag; + const gchar *description; + const gchar *vr; + const gchar *vm; + const gboolean is_retired; + const gboolean add_to_summary; /* Add to parent's item description */ +} dcm_tag_t; + +static dcm_tag_t dcm_tag_data[] = { + + /* Command Tags */ +""" + text_for_rows(cmd_rows) + """ + + /* Data Tags */ +""" + text_for_rows(data_rows) + """ +}; + +/* --------------------------------------------------------------------- + * DICOM UID Definitions + + * Part 6 lists following different UID Types (2006-2008) + + * Application Context Name + * Coding Scheme + * DICOM UIDs as a Coding Scheme + * LDAP OID + * Meta SOP Class + * SOP Class + * Service Class + * Transfer Syntax + * Well-known Print Queue SOP Instance + * Well-known Printer SOP Instance + * Well-known SOP Instance + * Well-known frame of reference + */ + +typedef struct dcm_uid { + const gchar *value; + const gchar *name; + const gchar *type; +} dcm_uid_t; + +""" + "\n".join(f'#define {uid_define_name(uid)} "{uid[0]}"' + for uid in uid_rows if uid[1] != '(Retired)') + """ + +static dcm_uid_t dcm_uid_data[] = { +""" + "\n".join(f' {{ {uid_define_name(uid)}, "{uid[1]}", "{uid[3]}"}},' + for uid in uid_rows)+ """ +}; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* packet-dcm.h */""" + +print(packet_dcm_h) diff --git a/tools/make-pci-ids.py b/tools/make-pci-ids.py new file mode 100755 index 0000000..0a77f76 --- /dev/null +++ b/tools/make-pci-ids.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python3 +# +# make-pci-ids - Creates a file containing PCI IDs. 
+# It uses the database from
+# https://github.com/pciutils/pciids/raw/master/pci.ids
+# to create our file epan/pci-ids.c
+#
+# Wireshark - Network traffic analyzer
+#
+# By Caleb Chiu
+# Copyright 2021
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+import string
+import sys
+import urllib.request, urllib.error, urllib.parse
+
+OUTPUT_FILE = "epan/pci-ids.c"
+
+MIN_VENDOR_COUNT = 2250  # 2261 on 2021-11-01
+MIN_DEVICE_COUNT = 33000  # 33724 on 2021-11-01
+
+CODE_PREFIX = """\
+ *
+ * Generated by tools/make-pci-ids.py
+ * By Caleb Chiu
+ * Copyright 2021
+ *
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <config.h>
+
+#include <stdint.h>
+
+#include "pci-ids.h"
+
+typedef struct
+{
+    uint16_t vid;
+    uint16_t did;
+    uint16_t svid;
+    uint16_t ssid;
+    char *name;
+
+} pci_id_t;
+
+typedef struct
+{
+    uint16_t vid;
+    uint16_t count;
+    pci_id_t *ids_ptr;
+
+} pci_vid_index_t;
+
+"""
+
+CODE_POSTFIX = """
+static pci_vid_index_t *get_vid_index(uint16_t vid)
+{
+    uint32_t start_index = 0;
+    uint32_t end_index = 0;
+    uint32_t idx = 0;
+
+    end_index = sizeof(pci_vid_index)/sizeof(pci_vid_index[0]);
+
+    while(start_index != end_index)
+    {
+        if(end_index - start_index == 1)
+        {
+            if(pci_vid_index[start_index].vid == vid)
+                return &pci_vid_index[start_index];
+
+            break;
+        }
+
+        idx = (start_index + end_index)/2;
+
+        if(pci_vid_index[idx].vid < vid)
+            start_index = idx;
+        else
+            if(pci_vid_index[idx].vid > vid)
+                end_index = idx;
+            else
+                return &pci_vid_index[idx];
+
+    }
+
+    return NULL;
+
+}
+
+const char *pci_id_str(uint16_t vid, uint16_t did, uint16_t svid, uint16_t ssid)
+{
+    unsigned int i;
+    static char *not_found = \"Not found\";
+    pci_vid_index_t *index_ptr;
+    pci_id_t *ids_ptr;
+
+    index_ptr = get_vid_index(vid);
+
+    if(index_ptr == NULL)
+        return not_found;
+
+    ids_ptr = index_ptr->ids_ptr;
+    for(i = 0; i < index_ptr->count; ids_ptr++, i++)
+        if(vid == ids_ptr->vid &&
+           did == ids_ptr->did &&
+           svid == ids_ptr->svid &&
+           ssid == ids_ptr->ssid)
+            return ids_ptr->name;
+    return not_found;
+
+}
+"""
+
+
+id_list = []
+count_list = []
+
+
+def exit_msg(msg=None, status=1):
+    if msg is not None:
+        sys.stderr.write(msg + '\n')
+    sys.exit(status)
+
+
+def main():
+    req_headers = { 'User-Agent': 'Wireshark make-pci-ids' }
+    req = urllib.request.Request('https://github.com/pciutils/pciids/raw/master/pci.ids', headers=req_headers)
+    response = urllib.request.urlopen(req)
+    lines = response.read().decode('UTF-8', 'replace').splitlines()
+
+    out_lines = '''\
+/* pci-ids.c
+ *
+ * pci-ids.c is based on the pci.ids of The PCI ID Repository at
+ * https://pci-ids.ucw.cz/, fetched indirectly via
+ * https://github.com/pciutils/pciids
+'''
+    vid = -1
+    did = -1
+    svid = -1
+    entries = 0
+    line_num = 0
+
+    for line in lines:
+        line = line.strip('\n')
+        line_num += 1
+
+        if line_num <= 15:
+            line = line.replace('#', ' ', 1)
+            line = line.lstrip()
+            line = line.replace("GNU General Public License", "GPL")
+            if line:
+                line = ' * ' + line
+            else:
+                line = ' *' + line
+            out_lines += line + '\n'
+            if line_num == 15:
+                out_lines += CODE_PREFIX
+
+        line = line.replace("\\","\\\\")
+        line = line.replace("\"","\\\"")
+        # Break up "?" runs so the generated C strings can't contain
+        # accidental trigraph sequences such as "??-".
+        line = line.replace("?","?-")
+        tabs = len(line) - len(line.lstrip('\t'))
+        if tabs == 0:
+            #print line
+            words = line.split(" ", 1)
+            if len(words) < 2:
+                continue
+            if len(words[0]) != 4:
+                continue
+            if all(c in string.hexdigits for c in words[0]):
+                hex_int = int(words[0], 16)
+                if vid != -1:
+                    out_lines += "}; /* pci_vid_%04X[] */\n\n" % (vid)
+                    count_list.append(entries)
+                vid = hex_int
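+                # New vendor: reset the per-vendor counters before
+                # emitting its device table.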
+                entries = 1
+                did = -1
+                svid = -1
+                ssid = -1
+                out_lines += "pci_id_t pci_vid_%04X[] = {\n" % (vid)
+                out_lines += "{0x%04X, 0xFFFF, 0xFFFF, 0xFFFF, \"%s(0x%04X)\"},\n" % (vid, words[1].strip(), vid)
+                id_list.append(vid)
+            continue
+
+        if tabs == 1:
+            line = line.strip('\t')
+            words = line.split(" ", 1)
+            if len(words) < 2:
+                continue
+            if len(words[0]) != 4:
+                continue
+            if all(c in string.hexdigits for c in words[0]):
+                hex_int = int(words[0], 16)
+                did = hex_int
+                svid = -1
+                ssid = -1
+                out_lines += "{0x%04X, 0x%04X, 0xFFFF, 0xFFFF, \"%s(0x%04X)\"},\n" % (vid, did, words[1].strip(), did)
+                entries += 1
+            continue
+
+        if tabs == 2:
+            line = line.strip('\t')
+            words = line.split(" ", 2)
+            if len(words[0]) != 4:
+                continue
+            if all(c in string.hexdigits for c in words[0]):
+                hex_int = int(words[0], 16)
+                svid = hex_int
+
+            if all(c in string.hexdigits for c in words[1]):
+                hex_int = int(words[1], 16)
+                ssid = hex_int
+
+            out_lines += "{0x%04X, 0x%04X, 0x%04X, 0x%04X, \"%s(0x%04X-0x%04X)\"},\n" % (vid, did, svid, ssid, words[2].strip(), svid, ssid)
+            entries += 1
+            svid = -1
+            ssid = -1
+            continue
+
+    out_lines += "}; /* pci_vid_%04X[] */\n" % (vid)
+    count_list.append(entries)
+
+    out_lines += "\npci_vid_index_t pci_vid_index[] = {\n"
+
+    vendor_count = len(id_list)
+    device_count = 0
+    for i in range(vendor_count):
+        out_lines += "{0x%04X, %d, pci_vid_%04X },\n" % (id_list[i], count_list[i], id_list[i])
+        device_count += count_list[i]
+
+    out_lines += "}; /* We have %d VIDs */\n" % (vendor_count)
+
+    out_lines += CODE_POSTFIX
+
+    if vendor_count < MIN_VENDOR_COUNT:
+        exit_msg(f'Too few vendors. Wanted {MIN_VENDOR_COUNT}, got {vendor_count}.')
+
+    if device_count < MIN_DEVICE_COUNT:
+        exit_msg(f'Too few devices. Wanted {MIN_DEVICE_COUNT}, got {device_count}.')
+
+    with open(OUTPUT_FILE, "w", encoding="utf-8") as pci_ids_f:
+        pci_ids_f.write(out_lines)
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/make-plugin-reg.py b/tools/make-plugin-reg.py
new file mode 100755
index 0000000..2b9bc34
--- /dev/null
+++ b/tools/make-plugin-reg.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+#
+# Looks for registration routines in the plugins
+# and assembles C code to call all the routines.
+# A new "plugin.c" file will be written in the current directory.
+#
+
+import os
+import sys
+import re
+
+#
+# The first argument is the directory in which the source files live.
+#
+srcdir = sys.argv[1]
+#
+# The second argument is either "plugin", "plugin_wtap", "plugin_codec",
+# or "plugin_tap".
+#
+registertype = sys.argv[2]
+#
+# All subsequent arguments are the files to scan.
+#
+files = sys.argv[3:]
+
+final_filename = "plugin.c"
+preamble = """\
+/*
+ * Do not modify this file. Changes will be overwritten.
+ *
+ * Generated automatically from %s.
+ */
+""" % (os.path.basename(sys.argv[0]))
+
+# Create the proper list of filenames
+filenames = []
+for file in files:
+    if os.path.isfile(file):
+        filenames.append(file)
+    else:
+        filenames.append(os.path.join(srcdir, file))
+
+if len(filenames) < 1:
+    print("No files found")
+    sys.exit(1)
+
+
+# Look through all files, applying the regex to each line.
+# If the pattern matches, save the "symbol" section to the
+# appropriate set.
+regs = {
+    'proto_reg': set(),
+    'handoff_reg': set(),
+    'wtap_register': set(),
+    'codec_register': set(),
+    'register_tap_listener': set(),
+    }
+
+# For those that don't know Python, r"" indicates a raw string,
+# devoid of Python escapes.
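+# For example, matching the first pattern below against the text
+# "void proto_register_foo(void) {" yields match.group("symbol") == "foo"
+# ("foo" standing in for a real plugin name).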
+proto_regex = r"\bproto_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+handoff_regex = r"\bproto_reg_handoff_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+wtap_reg_regex = r"\bwtap_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+codec_reg_regex = r"\bcodec_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+tap_reg_regex = r"\bregister_tap_listener_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+# This table drives the pattern-matching and symbol-harvesting
+patterns = [
+    ( 'proto_reg', re.compile(proto_regex, re.MULTILINE | re.ASCII) ),
+    ( 'handoff_reg', re.compile(handoff_regex, re.MULTILINE | re.ASCII) ),
+    ( 'wtap_register', re.compile(wtap_reg_regex, re.MULTILINE | re.ASCII) ),
+    ( 'codec_register', re.compile(codec_reg_regex, re.MULTILINE | re.ASCII) ),
+    ( 'register_tap_listener', re.compile(tap_reg_regex, re.MULTILINE | re.ASCII) ),
+    ]
+
+# Grep
+for filename in filenames:
+    file = open(filename)
+    # Read the whole file into memory
+    contents = file.read()
+    for action in patterns:
+        regex = action[1]
+        for match in regex.finditer(contents):
+            symbol = match.group("symbol")
+            sym_type = action[0]
+            regs[sym_type].add(symbol)
+    # We're done with the file contents
+    del contents
+    file.close()
+
+# Make sure we actually processed something
+if (len(regs['proto_reg']) < 1 and len(regs['wtap_register']) < 1 and len(regs['codec_register']) < 1 and len(regs['register_tap_listener']) < 1):
+    print("No plugin registrations found")
+    sys.exit(1)
+
+# Convert the sets into sorted lists to make the output pretty
+regs['proto_reg'] = sorted(regs['proto_reg'])
+regs['handoff_reg'] = sorted(regs['handoff_reg'])
+regs['wtap_register'] = sorted(regs['wtap_register'])
+regs['codec_register'] = sorted(regs['codec_register'])
+regs['register_tap_listener'] = sorted(regs['register_tap_listener'])
+
+reg_code = ""
+
+reg_code += preamble
+
+reg_code += """
+#include "config.h"
+
+#include <gmodule.h>
+
+/* plugins are DLLs on Windows */
+#define WS_BUILD_DLL
+#include "ws_symbol_export.h"
+
+"""
+
+if registertype == "plugin":
+    reg_code += "#include \"epan/proto.h\"\n\n"
+if registertype == "plugin_wtap":
+    reg_code += "#include \"wiretap/wtap.h\"\n\n"
+if registertype == "plugin_codec":
+    reg_code += "#include \"wsutil/codecs.h\"\n\n"
+if registertype == "plugin_tap":
+    reg_code += "#include \"epan/tap.h\"\n\n"
+
+for symbol in regs['proto_reg']:
+    reg_code += "void proto_register_%s(void);\n" % (symbol)
+for symbol in regs['handoff_reg']:
+    reg_code += "void proto_reg_handoff_%s(void);\n" % (symbol)
+for symbol in regs['wtap_register']:
+    reg_code += "void wtap_register_%s(void);\n" % (symbol)
+for symbol in regs['codec_register']:
+    reg_code += "void codec_register_%s(void);\n" % (symbol)
+for symbol in regs['register_tap_listener']:
+    reg_code += "void register_tap_listener_%s(void);\n" % (symbol)
+
+reg_code += """
+WS_DLL_PUBLIC_DEF const gchar plugin_version[] = PLUGIN_VERSION;
+WS_DLL_PUBLIC_DEF const int plugin_want_major = VERSION_MAJOR;
+WS_DLL_PUBLIC_DEF const int plugin_want_minor = VERSION_MINOR;
+
+WS_DLL_PUBLIC void plugin_register(void);
+
+void plugin_register(void)
+{
+"""
+
+if registertype == "plugin":
+    for symbol in regs['proto_reg']:
+        reg_code += "    static proto_plugin plug_%s;\n\n" % (symbol)
+        reg_code += "    plug_%s.register_protoinfo = proto_register_%s;\n" % (symbol, symbol)
+        if symbol in regs['handoff_reg']:
+            reg_code += "    plug_%s.register_handoff = proto_reg_handoff_%s;\n" % (symbol, symbol)
+        else:
+            reg_code += "    plug_%s.register_handoff = NULL;\n" % (symbol)
+        reg_code += "    proto_register_plugin(&plug_%s);\n" % 
(symbol) +if registertype == "plugin_wtap": + for symbol in regs['wtap_register']: + reg_code += " static wtap_plugin plug_%s;\n\n" % (symbol) + reg_code += " plug_%s.register_wtap_module = wtap_register_%s;\n" % (symbol, symbol) + reg_code += " wtap_register_plugin(&plug_%s);\n" % (symbol) +if registertype == "plugin_codec": + for symbol in regs['codec_register']: + reg_code += " static codecs_plugin plug_%s;\n\n" % (symbol) + reg_code += " plug_%s.register_codec_module = codec_register_%s;\n" % (symbol, symbol) + reg_code += " codecs_register_plugin(&plug_%s);\n" % (symbol) +if registertype == "plugin_tap": + for symbol in regs['register_tap_listener']: + reg_code += " static tap_plugin plug_%s;\n\n" % (symbol) + reg_code += " plug_%s.register_tap_listener = register_tap_listener_%s;\n" % (symbol, symbol) + reg_code += " tap_register_plugin(&plug_%s);\n" % (symbol) + +reg_code += "}\n" + +try: + fh = open(final_filename, 'w') + fh.write(reg_code) + fh.close() +except OSError: + sys.exit('Unable to write ' + final_filename + '.\n') + +# +# Editor modelines - https://www.wireshark.org/tools/modelines.html +# +# Local variables: +# c-basic-offset: 4 +# indent-tabs-mode: nil +# End: +# +# vi: set shiftwidth=4 expandtab: +# :indentSize=4:noTabs=true: +# diff --git a/tools/make-regs.py b/tools/make-regs.py new file mode 100755 index 0000000..376b3c6 --- /dev/null +++ b/tools/make-regs.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 +# +# Looks for registration routines in the source files +# and assembles C code to call all the routines. +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +import sys +import re + +preamble = """\ +/* + * Do not modify this file. Changes will be overwritten. + * + * Generated automatically using \"make-regs.py\". + */ + +""" + +def gen_prototypes(funcs): + output = "" + for f in funcs: + output += "void {}(void);\n".format(f) + return output + +def gen_array(funcs, name): + output = "{}[] = {{\n".format(name) + for f in funcs: + output += " {{ \"{0}\", {0} }},\n".format(f) + output += " { NULL, NULL }\n};\n" + return output + +def scan_files(infiles, regs): + for path in infiles: + with open(path, 'r', encoding='utf8') as f: + source = f.read() + for array, regex in regs: + matches = re.findall(regex, source) + array.extend(matches) + +def make_dissectors(outfile, infiles): + protos = [] + protos_regex = r"void\s+(proto_register_[\w]+)\s*\(\s*void\s*\)\s*{" + handoffs = [] + handoffs_regex = r"void\s+(proto_reg_handoff_[\w]+)\s*\(\s*void\s*\)\s*{" + + scan_files(infiles, [(protos, protos_regex), (handoffs, handoffs_regex)]) + + if len(protos) < 1: + sys.exit("No protocol registrations found.") + + protos.sort() + handoffs.sort() + + output = preamble + output += """\ +#include "dissectors.h" + +const unsigned long dissector_reg_proto_count = {0}; +const unsigned long dissector_reg_handoff_count = {1}; + +""".format(len(protos), len(handoffs)) + + output += gen_prototypes(protos) + output += "\n" + output += gen_array(protos, "dissector_reg_t dissector_reg_proto") + output += "\n" + output += gen_prototypes(handoffs) + output += "\n" + output += gen_array(handoffs, "dissector_reg_t dissector_reg_handoff") + + with open(outfile, "w") as f: + f.write(output) + + print("Found {0} registrations and {1} handoffs.".format(len(protos), len(handoffs))) + +def make_wtap_modules(outfile, infiles): + wtap_modules = [] + wtap_modules_regex = r"void\s+(register_[\w]+)\s*\(\s*void\s*\)\s*{" + + scan_files(infiles, [(wtap_modules, wtap_modules_regex)]) + + if len(wtap_modules) < 
1:
+        sys.exit("No wiretap registrations found.")
+
+    wtap_modules.sort()
+
+    output = preamble
+    output += """\
+#include "wtap_modules.h"
+
+const unsigned wtap_module_count = {0};
+
+""".format(len(wtap_modules))
+
+    output += gen_prototypes(wtap_modules)
+    output += "\n"
+    output += gen_array(wtap_modules, "wtap_module_reg_t wtap_module_reg")
+
+    with open(outfile, "w") as f:
+        f.write(output)
+
+    print("Found {0} registrations.".format(len(wtap_modules)))
+
+def make_taps(outfile, infiles):
+    taps = []
+    taps_regex = r"void\s+(register_tap_listener_[\w]+)\s*\(\s*void\s*\)\s*{"
+
+    scan_files(infiles, [(taps, taps_regex)])
+
+    if len(taps) < 1:
+        sys.exit("No tap registrations found.")
+
+    taps.sort()
+
+    output = preamble
+    output += """\
+#include "ui/taps.h"
+
+const unsigned long tap_reg_listener_count = {0};
+
+""".format(len(taps))
+
+    output += gen_prototypes(taps)
+    output += "\n"
+    output += gen_array(taps, "tap_reg_t tap_reg_listener")
+
+    with open(outfile, "w") as f:
+        f.write(output)
+
+    print("Found {0} registrations.".format(len(taps)))
+
+
+def print_usage():
+    sys.exit("Usage: {0} <dissectors|wtap_modules|taps> <outfile> <infiles...|@filelist>\n".format(sys.argv[0]))
+
+if __name__ == "__main__":
+    if len(sys.argv) < 4:
+        print_usage()
+
+    mode = sys.argv[1]
+    outfile = sys.argv[2]
+    if sys.argv[3].startswith("@"):
+        with open(sys.argv[3][1:]) as f:
+            infiles = [l.strip() for l in f.readlines()]
+    else:
+        infiles = sys.argv[3:]
+
+    if mode == "dissectors":
+        make_dissectors(outfile, infiles)
+    elif mode == "wtap_modules":
+        make_wtap_modules(outfile, infiles)
+    elif mode == "taps":
+        make_taps(outfile, infiles)
+    else:
+        print_usage()
diff --git a/tools/make-services.py b/tools/make-services.py
new file mode 100755
index 0000000..e608af7
--- /dev/null
+++ b/tools/make-services.py
@@ -0,0 +1,292 @@
+#!/usr/bin/env python3
+#
+# Parses the CSV version of the IANA Service Name and Transport Protocol Port Number Registry
+# and generates the services data file, epan/services-data.c.
+# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 2013 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later + +iana_svc_url = 'https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' + +__doc__ = '''\ +Usage: make-services.py [url] + +url defaults to + %s +''' % (iana_svc_url) + +import sys +import getopt +import csv +import re +import collections +import urllib.request, urllib.error, urllib.parse +import codecs + +services_file = 'epan/services-data.c' + +exclude_services = [ + '^spr-itunes', + '^spl-itunes', + '^shilp', + ] + +min_source_lines = 14000 # Size was ~ 14800 on 2017-07-20 + +def parse_port(port_str): + + p = port_str.split('-') + try: + if len(p) == 1: + return tuple([int(p[0])]) + if len(p) == 2: + return tuple([int(p[0]), int(p[1])]) + except ValueError: + pass + return () + +def port_to_str(port): + if len(port) == 2: + return str(port[0]) + '-' + str(port[1]) + return str(port[0]) + +def parse_rows(svc_fd): + port_reader = csv.reader(svc_fd) + count = 0 + + # Header positions as of 2013-08-06 + headers = next(port_reader) + + try: + sn_pos = headers.index('Service Name') + except Exception: + sn_pos = 0 + try: + pn_pos = headers.index('Port Number') + except Exception: + pn_pos = 1 + try: + tp_pos = headers.index('Transport Protocol') + except Exception: + tp_pos = 2 + try: + desc_pos = headers.index('Description') + except Exception: + desc_pos = 3 + + services_map = {} + + for row in port_reader: + service = row[sn_pos] + port = parse_port(row[pn_pos]) + proto = row[tp_pos] + description = row[desc_pos] + count += 1 + + if len(service) < 1 or not port or len(proto) < 1: + continue + + if re.search('|'.join(exclude_services), service): + continue + + # max 15 chars + service = service[:15].rstrip() + + # replace blanks (for some non-standard long names) + service = service.replace(" ", "-") + + description = description.replace("\n", "") + description = re.sub("IANA assigned this well-formed service .+$", "", description) + description = re.sub(" +", " ", description) + description = description.strip() + if description == service or description == service.replace("-", " "): + description = None + + if not port in services_map: + services_map[port] = collections.OrderedDict() + + # Remove some duplicates (first entry wins) + proto_exists = False + for k in services_map[port].keys(): + if proto in services_map[port][k]: + proto_exists = True + break + if proto_exists: + continue + + if not service in services_map[port]: + services_map[port][service] = [description] + services_map[port][service].append(proto) + + if count < min_source_lines: + exit_msg('Not enough parsed data') + + return services_map + +def compile_body(d): + keys = list(d.keys()) + keys.sort() + body = [] + + for port in keys: + for serv in d[port].keys(): + line = [port, d[port][serv][1:], serv] + description = d[port][serv][0] + if description: + line.append(description) + body.append(line) + + return body + +def add_entry(table, port, service_name, description): + table.append([int(port), service_name, description]) + + + # body = [(port-range,), [proto-list], service-name, optional-description] + # table = [port-number, service-name, optional-description] +def compile_tables(body): + + body.sort() + tcp_udp_table = [] + tcp_table = [] + udp_table = [] + sctp_table = [] + dccp_table = [] + + for entry in body: + if len(entry) == 4: + port_range, proto_list, service_name, description = entry + else: + port_range, proto_list, 
service_name = entry + description = None + + for port in port_range: + if 'tcp' in proto_list and 'udp' in proto_list: + add_entry(tcp_udp_table, port, service_name, description) + else: + if 'tcp' in proto_list: + add_entry(tcp_table, port, service_name, description) + if 'udp' in proto_list: + add_entry(udp_table, port, service_name, description) + if 'sctp' in proto_list: + add_entry(sctp_table, port, service_name, description) + if 'dccp' in proto_list: + add_entry(dccp_table, port, service_name, description) + + return tcp_udp_table, tcp_table, udp_table, sctp_table, dccp_table + + +def exit_msg(msg=None, status=1): + if msg is not None: + sys.stderr.write(msg + '\n\n') + sys.stderr.write(__doc__ + '\n') + sys.exit(status) + +def main(argv): + if sys.version_info[0] < 3: + print("This requires Python 3") + sys.exit(2) + + try: + opts, _ = getopt.getopt(argv, "h", ["help"]) + except getopt.GetoptError: + exit_msg() + for opt, _ in opts: + if opt in ("-h", "--help"): + exit_msg(None, 0) + + if (len(argv) > 0): + svc_url = argv[0] + else: + svc_url = iana_svc_url + + try: + if not svc_url.startswith('http'): + svc_fd = open(svc_url) + else: + req = urllib.request.urlopen(svc_url) + svc_fd = codecs.getreader('utf8')(req) + except Exception: + exit_msg('Error opening ' + svc_url) + + body = parse_rows(svc_fd) + + out = open(services_file, 'w') + out.write('''\ +/* + * Wireshark - Network traffic analyzer + * By Gerald Combs + * Copyright 1998 Gerald Combs + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This is a local copy of the IANA port-numbers file. + * + * Wireshark uses it to resolve port numbers into human readable + * service names, e.g. TCP port 80 -> http. + * + * It is subject to copyright and being used with IANA's permission: + * https://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html + * + * The original file can be found at: + * %s + */ + +''' % (iana_svc_url)) + + body = compile_body(body) + # body = [(port-range,), [proto-list], service-name, optional-description] + + max_port = 0 + + tcp_udp, tcp, udp, sctp, dccp = compile_tables(body) + + def write_entry(f, e, max_port): + line = " {{ {}, \"{}\", ".format(*e) + sep_len = 32 - len(line) + if sep_len <= 0: + sep_len = 1 + line += ' ' * sep_len + if len(e) == 3 and e[2]: + line += "\"{}\" }},\n".format(e[2].replace('"', '\\"')) + else: + line += "\"\" },\n" + f.write(line) + if int(e[0]) > int(max_port): + return e[0] + return max_port + + out.write("static ws_services_entry_t global_tcp_udp_services_table[] = {\n") + for e in tcp_udp: + max_port = write_entry(out, e, max_port) + out.write("};\n\n") + + out.write("static ws_services_entry_t global_tcp_services_table[] = {\n") + for e in tcp: + max_port = write_entry(out, e, max_port) + out.write("};\n\n") + + out.write("static ws_services_entry_t global_udp_services_table[] = {\n") + for e in udp: + max_port = write_entry(out, e, max_port) + out.write("};\n\n") + + out.write("static ws_services_entry_t global_sctp_services_table[] = {\n") + for e in sctp: + max_port = write_entry(out, e, max_port) + out.write("};\n\n") + + out.write("static ws_services_entry_t global_dccp_services_table[] = {\n") + for e in dccp: + max_port = write_entry(out, e, max_port) + out.write("};\n\n") + + out.write("static const uint16_t _services_max_port = {};\n".format(max_port)) + + out.close() + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/tools/make-tls-ct-logids.py b/tools/make-tls-ct-logids.py new file mode 100755 index 0000000..0b74c51 --- 
/dev/null +++ b/tools/make-tls-ct-logids.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# Generate the array of Certificate Transparency Log ID to description mappings +# for the TLS dissector. +# +# To update the TLS dissector source file, run this from the source directory: +# +# python3 tools/make-tls-ct-logids.py --update +# + +import argparse +from base64 import b64decode, b64encode +from enum import Enum +import itertools +import os +import requests +from hashlib import sha256 + + +# Begin of comment, followed by the actual array definition +HEADER = "/* Generated by tools/make-tls-ct-logids.py\n" +# See also https://www.certificate-transparency.org/known-logs +CT_JSON_URL = 'https://www.gstatic.com/ct/log_list/v3/all_logs_list.json' +# File to be patched +SOURCE_FILE = os.path.join('epan', 'dissectors', 'packet-tls-utils.c') + +# Maximum elements per line in the value array. 11 is chosen because it results +# in output consistent with clang-format. +BYTES_PER_LINE = 11 + +class SourceStage(Enum): + BEGIN = 1 + IN_METAINFO = 2 + IN_BLOCK = 3 + END = 4 + + +def escape_c(s): + return s.replace('\\', '\\\\').replace('"', '\\"') + + +def byteshex(b): + return " ".join("0x%02x," % b for b in bytearray(b)) + + +def process_json(obj, lastmod): + logs = list(itertools.chain(*[op['logs'] for op in obj['operators']])) + metainfo, block = HEADER, '' + metainfo += " * Last-Modified %s, %s entries. */\n" % (lastmod, len(logs)) + block += "static const bytes_string ct_logids[] = {\n" + for entry in logs: + desc = entry["description"] + pubkey_der = b64decode(entry["key"]) + key_id = sha256(pubkey_der).digest() + block += ' { (const uint8_t[]){\n' + for offset in range(0, len(key_id), BYTES_PER_LINE): + block += ' %s\n' % \ + byteshex(key_id[offset:offset+BYTES_PER_LINE]) + block += ' },\n' + block += ' %d, "%s" },\n' % (len(key_id), escape_c(desc)) + block += " { NULL, 0, NULL }\n" + block += "};\n" + return metainfo, block + + +def parse_source(source_path): + """ + Reads the source file and tries to split it in the parts before, inside and + after the block. 
+ """ + begin, metainfo, block, end = '', '', '', '' + # Stages: BEGIN (before block), IN_METAINFO, IN_BLOCK (skip), END + stage = SourceStage.BEGIN + with open(source_path) as f: + for line in f: + if line.startswith('/* Generated by '): + stage = SourceStage.IN_METAINFO + + + if stage == SourceStage.BEGIN: + begin += line + elif stage == SourceStage.IN_METAINFO: + metainfo += line + elif stage == SourceStage.IN_BLOCK: + block += line + if line.startswith('}'): + stage = SourceStage.END + elif stage == SourceStage.END: + end += line + + if line.startswith(' * Last-Modified '): + stage = SourceStage.IN_BLOCK + + if stage != SourceStage.END: + raise RuntimeError("Could not parse file (in stage %s)" % stage.name) + return begin, metainfo, block, end + + +parser = argparse.ArgumentParser() +parser.add_argument("--update", action="store_true", + help="Update %s as needed instead of writing to stdout" % SOURCE_FILE) + + +def main(): + args = parser.parse_args() + this_dir = os.path.dirname(__file__) + r = requests.get(CT_JSON_URL) + j_metainfo, j_block = process_json(r.json(), lastmod=r.headers['Last-Modified']) + source_path = os.path.join(this_dir, '..', SOURCE_FILE) + + if args.update: + s_begin, _, s_block, s_end = parse_source(source_path) + if s_block == j_block: + print("File is up-to-date") + else: + with open(source_path, "w") as f: + f.write(s_begin) + f.write(j_metainfo) + f.write(j_block) + f.write(s_end) + print("Updated %s" % source_path) + else: + print(j_metainfo, j_block) + + +if __name__ == '__main__': + main() diff --git a/tools/make-usb.py b/tools/make-usb.py new file mode 100755 index 0000000..6540803 --- /dev/null +++ b/tools/make-usb.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 +# +# make-usb - Creates a file containing vendor and product ids. +# It use the databases from +# - The USB ID Repository: https://usb-ids.gowdy.us (http://www.linux-usb.org), mirrored at Sourceforge +# - libgphoto2 from gPhoto: https://github.com/gphoto/libgphoto2 (http://gphoto.org), available at GitHub +# to create our file epan/dissectors/usb.c + +import re +import sys +import urllib.request, urllib.error, urllib.parse + +MODE_IDLE = 0 +MODE_VENDOR_PRODUCT = 1 +MIN_VENDORS = 3400 # 3409 as of 2020-11-15 +MIN_PRODUCTS = 20000 # 20361 as of 2020-11-15 + +mode = MODE_IDLE + +req_headers = { 'User-Agent': 'Wireshark make-usb' } +req = urllib.request.Request('https://sourceforge.net/p/linux-usb/repo/HEAD/tree/trunk/htdocs/usb.ids?format=raw', headers=req_headers) +response = urllib.request.urlopen(req) +lines = response.read().decode('UTF-8', 'replace').splitlines() + +vendors = dict() +products = dict() +vendors_str="static const value_string usb_vendors_vals[] = {\n" +products_str="static const value_string usb_products_vals[] = {\n" + +# Escape backslashes, quotes, control characters and non-ASCII characters. +escapes = {} +for i in range(256): + if i in b'\\"': + escapes[i] = '\\%c' % i + elif i in range(0x20, 0x80) or i in b'\t': + escapes[i] = chr(i) + else: + escapes[i] = '\\%03o' % i + +for utf8line in lines: + # Convert single backslashes to double (escaped) backslashes, escape quotes, etc. + utf8line = utf8line.rstrip() + utf8line = re.sub("\?+", "?", utf8line) + line = ''.join(escapes[byte] for byte in utf8line.encode('utf8')) + + if line == "# Vendors, devices and interfaces. 
Please keep sorted.": + mode = MODE_VENDOR_PRODUCT + continue + elif line == "# List of known device classes, subclasses and protocols": + mode = MODE_IDLE + continue + + if mode == MODE_VENDOR_PRODUCT: + if re.match("^[0-9a-f]{4}", line): + last_vendor=line[:4] + vendors[last_vendor] = line[4:].strip() + elif re.match("^\t[0-9a-f]{4}", line): + line = line.strip() + product = "%s%s"%(last_vendor, line[:4]) + products[product] = line[4:].strip() + +req = urllib.request.Request('https://raw.githubusercontent.com/gphoto/libgphoto2/master/camlibs/ptp2/library.c', headers=req_headers) +response = urllib.request.urlopen(req) +lines = response.read().decode('UTF-8', 'replace').splitlines() + +mode = MODE_IDLE + +for line in lines: + if mode == MODE_IDLE and re.match(r".*\bmodels\[\]", line): + mode = MODE_VENDOR_PRODUCT + continue + + if mode == MODE_VENDOR_PRODUCT and re.match(r"};", line): + mode = MODE_IDLE + + if mode == MODE_IDLE: + continue + + m = re.match(r"\s*{\"(.*):(.*)\",\s*0x([0-9a-fA-F]{4}),\s*0x([0-9a-fA-F]{4}),.*},", line) + if m is not None: + manuf = m.group(1).strip() + model = re.sub(r"\(.*\)", "", m.group(2)).strip() + product = m.group(3) + m.group(4) + products[product] = ' '.join((manuf, model)) + +req = urllib.request.Request('https://raw.githubusercontent.com/gphoto/libgphoto2/master/camlibs/ptp2/music-players.h', headers=req_headers) +response = urllib.request.urlopen(req) +lines = response.read().decode('UTF-8', 'replace').splitlines() + +for line in lines: + m = re.match(r"\s*{\s*\"(.*)\",\s*0x([0-9a-fA-F]{4}),\s*\"(.*)\",\s*0x([0-9a-fA-F]{4}),", line) + if m is not None: + manuf = m.group(1).strip() + model = m.group(3).strip() + product = m.group(2) + m.group(4) + products[product] = ' '.join((manuf, model)) + + +if (len(vendors) < MIN_VENDORS): + sys.stderr.write("Not enough vendors: %d\n" % len(vendors)) + sys.exit(1) + +if (len(products) < MIN_PRODUCTS): + sys.stderr.write("Not enough products: %d\n" % len(products)) + sys.exit(1) + +for v in sorted(vendors): + vendors_str += " { 0x%s, \"%s\" },\n"%(v,vendors[v]) + +vendors_str += """ { 0, NULL }\n}; +value_string_ext ext_usb_vendors_vals = VALUE_STRING_EXT_INIT(usb_vendors_vals); +""" + +for p in sorted(products): + products_str += " { 0x%s, \"%s\" },\n"%(p,products[p]) + +products_str += """ { 0, NULL }\n}; +value_string_ext ext_usb_products_vals = VALUE_STRING_EXT_INIT(usb_products_vals); +""" + +header="""/* usb.c + * USB vendor id and product ids + * This file was generated by running python ./tools/make-usb.py + * Don't change it directly. + * + * Copyright 2012, Michal Labedzki for Tieto Corporation + * + * Other values imported from libghoto2/camlibs/ptp2/library.c, music-players.h + * + * Copyright (C) 2001-2005 Mariusz Woloszyn + * Copyright (C) 2003-2013 Marcus Meissner + * Copyright (C) 2005 Hubert Figuiere + * Copyright (C) 2009 Axel Waggershauser + * Copyright (C) 2005-2007 Richard A. Low + * Copyright (C) 2005-2012 Linus Walleij + * Copyright (C) 2007 Ted Bullock + * Copyright (C) 2012 Sony Mobile Communications AB + * + * Wireshark - Network traffic analyzer + * By Gerald Combs + * Copyright 1998 Gerald Combs + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +/* + * XXX We should probably parse a USB ID file at program start instead + * of generating this file. 
+ */
+
+#include "config.h"
+#include <epan/packet.h>
+"""
+
+f = open('epan/dissectors/usb.c', 'w', encoding='utf-8')
+f.write(header)
+f.write("\n")
+f.write(vendors_str)
+f.write("\n\n")
+f.write(products_str)
+f.write("\n")
+f.close()
+
+print("Success!")
diff --git a/tools/make-version.py b/tools/make-version.py
new file mode 100755
index 0000000..4adc7b2
--- /dev/null
+++ b/tools/make-version.py
@@ -0,0 +1,459 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022 by Moshe Kaplan
+# Based on make-version.pl by Jörg Mayer
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# See below for usage.
+#
+# If run with the "-r" or "--set-release" argument the VERSION macro in
+# CMakeLists.txt will have the version_extra template appended to the
+# version number. vcs_version.h will _not_ be generated if either argument is
+# present.
+#
+# make-version.py is called during the build to update vcs_version.h in the build
+# directory. To set a fixed version, use something like:
+#
+#   cmake -DVCSVERSION_OVERRIDE="Git v3.1.0 packaged as 3.1.0-1"
+#
+
+# XXX - We're pretty dumb about the "{vcsinfo}" substitution, and about having
+# spaces in the package format.
+
+import argparse
+import os
+import os.path
+import re
+import shlex
+import shutil
+import sys
+import subprocess
+
+GIT_ABBREV_LENGTH = 12
+
+# `git archive` will use an 'export-subst' entry in .gitattributes to replace
+# the $Format strings with `git log --pretty=format:` placeholders.
+# The output will look something like the following:
+#   GIT_EXPORT_SUBST_H = '51315cf37cdf6c0add1b1c99cb7941aac4489a6f'
+#   GIT_EXPORT_SUBST_D = 'HEAD -> master, upstream/master, upstream/HEAD'
+# If the text "$Format" is still present, it means that
+# git archive did not replace the $Format string, which
+# means that this is not a git archive.
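+# (In a plain checkout, the two assignments below would instead still read,
+# e.g., GIT_EXPORT_SUBST_H = '$Format:%H$' and GIT_EXPORT_SUBST_D = '$Format:%D$',
+# which is what the IS_GIT_ARCHIVE check detects.)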
+GIT_EXPORT_SUBST_H = '40459284278611128aac5cef35a563218933f8da' +GIT_EXPORT_SUBST_D = 'tag: wireshark-4.2.2, tag: v4.2.2, refs/merge-requests/13920/head, refs/keep-around/40459284278611128aac5cef35a563218933f8da' +IS_GIT_ARCHIVE = not GIT_EXPORT_SUBST_H.startswith('$Format') + + +def update_cmakelists_txt(src_dir, set_version, repo_data): + if not set_version and repo_data['package_string'] == "": + return + + cmake_filepath = os.path.join(src_dir, "CMakeLists.txt") + + with open(cmake_filepath, encoding='utf-8') as fh: + cmake_contents = fh.read() + + MAJOR_PATTERN = r"^set *\( *PROJECT_MAJOR_VERSION *\d+ *\)$" + MINOR_PATTERN = r"^set *\( *PROJECT_MINOR_VERSION *\d+ *\)$" + PATCH_PATTERN = r"^set *\( *PROJECT_PATCH_VERSION *\d+ *\)$" + VERSION_EXTENSION_PATTERN = r"^set *\( *PROJECT_VERSION_EXTENSION .*?$" + + new_cmake_contents = cmake_contents + new_cmake_contents = re.sub(MAJOR_PATTERN, + f"set(PROJECT_MAJOR_VERSION {repo_data['version_major']})", + new_cmake_contents, + flags=re.MULTILINE) + new_cmake_contents = re.sub(MINOR_PATTERN, + f"set(PROJECT_MINOR_VERSION {repo_data['version_minor']})", + new_cmake_contents, + flags=re.MULTILINE) + new_cmake_contents = re.sub(PATCH_PATTERN, + f"set(PROJECT_PATCH_VERSION {repo_data['version_patch']})", + new_cmake_contents, + flags=re.MULTILINE) + new_cmake_contents = re.sub(VERSION_EXTENSION_PATTERN, + f"set(PROJECT_VERSION_EXTENSION \"{repo_data['package_string']}\")", + new_cmake_contents, + flags=re.MULTILINE) + + with open(cmake_filepath, mode='w', encoding='utf-8') as fh: + fh.write(new_cmake_contents) + print(cmake_filepath + " has been updated.") + + +def update_debian_changelog(src_dir, repo_data): + # Read packaging/debian/changelog, then write back out an updated version. + + deb_changelog_filepath = os.path.join(src_dir, "packaging", "debian", "changelog") + with open(deb_changelog_filepath, encoding='utf-8') as fh: + changelog_contents = fh.read() + + CHANGELOG_PATTERN = r"^.*" + text_replacement = f"wireshark ({repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}{repo_data['package_string']}) unstable; urgency=low" + # Note: Only need to replace the first line, so we don't use re.MULTILINE or re.DOTALL + new_changelog_contents = re.sub(CHANGELOG_PATTERN, text_replacement, changelog_contents) + with open(deb_changelog_filepath, mode='w', encoding='utf-8') as fh: + fh.write(new_changelog_contents) + print(deb_changelog_filepath + " has been updated.") + + +def create_version_file(version_f, repo_data): + 'Write the version to the specified file handle' + + version_f.write(f"{repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}{repo_data['package_string']}\n") + print(version_f.name + " has been created.") + + +def update_attributes_asciidoc(src_dir, repo_data): + # Read docbook/attributes.adoc, then write it back out with an updated + # wireshark-version replacement line. 
+    asciidoc_filepath = os.path.join(src_dir, "docbook", "attributes.adoc")
+    with open(asciidoc_filepath, encoding='utf-8') as fh:
+        asciidoc_contents = fh.read()
+
+    # Sample line (without quotes): ":wireshark-version: 2.3.1"
+    ASCIIDOC_PATTERN = r"^:wireshark-version:.*$"
+    text_replacement = f":wireshark-version: {repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}"
+
+    new_asciidoc_contents = re.sub(ASCIIDOC_PATTERN, text_replacement, asciidoc_contents, flags=re.MULTILINE)
+
+    with open(asciidoc_filepath, mode='w', encoding='utf-8') as fh:
+        fh.write(new_asciidoc_contents)
+    print(asciidoc_filepath + " has been updated.")
+
+
+def update_docinfo_asciidoc(src_dir, repo_data):
+    doc_paths = []
+    doc_paths += [os.path.join(src_dir, 'docbook', 'wsdg_src', 'developer-guide-docinfo.xml')]
+    doc_paths += [os.path.join(src_dir, 'docbook', 'wsug_src', 'user-guide-docinfo.xml')]
+
+    for doc_path in doc_paths:
+        with open(doc_path, encoding='utf-8') as fh:
+            doc_contents = fh.read()
+
+        # Sample line (without quotes): "<subtitle>For Wireshark 1.2</subtitle>"
+        DOC_PATTERN = r"^<subtitle>For Wireshark \d+.\d+<\/subtitle>$"
+        text_replacement = f"<subtitle>For Wireshark {repo_data['version_major']}.{repo_data['version_minor']}</subtitle>"
+
+        new_doc_contents = re.sub(DOC_PATTERN, text_replacement, doc_contents, flags=re.MULTILINE)
+
+        with open(doc_path, mode='w', encoding='utf-8') as fh:
+            fh.write(new_doc_contents)
+        print(doc_path + " has been updated.")
+
+
+def update_cmake_lib_releases(src_dir, repo_data):
+    # Read CMakeLists.txt for each library, then write back out an updated version.
+    dir_paths = []
+    dir_paths += [os.path.join(src_dir, 'epan')]
+    dir_paths += [os.path.join(src_dir, 'wiretap')]
+
+    for dir_path in dir_paths:
+        cmakelists_filepath = os.path.join(dir_path, "CMakeLists.txt")
+        with open(cmakelists_filepath, encoding='utf-8') as fh:
+            cmakelists_contents = fh.read()
+
+        # Sample line (without quotes; note the leading tab): "	VERSION "0.0.0" SOVERSION 0"
+        VERSION_PATTERN = r'^(\s*VERSION\s+"\d+\.\d+\.)\d+'
+        replacement_text = f"\\g<1>{repo_data['version_patch']}"
+        new_cmakelists_contents = re.sub(VERSION_PATTERN,
+                                         replacement_text,
+                                         cmakelists_contents,
+                                         flags=re.MULTILINE)
+
+        with open(cmakelists_filepath, mode='w', encoding='utf-8') as fh:
+            fh.write(new_cmakelists_contents)
+        print(cmakelists_filepath + " has been updated.")
+
+
+# Update distributed files that contain any version information
+def update_versioned_files(src_dir, set_version, repo_data):
+    update_cmakelists_txt(src_dir, set_version, repo_data)
+    update_debian_changelog(src_dir, repo_data)
+    if set_version:
+        update_attributes_asciidoc(src_dir, repo_data)
+        update_docinfo_asciidoc(src_dir, repo_data)
+        update_cmake_lib_releases(src_dir, repo_data)
+
+
+def generate_version_h(repo_data):
+    # Generate new contents of version.h from repository data
+
+    if not repo_data.get('enable_vcsversion'):
+        return "/* #undef VCSVERSION */\n"
+
+    if repo_data.get('git_description'):
+        # Do not bother adding the git branch, the git describe output
+        # normally contains the base tag and commit ID which is more
+        # than sufficient to determine the actual source tree.
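+        # e.g. this yields '#define VCSVERSION "v3.7.2rc0-64-g84d83a8292cb"'
+        # for the sample describe output quoted in read_git_repo() below.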
+ return f'#define VCSVERSION "{repo_data["git_description"]}"\n' + + if repo_data.get('last_change') and repo_data.get('num_commits'): + version_string = f"v{repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}" + vcs_line = f'#define VCSVERSION "{version_string}-Git-{repo_data["num_commits"]}"\n' + return vcs_line + + if repo_data.get('commit_id'): + vcs_line = f'#define VCSVERSION "Git commit {repo_data["commit_id"]}"\n' + return vcs_line + + vcs_line = '#define VCSVERSION "Git Rev Unknown from unknown"\n' + return vcs_line + + +def print_VCS_REVISION(version_file, repo_data, set_vcs): + # Write the version control system's version to $version_file. + # Don't change the file if it is not needed. + # + # XXX - We might want to add VCSVERSION to CMakeLists.txt so that it can + # generate vcs_version.h independently. + + new_version_h = generate_version_h(repo_data) + + needs_update = True + if os.path.exists(version_file): + with open(version_file, encoding='utf-8') as fh: + current_version_h = fh.read() + if current_version_h == new_version_h: + needs_update = False + + if not set_vcs: + return + + if needs_update: + with open(version_file, mode='w', encoding='utf-8') as fh: + fh.write(new_version_h) + print(version_file + " has been updated.") + elif not repo_data['enable_vcsversion']: + print(version_file + " disabled.") + else: + print(version_file + " unchanged.") + return + + +def get_version(cmakelists_file_data): + # Reads major, minor, and patch + # Sample data: + # set(PROJECT_MAJOR_VERSION 3) + # set(PROJECT_MINOR_VERSION 7) + # set(PROJECT_PATCH_VERSION 2) + + MAJOR_PATTERN = r"^set *\( *PROJECT_MAJOR_VERSION *(\d+) *\)$" + MINOR_PATTERN = r"^set *\( *PROJECT_MINOR_VERSION *(\d+) *\)$" + PATCH_PATTERN = r"^set *\( *PROJECT_PATCH_VERSION *(\d+) *\)$" + + major_match = re.search(MAJOR_PATTERN, cmakelists_file_data, re.MULTILINE) + minor_match = re.search(MINOR_PATTERN, cmakelists_file_data, re.MULTILINE) + patch_match = re.search(PATCH_PATTERN, cmakelists_file_data, re.MULTILINE) + + if not major_match: + raise Exception("Couldn't get major version") + if not minor_match: + raise Exception("Couldn't get minor version") + if not patch_match: + raise Exception("Couldn't get patch version") + + major_version = major_match.groups()[0] + minor_version = minor_match.groups()[0] + patch_version = patch_match.groups()[0] + return major_version, minor_version, patch_version + + +def read_git_archive(tagged_version_extra, untagged_version_extra): + # Reads key data from the git repo. + # For git archives, this does not need to access the source directory because + # `git archive` will use an 'export-subst' entry in .gitattributes to replace + # the value for GIT_EXPORT_SUBST_H in the script. + # Returns a dictionary with key values from the repository + + is_tagged = False + for git_ref in GIT_EXPORT_SUBST_D.split(r', '): + match = re.match(r'^tag: (v[1-9].+)', git_ref) + if match: + is_tagged = True + vcs_tag = match.groups()[0] + + if is_tagged: + print(f"We are on tag {vcs_tag}.") + package_string = tagged_version_extra + else: + print("We are not tagged.") + package_string = untagged_version_extra + + # Always 0 commits for a git archive + num_commits = 0 + + # Assume a full commit hash, abbreviate it. 
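+    # e.g. with GIT_ABBREV_LENGTH == 12, the archive hash above yields
+    # commit_id == '404592842786'.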
+    commit_id = GIT_EXPORT_SUBST_H[:GIT_ABBREV_LENGTH]
+    package_string = package_string.replace("{vcsinfo}", str(num_commits) + "-" + commit_id)
+
+    repo_data = {}
+    repo_data['commit_id'] = commit_id
+    repo_data['enable_vcsversion'] = True
+    repo_data['info_source'] = "git archive"
+    repo_data['is_tagged'] = is_tagged
+    repo_data['num_commits'] = num_commits
+    repo_data['package_string'] = package_string
+    return repo_data
+
+
+def read_git_repo(src_dir, tagged_version_extra, untagged_version_extra):
+    # Reads metadata from the git repo for generating the version string.
+    # Returns the data in a dict.
+
+    IS_GIT_INSTALLED = shutil.which('git') is not None
+    if not IS_GIT_INSTALLED:
+        print("Git unavailable. Git revision will be missing from version string.", file=sys.stderr)
+        return {}
+
+    GIT_DIR = os.path.join(src_dir, '.git')
+    # Check whether to include VCS version information in vcs_version.h
+    enable_vcsversion = True
+    git_get_commondir_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" rev-parse --git-common-dir')
+    git_commondir = subprocess.check_output(git_get_commondir_cmd, universal_newlines=True).strip()
+    if git_commondir and os.path.exists(f"{git_commondir}{os.sep}wireshark-disable-versioning"):
+        print("Header versioning disabled using git override.")
+        enable_vcsversion = False
+
+    git_last_changetime_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" log -1 --pretty=format:%at')
+    git_last_changetime = subprocess.check_output(git_last_changetime_cmd, universal_newlines=True).strip()
+
+    # Commits since last annotated tag.
+    # Output could be something like: v3.7.2rc0-64-g84d83a8292cb
+    # Or g84d83a8292cb
+    git_last_annotated_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" describe --abbrev={GIT_ABBREV_LENGTH} --long --always --match "v[1-9]*"')
+    git_last_annotated = subprocess.check_output(git_last_annotated_cmd, universal_newlines=True).strip()
+    parts = git_last_annotated.split('-')
+    git_description = git_last_annotated
+    if len(parts) > 1:
+        num_commits = int(parts[1])
+    else:
+        num_commits = 0
+    commit_id = parts[-1]
+
+    release_candidate = ''
+    RC_PATTERN = r'^v\d+\.\d+\.\d+(rc\d+)$'
+    match = re.match(RC_PATTERN, parts[0])
+    if match:
+        release_candidate = match.groups()[0]
+
+    # This command is expected to fail if the version is not tagged.
+    try:
+        git_vcs_tag_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" describe --exact-match --match "v[1-9]*"')
+        git_vcs_tag = subprocess.check_output(git_vcs_tag_cmd, stderr=subprocess.DEVNULL, universal_newlines=True).strip()
+        is_tagged = True
+    except subprocess.CalledProcessError:
+        is_tagged = False
+
+    git_timestamp = ""
+    if num_commits == 0:
+        # Get the timestamp; the format is similar to: 2022-06-27 23:09:20 -0400
+        # Note: this doesn't appear to be used; only command success is checked.
+        git_timestamp_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" log --format="%ad" -n 1 --date=iso')
+        git_timestamp = subprocess.check_output(git_timestamp_cmd, universal_newlines=True).strip()
+
+    if is_tagged:
+        print(f"We are on tag {git_vcs_tag}.")
+        package_string = tagged_version_extra
+    else:
+        print("We are not tagged.")
+        package_string = untagged_version_extra
+
+    package_string = release_candidate + package_string.replace("{vcsinfo}", str(num_commits) + "-" + commit_id)
+
+    repo_data = {}
+    repo_data['commit_id'] = commit_id
+    repo_data['enable_vcsversion'] = enable_vcsversion
+    repo_data['git_timestamp'] = git_timestamp
+    repo_data['git_description'] = git_description
+    repo_data['info_source'] = "Command line (git)"
+    repo_data['is_tagged'] = 
is_tagged + repo_data['last_change'] = git_last_changetime + repo_data['num_commits'] = num_commits + repo_data['package_string'] = package_string + return repo_data + + +def parse_versionstring(version_arg): + version_parts = version_arg.split('.') + if len(version_parts) != 3: + msg = "Version must have three numbers of the form x.y.z. You entered: " + version_arg + raise argparse.ArgumentTypeError(msg) + for i, version_type in enumerate(('Major', 'Minor', 'Patch')): + try: + int(version_parts[i]) + except ValueError: + msg = f"{version_type} version must be a number! {version_type} version was '{version_parts[i]}'" + raise argparse.ArgumentTypeError(msg) + return version_parts + + +def read_repo_info(src_dir, tagged_version_extra, untagged_version_extra): + if IS_GIT_ARCHIVE: + repo_data = read_git_archive(tagged_version_extra, untagged_version_extra) + elif os.path.exists(src_dir + os.sep + '.git') and not os.path.exists(os.path.join(src_dir, '.git', 'svn')): + repo_data = read_git_repo(src_dir, tagged_version_extra, untagged_version_extra) + else: + raise Exception(src_dir + " does not appear to be a git repo or git archive!") + + cmake_path = os.path.join(src_dir, "CMakeLists.txt") + with open(cmake_path, encoding='utf-8') as fh: + version_major, version_minor, version_patch = get_version(fh.read()) + repo_data['version_major'] = version_major + repo_data['version_minor'] = version_minor + repo_data['version_patch'] = version_patch + + return repo_data + + +# CMakeLists.txt calls this with no arguments to create vcs_version.h +# AppVeyor calls this with --set-release --untagged-version-extra=-{vcsinfo}-AppVeyor --tagged-version-extra=-AppVeyor +# .gitlab-ci calls this with --set-release +# Release checklist requires --set-version +def main(): + parser = argparse.ArgumentParser(description='Wireshark file and package versions') + action_group = parser.add_mutually_exclusive_group() + action_group.add_argument('--set-version', '-v', metavar='', type=parse_versionstring, help='Set the major, minor, and patch versions in the top-level CMakeLists.txt, docbook/attributes.adoc, packaging/debian/changelog, and the CMakeLists.txt for all libraries to the provided version number') + action_group.add_argument('--set-release', '-r', action='store_true', help='Set the extra release information in the top-level CMakeLists.txt based on either default or command-line specified options.') + setrel_group = parser.add_argument_group() + setrel_group.add_argument('--tagged-version-extra', '-t', default="", help="Extra version information format to use when a tag is found. No format \ +(an empty string) is used by default.") + setrel_group.add_argument('--untagged-version-extra', '-u', default='-{vcsinfo}', help='Extra version information format to use when no tag is found. 
The format "-{vcsinfo}" (the number of commits and commit ID) is used by default.')
+    parser.add_argument('--version-file', '-f', metavar='', type=argparse.FileType('w'), help='path to version file')
+    parser.add_argument("src_dir", metavar='src_dir', nargs=1, help="path to source code")
+    args = parser.parse_args()
+
+    if args.version_file and not args.set_release:
+        sys.stderr.write('Error: --version-file must be used with --set-release.\n')
+        sys.exit(1)
+
+    src_dir = args.src_dir[0]
+
+    if args.set_version:
+        repo_data = {}
+        repo_data['version_major'] = args.set_version[0]
+        repo_data['version_minor'] = args.set_version[1]
+        repo_data['version_patch'] = args.set_version[2]
+        repo_data['package_string'] = ''
+    else:
+        repo_data = read_repo_info(src_dir, args.tagged_version_extra, args.untagged_version_extra)
+
+    set_vcs = not (args.set_release or args.set_version)
+    VERSION_FILE = 'vcs_version.h'
+    print_VCS_REVISION(VERSION_FILE, repo_data, set_vcs)
+
+    if args.set_release or args.set_version:
+        update_versioned_files(src_dir, args.set_version, repo_data)
+
+    if args.version_file:
+        create_version_file(args.version_file, repo_data)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/make_charset_table.c b/tools/make_charset_table.c
new file mode 100644
index 0000000..27d921a
--- /dev/null
+++ b/tools/make_charset_table.c
@@ -0,0 +1,125 @@
+/* make_charset_table.c
+ * sample program to generate tables for charsets.c using iconv
+ *
+ * public domain
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <iconv.h>
+#include <errno.h>
+
+#define UNREPL 0xFFFD
+
+int main(int argc, char **argv) {
+    /* for now only UCS-2 */
+    uint16_t table[0x100];
+
+    iconv_t conv;
+    const char *charset;
+    int i, j;
+
+    /* 0x00 ... 0x7F same as ASCII? */
+    int ascii_based = 1;
+    /* 0x00 ... 0x9F same as ISO? 
*/ + int iso_based = 1; + + if (argc != 2) { + printf("usage: %s \n", argv[0]); + return 1; + } + + charset = argv[1]; + + conv = iconv_open("UCS-2", charset); + if (conv == (iconv_t) -1) { + perror("iconv_open"); + return 2; + } + iconv_close(conv); + + for (i = 0x00; i < 0x100; i++) { + unsigned char in[1], out[2]; + size_t inlen = 1, outlen = 2; + + char *inbuf = (char *) in; + char *outbuf = (char *) out; + + size_t ret; + + in[0] = i; + + conv = iconv_open("UCS-2BE", charset); + + if (conv == (iconv_t) -1) { + /* shouldn't fail now */ + perror("iconv_open"); + return 2; + } + + ret = iconv(conv, &inbuf, &inlen, &outbuf, &outlen); + + if (ret == (size_t) -1 && errno == EILSEQ) { + table[i] = UNREPL; + iconv_close(conv); + continue; + } + + if (ret == (size_t) -1) { + perror("iconv"); + iconv_close(conv); + return 4; + } + + iconv_close(conv); + + if (ret != 0 || inlen != 0 || outlen != 0) { + fprintf(stderr, "%d: smth went wrong: %zu %zu %zu\n", i, ret, inlen, outlen); + return 3; + } + + if (i < 0x80 && (out[0] != 0 || out[1] != i)) + ascii_based = 0; + + if (i < 0xA0 && (out[0] != 0 || out[1] != i)) + iso_based = 0; + + table[i] = (out[0] << 8) | out[1]; + } + + /* iso_based not supported */ + iso_based = 0; + + printf("/* generated by %s %s */\n", argv[0], charset); + + if (iso_based) + i = 0xA0; + else if (ascii_based) + i = 0x80; + else + i = 0; + + printf("const gunichar2 charset_table_%s[0x%x] = {\n", charset, 0x100 - i); + while (i < 0x100) { + int start = i; + + printf(" "); + + for (j = 0; j < 8; j++, i++) { + if (table[i] == UNREPL) + printf("UNREPL, "); + else + printf("0x%.4x, ", table[i]); + } + + if ((start & 0xf) == 0) + printf(" /* 0x%.2X - */", start); + else + printf(" /* - 0x%.2X */", i - 1); + + printf("\n"); + } + printf("};\n"); + + return 0; +} diff --git a/tools/mingw-rpm-setup.sh b/tools/mingw-rpm-setup.sh new file mode 100755 index 0000000..602c0fb --- /dev/null +++ b/tools/mingw-rpm-setup.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# Setup development environment on Fedora Linux for MinGW-w64 +# +# Wireshark - Network traffic analyzer +# By Gerald Combs +# Copyright 1998 Gerald Combs +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# We drag in tools that might not be needed by all users; it's easier +# that way. 
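+#
+# For example (one possible invocation on a Fedora system):
+#
+#   ./tools/mingw-rpm-setup.sh --install-all -y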
+#
+
+function print_usage() {
+    printf "\\nUtility to setup a Fedora MinGW-w64 system for Wireshark development.\\n"
+    printf "The basic usage installs the needed software\\n\\n"
+    printf "Usage: %s [--install-all] [...other options...]\\n" "$0"
+    printf "\\t--install-all: install everything\\n"
+    printf "\\t[other]: other options are passed as-is to dnf\\n"
+    printf "\\tPass -y to bypass any \"are you sure?\" messages.\\n"
+}
+
+OPTIONS=
+for arg; do
+    case $arg in
+        --help)
+            print_usage
+            exit 0
+            ;;
+        --install-all)
+            ;;
+        *)
+            OPTIONS="$OPTIONS $arg"
+            ;;
+    esac
+done
+
+BASIC_LIST="mingw64-gcc \
+    mingw64-gcc-c++ \
+    mingw64-glib2 \
+    mingw64-libgcrypt \
+    mingw64-c-ares \
+    mingw64-qt6-qtbase \
+    mingw64-qt6-qt5compat \
+    mingw64-qt6-qtmultimedia \
+    mingw64-qt6-qttools \
+    mingw64-speexdsp \
+    mingw32-nsis \
+    mingw64-nsis \
+    mingw64-gnutls \
+    mingw64-brotli \
+    mingw64-minizip \
+    mingw64-opus \
+    mingw64-wpcap \
+    mingw64-libxml2 \
+    ninja-build \
+    flex \
+    lemon \
+    asciidoctor \
+    libxslt \
+    docbook-style-xsl \
+    ccache \
+    git \
+    patch \
+    cmake \
+    cmake-rpm-macros"
+
+ACTUAL_LIST=$BASIC_LIST
+
+dnf install $ACTUAL_LIST $OPTIONS
diff --git a/tools/msnchat b/tools/msnchat
new file mode 100755
index 0000000..c2fcaab
--- /dev/null
+++ b/tools/msnchat
@@ -0,0 +1,315 @@
+#!/usr/bin/env python
+"""
+Process packet capture files and produce a nice HTML
+report of MSN Chat sessions.
+
+Copyright (c) 2003 by Gilbert Ramirez <gram@alumni.rice.edu>
+
+SPDX-License-Identifier: GPL-2.0-or-later
+"""
+
+import os
+import re
+import sys
+import array
+import string
+import WiresharkXML
+import getopt
+
+# By default we output the HTML to stdout
+out_fh = sys.stdout
+
+class MSNMessage:
+    pass
+
+class MSN_MSG(MSNMessage):
+    def __init__(self, timestamp, user, message):
+        self.timestamp = timestamp
+        self.user = user
+        self.message = message
+
+
+class Conversation:
+    """Keeps track of a single MSN chat session"""
+
+    re_MSG_out = re.compile("MSG (?P<trid>\d+) (?P<acktype>[UNA]) (?P<msglen>\d+)")
+    re_MSG_in = re.compile("MSG (?P<user>\S+)@(?P<domain>\S+) (?P<alias>\S+) (?P<msglen>\d+)")
+
+    USER_NOT_FOUND = -1
+    DEFAULT_USER = None
+
+
+    DEFAULT_USER_COLOR = "#0000ff"
+    USER_COLORS = [ "#ff0000", "#00ff00",
+                    "#800000", "#008000", "#000080" ]
+
+    DEFAULT_USER_TEXT_COLOR = "#000000"
+    USER_TEXT_COLOR = "#000080"
+
+    def __init__(self):
+        self.packets = []
+        self.messages = []
+
+    def AddPacket(self, packet):
+        self.packets.append(packet)
+
+    def Summarize(self):
+        for packet in self.packets:
+            msg = self.CreateMSNMessage(packet)
+            if msg:
+                self.messages.append(msg)
+            else:
+                #XXX
+                pass
+
+
+    def CreateMSNMessage(self, packet):
+        msnms = packet.get_items("msnms")[0]
+
+        # Check the first line in the msnms transmission for the user
+        child = msnms.children[0]
+        user = self.USER_NOT_FOUND
+
+        m = self.re_MSG_out.search(child.show)
+        if m:
+            user = self.DEFAULT_USER
+
+        else:
+            m = self.re_MSG_in.search(child.show)
+            if m:
+                user = m.group("alias")
+
+        if user == self.USER_NOT_FOUND:
+            print >> sys.stderr, "No match for", child.show
+            sys.exit(1)
+            return None
+
+        msg = ""
+
+        i = 5
+        check_trailing = 0
+        if len(msnms.children) > 5:
+            check_trailing = 1
+
+        while i < len(msnms.children):
+            msg += msnms.children[i].show
+            if check_trailing:
+                j = msg.find("MSG ")
+                if j >= 0:
+                    msg = msg[:j]
+                    i += 5
+                else:
+                    i += 6
+            else:
+                i += 6
+
+        timestamp = packet.get_items("frame.time")[0].get_show()
+        i = timestamp.rfind(".")
+        timestamp = timestamp[:i]
+
+        return MSN_MSG(timestamp, user, msg)
+
+    def MsgToHTML(self, text):
+        bytes = array.array("B")
+
+        new_string = text
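+        # The loop below decodes the backslash escapes tshark emits, one
+        # at a time: an octal escape such as "\351" (hypothetical input)
+        # appends the single byte 0xE9, "\r" and "\n" escapes are dropped,
+        # and any other escaped character is copied through verbatim.
+        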
i = new_string.find("\\") + + while i > -1: + # At the end? + if i == len(new_string) - 1: + # Just let the default action + # copy everything to 'bytes' + break + + if new_string[i+1] in string.digits: + left = new_string[:i] + bytes.fromstring(left) + + right = new_string[i+4:] + + oct_string = new_string[i+1:i+4] + char = int(oct_string, 8) + bytes.append(char) + + new_string = right + + # ignore \r and \n + elif new_string[i+1] in "rn": + copy_these = new_string[:i] + bytes.fromstring(copy_these) + new_string = new_string[i+2:] + + else: + copy_these = new_string[:i+2] + bytes.fromstring(copy_these) + new_string = new_string[i+2:] + + i = new_string.find("\\") + + + bytes.fromstring(new_string) + + return bytes + + def CreateHTML(self, default_user): + if not self.messages: + return + + print >> out_fh, """ +

---- New Conversation @ %s ----


""" \ + % (self.messages[0].timestamp) + + user_color_assignments = {} + + for msg in self.messages: + # Calculate 'user' and 'user_color' and 'user_text_color' + if msg.user == self.DEFAULT_USER: + user = default_user + user_color = self.DEFAULT_USER_COLOR + user_text_color = self.DEFAULT_USER_TEXT_COLOR + else: + user = msg.user + user_text_color = self.USER_TEXT_COLOR + if user_color_assignments.has_key(user): + user_color = user_color_assignments[user] + else: + num_assigned = len(user_color_assignments.keys()) + user_color = self.USER_COLORS[num_assigned] + user_color_assignments[user] = user_color + + # "Oct 6, 2003 21:45:25" --> "21:45:25" + timestamp = msg.timestamp.split()[-1] + + htmlmsg = self.MsgToHTML(msg.message) + + print >> out_fh, """ +(%s) %s: """ \ + % (user_color, timestamp, user, user_text_color) + + htmlmsg.tofile(out_fh) + + print >> out_fh, "
" + + +class CaptureFile: + """Parses a single a capture file and keeps track of + all chat sessions in the file.""" + + def __init__(self, capture_filename, tshark): + """Run tshark on the capture file and parse + the data.""" + self.conversations = [] + self.conversations_map = {} + + pipe = os.popen(tshark + " -Tpdml -n -R " + "'msnms contains \"X-MMS-IM-Format\"' " + "-r " + capture_filename, "r") + + WiresharkXML.parse_fh(pipe, self.collect_packets) + + for conv in self.conversations: + conv.Summarize() + + def collect_packets(self, packet): + """Collect the packets passed back from WiresharkXML. + Sort them by TCP/IP conversation, as there could be multiple + clients per machine.""" + # Just in case we're looking at tunnelling protocols where + # more than one IP or TCP header exists, look at the last one, + # which would be the one inside the tunnel. + src_ip = packet.get_items("ip.src")[-1].get_show() + dst_ip = packet.get_items("ip.dst")[-1].get_show() + src_tcp = packet.get_items("tcp.srcport")[-1].get_show() + dst_tcp = packet.get_items("tcp.dstport")[-1].get_show() + + key_params = [src_ip, dst_ip, src_tcp, dst_tcp] + key_params.sort() + key = '|'.join(key_params) + + if not self.conversations_map.has_key(key): + conv = self.conversations_map[key] = Conversation() + self.conversations.append(conv) + else: + conv = self.conversations_map[key] + + conv.AddPacket(packet) + + + def CreateHTML(self, default_user): + if not self.conversations: + return + + for conv in self.conversations: + conv.CreateHTML(default_user) + + +def run_filename(filename, default_user, tshark): + """Process one capture file.""" + + capture = CaptureFile(filename, tshark) + capture.CreateHTML(default_user) + + +def run(filenames, default_user, tshark): + # HTML Header + print >> out_fh, """ +MSN Conversation + + +""" + for filename in filenames: + run_filename(filename, default_user, tshark) + + # HTML Footer + print >> out_fh, """ +
</body>
+</html>
+"""
+
+
+def usage():
+    print >> sys.stderr, "msnchat [OPTIONS] CAPTURE_FILE [...]"
+    print >> sys.stderr, "  -o FILE    name of output file"
+    print >> sys.stderr, "  -t TSHARK  location of tshark binary"
+    print >> sys.stderr, "  -u USER    name for unknown user"
+    sys.exit(1)
+
+def main():
+    default_user = "Unknown"
+    tshark = "tshark"
+
+    optstring = "ho:t:u:"
+    longopts = ["help"]
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], optstring, longopts)
+    except getopt.GetoptError:
+        usage()
+
+    for opt, arg in opts:
+        if opt == "-h" or opt == "--help":
+            usage()
+
+        elif opt == "-o":
+            filename = arg
+            global out_fh
+            try:
+                out_fh = open(filename, "w")
+            except IOError:
+                sys.exit("Could not open %s for writing." % (filename,))
+
+        elif opt == "-u":
+            default_user = arg
+
+        elif opt == "-t":
+            tshark = arg
+
+        else:
+            sys.exit("Unhandled command-line option: " + opt)
+
+    run(args, default_user, tshark)
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/msys2-setup.sh b/tools/msys2-setup.sh
new file mode 100644
index 0000000..0ca6329
--- /dev/null
+++ b/tools/msys2-setup.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+# Setup development environment on MSYS2
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+
+function print_usage() {
+    printf "\\nUtility to setup an MSYS2 MinGW-w64 system for Wireshark development.\\n"
+    printf "The basic usage installs the needed software\\n\\n"
+    printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
+    printf "\\t--install-optional: install optional software as well\\n"
+    printf "\\t--install-test-deps: install packages required to run all tests\\n"
+    printf "\\t--install-all: install everything\\n"
+    printf "\\t[other]: other options are passed as-is to pacman\\n"
+    printf "\\tPass --noconfirm to bypass any \"are you sure?\" messages.\\n"
+}
+
+ADDITIONAL=0
+TESTDEPS=0
+OPTIONS=
+for arg; do
+    case $arg in
+        --help)
+            print_usage
+            exit 0
+            ;;
+        --install-optional)
+            ADDITIONAL=1
+            ;;
+        --install-test-deps)
+            TESTDEPS=1
+            ;;
+        --install-all)
+            ADDITIONAL=1
+            TESTDEPS=1
+            ;;
+        *)
+            OPTIONS="$OPTIONS $arg"
+            ;;
+    esac
+done
+
+PACKAGE_PREFIX="${MINGW_PACKAGE_PREFIX:-mingw-w64-x86_64}"
+
+#
+# Lua packaging is kind of a mess. Lua 5.2 is not available. Some packages
+# have a hard dependency on LuaJIT, which conflicts with Lua 5.1 and vice
+# versa. This will probably have to be fixed by the MSYS2 maintainers.
+# XXX Is this still true?
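+#
+# For example (one possible invocation from an MSYS2 MinGW-w64 shell):
+#
+#   ./tools/msys2-setup.sh --install-all --noconfirm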
+# +BASIC_LIST="base-devel \ + git \ + ${PACKAGE_PREFIX}-bcg729 \ + ${PACKAGE_PREFIX}-brotli \ + ${PACKAGE_PREFIX}-c-ares \ + ${PACKAGE_PREFIX}-cmake \ + ${PACKAGE_PREFIX}-glib2 \ + ${PACKAGE_PREFIX}-gnutls \ + ${PACKAGE_PREFIX}-libgcrypt \ + ${PACKAGE_PREFIX}-libilbc \ + ${PACKAGE_PREFIX}-libmaxminddb \ + ${PACKAGE_PREFIX}-nghttp2 \ + ${PACKAGE_PREFIX}-libpcap \ + ${PACKAGE_PREFIX}-libsmi \ + ${PACKAGE_PREFIX}-libssh \ + ${PACKAGE_PREFIX}-libxml2 \ + ${PACKAGE_PREFIX}-lz4 \ + ${PACKAGE_PREFIX}-minizip \ + ${PACKAGE_PREFIX}-ninja \ + ${PACKAGE_PREFIX}-opencore-amr \ + ${PACKAGE_PREFIX}-opus \ + ${PACKAGE_PREFIX}-pcre2 \ + ${PACKAGE_PREFIX}-python \ + ${PACKAGE_PREFIX}-qt6-base \ + ${PACKAGE_PREFIX}-qt6-multimedia \ + ${PACKAGE_PREFIX}-qt6-tools \ + ${PACKAGE_PREFIX}-qt6-translations \ + ${PACKAGE_PREFIX}-qt6-5compat \ + ${PACKAGE_PREFIX}-sbc \ + ${PACKAGE_PREFIX}-snappy \ + ${PACKAGE_PREFIX}-spandsp \ + ${PACKAGE_PREFIX}-speexdsp \ + ${PACKAGE_PREFIX}-toolchain \ + ${PACKAGE_PREFIX}-winsparkle \ + ${PACKAGE_PREFIX}-zlib \ + ${PACKAGE_PREFIX}-zstd" + +ADDITIONAL_LIST="${PACKAGE_PREFIX}-asciidoctor \ + ${PACKAGE_PREFIX}-ccache \ + ${PACKAGE_PREFIX}-docbook-xsl \ + ${PACKAGE_PREFIX}-doxygen \ + ${PACKAGE_PREFIX}-libxslt \ + ${PACKAGE_PREFIX}-perl \ + ${PACKAGE_PREFIX}-ntldd" + +TESTDEPS_LIST="${PACKAGE_PREFIX}-python-pytest \ + ${PACKAGE_PREFIX}-python-pytest-xdist" + +ACTUAL_LIST=$BASIC_LIST + +if [ $ADDITIONAL -ne 0 ] +then + ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST" +fi + +if [ $TESTDEPS -ne 0 ] +then + ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST" +fi + +# Partial upgrades are unsupported. +pacman --sync --refresh --sysupgrade --needed $ACTUAL_LIST $OPTIONS || exit 2 + +if [ $ADDITIONAL -eq 0 ] +then + printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n" +fi + +if [ $TESTDEPS -eq 0 ] +then + printf "\n*** Test deps not installed. 
Rerun with --install-test-deps to have them.\n" +fi diff --git a/tools/msys2checkdeps.py b/tools/msys2checkdeps.py new file mode 100644 index 0000000..f46eb50 --- /dev/null +++ b/tools/msys2checkdeps.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +# ------------------------------------------------------------------------------------------------------------------ +# list or check dependencies for binary distributions based on MSYS2 (requires the package mingw-w64-ntldd) +# +# run './msys2checkdeps.py --help' for usage information +# ------------------------------------------------------------------------------------------------------------------ +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +from __future__ import print_function + + +import argparse +import os +import subprocess +import sys + + +SYSTEMROOT = os.environ['SYSTEMROOT'] + + +class Dependency: + def __init__(self): + self.location = None + self.dependents = set() + + +def warning(msg): + print("Warning: " + msg, file=sys.stderr) + + +def error(msg): + print("Error: " + msg, file=sys.stderr) + exit(1) + + +def call_ntldd(filename): + try: + output = subprocess.check_output(['ntldd', '-R', filename], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + error("'ntldd' failed with '" + str(e) + "'") + except WindowsError as e: + error("Calling 'ntldd' failed with '" + str(e) + "' (have you installed 'mingw-w64-ntldd-git'?)") + except Exception as e: + error("Calling 'ntldd' failed with '" + str(e) + "'") + return output.decode('utf-8') + + +def get_dependencies(filename, deps): + raw_list = call_ntldd(filename) + + skip_indent = float('Inf') + parents = {} + parents[0] = os.path.basename(filename) + for line in raw_list.splitlines(): + line = line[1:] + indent = len(line) - len(line.lstrip()) + if indent > skip_indent: + continue + else: + skip_indent = float('Inf') + + # if the dependency is not found in the working directory ntldd tries to find it on the search path + # which is indicated by the string '=>' followed by the determined location or 'not found' + if ('=>' in line): + (lib, location) = line.lstrip().split(' => ') + if location == 'not found': + location = None + else: + location = location.rsplit('(', 1)[0].strip() + else: + lib = line.rsplit('(', 1)[0].strip() + location = os.getcwd() + + parents[indent+1] = lib + + # we don't care about Microsoft libraries and their dependencies + if location and SYSTEMROOT in location: + skip_indent = indent + continue + + if lib not in deps: + deps[lib] = Dependency() + deps[lib].location = location + deps[lib].dependents.add(parents[indent]) + return deps + + +def collect_dependencies(path): + # collect dependencies + # - each key in 'deps' will be the filename of a dependency + # - the corresponding value is an instance of class Dependency (containing full path and dependents) + deps = {} + if os.path.isfile(path): + deps = get_dependencies(path, deps) + elif os.path.isdir(path): + extensions = ['.exe', '.pyd', '.dll'] + exclusions = ['distutils/command/wininst'] # python + for base, dirs, files in os.walk(path): + for f in files: + filepath = os.path.join(base, f) + (_, ext) = os.path.splitext(f) + if (ext.lower() not in extensions) or any(exclusion in filepath for exclusion in exclusions): + continue + deps = get_dependencies(filepath, deps) + return deps + + +if __name__ == '__main__': + modes = ['list', 'list-compact', 'check', 'check-missing', 'check-unused'] + + # parse arguments from command line + parser = argparse.ArgumentParser(description="List 
or check dependencies for binary distributions based on MSYS2.\n" + "(requires the package 'mingw-w64-ntldd')", + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument('mode', metavar="MODE", choices=modes, + help="One of the following:\n" + " list - list dependencies in human-readable form\n" + " with full path and list of dependents\n" + " list-compact - list dependencies in compact form (as a plain list of filenames)\n" + " check - check for missing or unused dependencies (see below for details)\n" + " check-missing - check if all required dependencies are present in PATH\n" + " exits with error code 2 if missing dependencies are found\n" + " and prints the list to stderr\n" + " check-unused - check if any of the libraries in the root of PATH are unused\n" + " and prints the list to stderr") + parser.add_argument('path', metavar='PATH', + help="full or relative path to a single file or a directory to work on\n" + "(directories will be checked recursively)") + parser.add_argument('-w', '--working-directory', metavar="DIR", + help="Use custom working directory (instead of 'dirname PATH')") + args = parser.parse_args() + + # check if path exists + args.path = os.path.abspath(args.path) + if not os.path.exists(args.path): + error("Can't find file/folder '" + args.path + "'") + + # get root and set it as working directory (unless one is explicitly specified) + if args.working_directory: + root = os.path.abspath(args.working_directory) + elif os.path.isdir(args.path): + root = args.path + elif os.path.isfile(args.path): + root = os.path.dirname(args.path) + os.chdir(root) + + # get dependencies for path recursively + deps = collect_dependencies(args.path) + + # print output / prepare exit code + exit_code = 0 + for dep in sorted(deps): + location = deps[dep].location + dependents = deps[dep].dependents + + if args.mode == 'list': + if (location is None): + location = '---MISSING---' + print(dep + " - " + location + " (" + ", ".join(dependents) + ")") + elif args.mode == 'list-compact': + print(dep) + elif args.mode in ['check', 'check-missing']: + if ((location is None) or (root not in os.path.abspath(location))): + warning("Missing dependency " + dep + " (" + ", ".join(dependents) + ")") + exit_code = 2 + + # check for unused libraries + if args.mode in ['check', 'check-unused']: + installed_libs = [file for file in os.listdir(root) if file.endswith(".dll")] + deps_lower = [dep.lower() for dep in deps] + top_level_libs = [lib for lib in installed_libs if lib.lower() not in deps_lower] + for top_level_lib in top_level_libs: + warning("Unused dependency " + top_level_lib) + + exit(exit_code) diff --git a/tools/ncp2222.py b/tools/ncp2222.py new file mode 100755 index 0000000..f14d0c5 --- /dev/null +++ b/tools/ncp2222.py @@ -0,0 +1,16921 @@ +#!/usr/bin/env python3 + +""" +Creates C code from a table of NCP type 0x2222 packet types. +(And 0x3333, which are the replies, but the packets are more commonly +refered to as type 0x2222; the 0x3333 replies are understood to be +part of the 0x2222 "family") + +The data-munging code was written by Gilbert Ramirez. +The NCP data comes from Greg Morris . +Many thanks to Novell for letting him work on this. + +Additional data sources: +"Programmer's Guide to the NetWare Core Protocol" by Steve Conner and Dianne Conner. 
+ +At one time, Novell provided a list of NCPs by number at: + +http://developer.novell.com/ndk/ncp.htm (where you could download an +*.exe file which installs a PDF, although you may have to create a login +to do this) + +or + +http://developer.novell.com/ndk/doc/ncp/ +for a badly-formatted HTML version of the same PDF. + +Currently, NCP documentation can be found at: + +https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/ + +with a list of NCPs by number at + +https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/ncpdocs/main.htm + +and some additional NCPs to support volumes > 16TB at + +https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/ncpdocs/16tb+.htm + +NDS information can be found at: + +https://www.microfocus.com/documentation/edirectory-developer-documentation/edirectory-libraries-for-c/ + +and PDFs linked from there, and from + +https://www.novell.com/documentation/developer/ndslib/ + +and HTML versions linked from there. + +The Novell eDirectory Schema Reference gives a "Transfer Format" for +some types, which may be the way they're sent over the wire. + +Portions Copyright (c) 2000-2002 by Gilbert Ramirez . +Portions Copyright (c) Novell, Inc. 2000-2003. + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" + +import os +import sys +import string +import getopt +import traceback + +errors = {} +groups = {} +packets = [] +compcode_lists = None +ptvc_lists = None +msg = None +reply_var = None +#ensure unique expert function declarations +expert_hash = {} + +REC_START = 0 +REC_LENGTH = 1 +REC_FIELD = 2 +REC_ENDIANNESS = 3 +REC_VAR = 4 +REC_REPEAT = 5 +REC_REQ_COND = 6 +REC_INFO_STR = 7 + +NO_VAR = -1 +NO_REPEAT = -1 +NO_REQ_COND = -1 +NO_LENGTH_CHECK = -2 + + +PROTO_LENGTH_UNKNOWN = -1 + +global_highest_var = -1 +global_req_cond = {} + + +REQ_COND_SIZE_VARIABLE = "REQ_COND_SIZE_VARIABLE" +REQ_COND_SIZE_CONSTANT = "REQ_COND_SIZE_CONSTANT" + +############################################################################## +# Global containers +############################################################################## + +class UniqueCollection: + """The UniqueCollection class stores objects which can be compared to other + objects of the same class. If two objects in the collection are equivalent, + only one is stored.""" + + def __init__(self, name): + "Constructor" + self.name = name + self.members = [] + self.member_reprs = {} + + def Add(self, object): + """Add an object to the members lists, if a comparable object + doesn't already exist. The object that is in the member list, that is + either the object that was added or the comparable object that was + already in the member list, is returned.""" + + r = repr(object) + # Is 'object' a duplicate of some other member? 
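+        # For example, two PTVC lists built from identical record tuples
+        # produce identical repr() strings, so the second Add() returns
+        # the object stored first and no duplicate table is generated.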
+ if r in self.member_reprs: + return self.member_reprs[r] + else: + self.member_reprs[r] = object + self.members.append(object) + return object + + def Members(self): + "Returns the list of members." + return self.members + + def HasMember(self, object): + "Does the list of members contain the object?" + if repr(object) in self.member_reprs: + return 1 + else: + return 0 + +# This list needs to be defined before the NCP types are defined, +# because the NCP types are defined in the global scope, not inside +# a function's scope. +ptvc_lists = UniqueCollection('PTVC Lists') + +############################################################################## + +class NamedList: + "NamedList's keep track of PTVC's and Completion Codes" + def __init__(self, name, list): + "Constructor" + self.name = name + self.list = list + + def __cmp__(self, other): + "Compare this NamedList to another" + + if isinstance(other, NamedList): + return cmp(self.list, other.list) + else: + return 0 + + + def Name(self, new_name = None): + "Get/Set name of list" + if new_name is not None: + self.name = new_name + return self.name + + def Records(self): + "Returns record lists" + return self.list + + def Null(self): + "Is there no list (different from an empty list)?" + return self.list is None + + def Empty(self): + "It the list empty (different from a null list)?" + assert(not self.Null()) + + if self.list: + return 0 + else: + return 1 + + def __repr__(self): + return repr(self.list) + +class PTVC(NamedList): + """ProtoTree TVBuff Cursor List ("PTVC List") Class""" + + def __init__(self, name, records, code): + "Constructor" + NamedList.__init__(self, name, []) + + global global_highest_var + + expected_offset = None + highest_var = -1 + + named_vars = {} + + # Make a PTVCRecord object for each list in 'records' + for record in records: + offset = record[REC_START] + length = record[REC_LENGTH] + field = record[REC_FIELD] + endianness = record[REC_ENDIANNESS] + info_str = record[REC_INFO_STR] + + # Variable + var_name = record[REC_VAR] + if var_name: + # Did we already define this var? + if var_name in named_vars: + sys.exit("%s has multiple %s vars." % \ + (name, var_name)) + + highest_var = highest_var + 1 + var = highest_var + if highest_var > global_highest_var: + global_highest_var = highest_var + named_vars[var_name] = var + else: + var = NO_VAR + + # Repeat + repeat_name = record[REC_REPEAT] + if repeat_name: + # Do we have this var? + if repeat_name not in named_vars: + sys.exit("%s does not have %s var defined." % \ + (name, repeat_name)) + repeat = named_vars[repeat_name] + else: + repeat = NO_REPEAT + + # Request Condition + req_cond = record[REC_REQ_COND] + if req_cond != NO_REQ_COND: + global_req_cond[req_cond] = None + + ptvc_rec = PTVCRecord(field, length, endianness, var, repeat, req_cond, info_str, code) + + if expected_offset is None: + expected_offset = offset + + elif expected_offset == -1: + pass + + elif expected_offset != offset and offset != -1: + msg.write("Expected offset in %s for %s to be %d\n" % \ + (name, field.HFName(), expected_offset)) + sys.exit(1) + + # We can't make a PTVC list from a variable-length + # packet, unless the fields can tell us at run time + # how long the packet is. That is, nstring8 is fine, since + # the field has an integer telling us how long the string is. + # Fields that don't have a length determinable at run-time + # cannot be variable-length. 
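+            # A tuple length, e.g. a (1, 255)-style (min, max) pair, marks
+            # a variable-length record; only the self-describing field
+            # types checked below may appear at such an offset.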
+ if type(ptvc_rec.Length()) == type(()): + if isinstance(ptvc_rec.Field(), nstring): + expected_offset = -1 + pass + elif isinstance(ptvc_rec.Field(), nbytes): + expected_offset = -1 + pass + elif isinstance(ptvc_rec.Field(), struct): + expected_offset = -1 + pass + else: + field = ptvc_rec.Field() + assert 0, "Cannot make PTVC from %s, type %s" % \ + (field.HFName(), field) + + elif expected_offset > -1: + if ptvc_rec.Length() < 0: + expected_offset = -1 + else: + expected_offset = expected_offset + ptvc_rec.Length() + + + self.list.append(ptvc_rec) + + def ETTName(self): + return "ett_%s" % (self.Name(),) + + + def Code(self): + x = "static const ptvc_record %s[] = {\n" % (self.Name()) + for ptvc_rec in self.list: + x = x + " %s,\n" % (ptvc_rec.Code()) + x = x + " { NULL, 0, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n" + x = x + "};\n" + return x + + def __repr__(self): + x = "" + for ptvc_rec in self.list: + x = x + repr(ptvc_rec) + return x + + +class PTVCBitfield(PTVC): + def __init__(self, name, vars): + NamedList.__init__(self, name, []) + + for var in vars: + ptvc_rec = PTVCRecord(var, var.Length(), var.Endianness(), + NO_VAR, NO_REPEAT, NO_REQ_COND, None, 0) + self.list.append(ptvc_rec) + + def Code(self): + ett_name = self.ETTName() + x = "static int %s = -1;\n" % (ett_name,) + + x = x + "static const ptvc_record ptvc_%s[] = {\n" % (self.Name()) + for ptvc_rec in self.list: + x = x + " %s,\n" % (ptvc_rec.Code()) + x = x + " { NULL, 0, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n" + x = x + "};\n" + + x = x + "static const sub_ptvc_record %s = {\n" % (self.Name(),) + x = x + " &%s,\n" % (ett_name,) + x = x + " NULL,\n" + x = x + " ptvc_%s,\n" % (self.Name(),) + x = x + "};\n" + return x + + +class PTVCRecord: + def __init__(self, field, length, endianness, var, repeat, req_cond, info_str, code): + "Constructor" + self.field = field + self.length = length + self.endianness = endianness + self.var = var + self.repeat = repeat + self.req_cond = req_cond + self.req_info_str = info_str + self.__code__ = code + + def __cmp__(self, other): + "Comparison operator" + if self.field != other.field: + return 1 + elif self.length < other.length: + return -1 + elif self.length > other.length: + return 1 + elif self.endianness != other.endianness: + return 1 + else: + return 0 + + def Code(self): + # Nice textual representations + if self.var == NO_VAR: + var = "NO_VAR" + else: + var = self.var + + if self.repeat == NO_REPEAT: + repeat = "NO_REPEAT" + else: + repeat = self.repeat + + if self.req_cond == NO_REQ_COND: + req_cond = "NO_REQ_COND" + else: + req_cond = global_req_cond[self.req_cond] + assert req_cond is not None + + if isinstance(self.field, struct): + return self.field.ReferenceString(var, repeat, req_cond) + else: + return self.RegularCode(var, repeat, req_cond) + + def InfoStrName(self): + "Returns a C symbol based on the NCP function code, for the info_str" + return "info_str_0x%x" % (self.__code__) + + def RegularCode(self, var, repeat, req_cond): + "String representation" + endianness = 'ENC_BIG_ENDIAN' + if self.endianness == ENC_LITTLE_ENDIAN: + endianness = 'ENC_LITTLE_ENDIAN' + + length = None + + if type(self.length) == type(0): + length = self.length + else: + # This is for cases where a length is needed + # in order to determine a following variable-length, + # like nstring8, where 1 byte is needed in order + # to determine the variable length. 
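+            # e.g. for an nstring8 field, Length() returns 1 (the size of
+            # its one-byte length prefix), so the record is emitted with
+            # that fixed size and the rest is read at run time.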
+ var_length = self.field.Length() + if var_length > 0: + length = var_length + + if length == PROTO_LENGTH_UNKNOWN: + # XXX length = "PROTO_LENGTH_UNKNOWN" + pass + + assert length, "Length not handled for %s" % (self.field.HFName(),) + + sub_ptvc_name = self.field.PTVCName() + if sub_ptvc_name != "NULL": + sub_ptvc_name = "&%s" % (sub_ptvc_name,) + + if self.req_info_str: + req_info_str = "&" + self.InfoStrName() + "_req" + else: + req_info_str = "NULL" + + return "{ &%s, %s, %s, %s, %s, %s, %s, %s }" % \ + (self.field.HFName(), length, sub_ptvc_name, + req_info_str, endianness, var, repeat, req_cond) + + def Offset(self): + return self.offset + + def Length(self): + return self.length + + def Field(self): + return self.field + + def __repr__(self): + if self.req_info_str: + return "{%s len=%s end=%s var=%s rpt=%s rqc=%s info=%s}" % \ + (self.field.HFName(), self.length, + self.endianness, self.var, self.repeat, self.req_cond, self.req_info_str[1]) + else: + return "{%s len=%s end=%s var=%s rpt=%s rqc=%s}" % \ + (self.field.HFName(), self.length, + self.endianness, self.var, self.repeat, self.req_cond) + +############################################################################## + +class NCP: + "NCP Packet class" + def __init__(self, func_code, description, group, has_length=1): + "Constructor" + self.__code__ = func_code + self.description = description + self.group = group + self.codes = None + self.request_records = None + self.reply_records = None + self.has_length = has_length + self.req_cond_size = None + self.req_info_str = None + self.expert_func = None + + if group not in groups: + msg.write("NCP 0x%x has invalid group '%s'\n" % \ + (self.__code__, group)) + sys.exit(1) + + if self.HasSubFunction(): + # NCP Function with SubFunction + self.start_offset = 10 + else: + # Simple NCP Function + self.start_offset = 7 + + def ReqCondSize(self): + return self.req_cond_size + + def ReqCondSizeVariable(self): + self.req_cond_size = REQ_COND_SIZE_VARIABLE + + def ReqCondSizeConstant(self): + self.req_cond_size = REQ_COND_SIZE_CONSTANT + + def FunctionCode(self, part=None): + "Returns the function code for this NCP packet." + if part is None: + return self.__code__ + elif part == 'high': + if self.HasSubFunction(): + return (self.__code__ & 0xff00) >> 8 + else: + return self.__code__ + elif part == 'low': + if self.HasSubFunction(): + return self.__code__ & 0x00ff + else: + return 0x00 + else: + msg.write("Unknown directive '%s' for function_code()\n" % (part)) + sys.exit(1) + + def HasSubFunction(self): + "Does this NPC packet require a subfunction field?" 
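+        # For example, a one-byte code such as 0x17 is a simple NCP
+        # function, while a (hypothetical) 0x1701 would be function 0x17
+        # with subfunction 0x01; see FunctionCode() above.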
+ if self.__code__ <= 0xff: + return 0 + else: + return 1 + + def HasLength(self): + return self.has_length + + def Description(self): + return self.description + + def Group(self): + return self.group + + def PTVCRequest(self): + return self.ptvc_request + + def PTVCReply(self): + return self.ptvc_reply + + def Request(self, size, records=[], **kwargs): + self.request_size = size + self.request_records = records + if self.HasSubFunction(): + if self.HasLength(): + self.CheckRecords(size, records, "Request", 10) + else: + self.CheckRecords(size, records, "Request", 8) + else: + self.CheckRecords(size, records, "Request", 7) + self.ptvc_request = self.MakePTVC(records, "request", self.__code__) + + if "info_str" in kwargs: + self.req_info_str = kwargs["info_str"] + + def Reply(self, size, records=[]): + self.reply_size = size + self.reply_records = records + self.CheckRecords(size, records, "Reply", 8) + self.ptvc_reply = self.MakePTVC(records, "reply", self.__code__) + + def CheckRecords(self, size, records, descr, min_hdr_length): + "Simple sanity check" + if size == NO_LENGTH_CHECK: + return + min = size + max = size + if type(size) == type(()): + min = size[0] + max = size[1] + + lower = min_hdr_length + upper = min_hdr_length + + for record in records: + rec_size = record[REC_LENGTH] + rec_lower = rec_size + rec_upper = rec_size + if type(rec_size) == type(()): + rec_lower = rec_size[0] + rec_upper = rec_size[1] + + lower = lower + rec_lower + upper = upper + rec_upper + + error = 0 + if min != lower: + msg.write("%s records for 2222/0x%x sum to %d bytes minimum, but param1 shows %d\n" \ + % (descr, self.FunctionCode(), lower, min)) + error = 1 + if max != upper: + msg.write("%s records for 2222/0x%x sum to %d bytes maximum, but param1 shows %d\n" \ + % (descr, self.FunctionCode(), upper, max)) + error = 1 + + if error == 1: + sys.exit(1) + + + def MakePTVC(self, records, name_suffix, code): + """Makes a PTVC out of a request or reply record list. Possibly adds + it to the global list of PTVCs (the global list is a UniqueCollection, + so an equivalent PTVC may already be in the global list).""" + + name = "%s_%s" % (self.CName(), name_suffix) + #if any individual record has an info_str, bubble it up to the top + #so an info_string_t can be created for it + for record in records: + if record[REC_INFO_STR]: + self.req_info_str = record[REC_INFO_STR] + + ptvc = PTVC(name, records, code) + + #if the record is a duplicate, remove the req_info_str so + #that an unused info_string isn't generated + remove_info = 0 + if ptvc_lists.HasMember(ptvc): + if 'info' in repr(ptvc): + remove_info = 1 + + ptvc_test = ptvc_lists.Add(ptvc) + + if remove_info: + self.req_info_str = None + + return ptvc_test + + def CName(self): + "Returns a C symbol based on the NCP function code" + return "ncp_0x%x" % (self.__code__) + + def InfoStrName(self): + "Returns a C symbol based on the NCP function code, for the info_str" + return "info_str_0x%x" % (self.__code__) + + def MakeExpert(self, func): + self.expert_func = func + expert_hash[func] = func + + def Variables(self): + """Returns a list of variables used in the request and reply records. 
+ A variable is listed only once, even if it is used twice (once in + the request, once in the reply).""" + + variables = {} + if self.request_records: + for record in self.request_records: + var = record[REC_FIELD] + variables[var.HFName()] = var + + sub_vars = var.SubVariables() + for sv in sub_vars: + variables[sv.HFName()] = sv + + if self.reply_records: + for record in self.reply_records: + var = record[REC_FIELD] + variables[var.HFName()] = var + + sub_vars = var.SubVariables() + for sv in sub_vars: + variables[sv.HFName()] = sv + + return list(variables.values()) + + def CalculateReqConds(self): + """Returns a list of request conditions (dfilter text) used + in the reply records. A request condition is listed only once,""" + texts = {} + if self.reply_records: + for record in self.reply_records: + text = record[REC_REQ_COND] + if text != NO_REQ_COND: + texts[text] = None + + if len(texts) == 0: + self.req_conds = None + return None + + dfilter_texts = list(texts.keys()) + dfilter_texts.sort() + name = "%s_req_cond_indexes" % (self.CName(),) + return NamedList(name, dfilter_texts) + + def GetReqConds(self): + return self.req_conds + + def SetReqConds(self, new_val): + self.req_conds = new_val + + + def CompletionCodes(self, codes=None): + """Sets or returns the list of completion + codes. Internally, a NamedList is used to store the + completion codes, but the caller of this function never + realizes that because Python lists are the input and + output.""" + + if codes is None: + return self.codes + + # Sanity check + okay = 1 + for code in codes: + if code not in errors: + msg.write("Errors table does not have key 0x%04x for NCP=0x%x\n" % (code, + self.__code__)) + okay = 0 + + # Delay the exit until here so that the programmer can get + # the complete list of missing error codes + if not okay: + sys.exit(1) + + # Create CompletionCode (NamedList) object and possible + # add it to the global list of completion code lists. + name = "%s_errors" % (self.CName(),) + codes.sort() + codes_list = NamedList(name, codes) + self.codes = compcode_lists.Add(codes_list) + + self.Finalize() + + def Finalize(self): + """Adds the NCP object to the global collection of NCP + objects. This is done automatically after setting the + CompletionCode list. Yes, this is a shortcut, but it makes + our list of NCP packet definitions look neater, since an + explicit "add to global list of packets" is not needed.""" + + # Add packet to global collection of packets + packets.append(self) + +def rec(start, length, field, endianness=None, **kw): + return _rec(start, length, field, endianness, kw) + +def srec(field, endianness=None, **kw): + return _rec(-1, -1, field, endianness, kw) + +def _rec(start, length, field, endianness, kw): + # If endianness not explicitly given, use the field's + # default endiannes. + if endianness is None: + endianness = field.Endianness() + + # Setting a var? + if "var" in kw: + # Is the field an INT ? + if not isinstance(field, CountingNumber): + sys.exit("Field %s used as count variable, but not integer." \ + % (field.HFName())) + var = kw["var"] + else: + var = None + + # If 'var' not used, 'repeat' can be used. + if not var and "repeat" in kw: + repeat = kw["repeat"] + else: + repeat = None + + # Request-condition ? 
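+    # For example (hypothetical field and filter), a reply record such as
+    #   srec(SomeField, req_cond="ncp.func == 0x17")
+    # is dissected only when the matching request satisfied that
+    # display-filter condition.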
+ if "req_cond" in kw: + req_cond = kw["req_cond"] + else: + req_cond = NO_REQ_COND + + if "info_str" in kw: + req_info_str = kw["info_str"] + else: + req_info_str = None + + return [start, length, field, endianness, var, repeat, req_cond, req_info_str] + + + +############################################################################## + +ENC_LITTLE_ENDIAN = 1 # Little-Endian +ENC_BIG_ENDIAN = 0 # Big-Endian +NA = -1 # Not Applicable + +class Type: + " Virtual class for NCP field types" + type = "Type" + ftype = None + disp = "BASE_DEC" + custom_func = None + endianness = NA + values = [] + + def __init__(self, abbrev, descr, bytes, endianness = NA): + self.abbrev = abbrev + self.descr = descr + self.bytes = bytes + self.endianness = endianness + self.hfname = "hf_ncp_" + self.abbrev + + def Length(self): + return self.bytes + + def Abbreviation(self): + return self.abbrev + + def Description(self): + return self.descr + + def HFName(self): + return self.hfname + + def DFilter(self): + return "ncp." + self.abbrev + + def WiresharkFType(self): + return self.ftype + + def Display(self, newval=None): + if newval is not None: + self.disp = newval + return self.disp + + def ValuesName(self): + if self.custom_func: + return "CF_FUNC(" + self.custom_func + ")" + else: + return "NULL" + + def Mask(self): + return 0 + + def Endianness(self): + return self.endianness + + def SubVariables(self): + return [] + + def PTVCName(self): + return "NULL" + + def NWDate(self): + self.disp = "BASE_CUSTOM" + self.custom_func = "padd_date" + + def NWTime(self): + self.disp = "BASE_CUSTOM" + self.custom_func = "padd_time" + + #def __cmp__(self, other): + # return cmp(self.hfname, other.hfname) + + def __lt__(self, other): + return (self.hfname < other.hfname) + +class struct(PTVC, Type): + def __init__(self, name, items, descr=None): + name = "struct_%s" % (name,) + NamedList.__init__(self, name, []) + + self.bytes = 0 + self.descr = descr + for item in items: + if isinstance(item, Type): + field = item + length = field.Length() + endianness = field.Endianness() + var = NO_VAR + repeat = NO_REPEAT + req_cond = NO_REQ_COND + elif type(item) == type([]): + field = item[REC_FIELD] + length = item[REC_LENGTH] + endianness = item[REC_ENDIANNESS] + var = item[REC_VAR] + repeat = item[REC_REPEAT] + req_cond = item[REC_REQ_COND] + else: + assert 0, "Item %s item not handled." 
% (item,) + + ptvc_rec = PTVCRecord(field, length, endianness, var, + repeat, req_cond, None, 0) + self.list.append(ptvc_rec) + self.bytes = self.bytes + field.Length() + + self.hfname = self.name + + def Variables(self): + vars = [] + for ptvc_rec in self.list: + vars.append(ptvc_rec.Field()) + return vars + + def ReferenceString(self, var, repeat, req_cond): + return "{ PTVC_STRUCT, NO_LENGTH, &%s, NULL, NO_ENDIANNESS, %s, %s, %s }" % \ + (self.name, var, repeat, req_cond) + + def Code(self): + ett_name = self.ETTName() + x = "static int %s = -1;\n" % (ett_name,) + x = x + "static const ptvc_record ptvc_%s[] = {\n" % (self.name,) + for ptvc_rec in self.list: + x = x + " %s,\n" % (ptvc_rec.Code()) + x = x + " { NULL, NO_LENGTH, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n" + x = x + "};\n" + + x = x + "static const sub_ptvc_record %s = {\n" % (self.name,) + x = x + " &%s,\n" % (ett_name,) + if self.descr: + x = x + ' "%s",\n' % (self.descr,) + else: + x = x + " NULL,\n" + x = x + " ptvc_%s,\n" % (self.Name(),) + x = x + "};\n" + return x + + def __cmp__(self, other): + return cmp(self.HFName(), other.HFName()) + + +class byte(Type): + type = "byte" + ftype = "FT_UINT8" + def __init__(self, abbrev, descr): + Type.__init__(self, abbrev, descr, 1) + +class CountingNumber: + pass + +# Same as above. Both are provided for convenience +class uint8(Type, CountingNumber): + type = "uint8" + ftype = "FT_UINT8" + bytes = 1 + def __init__(self, abbrev, descr): + Type.__init__(self, abbrev, descr, 1) + +class uint16(Type, CountingNumber): + type = "uint16" + ftype = "FT_UINT16" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 2, endianness) + +class uint24(Type, CountingNumber): + type = "uint24" + ftype = "FT_UINT24" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 3, endianness) + +class uint32(Type, CountingNumber): + type = "uint32" + ftype = "FT_UINT32" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 4, endianness) + +class uint64(Type, CountingNumber): + type = "uint64" + ftype = "FT_UINT64" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 8, endianness) + +class eptime(Type, CountingNumber): + type = "eptime" + ftype = "FT_ABSOLUTE_TIME" + disp = "ABSOLUTE_TIME_LOCAL" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 4, endianness) + +class boolean8(uint8): + type = "boolean8" + ftype = "FT_BOOLEAN" + disp = "BASE_NONE" + +class boolean16(uint16): + type = "boolean16" + ftype = "FT_BOOLEAN" + disp = "BASE_NONE" + +class boolean24(uint24): + type = "boolean24" + ftype = "FT_BOOLEAN" + disp = "BASE_NONE" + +class boolean32(uint32): + type = "boolean32" + ftype = "FT_BOOLEAN" + disp = "BASE_NONE" + +class nstring: + pass + +class nstring8(Type, nstring): + """A string of up to (2^8)-1 characters. The first byte + gives the string length.""" + + type = "nstring8" + ftype = "FT_UINT_STRING" + disp = "BASE_NONE" + def __init__(self, abbrev, descr): + Type.__init__(self, abbrev, descr, 1) + +class nstring16(Type, nstring): + """A string of up to (2^16)-2 characters. 
The first 2 bytes + gives the string length.""" + + type = "nstring16" + ftype = "FT_UINT_STRING" + disp = "BASE_NONE" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 2, endianness) + +class nstring32(Type, nstring): + """A string of up to (2^32)-4 characters. The first 4 bytes + gives the string length.""" + + type = "nstring32" + ftype = "FT_UINT_STRING" + disp = "BASE_NONE" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 4, endianness) + +class fw_string(Type): + """A fixed-width string of n bytes.""" + + type = "fw_string" + disp = "BASE_NONE" + ftype = "FT_STRING" + + def __init__(self, abbrev, descr, bytes): + Type.__init__(self, abbrev, descr, bytes) + + +class stringz(Type): + "NUL-terminated string, with a maximum length" + + type = "stringz" + disp = "BASE_NONE" + ftype = "FT_STRINGZ" + def __init__(self, abbrev, descr): + Type.__init__(self, abbrev, descr, PROTO_LENGTH_UNKNOWN) + +class val_string(Type): + """Abstract class for val_stringN, where N is number + of bits that key takes up.""" + + type = "val_string" + disp = 'BASE_HEX' + + def __init__(self, abbrev, descr, val_string_array, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, self.bytes, endianness) + self.values = val_string_array + + def Code(self): + result = "static const value_string %s[] = {\n" \ + % (self.ValuesCName()) + for val_record in self.values: + value = val_record[0] + text = val_record[1] + value_repr = self.value_format % value + result = result + ' { %s, "%s" },\n' \ + % (value_repr, text) + + value_repr = self.value_format % 0 + result = result + " { %s, NULL },\n" % (value_repr) + result = result + "};\n" + REC_VAL_STRING_RES = self.value_format % value + return result + + def ValuesCName(self): + return "ncp_%s_vals" % (self.abbrev) + + def ValuesName(self): + return "VALS(%s)" % (self.ValuesCName()) + +class val_string8(val_string): + type = "val_string8" + ftype = "FT_UINT8" + bytes = 1 + value_format = "0x%02x" + +class val_string16(val_string): + type = "val_string16" + ftype = "FT_UINT16" + bytes = 2 + value_format = "0x%04x" + +class val_string32(val_string): + type = "val_string32" + ftype = "FT_UINT32" + bytes = 4 + value_format = "0x%08x" + +class bytes(Type): + type = 'bytes' + disp = "BASE_NONE" + ftype = 'FT_BYTES' + + def __init__(self, abbrev, descr, bytes): + Type.__init__(self, abbrev, descr, bytes, NA) + +class nbytes: + pass + +class nbytes8(Type, nbytes): + """A series of up to (2^8)-1 bytes. The first byte + gives the byte-string length.""" + + type = "nbytes8" + ftype = "FT_UINT_BYTES" + disp = "BASE_NONE" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 1, endianness) + +class nbytes16(Type, nbytes): + """A series of up to (2^16)-2 bytes. The first 2 bytes + gives the byte-string length.""" + + type = "nbytes16" + ftype = "FT_UINT_BYTES" + disp = "BASE_NONE" + def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN): + Type.__init__(self, abbrev, descr, 2, endianness) + +class nbytes32(Type, nbytes): + """A series of up to (2^32)-4 bytes. 
+class bytes(Type):
+    type = 'bytes'
+    disp = "BASE_NONE"
+    ftype = 'FT_BYTES'
+
+    def __init__(self, abbrev, descr, bytes):
+        Type.__init__(self, abbrev, descr, bytes, NA)
+
+class nbytes:
+    pass
+
+class nbytes8(Type, nbytes):
+    """A series of up to (2^8)-1 bytes. The first byte
+    gives the byte-string length."""
+
+    type = "nbytes8"
+    ftype = "FT_UINT_BYTES"
+    disp = "BASE_NONE"
+    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
+        Type.__init__(self, abbrev, descr, 1, endianness)
+
+class nbytes16(Type, nbytes):
+    """A series of up to (2^16)-2 bytes. The first 2 bytes
+    give the byte-string length."""
+
+    type = "nbytes16"
+    ftype = "FT_UINT_BYTES"
+    disp = "BASE_NONE"
+    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
+        Type.__init__(self, abbrev, descr, 2, endianness)
+
+class nbytes32(Type, nbytes):
+    """A series of up to (2^32)-4 bytes. The first 4 bytes
+    give the byte-string length."""
+
+    type = "nbytes32"
+    ftype = "FT_UINT_BYTES"
+    disp = "BASE_NONE"
+    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
+        Type.__init__(self, abbrev, descr, 4, endianness)
+
+class bf_uint(Type):
+    type = "bf_uint"
+    disp = None
+
+    def __init__(self, bitmask, abbrev, descr, endianness=ENC_LITTLE_ENDIAN):
+        Type.__init__(self, abbrev, descr, self.bytes, endianness)
+        self.bitmask = bitmask
+
+    def Mask(self):
+        return self.bitmask
+
+class bf_val_str(bf_uint):
+    type = "bf_uint"
+    disp = None
+
+    def __init__(self, bitmask, abbrev, descr, val_string_array, endianness=ENC_LITTLE_ENDIAN):
+        bf_uint.__init__(self, bitmask, abbrev, descr, endianness)
+        self.values = val_string_array
+
+    def ValuesName(self):
+        return "VALS(%s)" % (self.ValuesCName())
+
+class bf_val_str8(bf_val_str, val_string8):
+    type = "bf_val_str8"
+    ftype = "FT_UINT8"
+    disp = "BASE_HEX"
+    bytes = 1
+
+class bf_val_str16(bf_val_str, val_string16):
+    type = "bf_val_str16"
+    ftype = "FT_UINT16"
+    disp = "BASE_HEX"
+    bytes = 2
+
+class bf_val_str32(bf_val_str, val_string32):
+    type = "bf_val_str32"
+    ftype = "FT_UINT32"
+    disp = "BASE_HEX"
+    bytes = 4
+
+class bf_boolean:
+    disp = "BASE_NONE"
+
+class bf_boolean8(bf_uint, boolean8, bf_boolean):
+    type = "bf_boolean8"
+    ftype = "FT_BOOLEAN"
+    disp = "8"
+    bytes = 1
+
+class bf_boolean16(bf_uint, boolean16, bf_boolean):
+    type = "bf_boolean16"
+    ftype = "FT_BOOLEAN"
+    disp = "16"
+    bytes = 2
+
+class bf_boolean24(bf_uint, boolean24, bf_boolean):
+    type = "bf_boolean24"
+    ftype = "FT_BOOLEAN"
+    disp = "24"
+    bytes = 3
+
+class bf_boolean32(bf_uint, boolean32, bf_boolean):
+    type = "bf_boolean32"
+    ftype = "FT_BOOLEAN"
+    disp = "32"
+    bytes = 4
+
+class bitfield(Type):
+    type = "bitfield"
+    disp = 'BASE_HEX'
+
+    def __init__(self, vars):
+        var_hash = {}
+        for var in vars:
+            if isinstance(var, bf_boolean):
+                if not isinstance(var, self.bf_type):
+                    print("%s must be of type %s" % \
+                            (var.Abbreviation(),
+                            self.bf_type))
+                    sys.exit(1)
+            var_hash[var.bitmask] = var
+
+        bitmasks = list(var_hash.keys())
+        bitmasks.sort()
+        bitmasks.reverse()
+
+        ordered_vars = []
+        for bitmask in bitmasks:
+            var = var_hash[bitmask]
+            ordered_vars.append(var)
+
+        self.vars = ordered_vars
+        self.ptvcname = "ncp_%s_bitfield" % (self.abbrev,)
+        self.hfname = "hf_ncp_%s" % (self.abbrev,)
+        self.sub_ptvc = PTVCBitfield(self.PTVCName(), self.vars)
+
+    def SubVariables(self):
+        return self.vars
+
+    def SubVariablesPTVC(self):
+        return self.sub_ptvc
+
+    def PTVCName(self):
+        return self.ptvcname
+
+
+class bitfield8(bitfield, uint8):
+    type = "bitfield8"
+    ftype = "FT_UINT8"
+    bf_type = bf_boolean8
+
+    def __init__(self, abbrev, descr, vars):
+        uint8.__init__(self, abbrev, descr)
+        bitfield.__init__(self, vars)
+
+class bitfield16(bitfield, uint16):
+    type = "bitfield16"
+    ftype = "FT_UINT16"
+    bf_type = bf_boolean16
+
+    def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
+        uint16.__init__(self, abbrev, descr, endianness)
+        bitfield.__init__(self, vars)
+
+class bitfield24(bitfield, uint24):
+    type = "bitfield24"
+    ftype = "FT_UINT24"
+    bf_type = bf_boolean24
+
+    def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
+        uint24.__init__(self, abbrev, descr, endianness)
+        bitfield.__init__(self, vars)
+
+class bitfield32(bitfield, uint32):
+    type = "bitfield32"
+    ftype = "FT_UINT32"
+    bf_type = bf_boolean32
+
+    def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
+        uint32.__init__(self, abbrev, descr, endianness)
+        bitfield.__init__(self, vars)
+
+#
+# Force the endianness of a field to a non-default value; used in
+# the list of fields of a structure.
+#
+def endian(field, endianness):
+    return [-1, field.Length(), field, endianness, NO_VAR, NO_REPEAT, NO_REQ_COND]
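+
+# Illustrative sketch, not referenced by any packet definition in this
+# file: how the bitfield/bf_boolean DSL above fits together.  The
+# "example_*" names are hypothetical and exist only for documentation.
+_ExampleFlags = bitfield8("example_flags", "Example Flags", [
+        bf_boolean8(0x01, "example_flag_a", "Flag A"),
+        bf_boolean8(0x02, "example_flag_b", "Flag B"),
+])
+# endian() wraps a field in the 7-element item list that struct.__init__
+# unpacks (item[1] = length, item[2] = field, item[3] = endianness),
+# forcing big-endian decoding of that one structure member:
+_ExampleItem = endian(_ExampleFlags, ENC_BIG_ENDIAN)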
+
+##############################################################################
+# NCP Field Types. Defined in Appendix A of "Programmer's Guide..."
+##############################################################################
+
+AbortQueueFlag = val_string8("abort_q_flag", "Abort Queue Flag", [
+        [ 0x00, "Place at End of Queue" ],
+        [ 0x01, "Do Not Place Spool File, Examine Flags" ],
+])
+AcceptedMaxSize = uint16("accepted_max_size", "Accepted Max Size")
+AcceptedMaxSize64 = uint64("accepted_max_size64", "Accepted Max Size")
+AccessControl = val_string8("access_control", "Access Control", [
+        [ 0x00, "Open for read by this client" ],
+        [ 0x01, "Open for write by this client" ],
+        [ 0x02, "Deny read requests from other stations" ],
+        [ 0x03, "Deny write requests from other stations" ],
+        [ 0x04, "File detached" ],
+        [ 0x05, "TTS holding detach" ],
+        [ 0x06, "TTS holding open" ],
+])
+AccessDate = uint16("access_date", "Access Date")
+AccessDate.NWDate()
+AccessMode = bitfield8("access_mode", "Access Mode", [
+        bf_boolean8(0x01, "acc_mode_read", "Read Access"),
+        bf_boolean8(0x02, "acc_mode_write", "Write Access"),
+        bf_boolean8(0x04, "acc_mode_deny_read", "Deny Read Access"),
+        bf_boolean8(0x08, "acc_mode_deny_write", "Deny Write Access"),
+        bf_boolean8(0x10, "acc_mode_comp", "Compatibility Mode"),
+])
+AccessPrivileges = bitfield8("access_privileges", "Access Privileges", [
+        bf_boolean8(0x01, "acc_priv_read", "Read Privileges (files only)"),
+        bf_boolean8(0x02, "acc_priv_write", "Write Privileges (files only)"),
+        bf_boolean8(0x04, "acc_priv_open", "Open Privileges (files only)"),
+        bf_boolean8(0x08, "acc_priv_create", "Create Privileges (files only)"),
+        bf_boolean8(0x10, "acc_priv_delete", "Delete Privileges (files only)"),
+        bf_boolean8(0x20, "acc_priv_parent", "Parental Privileges (directories only for creating, deleting, and renaming)"),
+        bf_boolean8(0x40, "acc_priv_search", "Search Privileges (directories only)"),
+        bf_boolean8(0x80, "acc_priv_modify", "Modify File Status Flags Privileges (files and directories)"),
+])
+AccessRightsMask = bitfield8("access_rights_mask", "Access Rights", [
+        bf_boolean8(0x0001, "acc_rights_read", "Read Rights"),
+        bf_boolean8(0x0002, "acc_rights_write", "Write Rights"),
+        bf_boolean8(0x0004, "acc_rights_open", "Open Rights"),
+        bf_boolean8(0x0008, "acc_rights_create", "Create Rights"),
+        bf_boolean8(0x0010, "acc_rights_delete", "Delete Rights"),
+        bf_boolean8(0x0020, "acc_rights_parent", "Parental Rights"),
+        bf_boolean8(0x0040, "acc_rights_search", "Search Rights"),
+        bf_boolean8(0x0080, "acc_rights_modify", "Modify Rights"),
+])
+AccessRightsMaskWord = bitfield16("access_rights_mask_word", "Access Rights", [
+        bf_boolean16(0x0001, "acc_rights1_read", "Read Rights"),
+        bf_boolean16(0x0002, "acc_rights1_write", "Write Rights"),
+        bf_boolean16(0x0004, "acc_rights1_open", "Open Rights"),
+        bf_boolean16(0x0008, "acc_rights1_create", "Create Rights"),
+        bf_boolean16(0x0010, "acc_rights1_delete", "Delete Rights"),
+        bf_boolean16(0x0020, "acc_rights1_parent", "Parental Rights"),
+        bf_boolean16(0x0040, "acc_rights1_search", "Search Rights"),
+        bf_boolean16(0x0080, "acc_rights1_modify", "Modify Rights"),
+        bf_boolean16(0x0100, "acc_rights1_supervisor", "Supervisor Access
Rights"), +]) +AccountBalance = uint32("account_balance", "Account Balance") +AccountVersion = uint8("acct_version", "Acct Version") +ActionFlag = bitfield8("action_flag", "Action Flag", [ + bf_boolean8(0x01, "act_flag_open", "Open"), + bf_boolean8(0x02, "act_flag_replace", "Replace"), + bf_boolean8(0x10, "act_flag_create", "Create"), +]) +ActiveConnBitList = fw_string("active_conn_bit_list", "Active Connection List", 512) +ActiveIndexedFiles = uint16("active_indexed_files", "Active Indexed Files") +ActualMaxBinderyObjects = uint16("actual_max_bindery_objects", "Actual Max Bindery Objects") +ActualMaxIndexedFiles = uint16("actual_max_indexed_files", "Actual Max Indexed Files") +ActualMaxOpenFiles = uint16("actual_max_open_files", "Actual Max Open Files") +ActualMaxSimultaneousTransactions = uint16("actual_max_sim_trans", "Actual Max Simultaneous Transactions") +ActualMaxUsedDirectoryEntries = uint16("actual_max_used_directory_entries", "Actual Max Used Directory Entries") +ActualMaxUsedRoutingBuffers = uint16("actual_max_used_routing_buffers", "Actual Max Used Routing Buffers") +ActualResponseCount = uint16("actual_response_count", "Actual Response Count") +AddNameSpaceAndVol = stringz("add_nm_spc_and_vol", "Add Name Space and Volume") +AFPEntryID = uint32("afp_entry_id", "AFP Entry ID", ENC_BIG_ENDIAN) +AFPEntryID.Display("BASE_HEX") +AllocAvailByte = uint32("alloc_avail_byte", "Bytes Available for Allocation") +AllocateMode = bitfield16("alloc_mode", "Allocate Mode", [ + bf_val_str16(0x0001, "alloc_dir_hdl", "Dir Handle Type",[ + [0x00, "Permanent"], + [0x01, "Temporary"], + ]), + bf_boolean16(0x0002, "alloc_spec_temp_dir_hdl","Special Temporary Directory Handle"), + bf_boolean16(0x4000, "alloc_reply_lvl2","Reply Level 2"), + bf_boolean16(0x8000, "alloc_dst_name_spc","Destination Name Space Input Parameter"), +]) +AllocationBlockSize = uint32("allocation_block_size", "Allocation Block Size") +AllocFreeCount = uint32("alloc_free_count", "Reclaimable Free Bytes") +ApplicationNumber = uint16("application_number", "Application Number") +ArchivedTime = uint16("archived_time", "Archived Time") +ArchivedTime.NWTime() +ArchivedDate = uint16("archived_date", "Archived Date") +ArchivedDate.NWDate() +ArchiverID = uint32("archiver_id", "Archiver ID", ENC_BIG_ENDIAN) +ArchiverID.Display("BASE_HEX") +AssociatedNameSpace = uint8("associated_name_space", "Associated Name Space") +AttachDuringProcessing = uint16("attach_during_processing", "Attach During Processing") +AttachedIndexedFiles = uint8("attached_indexed_files", "Attached Indexed Files") +AttachWhileProcessingAttach = uint16("attach_while_processing_attach", "Attach While Processing Attach") +Attributes = uint32("attributes", "Attributes") +AttributesDef = bitfield8("attr_def", "Attributes", [ + bf_boolean8(0x01, "att_def_ro", "Read Only"), + bf_boolean8(0x02, "att_def_hidden", "Hidden"), + bf_boolean8(0x04, "att_def_system", "System"), + bf_boolean8(0x08, "att_def_execute", "Execute"), + bf_boolean8(0x10, "att_def_sub_only", "Subdirectory"), + bf_boolean8(0x20, "att_def_archive", "Archive"), + bf_boolean8(0x80, "att_def_shareable", "Shareable"), +]) +AttributesDef16 = bitfield16("attr_def_16", "Attributes", [ + bf_boolean16(0x0001, "att_def16_ro", "Read Only"), + bf_boolean16(0x0002, "att_def16_hidden", "Hidden"), + bf_boolean16(0x0004, "att_def16_system", "System"), + bf_boolean16(0x0008, "att_def16_execute", "Execute"), + bf_boolean16(0x0010, "att_def16_sub_only", "Subdirectory"), + bf_boolean16(0x0020, "att_def16_archive", "Archive"), + 
bf_boolean16(0x0080, "att_def16_shareable", "Shareable"), + bf_boolean16(0x1000, "att_def16_transaction", "Transactional"), + bf_boolean16(0x4000, "att_def16_read_audit", "Read Audit"), + bf_boolean16(0x8000, "att_def16_write_audit", "Write Audit"), +]) +AttributesDef32 = bitfield32("attr_def_32", "Attributes", [ + bf_boolean32(0x00000001, "att_def32_ro", "Read Only"), + bf_boolean32(0x00000002, "att_def32_hidden", "Hidden"), + bf_boolean32(0x00000004, "att_def32_system", "System"), + bf_boolean32(0x00000008, "att_def32_execute", "Execute"), + bf_boolean32(0x00000010, "att_def32_sub_only", "Subdirectory"), + bf_boolean32(0x00000020, "att_def32_archive", "Archive"), + bf_boolean32(0x00000040, "att_def32_execute_confirm", "Execute Confirm"), + bf_boolean32(0x00000080, "att_def32_shareable", "Shareable"), + bf_val_str32(0x00000700, "att_def32_search", "Search Mode",[ + [0, "Search on all Read Only Opens"], + [1, "Search on Read Only Opens with no Path"], + [2, "Shell Default Search Mode"], + [3, "Search on all Opens with no Path"], + [4, "Do not Search"], + [5, "Reserved - Do not Use"], + [6, "Search on All Opens"], + [7, "Reserved - Do not Use"], + ]), + bf_boolean32(0x00000800, "att_def32_no_suballoc", "No Suballoc"), + bf_boolean32(0x00001000, "att_def32_transaction", "Transactional"), + bf_boolean32(0x00004000, "att_def32_read_audit", "Read Audit"), + bf_boolean32(0x00008000, "att_def32_write_audit", "Write Audit"), + bf_boolean32(0x00010000, "att_def32_purge", "Immediate Purge"), + bf_boolean32(0x00020000, "att_def32_reninhibit", "Rename Inhibit"), + bf_boolean32(0x00040000, "att_def32_delinhibit", "Delete Inhibit"), + bf_boolean32(0x00080000, "att_def32_cpyinhibit", "Copy Inhibit"), + bf_boolean32(0x00100000, "att_def32_file_audit", "File Audit"), + bf_boolean32(0x00200000, "att_def32_reserved", "Reserved"), + bf_boolean32(0x00400000, "att_def32_data_migrate", "Data Migrated"), + bf_boolean32(0x00800000, "att_def32_inhibit_dm", "Inhibit Data Migration"), + bf_boolean32(0x01000000, "att_def32_dm_save_key", "Data Migration Save Key"), + bf_boolean32(0x02000000, "att_def32_im_comp", "Immediate Compress"), + bf_boolean32(0x04000000, "att_def32_comp", "Compressed"), + bf_boolean32(0x08000000, "att_def32_comp_inhibit", "Inhibit Compression"), + bf_boolean32(0x10000000, "att_def32_reserved2", "Reserved"), + bf_boolean32(0x20000000, "att_def32_cant_compress", "Can't Compress"), + bf_boolean32(0x40000000, "att_def32_attr_archive", "Archive Attributes"), + bf_boolean32(0x80000000, "att_def32_reserved3", "Reserved"), +]) +AttributeValidFlag = uint32("attribute_valid_flag", "Attribute Valid Flag") +AuditFileVersionDate = uint16("audit_file_ver_date", "Audit File Version Date") +AuditFileVersionDate.NWDate() +AuditFlag = val_string8("audit_flag", "Audit Flag", [ + [ 0x00, "Do NOT audit object" ], + [ 0x01, "Audit object" ], +]) +AuditHandle = uint32("audit_handle", "Audit File Handle") +AuditHandle.Display("BASE_HEX") +AuditID = uint32("audit_id", "Audit ID", ENC_BIG_ENDIAN) +AuditID.Display("BASE_HEX") +AuditIDType = val_string16("audit_id_type", "Audit ID Type", [ + [ 0x0000, "Volume" ], + [ 0x0001, "Container" ], +]) +AuditVersionDate = uint16("audit_ver_date", "Auditing Version Date") +AuditVersionDate.NWDate() +AvailableBlocks = uint32("available_blocks", "Available Blocks") +AvailableBlocks64 = uint64("available_blocks64", "Available Blocks") +AvailableClusters = uint16("available_clusters", "Available Clusters") +AvailableDirectorySlots = uint16("available_directory_slots", "Available 
Directory Slots") +AvailableDirEntries = uint32("available_dir_entries", "Available Directory Entries") +AvailableDirEntries64 = uint64("available_dir_entries64", "Available Directory Entries") +AvailableIndexedFiles = uint16("available_indexed_files", "Available Indexed Files") + +BackgroundAgedWrites = uint32("background_aged_writes", "Background Aged Writes") +BackgroundDirtyWrites = uint32("background_dirty_writes", "Background Dirty Writes") +BadLogicalConnectionCount = uint16("bad_logical_connection_count", "Bad Logical Connection Count") +BannerName = fw_string("banner_name", "Banner Name", 14) +BaseDirectoryID = uint32("base_directory_id", "Base Directory ID", ENC_BIG_ENDIAN) +BaseDirectoryID.Display("BASE_HEX") +binderyContext = nstring8("bindery_context", "Bindery Context") +BitMap = bytes("bit_map", "Bit Map", 512) +BlockNumber = uint32("block_number", "Block Number") +BlockSize = uint16("block_size", "Block Size") +BlockSizeInSectors = uint32("block_size_in_sectors", "Block Size in Sectors") +BoardInstalled = uint8("board_installed", "Board Installed") +BoardNumber = uint32("board_number", "Board Number") +BoardNumbers = uint32("board_numbers", "Board Numbers") +BufferSize = uint16("buffer_size", "Buffer Size") +BusString = stringz("bus_string", "Bus String") +BusType = val_string8("bus_type", "Bus Type", [ + [0x00, "ISA"], + [0x01, "Micro Channel" ], + [0x02, "EISA"], + [0x04, "PCI"], + [0x08, "PCMCIA"], + [0x10, "ISA"], + [0x14, "ISA/PCI"], +]) +BytesActuallyTransferred = uint32("bytes_actually_transferred", "Bytes Actually Transferred") +BytesActuallyTransferred64bit = uint64("bytes_actually_transferred_64", "Bytes Actually Transferred", ENC_LITTLE_ENDIAN) +BytesActuallyTransferred64bit.Display("BASE_DEC") +BytesRead = fw_string("bytes_read", "Bytes Read", 6) +BytesToCopy = uint32("bytes_to_copy", "Bytes to Copy") +BytesToCopy64bit = uint64("bytes_to_copy_64", "Bytes to Copy") +BytesToCopy64bit.Display("BASE_DEC") +BytesWritten = fw_string("bytes_written", "Bytes Written", 6) + +CacheAllocations = uint32("cache_allocations", "Cache Allocations") +CacheBlockScrapped = uint16("cache_block_scrapped", "Cache Block Scrapped") +CacheBufferCount = uint16("cache_buffer_count", "Cache Buffer Count") +CacheBufferSize = uint16("cache_buffer_size", "Cache Buffer Size") +CacheFullWriteRequests = uint32("cache_full_write_requests", "Cache Full Write Requests") +CacheGetRequests = uint32("cache_get_requests", "Cache Get Requests") +CacheHitOnUnavailableBlock = uint16("cache_hit_on_unavailable_block", "Cache Hit On Unavailable Block") +CacheHits = uint32("cache_hits", "Cache Hits") +CacheMisses = uint32("cache_misses", "Cache Misses") +CachePartialWriteRequests = uint32("cache_partial_write_requests", "Cache Partial Write Requests") +CacheReadRequests = uint32("cache_read_requests", "Cache Read Requests") +CacheWriteRequests = uint32("cache_write_requests", "Cache Write Requests") +CategoryName = stringz("category_name", "Category Name") +CCFileHandle = uint32("cc_file_handle", "File Handle") +CCFileHandle.Display("BASE_HEX") +CCFunction = val_string8("cc_function", "OP-Lock Flag", [ + [ 0x01, "Clear OP-Lock" ], + [ 0x02, "Acknowledge Callback" ], + [ 0x03, "Decline Callback" ], + [ 0x04, "Level 2" ], +]) +ChangeBits = bitfield16("change_bits", "Change Bits", [ + bf_boolean16(0x0001, "change_bits_modify", "Modify Name"), + bf_boolean16(0x0002, "change_bits_fatt", "File Attributes"), + bf_boolean16(0x0004, "change_bits_cdate", "Creation Date"), + bf_boolean16(0x0008, "change_bits_ctime", 
"Creation Time"), + bf_boolean16(0x0010, "change_bits_owner", "Owner ID"), + bf_boolean16(0x0020, "change_bits_adate", "Archive Date"), + bf_boolean16(0x0040, "change_bits_atime", "Archive Time"), + bf_boolean16(0x0080, "change_bits_aid", "Archiver ID"), + bf_boolean16(0x0100, "change_bits_udate", "Update Date"), + bf_boolean16(0x0200, "change_bits_utime", "Update Time"), + bf_boolean16(0x0400, "change_bits_uid", "Update ID"), + bf_boolean16(0x0800, "change_bits_acc_date", "Access Date"), + bf_boolean16(0x1000, "change_bits_max_acc_mask", "Maximum Access Mask"), + bf_boolean16(0x2000, "change_bits_max_space", "Maximum Space"), +]) +ChannelState = val_string8("channel_state", "Channel State", [ + [ 0x00, "Channel is running" ], + [ 0x01, "Channel is stopping" ], + [ 0x02, "Channel is stopped" ], + [ 0x03, "Channel is not functional" ], +]) +ChannelSynchronizationState = val_string8("channel_synchronization_state", "Channel Synchronization State", [ + [ 0x00, "Channel is not being used" ], + [ 0x02, "NetWare is using the channel; no one else wants it" ], + [ 0x04, "NetWare is using the channel; someone else wants it" ], + [ 0x06, "Someone else is using the channel; NetWare does not need it" ], + [ 0x08, "Someone else is using the channel; NetWare needs it" ], + [ 0x0A, "Someone else has released the channel; NetWare should use it" ], +]) +ChargeAmount = uint32("charge_amount", "Charge Amount") +ChargeInformation = uint32("charge_information", "Charge Information") +ClientCompFlag = val_string16("client_comp_flag", "Completion Flag", [ + [ 0x0000, "Successful" ], + [ 0x0001, "Illegal Station Number" ], + [ 0x0002, "Client Not Logged In" ], + [ 0x0003, "Client Not Accepting Messages" ], + [ 0x0004, "Client Already has a Message" ], + [ 0x0096, "No Alloc Space for the Message" ], + [ 0x00fd, "Bad Station Number" ], + [ 0x00ff, "Failure" ], +]) +ClientIDNumber = uint32("client_id_number", "Client ID Number", ENC_BIG_ENDIAN) +ClientIDNumber.Display("BASE_HEX") +ClientList = uint32("client_list", "Client List") +ClientListCount = uint16("client_list_cnt", "Client List Count") +ClientListLen = uint8("client_list_len", "Client List Length") +ClientName = nstring8("client_name", "Client Name") +ClientRecordArea = fw_string("client_record_area", "Client Record Area", 152) +ClientStation = uint8("client_station", "Client Station") +ClientStationLong = uint32("client_station_long", "Client Station") +ClientTaskNumber = uint8("client_task_number", "Client Task Number") +ClientTaskNumberLong = uint32("client_task_number_long", "Client Task Number") +ClusterCount = uint16("cluster_count", "Cluster Count") +ClustersUsedByDirectories = uint32("clusters_used_by_directories", "Clusters Used by Directories") +ClustersUsedByExtendedDirectories = uint32("clusters_used_by_extended_dirs", "Clusters Used by Extended Directories") +ClustersUsedByFAT = uint32("clusters_used_by_fat", "Clusters Used by FAT") +CodePage = uint32("code_page", "Code Page") +ComCnts = uint16("com_cnts", "Communication Counters") +Comment = nstring8("comment", "Comment") +CommentType = uint16("comment_type", "Comment Type") +CompletionCode = uint32("ncompletion_code", "Completion Code") +CompressedDataStreamsCount = uint32("compressed_data_streams_count", "Compressed Data Streams Count") +CompressedLimboDataStreamsCount = uint32("compressed_limbo_data_streams_count", "Compressed Limbo Data Streams Count") +CompressedSectors = uint32("compressed_sectors", "Compressed Sectors") +compressionStage = uint32("compression_stage", "Compression 
Stage") +compressVolume = uint32("compress_volume", "Volume Compression") +ConfigMajorVN = uint8("config_major_vn", "Configuration Major Version Number") +ConfigMinorVN = uint8("config_minor_vn", "Configuration Minor Version Number") +ConfigurationDescription = fw_string("configuration_description", "Configuration Description", 80) +ConfigurationText = fw_string("configuration_text", "Configuration Text", 160) +ConfiguredMaxBinderyObjects = uint16("configured_max_bindery_objects", "Configured Max Bindery Objects") +ConfiguredMaxOpenFiles = uint16("configured_max_open_files", "Configured Max Open Files") +ConfiguredMaxRoutingBuffers = uint16("configured_max_routing_buffers", "Configured Max Routing Buffers") +ConfiguredMaxSimultaneousTransactions = uint16("cfg_max_simultaneous_transactions", "Configured Max Simultaneous Transactions") +ConnectedLAN = uint32("connected_lan", "LAN Adapter") +ConnectionControlBits = bitfield8("conn_ctrl_bits", "Connection Control", [ + bf_boolean8(0x01, "enable_brdcasts", "Enable Broadcasts"), + bf_boolean8(0x02, "enable_personal_brdcasts", "Enable Personal Broadcasts"), + bf_boolean8(0x04, "enable_wdog_messages", "Enable Watchdog Message"), + bf_boolean8(0x10, "disable_brdcasts", "Disable Broadcasts"), + bf_boolean8(0x20, "disable_personal_brdcasts", "Disable Personal Broadcasts"), + bf_boolean8(0x40, "disable_wdog_messages", "Disable Watchdog Message"), +]) +ConnectionListCount = uint32("conn_list_count", "Connection List Count") +ConnectionList = uint32("connection_list", "Connection List") +ConnectionNumber = uint32("connection_number", "Connection Number", ENC_BIG_ENDIAN) +ConnectionNumberList = nstring8("connection_number_list", "Connection Number List") +ConnectionNumberWord = uint16("conn_number_word", "Connection Number") +ConnectionNumberByte = uint8("conn_number_byte", "Connection Number") +ConnectionServiceType = val_string8("connection_service_type","Connection Service Type",[ + [ 0x01, "CLIB backward Compatibility" ], + [ 0x02, "NCP Connection" ], + [ 0x03, "NLM Connection" ], + [ 0x04, "AFP Connection" ], + [ 0x05, "FTAM Connection" ], + [ 0x06, "ANCP Connection" ], + [ 0x07, "ACP Connection" ], + [ 0x08, "SMB Connection" ], + [ 0x09, "Winsock Connection" ], +]) +ConnectionsInUse = uint16("connections_in_use", "Connections In Use") +ConnectionsMaxUsed = uint16("connections_max_used", "Connections Max Used") +ConnectionsSupportedMax = uint16("connections_supported_max", "Connections Supported Max") +ConnectionType = val_string8("connection_type", "Connection Type", [ + [ 0x00, "Not in use" ], + [ 0x02, "NCP" ], + [ 0x0b, "UDP (for IP)" ], +]) +ConnListLen = uint8("conn_list_len", "Connection List Length") +connList = uint32("conn_list", "Connection List") +ControlFlags = val_string8("control_flags", "Control Flags", [ + [ 0x00, "Forced Record Locking is Off" ], + [ 0x01, "Forced Record Locking is On" ], +]) +ControllerDriveNumber = uint8("controller_drive_number", "Controller Drive Number") +ControllerNumber = uint8("controller_number", "Controller Number") +ControllerType = uint8("controller_type", "Controller Type") +Cookie1 = uint32("cookie_1", "Cookie 1") +Cookie2 = uint32("cookie_2", "Cookie 2") +Copies = uint8( "copies", "Copies" ) +CoprocessorFlag = uint32("co_processor_flag", "CoProcessor Present Flag") +CoProcessorString = stringz("co_proc_string", "CoProcessor String") +CounterMask = val_string8("counter_mask", "Counter Mask", [ + [ 0x00, "Counter is Valid" ], + [ 0x01, "Counter is not Valid" ], +]) +CPUNumber = uint32("cpu_number", 
"CPU Number") +CPUString = stringz("cpu_string", "CPU String") +CPUType = val_string8("cpu_type", "CPU Type", [ + [ 0x00, "80386" ], + [ 0x01, "80486" ], + [ 0x02, "Pentium" ], + [ 0x03, "Pentium Pro" ], +]) +CreationDate = uint16("creation_date", "Creation Date") +CreationDate.NWDate() +CreationTime = uint16("creation_time", "Creation Time") +CreationTime.NWTime() +CreatorID = uint32("creator_id", "Creator ID", ENC_BIG_ENDIAN) +CreatorID.Display("BASE_HEX") +CreatorNameSpaceNumber = val_string8("creator_name_space_number", "Creator Name Space Number", [ + [ 0x00, "DOS Name Space" ], + [ 0x01, "MAC Name Space" ], + [ 0x02, "NFS Name Space" ], + [ 0x04, "Long Name Space" ], +]) +CreditLimit = uint32("credit_limit", "Credit Limit") +CtrlFlags = val_string16("ctrl_flags", "Control Flags", [ + [ 0x0000, "Do Not Return File Name" ], + [ 0x0001, "Return File Name" ], +]) +curCompBlks = uint32("cur_comp_blks", "Current Compression Blocks") +curInitialBlks = uint32("cur_initial_blks", "Current Initial Blocks") +curIntermediateBlks = uint32("cur_inter_blks", "Current Intermediate Blocks") +CurNumOfRTags = uint32("cur_num_of_r_tags", "Current Number of Resource Tags") +CurrentBlockBeingDecompressed = uint32("cur_blk_being_dcompress", "Current Block Being Decompressed") +CurrentChangedFATs = uint16("current_changed_fats", "Current Changed FAT Entries") +CurrentEntries = uint32("current_entries", "Current Entries") +CurrentFormType = uint8( "current_form_type", "Current Form Type" ) +CurrentLFSCounters = uint32("current_lfs_counters", "Current LFS Counters") +CurrentlyUsedRoutingBuffers = uint16("currently_used_routing_buffers", "Currently Used Routing Buffers") +CurrentOpenFiles = uint16("current_open_files", "Current Open Files") +CurrentReferenceID = uint16("curr_ref_id", "Current Reference ID") +CurrentServers = uint32("current_servers", "Current Servers") +CurrentServerTime = uint32("current_server_time", "Time Elapsed Since Server Was Brought Up") +CurrentSpace = uint32("current_space", "Current Space") +CurrentTransactionCount = uint32("current_trans_count", "Current Transaction Count") +CurrentUsedBinderyObjects = uint16("current_used_bindery_objects", "Current Used Bindery Objects") +CurrentUsedDynamicSpace = uint32("current_used_dynamic_space", "Current Used Dynamic Space") +CustomCnts = uint32("custom_cnts", "Custom Counters") +CustomCount = uint32("custom_count", "Custom Count") +CustomCounters = uint32("custom_counters", "Custom Counters") +CustomString = nstring8("custom_string", "Custom String") +CustomVariableValue = uint32("custom_var_value", "Custom Variable Value") + +Data = nstring8("data", "Data") +Data64 = stringz("data64", "Data") +DataForkFirstFAT = uint32("data_fork_first_fat", "Data Fork First FAT Entry") +DataForkLen = uint32("data_fork_len", "Data Fork Len") +DataForkSize = uint32("data_fork_size", "Data Fork Size") +DataSize = uint32("data_size", "Data Size") +DataStream = val_string8("data_stream", "Data Stream", [ + [ 0x00, "Resource Fork or DOS" ], + [ 0x01, "Data Fork" ], +]) +DataStreamFATBlocks = uint32("data_stream_fat_blks", "Data Stream FAT Blocks") +DataStreamName = nstring8("data_stream_name", "Data Stream Name") +DataStreamNumber = uint8("data_stream_number", "Data Stream Number") +DataStreamNumberLong = uint32("data_stream_num_long", "Data Stream Number") +DataStreamsCount = uint32("data_streams_count", "Data Streams Count") +DataStreamSize = uint32("data_stream_size", "Size") +DataStreamSize64 = uint64("data_stream_size_64", "Size") +DataStreamSpaceAlloc = 
uint32( "data_stream_space_alloc", "Space Allocated for Data Stream" ) +DataTypeFlag = val_string8("data_type_flag", "Data Type Flag", [ + [ 0x00, "ASCII Data" ], + [ 0x01, "UTF8 Data" ], +]) +Day = uint8("s_day", "Day") +DayOfWeek = val_string8("s_day_of_week", "Day of Week", [ + [ 0x00, "Sunday" ], + [ 0x01, "Monday" ], + [ 0x02, "Tuesday" ], + [ 0x03, "Wednesday" ], + [ 0x04, "Thursday" ], + [ 0x05, "Friday" ], + [ 0x06, "Saturday" ], +]) +DeadMirrorTable = bytes("dead_mirror_table", "Dead Mirror Table", 32) +DefinedDataStreams = uint8("defined_data_streams", "Defined Data Streams") +DefinedNameSpaces = uint8("defined_name_spaces", "Defined Name Spaces") +DeletedDate = uint16("deleted_date", "Deleted Date") +DeletedDate.NWDate() +DeletedFileTime = uint32( "deleted_file_time", "Deleted File Time") +DeletedFileTime.Display("BASE_HEX") +DeletedTime = uint16("deleted_time", "Deleted Time") +DeletedTime.NWTime() +DeletedID = uint32( "delete_id", "Deleted ID", ENC_BIG_ENDIAN) +DeletedID.Display("BASE_HEX") +DeleteExistingFileFlag = val_string8("delete_existing_file_flag", "Delete Existing File Flag", [ + [ 0x00, "Do Not Delete Existing File" ], + [ 0x01, "Delete Existing File" ], +]) +DenyReadCount = uint16("deny_read_count", "Deny Read Count") +DenyWriteCount = uint16("deny_write_count", "Deny Write Count") +DescriptionStrings = fw_string("description_string", "Description", 100) +DesiredAccessRights = bitfield16("desired_access_rights", "Desired Access Rights", [ + bf_boolean16(0x0001, "dsired_acc_rights_read_o", "Read Only"), + bf_boolean16(0x0002, "dsired_acc_rights_write_o", "Write Only"), + bf_boolean16(0x0004, "dsired_acc_rights_deny_r", "Deny Read"), + bf_boolean16(0x0008, "dsired_acc_rights_deny_w", "Deny Write"), + bf_boolean16(0x0010, "dsired_acc_rights_compat", "Compatibility"), + bf_boolean16(0x0040, "dsired_acc_rights_w_thru", "File Write Through"), + bf_boolean16(0x0400, "dsired_acc_rights_del_file_cls", "Delete File Close"), +]) +DesiredResponseCount = uint16("desired_response_count", "Desired Response Count") +DestDirHandle = uint8("dest_dir_handle", "Destination Directory Handle") +DestNameSpace = val_string8("dest_name_space", "Destination Name Space", [ + [ 0x00, "DOS Name Space" ], + [ 0x01, "MAC Name Space" ], + [ 0x02, "NFS Name Space" ], + [ 0x04, "Long Name Space" ], +]) +DestPathComponentCount = uint8("dest_component_count", "Destination Path Component Count") +DestPath = nstring8("dest_path", "Destination Path") +DestPath16 = nstring16("dest_path_16", "Destination Path") +DetachDuringProcessing = uint16("detach_during_processing", "Detach During Processing") +DetachForBadConnectionNumber = uint16("detach_for_bad_connection_number", "Detach For Bad Connection Number") +DirHandle = uint8("dir_handle", "Directory Handle") +DirHandleName = uint8("dir_handle_name", "Handle Name") +DirHandleLong = uint32("dir_handle_long", "Directory Handle") +DirHandle64 = uint64("dir_handle64", "Directory Handle") +DirectoryAccessRights = uint8("directory_access_rights", "Directory Access Rights") +# +# XXX - what do the bits mean here? 
+# +DirectoryAttributes = uint8("directory_attributes", "Directory Attributes") +DirectoryBase = uint32("dir_base", "Directory Base") +DirectoryBase.Display("BASE_HEX") +DirectoryCount = uint16("dir_count", "Directory Count") +DirectoryEntryNumber = uint32("directory_entry_number", "Directory Entry Number") +DirectoryEntryNumber.Display('BASE_HEX') +DirectoryEntryNumberWord = uint16("directory_entry_number_word", "Directory Entry Number") +DirectoryID = uint16("directory_id", "Directory ID", ENC_BIG_ENDIAN) +DirectoryID.Display("BASE_HEX") +DirectoryName = fw_string("directory_name", "Directory Name",12) +DirectoryName14 = fw_string("directory_name_14", "Directory Name", 14) +DirectoryNameLen = uint8("directory_name_len", "Directory Name Length") +DirectoryNumber = uint32("directory_number", "Directory Number") +DirectoryNumber.Display("BASE_HEX") +DirectoryPath = fw_string("directory_path", "Directory Path", 16) +DirectoryServicesObjectID = uint32("directory_services_object_id", "Directory Services Object ID") +DirectoryServicesObjectID.Display("BASE_HEX") +DirectoryStamp = uint16("directory_stamp", "Directory Stamp (0xD1D1)") +DirtyCacheBuffers = uint16("dirty_cache_buffers", "Dirty Cache Buffers") +DiskChannelNumber = uint8("disk_channel_number", "Disk Channel Number") +DiskChannelTable = val_string8("disk_channel_table", "Disk Channel Table", [ + [ 0x01, "XT" ], + [ 0x02, "AT" ], + [ 0x03, "SCSI" ], + [ 0x04, "Disk Coprocessor" ], +]) +DiskSpaceLimit = uint32("disk_space_limit", "Disk Space Limit") +DiskSpaceLimit64 = uint64("data_stream_size_64", "Size") +DMAChannelsUsed = uint32("dma_channels_used", "DMA Channels Used") +DMInfoEntries = uint32("dm_info_entries", "DM Info Entries") +DMInfoLevel = val_string8("dm_info_level", "DM Info Level", [ + [ 0x00, "Return Detailed DM Support Module Information" ], + [ 0x01, "Return Number of DM Support Modules" ], + [ 0x02, "Return DM Support Modules Names" ], +]) +DMFlags = val_string8("dm_flags", "DM Flags", [ + [ 0x00, "OnLine Media" ], + [ 0x01, "OffLine Media" ], +]) +DMmajorVersion = uint32("dm_major_version", "DM Major Version") +DMminorVersion = uint32("dm_minor_version", "DM Minor Version") +DMPresentFlag = val_string8("dm_present_flag", "Data Migration Present Flag", [ + [ 0x00, "Data Migration NLM is not loaded" ], + [ 0x01, "Data Migration NLM has been loaded and is running" ], +]) +DOSDirectoryBase = uint32("dos_directory_base", "DOS Directory Base") +DOSDirectoryBase.Display("BASE_HEX") +DOSDirectoryEntry = uint32("dos_directory_entry", "DOS Directory Entry") +DOSDirectoryEntry.Display("BASE_HEX") +DOSDirectoryEntryNumber = uint32("dos_directory_entry_number", "DOS Directory Entry Number") +DOSDirectoryEntryNumber.Display('BASE_HEX') +DOSFileAttributes = uint8("dos_file_attributes", "DOS File Attributes") +DOSParentDirectoryEntry = uint32("dos_parent_directory_entry", "DOS Parent Directory Entry") +DOSParentDirectoryEntry.Display('BASE_HEX') +DOSSequence = uint32("dos_sequence", "DOS Sequence") +DriveCylinders = uint16("drive_cylinders", "Drive Cylinders") +DriveDefinitionString = fw_string("drive_definition_string", "Drive Definition", 64) +DriveHeads = uint8("drive_heads", "Drive Heads") +DriveMappingTable = bytes("drive_mapping_table", "Drive Mapping Table", 32) +DriveMirrorTable = bytes("drive_mirror_table", "Drive Mirror Table", 32) +DriverBoardName = stringz("driver_board_name", "Driver Board Name") +DriveRemovableFlag = val_string8("drive_removable_flag", "Drive Removable Flag", [ + [ 0x00, "Nonremovable" ], + [ 0xff, 
"Removable" ], +]) +DriverLogicalName = stringz("driver_log_name", "Driver Logical Name") +DriverShortName = stringz("driver_short_name", "Driver Short Name") +DriveSize = uint32("drive_size", "Drive Size") +DstEAFlags = val_string16("dst_ea_flags", "Destination EA Flags", [ + [ 0x0000, "Return EAHandle,Information Level 0" ], + [ 0x0001, "Return NetWareHandle,Information Level 0" ], + [ 0x0002, "Return Volume/Directory Number,Information Level 0" ], + [ 0x0004, "Return EAHandle,Close Handle on Error,Information Level 0" ], + [ 0x0005, "Return NetWareHandle,Close Handle on Error,Information Level 0" ], + [ 0x0006, "Return Volume/Directory Number,Close Handle on Error,Information Level 0" ], + [ 0x0010, "Return EAHandle,Information Level 1" ], + [ 0x0011, "Return NetWareHandle,Information Level 1" ], + [ 0x0012, "Return Volume/Directory Number,Information Level 1" ], + [ 0x0014, "Return EAHandle,Close Handle on Error,Information Level 1" ], + [ 0x0015, "Return NetWareHandle,Close Handle on Error,Information Level 1" ], + [ 0x0016, "Return Volume/Directory Number,Close Handle on Error,Information Level 1" ], + [ 0x0020, "Return EAHandle,Information Level 2" ], + [ 0x0021, "Return NetWareHandle,Information Level 2" ], + [ 0x0022, "Return Volume/Directory Number,Information Level 2" ], + [ 0x0024, "Return EAHandle,Close Handle on Error,Information Level 2" ], + [ 0x0025, "Return NetWareHandle,Close Handle on Error,Information Level 2" ], + [ 0x0026, "Return Volume/Directory Number,Close Handle on Error,Information Level 2" ], + [ 0x0030, "Return EAHandle,Information Level 3" ], + [ 0x0031, "Return NetWareHandle,Information Level 3" ], + [ 0x0032, "Return Volume/Directory Number,Information Level 3" ], + [ 0x0034, "Return EAHandle,Close Handle on Error,Information Level 3" ], + [ 0x0035, "Return NetWareHandle,Close Handle on Error,Information Level 3" ], + [ 0x0036, "Return Volume/Directory Number,Close Handle on Error,Information Level 3" ], + [ 0x0040, "Return EAHandle,Information Level 4" ], + [ 0x0041, "Return NetWareHandle,Information Level 4" ], + [ 0x0042, "Return Volume/Directory Number,Information Level 4" ], + [ 0x0044, "Return EAHandle,Close Handle on Error,Information Level 4" ], + [ 0x0045, "Return NetWareHandle,Close Handle on Error,Information Level 4" ], + [ 0x0046, "Return Volume/Directory Number,Close Handle on Error,Information Level 4" ], + [ 0x0050, "Return EAHandle,Information Level 5" ], + [ 0x0051, "Return NetWareHandle,Information Level 5" ], + [ 0x0052, "Return Volume/Directory Number,Information Level 5" ], + [ 0x0054, "Return EAHandle,Close Handle on Error,Information Level 5" ], + [ 0x0055, "Return NetWareHandle,Close Handle on Error,Information Level 5" ], + [ 0x0056, "Return Volume/Directory Number,Close Handle on Error,Information Level 5" ], + [ 0x0060, "Return EAHandle,Information Level 6" ], + [ 0x0061, "Return NetWareHandle,Information Level 6" ], + [ 0x0062, "Return Volume/Directory Number,Information Level 6" ], + [ 0x0064, "Return EAHandle,Close Handle on Error,Information Level 6" ], + [ 0x0065, "Return NetWareHandle,Close Handle on Error,Information Level 6" ], + [ 0x0066, "Return Volume/Directory Number,Close Handle on Error,Information Level 6" ], + [ 0x0070, "Return EAHandle,Information Level 7" ], + [ 0x0071, "Return NetWareHandle,Information Level 7" ], + [ 0x0072, "Return Volume/Directory Number,Information Level 7" ], + [ 0x0074, "Return EAHandle,Close Handle on Error,Information Level 7" ], + [ 0x0075, "Return NetWareHandle,Close Handle on 
Error,Information Level 7" ], + [ 0x0076, "Return Volume/Directory Number,Close Handle on Error,Information Level 7" ], + [ 0x0080, "Return EAHandle,Information Level 0,Immediate Close Handle" ], + [ 0x0081, "Return NetWareHandle,Information Level 0,Immediate Close Handle" ], + [ 0x0082, "Return Volume/Directory Number,Information Level 0,Immediate Close Handle" ], + [ 0x0084, "Return EAHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ], + [ 0x0085, "Return NetWareHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ], + [ 0x0086, "Return Volume/Directory Number,Close Handle on Error,Information Level 0,Immediate Close Handle" ], + [ 0x0090, "Return EAHandle,Information Level 1,Immediate Close Handle" ], + [ 0x0091, "Return NetWareHandle,Information Level 1,Immediate Close Handle" ], + [ 0x0092, "Return Volume/Directory Number,Information Level 1,Immediate Close Handle" ], + [ 0x0094, "Return EAHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ], + [ 0x0095, "Return NetWareHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ], + [ 0x0096, "Return Volume/Directory Number,Close Handle on Error,Information Level 1,Immediate Close Handle" ], + [ 0x00a0, "Return EAHandle,Information Level 2,Immediate Close Handle" ], + [ 0x00a1, "Return NetWareHandle,Information Level 2,Immediate Close Handle" ], + [ 0x00a2, "Return Volume/Directory Number,Information Level 2,Immediate Close Handle" ], + [ 0x00a4, "Return EAHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ], + [ 0x00a5, "Return NetWareHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ], + [ 0x00a6, "Return Volume/Directory Number,Close Handle on Error,Information Level 2,Immediate Close Handle" ], + [ 0x00b0, "Return EAHandle,Information Level 3,Immediate Close Handle" ], + [ 0x00b1, "Return NetWareHandle,Information Level 3,Immediate Close Handle" ], + [ 0x00b2, "Return Volume/Directory Number,Information Level 3,Immediate Close Handle" ], + [ 0x00b4, "Return EAHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ], + [ 0x00b5, "Return NetWareHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ], + [ 0x00b6, "Return Volume/Directory Number,Close Handle on Error,Information Level 3,Immediate Close Handle" ], + [ 0x00c0, "Return EAHandle,Information Level 4,Immediate Close Handle" ], + [ 0x00c1, "Return NetWareHandle,Information Level 4,Immediate Close Handle" ], + [ 0x00c2, "Return Volume/Directory Number,Information Level 4,Immediate Close Handle" ], + [ 0x00c4, "Return EAHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ], + [ 0x00c5, "Return NetWareHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ], + [ 0x00c6, "Return Volume/Directory Number,Close Handle on Error,Information Level 4,Immediate Close Handle" ], + [ 0x00d0, "Return EAHandle,Information Level 5,Immediate Close Handle" ], + [ 0x00d1, "Return NetWareHandle,Information Level 5,Immediate Close Handle" ], + [ 0x00d2, "Return Volume/Directory Number,Information Level 5,Immediate Close Handle" ], + [ 0x00d4, "Return EAHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ], + [ 0x00d5, "Return NetWareHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ], + [ 0x00d6, "Return Volume/Directory Number,Close Handle on Error,Information Level 5,Immediate Close Handle" ], + [ 0x00e0, "Return EAHandle,Information Level 6,Immediate 
Close Handle" ], + [ 0x00e1, "Return NetWareHandle,Information Level 6,Immediate Close Handle" ], + [ 0x00e2, "Return Volume/Directory Number,Information Level 6,Immediate Close Handle" ], + [ 0x00e4, "Return EAHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ], + [ 0x00e5, "Return NetWareHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ], + [ 0x00e6, "Return Volume/Directory Number,Close Handle on Error,Information Level 6,Immediate Close Handle" ], + [ 0x00f0, "Return EAHandle,Information Level 7,Immediate Close Handle" ], + [ 0x00f1, "Return NetWareHandle,Information Level 7,Immediate Close Handle" ], + [ 0x00f2, "Return Volume/Directory Number,Information Level 7,Immediate Close Handle" ], + [ 0x00f4, "Return EAHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ], + [ 0x00f5, "Return NetWareHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ], + [ 0x00f6, "Return Volume/Directory Number,Close Handle on Error,Information Level 7,Immediate Close Handle" ], +]) +dstNSIndicator = val_string16("dst_ns_indicator", "Destination Name Space Indicator", [ + [ 0x0000, "Return Source Name Space Information" ], + [ 0x0001, "Return Destination Name Space Information" ], +]) +DstQueueID = uint32("dst_queue_id", "Destination Queue ID") +DuplicateRepliesSent = uint16("duplicate_replies_sent", "Duplicate Replies Sent") + +EAAccessFlag = bitfield16("ea_access_flag", "EA Access Flag", [ + bf_boolean16(0x0001, "ea_permanent_memory", "Permanent Memory"), + bf_boolean16(0x0002, "ea_deep_freeze", "Deep Freeze"), + bf_boolean16(0x0004, "ea_in_progress", "In Progress"), + bf_boolean16(0x0008, "ea_header_being_enlarged", "Header Being Enlarged"), + bf_boolean16(0x0010, "ea_new_tally_used", "New Tally Used"), + bf_boolean16(0x0020, "ea_tally_need_update", "Tally Need Update"), + bf_boolean16(0x0040, "ea_score_card_present", "Score Card Present"), + bf_boolean16(0x0080, "ea_need_bit_flag", "EA Need Bit Flag"), + bf_boolean16(0x0100, "ea_write_privileges", "Write Privileges"), + bf_boolean16(0x0200, "ea_read_privileges", "Read Privileges"), + bf_boolean16(0x0400, "ea_delete_privileges", "Delete Privileges"), + bf_boolean16(0x0800, "ea_system_ea_only", "System EA Only"), + bf_boolean16(0x1000, "ea_write_in_progress", "Write In Progress"), +]) +EABytesWritten = uint32("ea_bytes_written", "Bytes Written") +EACount = uint32("ea_count", "Count") +EADataSize = uint32("ea_data_size", "Data Size") +EADataSizeDuplicated = uint32("ea_data_size_duplicated", "Data Size Duplicated") +EADuplicateCount = uint32("ea_duplicate_count", "Duplicate Count") +EAErrorCodes = val_string16("ea_error_codes", "EA Error Codes", [ + [ 0x0000, "SUCCESSFUL" ], + [ 0x00c8, "ERR_MISSING_EA_KEY" ], + [ 0x00c9, "ERR_EA_NOT_FOUND" ], + [ 0x00ca, "ERR_INVALID_EA_HANDLE_TYPE" ], + [ 0x00cb, "ERR_EA_NO_KEY_NO_DATA" ], + [ 0x00cc, "ERR_EA_NUMBER_MISMATCH" ], + [ 0x00cd, "ERR_EXTENT_NUMBER_OUT_OF_RANGE" ], + [ 0x00ce, "ERR_EA_BAD_DIR_NUM" ], + [ 0x00cf, "ERR_INVALID_EA_HANDLE" ], + [ 0x00d0, "ERR_EA_POSITION_OUT_OF_RANGE" ], + [ 0x00d1, "ERR_EA_ACCESS_DENIED" ], + [ 0x00d2, "ERR_DATA_PAGE_ODD_SIZE" ], + [ 0x00d3, "ERR_EA_VOLUME_NOT_MOUNTED" ], + [ 0x00d4, "ERR_BAD_PAGE_BOUNDARY" ], + [ 0x00d5, "ERR_INSPECT_FAILURE" ], + [ 0x00d6, "ERR_EA_ALREADY_CLAIMED" ], + [ 0x00d7, "ERR_ODD_BUFFER_SIZE" ], + [ 0x00d8, "ERR_NO_SCORECARDS" ], + [ 0x00d9, "ERR_BAD_EDS_SIGNATURE" ], + [ 0x00da, "ERR_EA_SPACE_LIMIT" ], + [ 0x00db, "ERR_EA_KEY_CORRUPT" ], + [ 0x00dc, 
"ERR_EA_KEY_LIMIT" ], + [ 0x00dd, "ERR_TALLY_CORRUPT" ], +]) +EAFlags = val_string16("ea_flags", "EA Flags", [ + [ 0x0000, "Return EAHandle,Information Level 0" ], + [ 0x0001, "Return NetWareHandle,Information Level 0" ], + [ 0x0002, "Return Volume/Directory Number,Information Level 0" ], + [ 0x0004, "Return EAHandle,Close Handle on Error,Information Level 0" ], + [ 0x0005, "Return NetWareHandle,Close Handle on Error,Information Level 0" ], + [ 0x0006, "Return Volume/Directory Number,Close Handle on Error,Information Level 0" ], + [ 0x0010, "Return EAHandle,Information Level 1" ], + [ 0x0011, "Return NetWareHandle,Information Level 1" ], + [ 0x0012, "Return Volume/Directory Number,Information Level 1" ], + [ 0x0014, "Return EAHandle,Close Handle on Error,Information Level 1" ], + [ 0x0015, "Return NetWareHandle,Close Handle on Error,Information Level 1" ], + [ 0x0016, "Return Volume/Directory Number,Close Handle on Error,Information Level 1" ], + [ 0x0020, "Return EAHandle,Information Level 2" ], + [ 0x0021, "Return NetWareHandle,Information Level 2" ], + [ 0x0022, "Return Volume/Directory Number,Information Level 2" ], + [ 0x0024, "Return EAHandle,Close Handle on Error,Information Level 2" ], + [ 0x0025, "Return NetWareHandle,Close Handle on Error,Information Level 2" ], + [ 0x0026, "Return Volume/Directory Number,Close Handle on Error,Information Level 2" ], + [ 0x0030, "Return EAHandle,Information Level 3" ], + [ 0x0031, "Return NetWareHandle,Information Level 3" ], + [ 0x0032, "Return Volume/Directory Number,Information Level 3" ], + [ 0x0034, "Return EAHandle,Close Handle on Error,Information Level 3" ], + [ 0x0035, "Return NetWareHandle,Close Handle on Error,Information Level 3" ], + [ 0x0036, "Return Volume/Directory Number,Close Handle on Error,Information Level 3" ], + [ 0x0040, "Return EAHandle,Information Level 4" ], + [ 0x0041, "Return NetWareHandle,Information Level 4" ], + [ 0x0042, "Return Volume/Directory Number,Information Level 4" ], + [ 0x0044, "Return EAHandle,Close Handle on Error,Information Level 4" ], + [ 0x0045, "Return NetWareHandle,Close Handle on Error,Information Level 4" ], + [ 0x0046, "Return Volume/Directory Number,Close Handle on Error,Information Level 4" ], + [ 0x0050, "Return EAHandle,Information Level 5" ], + [ 0x0051, "Return NetWareHandle,Information Level 5" ], + [ 0x0052, "Return Volume/Directory Number,Information Level 5" ], + [ 0x0054, "Return EAHandle,Close Handle on Error,Information Level 5" ], + [ 0x0055, "Return NetWareHandle,Close Handle on Error,Information Level 5" ], + [ 0x0056, "Return Volume/Directory Number,Close Handle on Error,Information Level 5" ], + [ 0x0060, "Return EAHandle,Information Level 6" ], + [ 0x0061, "Return NetWareHandle,Information Level 6" ], + [ 0x0062, "Return Volume/Directory Number,Information Level 6" ], + [ 0x0064, "Return EAHandle,Close Handle on Error,Information Level 6" ], + [ 0x0065, "Return NetWareHandle,Close Handle on Error,Information Level 6" ], + [ 0x0066, "Return Volume/Directory Number,Close Handle on Error,Information Level 6" ], + [ 0x0070, "Return EAHandle,Information Level 7" ], + [ 0x0071, "Return NetWareHandle,Information Level 7" ], + [ 0x0072, "Return Volume/Directory Number,Information Level 7" ], + [ 0x0074, "Return EAHandle,Close Handle on Error,Information Level 7" ], + [ 0x0075, "Return NetWareHandle,Close Handle on Error,Information Level 7" ], + [ 0x0076, "Return Volume/Directory Number,Close Handle on Error,Information Level 7" ], + [ 0x0080, "Return EAHandle,Information Level 
0,Immediate Close Handle" ], + [ 0x0081, "Return NetWareHandle,Information Level 0,Immediate Close Handle" ], + [ 0x0082, "Return Volume/Directory Number,Information Level 0,Immediate Close Handle" ], + [ 0x0084, "Return EAHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ], + [ 0x0085, "Return NetWareHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ], + [ 0x0086, "Return Volume/Directory Number,Close Handle on Error,Information Level 0,Immediate Close Handle" ], + [ 0x0090, "Return EAHandle,Information Level 1,Immediate Close Handle" ], + [ 0x0091, "Return NetWareHandle,Information Level 1,Immediate Close Handle" ], + [ 0x0092, "Return Volume/Directory Number,Information Level 1,Immediate Close Handle" ], + [ 0x0094, "Return EAHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ], + [ 0x0095, "Return NetWareHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ], + [ 0x0096, "Return Volume/Directory Number,Close Handle on Error,Information Level 1,Immediate Close Handle" ], + [ 0x00a0, "Return EAHandle,Information Level 2,Immediate Close Handle" ], + [ 0x00a1, "Return NetWareHandle,Information Level 2,Immediate Close Handle" ], + [ 0x00a2, "Return Volume/Directory Number,Information Level 2,Immediate Close Handle" ], + [ 0x00a4, "Return EAHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ], + [ 0x00a5, "Return NetWareHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ], + [ 0x00a6, "Return Volume/Directory Number,Close Handle on Error,Information Level 2,Immediate Close Handle" ], + [ 0x00b0, "Return EAHandle,Information Level 3,Immediate Close Handle" ], + [ 0x00b1, "Return NetWareHandle,Information Level 3,Immediate Close Handle" ], + [ 0x00b2, "Return Volume/Directory Number,Information Level 3,Immediate Close Handle" ], + [ 0x00b4, "Return EAHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ], + [ 0x00b5, "Return NetWareHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ], + [ 0x00b6, "Return Volume/Directory Number,Close Handle on Error,Information Level 3,Immediate Close Handle" ], + [ 0x00c0, "Return EAHandle,Information Level 4,Immediate Close Handle" ], + [ 0x00c1, "Return NetWareHandle,Information Level 4,Immediate Close Handle" ], + [ 0x00c2, "Return Volume/Directory Number,Information Level 4,Immediate Close Handle" ], + [ 0x00c4, "Return EAHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ], + [ 0x00c5, "Return NetWareHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ], + [ 0x00c6, "Return Volume/Directory Number,Close Handle on Error,Information Level 4,Immediate Close Handle" ], + [ 0x00d0, "Return EAHandle,Information Level 5,Immediate Close Handle" ], + [ 0x00d1, "Return NetWareHandle,Information Level 5,Immediate Close Handle" ], + [ 0x00d2, "Return Volume/Directory Number,Information Level 5,Immediate Close Handle" ], + [ 0x00d4, "Return EAHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ], + [ 0x00d5, "Return NetWareHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ], + [ 0x00d6, "Return Volume/Directory Number,Close Handle on Error,Information Level 5,Immediate Close Handle" ], + [ 0x00e0, "Return EAHandle,Information Level 6,Immediate Close Handle" ], + [ 0x00e1, "Return NetWareHandle,Information Level 6,Immediate Close Handle" ], + [ 0x00e2, "Return Volume/Directory Number,Information Level 
6,Immediate Close Handle" ], + [ 0x00e4, "Return EAHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ], + [ 0x00e5, "Return NetWareHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ], + [ 0x00e6, "Return Volume/Directory Number,Close Handle on Error,Information Level 6,Immediate Close Handle" ], + [ 0x00f0, "Return EAHandle,Information Level 7,Immediate Close Handle" ], + [ 0x00f1, "Return NetWareHandle,Information Level 7,Immediate Close Handle" ], + [ 0x00f2, "Return Volume/Directory Number,Information Level 7,Immediate Close Handle" ], + [ 0x00f4, "Return EAHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ], + [ 0x00f5, "Return NetWareHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ], + [ 0x00f6, "Return Volume/Directory Number,Close Handle on Error,Information Level 7,Immediate Close Handle" ], +]) +EAHandle = uint32("ea_handle", "EA Handle") +EAHandle.Display("BASE_HEX") +EAHandleOrNetWareHandleOrVolume = uint32("ea_handle_or_netware_handle_or_volume", "EAHandle or NetWare Handle or Volume (see EAFlags)") +EAHandleOrNetWareHandleOrVolume.Display("BASE_HEX") +EAKey = nstring16("ea_key", "EA Key") +EAKeySize = uint32("ea_key_size", "Key Size") +EAKeySizeDuplicated = uint32("ea_key_size_duplicated", "Key Size Duplicated") +EAValue = nstring16("ea_value", "EA Value") +EAValueRep = fw_string("ea_value_rep", "EA Value", 1) +EAValueLength = uint16("ea_value_length", "Value Length") +EchoSocket = uint16("echo_socket", "Echo Socket") +EchoSocket.Display('BASE_HEX') +EffectiveRights = bitfield8("effective_rights", "Effective Rights", [ + bf_boolean8(0x01, "effective_rights_read", "Read Rights"), + bf_boolean8(0x02, "effective_rights_write", "Write Rights"), + bf_boolean8(0x04, "effective_rights_open", "Open Rights"), + bf_boolean8(0x08, "effective_rights_create", "Create Rights"), + bf_boolean8(0x10, "effective_rights_delete", "Delete Rights"), + bf_boolean8(0x20, "effective_rights_parental", "Parental Rights"), + bf_boolean8(0x40, "effective_rights_search", "Search Rights"), + bf_boolean8(0x80, "effective_rights_modify", "Modify Rights"), +]) +EnumInfoMask = bitfield8("enum_info_mask", "Return Information Mask", [ + bf_boolean8(0x01, "enum_info_transport", "Transport Information"), + bf_boolean8(0x02, "enum_info_time", "Time Information"), + bf_boolean8(0x04, "enum_info_name", "Name Information"), + bf_boolean8(0x08, "enum_info_lock", "Lock Information"), + bf_boolean8(0x10, "enum_info_print", "Print Information"), + bf_boolean8(0x20, "enum_info_stats", "Statistical Information"), + bf_boolean8(0x40, "enum_info_account", "Accounting Information"), + bf_boolean8(0x80, "enum_info_auth", "Authentication Information"), +]) + +eventOffset = bytes("event_offset", "Event Offset", 8) +eventTime = uint32("event_time", "Event Time") +eventTime.Display("BASE_HEX") +ExpirationTime = uint32("expiration_time", "Expiration Time") +ExpirationTime.Display('BASE_HEX') +ExtAttrDataSize = uint32("ext_attr_data_size", "Extended Attributes Data Size") +ExtAttrCount = uint32("ext_attr_count", "Extended Attributes Count") +ExtAttrKeySize = uint32("ext_attr_key_size", "Extended Attributes Key Size") +ExtendedAttributesDefined = uint32("extended_attributes_defined", "Extended Attributes Defined") +ExtendedAttributeExtentsUsed = uint32("extended_attribute_extents_used", "Extended Attribute Extents Used") +ExtendedInfo = bitfield16("ext_info", "Extended Return Information", [ + bf_boolean16(0x0001, "ext_info_update", "Last 
Update"), + bf_boolean16(0x0002, "ext_info_dos_name", "DOS Name"), + bf_boolean16(0x0004, "ext_info_flush", "Flush Time"), + bf_boolean16(0x0008, "ext_info_parental", "Parental"), + bf_boolean16(0x0010, "ext_info_mac_finder", "MAC Finder"), + bf_boolean16(0x0020, "ext_info_sibling", "Sibling"), + bf_boolean16(0x0040, "ext_info_effective", "Effective"), + bf_boolean16(0x0080, "ext_info_mac_date", "MAC Date"), + bf_boolean16(0x0100, "ext_info_access", "Last Access"), + bf_boolean16(0x0400, "ext_info_64_bit_fs", "64 Bit File Sizes"), + bf_boolean16(0x8000, "ext_info_newstyle", "New Style"), +]) + +ExtentListFormat = uint8("ext_lst_format", "Extent List Format") +RetExtentListCount = uint8("ret_ext_lst_count", "Extent List Count") +EndingOffset = bytes("end_offset", "Ending Offset", 8) +#ExtentLength = bytes("extent_length", "Length", 8), +ExtentList = bytes("ext_lst", "Extent List", 512) +ExtRouterActiveFlag = boolean8("ext_router_active_flag", "External Router Active Flag") + +FailedAllocReqCnt = uint32("failed_alloc_req", "Failed Alloc Request Count") +FatalFATWriteErrors = uint16("fatal_fat_write_errors", "Fatal FAT Write Errors") +FATScanErrors = uint16("fat_scan_errors", "FAT Scan Errors") +FATWriteErrors = uint16("fat_write_errors", "FAT Write Errors") +FieldsLenTable = bytes("fields_len_table", "Fields Len Table", 32) +FileCount = uint16("file_count", "File Count") +FileDate = uint16("file_date", "File Date") +FileDate.NWDate() +FileDirWindow = uint16("file_dir_win", "File/Dir Window") +FileDirWindow.Display("BASE_HEX") +FileExecuteType = uint8("file_execute_type", "File Execute Type") +FileExtendedAttributes = val_string8("file_ext_attr", "File Extended Attributes", [ + [ 0x00, "Search On All Read Only Opens" ], + [ 0x01, "Search On Read Only Opens With No Path" ], + [ 0x02, "Shell Default Search Mode" ], + [ 0x03, "Search On All Opens With No Path" ], + [ 0x04, "Do Not Search" ], + [ 0x05, "Reserved" ], + [ 0x06, "Search On All Opens" ], + [ 0x07, "Reserved" ], + [ 0x08, "Search On All Read Only Opens/Indexed" ], + [ 0x09, "Search On Read Only Opens With No Path/Indexed" ], + [ 0x0a, "Shell Default Search Mode/Indexed" ], + [ 0x0b, "Search On All Opens With No Path/Indexed" ], + [ 0x0c, "Do Not Search/Indexed" ], + [ 0x0d, "Indexed" ], + [ 0x0e, "Search On All Opens/Indexed" ], + [ 0x0f, "Indexed" ], + [ 0x10, "Search On All Read Only Opens/Transactional" ], + [ 0x11, "Search On Read Only Opens With No Path/Transactional" ], + [ 0x12, "Shell Default Search Mode/Transactional" ], + [ 0x13, "Search On All Opens With No Path/Transactional" ], + [ 0x14, "Do Not Search/Transactional" ], + [ 0x15, "Transactional" ], + [ 0x16, "Search On All Opens/Transactional" ], + [ 0x17, "Transactional" ], + [ 0x18, "Search On All Read Only Opens/Indexed/Transactional" ], + [ 0x19, "Search On Read Only Opens With No Path/Indexed/Transactional" ], + [ 0x1a, "Shell Default Search Mode/Indexed/Transactional" ], + [ 0x1b, "Search On All Opens With No Path/Indexed/Transactional" ], + [ 0x1c, "Do Not Search/Indexed/Transactional" ], + [ 0x1d, "Indexed/Transactional" ], + [ 0x1e, "Search On All Opens/Indexed/Transactional" ], + [ 0x1f, "Indexed/Transactional" ], + [ 0x40, "Search On All Read Only Opens/Read Audit" ], + [ 0x41, "Search On Read Only Opens With No Path/Read Audit" ], + [ 0x42, "Shell Default Search Mode/Read Audit" ], + [ 0x43, "Search On All Opens With No Path/Read Audit" ], + [ 0x44, "Do Not Search/Read Audit" ], + [ 0x45, "Read Audit" ], + [ 0x46, "Search On All Opens/Read Audit" ], + [ 0x47, 
"Read Audit" ], + [ 0x48, "Search On All Read Only Opens/Indexed/Read Audit" ], + [ 0x49, "Search On Read Only Opens With No Path/Indexed/Read Audit" ], + [ 0x4a, "Shell Default Search Mode/Indexed/Read Audit" ], + [ 0x4b, "Search On All Opens With No Path/Indexed/Read Audit" ], + [ 0x4c, "Do Not Search/Indexed/Read Audit" ], + [ 0x4d, "Indexed/Read Audit" ], + [ 0x4e, "Search On All Opens/Indexed/Read Audit" ], + [ 0x4f, "Indexed/Read Audit" ], + [ 0x50, "Search On All Read Only Opens/Transactional/Read Audit" ], + [ 0x51, "Search On Read Only Opens With No Path/Transactional/Read Audit" ], + [ 0x52, "Shell Default Search Mode/Transactional/Read Audit" ], + [ 0x53, "Search On All Opens With No Path/Transactional/Read Audit" ], + [ 0x54, "Do Not Search/Transactional/Read Audit" ], + [ 0x55, "Transactional/Read Audit" ], + [ 0x56, "Search On All Opens/Transactional/Read Audit" ], + [ 0x57, "Transactional/Read Audit" ], + [ 0x58, "Search On All Read Only Opens/Indexed/Transactional/Read Audit" ], + [ 0x59, "Search On Read Only Opens With No Path/Indexed/Transactional/Read Audit" ], + [ 0x5a, "Shell Default Search Mode/Indexed/Transactional/Read Audit" ], + [ 0x5b, "Search On All Opens With No Path/Indexed/Transactional/Read Audit" ], + [ 0x5c, "Do Not Search/Indexed/Transactional/Read Audit" ], + [ 0x5d, "Indexed/Transactional/Read Audit" ], + [ 0x5e, "Search On All Opens/Indexed/Transactional/Read Audit" ], + [ 0x5f, "Indexed/Transactional/Read Audit" ], + [ 0x80, "Search On All Read Only Opens/Write Audit" ], + [ 0x81, "Search On Read Only Opens With No Path/Write Audit" ], + [ 0x82, "Shell Default Search Mode/Write Audit" ], + [ 0x83, "Search On All Opens With No Path/Write Audit" ], + [ 0x84, "Do Not Search/Write Audit" ], + [ 0x85, "Write Audit" ], + [ 0x86, "Search On All Opens/Write Audit" ], + [ 0x87, "Write Audit" ], + [ 0x88, "Search On All Read Only Opens/Indexed/Write Audit" ], + [ 0x89, "Search On Read Only Opens With No Path/Indexed/Write Audit" ], + [ 0x8a, "Shell Default Search Mode/Indexed/Write Audit" ], + [ 0x8b, "Search On All Opens With No Path/Indexed/Write Audit" ], + [ 0x8c, "Do Not Search/Indexed/Write Audit" ], + [ 0x8d, "Indexed/Write Audit" ], + [ 0x8e, "Search On All Opens/Indexed/Write Audit" ], + [ 0x8f, "Indexed/Write Audit" ], + [ 0x90, "Search On All Read Only Opens/Transactional/Write Audit" ], + [ 0x91, "Search On Read Only Opens With No Path/Transactional/Write Audit" ], + [ 0x92, "Shell Default Search Mode/Transactional/Write Audit" ], + [ 0x93, "Search On All Opens With No Path/Transactional/Write Audit" ], + [ 0x94, "Do Not Search/Transactional/Write Audit" ], + [ 0x95, "Transactional/Write Audit" ], + [ 0x96, "Search On All Opens/Transactional/Write Audit" ], + [ 0x97, "Transactional/Write Audit" ], + [ 0x98, "Search On All Read Only Opens/Indexed/Transactional/Write Audit" ], + [ 0x99, "Search On Read Only Opens With No Path/Indexed/Transactional/Write Audit" ], + [ 0x9a, "Shell Default Search Mode/Indexed/Transactional/Write Audit" ], + [ 0x9b, "Search On All Opens With No Path/Indexed/Transactional/Write Audit" ], + [ 0x9c, "Do Not Search/Indexed/Transactional/Write Audit" ], + [ 0x9d, "Indexed/Transactional/Write Audit" ], + [ 0x9e, "Search On All Opens/Indexed/Transactional/Write Audit" ], + [ 0x9f, "Indexed/Transactional/Write Audit" ], + [ 0xa0, "Search On All Read Only Opens/Read Audit/Write Audit" ], + [ 0xa1, "Search On Read Only Opens With No Path/Read Audit/Write Audit" ], + [ 0xa2, "Shell Default Search Mode/Read Audit/Write Audit" ], + [ 
0xa3, "Search On All Opens With No Path/Read Audit/Write Audit" ], + [ 0xa4, "Do Not Search/Read Audit/Write Audit" ], + [ 0xa5, "Read Audit/Write Audit" ], + [ 0xa6, "Search On All Opens/Read Audit/Write Audit" ], + [ 0xa7, "Read Audit/Write Audit" ], + [ 0xa8, "Search On All Read Only Opens/Indexed/Read Audit/Write Audit" ], + [ 0xa9, "Search On Read Only Opens With No Path/Indexed/Read Audit/Write Audit" ], + [ 0xaa, "Shell Default Search Mode/Indexed/Read Audit/Write Audit" ], + [ 0xab, "Search On All Opens With No Path/Indexed/Read Audit/Write Audit" ], + [ 0xac, "Do Not Search/Indexed/Read Audit/Write Audit" ], + [ 0xad, "Indexed/Read Audit/Write Audit" ], + [ 0xae, "Search On All Opens/Indexed/Read Audit/Write Audit" ], + [ 0xaf, "Indexed/Read Audit/Write Audit" ], + [ 0xb0, "Search On All Read Only Opens/Transactional/Read Audit/Write Audit" ], + [ 0xb1, "Search On Read Only Opens With No Path/Transactional/Read Audit/Write Audit" ], + [ 0xb2, "Shell Default Search Mode/Transactional/Read Audit/Write Audit" ], + [ 0xb3, "Search On All Opens With No Path/Transactional/Read Audit/Write Audit" ], + [ 0xb4, "Do Not Search/Transactional/Read Audit/Write Audit" ], + [ 0xb5, "Transactional/Read Audit/Write Audit" ], + [ 0xb6, "Search On All Opens/Transactional/Read Audit/Write Audit" ], + [ 0xb7, "Transactional/Read Audit/Write Audit" ], + [ 0xb8, "Search On All Read Only Opens/Indexed/Transactional/Read Audit/Write Audit" ], + [ 0xb9, "Search On Read Only Opens With No Path/Indexed/Transactional/Read Audit/Write Audit" ], + [ 0xba, "Shell Default Search Mode/Indexed/Transactional/Read Audit/Write Audit" ], + [ 0xbb, "Search On All Opens With No Path/Indexed/Transactional/Read Audit/Write Audit" ], + [ 0xbc, "Do Not Search/Indexed/Transactional/Read Audit/Write Audit" ], + [ 0xbd, "Indexed/Transactional/Read Audit/Write Audit" ], + [ 0xbe, "Search On All Opens/Indexed/Transactional/Read Audit/Write Audit" ], + [ 0xbf, "Indexed/Transactional/Read Audit/Write Audit" ], +]) +fileFlags = uint32("file_flags", "File Flags") +FileHandle = bytes("file_handle", "File Handle", 6) +FileLimbo = uint32("file_limbo", "File Limbo") +FileListCount = uint32("file_list_count", "File List Count") +FileLock = val_string8("file_lock", "File Lock", [ + [ 0x00, "Not Locked" ], + [ 0xfe, "Locked by file lock" ], + [ 0xff, "Unknown" ], +]) +FileLockCount = uint16("file_lock_count", "File Lock Count") +FileMigrationState = val_string8("file_mig_state", "File Migration State", [ + [ 0x00, "Mark file ineligible for file migration" ], + [ 0x01, "Mark file eligible for file migration" ], + [ 0x02, "Mark file as migrated and delete fat chains" ], + [ 0x03, "Reset file status back to normal" ], + [ 0x04, "Get file data back and reset file status back to normal" ], +]) +FileMode = uint8("file_mode", "File Mode") +FileName = nstring8("file_name", "Filename") +FileName12 = fw_string("file_name_12", "Filename", 12) +FileName14 = fw_string("file_name_14", "Filename", 14) +FileName16 = nstring16("file_name_16", "Filename") +FileNameLen = uint8("file_name_len", "Filename Length") +FileOffset = uint32("file_offset", "File Offset") +FilePath = nstring8("file_path", "File Path") +FileSize = uint32("file_size", "File Size", ENC_BIG_ENDIAN) +FileSize64bit = uint64("f_size_64bit", "64bit File Size") +FileSystemID = uint8("file_system_id", "File System ID") +FileTime = uint16("file_time", "File Time") +FileTime.NWTime() +FileUseCount = uint16("file_use_count", "File Use Count") +FileWriteFlags = val_string8("file_write_flags", "File 
Write Flags", [ + [ 0x01, "Writing" ], + [ 0x02, "Write aborted" ], +]) +FileWriteState = val_string8("file_write_state", "File Write State", [ + [ 0x00, "Not Writing" ], + [ 0x01, "Write in Progress" ], + [ 0x02, "Write Being Stopped" ], +]) +Filler = uint8("filler", "Filler") +FinderAttr = bitfield16("finder_attr", "Finder Info Attributes", [ + bf_boolean16(0x0001, "finder_attr_desktop", "Object on Desktop"), + bf_boolean16(0x2000, "finder_attr_invisible", "Object is Invisible"), + bf_boolean16(0x4000, "finder_attr_bundle", "Object Has Bundle"), +]) +FixedBitMask = uint32("fixed_bit_mask", "Fixed Bit Mask") +FixedBitsDefined = uint16("fixed_bits_defined", "Fixed Bits Defined") +FlagBits = uint8("flag_bits", "Flag Bits") +Flags = uint8("flags", "Flags") +FlagsDef = uint16("flags_def", "Flags") +FlushTime = uint32("flush_time", "Flush Time") +FolderFlag = val_string8("folder_flag", "Folder Flag", [ + [ 0x00, "Not a Folder" ], + [ 0x01, "Folder" ], +]) +ForkCount = uint8("fork_count", "Fork Count") +ForkIndicator = val_string8("fork_indicator", "Fork Indicator", [ + [ 0x00, "Data Fork" ], + [ 0x01, "Resource Fork" ], +]) +ForceFlag = val_string8("force_flag", "Force Server Down Flag", [ + [ 0x00, "Down Server if No Files Are Open" ], + [ 0xff, "Down Server Immediately, Auto-Close Open Files" ], +]) +ForgedDetachedRequests = uint16("forged_detached_requests", "Forged Detached Requests") +FormType = uint16( "form_type", "Form Type" ) +FormTypeCnt = uint32("form_type_count", "Form Types Count") +FoundSomeMem = uint32("found_some_mem", "Found Some Memory") +FractionalSeconds = eptime("fractional_time", "Fractional Time in Seconds") +FraggerHandle = uint32("fragger_handle", "Fragment Handle") +FraggerHandle.Display('BASE_HEX') +FragmentWriteOccurred = uint16("fragment_write_occurred", "Fragment Write Occurred") +FragSize = uint32("frag_size", "Fragment Size") +FreeableLimboSectors = uint32("freeable_limbo_sectors", "Freeable Limbo Sectors") +FreeBlocks = uint32("free_blocks", "Free Blocks") +FreedClusters = uint32("freed_clusters", "Freed Clusters") +FreeDirectoryEntries = uint16("free_directory_entries", "Free Directory Entries") +FSEngineFlag = boolean8("fs_engine_flag", "FS Engine Flag") +FullName = fw_string("full_name", "Full Name", 39) + +GetSetFlag = val_string8("get_set_flag", "Get Set Flag", [ + [ 0x00, "Get the default support module ID" ], + [ 0x01, "Set the default support module ID" ], +]) +GUID = bytes("guid", "GUID", 16) + +HandleFlag = val_string8("handle_flag", "Handle Flag", [ + [ 0x00, "Short Directory Handle" ], + [ 0x01, "Directory Base" ], + [ 0xFF, "No Handle Present" ], +]) +HandleInfoLevel = val_string8("handle_info_level", "Handle Info Level", [ + [ 0x00, "Get Limited Information from a File Handle" ], + [ 0x01, "Get Limited Information from a File Handle Using a Name Space" ], + [ 0x02, "Get Information from a File Handle" ], + [ 0x03, "Get Information from a Directory Handle" ], + [ 0x04, "Get Complete Information from a Directory Handle" ], + [ 0x05, "Get Complete Information from a File Handle" ], +]) +HeldBytesRead = bytes("held_bytes_read", "Held Bytes Read", 6) +HeldBytesWritten = bytes("held_bytes_write", "Held Bytes Written", 6) +HeldConnectTimeInMinutes = uint32("held_conn_time", "Held Connect Time in Minutes") +HeldRequests = uint32("user_info_held_req", "Held Requests") +HoldAmount = uint32("hold_amount", "Hold Amount") +HoldCancelAmount = uint32("hold_cancel_amount", "Hold Cancel Amount") +HolderID = uint32("holder_id", "Holder ID") 
+HolderID.Display("BASE_HEX") +HoldTime = uint32("hold_time", "Hold Time") +HopsToNet = uint16("hops_to_net", "Hop Count") +HorizLocation = uint16("horiz_location", "Horizontal Location") +HostAddress = bytes("host_address", "Host Address", 6) +HotFixBlocksAvailable = uint16("hot_fix_blocks_available", "Hot Fix Blocks Available") +HotFixDisabled = val_string8("hot_fix_disabled", "Hot Fix Disabled", [ + [ 0x00, "Enabled" ], + [ 0x01, "Disabled" ], +]) +HotFixTableSize = uint16("hot_fix_table_size", "Hot Fix Table Size") +HotFixTableStart = uint32("hot_fix_table_start", "Hot Fix Table Start") +Hour = uint8("s_hour", "Hour") +HugeBitMask = uint32("huge_bit_mask", "Huge Bit Mask") +HugeBitsDefined = uint16("huge_bits_defined", "Huge Bits Defined") +HugeData = nstring8("huge_data", "Huge Data") +HugeDataUsed = uint32("huge_data_used", "Huge Data Used") +HugeStateInfo = bytes("huge_state_info", "Huge State Info", 16) + +IdentificationNumber = uint32("identification_number", "Identification Number") +IgnoredRxPkts = uint32("ignored_rx_pkts", "Ignored Receive Packets") +IncomingPacketDiscardedNoDGroup = uint16("incoming_packet_discarded_no_dgroup", "Incoming Packet Discarded No DGroup") +IndexNumber = uint8("index_number", "Index Number") +InfoCount = uint16("info_count", "Info Count") +InfoFlags = bitfield32("info_flags", "Info Flags", [ + bf_boolean32(0x10000000, "info_flags_security", "Return Object Security"), + bf_boolean32(0x20000000, "info_flags_flags", "Return Object Flags"), + bf_boolean32(0x40000000, "info_flags_type", "Return Object Type"), + bf_boolean32(0x80000000, "info_flags_name", "Return Object Name"), +]) +InfoLevelNumber = val_string8("info_level_num", "Information Level Number", [ + [ 0x0, "Single Directory Quota Information" ], + [ 0x1, "Multi-Level Directory Quota Information" ], +]) +InfoMask = bitfield32("info_mask", "Information Mask", [ + bf_boolean32(0x00000001, "info_flags_dos_time", "DOS Time"), + bf_boolean32(0x00000002, "info_flags_ref_count", "Reference Count"), + bf_boolean32(0x00000004, "info_flags_dos_attr", "DOS Attributes"), + bf_boolean32(0x00000008, "info_flags_ids", "ID's"), + bf_boolean32(0x00000010, "info_flags_ds_sizes", "Data Stream Sizes"), + bf_boolean32(0x00000020, "info_flags_ns_attr", "Name Space Attributes"), + bf_boolean32(0x00000040, "info_flags_ea_present", "EA Present Flag"), + bf_boolean32(0x00000080, "info_flags_all_attr", "All Attributes"), + bf_boolean32(0x00000100, "info_flags_all_dirbase_num", "All Directory Base Numbers"), + bf_boolean32(0x00000200, "info_flags_max_access_mask", "Maximum Access Mask"), + bf_boolean32(0x00000400, "info_flags_flush_time", "Flush Time"), + bf_boolean32(0x00000800, "info_flags_prnt_base_id", "Parent Base ID"), + bf_boolean32(0x00001000, "info_flags_mac_finder", "Mac Finder Information"), + bf_boolean32(0x00002000, "info_flags_sibling_cnt", "Sibling Count"), + bf_boolean32(0x00004000, "info_flags_effect_rights", "Effective Rights"), + bf_boolean32(0x00008000, "info_flags_mac_time", "Mac Time"), + bf_boolean32(0x20000000, "info_mask_dosname", "DOS Name"), + bf_boolean32(0x40000000, "info_mask_c_name_space", "Creator Name Space & Name"), + bf_boolean32(0x80000000, "info_mask_name", "Name"), +]) +InheritedRightsMask = bitfield16("inherited_rights_mask", "Inherited Rights Mask", [ + bf_boolean16(0x0001, "inh_rights_read", "Read Rights"), + bf_boolean16(0x0002, "inh_rights_write", "Write Rights"), + bf_boolean16(0x0004, "inh_rights_open", "Open Rights"), + bf_boolean16(0x0008, "inh_rights_create", "Create 
Rights"), + bf_boolean16(0x0010, "inh_rights_delete", "Delete Rights"), + bf_boolean16(0x0020, "inh_rights_parent", "Change Access"), + bf_boolean16(0x0040, "inh_rights_search", "See Files Flag"), + bf_boolean16(0x0080, "inh_rights_modify", "Modify Rights"), + bf_boolean16(0x0100, "inh_rights_supervisor", "Supervisor"), +]) +InheritanceRevokeMask = bitfield16("inheritance_revoke_mask", "Revoke Rights Mask", [ + bf_boolean16(0x0001, "inh_revoke_read", "Read Rights"), + bf_boolean16(0x0002, "inh_revoke_write", "Write Rights"), + bf_boolean16(0x0004, "inh_revoke_open", "Open Rights"), + bf_boolean16(0x0008, "inh_revoke_create", "Create Rights"), + bf_boolean16(0x0010, "inh_revoke_delete", "Delete Rights"), + bf_boolean16(0x0020, "inh_revoke_parent", "Change Access"), + bf_boolean16(0x0040, "inh_revoke_search", "See Files Flag"), + bf_boolean16(0x0080, "inh_revoke_modify", "Modify Rights"), + bf_boolean16(0x0100, "inh_revoke_supervisor", "Supervisor"), +]) +InitialSemaphoreValue = uint8("initial_semaphore_value", "Initial Semaphore Value") +InpInfotype = uint32("inp_infotype", "Information Type") +Inpld = uint32("inp_ld", "Volume Number or Directory Handle") +InspectSize = uint32("inspect_size", "Inspect Size") +InternetBridgeVersion = uint8("internet_bridge_version", "Internet Bridge Version") +InterruptNumbersUsed = uint32("interrupt_numbers_used", "Interrupt Numbers Used") +InUse = uint32("in_use", "Blocks in Use") +InUse64 = uint64("in_use64", "Blocks in Use") +IOAddressesUsed = bytes("io_addresses_used", "IO Addresses Used", 8) +IOErrorCount = uint16("io_error_count", "IO Error Count") +IOEngineFlag = boolean8("io_engine_flag", "IO Engine Flag") +IPXNotMyNetwork = uint16("ipx_not_my_network", "IPX Not My Network") +ItemsChanged = uint32("items_changed", "Items Changed") +ItemsChecked = uint32("items_checked", "Items Checked") +ItemsCount = uint32("items_count", "Items Count") +itemsInList = uint32("items_in_list", "Items in List") +ItemsInPacket = uint32("items_in_packet", "Items in Packet") + +JobControlFlags = bitfield8("job_control_flags", "Job Control Flags", [ + bf_boolean8(0x08, "job_control_job_recovery", "Job Recovery"), + bf_boolean8(0x10, "job_control_reservice", "ReService Job"), + bf_boolean8(0x20, "job_control_file_open", "File Open"), + bf_boolean8(0x40, "job_control_user_hold", "User Hold"), + bf_boolean8(0x80, "job_control_operator_hold", "Operator Hold"), + +]) +JobControlFlagsWord = bitfield16("job_control_flags_word", "Job Control Flags", [ + bf_boolean16(0x0008, "job_control1_job_recovery", "Job Recovery"), + bf_boolean16(0x0010, "job_control1_reservice", "ReService Job"), + bf_boolean16(0x0020, "job_control1_file_open", "File Open"), + bf_boolean16(0x0040, "job_control1_user_hold", "User Hold"), + bf_boolean16(0x0080, "job_control1_operator_hold", "Operator Hold"), + +]) +JobCount = uint32("job_count", "Job Count") +JobFileHandle = bytes("job_file_handle", "Job File Handle", 6) +JobFileHandleLong = uint32("job_file_handle_long", "Job File Handle", ENC_BIG_ENDIAN) +JobFileHandleLong.Display("BASE_HEX") +JobFileName = fw_string("job_file_name", "Job File Name", 14) +JobPosition = uint8("job_position", "Job Position") +JobPositionWord = uint16("job_position_word", "Job Position") +JobNumber = uint16("job_number", "Job Number", ENC_BIG_ENDIAN ) +JobNumberLong = uint32("job_number_long", "Job Number", ENC_BIG_ENDIAN ) +JobNumberLong.Display("BASE_HEX") +JobType = uint16("job_type", "Job Type", ENC_BIG_ENDIAN ) + +LANCustomVariablesCount = uint32("lan_cust_var_count", "LAN 
Custom Variables Count") +LANdriverBoardInstance = uint16("lan_drv_bd_inst", "LAN Driver Board Instance") +LANdriverBoardNumber = uint16("lan_drv_bd_num", "LAN Driver Board Number") +LANdriverCardID = uint16("lan_drv_card_id", "LAN Driver Card ID") +LANdriverCardName = fw_string("lan_drv_card_name", "LAN Driver Card Name", 28) +LANdriverCFG_MajorVersion = uint8("lan_dvr_cfg_major_vrs", "LAN Driver Config - Major Version") +LANdriverCFG_MinorVersion = uint8("lan_dvr_cfg_minor_vrs", "LAN Driver Config - Minor Version") +LANdriverDMAUsage1 = uint8("lan_drv_dma_usage1", "Primary DMA Channel") +LANdriverDMAUsage2 = uint8("lan_drv_dma_usage2", "Secondary DMA Channel") +LANdriverFlags = uint16("lan_drv_flags", "LAN Driver Flags") +LANdriverFlags.Display("BASE_HEX") +LANdriverInterrupt1 = uint8("lan_drv_interrupt1", "Primary Interrupt Vector") +LANdriverInterrupt2 = uint8("lan_drv_interrupt2", "Secondary Interrupt Vector") +LANdriverIOPortsAndRanges1 = uint16("lan_drv_io_ports_and_ranges_1", "Primary Base I/O Port") +LANdriverIOPortsAndRanges2 = uint16("lan_drv_io_ports_and_ranges_2", "Number of I/O Ports") +LANdriverIOPortsAndRanges3 = uint16("lan_drv_io_ports_and_ranges_3", "Secondary Base I/O Port") +LANdriverIOPortsAndRanges4 = uint16("lan_drv_io_ports_and_ranges_4", "Number of I/O Ports") +LANdriverIOReserved = bytes("lan_drv_io_reserved", "LAN Driver IO Reserved", 14) +LANdriverLineSpeed = uint16("lan_drv_line_speed", "LAN Driver Line Speed") +LANdriverLink = uint32("lan_drv_link", "LAN Driver Link") +LANdriverLogicalName = bytes("lan_drv_log_name", "LAN Driver Logical Name", 18) +LANdriverMajorVersion = uint8("lan_drv_major_ver", "LAN Driver Major Version") +LANdriverMaximumSize = uint32("lan_drv_max_size", "LAN Driver Maximum Size") +LANdriverMaxRecvSize = uint32("lan_drv_max_rcv_size", "LAN Driver Maximum Receive Size") +LANdriverMediaID = uint16("lan_drv_media_id", "LAN Driver Media ID") +LANdriverMediaType = fw_string("lan_drv_media_type", "LAN Driver Media Type", 40) +LANdriverMemoryDecode0 = uint32("lan_drv_mem_decode_0", "LAN Driver Memory Decode 0") +LANdriverMemoryDecode1 = uint32("lan_drv_mem_decode_1", "LAN Driver Memory Decode 1") +LANdriverMemoryLength0 = uint16("lan_drv_mem_length_0", "LAN Driver Memory Length 0") +LANdriverMemoryLength1 = uint16("lan_drv_mem_length_1", "LAN Driver Memory Length 1") +LANdriverMinorVersion = uint8("lan_drv_minor_ver", "LAN Driver Minor Version") +LANdriverModeFlags = val_string8("lan_dvr_mode_flags", "LAN Driver Mode Flags", [ + [0x80, "Canonical Address" ], + [0x81, "Canonical Address" ], + [0x82, "Canonical Address" ], + [0x83, "Canonical Address" ], + [0x84, "Canonical Address" ], + [0x85, "Canonical Address" ], + [0x86, "Canonical Address" ], + [0x87, "Canonical Address" ], + [0x88, "Canonical Address" ], + [0x89, "Canonical Address" ], + [0x8a, "Canonical Address" ], + [0x8b, "Canonical Address" ], + [0x8c, "Canonical Address" ], + [0x8d, "Canonical Address" ], + [0x8e, "Canonical Address" ], + [0x8f, "Canonical Address" ], + [0x90, "Canonical Address" ], + [0x91, "Canonical Address" ], + [0x92, "Canonical Address" ], + [0x93, "Canonical Address" ], + [0x94, "Canonical Address" ], + [0x95, "Canonical Address" ], + [0x96, "Canonical Address" ], + [0x97, "Canonical Address" ], + [0x98, "Canonical Address" ], + [0x99, "Canonical Address" ], + [0x9a, "Canonical Address" ], + [0x9b, "Canonical Address" ], + [0x9c, "Canonical Address" ], + [0x9d, "Canonical Address" ], + [0x9e, "Canonical Address" ], + [0x9f, "Canonical Address" ], + [0xa0, 
"Canonical Address" ], + [0xa1, "Canonical Address" ], + [0xa2, "Canonical Address" ], + [0xa3, "Canonical Address" ], + [0xa4, "Canonical Address" ], + [0xa5, "Canonical Address" ], + [0xa6, "Canonical Address" ], + [0xa7, "Canonical Address" ], + [0xa8, "Canonical Address" ], + [0xa9, "Canonical Address" ], + [0xaa, "Canonical Address" ], + [0xab, "Canonical Address" ], + [0xac, "Canonical Address" ], + [0xad, "Canonical Address" ], + [0xae, "Canonical Address" ], + [0xaf, "Canonical Address" ], + [0xb0, "Canonical Address" ], + [0xb1, "Canonical Address" ], + [0xb2, "Canonical Address" ], + [0xb3, "Canonical Address" ], + [0xb4, "Canonical Address" ], + [0xb5, "Canonical Address" ], + [0xb6, "Canonical Address" ], + [0xb7, "Canonical Address" ], + [0xb8, "Canonical Address" ], + [0xb9, "Canonical Address" ], + [0xba, "Canonical Address" ], + [0xbb, "Canonical Address" ], + [0xbc, "Canonical Address" ], + [0xbd, "Canonical Address" ], + [0xbe, "Canonical Address" ], + [0xbf, "Canonical Address" ], + [0xc0, "Non-Canonical Address" ], + [0xc1, "Non-Canonical Address" ], + [0xc2, "Non-Canonical Address" ], + [0xc3, "Non-Canonical Address" ], + [0xc4, "Non-Canonical Address" ], + [0xc5, "Non-Canonical Address" ], + [0xc6, "Non-Canonical Address" ], + [0xc7, "Non-Canonical Address" ], + [0xc8, "Non-Canonical Address" ], + [0xc9, "Non-Canonical Address" ], + [0xca, "Non-Canonical Address" ], + [0xcb, "Non-Canonical Address" ], + [0xcc, "Non-Canonical Address" ], + [0xcd, "Non-Canonical Address" ], + [0xce, "Non-Canonical Address" ], + [0xcf, "Non-Canonical Address" ], + [0xd0, "Non-Canonical Address" ], + [0xd1, "Non-Canonical Address" ], + [0xd2, "Non-Canonical Address" ], + [0xd3, "Non-Canonical Address" ], + [0xd4, "Non-Canonical Address" ], + [0xd5, "Non-Canonical Address" ], + [0xd6, "Non-Canonical Address" ], + [0xd7, "Non-Canonical Address" ], + [0xd8, "Non-Canonical Address" ], + [0xd9, "Non-Canonical Address" ], + [0xda, "Non-Canonical Address" ], + [0xdb, "Non-Canonical Address" ], + [0xdc, "Non-Canonical Address" ], + [0xdd, "Non-Canonical Address" ], + [0xde, "Non-Canonical Address" ], + [0xdf, "Non-Canonical Address" ], + [0xe0, "Non-Canonical Address" ], + [0xe1, "Non-Canonical Address" ], + [0xe2, "Non-Canonical Address" ], + [0xe3, "Non-Canonical Address" ], + [0xe4, "Non-Canonical Address" ], + [0xe5, "Non-Canonical Address" ], + [0xe6, "Non-Canonical Address" ], + [0xe7, "Non-Canonical Address" ], + [0xe8, "Non-Canonical Address" ], + [0xe9, "Non-Canonical Address" ], + [0xea, "Non-Canonical Address" ], + [0xeb, "Non-Canonical Address" ], + [0xec, "Non-Canonical Address" ], + [0xed, "Non-Canonical Address" ], + [0xee, "Non-Canonical Address" ], + [0xef, "Non-Canonical Address" ], + [0xf0, "Non-Canonical Address" ], + [0xf1, "Non-Canonical Address" ], + [0xf2, "Non-Canonical Address" ], + [0xf3, "Non-Canonical Address" ], + [0xf4, "Non-Canonical Address" ], + [0xf5, "Non-Canonical Address" ], + [0xf6, "Non-Canonical Address" ], + [0xf7, "Non-Canonical Address" ], + [0xf8, "Non-Canonical Address" ], + [0xf9, "Non-Canonical Address" ], + [0xfa, "Non-Canonical Address" ], + [0xfb, "Non-Canonical Address" ], + [0xfc, "Non-Canonical Address" ], + [0xfd, "Non-Canonical Address" ], + [0xfe, "Non-Canonical Address" ], + [0xff, "Non-Canonical Address" ], +]) +LANDriverNumber = uint8("lan_driver_number", "LAN Driver Number") +LANdriverNodeAddress = bytes("lan_dvr_node_addr", "LAN Driver Node Address", 6) +LANdriverRecvSize = uint32("lan_drv_rcv_size", "LAN Driver Receive Size") 
+LANdriverReserved = uint16("lan_drv_reserved", "LAN Driver Reserved") +LANdriverSendRetries = uint16("lan_drv_snd_retries", "LAN Driver Send Retries") +LANdriverSharingFlags = uint16("lan_drv_share", "LAN Driver Sharing Flags") +LANdriverShortName = fw_string("lan_drv_short_name", "LAN Driver Short Name", 40) +LANdriverSlot = uint16("lan_drv_slot", "LAN Driver Slot") +LANdriverSrcRouting = uint32("lan_drv_src_route", "LAN Driver Source Routing") +LANdriverTransportTime = uint16("lan_drv_trans_time", "LAN Driver Transport Time") +LastAccessedDate = uint16("last_access_date", "Last Accessed Date") +LastAccessedDate.NWDate() +LastAccessedTime = uint16("last_access_time", "Last Accessed Time") +LastAccessedTime.NWTime() +LastGarbCollect = uint32("last_garbage_collect", "Last Garbage Collection") +LastInstance = uint32("last_instance", "Last Instance") +LastRecordSeen = uint16("last_record_seen", "Last Record Seen") +LastSearchIndex = uint16("last_search_index", "Search Index") +LastSeen = uint32("last_seen", "Last Seen") +LastSequenceNumber = uint16("last_sequence_number", "Sequence Number") +Length64bit = bytes("length_64bit", "64bit Length", 64) +Level = uint8("level", "Level") +LFSCounters = uint32("lfs_counters", "LFS Counters") +LimboDataStreamsCount = uint32("limbo_data_streams_count", "Limbo Data Streams Count") +limbCount = uint32("limb_count", "Limb Count") +limbFlags = bitfield32("limb_flags", "Limb Flags", [ + bf_boolean32(0x00000002, "scan_entire_folder", "Wild Search"), + bf_boolean32(0x00000004, "scan_files_only", "Scan Files Only"), + bf_boolean32(0x00000008, "scan_folders_only", "Scan Folders Only"), + bf_boolean32(0x00000010, "allow_system", "Allow System Files and Folders"), + bf_boolean32(0x00000020, "allow_hidden", "Allow Hidden Files and Folders"), +]) + +limbScanNum = uint32("limb_scan_num", "Limb Scan Number") +LimboUsed = uint32("limbo_used", "Limbo Used") +LoadedNameSpaces = uint8("loaded_name_spaces", "Loaded Name Spaces") +LocalConnectionID = uint32("local_connection_id", "Local Connection ID") +LocalConnectionID.Display("BASE_HEX") +LocalMaxPacketSize = uint32("local_max_packet_size", "Local Max Packet Size") +LocalMaxSendSize = uint32("local_max_send_size", "Local Max Send Size") +LocalMaxRecvSize = uint32("local_max_recv_size", "Local Max Recv Size") +LocalLoginInfoCcode = uint8("local_login_info_ccode", "Local Login Info C Code") +LocalTargetSocket = uint32("local_target_socket", "Local Target Socket") +LocalTargetSocket.Display("BASE_HEX") +LockAreaLen = uint32("lock_area_len", "Lock Area Length") +LockAreasStartOffset = uint32("lock_areas_start_offset", "Lock Areas Start Offset") +LockTimeout = uint16("lock_timeout", "Lock Timeout") +Locked = val_string8("locked", "Locked Flag", [ + [ 0x00, "Not Locked Exclusively" ], + [ 0x01, "Locked Exclusively" ], +]) +LockFlag = val_string8("lock_flag", "Lock Flag", [ + [ 0x00, "Not Locked, Log for Future Exclusive Lock" ], + [ 0x01, "Exclusive Lock (Read/Write)" ], + [ 0x02, "Log for Future Shared Lock"], + [ 0x03, "Shareable Lock (Read-Only)" ], + [ 0xfe, "Locked by a File Lock" ], + [ 0xff, "Locked by Begin Share File Set" ], +]) +LockName = nstring8("lock_name", "Lock Name") +LockStatus = val_string8("lock_status", "Lock Status", [ + [ 0x00, "Locked Exclusive" ], + [ 0x01, "Locked Shareable" ], + [ 0x02, "Logged" ], + [ 0x06, "Lock is Held by TTS"], +]) +ConnLockStatus = val_string8("conn_lock_status", "Lock Status", [ + [ 0x00, "Normal (connection free to run)" ], + [ 0x01, "Waiting on physical record lock" ], + [ 
0x02, "Waiting on a file lock" ], + [ 0x03, "Waiting on a logical record lock"], + [ 0x04, "Waiting on a semaphore"], +]) +LockType = val_string8("lock_type", "Lock Type", [ + [ 0x00, "Locked" ], + [ 0x01, "Open Shareable" ], + [ 0x02, "Logged" ], + [ 0x03, "Open Normal" ], + [ 0x06, "TTS Holding Lock" ], + [ 0x07, "Transaction Flag Set on This File" ], +]) +LogFileFlagHigh = bitfield8("log_file_flag_high", "Log File Flag (byte 2)", [ + bf_boolean8(0x80, "log_flag_call_back", "Call Back Requested" ), +]) +LogFileFlagLow = bitfield8("log_file_flag_low", "Log File Flag", [ + bf_boolean8(0x01, "log_flag_lock_file", "Lock File Immediately" ), +]) +LoggedObjectID = uint32("logged_object_id", "Logged in Object ID") +LoggedObjectID.Display("BASE_HEX") +LoggedCount = uint16("logged_count", "Logged Count") +LogicalConnectionNumber = uint16("logical_connection_number", "Logical Connection Number", ENC_BIG_ENDIAN) +LogicalDriveCount = uint8("logical_drive_count", "Logical Drive Count") +LogicalDriveNumber = uint8("logical_drive_number", "Logical Drive Number") +LogicalLockThreshold = uint8("logical_lock_threshold", "LogicalLockThreshold") +LogicalRecordName = nstring8("logical_record_name", "Logical Record Name") +LoginKey = bytes("login_key", "Login Key", 8) +LogLockType = uint8("log_lock_type", "Log Lock Type") +LogTtlRxPkts = uint32("log_ttl_rx_pkts", "Total Received Packets") +LogTtlTxPkts = uint32("log_ttl_tx_pkts", "Total Transmitted Packets") +LongName = fw_string("long_name", "Long Name", 32) +LRUBlockWasDirty = uint16("lru_block_was_dirty", "LRU Block Was Dirty") + +MacAttr = bitfield16("mac_attr", "Attributes", [ + bf_boolean16(0x0001, "mac_attr_smode1", "Search Mode"), + bf_boolean16(0x0002, "mac_attr_smode2", "Search Mode"), + bf_boolean16(0x0004, "mac_attr_smode3", "Search Mode"), + bf_boolean16(0x0010, "mac_attr_transaction", "Transaction"), + bf_boolean16(0x0020, "mac_attr_index", "Index"), + bf_boolean16(0x0040, "mac_attr_r_audit", "Read Audit"), + bf_boolean16(0x0080, "mac_attr_w_audit", "Write Audit"), + bf_boolean16(0x0100, "mac_attr_r_only", "Read Only"), + bf_boolean16(0x0200, "mac_attr_hidden", "Hidden"), + bf_boolean16(0x0400, "mac_attr_system", "System"), + bf_boolean16(0x0800, "mac_attr_execute_only", "Execute Only"), + bf_boolean16(0x1000, "mac_attr_subdirectory", "Subdirectory"), + bf_boolean16(0x2000, "mac_attr_archive", "Archive"), + bf_boolean16(0x8000, "mac_attr_share", "Shareable File"), +]) +MACBackupDate = uint16("mac_backup_date", "Mac Backup Date") +MACBackupDate.NWDate() +MACBackupTime = uint16("mac_backup_time", "Mac Backup Time") +MACBackupTime.NWTime() +MacBaseDirectoryID = uint32("mac_base_directory_id", "Mac Base Directory ID", ENC_BIG_ENDIAN) +MacBaseDirectoryID.Display("BASE_HEX") +MACCreateDate = uint16("mac_create_date", "Mac Create Date") +MACCreateDate.NWDate() +MACCreateTime = uint16("mac_create_time", "Mac Create Time") +MACCreateTime.NWTime() +MacDestinationBaseID = uint32("mac_destination_base_id", "Mac Destination Base ID") +MacDestinationBaseID.Display("BASE_HEX") +MacFinderInfo = bytes("mac_finder_info", "Mac Finder Information", 32) +MacLastSeenID = uint32("mac_last_seen_id", "Mac Last Seen ID") +MacLastSeenID.Display("BASE_HEX") +MacSourceBaseID = uint32("mac_source_base_id", "Mac Source Base ID") +MacSourceBaseID.Display("BASE_HEX") +MajorVersion = uint32("major_version", "Major Version") +MaxBytes = uint16("max_bytes", "Maximum Number of Bytes") +MaxDataStreams = uint32("max_data_streams", "Maximum Data Streams") +MaxDirDepth = 
uint32("max_dir_depth", "Maximum Directory Depth") +MaximumSpace = uint16("max_space", "Maximum Space") +MaxNumOfConn = uint32("max_num_of_conn", "Maximum Number of Connections") +MaxNumOfLANS = uint32("max_num_of_lans", "Maximum Number Of LAN's") +MaxNumOfMedias = uint32("max_num_of_medias", "Maximum Number Of Media's") +MaxNumOfNmeSps = uint32("max_num_of_nme_sps", "Maximum Number Of Name Spaces") +MaxNumOfSpoolPr = uint32("max_num_of_spool_pr", "Maximum Number Of Spool Printers") +MaxNumOfStacks = uint32("max_num_of_stacks", "Maximum Number Of Stacks") +MaxNumOfUsers = uint32("max_num_of_users", "Maximum Number Of Users") +MaxNumOfVol = uint32("max_num_of_vol", "Maximum Number of Volumes") +MaxReadDataReplySize = uint16("max_read_data_reply_size", "Max Read Data Reply Size") +MaxSpace = uint32("maxspace", "Maximum Space") +MaxSpace64 = uint64("maxspace64", "Maximum Space") +MaxUsedDynamicSpace = uint32("max_used_dynamic_space", "Max Used Dynamic Space") +MediaList = uint32("media_list", "Media List") +MediaListCount = uint32("media_list_count", "Media List Count") +MediaName = nstring8("media_name", "Media Name") +MediaNumber = uint32("media_number", "Media Number") +MaxReplyObjectIDCount = uint8("max_reply_obj_id_count", "Max Reply Object ID Count") +MediaObjectType = val_string8("media_object_type", "Object Type", [ + [ 0x00, "Adapter" ], + [ 0x01, "Changer" ], + [ 0x02, "Removable Device" ], + [ 0x03, "Device" ], + [ 0x04, "Removable Media" ], + [ 0x05, "Partition" ], + [ 0x06, "Slot" ], + [ 0x07, "Hotfix" ], + [ 0x08, "Mirror" ], + [ 0x09, "Parity" ], + [ 0x0a, "Volume Segment" ], + [ 0x0b, "Volume" ], + [ 0x0c, "Clone" ], + [ 0x0d, "Fixed Media" ], + [ 0x0e, "Unknown" ], +]) +MemberName = nstring8("member_name", "Member Name") +MemberType = val_string16("member_type", "Member Type", [ + [ 0x0000, "Unknown" ], + [ 0x0001, "User" ], + [ 0x0002, "User group" ], + [ 0x0003, "Print queue" ], + [ 0x0004, "NetWare file server" ], + [ 0x0005, "Job server" ], + [ 0x0006, "Gateway" ], + [ 0x0007, "Print server" ], + [ 0x0008, "Archive queue" ], + [ 0x0009, "Archive server" ], + [ 0x000a, "Job queue" ], + [ 0x000b, "Administration" ], + [ 0x0021, "NAS SNA gateway" ], + [ 0x0026, "Remote bridge server" ], + [ 0x0027, "TCP/IP gateway" ], +]) +MessageLanguage = uint32("message_language", "NLM Language") +MigratedFiles = uint32("migrated_files", "Migrated Files") +MigratedSectors = uint32("migrated_sectors", "Migrated Sectors") +MinorVersion = uint32("minor_version", "Minor Version") +MinSpaceLeft64 = uint64("min_space_left64", "Minimum Space Left") +Minute = uint8("s_minute", "Minutes") +MixedModePathFlag = val_string8("mixed_mode_path_flag", "Mixed Mode Path Flag", [ + [ 0x00, "Mixed mode path handling is not available"], + [ 0x01, "Mixed mode path handling is available"], +]) +ModifiedDate = uint16("modified_date", "Modified Date") +ModifiedDate.NWDate() +ModifiedTime = uint16("modified_time", "Modified Time") +ModifiedTime.NWTime() +ModifierID = uint32("modifier_id", "Modifier ID", ENC_BIG_ENDIAN) +ModifierID.Display("BASE_HEX") +ModifyDOSInfoMask = bitfield16("modify_dos_info_mask", "Modify DOS Info Mask", [ + bf_boolean16(0x0002, "modify_dos_read", "Attributes"), + bf_boolean16(0x0004, "modify_dos_write", "Creation Date"), + bf_boolean16(0x0008, "modify_dos_open", "Creation Time"), + bf_boolean16(0x0010, "modify_dos_create", "Creator ID"), + bf_boolean16(0x0020, "modify_dos_delete", "Archive Date"), + bf_boolean16(0x0040, "modify_dos_parent", "Archive Time"), + bf_boolean16(0x0080, 
"modify_dos_search", "Archiver ID"), + bf_boolean16(0x0100, "modify_dos_mdate", "Modify Date"), + bf_boolean16(0x0200, "modify_dos_mtime", "Modify Time"), + bf_boolean16(0x0400, "modify_dos_mid", "Modifier ID"), + bf_boolean16(0x0800, "modify_dos_laccess", "Last Access"), + bf_boolean16(0x1000, "modify_dos_inheritance", "Inheritance"), + bf_boolean16(0x2000, "modify_dos_max_space", "Maximum Space"), +]) +Month = val_string8("s_month", "Month", [ + [ 0x01, "January"], + [ 0x02, "February"], + [ 0x03, "March"], + [ 0x04, "April"], + [ 0x05, "May"], + [ 0x06, "June"], + [ 0x07, "July"], + [ 0x08, "August"], + [ 0x09, "September"], + [ 0x0a, "October"], + [ 0x0b, "November"], + [ 0x0c, "December"], +]) + +MoreFlag = val_string8("more_flag", "More Flag", [ + [ 0x00, "No More Segments/Entries Available" ], + [ 0x01, "More Segments/Entries Available" ], + [ 0xff, "More Segments/Entries Available" ], +]) +MoreProperties = val_string8("more_properties", "More Properties", [ + [ 0x00, "No More Properties Available" ], + [ 0x01, "No More Properties Available" ], + [ 0xff, "More Properties Available" ], +]) + +Name = nstring8("name", "Name") +Name12 = fw_string("name12", "Name", 12) +NameLen = uint8("name_len", "Name Space Length") +NameLength = uint8("name_length", "Name Length") +NameList = uint32("name_list", "Name List") +# +# XXX - should this value be used to interpret the characters in names, +# search patterns, and the like? +# +# We need to handle character sets better, e.g. translating strings +# from whatever character set they are in the packet (DOS/Windows code +# pages, ISO character sets, UNIX EUC character sets, UTF-8, UCS-2/Unicode, +# Mac character sets, etc.) into UCS-4 or UTF-8 and storing them as such +# in the protocol tree, and displaying them as best we can. 
+# +NameSpace = val_string8("name_space", "Name Space", [ + [ 0x00, "DOS" ], + [ 0x01, "MAC" ], + [ 0x02, "NFS" ], + [ 0x03, "FTAM" ], + [ 0x04, "OS/2, Long" ], +]) +NamesSpaceInfoMask = bitfield16("ns_info_mask", "Names Space Info Mask", [ + bf_boolean16(0x0001, "ns_info_mask_modify", "Modify Name"), + bf_boolean16(0x0002, "ns_info_mask_fatt", "File Attributes"), + bf_boolean16(0x0004, "ns_info_mask_cdate", "Creation Date"), + bf_boolean16(0x0008, "ns_info_mask_ctime", "Creation Time"), + bf_boolean16(0x0010, "ns_info_mask_owner", "Owner ID"), + bf_boolean16(0x0020, "ns_info_mask_adate", "Archive Date"), + bf_boolean16(0x0040, "ns_info_mask_atime", "Archive Time"), + bf_boolean16(0x0080, "ns_info_mask_aid", "Archiver ID"), + bf_boolean16(0x0100, "ns_info_mask_udate", "Update Date"), + bf_boolean16(0x0200, "ns_info_mask_utime", "Update Time"), + bf_boolean16(0x0400, "ns_info_mask_uid", "Update ID"), + bf_boolean16(0x0800, "ns_info_mask_acc_date", "Access Date"), + bf_boolean16(0x1000, "ns_info_mask_max_acc_mask", "Inheritance"), + bf_boolean16(0x2000, "ns_info_mask_max_space", "Maximum Space"), +]) +NameSpaceName = nstring8("name_space_name", "Name Space Name") +nameType = uint32("name_type", "nameType") +NCPdataSize = uint32("ncp_data_size", "NCP Data Size") +NCPEncodedStringsBits = uint32("ncp_encoded_strings_bits", "NCP Encoded Strings Bits") +NCPextensionMajorVersion = uint8("ncp_extension_major_version", "NCP Extension Major Version") +NCPextensionMinorVersion = uint8("ncp_extension_minor_version", "NCP Extension Minor Version") +NCPextensionName = nstring8("ncp_extension_name", "NCP Extension Name") +NCPextensionNumber = uint32("ncp_extension_number", "NCP Extension Number") +NCPextensionNumber.Display("BASE_HEX") +NCPExtensionNumbers = uint32("ncp_extension_numbers", "NCP Extension Numbers") +NCPextensionRevisionNumber = uint8("ncp_extension_revision_number", "NCP Extension Revision Number") +NCPPeakStaInUse = uint32("ncp_peak_sta_in_use", "Peak Number of Connections since Server was brought up") +NCPStaInUseCnt = uint32("ncp_sta_in_use", "Number of Workstations Connected to Server") +NDSRequestFlags = bitfield16("nds_request_flags", "NDS Request Flags", [ + bf_boolean16(0x0001, "nds_request_flags_output", "Output Fields"), + bf_boolean16(0x0002, "nds_request_flags_no_such_entry", "No Such Entry"), + bf_boolean16(0x0004, "nds_request_flags_local_entry", "Local Entry"), + bf_boolean16(0x0008, "nds_request_flags_type_ref", "Type Referral"), + bf_boolean16(0x0010, "nds_request_flags_alias_ref", "Alias Referral"), + bf_boolean16(0x0020, "nds_request_flags_req_cnt", "Request Count"), + bf_boolean16(0x0040, "nds_request_flags_req_data_size", "Request Data Size"), + bf_boolean16(0x0080, "nds_request_flags_reply_data_size", "Reply Data Size"), + bf_boolean16(0x0100, "nds_request_flags_trans_ref", "Transport Referral"), + bf_boolean16(0x0200, "nds_request_flags_trans_ref2", "Transport Referral"), + bf_boolean16(0x0400, "nds_request_flags_up_ref", "Up Referral"), + bf_boolean16(0x0800, "nds_request_flags_dn_ref", "Down Referral"), +]) +NDSStatus = uint32("nds_status", "NDS Status") +NetBIOSBroadcastWasPropagated = uint32("netbios_broadcast_was_propagated", "NetBIOS Broadcast Was Propagated") +NetIDNumber = uint32("net_id_number", "Net ID Number") +NetIDNumber.Display("BASE_HEX") +NetAddress = nbytes32("address", "Address") +NetStatus = uint16("net_status", "Network Status") +NetWareAccessHandle = bytes("netware_access_handle", "NetWare Access Handle", 6) +NetworkAddress = 
uint32("network_address", "Network Address") +NetworkAddress.Display("BASE_HEX") +NetworkNodeAddress = bytes("network_node_address", "Network Node Address", 6) +NetworkNumber = uint32("network_number", "Network Number") +NetworkNumber.Display("BASE_HEX") +# +# XXX - this should have the "ipx_socket_vals" value_string table +# from "packet-ipx.c". +# +NetworkSocket = uint16("network_socket", "Network Socket") +NetworkSocket.Display("BASE_HEX") +NewAccessRights = bitfield16("new_access_rights_mask", "New Access Rights", [ + bf_boolean16(0x0001, "new_access_rights_read", "Read"), + bf_boolean16(0x0002, "new_access_rights_write", "Write"), + bf_boolean16(0x0004, "new_access_rights_open", "Open"), + bf_boolean16(0x0008, "new_access_rights_create", "Create"), + bf_boolean16(0x0010, "new_access_rights_delete", "Delete"), + bf_boolean16(0x0020, "new_access_rights_parental", "Parental"), + bf_boolean16(0x0040, "new_access_rights_search", "Search"), + bf_boolean16(0x0080, "new_access_rights_modify", "Modify"), + bf_boolean16(0x0100, "new_access_rights_supervisor", "Supervisor"), +]) +NewDirectoryID = uint32("new_directory_id", "New Directory ID", ENC_BIG_ENDIAN) +NewDirectoryID.Display("BASE_HEX") +NewEAHandle = uint32("new_ea_handle", "New EA Handle") +NewEAHandle.Display("BASE_HEX") +NewFileName = fw_string("new_file_name", "New File Name", 14) +NewFileNameLen = nstring8("new_file_name_len", "New File Name") +NewFileSize = uint32("new_file_size", "New File Size") +NewPassword = nstring8("new_password", "New Password") +NewPath = nstring8("new_path", "New Path") +NewPosition = uint8("new_position", "New Position") +NewObjectName = nstring8("new_object_name", "New Object Name") +NextCntBlock = uint32("next_cnt_block", "Next Count Block") +NextHugeStateInfo = bytes("next_huge_state_info", "Next Huge State Info", 16) +nextLimbScanNum = uint32("next_limb_scan_num", "Next Limb Scan Number") +NextObjectID = uint32("next_object_id", "Next Object ID", ENC_BIG_ENDIAN) +NextObjectID.Display("BASE_HEX") +NextRecord = uint32("next_record", "Next Record") +NextRequestRecord = uint16("next_request_record", "Next Request Record") +NextSearchIndex = uint16("next_search_index", "Next Search Index") +NextSearchNumber = uint16("next_search_number", "Next Search Number") +NextSearchNum = uint32("nxt_search_num", "Next Search Number") +nextStartingNumber = uint32("next_starting_number", "Next Starting Number") +NextTrusteeEntry = uint32("next_trustee_entry", "Next Trustee Entry") +NextVolumeNumber = uint32("next_volume_number", "Next Volume Number") +NLMBuffer = nstring8("nlm_buffer", "Buffer") +NLMcount = uint32("nlm_count", "NLM Count") +NLMFlags = bitfield8("nlm_flags", "Flags", [ + bf_boolean8(0x01, "nlm_flags_reentrant", "ReEntrant"), + bf_boolean8(0x02, "nlm_flags_multiple", "Can Load Multiple Times"), + bf_boolean8(0x04, "nlm_flags_synchronize", "Synchronize Start"), + bf_boolean8(0x08, "nlm_flags_pseudo", "PseudoPreemption"), +]) +NLMLoadOptions = uint32("nlm_load_options", "NLM Load Options") +NLMName = stringz("nlm_name_stringz", "NLM Name") +NLMNumber = uint32("nlm_number", "NLM Number") +NLMNumbers = uint32("nlm_numbers", "NLM Numbers") +NLMsInList = uint32("nlms_in_list", "NLM's in List") +NLMStartNumber = uint32("nlm_start_num", "NLM Start Number") +NLMType = val_string8("nlm_type", "NLM Type", [ + [ 0x00, "Generic NLM (.NLM)" ], + [ 0x01, "LAN Driver (.LAN)" ], + [ 0x02, "Disk Driver (.DSK)" ], + [ 0x03, "Name Space Support Module (.NAM)" ], + [ 0x04, "Utility or Support Program (.NLM)" ], + [ 0x05, 
"Mirrored Server Link (.MSL)" ], + [ 0x06, "OS NLM (.NLM)" ], + [ 0x07, "Paged High OS NLM (.NLM)" ], + [ 0x08, "Host Adapter Module (.HAM)" ], + [ 0x09, "Custom Device Module (.CDM)" ], + [ 0x0a, "File System Engine (.NLM)" ], + [ 0x0b, "Real Mode NLM (.NLM)" ], + [ 0x0c, "Hidden NLM (.NLM)" ], + [ 0x15, "NICI Support (.NLM)" ], + [ 0x16, "NICI Support (.NLM)" ], + [ 0x17, "Cryptography (.NLM)" ], + [ 0x18, "Encryption (.NLM)" ], + [ 0x19, "NICI Support (.NLM)" ], + [ 0x1c, "NICI Support (.NLM)" ], +]) +nodeFlags = uint32("node_flags", "Node Flags") +nodeFlags.Display("BASE_HEX") +NoMoreMemAvlCnt = uint32("no_more_mem_avail", "No More Memory Available Count") +NonDedFlag = boolean8("non_ded_flag", "Non Dedicated Flag") +NonFreeableAvailableSubAllocSectors = uint32("non_freeable_avail_sub_alloc_sectors", "Non Freeable Available Sub Alloc Sectors") +NonFreeableLimboSectors = uint32("non_freeable_limbo_sectors", "Non Freeable Limbo Sectors") +NotUsableSubAllocSectors = uint32("not_usable_sub_alloc_sectors", "Not Usable Sub Alloc Sectors") +NotYetPurgeableBlocks = uint32("not_yet_purgeable_blocks", "Not Yet Purgeable Blocks") +NSInfoBitMask = uint32("ns_info_bit_mask", "Name Space Info Bit Mask") +NSSOAllInFlags = bitfield32("nsso_all_in_flags", "SecretStore All Input Flags",[ + bf_boolean32(0x00000010, "nsso_all_unicode", "Unicode Data"), + bf_boolean32(0x00000080, "nsso_set_tree", "Set Tree"), + bf_boolean32(0x00000200, "nsso_destroy_ctx", "Destroy Context"), +]) +NSSOGetServiceInFlags = bitfield32("nsso_get_svc_in_flags", "SecretStore Get Service Flags",[ + bf_boolean32(0x00000100, "nsso_get_ctx", "Get Context"), +]) +NSSOReadInFlags = bitfield32("nsso_read_in_flags", "SecretStore Read Flags",[ + bf_boolean32(0x00000001, "nsso_rw_enh_prot", "Read/Write Enhanced Protection"), + bf_boolean32(0x00000008, "nsso_repair", "Repair SecretStore"), +]) +NSSOReadOrUnlockInFlags = bitfield32("nsso_read_or_unlock_in_flags", "SecretStore Read or Unlock Flags",[ + bf_boolean32(0x00000004, "nsso_ep_master_pwd", "Master Password used instead of ENH Password"), +]) +NSSOUnlockInFlags = bitfield32("nsso_unlock_in_flags", "SecretStore Unlock Flags",[ + bf_boolean32(0x00000004, "nsso_rmv_lock", "Remove Lock from Store"), +]) +NSSOWriteInFlags = bitfield32("nsso_write_in_flags", "SecretStore Write Flags",[ + bf_boolean32(0x00000001, "nsso_enh_prot", "Enhanced Protection"), + bf_boolean32(0x00000002, "nsso_create_id", "Create ID"), + bf_boolean32(0x00000040, "nsso_ep_pwd_used", "Enhanced Protection Password Used"), +]) +NSSOContextOutFlags = bitfield32("nsso_cts_out_flags", "Type of Context",[ + bf_boolean32(0x00000001, "nsso_ds_ctx", "DSAPI Context"), + bf_boolean32(0x00000080, "nsso_ldap_ctx", "LDAP Context"), + bf_boolean32(0x00000200, "nsso_dc_ctx", "Reserved"), +]) +NSSOGetServiceOutFlags = bitfield32("nsso_get_svc_out_flags", "SecretStore Status Flags",[ + bf_boolean32(0x00400000, "nsso_mstr_pwd", "Master Password Present"), +]) +NSSOGetServiceReadOutFlags = bitfield32("nsso_get_svc_read_out_flags", "SecretStore Status Flags",[ + bf_boolean32(0x00800000, "nsso_mp_disabled", "Master Password Disabled"), +]) +NSSOReadOutFlags = bitfield32("nsso_read_out_flags", "SecretStore Read Flags",[ + bf_boolean32(0x00010000, "nsso_secret_locked", "Enhanced Protection Lock on Secret"), + bf_boolean32(0x00020000, "nsso_secret_not_init", "Secret Not Yet Initialized"), + bf_boolean32(0x00040000, "nsso_secret_marked", "Secret Marked for Enhanced Protection"), + bf_boolean32(0x00080000, "nsso_secret_not_sync", "Secret Not 
Yet Synchronized in NDS"), + bf_boolean32(0x00200000, "nsso_secret_enh_pwd", "Enhanced Protection Password on Secret"), +]) +NSSOReadOutStatFlags = bitfield32("nsso_read_out_stat_flags", "SecretStore Read Status Flags",[ + bf_boolean32(0x00100000, "nsso_admin_mod", "Admin Modified Secret Last"), +]) +NSSOVerb = val_string8("nsso_verb", "SecretStore Verb", [ + [ 0x00, "Query Server" ], + [ 0x01, "Read App Secrets" ], + [ 0x02, "Write App Secrets" ], + [ 0x03, "Add Secret ID" ], + [ 0x04, "Remove Secret ID" ], + [ 0x05, "Remove SecretStore" ], + [ 0x06, "Enumerate SecretID's" ], + [ 0x07, "Unlock Store" ], + [ 0x08, "Set Master Password" ], + [ 0x09, "Get Service Information" ], +]) +NSSpecificInfo = fw_string("ns_specific_info", "Name Space Specific Info", 512) +NumberOfActiveTasks = uint8("num_of_active_tasks", "Number of Active Tasks") +NumberOfAllocs = uint32("num_of_allocs", "Number of Allocations") +NumberOfCPUs = uint32("number_of_cpus", "Number of CPU's") +NumberOfDataStreams = uint16("number_of_data_streams", "Number of Data Streams") +NumberOfDataStreamsLong = uint32("number_of_data_streams_long", "Number of Data Streams") +NumberOfDynamicMemoryAreas = uint16("number_of_dynamic_memory_areas", "Number Of Dynamic Memory Areas") +NumberOfEntries = uint8("number_of_entries", "Number of Entries") +NumberOfEntriesLong = uint32("number_of_entries_long", "Number of Entries") +NumberOfLocks = uint8("number_of_locks", "Number of Locks") +NumberOfMinutesToDelay = uint32("number_of_minutes_to_delay", "Number of Minutes to Delay") +NumberOfNCPExtensions = uint32("number_of_ncp_extensions", "Number Of NCP Extensions") +NumberOfNSLoaded = uint16("number_of_ns_loaded", "Number Of Name Spaces Loaded") +NumberOfProtocols = uint8("number_of_protocols", "Number of Protocols") +NumberOfRecords = uint16("number_of_records", "Number of Records") +NumberOfReferencedPublics = uint32("num_of_ref_publics", "Number of Referenced Public Symbols") +NumberOfSemaphores = uint16("number_of_semaphores", "Number Of Semaphores") +NumberOfServiceProcesses = uint8("number_of_service_processes", "Number Of Service Processes") +NumberOfSetCategories = uint32("number_of_set_categories", "Number Of Set Categories") +NumberOfSMs = uint32("number_of_sms", "Number Of Storage Medias") +NumberOfStations = uint8("number_of_stations", "Number of Stations") +NumBytes = uint16("num_bytes", "Number of Bytes") +NumBytesLong = uint32("num_bytes_long", "Number of Bytes") +NumOfCCinPkt = uint32("num_of_cc_in_pkt", "Number of Custom Counters in Packet") +NumOfChecks = uint32("num_of_checks", "Number of Checks") +NumOfEntries = uint32("num_of_entries", "Number of Entries") +NumOfFilesMigrated = uint32("num_of_files_migrated", "Number Of Files Migrated") +NumOfGarbageColl = uint32("num_of_garb_coll", "Number of Garbage Collections") +NumOfNCPReqs = uint32("num_of_ncp_reqs", "Number of NCP Requests since Server was brought up") +NumOfSegments = uint32("num_of_segments", "Number of Segments") + +ObjectCount = uint32("object_count", "Object Count") +ObjectFlags = val_string8("object_flags", "Object Flags", [ + [ 0x00, "Dynamic object" ], + [ 0x01, "Static object" ], +]) +ObjectHasProperties = val_string8("object_has_properites", "Object Has Properties", [ + [ 0x00, "No properties" ], + [ 0xff, "One or more properties" ], +]) +ObjectID = uint32("object_id", "Object ID", ENC_BIG_ENDIAN) +ObjectID.Display('BASE_HEX') +ObjectIDCount = uint16("object_id_count", "Object ID Count") +ObjectIDInfo = uint32("object_id_info", "Object Information") 
+ObjectInfoReturnCount = uint32("object_info_rtn_count", "Object Information Count") +ObjectName = nstring8("object_name", "Object Name") +ObjectNameLen = fw_string("object_name_len", "Object Name", 48) +ObjectNameStringz = stringz("object_name_stringz", "Object Name") +ObjectNumber = uint32("object_number", "Object Number") +ObjectSecurity = val_string8("object_security", "Object Security", [ + [ 0x00, "Object Read (Anyone) / Object Write (Anyone)" ], + [ 0x01, "Object Read (Logged in) / Object Write (Anyone)" ], + [ 0x02, "Object Read (Logged in as Object) / Object Write (Anyone)" ], + [ 0x03, "Object Read (Supervisor) / Object Write (Anyone)" ], + [ 0x04, "Object Read (Operating System Only) / Object Write (Anyone)" ], + [ 0x10, "Object Read (Anyone) / Object Write (Logged in)" ], + [ 0x11, "Object Read (Logged in) / Object Write (Logged in)" ], + [ 0x12, "Object Read (Logged in as Object) / Object Write (Logged in)" ], + [ 0x13, "Object Read (Supervisor) / Object Write (Logged in)" ], + [ 0x14, "Object Read (Operating System Only) / Object Write (Logged in)" ], + [ 0x20, "Object Read (Anyone) / Object Write (Logged in as Object)" ], + [ 0x21, "Object Read (Logged in) / Object Write (Logged in as Object)" ], + [ 0x22, "Object Read (Logged in as Object) / Object Write (Logged in as Object)" ], + [ 0x23, "Object Read (Supervisor) / Object Write (Logged in as Object)" ], + [ 0x24, "Object Read (Operating System Only) / Object Write (Logged in as Object)" ], + [ 0x30, "Object Read (Anyone) / Object Write (Supervisor)" ], + [ 0x31, "Object Read (Logged in) / Object Write (Supervisor)" ], + [ 0x32, "Object Read (Logged in as Object) / Object Write (Supervisor)" ], + [ 0x33, "Object Read (Supervisor) / Object Write (Supervisor)" ], + [ 0x34, "Object Read (Operating System Only) / Object Write (Supervisor)" ], + [ 0x40, "Object Read (Anyone) / Object Write (Operating System Only)" ], + [ 0x41, "Object Read (Logged in) / Object Write (Operating System Only)" ], + [ 0x42, "Object Read (Logged in as Object) / Object Write (Operating System Only)" ], + [ 0x43, "Object Read (Supervisor) / Object Write (Operating System Only)" ], + [ 0x44, "Object Read (Operating System Only) / Object Write (Operating System Only)" ], +]) +# +# XXX - should this use the "server_vals[]" value_string array from +# "packet-ipx.c"? +# +# XXX - should this list be merged with that list? There are some +# oddities, e.g. this list has 0x03f5 for "Microsoft SQL Server", but +# the list from "packet-ipx.c" has 0xf503 for that - is that just +# byte-order confusion? 
+# +ObjectType = val_string16("object_type", "Object Type", [ + [ 0x0000, "Unknown" ], + [ 0x0001, "User" ], + [ 0x0002, "User group" ], + [ 0x0003, "Print queue" ], + [ 0x0004, "NetWare file server" ], + [ 0x0005, "Job server" ], + [ 0x0006, "Gateway" ], + [ 0x0007, "Print server" ], + [ 0x0008, "Archive queue" ], + [ 0x0009, "Archive server" ], + [ 0x000a, "Job queue" ], + [ 0x000b, "Administration" ], + [ 0x0021, "NAS SNA gateway" ], + [ 0x0026, "Remote bridge server" ], + [ 0x0027, "TCP/IP gateway" ], + [ 0x0047, "Novell Print Server" ], + [ 0x004b, "Btrieve Server" ], + [ 0x004c, "NetWare SQL Server" ], + [ 0x0064, "ARCserve" ], + [ 0x0066, "ARCserve 3.0" ], + [ 0x0076, "NetWare SQL" ], + [ 0x00a0, "Gupta SQL Base Server" ], + [ 0x00a1, "Powerchute" ], + [ 0x0107, "NetWare Remote Console" ], + [ 0x01cb, "Shiva NetModem/E" ], + [ 0x01cc, "Shiva LanRover/E" ], + [ 0x01cd, "Shiva LanRover/T" ], + [ 0x01d8, "Castelle FAXPress Server" ], + [ 0x01da, "Castelle Print Server" ], + [ 0x01dc, "Castelle Fax Server" ], + [ 0x0200, "Novell SQL Server" ], + [ 0x023a, "NetWare Lanalyzer Agent" ], + [ 0x023c, "DOS Target Service Agent" ], + [ 0x023f, "NetWare Server Target Service Agent" ], + [ 0x024f, "Appletalk Remote Access Service" ], + [ 0x0263, "NetWare Management Agent" ], + [ 0x0264, "Global MHS" ], + [ 0x0265, "SNMP" ], + [ 0x026a, "NetWare Management/NMS Console" ], + [ 0x026b, "NetWare Time Synchronization" ], + [ 0x0273, "Nest Device" ], + [ 0x0274, "GroupWise Message Multiple Servers" ], + [ 0x0278, "NDS Replica Server" ], + [ 0x0282, "NDPS Service Registry Service" ], + [ 0x028a, "MPR/IPX Address Mapping Gateway" ], + [ 0x028b, "ManageWise" ], + [ 0x0293, "NetWare 6" ], + [ 0x030c, "HP JetDirect" ], + [ 0x0328, "Watcom SQL Server" ], + [ 0x0355, "Backup Exec" ], + [ 0x039b, "Lotus Notes" ], + [ 0x03e1, "Univel Server" ], + [ 0x03f5, "Microsoft SQL Server" ], + [ 0x055e, "Lexmark Print Server" ], + [ 0x0640, "Microsoft Gateway Services for NetWare" ], + [ 0x064e, "Microsoft Internet Information Server" ], + [ 0x077b, "Advantage Database Server" ], + [ 0x07a7, "Backup Exec Job Queue" ], + [ 0x07a8, "Backup Exec Job Manager" ], + [ 0x07a9, "Backup Exec Job Service" ], + [ 0x5555, "Site Lock" ], + [ 0x8202, "NDPS Broker" ], +]) +OCRetFlags = val_string8("o_c_ret_flags", "Open Create Return Flags", [ + [ 0x00, "No CallBack has been registered (No Op-Lock)" ], + [ 0x01, "Request has been registered for CallBack (Op-Lock)" ], +]) +OESServer = val_string8("oes_server", "Type of Novell Server", [ + [ 0x00, "NetWare" ], + [ 0x01, "OES" ], + [ 0x02, "OES 64bit" ], +]) + +OESLinuxOrNetWare = val_string8("oeslinux_or_netware", "Kernel Type", [ + [ 0x00, "NetWare" ], + [ 0x01, "Linux" ], +]) + +OldestDeletedFileAgeInTicks = uint32("oldest_deleted_file_age_in_ticks", "Oldest Deleted File Age in Ticks") +OldFileName = bytes("old_file_name", "Old File Name", 15) +OldFileSize = uint32("old_file_size", "Old File Size") +OpenCount = uint16("open_count", "Open Count") +OpenCreateAction = bitfield8("open_create_action", "Open Create Action", [ + bf_boolean8(0x01, "open_create_action_opened", "Opened"), + bf_boolean8(0x02, "open_create_action_created", "Created"), + bf_boolean8(0x04, "open_create_action_replaced", "Replaced"), + bf_boolean8(0x08, "open_create_action_compressed", "Compressed"), + bf_boolean8(0x80, "open_create_action_read_only", "Read Only"), +]) +OpenCreateMode = bitfield8("open_create_mode", "Open Create Mode", [ + bf_boolean8(0x01, "open_create_mode_open", "Open existing file (file must 
exist)"), + bf_boolean8(0x02, "open_create_mode_replace", "Replace existing file"), + bf_boolean8(0x08, "open_create_mode_create", "Create new file or subdirectory (file or subdirectory cannot exist)"), + bf_boolean8(0x20, "open_create_mode_64bit", "Open 64-bit Access"), + bf_boolean8(0x40, "open_create_mode_ro", "Open with Read Only Access"), + bf_boolean8(0x80, "open_create_mode_oplock", "Open Callback (Op-Lock)"), +]) +OpenForReadCount = uint16("open_for_read_count", "Open For Read Count") +OpenForWriteCount = uint16("open_for_write_count", "Open For Write Count") +OpenRights = bitfield8("open_rights", "Open Rights", [ + bf_boolean8(0x01, "open_rights_read_only", "Read Only"), + bf_boolean8(0x02, "open_rights_write_only", "Write Only"), + bf_boolean8(0x04, "open_rights_deny_read", "Deny Read"), + bf_boolean8(0x08, "open_rights_deny_write", "Deny Write"), + bf_boolean8(0x10, "open_rights_compat", "Compatibility"), + bf_boolean8(0x40, "open_rights_write_thru", "File Write Through"), +]) +OptionNumber = uint8("option_number", "Option Number") +originalSize = uint32("original_size", "Original Size") +OSLanguageID = uint8("os_language_id", "OS Language ID") +OSMajorVersion = uint8("os_major_version", "OS Major Version") +OSMinorVersion = uint8("os_minor_version", "OS Minor Version") +OSRevision = uint32("os_revision", "OS Revision") +OtherFileForkSize = uint32("other_file_fork_size", "Other File Fork Size") +OtherFileForkFAT = uint32("other_file_fork_fat", "Other File Fork FAT Entry") +OutgoingPacketDiscardedNoTurboBuffer = uint16("outgoing_packet_discarded_no_turbo_buffer", "Outgoing Packet Discarded No Turbo Buffer") + +PacketsDiscardedByHopCount = uint16("packets_discarded_by_hop_count", "Packets Discarded By Hop Count") +PacketsDiscardedUnknownNet = uint16("packets_discarded_unknown_net", "Packets Discarded Unknown Net") +PacketsFromInvalidConnection = uint16("packets_from_invalid_connection", "Packets From Invalid Connection") +PacketsReceivedDuringProcessing = uint16("packets_received_during_processing", "Packets Received During Processing") +PacketsWithBadRequestType = uint16("packets_with_bad_request_type", "Packets With Bad Request Type") +PacketsWithBadSequenceNumber = uint16("packets_with_bad_sequence_number", "Packets With Bad Sequence Number") +PageTableOwnerFlag = uint32("page_table_owner_flag", "Page Table Owner") +ParentID = uint32("parent_id", "Parent ID") +ParentID.Display("BASE_HEX") +ParentBaseID = uint32("parent_base_id", "Parent Base ID") +ParentBaseID.Display("BASE_HEX") +ParentDirectoryBase = uint32("parent_directory_base", "Parent Directory Base") +ParentDOSDirectoryBase = uint32("parent_dos_directory_base", "Parent DOS Directory Base") +ParentObjectNumber = uint32("parent_object_number", "Parent Object Number") +ParentObjectNumber.Display("BASE_HEX") +Password = nstring8("password", "Password") +PathBase = uint8("path_base", "Path Base") +PathComponentCount = uint16("path_component_count", "Path Component Count") +PathComponentSize = uint16("path_component_size", "Path Component Size") +PathCookieFlags = val_string16("path_cookie_flags", "Path Cookie Flags", [ + [ 0x0000, "Last component is Not a File Name" ], + [ 0x0001, "Last component is a File Name" ], +]) +PathCount = uint8("path_count", "Path Count") +# +# XXX - in at least some File Search Continue requests, the string +# length value is longer than the string, and there's a NUL, followed +# by other non-zero cruft, in the string. Should this be an +# "nstringz8", with FT_UINT_STRINGZPAD added to support it? 
And +# does that apply to any other values? +# +Path = nstring8("path", "Path") +Path16 = nstring16("path16", "Path") +PathAndName = stringz("path_and_name", "Path and Name") +PendingIOCommands = uint16("pending_io_commands", "Pending IO Commands") +PhysicalDiskNumber = uint8("physical_disk_number", "Physical Disk Number") +PhysicalDriveCount = uint8("physical_drive_count", "Physical Drive Count") +PhysicalLockThreshold = uint8("physical_lock_threshold", "Physical Lock Threshold") +PingVersion = uint16("ping_version", "Ping Version") +PoolName = stringz("pool_name", "Pool Name") +PositiveAcknowledgesSent = uint16("positive_acknowledges_sent", "Positive Acknowledges Sent") +PreCompressedSectors = uint32("pre_compressed_sectors", "Precompressed Sectors") +PreviousRecord = uint32("previous_record", "Previous Record") +PrimaryEntry = uint32("primary_entry", "Primary Entry") +PrintFlags = bitfield8("print_flags", "Print Flags", [ + bf_boolean8(0x08, "print_flags_ff", "Suppress Form Feeds"), + bf_boolean8(0x10, "print_flags_cr", "Create"), + bf_boolean8(0x20, "print_flags_del_spool", "Delete Spool File after Printing"), + bf_boolean8(0x40, "print_flags_exp_tabs", "Expand Tabs in the File"), + bf_boolean8(0x80, "print_flags_banner", "Print Banner Page"), +]) +PrinterHalted = val_string8("printer_halted", "Printer Halted", [ + [ 0x00, "Printer is not Halted" ], + [ 0xff, "Printer is Halted" ], +]) +PrinterOffLine = val_string8( "printer_offline", "Printer Off-Line", [ + [ 0x00, "Printer is On-Line" ], + [ 0xff, "Printer is Off-Line" ], +]) +PrintServerVersion = uint8("print_server_version", "Print Server Version") +Priority = uint32("priority", "Priority") +Privileges = uint32("privileges", "Login Privileges") +ProcessorType = val_string8("processor_type", "Processor Type", [ + [ 0x00, "Motorola 68000" ], + [ 0x01, "Intel 8088 or 8086" ], + [ 0x02, "Intel 80286" ], +]) +ProDOSInfo = bytes("pro_dos_info", "Pro DOS Info", 6) +ProductMajorVersion = uint16("product_major_version", "Product Major Version") +ProductMinorVersion = uint16("product_minor_version", "Product Minor Version") +ProductRevisionVersion = uint8("product_revision_version", "Product Revision Version") +projectedCompSize = uint32("projected_comp_size", "Projected Compression Size") +PropertyHasMoreSegments = val_string8("property_has_more_segments", + "Property Has More Segments", [ + [ 0x00, "Is last segment" ], + [ 0xff, "More segments are available" ], +]) +PropertyName = nstring8("property_name", "Property Name") +PropertyName16 = fw_string("property_name_16", "Property Name", 16) +PropertyData = bytes("property_data", "Property Data", 128) +PropertySegment = uint8("property_segment", "Property Segment") +PropertyType = val_string8("property_type", "Property Type", [ + [ 0x00, "Display Static property" ], + [ 0x01, "Display Dynamic property" ], + [ 0x02, "Set Static property" ], + [ 0x03, "Set Dynamic property" ], +]) +PropertyValue = fw_string("property_value", "Property Value", 128) +ProposedMaxSize = uint16("proposed_max_size", "Proposed Max Size") +ProposedMaxSize64 = uint64("proposed_max_size64", "Proposed Max Size") +protocolFlags = uint32("protocol_flags", "Protocol Flags") +protocolFlags.Display("BASE_HEX") +PurgeableBlocks = uint32("purgeable_blocks", "Purgeable Blocks") +PurgeCcode = uint32("purge_c_code", "Purge Completion Code") +PurgeCount = uint32("purge_count", "Purge Count") +PurgeFlags = val_string16("purge_flags", "Purge Flags", [ + [ 0x0000, "Do not Purge All" ], + [ 0x0001, "Purge All" ], + [ 0xffff, "Do not 
Purge All" ], +]) +PurgeList = uint32("purge_list", "Purge List") +PhysicalDiskChannel = uint8("physical_disk_channel", "Physical Disk Channel") +PhysicalDriveType = val_string8("physical_drive_type", "Physical Drive Type", [ + [ 0x01, "XT" ], + [ 0x02, "AT" ], + [ 0x03, "SCSI" ], + [ 0x04, "Disk Coprocessor" ], + [ 0x05, "PS/2 with MFM Controller" ], + [ 0x06, "PS/2 with ESDI Controller" ], + [ 0x07, "Convergent Technology SBIC" ], +]) +PhysicalReadErrors = uint16("physical_read_errors", "Physical Read Errors") +PhysicalReadRequests = uint32("physical_read_requests", "Physical Read Requests") +PhysicalWriteErrors = uint16("physical_write_errors", "Physical Write Errors") +PhysicalWriteRequests = uint32("physical_write_requests", "Physical Write Requests") +PrintToFileFlag = boolean8("print_to_file_flag", "Print to File Flag") + +QueueID = uint32("queue_id", "Queue ID") +QueueID.Display("BASE_HEX") +QueueName = nstring8("queue_name", "Queue Name") +QueueStartPosition = uint32("queue_start_position", "Queue Start Position") +QueueStatus = bitfield8("queue_status", "Queue Status", [ + bf_boolean8(0x01, "queue_status_new_jobs", "Operator does not want to add jobs to the queue"), + bf_boolean8(0x02, "queue_status_pserver", "Operator does not want additional servers attaching"), + bf_boolean8(0x04, "queue_status_svc_jobs", "Operator does not want servers to service jobs"), +]) +QueueType = uint16("queue_type", "Queue Type") +QueueingVersion = uint8("qms_version", "QMS Version") + +ReadBeyondWrite = uint16("read_beyond_write", "Read Beyond Write") +RecordLockCount = uint16("rec_lock_count", "Record Lock Count") +RecordStart = uint32("record_start", "Record Start") +RecordEnd = uint32("record_end", "Record End") +RecordInUseFlag = val_string16("record_in_use", "Record in Use", [ + [ 0x0000, "Record In Use" ], + [ 0xffff, "Record Not In Use" ], +]) +RedirectedPrinter = uint8( "redirected_printer", "Redirected Printer" ) +ReferenceCount = uint32("reference_count", "Reference Count") +RelationsCount = uint16("relations_count", "Relations Count") +ReMirrorCurrentOffset = uint32("re_mirror_current_offset", "ReMirror Current Offset") +ReMirrorDriveNumber = uint8("re_mirror_drive_number", "ReMirror Drive Number") +RemoteMaxPacketSize = uint32("remote_max_packet_size", "Remote Max Packet Size") +RemoteTargetID = uint32("remote_target_id", "Remote Target ID") +RemoteTargetID.Display("BASE_HEX") +RemovableFlag = uint16("removable_flag", "Removable Flag") +RemoveOpenRights = bitfield8("remove_open_rights", "Remove Open Rights", [ + bf_boolean8(0x01, "remove_open_rights_ro", "Read Only"), + bf_boolean8(0x02, "remove_open_rights_wo", "Write Only"), + bf_boolean8(0x04, "remove_open_rights_dr", "Deny Read"), + bf_boolean8(0x08, "remove_open_rights_dw", "Deny Write"), + bf_boolean8(0x10, "remove_open_rights_comp", "Compatibility"), + bf_boolean8(0x40, "remove_open_rights_write_thru", "Write Through"), +]) +RenameFlag = bitfield8("rename_flag", "Rename Flag", [ + bf_boolean8(0x01, "rename_flag_ren", "Rename to Myself allows file to be renamed to its original name"), + bf_boolean8(0x02, "rename_flag_comp", "Compatibility allows files that are marked read only to be opened with read/write access"), + bf_boolean8(0x04, "rename_flag_no", "Name Only renames only the specified name space entry name"), +]) +RepliesCancelled = uint16("replies_cancelled", "Replies Cancelled") +ReplyBuffer = nstring8("reply_buffer", "Reply Buffer") +ReplyBufferSize = uint32("reply_buffer_size", "Reply Buffer Size") +ReplyQueueJobNumbers = 
uint32("reply_queue_job_numbers", "Reply Queue Job Numbers") +RequestBitMap = bitfield16("request_bit_map", "Request Bit Map", [ + bf_boolean16(0x0001, "request_bit_map_ret_afp_ent", "AFP Entry ID"), + bf_boolean16(0x0002, "request_bit_map_ret_data_fork", "Data Fork Length"), + bf_boolean16(0x0004, "request_bit_map_ret_res_fork", "Resource Fork Length"), + bf_boolean16(0x0008, "request_bit_map_ret_num_off", "Number of Offspring"), + bf_boolean16(0x0010, "request_bit_map_ret_owner", "Owner ID"), + bf_boolean16(0x0020, "request_bit_map_ret_short", "Short Name"), + bf_boolean16(0x0040, "request_bit_map_ret_acc_priv", "Access Privileges"), + bf_boolean16(0x0100, "request_bit_map_ratt", "Return Attributes"), + bf_boolean16(0x0200, "request_bit_map_ret_afp_parent", "AFP Parent Entry ID"), + bf_boolean16(0x0400, "request_bit_map_ret_cr_date", "Creation Date"), + bf_boolean16(0x0800, "request_bit_map_ret_acc_date", "Access Date"), + bf_boolean16(0x1000, "request_bit_map_ret_mod_date", "Modify Date&Time"), + bf_boolean16(0x2000, "request_bit_map_ret_bak_date", "Backup Date&Time"), + bf_boolean16(0x4000, "request_bit_map_ret_finder", "Finder Info"), + bf_boolean16(0x8000, "request_bit_map_ret_long_nm", "Long Name"), +]) +ResourceForkLen = uint32("resource_fork_len", "Resource Fork Len") +RequestCode = val_string8("request_code", "Request Code", [ + [ 0x00, "Change Logged in to Temporary Authenticated" ], + [ 0x01, "Change Temporary Authenticated to Logged in" ], +]) +RequestData = nstring8("request_data", "Request Data") +RequestsReprocessed = uint16("requests_reprocessed", "Requests Reprocessed") +Reserved = uint8( "reserved", "Reserved" ) +Reserved2 = bytes("reserved2", "Reserved", 2) +Reserved3 = bytes("reserved3", "Reserved", 3) +Reserved4 = bytes("reserved4", "Reserved", 4) +Reserved5 = bytes("reserved5", "Reserved", 5) +Reserved6 = bytes("reserved6", "Reserved", 6) +Reserved8 = bytes("reserved8", "Reserved", 8) +Reserved10 = bytes("reserved10", "Reserved", 10) +Reserved12 = bytes("reserved12", "Reserved", 12) +Reserved16 = bytes("reserved16", "Reserved", 16) +Reserved20 = bytes("reserved20", "Reserved", 20) +Reserved28 = bytes("reserved28", "Reserved", 28) +Reserved36 = bytes("reserved36", "Reserved", 36) +Reserved44 = bytes("reserved44", "Reserved", 44) +Reserved48 = bytes("reserved48", "Reserved", 48) +Reserved50 = bytes("reserved50", "Reserved", 50) +Reserved56 = bytes("reserved56", "Reserved", 56) +Reserved64 = bytes("reserved64", "Reserved", 64) +Reserved120 = bytes("reserved120", "Reserved", 120) +ReservedOrDirectoryNumber = uint32("reserved_or_directory_number", "Reserved or Directory Number (see EAFlags)") +ReservedOrDirectoryNumber.Display("BASE_HEX") +ResourceCount = uint32("resource_count", "Resource Count") +ResourceForkSize = uint32("resource_fork_size", "Resource Fork Size") +ResourceName = stringz("resource_name", "Resource Name") +ResourceSignature = fw_string("resource_sig", "Resource Signature", 4) +RestoreTime = eptime("restore_time", "Restore Time") +Restriction = uint32("restriction", "Disk Space Restriction") +RestrictionQuad = uint64("restriction_quad", "Restriction") +RestrictionsEnforced = val_string8("restrictions_enforced", "Disk Restrictions Enforce Flag", [ + [ 0x00, "Enforced" ], + [ 0xff, "Not Enforced" ], +]) +ReturnInfoCount = uint32("return_info_count", "Return Information Count") +ReturnInfoMask = bitfield16("ret_info_mask", "Return Information", [ + bf_boolean16(0x0001, "ret_info_mask_fname", "Return File Name Information"), + bf_boolean16(0x0002, 
"ret_info_mask_alloc", "Return Allocation Space Information"), + bf_boolean16(0x0004, "ret_info_mask_attr", "Return Attribute Information"), + bf_boolean16(0x0008, "ret_info_mask_size", "Return Size Information"), + bf_boolean16(0x0010, "ret_info_mask_tspace", "Return Total Space Information"), + bf_boolean16(0x0020, "ret_info_mask_eattr", "Return Extended Attributes Information"), + bf_boolean16(0x0040, "ret_info_mask_arch", "Return Archive Information"), + bf_boolean16(0x0080, "ret_info_mask_mod", "Return Modify Information"), + bf_boolean16(0x0100, "ret_info_mask_create", "Return Creation Information"), + bf_boolean16(0x0200, "ret_info_mask_ns", "Return Name Space Information"), + bf_boolean16(0x0400, "ret_info_mask_dir", "Return Directory Information"), + bf_boolean16(0x0800, "ret_info_mask_rights", "Return Rights Information"), + bf_boolean16(0x1000, "ret_info_mask_id", "Return ID Information"), + bf_boolean16(0x2000, "ret_info_mask_ns_attr", "Return Name Space Attributes Information"), + bf_boolean16(0x4000, "ret_info_mask_actual", "Return Actual Information"), + bf_boolean16(0x8000, "ret_info_mask_logical", "Return Logical Information"), +]) +ReturnedListCount = uint32("returned_list_count", "Returned List Count") +Revision = uint32("revision", "Revision") +RevisionNumber = uint8("revision_number", "Revision") +RevQueryFlag = val_string8("rev_query_flag", "Revoke Rights Query Flag", [ + [ 0x00, "Do not query the locks engine for access rights" ], + [ 0x01, "Query the locks engine and return the access rights" ], +]) +RightsGrantMask = bitfield8("rights_grant_mask", "Grant Rights", [ + bf_boolean8(0x01, "rights_grant_mask_read", "Read"), + bf_boolean8(0x02, "rights_grant_mask_write", "Write"), + bf_boolean8(0x04, "rights_grant_mask_open", "Open"), + bf_boolean8(0x08, "rights_grant_mask_create", "Create"), + bf_boolean8(0x10, "rights_grant_mask_del", "Delete"), + bf_boolean8(0x20, "rights_grant_mask_parent", "Parental"), + bf_boolean8(0x40, "rights_grant_mask_search", "Search"), + bf_boolean8(0x80, "rights_grant_mask_mod", "Modify"), +]) +RightsRevokeMask = bitfield8("rights_revoke_mask", "Revoke Rights", [ + bf_boolean8(0x01, "rights_revoke_mask_read", "Read"), + bf_boolean8(0x02, "rights_revoke_mask_write", "Write"), + bf_boolean8(0x04, "rights_revoke_mask_open", "Open"), + bf_boolean8(0x08, "rights_revoke_mask_create", "Create"), + bf_boolean8(0x10, "rights_revoke_mask_del", "Delete"), + bf_boolean8(0x20, "rights_revoke_mask_parent", "Parental"), + bf_boolean8(0x40, "rights_revoke_mask_search", "Search"), + bf_boolean8(0x80, "rights_revoke_mask_mod", "Modify"), +]) +RIPSocketNumber = uint16("rip_socket_num", "RIP Socket Number") +RIPSocketNumber.Display("BASE_HEX") +RouterDownFlag = boolean8("router_dn_flag", "Router Down Flag") +RPCccode = val_string16("rpc_c_code", "RPC Completion Code", [ + [ 0x0000, "Successful" ], +]) +RTagNumber = uint32("r_tag_num", "Resource Tag Number") +RTagNumber.Display("BASE_HEX") +RpyNearestSrvFlag = boolean8("rpy_nearest_srv_flag", "Reply to Nearest Server Flag") + +SalvageableFileEntryNumber = uint32("salvageable_file_entry_number", "Salvageable File Entry Number") +SalvageableFileEntryNumber.Display("BASE_HEX") +SAPSocketNumber = uint16("sap_socket_number", "SAP Socket Number") +SAPSocketNumber.Display("BASE_HEX") +ScanItems = uint32("scan_items", "Number of Items returned from Scan") +SearchAttributes = bitfield8("sattr", "Search Attributes", [ + bf_boolean8(0x01, "sattr_ronly", "Read-Only Files Allowed"), + bf_boolean8(0x02, "sattr_hid", "Hidden 
Files Allowed"), + bf_boolean8(0x04, "sattr_sys", "System Files Allowed"), + bf_boolean8(0x08, "sattr_exonly", "Execute-Only Files Allowed"), + bf_boolean8(0x10, "sattr_sub", "Subdirectories Only"), + bf_boolean8(0x20, "sattr_archive", "Archive"), + bf_boolean8(0x40, "sattr_execute_confirm", "Execute Confirm"), + bf_boolean8(0x80, "sattr_shareable", "Shareable"), +]) +SearchAttributesLow = bitfield16("search_att_low", "Search Attributes", [ + bf_boolean16(0x0001, "search_att_read_only", "Read-Only"), + bf_boolean16(0x0002, "search_att_hidden", "Hidden Files Allowed"), + bf_boolean16(0x0004, "search_att_system", "System"), + bf_boolean16(0x0008, "search_att_execute_only", "Execute-Only"), + bf_boolean16(0x0010, "search_att_sub", "Subdirectories Only"), + bf_boolean16(0x0020, "search_att_archive", "Archive"), + bf_boolean16(0x0040, "search_att_execute_confirm", "Execute Confirm"), + bf_boolean16(0x0080, "search_att_shareable", "Shareable"), + bf_boolean16(0x8000, "search_attr_all_files", "All Files and Directories"), +]) +SearchBitMap = bitfield8("search_bit_map", "Search Bit Map", [ + bf_boolean8(0x01, "search_bit_map_hidden", "Hidden"), + bf_boolean8(0x02, "search_bit_map_sys", "System"), + bf_boolean8(0x04, "search_bit_map_sub", "Subdirectory"), + bf_boolean8(0x08, "search_bit_map_files", "Files"), +]) +SearchConnNumber = uint32("search_conn_number", "Search Connection Number") +SearchInstance = uint32("search_instance", "Search Instance") +SearchNumber = uint32("search_number", "Search Number") +SearchPattern = nstring8("search_pattern", "Search Pattern") +SearchPattern16 = nstring16("search_pattern_16", "Search Pattern") +SearchSequence = bytes("search_sequence", "Search Sequence", 9) +SearchSequenceWord = uint16("search_sequence_word", "Search Sequence", ENC_BIG_ENDIAN) +Second = uint8("s_second", "Seconds") +SecondsRelativeToTheYear2000 = uint32("sec_rel_to_y2k", "Seconds Relative to the Year 2000") +SecretStoreVerb = val_string8("ss_verb", "Secret Store Verb",[ + [ 0x00, "Query Server" ], + [ 0x01, "Read App Secrets" ], + [ 0x02, "Write App Secrets" ], + [ 0x03, "Add Secret ID" ], + [ 0x04, "Remove Secret ID" ], + [ 0x05, "Remove SecretStore" ], + [ 0x06, "Enumerate Secret IDs" ], + [ 0x07, "Unlock Store" ], + [ 0x08, "Set Master Password" ], + [ 0x09, "Get Service Information" ], +]) +SecurityEquivalentList = fw_string("security_equiv_list", "Security Equivalent List", 128) +SecurityFlag = bitfield8("security_flag", "Security Flag", [ + bf_boolean8(0x01, "checksumming", "Checksumming"), + bf_boolean8(0x02, "signature", "Signature"), + bf_boolean8(0x04, "complete_signatures", "Complete Signatures"), + bf_boolean8(0x08, "encryption", "Encryption"), + bf_boolean8(0x80, "large_internet_packets", "Large Internet Packets (LIP) Disabled"), +]) +SecurityRestrictionVersion = uint8("security_restriction_version", "Security Restriction Version") +SectorsPerBlock = uint8("sectors_per_block", "Sectors Per Block") +SectorsPerBlockLong = uint32("sectors_per_block_long", "Sectors Per Block") +SectorsPerCluster = uint16("sectors_per_cluster", "Sectors Per Cluster" ) +SectorsPerClusterLong = uint32("sectors_per_cluster_long", "Sectors Per Cluster" ) +SectorsPerTrack = uint8("sectors_per_track", "Sectors Per Track") +SectorSize = uint32("sector_size", "Sector Size") +SemaphoreHandle = uint32("semaphore_handle", "Semaphore Handle") +SemaphoreName = nstring8("semaphore_name", "Semaphore Name") +SemaphoreOpenCount = uint8("semaphore_open_count", "Semaphore Open Count") +SemaphoreShareCount = 
uint8("semaphore_share_count", "Semaphore Share Count") +SemaphoreTimeOut = uint16("semaphore_time_out", "Semaphore Time Out") +SemaphoreValue = uint16("semaphore_value", "Semaphore Value") +SendStatus = val_string8("send_status", "Send Status", [ + [ 0x00, "Successful" ], + [ 0x01, "Illegal Station Number" ], + [ 0x02, "Client Not Logged In" ], + [ 0x03, "Client Not Accepting Messages" ], + [ 0x04, "Client Already has a Message" ], + [ 0x96, "No Alloc Space for the Message" ], + [ 0xfd, "Bad Station Number" ], + [ 0xff, "Failure" ], +]) +SequenceByte = uint8("sequence_byte", "Sequence") +SequenceNumber = uint32("sequence_number", "Sequence Number") +SequenceNumber.Display("BASE_HEX") +SequenceNumberLong = uint64("sequence_number64", "Sequence Number") +SequenceNumberLong.Display("BASE_HEX") +ServerAddress = bytes("server_address", "Server Address", 12) +ServerAppNumber = uint16("server_app_num", "Server App Number") +ServerID = uint32("server_id_number", "Server ID", ENC_BIG_ENDIAN ) +ServerID.Display("BASE_HEX") +ServerInfoFlags = val_string16("server_info_flags", "Server Information Flags", [ + [ 0x0000, "This server is not a member of a Cluster" ], + [ 0x0001, "This server is a member of a Cluster" ], +]) +serverListFlags = uint32("server_list_flags", "Server List Flags") +ServerName = fw_string("server_name", "Server Name", 48) +serverName50 = fw_string("server_name50", "Server Name", 50) +ServerNameLen = nstring8("server_name_len", "Server Name") +ServerNameStringz = stringz("server_name_stringz", "Server Name") +ServerNetworkAddress = bytes("server_network_address", "Server Network Address", 10) +ServerNode = bytes("server_node", "Server Node", 6) +ServerSerialNumber = uint32("server_serial_number", "Server Serial Number") +ServerStation = uint8("server_station", "Server Station") +ServerStationLong = uint32("server_station_long", "Server Station") +ServerStationList = uint8("server_station_list", "Server Station List") +ServerStatusRecord = fw_string("server_status_record", "Server Status Record", 64) +ServerTaskNumber = uint8("server_task_number", "Server Task Number") +ServerTaskNumberLong = uint32("server_task_number_long", "Server Task Number") +ServerType = uint16("server_type", "Server Type") +ServerType.Display("BASE_HEX") +ServerUtilization = uint32("server_utilization", "Server Utilization") +ServerUtilizationPercentage = uint8("server_utilization_percentage", "Server Utilization Percentage") +ServiceType = val_string16("Service_type", "Service Type", [ + [ 0x0000, "Unknown" ], + [ 0x0001, "User" ], + [ 0x0002, "User group" ], + [ 0x0003, "Print queue" ], + [ 0x0004, "NetWare file server" ], + [ 0x0005, "Job server" ], + [ 0x0006, "Gateway" ], + [ 0x0007, "Print server" ], + [ 0x0008, "Archive queue" ], + [ 0x0009, "Archive server" ], + [ 0x000a, "Job queue" ], + [ 0x000b, "Administration" ], + [ 0x0021, "NAS SNA gateway" ], + [ 0x0026, "Remote bridge server" ], + [ 0x0027, "TCP/IP gateway" ], + [ 0xffff, "All Types" ], +]) +SetCmdCategory = val_string8("set_cmd_category", "Set Command Category", [ + [ 0x00, "Communications" ], + [ 0x01, "Memory" ], + [ 0x02, "File Cache" ], + [ 0x03, "Directory Cache" ], + [ 0x04, "File System" ], + [ 0x05, "Locks" ], + [ 0x06, "Transaction Tracking" ], + [ 0x07, "Disk" ], + [ 0x08, "Time" ], + [ 0x09, "NCP" ], + [ 0x0a, "Miscellaneous" ], + [ 0x0b, "Error Handling" ], + [ 0x0c, "Directory Services" ], + [ 0x0d, "MultiProcessor" ], + [ 0x0e, "Service Location Protocol" ], + [ 0x0f, "Licensing Services" ], +]) +SetCmdFlags = 
bitfield8("set_cmd_flags", "Set Command Flags", [ + bf_boolean8(0x01, "cmd_flags_startup_only", "Startup.ncf Only"), + bf_boolean8(0x02, "cmd_flags_hidden", "Hidden"), + bf_boolean8(0x04, "cmd_flags_advanced", "Advanced"), + bf_boolean8(0x08, "cmd_flags_later", "Restart Server Required to Take Effect"), + bf_boolean8(0x80, "cmd_flags_secure", "Console Secured"), +]) +SetCmdName = stringz("set_cmd_name", "Set Command Name") +SetCmdType = val_string8("set_cmd_type", "Set Command Type", [ + [ 0x00, "Numeric Value" ], + [ 0x01, "Boolean Value" ], + [ 0x02, "Ticks Value" ], + [ 0x04, "Time Value" ], + [ 0x05, "String Value" ], + [ 0x06, "Trigger Value" ], + [ 0x07, "Numeric Value" ], +]) +SetCmdValueNum = uint32("set_cmd_value_num", "Set Command Value") +SetCmdValueString = stringz("set_cmd_value_string", "Set Command Value") +SetMask = bitfield32("set_mask", "Set Mask", [ + bf_boolean32(0x00000001, "ncp_encoded_strings", "NCP Encoded Strings"), + bf_boolean32(0x00000002, "connection_code_page", "Connection Code Page"), +]) +SetParmName = stringz("set_parm_name", "Set Parameter Name") +SFTErrorTable = bytes("sft_error_table", "SFT Error Table", 60) +SFTSupportLevel = val_string8("sft_support_level", "SFT Support Level", [ + [ 0x01, "Server Offers Hot Disk Error Fixing" ], + [ 0x02, "Server Offers Disk Mirroring and Transaction Tracking" ], + [ 0x03, "Server Offers Physical Server Mirroring" ], +]) +ShareableLockCount = uint16("shareable_lock_count", "Shareable Lock Count") +SharedMemoryAddresses = bytes("shared_memory_addresses", "Shared Memory Addresses", 10) +ShortName = fw_string("short_name", "Short Name", 12) +ShortStkName = fw_string("short_stack_name", "Short Stack Name", 16) +SiblingCount = uint32("sibling_count", "Sibling Count") +SixtyFourBitOffsetsSupportedFlag = val_string8("64_bit_flag", "64 Bit Support", [ + [ 0x00, "No support for 64 bit offsets" ], + [ 0x01, "64 bit offsets supported" ], + [ 0x02, "Use 64 bit file transfer NCP's" ], +]) +SMIDs = uint32("smids", "Storage Media ID's") +SoftwareDescription = fw_string("software_description", "Software Description", 65) +SoftwareDriverType = uint8("software_driver_type", "Software Driver Type") +SoftwareMajorVersionNumber = uint8("software_major_version_number", "Software Major Version Number") +SoftwareMinorVersionNumber = uint8("software_minor_version_number", "Software Minor Version Number") +SourceDirHandle = uint8("source_dir_handle", "Source Directory Handle") +SourceFileHandle = bytes("s_fhandle_64bit", "Source File Handle", 6) +SourceFileOffset = bytes("s_foffset", "Source File Offset", 8) +sourceOriginateTime = bytes("source_originate_time", "Source Originate Time", 8) +SourcePath = nstring8("source_path", "Source Path") +SourcePathComponentCount = uint8("source_component_count", "Source Path Component Count") +sourceReturnTime = bytes("source_return_time", "Source Return Time", 8) +SpaceUsed = uint32("space_used", "Space Used") +SpaceMigrated = uint32("space_migrated", "Space Migrated") +SrcNameSpace = val_string8("src_name_space", "Source Name Space", [ + [ 0x00, "DOS Name Space" ], + [ 0x01, "MAC Name Space" ], + [ 0x02, "NFS Name Space" ], + [ 0x04, "Long Name Space" ], +]) +SubFuncStrucLen = uint16("sub_func_struc_len", "Structure Length") +SupModID = uint32("sup_mod_id", "Sup Mod ID") +StackCount = uint32("stack_count", "Stack Count") +StackFullNameStr = nstring8("stack_full_name_str", "Stack Full Name") +StackMajorVN = uint8("stack_major_vn", "Stack Major Version Number") +StackMinorVN = uint8("stack_minor_vn", 
"Stack Minor Version Number") +StackNumber = uint32("stack_number", "Stack Number") +StartConnNumber = uint32("start_conn_num", "Starting Connection Number") +StartingBlock = uint16("starting_block", "Starting Block") +StartingNumber = uint32("starting_number", "Starting Number") +StartingSearchNumber = uint16("start_search_number", "Start Search Number") +StartNumber = uint32("start_number", "Start Number") +startNumberFlag = uint16("start_number_flag", "Start Number Flag") +StartOffset64bit = bytes("s_offset_64bit", "64bit Starting Offset", 64) +StartVolumeNumber = uint32("start_volume_number", "Starting Volume Number") +StationList = uint32("station_list", "Station List") +StationNumber = bytes("station_number", "Station Number", 3) +StatMajorVersion = uint8("stat_major_version", "Statistics Table Major Version") +StatMinorVersion = uint8("stat_minor_version", "Statistics Table Minor Version") +Status = bitfield16("status", "Status", [ + bf_boolean16(0x0001, "user_info_logged_in", "Logged In"), + bf_boolean16(0x0002, "user_info_being_abort", "Being Aborted"), + bf_boolean16(0x0004, "user_info_audited", "Audited"), + bf_boolean16(0x0008, "user_info_need_sec", "Needs Security Change"), + bf_boolean16(0x0010, "user_info_mac_station", "MAC Station"), + bf_boolean16(0x0020, "user_info_temp_authen", "Temporary Authenticated"), + bf_boolean16(0x0040, "user_info_audit_conn", "Audit Connection Recorded"), + bf_boolean16(0x0080, "user_info_dsaudit_conn", "DS Audit Connection Recorded"), + bf_boolean16(0x0100, "user_info_logout", "Logout in Progress"), + bf_boolean16(0x0200, "user_info_int_login", "Internal Login"), + bf_boolean16(0x0400, "user_info_bindery", "Bindery Connection"), +]) +StatusFlagBits = bitfield32("status_flag_bits", "Status Flag", [ + bf_boolean32(0x00000001, "status_flag_bits_suballoc", "Sub Allocation"), + bf_boolean32(0x00000002, "status_flag_bits_comp", "Compression"), + bf_boolean32(0x00000004, "status_flag_bits_migrate", "Migration"), + bf_boolean32(0x00000008, "status_flag_bits_audit", "Audit"), + bf_boolean32(0x00000010, "status_flag_bits_ro", "Read Only"), + bf_boolean32(0x00000020, "status_flag_bits_im_purge", "Immediate Purge"), + bf_boolean32(0x00000040, "status_flag_bits_64bit", "64Bit File Offsets"), + bf_boolean32(0x00000080, "status_flag_bits_utf8", "UTF8 NCP Strings"), + bf_boolean32(0x80000000, "status_flag_bits_nss", "NSS Volume"), +]) +SubAllocClusters = uint32("sub_alloc_clusters", "Sub Alloc Clusters") +SubAllocFreeableClusters = uint32("sub_alloc_freeable_clusters", "Sub Alloc Freeable Clusters") +Subdirectory = uint32("sub_directory", "Subdirectory") +Subdirectory.Display("BASE_HEX") +SuggestedFileSize = uint32("suggested_file_size", "Suggested File Size") +SupportModuleID = uint32("support_module_id", "Support Module ID") +SynchName = nstring8("synch_name", "Synch Name") +SystemIntervalMarker = uint32("system_interval_marker", "System Interval Marker") + +TabSize = uint8( "tab_size", "Tab Size" ) +TargetClientList = uint8("target_client_list", "Target Client List") +TargetConnectionNumber = uint16("target_connection_number", "Target Connection Number") +TargetDirectoryBase = uint32("target_directory_base", "Target Directory Base") +TargetDirHandle = uint8("target_dir_handle", "Target Directory Handle") +TargetEntryID = uint32("target_entry_id", "Target Entry ID") +TargetEntryID.Display("BASE_HEX") +TargetExecutionTime = bytes("target_execution_time", "Target Execution Time", 6) +TargetFileHandle = bytes("target_file_handle", "Target File Handle", 6) 
+TargetFileOffset = uint32("target_file_offset", "Target File Offset") +TargetFileOffset64bit = bytes("t_foffset", "Target File Offset", 8) +TargetMessage = nstring8("target_message", "Message") +TargetPrinter = uint8( "target_ptr", "Target Printer" ) +targetReceiveTime = bytes("target_receive_time", "Target Receive Time", 8) +TargetServerIDNumber = uint32("target_server_id_number", "Target Server ID Number", ENC_BIG_ENDIAN ) +TargetServerIDNumber.Display("BASE_HEX") +targetTransmitTime = bytes("target_transmit_time", "Target Transmit Time", 8) +TaskNumByte = uint8("task_num_byte", "Task Number") +TaskNumber = uint32("task_number", "Task Number") +TaskNumberWord = uint16("task_number_word", "Task Number") +TaskState = val_string8("task_state", "Task State", [ + [ 0x00, "Normal" ], + [ 0x01, "TTS explicit transaction in progress" ], + [ 0x02, "TTS implicit transaction in progress" ], + [ 0x04, "Shared file set lock in progress" ], +]) +TextJobDescription = fw_string("text_job_description", "Text Job Description", 50) +ThrashingCount = uint16("thrashing_count", "Thrashing Count") +TimeoutLimit = uint16("timeout_limit", "Timeout Limit") +TimesyncStatus = bitfield32("timesync_status_flags", "Timesync Status", [ + bf_boolean32(0x00000001, "timesync_status_sync", "Time is Synchronized"), + bf_boolean32(0x00000002, "timesync_status_net_sync", "Time is Synchronized to the Network"), + bf_boolean32(0x00000004, "timesync_status_active", "Time Synchronization is Active"), + bf_boolean32(0x00000008, "timesync_status_external", "External Time Synchronization Active"), + bf_val_str32(0x00000700, "timesync_status_server_type", "Time Server Type", [ + [ 0x01, "Client Time Server" ], + [ 0x02, "Secondary Time Server" ], + [ 0x03, "Primary Time Server" ], + [ 0x04, "Reference Time Server" ], + [ 0x05, "Single Reference Time Server" ], + ]), + bf_boolean32(0x000f0000, "timesync_status_ext_sync", "External Clock Status"), +]) +TimeToNet = uint16("time_to_net", "Time To Net") +TotalBlocks = uint32("total_blocks", "Total Blocks") +TotalBlocks64 = uint64("total_blocks64", "Total Blocks") +TotalBlocksToDecompress = uint32("total_blks_to_dcompress", "Total Blocks To Decompress") +TotalBytesRead = bytes("user_info_ttl_bytes_rd", "Total Bytes Read", 6) +TotalBytesWritten = bytes("user_info_ttl_bytes_wrt", "Total Bytes Written", 6) +TotalCacheWrites = uint32("total_cache_writes", "Total Cache Writes") +TotalChangedFATs = uint32("total_changed_fats", "Total Changed FAT Entries") +TotalCommonCnts = uint32("total_common_cnts", "Total Common Counts") +TotalCntBlocks = uint32("total_cnt_blocks", "Total Count Blocks") +TotalDataStreamDiskSpaceAlloc = uint32("ttl_data_str_size_space_alloc", "Total Data Stream Disk Space Alloc") +TotalDirectorySlots = uint16("total_directory_slots", "Total Directory Slots") +TotalDirectoryEntries = uint32("total_dir_entries", "Total Directory Entries") +TotalDirEntries64 = uint64("total_dir_entries64", "Total Directory Entries") +TotalDynamicSpace = uint32("total_dynamic_space", "Total Dynamic Space") +TotalExtendedDirectoryExtents = uint32("total_extended_directory_extents", "Total Extended Directory Extents") +TotalFileServicePackets = uint32("total_file_service_packets", "Total File Service Packets") +TotalFilesOpened = uint32("total_files_opened", "Total Files Opened") +TotalLFSCounters = uint32("total_lfs_counters", "Total LFS Counters") +TotalOffspring = uint16("total_offspring", "Total Offspring") +TotalOtherPackets = uint32("total_other_packets", "Total Other Packets") 
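+# A worked example for the bf_val_str32 entry in TimesyncStatus above:
+# the server-type subfield lives under the 0x00000700 mask, so it is
+# recovered as (flags & 0x00000700) >> 8. For flags == 0x00000307 this
+# gives (0x00000300 >> 8) == 0x03, i.e. "Primary Time Server".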
+TotalQueueJobs = uint32("total_queue_jobs", "Total Queue Jobs") +TotalReadRequests = uint32("total_read_requests", "Total Read Requests") +TotalRequest = uint32("total_request", "Total Requests") +TotalRequestPackets = uint32("total_request_packets", "Total Request Packets") +TotalRoutedPackets = uint32("total_routed_packets", "Total Routed Packets") +TotalRxPkts = uint32("total_rx_pkts", "Total Receive Packets") +TotalServerMemory = uint16("total_server_memory", "Total Server Memory", ENC_BIG_ENDIAN) +TotalTransactionsBackedOut = uint32("total_trans_backed_out", "Total Transactions Backed Out") +TotalTransactionsPerformed = uint32("total_trans_performed", "Total Transactions Performed") +TotalTxPkts = uint32("total_tx_pkts", "Total Transmit Packets") +TotalUnfilledBackoutRequests = uint16("total_unfilled_backout_requests", "Total Unfilled Backout Requests") +TotalVolumeClusters = uint16("total_volume_clusters", "Total Volume Clusters") +TotalWriteRequests = uint32("total_write_requests", "Total Write Requests") +TotalWriteTransactionsPerformed = uint32("total_write_trans_performed", "Total Write Transactions Performed") +TrackOnFlag = boolean8("track_on_flag", "Track On Flag") +TransactionDiskSpace = uint16("transaction_disk_space", "Transaction Disk Space") +TransactionFATAllocations = uint32("transaction_fat_allocations", "Transaction FAT Allocations") +TransactionFileSizeChanges = uint32("transaction_file_size_changes", "Transaction File Size Changes") +TransactionFilesTruncated = uint32("transaction_files_truncated", "Transaction Files Truncated") +TransactionNumber = uint32("transaction_number", "Transaction Number") +TransactionTrackingEnabled = uint8("transaction_tracking_enabled", "Transaction Tracking Enabled") +TransactionTrackingFlag = uint16("tts_flag", "Transaction Tracking Flag") +TransactionTrackingSupported = uint8("transaction_tracking_supported", "Transaction Tracking Supported") +TransactionVolumeNumber = uint16("transaction_volume_number", "Transaction Volume Number") +TransportType = val_string8("transport_type", "Communications Type", [ + [ 0x01, "Internet Packet Exchange (IPX)" ], + [ 0x05, "User Datagram Protocol (UDP)" ], + [ 0x06, "Transmission Control Protocol (TCP)" ], +]) +TreeLength = uint32("tree_length", "Tree Length") +TreeName = nstring32("tree_name", "Tree Name") +TrusteeAccessMask = uint8("trustee_acc_mask", "Trustee Access Mask") +TrusteeRights = bitfield16("trustee_rights_low", "Trustee Rights", [ + bf_boolean16(0x0001, "trustee_rights_read", "Read"), + bf_boolean16(0x0002, "trustee_rights_write", "Write"), + bf_boolean16(0x0004, "trustee_rights_open", "Open"), + bf_boolean16(0x0008, "trustee_rights_create", "Create"), + bf_boolean16(0x0010, "trustee_rights_del", "Delete"), + bf_boolean16(0x0020, "trustee_rights_parent", "Parental"), + bf_boolean16(0x0040, "trustee_rights_search", "Search"), + bf_boolean16(0x0080, "trustee_rights_modify", "Modify"), + bf_boolean16(0x0100, "trustee_rights_super", "Supervisor"), +]) +TTSLevel = uint8("tts_level", "TTS Level") +TrusteeSetNumber = uint8("trustee_set_number", "Trustee Set Number") +TrusteeID = uint32("trustee_id_set", "Trustee ID") +TrusteeID.Display("BASE_HEX") +ttlCompBlks = uint32("ttl_comp_blks", "Total Compression Blocks") +TtlDSDskSpaceAlloc = uint32("ttl_ds_disk_space_alloc", "Total Streams Space Allocated") +TtlEAs = uint32("ttl_eas", "Total EA's") +TtlEAsDataSize = uint32("ttl_eas_data_size", "Total EA's Data Size") +TtlEAsKeySize = uint32("ttl_eas_key_size", "Total EA's Key Size") 
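+# A worked example for TrusteeRights above: a raw value of 0x0103 is
+# 0x0001 | 0x0002 | 0x0100, so exactly the Read, Write and Supervisor
+# masks test non-zero against it; all other trustee bits are clear.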
+ttlIntermediateBlks = uint32("ttl_inter_blks", "Total Intermediate Blocks") +TtlMigratedSize = uint32("ttl_migrated_size", "Total Migrated Size") +TtlNumOfRTags = uint32("ttl_num_of_r_tags", "Total Number of Resource Tags") +TtlNumOfSetCmds = uint32("ttl_num_of_set_cmds", "Total Number of Set Commands") +TtlValuesLength = uint32("ttl_values_length", "Total Values Length") +TtlWriteDataSize = uint32("ttl_write_data_size", "Total Write Data Size") +TurboUsedForFileService = uint16("turbo_used_for_file_service", "Turbo Used For File Service") + +UnclaimedPkts = uint32("un_claimed_packets", "Unclaimed Packets") +UnCompressableDataStreamsCount = uint32("un_compressable_data_streams_count", "Uncompressable Data Streams Count") +Undefined8 = bytes("undefined_8", "Undefined", 8) +Undefined28 = bytes("undefined_28", "Undefined", 28) +UndefinedWord = uint16("undefined_word", "Undefined") +UniqueID = uint8("unique_id", "Unique ID") +UnknownByte = uint8("unknown_byte", "Unknown Byte") +Unused = uint8("un_used", "Unused") +UnusedBlocks = uint32("unused_blocks", "Unused Blocks") +UnUsedDirectoryEntries = uint32("un_used_directory_entries", "Unused Directory Entries") +UnusedDiskBlocks = uint32("unused_disk_blocks", "Unused Disk Blocks") +UnUsedExtendedDirectoryExtents = uint32("un_used_extended_directory_extents", "Unused Extended Directory Extents") +UpdateDate = uint16("update_date", "Update Date") +UpdateDate.NWDate() +UpdateID = uint32("update_id", "Update ID", ENC_BIG_ENDIAN) +UpdateID.Display("BASE_HEX") +UpdateTime = uint16("update_time", "Update Time") +UpdateTime.NWTime() +UseCount = val_string16("user_info_use_count", "Use Count", [ + [ 0x0000, "Connection is not in use" ], + [ 0x0001, "Connection is in use" ], +]) +UsedBlocks = uint32("used_blocks", "Used Blocks") +UserID = uint32("user_id", "User ID", ENC_BIG_ENDIAN) +UserID.Display("BASE_HEX") +UserLoginAllowed = val_string8("user_login_allowed", "Login Status", [ + [ 0x00, "Client Login Disabled" ], + [ 0x01, "Client Login Enabled" ], +]) + +UserName = nstring8("user_name", "User Name") +UserName16 = fw_string("user_name_16", "User Name", 16) +UserName48 = fw_string("user_name_48", "User Name", 48) +UserType = uint16("user_type", "User Type") +UTCTimeInSeconds = eptime("uts_time_in_seconds", "UTC Time in Seconds") + +ValueAvailable = val_string8("value_available", "Value Available", [ + [ 0x00, "Has No Value" ], + [ 0xff, "Has Value" ], +]) +VAPVersion = uint8("vap_version", "VAP Version") +VariableBitMask = uint32("variable_bit_mask", "Variable Bit Mask") +VariableBitsDefined = uint16("variable_bits_defined", "Variable Bits Defined") +VConsoleRevision = uint8("vconsole_rev", "Console Revision") +VConsoleVersion = uint8("vconsole_ver", "Console Version") +Verb = uint32("verb", "Verb") +VerbData = uint8("verb_data", "Verb Data") +version = uint32("version", "Version") +VersionNumber = uint8("version_number", "Version") +VersionNumberLong = uint32("version_num_long", "Version") +VertLocation = uint16("vert_location", "Vertical Location") +VirtualConsoleVersion = uint8("virtual_console_version", "Virtual Console Version") +VolumeID = uint32("volume_id", "Volume ID") +VolumeID.Display("BASE_HEX") +VolInfoReplyLen = uint16("vol_info_reply_len", "Volume Information Reply Length") +VolInfoReturnInfoMask = bitfield32("vol_info_ret_info_mask", "Return Information Mask", [ + bf_boolean32(0x00000001, "vinfo_info64", "Return 64 bit Volume Information"), + bf_boolean32(0x00000002, "vinfo_volname", "Return Volume Name Details"), +]) +VolumeCapabilities 
= bitfield32("volume_capabilities", "Volume Capabilities", [ + bf_boolean32(0x00000001, "vol_cap_user_space", "NetWare User Space Restrictions Supported"), + bf_boolean32(0x00000002, "vol_cap_dir_quota", "NetWare Directory Quotas Supported"), + bf_boolean32(0x00000004, "vol_cap_dfs", "DFS is Active on Volume"), + bf_boolean32(0x00000008, "vol_cap_sal_purge", "NetWare Salvage and Purge Operations Supported"), + bf_boolean32(0x00000010, "vol_cap_comp", "NetWare Compression Supported"), + bf_boolean32(0x00000020, "vol_cap_cluster", "Volume is a Cluster Resource"), + bf_boolean32(0x00000040, "vol_cap_nss_admin", "Volume is the NSS Admin Volume"), + bf_boolean32(0x00000080, "vol_cap_nss", "Volume is Mounted by NSS"), + bf_boolean32(0x00000100, "vol_cap_ea", "OS2 style EA's Supported"), + bf_boolean32(0x00000200, "vol_cap_archive", "NetWare Archive bit Supported"), + bf_boolean32(0x00000400, "vol_cap_file_attr", "Full NetWare file Attributes Supported"), +]) +VolumeCachedFlag = val_string8("volume_cached_flag", "Volume Cached Flag", [ + [ 0x00, "Volume is Not Cached" ], + [ 0xff, "Volume is Cached" ], +]) +VolumeDataStreams = uint8("volume_data_streams", "Volume Data Streams") +VolumeEpochTime = eptime("epoch_time", "Last Modified Timestamp") +VolumeGUID = stringz("volume_guid", "Volume GUID") +VolumeHashedFlag = val_string8("volume_hashed_flag", "Volume Hashed Flag", [ + [ 0x00, "Volume is Not Hashed" ], + [ 0xff, "Volume is Hashed" ], +]) +VolumeMountedFlag = val_string8("volume_mounted_flag", "Volume Mounted Flag", [ + [ 0x00, "Volume is Not Mounted" ], + [ 0xff, "Volume is Mounted" ], +]) +VolumeMountPoint = stringz("volume_mnt_point", "Volume Mount Point") +VolumeName = fw_string("volume_name", "Volume Name", 16) +VolumeNameLen = nstring8("volume_name_len", "Volume Name") +VolumeNameSpaces = uint8("volume_name_spaces", "Volume Name Spaces") +VolumeNameStringz = stringz("vol_name_stringz", "Volume Name") +VolumeNumber = uint8("volume_number", "Volume Number") +VolumeNumberLong = uint32("volume_number_long", "Volume Number") +VolumeRemovableFlag = val_string8("volume_removable_flag", "Volume Removable Flag", [ + [ 0x00, "Disk Cannot be Removed from Server" ], + [ 0xff, "Disk Can be Removed from Server" ], +]) +VolumeRequestFlags = val_string16("volume_request_flags", "Volume Request Flags", [ + [ 0x0000, "Do not return name with volume number" ], + [ 0x0001, "Return name with volume number" ], +]) +VolumeSizeInClusters = uint32("volume_size_in_clusters", "Volume Size in Clusters") +VolumesSupportedMax = uint16("volumes_supported_max", "Volumes Supported Max") +VolumeType = val_string16("volume_type", "Volume Type", [ + [ 0x0000, "NetWare 386" ], + [ 0x0001, "NetWare 286" ], + [ 0x0002, "NetWare 386 Version 30" ], + [ 0x0003, "NetWare 386 Version 31" ], +]) +VolumeTypeLong = val_string32("volume_type_long", "Volume Type", [ + [ 0x00000000, "NetWare 386" ], + [ 0x00000001, "NetWare 286" ], + [ 0x00000002, "NetWare 386 Version 30" ], + [ 0x00000003, "NetWare 386 Version 31" ], +]) +WastedServerMemory = uint16("wasted_server_memory", "Wasted Server Memory", ENC_BIG_ENDIAN) +WaitTime = uint32("wait_time", "Wait Time") + +Year = val_string8("year", "Year",[ + [ 0x50, "1980" ], + [ 0x51, "1981" ], + [ 0x52, "1982" ], + [ 0x53, "1983" ], + [ 0x54, "1984" ], + [ 0x55, "1985" ], + [ 0x56, "1986" ], + [ 0x57, "1987" ], + [ 0x58, "1988" ], + [ 0x59, "1989" ], + [ 0x5a, "1990" ], + [ 0x5b, "1991" ], + [ 0x5c, "1992" ], + [ 0x5d, "1993" ], + [ 0x5e, "1994" ], + [ 0x5f, "1995" ], + [ 0x60, "1996" ], + [ 
0x61, "1997" ], + [ 0x62, "1998" ], + [ 0x63, "1999" ], + [ 0x64, "2000" ], + [ 0x65, "2001" ], + [ 0x66, "2002" ], + [ 0x67, "2003" ], + [ 0x68, "2004" ], + [ 0x69, "2005" ], + [ 0x6a, "2006" ], + [ 0x6b, "2007" ], + [ 0x6c, "2008" ], + [ 0x6d, "2009" ], + [ 0x6e, "2010" ], + [ 0x6f, "2011" ], + [ 0x70, "2012" ], + [ 0x71, "2013" ], + [ 0x72, "2014" ], + [ 0x73, "2015" ], + [ 0x74, "2016" ], + [ 0x75, "2017" ], + [ 0x76, "2018" ], + [ 0x77, "2019" ], + [ 0x78, "2020" ], + [ 0x79, "2021" ], + [ 0x7a, "2022" ], + [ 0x7b, "2023" ], + [ 0x7c, "2024" ], + [ 0x7d, "2025" ], + [ 0x7e, "2026" ], + [ 0x7f, "2027" ], + [ 0xc0, "1984" ], + [ 0xc1, "1985" ], + [ 0xc2, "1986" ], + [ 0xc3, "1987" ], + [ 0xc4, "1988" ], + [ 0xc5, "1989" ], + [ 0xc6, "1990" ], + [ 0xc7, "1991" ], + [ 0xc8, "1992" ], + [ 0xc9, "1993" ], + [ 0xca, "1994" ], + [ 0xcb, "1995" ], + [ 0xcc, "1996" ], + [ 0xcd, "1997" ], + [ 0xce, "1998" ], + [ 0xcf, "1999" ], + [ 0xd0, "2000" ], + [ 0xd1, "2001" ], + [ 0xd2, "2002" ], + [ 0xd3, "2003" ], + [ 0xd4, "2004" ], + [ 0xd5, "2005" ], + [ 0xd6, "2006" ], + [ 0xd7, "2007" ], + [ 0xd8, "2008" ], + [ 0xd9, "2009" ], + [ 0xda, "2010" ], + [ 0xdb, "2011" ], + [ 0xdc, "2012" ], + [ 0xdd, "2013" ], + [ 0xde, "2014" ], + [ 0xdf, "2015" ], +]) +############################################################################## +# Structs +############################################################################## + + +acctngInfo = struct("acctng_info_struct", [ + HoldTime, + HoldAmount, + ChargeAmount, + HeldConnectTimeInMinutes, + HeldRequests, + HeldBytesRead, + HeldBytesWritten, +],"Accounting Information") +AFP10Struct = struct("afp_10_struct", [ + AFPEntryID, + ParentID, + AttributesDef16, + DataForkLen, + ResourceForkLen, + TotalOffspring, + CreationDate, + LastAccessedDate, + ModifiedDate, + ModifiedTime, + ArchivedDate, + ArchivedTime, + CreatorID, + Reserved4, + FinderAttr, + HorizLocation, + VertLocation, + FileDirWindow, + Reserved16, + LongName, + CreatorID, + ShortName, + AccessPrivileges, +], "AFP Information" ) +AFP20Struct = struct("afp_20_struct", [ + AFPEntryID, + ParentID, + AttributesDef16, + DataForkLen, + ResourceForkLen, + TotalOffspring, + CreationDate, + LastAccessedDate, + ModifiedDate, + ModifiedTime, + ArchivedDate, + ArchivedTime, + CreatorID, + Reserved4, + FinderAttr, + HorizLocation, + VertLocation, + FileDirWindow, + Reserved16, + LongName, + CreatorID, + ShortName, + AccessPrivileges, + Reserved, + ProDOSInfo, +], "AFP Information" ) +ArchiveDateStruct = struct("archive_date_struct", [ + ArchivedDate, +]) +ArchiveIdStruct = struct("archive_id_struct", [ + ArchiverID, +]) +ArchiveInfoStruct = struct("archive_info_struct", [ + ArchivedTime, + ArchivedDate, + ArchiverID, +], "Archive Information") +ArchiveTimeStruct = struct("archive_time_struct", [ + ArchivedTime, +]) +AttributesStruct = struct("attributes_struct", [ + AttributesDef32, + FlagsDef, +], "Attributes") +authInfo = struct("auth_info_struct", [ + Status, + Reserved2, + Privileges, +]) +BoardNameStruct = struct("board_name_struct", [ + DriverBoardName, + DriverShortName, + DriverLogicalName, +], "Board Name") +CacheInfo = struct("cache_info", [ + uint32("max_byte_cnt", "Maximum Byte Count"), + uint32("min_num_of_cache_buff", "Minimum Number Of Cache Buffers"), + uint32("min_cache_report_thresh", "Minimum Cache Report Threshold"), + uint32("alloc_waiting", "Allocate Waiting Count"), + uint32("ndirty_blocks", "Number of Dirty Blocks"), + uint32("cache_dirty_wait_time", "Cache Dirty Wait Time"), + 
uint32("cache_max_concur_writes", "Cache Maximum Concurrent Writes"), + uint32("max_dirty_time", "Maximum Dirty Time"), + uint32("num_dir_cache_buff", "Number Of Directory Cache Buffers"), + uint32("cache_byte_to_block", "Cache Byte To Block Shift Factor"), +], "Cache Information") +CommonLanStruc = struct("common_lan_struct", [ + boolean8("not_supported_mask", "Bit Counter Supported"), + Reserved3, + uint32("total_tx_packet_count", "Total Transmit Packet Count"), + uint32("total_rx_packet_count", "Total Receive Packet Count"), + uint32("no_ecb_available_count", "No ECB Available Count"), + uint32("packet_tx_too_big_count", "Transmit Packet Too Big Count"), + uint32("packet_tx_too_small_count", "Transmit Packet Too Small Count"), + uint32("packet_rx_overflow_count", "Receive Packet Overflow Count"), + uint32("packet_rx_too_big_count", "Receive Packet Too Big Count"), + uint32("packet_rs_too_small_count", "Receive Packet Too Small Count"), + uint32("packet_tx_misc_error_count", "Transmit Packet Misc Error Count"), + uint32("packet_rx_misc_error_count", "Receive Packet Misc Error Count"), + uint32("retry_tx_count", "Transmit Retry Count"), + uint32("checksum_error_count", "Checksum Error Count"), + uint32("hardware_rx_mismatch_count", "Hardware Receive Mismatch Count"), +], "Common LAN Information") +CompDeCompStat = struct("comp_d_comp_stat", [ + uint32("cmphitickhigh", "Compress High Tick"), + uint32("cmphitickcnt", "Compress High Tick Count"), + uint32("cmpbyteincount", "Compress Byte In Count"), + uint32("cmpbyteoutcnt", "Compress Byte Out Count"), + uint32("cmphibyteincnt", "Compress High Byte In Count"), + uint32("cmphibyteoutcnt", "Compress High Byte Out Count"), + uint32("decphitickhigh", "DeCompress High Tick"), + uint32("decphitickcnt", "DeCompress High Tick Count"), + uint32("decpbyteincount", "DeCompress Byte In Count"), + uint32("decpbyteoutcnt", "DeCompress Byte Out Count"), + uint32("decphibyteincnt", "DeCompress High Byte In Count"), + uint32("decphibyteoutcnt", "DeCompress High Byte Out Count"), +], "Compression/Decompression Information") +ConnFileStruct = struct("conn_file_struct", [ + ConnectionNumberWord, + TaskNumberWord, + LockType, + AccessControl, + LockFlag, +], "File Connection Information") +ConnStruct = struct("conn_struct", [ + TaskNumByte, + LockType, + AccessControl, + LockFlag, + VolumeNumber, + DirectoryEntryNumberWord, + FileName14, +], "Connection Information") +ConnTaskStruct = struct("conn_task_struct", [ + ConnectionNumberByte, + TaskNumByte, +], "Task Information") +Counters = struct("counters_struct", [ + uint32("read_exist_blck", "Read Existing Block Count"), + uint32("read_exist_write_wait", "Read Existing Write Wait Count"), + uint32("read_exist_part_read", "Read Existing Partial Read Count"), + uint32("read_exist_read_err", "Read Existing Read Error Count"), + uint32("wrt_blck_cnt", "Write Block Count"), + uint32("wrt_entire_blck", "Write Entire Block Count"), + uint32("internl_dsk_get", "Internal Disk Get Count"), + uint32("internl_dsk_get_need_to_alloc", "Internal Disk Get Need To Allocate Count"), + uint32("internl_dsk_get_someone_beat", "Internal Disk Get Someone Beat My Count"), + uint32("internl_dsk_get_part_read", "Internal Disk Get Partial Read Count"), + uint32("internl_dsk_get_read_err", "Internal Disk Get Read Error Count"), + uint32("async_internl_dsk_get", "Async Internal Disk Get Count"), + uint32("async_internl_dsk_get_need_to_alloc", "Async Internal Disk Get Need To Alloc"), + uint32("async_internl_dsk_get_someone_beat", "Async 
Internal Disk Get Someone Beat Me"), + uint32("err_doing_async_read", "Error Doing Async Read Count"), + uint32("internl_dsk_get_no_read", "Internal Disk Get No Read Count"), + uint32("internl_dsk_get_no_read_alloc", "Internal Disk Get No Read Allocate Count"), + uint32("internl_dsk_get_no_read_someone_beat", "Internal Disk Get No Read Someone Beat Me Count"), + uint32("internl_dsk_write", "Internal Disk Write Count"), + uint32("internl_dsk_write_alloc", "Internal Disk Write Allocate Count"), + uint32("internl_dsk_write_someone_beat", "Internal Disk Write Someone Beat Me Count"), + uint32("write_err", "Write Error Count"), + uint32("wait_on_sema", "Wait On Semaphore Count"), + uint32("alloc_blck_i_had_to_wait_for", "Allocate Block I Had To Wait For Someone Count"), + uint32("alloc_blck", "Allocate Block Count"), + uint32("alloc_blck_i_had_to_wait", "Allocate Block I Had To Wait Count"), +], "Disk Counter Information") +CPUInformation = struct("cpu_information", [ + PageTableOwnerFlag, + CPUType, + Reserved3, + CoprocessorFlag, + BusType, + Reserved3, + IOEngineFlag, + Reserved3, + FSEngineFlag, + Reserved3, + NonDedFlag, + Reserved3, + CPUString, + CoProcessorString, + BusString, +], "CPU Information") +CreationDateStruct = struct("creation_date_struct", [ + CreationDate, +]) +CreationInfoStruct = struct("creation_info_struct", [ + CreationTime, + CreationDate, + endian(CreatorID, ENC_LITTLE_ENDIAN), +], "Creation Information") +CreationTimeStruct = struct("creation_time_struct", [ + CreationTime, +]) +CustomCntsInfo = struct("custom_cnts_info", [ + CustomVariableValue, + CustomString, +], "Custom Counters" ) +DataStreamInfo = struct("data_stream_info", [ + AssociatedNameSpace, + DataStreamName +]) +DataStreamSizeStruct = struct("data_stream_size_struct", [ + DataStreamSize, +]) +DirCacheInfo = struct("dir_cache_info", [ + uint32("min_time_since_file_delete", "Minimum Time Since File Delete"), + uint32("abs_min_time_since_file_delete", "Absolute Minimum Time Since File Delete"), + uint32("min_num_of_dir_cache_buff", "Minimum Number Of Directory Cache Buffers"), + uint32("max_num_of_dir_cache_buff", "Maximum Number Of Directory Cache Buffers"), + uint32("num_of_dir_cache_buff", "Number Of Directory Cache Buffers"), + uint32("dc_min_non_ref_time", "DC Minimum Non-Referenced Time"), + uint32("dc_wait_time_before_new_buff", "DC Wait Time Before New Buffer"), + uint32("dc_max_concurrent_writes", "DC Maximum Concurrent Writes"), + uint32("dc_dirty_wait_time", "DC Dirty Wait Time"), + uint32("dc_double_read_flag", "DC Double Read Flag"), + uint32("map_hash_node_count", "Map Hash Node Count"), + uint32("space_restriction_node_count", "Space Restriction Node Count"), + uint32("trustee_list_node_count", "Trustee List Node Count"), + uint32("percent_of_vol_used_by_dirs", "Percent Of Volume Used By Directories"), +], "Directory Cache Information") +DirDiskSpaceRest64bit = struct("dir_disk_space_rest_64bit", [ + Level, + MaxSpace64, + MinSpaceLeft64 +], "Directory Disk Space Restriction 64 bit") +DirEntryStruct = struct("dir_entry_struct", [ + DirectoryEntryNumber, + DOSDirectoryEntryNumber, + VolumeNumberLong, +], "Directory Entry Information") +DirectoryInstance = struct("directory_instance", [ + SearchSequenceWord, + DirectoryID, + DirectoryName14, + DirectoryAttributes, + DirectoryAccessRights, + endian(CreationDate, ENC_BIG_ENDIAN), + endian(AccessDate, ENC_BIG_ENDIAN), + CreatorID, + Reserved2, + DirectoryStamp, +], "Directory Information") +DMInfoLevel0 = struct("dm_info_level_0", [ + 
uint32("io_flag", "IO Flag"), + uint32("sm_info_size", "Storage Module Information Size"), + uint32("avail_space", "Available Space"), + uint32("used_space", "Used Space"), + stringz("s_module_name", "Storage Module Name"), + uint8("s_m_info", "Storage Media Information"), +]) +DMInfoLevel1 = struct("dm_info_level_1", [ + NumberOfSMs, + SMIDs, +]) +DMInfoLevel2 = struct("dm_info_level_2", [ + Name, +]) +DOSDirectoryEntryStruct = struct("dos_directory_entry_struct", [ + AttributesDef32, + UniqueID, + PurgeFlags, + DestNameSpace, + DirectoryNameLen, + DirectoryName, + CreationTime, + CreationDate, + CreatorID, + ArchivedTime, + ArchivedDate, + ArchiverID, + UpdateTime, + UpdateDate, + NextTrusteeEntry, + Reserved48, + InheritedRightsMask, +], "DOS Directory Information") +DOSFileEntryStruct = struct("dos_file_entry_struct", [ + AttributesDef32, + UniqueID, + PurgeFlags, + DestNameSpace, + NameLen, + Name12, + CreationTime, + CreationDate, + CreatorID, + ArchivedTime, + ArchivedDate, + ArchiverID, + UpdateTime, + UpdateDate, + UpdateID, + FileSize, + DataForkFirstFAT, + NextTrusteeEntry, + Reserved36, + InheritedRightsMask, + LastAccessedDate, + Reserved20, + PrimaryEntry, + NameList, +], "DOS File Information") +DSSpaceAllocateStruct = struct("ds_space_alloc_struct", [ + DataStreamSpaceAlloc, +]) +DynMemStruct = struct("dyn_mem_struct", [ + uint32("dyn_mem_struct_total", "Total Dynamic Space" ), + uint32("dyn_mem_struct_max", "Max Used Dynamic Space" ), + uint32("dyn_mem_struct_cur", "Current Used Dynamic Space" ), +], "Dynamic Memory Information") +EAInfoStruct = struct("ea_info_struct", [ + EADataSize, + EACount, + EAKeySize, +], "Extended Attribute Information") +ExtraCacheCntrs = struct("extra_cache_cntrs", [ + uint32("internl_dsk_get_no_wait", "Internal Disk Get No Wait Count"), + uint32("internl_dsk_get_no_wait_need", "Internal Disk Get No Wait Need To Allocate Count"), + uint32("internl_dsk_get_no_wait_no_blk", "Internal Disk Get No Wait No Block Count"), + uint32("id_get_no_read_no_wait", "ID Get No Read No Wait Count"), + uint32("id_get_no_read_no_wait_sema", "ID Get No Read No Wait Semaphored Count"), + uint32("id_get_no_read_no_wait_buffer", "ID Get No Read No Wait No Buffer Count"), + uint32("id_get_no_read_no_wait_alloc", "ID Get No Read No Wait Allocate Count"), + uint32("id_get_no_read_no_wait_no_alloc", "ID Get No Read No Wait No Alloc Count"), + uint32("id_get_no_read_no_wait_no_alloc_sema", "ID Get No Read No Wait No Alloc Semaphored Count"), + uint32("id_get_no_read_no_wait_no_alloc_alloc", "ID Get No Read No Wait No Alloc Allocate Count"), +], "Extra Cache Counters Information") + +FileSize64bitStruct = struct("file_sz_64bit_struct", [ + FileSize64bit, +]) + +ReferenceIDStruct = struct("ref_id_struct", [ + CurrentReferenceID, +]) +NSAttributeStruct = struct("ns_attrib_struct", [ + AttributesDef32, +]) +DStreamActual = struct("d_stream_actual", [ + DataStreamNumberLong, + DataStreamFATBlocks, +], "Actual Stream") +DStreamLogical = struct("d_string_logical", [ + DataStreamNumberLong, + DataStreamSize, +], "Logical Stream") +LastUpdatedInSecondsStruct = struct("last_update_in_seconds_struct", [ + SecondsRelativeToTheYear2000, +]) +DOSNameStruct = struct("dos_name_struct", [ + FileName, +], "DOS File Name") +DOSName16Struct = struct("dos_name_16_struct", [ + FileName16, +], "DOS File Name") +FlushTimeStruct = struct("flush_time_struct", [ + FlushTime, +]) +ParentBaseIDStruct = struct("parent_base_id_struct", [ + ParentBaseID, +]) +MacFinderInfoStruct = 
struct("mac_finder_info_struct", [ + MacFinderInfo, +]) +SiblingCountStruct = struct("sibling_count_struct", [ + SiblingCount, +]) +EffectiveRightsStruct = struct("eff_rights_struct", [ + EffectiveRights, + Reserved3, +]) +MacTimeStruct = struct("mac_time_struct", [ + MACCreateDate, + MACCreateTime, + MACBackupDate, + MACBackupTime, +]) +LastAccessedTimeStruct = struct("last_access_time_struct", [ + LastAccessedTime, +]) +FileAttributesStruct = struct("file_attributes_struct", [ + AttributesDef32, +]) +FileInfoStruct = struct("file_info_struct", [ + ParentID, + DirectoryEntryNumber, + TotalBlocksToDecompress, + #CurrentBlockBeingDecompressed, +], "File Information") +FileInstance = struct("file_instance", [ + SearchSequenceWord, + DirectoryID, + FileName14, + AttributesDef, + FileMode, + FileSize, + endian(CreationDate, ENC_BIG_ENDIAN), + endian(AccessDate, ENC_BIG_ENDIAN), + endian(UpdateDate, ENC_BIG_ENDIAN), + endian(UpdateTime, ENC_BIG_ENDIAN), +], "File Instance") +FileNameStruct = struct("file_name_struct", [ + FileName, +], "File Name") +FileName16Struct = struct("file_name16_struct", [ + FileName16, +], "File Name") +FileServerCounters = struct("file_server_counters", [ + uint16("too_many_hops", "Too Many Hops"), + uint16("unknown_network", "Unknown Network"), + uint16("no_space_for_service", "No Space For Service"), + uint16("no_receive_buff", "No Receive Buffers"), + uint16("not_my_network", "Not My Network"), + uint32("netbios_progated", "NetBIOS Propagated Count"), + uint32("ttl_pckts_srvcd", "Total Packets Serviced"), + uint32("ttl_pckts_routed", "Total Packets Routed"), +], "File Server Counters") +FileSystemInfo = struct("file_system_info", [ + uint32("fat_moved", "Number of times the OS has move the location of FAT"), + uint32("fat_write_err", "Number of write errors in both original and mirrored copies of FAT"), + uint32("someone_else_did_it_0", "Someone Else Did It Count 0"), + uint32("someone_else_did_it_1", "Someone Else Did It Count 1"), + uint32("someone_else_did_it_2", "Someone Else Did It Count 2"), + uint32("i_ran_out_someone_else_did_it_0", "I Ran Out Someone Else Did It Count 0"), + uint32("i_ran_out_someone_else_did_it_1", "I Ran Out Someone Else Did It Count 1"), + uint32("i_ran_out_someone_else_did_it_2", "I Ran Out Someone Else Did It Count 2"), + uint32("turbo_fat_build_failed", "Turbo FAT Build Failed Count"), + uint32("extra_use_count_node_count", "Errors allocating a use count node for TTS"), + uint32("extra_extra_use_count_node_count", "Errors allocating an additional use count node for TTS"), + uint32("error_read_last_fat", "Error Reading Last FAT Count"), + uint32("someone_else_using_this_file", "Someone Else Using This File Count"), +], "File System Information") +GenericInfoDef = struct("generic_info_def", [ + fw_string("generic_label", "Label", 64), + uint32("generic_ident_type", "Identification Type"), + uint32("generic_ident_time", "Identification Time"), + uint32("generic_media_type", "Media Type"), + uint32("generic_cartridge_type", "Cartridge Type"), + uint32("generic_unit_size", "Unit Size"), + uint32("generic_block_size", "Block Size"), + uint32("generic_capacity", "Capacity"), + uint32("generic_pref_unit_size", "Preferred Unit Size"), + fw_string("generic_name", "Name",64), + uint32("generic_type", "Type"), + uint32("generic_status", "Status"), + uint32("generic_func_mask", "Function Mask"), + uint32("generic_ctl_mask", "Control Mask"), + uint32("generic_parent_count", "Parent Count"), + uint32("generic_sib_count", "Sibling Count"), + 
uint32("generic_child_count", "Child Count"), + uint32("generic_spec_info_sz", "Specific Information Size"), + uint32("generic_object_uniq_id", "Unique Object ID"), + uint32("generic_media_slot", "Media Slot"), +], "Generic Information") +HandleInfoLevel0 = struct("handle_info_level_0", [ +# DataStream, +]) +HandleInfoLevel1 = struct("handle_info_level_1", [ + DataStream, +]) +HandleInfoLevel2 = struct("handle_info_level_2", [ + DOSDirectoryBase, + NameSpace, + DataStream, +]) +HandleInfoLevel3 = struct("handle_info_level_3", [ + DOSDirectoryBase, + NameSpace, +]) +HandleInfoLevel4 = struct("handle_info_level_4", [ + DOSDirectoryBase, + NameSpace, + ParentDirectoryBase, + ParentDOSDirectoryBase, +]) +HandleInfoLevel5 = struct("handle_info_level_5", [ + DOSDirectoryBase, + NameSpace, + DataStream, + ParentDirectoryBase, + ParentDOSDirectoryBase, +]) +IPXInformation = struct("ipx_information", [ + uint32("ipx_send_pkt", "IPX Send Packet Count"), + uint16("ipx_malform_pkt", "IPX Malformed Packet Count"), + uint32("ipx_get_ecb_req", "IPX Get ECB Request Count"), + uint32("ipx_get_ecb_fail", "IPX Get ECB Fail Count"), + uint32("ipx_aes_event", "IPX AES Event Count"), + uint16("ipx_postponed_aes", "IPX Postponed AES Count"), + uint16("ipx_max_conf_sock", "IPX Max Configured Socket Count"), + uint16("ipx_max_open_sock", "IPX Max Open Socket Count"), + uint16("ipx_open_sock_fail", "IPX Open Socket Fail Count"), + uint32("ipx_listen_ecb", "IPX Listen ECB Count"), + uint16("ipx_ecb_cancel_fail", "IPX ECB Cancel Fail Count"), + uint16("ipx_get_lcl_targ_fail", "IPX Get Local Target Fail Count"), +], "IPX Information") +JobEntryTime = struct("job_entry_time", [ + Year, + Month, + Day, + Hour, + Minute, + Second, +], "Job Entry Time") +JobStruct3x = struct("job_struct_3x", [ + RecordInUseFlag, + PreviousRecord, + NextRecord, + ClientStationLong, + ClientTaskNumberLong, + ClientIDNumber, + TargetServerIDNumber, + TargetExecutionTime, + JobEntryTime, + JobNumberLong, + JobType, + JobPositionWord, + JobControlFlagsWord, + JobFileName, + JobFileHandleLong, + ServerStationLong, + ServerTaskNumberLong, + ServerID, + TextJobDescription, + ClientRecordArea, +], "Job Information") +JobStruct = struct("job_struct", [ + ClientStation, + ClientTaskNumber, + ClientIDNumber, + TargetServerIDNumber, + TargetExecutionTime, + JobEntryTime, + JobNumber, + JobType, + JobPosition, + JobControlFlags, + JobFileName, + JobFileHandle, + ServerStation, + ServerTaskNumber, + ServerID, + TextJobDescription, + ClientRecordArea, +], "Job Information") +JobStructNew = struct("job_struct_new", [ + RecordInUseFlag, + PreviousRecord, + NextRecord, + ClientStationLong, + ClientTaskNumberLong, + ClientIDNumber, + TargetServerIDNumber, + TargetExecutionTime, + JobEntryTime, + JobNumberLong, + JobType, + JobPositionWord, + JobControlFlagsWord, + JobFileName, + JobFileHandleLong, + ServerStationLong, + ServerTaskNumberLong, + ServerID, +], "Job Information") +KnownRoutes = struct("known_routes", [ + NetIDNumber, + HopsToNet, + NetStatus, + TimeToNet, +], "Known Routes") +SrcEnhNWHandlePathS1 = struct("source_nwhandle", [ + DirectoryBase, + VolumeNumber, + HandleFlag, + DataTypeFlag, + Reserved5, +], "Source Information") +DstEnhNWHandlePathS1 = struct("destination_nwhandle", [ + DirectoryBase, + VolumeNumber, + HandleFlag, + DataTypeFlag, + Reserved5, +], "Destination Information") +KnownServStruc = struct("known_server_struct", [ + ServerAddress, + HopsToNet, + ServerNameStringz, +], "Known Servers") +LANConfigInfo = struct("lan_cfg_info", 
[ + LANdriverCFG_MajorVersion, + LANdriverCFG_MinorVersion, + LANdriverNodeAddress, + Reserved, + LANdriverModeFlags, + LANdriverBoardNumber, + LANdriverBoardInstance, + LANdriverMaximumSize, + LANdriverMaxRecvSize, + LANdriverRecvSize, + LANdriverCardID, + LANdriverMediaID, + LANdriverTransportTime, + LANdriverSrcRouting, + LANdriverLineSpeed, + LANdriverReserved, + LANdriverMajorVersion, + LANdriverMinorVersion, + LANdriverFlags, + LANdriverSendRetries, + LANdriverLink, + LANdriverSharingFlags, + LANdriverSlot, + LANdriverIOPortsAndRanges1, + LANdriverIOPortsAndRanges2, + LANdriverIOPortsAndRanges3, + LANdriverIOPortsAndRanges4, + LANdriverMemoryDecode0, + LANdriverMemoryLength0, + LANdriverMemoryDecode1, + LANdriverMemoryLength1, + LANdriverInterrupt1, + LANdriverInterrupt2, + LANdriverDMAUsage1, + LANdriverDMAUsage2, + LANdriverLogicalName, + LANdriverIOReserved, + LANdriverCardName, +], "LAN Configuration Information") +LastAccessStruct = struct("last_access_struct", [ + LastAccessedDate, +]) +lockInfo = struct("lock_info_struct", [ + LogicalLockThreshold, + PhysicalLockThreshold, + FileLockCount, + RecordLockCount, +], "Lock Information") +LockStruct = struct("lock_struct", [ + TaskNumByte, + LockType, + RecordStart, + RecordEnd, +], "Locks") +LoginTime = struct("login_time", [ + Year, + Month, + Day, + Hour, + Minute, + Second, + DayOfWeek, +], "Login Time") +LogLockStruct = struct("log_lock_struct", [ + TaskNumberWord, + LockStatus, + LockName, +], "Logical Locks") +LogRecStruct = struct("log_rec_struct", [ + ConnectionNumberWord, + TaskNumByte, + LockStatus, +], "Logical Record Locks") +LSLInformation = struct("lsl_information", [ + uint32("rx_buffers", "Receive Buffers"), + uint32("rx_buffers_75", "Receive Buffers Warning Level"), + uint32("rx_buffers_checked_out", "Receive Buffers Checked Out Count"), + uint32("rx_buffer_size", "Receive Buffer Size"), + uint32("max_phy_packet_size", "Maximum Physical Packet Size"), + uint32("last_time_rx_buff_was_alloc", "Last Time a Receive Buffer was Allocated"), + uint32("max_num_of_protocols", "Maximum Number of Protocols"), + uint32("max_num_of_media_types", "Maximum Number of Media Types"), + uint32("total_tx_packets", "Total Transmit Packets"), + uint32("get_ecb_buf", "Get ECB Buffers"), + uint32("get_ecb_fails", "Get ECB Failures"), + uint32("aes_event_count", "AES Event Count"), + uint32("post_poned_events", "Postponed Events"), + uint32("ecb_cxl_fails", "ECB Cancel Failures"), + uint32("valid_bfrs_reused", "Valid Buffers Reused"), + uint32("enqueued_send_cnt", "Enqueued Send Count"), + uint32("total_rx_packets", "Total Receive Packets"), + uint32("unclaimed_packets", "Unclaimed Packets"), + uint8("stat_table_major_version", "Statistics Table Major Version"), + uint8("stat_table_minor_version", "Statistics Table Minor Version"), +], "LSL Information") +MaximumSpaceStruct = struct("max_space_struct", [ + MaxSpace, +]) +MemoryCounters = struct("memory_counters", [ + uint32("orig_num_cache_buff", "Original Number Of Cache Buffers"), + uint32("curr_num_cache_buff", "Current Number Of Cache Buffers"), + uint32("cache_dirty_block_thresh", "Cache Dirty Block Threshold"), + uint32("wait_node", "Wait Node Count"), + uint32("wait_node_alloc_fail", "Wait Node Alloc Failure Count"), + uint32("move_cache_node", "Move Cache Node Count"), + uint32("move_cache_node_from_avai", "Move Cache Node From Avail Count"), + uint32("accel_cache_node_write", "Accelerate Cache Node Write Count"), + uint32("rem_cache_node", "Remove Cache Node Count"), + 
uint32("rem_cache_node_from_avail", "Remove Cache Node From Avail Count"), +], "Memory Counters") +MLIDBoardInfo = struct("mlid_board_info", [ + uint32("protocol_board_num", "Protocol Board Number"), + uint16("protocol_number", "Protocol Number"), + bytes("protocol_id", "Protocol ID", 6), + nstring8("protocol_name", "Protocol Name"), +], "MLID Board Information") +ModifyInfoStruct = struct("modify_info_struct", [ + ModifiedTime, + ModifiedDate, + endian(ModifierID, ENC_LITTLE_ENDIAN), + LastAccessedDate, +], "Modification Information") +nameInfo = struct("name_info_struct", [ + ObjectType, + nstring8("login_name", "Login Name"), +], "Name Information") +NCPNetworkAddress = struct("ncp_network_address_struct", [ + TransportType, + Reserved3, + NetAddress, +], "Network Address") + +netAddr = struct("net_addr_struct", [ + TransportType, + nbytes32("transport_addr", "Transport Address"), +], "Network Address") + +NetWareInformationStruct = struct("netware_information_struct", [ + DataStreamSpaceAlloc, # (Data Stream Alloc Bit) + AttributesDef32, # (Attributes Bit) + FlagsDef, + DataStreamSize, # (Data Stream Size Bit) + TotalDataStreamDiskSpaceAlloc, # (Total Stream Size Bit) + NumberOfDataStreams, + CreationTime, # (Creation Bit) + CreationDate, + CreatorID, + ModifiedTime, # (Modify Bit) + ModifiedDate, + ModifierID, + LastAccessedDate, + ArchivedTime, # (Archive Bit) + ArchivedDate, + ArchiverID, + InheritedRightsMask, # (Rights Bit) + DirectoryEntryNumber, # (Directory Entry Bit) + DOSDirectoryEntryNumber, + VolumeNumberLong, + EADataSize, # (Extended Attribute Bit) + EACount, + EAKeySize, + CreatorNameSpaceNumber, # (Name Space Bit) + Reserved3, +], "NetWare Information") +NLMInformation = struct("nlm_information", [ + IdentificationNumber, + NLMFlags, + Reserved3, + NLMType, + Reserved3, + ParentID, + MajorVersion, + MinorVersion, + Revision, + Year, + Reserved3, + Month, + Reserved3, + Day, + Reserved3, + AllocAvailByte, + AllocFreeCount, + LastGarbCollect, + MessageLanguage, + NumberOfReferencedPublics, +], "NLM Information") +NSInfoStruct = struct("ns_info_struct", [ + CreatorNameSpaceNumber, + Reserved3, +]) +NWAuditStatus = struct("nw_audit_status", [ + AuditVersionDate, + AuditFileVersionDate, + val_string16("audit_enable_flag", "Auditing Enabled Flag", [ + [ 0x0000, "Auditing Disabled" ], + [ 0x0001, "Auditing Enabled" ], + ]), + Reserved2, + uint32("audit_file_size", "Audit File Size"), + uint32("modified_counter", "Modified Counter"), + uint32("audit_file_max_size", "Audit File Maximum Size"), + uint32("audit_file_size_threshold", "Audit File Size Threshold"), + uint32("audit_record_count", "Audit Record Count"), + uint32("auditing_flags", "Auditing Flags"), +], "NetWare Audit Status") +ObjectSecurityStruct = struct("object_security_struct", [ + ObjectSecurity, +]) +ObjectFlagsStruct = struct("object_flags_struct", [ + ObjectFlags, +]) +ObjectTypeStruct = struct("object_type_struct", [ + endian(ObjectType, ENC_BIG_ENDIAN), + Reserved2, +]) +ObjectNameStruct = struct("object_name_struct", [ + ObjectNameStringz, +]) +ObjectIDStruct = struct("object_id_struct", [ + ObjectID, + Restriction, +]) +ObjectIDStruct64 = struct("object_id_struct64", [ + endian(ObjectID, ENC_LITTLE_ENDIAN), + endian(RestrictionQuad, ENC_LITTLE_ENDIAN), +]) +OpnFilesStruct = struct("opn_files_struct", [ + TaskNumberWord, + LockType, + AccessControl, + LockFlag, + VolumeNumber, + DOSParentDirectoryEntry, + DOSDirectoryEntry, + ForkCount, + NameSpace, + FileName, +], "Open Files Information") +OwnerIDStruct = 
struct("owner_id_struct", [ + CreatorID, +]) +PacketBurstInformation = struct("packet_burst_information", [ + uint32("big_invalid_slot", "Big Invalid Slot Count"), + uint32("big_forged_packet", "Big Forged Packet Count"), + uint32("big_invalid_packet", "Big Invalid Packet Count"), + uint32("big_still_transmitting", "Big Still Transmitting Count"), + uint32("still_doing_the_last_req", "Still Doing The Last Request Count"), + uint32("invalid_control_req", "Invalid Control Request Count"), + uint32("control_invalid_message_number", "Control Invalid Message Number Count"), + uint32("control_being_torn_down", "Control Being Torn Down Count"), + uint32("big_repeat_the_file_read", "Big Repeat the File Read Count"), + uint32("big_send_extra_cc_count", "Big Send Extra CC Count"), + uint32("big_return_abort_mess", "Big Return Abort Message Count"), + uint32("big_read_invalid_mess", "Big Read Invalid Message Number Count"), + uint32("big_read_do_it_over", "Big Read Do It Over Count"), + uint32("big_read_being_torn_down", "Big Read Being Torn Down Count"), + uint32("previous_control_packet", "Previous Control Packet Count"), + uint32("send_hold_off_message", "Send Hold Off Message Count"), + uint32("big_read_no_data_avail", "Big Read No Data Available Count"), + uint32("big_read_trying_to_read", "Big Read Trying To Read Too Much Count"), + uint32("async_read_error", "Async Read Error Count"), + uint32("big_read_phy_read_err", "Big Read Physical Read Error Count"), + uint32("ctl_bad_ack_frag_list", "Control Bad ACK Fragment List Count"), + uint32("ctl_no_data_read", "Control No Data Read Count"), + uint32("write_dup_req", "Write Duplicate Request Count"), + uint32("shouldnt_be_ack_here", "Shouldn't Be ACKing Here Count"), + uint32("write_incon_packet_len", "Write Inconsistent Packet Lengths Count"), + uint32("first_packet_isnt_a_write", "First Packet Isn't A Write Count"), + uint32("write_trash_dup_req", "Write Trashed Duplicate Request Count"), + uint32("big_write_inv_message_num", "Big Write Invalid Message Number Count"), + uint32("big_write_being_torn_down", "Big Write Being Torn Down Count"), + uint32("big_write_being_abort", "Big Write Being Aborted Count"), + uint32("zero_ack_frag", "Zero ACK Fragment Count"), + uint32("write_curr_trans", "Write Currently Transmitting Count"), + uint32("try_to_write_too_much", "Trying To Write Too Much Count"), + uint32("write_out_of_mem_for_ctl_nodes", "Write Out Of Memory For Control Nodes Count"), + uint32("write_didnt_need_this_frag", "Write Didn't Need This Fragment Count"), + uint32("write_too_many_buf_check", "Write Too Many Buffers Checked Out Count"), + uint32("write_timeout", "Write Time Out Count"), + uint32("write_got_an_ack0", "Write Got An ACK Count 0"), + uint32("write_got_an_ack1", "Write Got An ACK Count 1"), + uint32("poll_abort_conn", "Poller Aborted The Connection Count"), + uint32("may_had_out_of_order", "Maybe Had Out Of Order Writes Count"), + uint32("had_an_out_of_order", "Had An Out Of Order Write Count"), + uint32("moved_the_ack_bit_dn", "Moved The ACK Bit Down Count"), + uint32("bumped_out_of_order", "Bumped Out Of Order Write Count"), + uint32("poll_rem_old_out_of_order", "Poller Removed Old Out Of Order Count"), + uint32("write_didnt_need_but_req_ack", "Write Didn't Need But Requested ACK Count"), + uint32("write_trash_packet", "Write Trashed Packet Count"), + uint32("too_many_ack_frag", "Too Many ACK Fragments Count"), + uint32("saved_an_out_of_order_packet", "Saved An Out Of Order Packet Count"), + uint32("conn_being_aborted", 
"Connection Being Aborted Count"), +], "Packet Burst Information") + +PadDSSpaceAllocate = struct("pad_ds_space_alloc", [ + Reserved4, +]) +PadAttributes = struct("pad_attributes", [ + Reserved6, +]) +PadDataStreamSize = struct("pad_data_stream_size", [ + Reserved4, +]) +PadTotalStreamSize = struct("pad_total_stream_size", [ + Reserved6, +]) +PadCreationInfo = struct("pad_creation_info", [ + Reserved8, +]) +PadModifyInfo = struct("pad_modify_info", [ + Reserved10, +]) +PadArchiveInfo = struct("pad_archive_info", [ + Reserved8, +]) +PadRightsInfo = struct("pad_rights_info", [ + Reserved2, +]) +PadDirEntry = struct("pad_dir_entry", [ + Reserved12, +]) +PadEAInfo = struct("pad_ea_info", [ + Reserved12, +]) +PadNSInfo = struct("pad_ns_info", [ + Reserved4, +]) +PhyLockStruct = struct("phy_lock_struct", [ + LoggedCount, + ShareableLockCount, + RecordStart, + RecordEnd, + LogicalConnectionNumber, + TaskNumByte, + LockType, +], "Physical Locks") +printInfo = struct("print_info_struct", [ + PrintFlags, + TabSize, + Copies, + PrintToFileFlag, + BannerName, + TargetPrinter, + FormType, +], "Print Information") +ReplyLevel1Struct = struct("reply_lvl_1_struct", [ + DirHandle, + VolumeNumber, + Reserved4, +], "Reply Level 1") +ReplyLevel2Struct = struct("reply_lvl_2_struct", [ + VolumeNumberLong, + DirectoryBase, + DOSDirectoryBase, + NameSpace, + DirHandle, +], "Reply Level 2") +RightsInfoStruct = struct("rights_info_struct", [ + InheritedRightsMask, +]) +RoutersInfo = struct("routers_info", [ + bytes("node", "Node", 6), + ConnectedLAN, + uint16("route_hops", "Hop Count"), + uint16("route_time", "Route Time"), +], "Router Information") +RTagStructure = struct("r_tag_struct", [ + RTagNumber, + ResourceSignature, + ResourceCount, + ResourceName, +], "Resource Tag") +ScanInfoFileName = struct("scan_info_file_name", [ + SalvageableFileEntryNumber, + FileName, +]) +ScanInfoFileNoName = struct("scan_info_file_no_name", [ + SalvageableFileEntryNumber, +]) +SeachSequenceStruct = struct("search_seq", [ + VolumeNumber, + DirectoryEntryNumber, + SequenceNumber, +], "Search Sequence") +Segments = struct("segments", [ + uint32("volume_segment_dev_num", "Volume Segment Device Number"), + uint32("volume_segment_offset", "Volume Segment Offset"), + uint32("volume_segment_size", "Volume Segment Size"), +], "Volume Segment Information") +SemaInfoStruct = struct("sema_info_struct", [ + LogicalConnectionNumber, + TaskNumByte, +]) +SemaStruct = struct("sema_struct", [ + OpenCount, + SemaphoreValue, + TaskNumberWord, + SemaphoreName, +], "Semaphore Information") +ServerInfo = struct("server_info", [ + uint32("reply_canceled", "Reply Canceled Count"), + uint32("write_held_off", "Write Held Off Count"), + uint32("write_held_off_with_dup", "Write Held Off With Duplicate Request"), + uint32("invalid_req_type", "Invalid Request Type Count"), + uint32("being_aborted", "Being Aborted Count"), + uint32("already_doing_realloc", "Already Doing Re-Allocate Count"), + uint32("dealloc_invalid_slot", "De-Allocate Invalid Slot Count"), + uint32("dealloc_being_proc", "De-Allocate Being Processed Count"), + uint32("dealloc_forged_packet", "De-Allocate Forged Packet Count"), + uint32("dealloc_still_transmit", "De-Allocate Still Transmitting Count"), + uint32("start_station_error", "Start Station Error Count"), + uint32("invalid_slot", "Invalid Slot Count"), + uint32("being_processed", "Being Processed Count"), + uint32("forged_packet", "Forged Packet Count"), + uint32("still_transmitting", "Still Transmitting Count"), + 
uint32("reexecute_request", "Re-Execute Request Count"), + uint32("invalid_sequence_number", "Invalid Sequence Number Count"), + uint32("dup_is_being_sent", "Duplicate Is Being Sent Already Count"), + uint32("sent_pos_ack", "Sent Positive Acknowledge Count"), + uint32("sent_a_dup_reply", "Sent A Duplicate Reply Count"), + uint32("no_mem_for_station", "No Memory For Station Control Count"), + uint32("no_avail_conns", "No Available Connections Count"), + uint32("realloc_slot", "Re-Allocate Slot Count"), + uint32("realloc_slot_came_too_soon", "Re-Allocate Slot Came Too Soon Count"), +], "Server Information") +ServersSrcInfo = struct("servers_src_info", [ + ServerNode, + ConnectedLAN, + HopsToNet, +], "Source Server Information") +SpaceStruct = struct("space_struct", [ + Level, + MaxSpace, + CurrentSpace, +], "Space Information") +SPXInformation = struct("spx_information", [ + uint16("spx_max_conn", "SPX Max Connections Count"), + uint16("spx_max_used_conn", "SPX Max Used Connections"), + uint16("spx_est_conn_req", "SPX Establish Connection Requests"), + uint16("spx_est_conn_fail", "SPX Establish Connection Fail"), + uint16("spx_listen_con_req", "SPX Listen Connect Request"), + uint16("spx_listen_con_fail", "SPX Listen Connect Fail"), + uint32("spx_send", "SPX Send Count"), + uint32("spx_window_choke", "SPX Window Choke Count"), + uint16("spx_bad_send", "SPX Bad Send Count"), + uint16("spx_send_fail", "SPX Send Fail Count"), + uint16("spx_abort_conn", "SPX Aborted Connection"), + uint32("spx_listen_pkt", "SPX Listen Packet Count"), + uint16("spx_bad_listen", "SPX Bad Listen Count"), + uint32("spx_incoming_pkt", "SPX Incoming Packet Count"), + uint16("spx_bad_in_pkt", "SPX Bad In Packet Count"), + uint16("spx_supp_pkt", "SPX Suppressed Packet Count"), + uint16("spx_no_ses_listen", "SPX No Session Listen ECB Count"), + uint16("spx_watch_dog", "SPX Watch Dog Destination Session Count"), +], "SPX Information") +StackInfo = struct("stack_info", [ + StackNumber, + fw_string("stack_short_name", "Stack Short Name", 16), +], "Stack Information") +statsInfo = struct("stats_info_struct", [ + TotalBytesRead, + TotalBytesWritten, + TotalRequest, +], "Statistics") +TaskStruct = struct("task_struct", [ + TaskNumberWord, + TaskState, +], "Task Information") +theTimeStruct = struct("the_time_struct", [ + UTCTimeInSeconds, + FractionalSeconds, + TimesyncStatus, +]) +timeInfo = struct("time_info", [ + Year, + Month, + Day, + Hour, + Minute, + Second, + DayOfWeek, + uint32("login_expiration_time", "Login Expiration Time"), +]) +TotalStreamSizeStruct = struct("total_stream_size_struct", [ + TtlDSDskSpaceAlloc, + NumberOfDataStreams, +]) +TrendCounters = struct("trend_counters", [ + uint32("num_of_cache_checks", "Number Of Cache Checks"), + uint32("num_of_cache_hits", "Number Of Cache Hits"), + uint32("num_of_dirty_cache_checks", "Number Of Dirty Cache Checks"), + uint32("num_of_cache_dirty_checks", "Number Of Cache Dirty Checks"), + uint32("cache_used_while_check", "Cache Used While Checking"), + uint32("wait_till_dirty_blcks_dec", "Wait Till Dirty Blocks Decrease Count"), + uint32("alloc_blck_frm_avail", "Allocate Block From Available Count"), + uint32("alloc_blck_frm_lru", "Allocate Block From LRU Count"), + uint32("alloc_blck_already_wait", "Allocate Block Already Waiting"), + uint32("lru_sit_time", "LRU Sitting Time"), + uint32("num_of_cache_check_no_wait", "Number Of Cache Check No Wait"), + uint32("num_of_cache_hits_no_wait", "Number Of Cache Hits No Wait"), +], "Trend Counters") +TrusteeStruct = 
struct("trustee_struct", [ + endian(ObjectID, ENC_LITTLE_ENDIAN), + AccessRightsMaskWord, +]) +UpdateDateStruct = struct("update_date_struct", [ + UpdateDate, +]) +UpdateIDStruct = struct("update_id_struct", [ + UpdateID, +]) +UpdateTimeStruct = struct("update_time_struct", [ + UpdateTime, +]) +UserInformation = struct("user_info", [ + endian(ConnectionNumber, ENC_LITTLE_ENDIAN), + UseCount, + Reserved2, + ConnectionServiceType, + Year, + Month, + Day, + Hour, + Minute, + Second, + DayOfWeek, + Status, + Reserved2, + ExpirationTime, + ObjectType, + Reserved2, + TransactionTrackingFlag, + LogicalLockThreshold, + FileWriteFlags, + FileWriteState, + Reserved, + FileLockCount, + RecordLockCount, + TotalBytesRead, + TotalBytesWritten, + TotalRequest, + HeldRequests, + HeldBytesRead, + HeldBytesWritten, +], "User Information") +VolInfoStructure = struct("vol_info_struct", [ + VolumeType, + Reserved2, + StatusFlagBits, + SectorSize, + SectorsPerClusterLong, + VolumeSizeInClusters, + FreedClusters, + SubAllocFreeableClusters, + FreeableLimboSectors, + NonFreeableLimboSectors, + NonFreeableAvailableSubAllocSectors, + NotUsableSubAllocSectors, + SubAllocClusters, + DataStreamsCount, + LimboDataStreamsCount, + OldestDeletedFileAgeInTicks, + CompressedDataStreamsCount, + CompressedLimboDataStreamsCount, + UnCompressableDataStreamsCount, + PreCompressedSectors, + CompressedSectors, + MigratedFiles, + MigratedSectors, + ClustersUsedByFAT, + ClustersUsedByDirectories, + ClustersUsedByExtendedDirectories, + TotalDirectoryEntries, + UnUsedDirectoryEntries, + TotalExtendedDirectoryExtents, + UnUsedExtendedDirectoryExtents, + ExtendedAttributesDefined, + ExtendedAttributeExtentsUsed, + DirectoryServicesObjectID, + VolumeEpochTime, + +], "Volume Information") +VolInfoStructure64 = struct("vol_info_struct64", [ + VolumeTypeLong, + StatusFlagBits, + uint64("sectoresize64", "Sector Size"), + uint64("sectorspercluster64", "Sectors Per Cluster"), + uint64("volumesizeinclusters64", "Volume Size in Clusters"), + uint64("freedclusters64", "Freed Clusters"), + uint64("suballocfreeableclusters64", "Sub Alloc Freeable Clusters"), + uint64("freeablelimbosectors64", "Freeable Limbo Sectors"), + uint64("nonfreeablelimbosectors64", "Non-Freeable Limbo Sectors"), + uint64("nonfreeableavailalesuballocsectors64", "Non-Freeable Available Sub Alloc Sectors"), + uint64("notusablesuballocsectors64", "Not Usable Sub Alloc Sectors"), + uint64("suballocclusters64", "Sub Alloc Clusters"), + uint64("datastreamscount64", "Data Streams Count"), + uint64("limbodatastreamscount64", "Limbo Data Streams Count"), + uint64("oldestdeletedfileageinticks64", "Oldest Deleted File Age in Ticks"), + uint64("compressdatastreamscount64", "Compressed Data Streams Count"), + uint64("compressedlimbodatastreamscount64", "Compressed Limbo Data Streams Count"), + uint64("uncompressabledatastreamscount64", "Uncompressable Data Streams Count"), + uint64("precompressedsectors64", "Precompressed Sectors"), + uint64("compressedsectors64", "Compressed Sectors"), + uint64("migratedfiles64", "Migrated Files"), + uint64("migratedsectors64", "Migrated Sectors"), + uint64("clustersusedbyfat64", "Clusters Used by FAT"), + uint64("clustersusedbydirectories64", "Clusters Used by Directories"), + uint64("clustersusedbyextendeddirectories64", "Clusters Used by Extended Directories"), + uint64("totaldirectoryentries64", "Total Directory Entries"), + uint64("unuseddirectoryentries64", "Unused Directory Entries"), + uint64("totalextendeddirectoryextents64", "Total Extended 
Directory Extents"), + uint64("unusedextendeddirectoryextents64", "Unused Total Extended Directory Extents"), + uint64("extendedattributesdefined64", "Extended Attributes Defined"), + uint64("extendedattributeextentsused64", "Extended Attribute Extents Used"), + uint64("directoryservicesobjectid64", "Directory Services Object ID"), + VolumeEpochTime, + +], "Volume Information") +VolInfo2Struct = struct("vol_info_struct_2", [ + uint32("volume_active_count", "Volume Active Count"), + uint32("volume_use_count", "Volume Use Count"), + uint32("mac_root_ids", "MAC Root IDs"), + VolumeEpochTime, + uint32("volume_reference_count", "Volume Reference Count"), + uint32("compression_lower_limit", "Compression Lower Limit"), + uint32("outstanding_ios", "Outstanding IOs"), + uint32("outstanding_compression_ios", "Outstanding Compression IOs"), + uint32("compression_ios_limit", "Compression IOs Limit"), +], "Extended Volume Information") +VolumeWithNameStruct = struct("volume_with_name_struct", [ + VolumeNumberLong, + VolumeNameLen, +]) +VolumeStruct = struct("volume_struct", [ + VolumeNumberLong, +]) + +zFileMap_Allocation = struct("zfilemap_allocation_struct", [ + uint64("extent_byte_offset", "Byte Offset"), + endian(uint64("extent_length_alloc", "Length"), ENC_LITTLE_ENDIAN), + #ExtentLength, +], "File Map Allocation") +zFileMap_Logical = struct("zfilemap_logical_struct", [ + uint64("extent_block_number", "Block Number"), + uint64("extent_number_of_blocks", "Number of Blocks"), +], "File Map Logical") +zFileMap_Physical = struct("zfilemap_physical_struct", [ + uint64("extent_length_physical", "Length"), + uint64("extent_logical_offset", "Logical Offset"), + uint64("extent_pool_offset", "Pool Offset"), + uint64("extent_physical_offset", "Physical Offset"), + fw_string("extent_device_id", "Device ID", 8), +], "File Map Physical") + +############################################################################## +# NCP Groups +############################################################################## +def define_groups(): + groups['accounting'] = "Accounting" + groups['afp'] = "AFP" + groups['auditing'] = "Auditing" + groups['bindery'] = "Bindery" + groups['connection'] = "Connection" + groups['enhanced'] = "Enhanced File System" + groups['extended'] = "Extended Attribute" + groups['extension'] = "NCP Extension" + groups['file'] = "File System" + groups['fileserver'] = "File Server Environment" + groups['message'] = "Message" + groups['migration'] = "Data Migration" + groups['nds'] = "Novell Directory Services" + groups['pburst'] = "Packet Burst" + groups['print'] = "Print" + groups['remote'] = "Remote" + groups['sync'] = "Synchronization" + groups['tsync'] = "Time Synchronization" + groups['tts'] = "Transaction Tracking" + groups['qms'] = "Queue Management System (QMS)" + groups['stats'] = "Server Statistics" + groups['nmas'] = "Novell Modular Authentication Service" + groups['sss'] = "SecretStore Services" + +############################################################################## +# NCP Errors +############################################################################## +def define_errors(): + errors[0x0000] = "Ok" + errors[0x0001] = "Transaction tracking is available" + errors[0x0002] = "Ok. 
The data has been written" + errors[0x0003] = "Calling Station is a Manager" + + errors[0x0100] = "One or more of the Connection Numbers in the send list are invalid" + errors[0x0101] = "Invalid space limit" + errors[0x0102] = "Insufficient disk space" + errors[0x0103] = "Queue server cannot add jobs" + errors[0x0104] = "Out of disk space" + errors[0x0105] = "Semaphore overflow" + errors[0x0106] = "Invalid Parameter" + errors[0x0107] = "Invalid Number of Minutes to Delay" + errors[0x0108] = "Invalid Start or Network Number" + errors[0x0109] = "Cannot Obtain License" + errors[0x010a] = "No Purgeable Files Available" + + errors[0x0200] = "One or more clients in the send list are not logged in" + errors[0x0201] = "Queue server cannot attach" + + errors[0x0300] = "One or more clients in the send list are not accepting messages" + + errors[0x0400] = "Client already has message" + errors[0x0401] = "Queue server cannot service job" + + errors[0x7300] = "Revoke Handle Rights Not Found" + errors[0x7700] = "Buffer Too Small" + errors[0x7900] = "Invalid Parameter in Request Packet" + errors[0x7901] = "Nothing being Compressed" + errors[0x7902] = "No Items Found" + errors[0x7a00] = "Connection Already Temporary" + errors[0x7b00] = "Connection Already Logged in" + errors[0x7c00] = "Connection Not Authenticated" + errors[0x7d00] = "Connection Not Logged In" + + errors[0x7e00] = "NCP failed boundary check" + errors[0x7e01] = "Invalid Length" + + errors[0x7f00] = "Lock Waiting" + errors[0x8000] = "Lock fail" + errors[0x8001] = "File in Use" + + errors[0x8100] = "A file handle could not be allocated by the file server" + errors[0x8101] = "Out of File Handles" + + errors[0x8200] = "Unauthorized to open the file" + errors[0x8300] = "Unable to read/write the volume. Possible bad sector on the file server" + errors[0x8301] = "Hard I/O Error" + + errors[0x8400] = "Unauthorized to create the directory" + errors[0x8401] = "Unauthorized to create the file" + + errors[0x8500] = "Unauthorized to delete the specified file" + errors[0x8501] = "Unauthorized to overwrite an existing file in this directory" + + errors[0x8700] = "An unexpected character was encountered in the filename" + errors[0x8701] = "Create Filename Error" + + errors[0x8800] = "Invalid file handle" + errors[0x8900] = "Unauthorized to search this file/directory" + errors[0x8a00] = "Unauthorized to delete this file/directory" + errors[0x8b00] = "Unauthorized to rename a file in this directory" + + errors[0x8c00] = "No set privileges" + errors[0x8c01] = "Unauthorized to modify a file in this directory" + errors[0x8c02] = "Unauthorized to change the restriction on this volume" + + errors[0x8d00] = "Some of the affected files are in use by another client" + errors[0x8d01] = "The affected file is in use" + + errors[0x8e00] = "All of the affected files are in use by another client" + errors[0x8f00] = "Some of the affected files are read-only" + + errors[0x9000] = "An attempt to modify a read-only volume occurred" + errors[0x9001] = "All of the affected files are read-only" + errors[0x9002] = "Read Only Access to Volume" + + errors[0x9100] = "Some of the affected files already exist" + errors[0x9101] = "Some Names Exist" + + errors[0x9200] = "Directory with the new name already exists" + errors[0x9201] = "All of the affected files already exist" + + errors[0x9300] = "Unauthorized to read from this file" + errors[0x9400] = "Unauthorized to write to this file" + errors[0x9500] = "The affected file is detached" + + errors[0x9600] = "The file server has run out 
of memory to service this request" + errors[0x9601] = "No alloc space for message" + errors[0x9602] = "Server Out of Space" + + errors[0x9800] = "The affected volume is not mounted" + errors[0x9801] = "The volume associated with Volume Number is not mounted" + errors[0x9802] = "The resulting volume does not exist" + errors[0x9803] = "The destination volume is not mounted" + errors[0x9804] = "Disk Map Error" + + errors[0x9900] = "The file server has run out of directory space on the affected volume" + errors[0x9a00] = "Invalid request to rename the affected file to another volume" + + errors[0x9b00] = "DirHandle is not associated with a valid directory path" + errors[0x9b01] = "A resulting directory handle is not associated with a valid directory path" + errors[0x9b02] = "The directory associated with DirHandle does not exist" + errors[0x9b03] = "Bad directory handle" + + errors[0x9c00] = "The resulting path is not valid" + errors[0x9c01] = "The resulting file path is not valid" + errors[0x9c02] = "The resulting directory path is not valid" + errors[0x9c03] = "Invalid path" + errors[0x9c04] = "No more trustees found, based on requested search sequence number" + + errors[0x9d00] = "A directory handle was not available for allocation" + + errors[0x9e00] = "The name of the directory does not conform to a legal name for this name space" + errors[0x9e01] = "The new directory name does not conform to a legal name for this name space" + errors[0x9e02] = "Bad File Name" + + errors[0x9f00] = "The request attempted to delete a directory that is in use by another client" + + errors[0xa000] = "The request attempted to delete a directory that is not empty" + errors[0xa100] = "An unrecoverable error occurred on the affected directory" + + errors[0xa200] = "The request attempted to read from a file region that is physically locked" + errors[0xa201] = "I/O Lock Error" + + errors[0xa400] = "Invalid directory rename attempted" + errors[0xa500] = "Invalid open create mode" + errors[0xa600] = "Auditor Access has been Removed" + errors[0xa700] = "Error Auditing Version" + + errors[0xa800] = "Invalid Support Module ID" + errors[0xa801] = "No Auditing Access Rights" + errors[0xa802] = "No Access Rights" + + errors[0xa900] = "Error Link in Path" + errors[0xa901] = "Invalid Path With Junction Present" + + errors[0xaa00] = "Invalid Data Type Flag" + + errors[0xac00] = "Packet Signature Required" + + errors[0xbe00] = "Invalid Data Stream" + errors[0xbf00] = "Requests for this name space are not valid on this volume" + + errors[0xc000] = "Unauthorized to retrieve accounting data" + + errors[0xc100] = "The ACCOUNT_BALANCE property does not exist" + errors[0xc101] = "No Account Balance" + + errors[0xc200] = "The object has exceeded its credit limit" + errors[0xc300] = "Too many holds have been placed against this account" + errors[0xc400] = "The client account has been disabled" + + errors[0xc500] = "Access to the account has been denied because of intruder detection" + errors[0xc501] = "Login lockout" + errors[0xc502] = "Server Login Locked" + + errors[0xc600] = "The caller does not have operator privileges" + errors[0xc601] = "The client does not have operator privileges" + + errors[0xc800] = "Missing EA Key" + errors[0xc900] = "EA Not Found" + errors[0xca00] = "Invalid EA Handle Type" + errors[0xcb00] = "EA No Key No Data" + errors[0xcc00] = "EA Number Mismatch" + errors[0xcd00] = "Extent Number Out of Range" + errors[0xce00] = "EA Bad Directory Number" + errors[0xcf00] = "Invalid EA Handle" + + errors[0xd000] = 
"Queue error" + errors[0xd001] = "EA Position Out of Range" + + errors[0xd100] = "The queue does not exist" + errors[0xd101] = "EA Access Denied" + + errors[0xd200] = "A queue server is not associated with this queue" + errors[0xd201] = "A queue server is not associated with the selected queue" + errors[0xd202] = "No queue server" + errors[0xd203] = "Data Page Odd Size" + + errors[0xd300] = "No queue rights" + errors[0xd301] = "EA Volume Not Mounted" + + errors[0xd400] = "The queue is full and cannot accept another request" + errors[0xd401] = "The queue associated with ObjectId is full and cannot accept another request" + errors[0xd402] = "Bad Page Boundary" + + errors[0xd500] = "A job does not exist in this queue" + errors[0xd501] = "No queue job" + errors[0xd502] = "The job associated with JobNumber does not exist in this queue" + errors[0xd503] = "Inspect Failure" + errors[0xd504] = "Unknown NCP Extension Number" + + errors[0xd600] = "The file server does not allow unencrypted passwords" + errors[0xd601] = "No job right" + errors[0xd602] = "EA Already Claimed" + + errors[0xd700] = "Bad account" + errors[0xd701] = "The old and new password strings are identical" + errors[0xd702] = "The job is currently being serviced" + errors[0xd703] = "The queue is currently servicing a job" + errors[0xd704] = "Queue servicing" + errors[0xd705] = "Odd Buffer Size" + + errors[0xd800] = "Queue not active" + errors[0xd801] = "No Scorecards" + + errors[0xd900] = "The file server cannot accept another connection as it has reached its limit" + errors[0xd901] = "The client is not security equivalent to one of the objects in the Q_SERVERS group property of the target queue" + errors[0xd902] = "Queue Station is not a server" + errors[0xd903] = "Bad EDS Signature" + errors[0xd904] = "Attempt to log in using an account which has limits on the number of concurrent connections and that number has been reached." 
+ + errors[0xda00] = "Attempted to login to the file server during a restricted time period" + errors[0xda01] = "Queue halted" + errors[0xda02] = "EA Space Limit" + + errors[0xdb00] = "Attempted to login to the file server from an unauthorized workstation or network" + errors[0xdb01] = "The queue cannot attach another queue server" + errors[0xdb02] = "Maximum queue servers" + errors[0xdb03] = "EA Key Corrupt" + + errors[0xdc00] = "Account Expired" + errors[0xdc01] = "EA Key Limit" + + errors[0xdd00] = "Tally Corrupt" + errors[0xde00] = "Attempted to login to the file server with an incorrect password" + errors[0xdf00] = "Attempted to login to the file server with a password that has expired" + + errors[0xe000] = "No Login Connections Available" + errors[0xe700] = "No disk track" + errors[0xe800] = "Write to group" + errors[0xe900] = "The object is already a member of the group property" + + errors[0xea00] = "No such member" + errors[0xea01] = "The bindery object is not a member of the set" + errors[0xea02] = "Non-existent member" + + errors[0xeb00] = "The property is not a set property" + + errors[0xec00] = "No such set" + errors[0xec01] = "The set property does not exist" + + errors[0xed00] = "Property exists" + errors[0xed01] = "The property already exists" + errors[0xed02] = "An attempt was made to create a bindery object property that already exists" + + errors[0xee00] = "The object already exists" + errors[0xee01] = "The bindery object already exists" + + errors[0xef00] = "Illegal name" + errors[0xef01] = "Illegal characters in ObjectName field" + errors[0xef02] = "Invalid name" + + errors[0xf000] = "A wildcard was detected in a field that does not support wildcards" + errors[0xf001] = "An illegal wildcard was detected in ObjectName" + + errors[0xf100] = "The client does not have the rights to access this bindery object" + errors[0xf101] = "Bindery security" + errors[0xf102] = "Invalid bindery security" + + errors[0xf200] = "Unauthorized to read from this object" + errors[0xf300] = "Unauthorized to rename this object" + + errors[0xf400] = "Unauthorized to delete this object" + errors[0xf401] = "No object delete privileges" + errors[0xf402] = "Unauthorized to delete this queue" + + errors[0xf500] = "Unauthorized to create this object" + errors[0xf501] = "No object create" + + errors[0xf600] = "No property delete" + errors[0xf601] = "Unauthorized to delete the property of this object" + errors[0xf602] = "Unauthorized to delete this property" + + errors[0xf700] = "Unauthorized to create this property" + errors[0xf701] = "No property create privilege" + + errors[0xf800] = "Unauthorized to write to this property" + errors[0xf900] = "Unauthorized to read this property" + errors[0xfa00] = "Temporary remap error" + + errors[0xfb00] = "No such property" + errors[0xfb01] = "The file server does not support this request" + errors[0xfb02] = "The specified property does not exist" + errors[0xfb03] = "The PASSWORD property does not exist for this bindery object" + errors[0xfb04] = "NDS NCP not available" + errors[0xfb05] = "Bad Directory Handle" + errors[0xfb06] = "Unknown Request" + errors[0xfb07] = "Invalid Subfunction Request" + errors[0xfb08] = "Attempt to use an invalid parameter (drive number, path, or flag value) during a set drive path call" + errors[0xfb09] = "NMAS not running on this server, NCP NOT Supported" + errors[0xfb0a] = "Station Not Logged In" + errors[0xfb0b] = "Secret Store not running on this server, NCP Not supported" + + errors[0xfc00] = "The message queue cannot accept 
another message" + errors[0xfc01] = "The trustee associated with ObjectId does not exist" + errors[0xfc02] = "The specified bindery object does not exist" + errors[0xfc03] = "The bindery object associated with ObjectID does not exist" + errors[0xfc04] = "A bindery object does not exist that matches" + errors[0xfc05] = "The specified queue does not exist" + errors[0xfc06] = "No such object" + errors[0xfc07] = "The queue associated with ObjectID does not exist" + + errors[0xfd00] = "Bad station number" + errors[0xfd01] = "The connection associated with ConnectionNumber is not active" + errors[0xfd02] = "Lock collision" + errors[0xfd03] = "Transaction tracking is disabled" + + errors[0xfe00] = "I/O failure" + errors[0xfe01] = "The files containing the bindery on the file server are locked" + errors[0xfe02] = "A file with the specified name already exists in this directory" + errors[0xfe03] = "No more restrictions were found" + errors[0xfe04] = "The file server was unable to lock the file within the specified time limit" + errors[0xfe05] = "The file server was unable to lock all files within the specified time limit" + errors[0xfe06] = "The bindery object associated with ObjectID is not a valid trustee" + errors[0xfe07] = "Directory locked" + errors[0xfe08] = "Bindery locked" + errors[0xfe09] = "Invalid semaphore name length" + errors[0xfe0a] = "The file server was unable to complete the operation within the specified time limit" + errors[0xfe0b] = "Transaction restart" + errors[0xfe0c] = "Bad packet" + errors[0xfe0d] = "Timeout" + errors[0xfe0e] = "User Not Found" + errors[0xfe0f] = "Trustee Not Found" + + errors[0xff00] = "Failure" + errors[0xff01] = "Lock error" + errors[0xff02] = "File not found" + errors[0xff03] = "The file not found or cannot be unlocked" + errors[0xff04] = "Record not found" + errors[0xff05] = "The logical record was not found" + errors[0xff06] = "The printer associated with Printer Number does not exist" + errors[0xff07] = "No such printer" + errors[0xff08] = "Unable to complete the request" + errors[0xff09] = "Unauthorized to change privileges of this trustee" + errors[0xff0a] = "No files matching the search criteria were found" + errors[0xff0b] = "A file matching the search criteria was not found" + errors[0xff0c] = "Verification failed" + errors[0xff0d] = "Object associated with ObjectID is not a manager" + errors[0xff0e] = "Invalid initial semaphore value" + errors[0xff0f] = "The semaphore handle is not valid" + errors[0xff10] = "SemaphoreHandle is not associated with a valid sempahore" + errors[0xff11] = "Invalid semaphore handle" + errors[0xff12] = "Transaction tracking is not available" + errors[0xff13] = "The transaction has not yet been written to disk" + errors[0xff14] = "Directory already exists" + errors[0xff15] = "The file already exists and the deletion flag was not set" + errors[0xff16] = "No matching files or directories were found" + errors[0xff17] = "A file or directory matching the search criteria was not found" + errors[0xff18] = "The file already exists" + errors[0xff19] = "Failure, No files found" + errors[0xff1a] = "Unlock Error" + errors[0xff1b] = "I/O Bound Error" + errors[0xff1c] = "Not Accepting Messages" + errors[0xff1d] = "No More Salvageable Files in Directory" + errors[0xff1e] = "Calling Station is Not a Manager" + errors[0xff1f] = "Bindery Failure" + errors[0xff20] = "NCP Extension Not Found" + errors[0xff21] = "Audit Property Not Found" + errors[0xff22] = "Server Set Parameter Not Found" + 
+############################################################################## +# Produce C code +############################################################################## +def ExamineVars(vars, structs_hash, vars_hash): + for var in vars: + if isinstance(var, struct): + structs_hash[var.HFName()] = var + struct_vars = var.Variables() + ExamineVars(struct_vars, structs_hash, vars_hash) + else: + vars_hash[repr(var)] = var + if isinstance(var, bitfield): + sub_vars = var.SubVariables() + ExamineVars(sub_vars, structs_hash, vars_hash) + +def produce_code(): + + global errors + + print("/*") + print(" * Do not modify this file. Changes will be overwritten.") + print(" * Generated automatically from %s" % (sys.argv[0])) + print(" */\n") + + print(""" +/* + * Portions Copyright (c) Gilbert Ramirez 2000-2002 + * Portions Copyright (c) Novell, Inc. 2000-2005 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "config.h" + +#include <string.h> +#include <glib.h> +#include <epan/packet.h> +#include <epan/dfilter/dfilter.h> +#include <epan/exceptions.h> +#include <ftypes/ftypes.h> +#include <epan/to_str.h> +#include <epan/conversation.h> +#include <epan/ptvcursor.h> +#include <epan/strutil.h> +#include <epan/reassemble.h> +#include <epan/tap.h> +#include <epan/proto_data.h> +#include "packet-ncp-int.h" +#include "packet-ncp-nmas.h" +#include "packet-ncp-sss.h" + +/* Function declarations for functions used in proto_register_ncp2222() */ +void proto_register_ncp2222(void); + +/* Endianness macros */ +#define NO_ENDIANNESS 0 + +#define NO_LENGTH -1 + +/* We use this int-pointer as a special flag in ptvc_record's */ +static int ptvc_struct_int_storage; +#define PTVC_STRUCT (&ptvc_struct_int_storage) + +/* Values used in the count-variable ("var"/"repeat") logic. */""") + + + if global_highest_var > -1: + print("#define NUM_REPEAT_VARS %d" % (global_highest_var + 1)) + print("static unsigned repeat_vars[NUM_REPEAT_VARS];") + else: + print("#define NUM_REPEAT_VARS 0") + print("static unsigned *repeat_vars = NULL;") + + print(""" +#define NO_VAR NUM_REPEAT_VARS +#define NO_REPEAT NUM_REPEAT_VARS + +#define REQ_COND_SIZE_CONSTANT 0 +#define REQ_COND_SIZE_VARIABLE 1 +#define NO_REQ_COND_SIZE 0 + + +#define NTREE 0x00020000 +#define NDEPTH 0x00000002 +#define NREV 0x00000004 +#define NFLAGS 0x00000008 + +static int hf_ncp_number_of_data_streams_long = -1; +static int hf_ncp_func = -1; +static int hf_ncp_length = -1; +static int hf_ncp_subfunc = -1; +static int hf_ncp_group = -1; +static int hf_ncp_fragment_handle = -1; +static int hf_ncp_completion_code = -1; +static int hf_ncp_connection_status = -1; +static int hf_ncp_req_frame_num = -1; +static int hf_ncp_req_frame_time = -1; +static int hf_ncp_fragment_size = -1; +static int hf_ncp_message_size = -1; +static int hf_ncp_nds_flag = -1; +static int hf_ncp_nds_verb = -1; +static int hf_ping_version = -1; +/* static int hf_nds_version = -1; */ +/* static int hf_nds_flags = -1; */ +static int hf_nds_reply_depth = -1; +static int hf_nds_reply_rev = -1; +static int hf_nds_reply_flags = -1; +static int hf_nds_p1type = -1; +static int hf_nds_uint32value = -1; +static int hf_nds_bit1 = -1; +static int hf_nds_bit2 = -1; +static int hf_nds_bit3 = -1; +static int hf_nds_bit4 = -1; +static int hf_nds_bit5 = -1; +static int hf_nds_bit6 = -1; +static int hf_nds_bit7 = -1; +static int hf_nds_bit8 = -1; +static int hf_nds_bit9 = -1; +static int hf_nds_bit10 = -1; +static int hf_nds_bit11 = -1; +static int hf_nds_bit12 = -1; +static int hf_nds_bit13 = -1; +static int hf_nds_bit14 = -1; +static int hf_nds_bit15 = -1; +static int hf_nds_bit16 = -1; +static int hf_outflags = -1; +static int hf_bit1outflags = -1; +static int hf_bit2outflags = -1; +static int hf_bit3outflags = 
-1; +static int hf_bit4outflags = -1; +static int hf_bit5outflags = -1; +static int hf_bit6outflags = -1; +static int hf_bit7outflags = -1; +static int hf_bit8outflags = -1; +static int hf_bit9outflags = -1; +static int hf_bit10outflags = -1; +static int hf_bit11outflags = -1; +static int hf_bit12outflags = -1; +static int hf_bit13outflags = -1; +static int hf_bit14outflags = -1; +static int hf_bit15outflags = -1; +static int hf_bit16outflags = -1; +static int hf_bit1nflags = -1; +static int hf_bit2nflags = -1; +static int hf_bit3nflags = -1; +static int hf_bit4nflags = -1; +static int hf_bit5nflags = -1; +static int hf_bit6nflags = -1; +static int hf_bit7nflags = -1; +static int hf_bit8nflags = -1; +static int hf_bit9nflags = -1; +static int hf_bit10nflags = -1; +static int hf_bit11nflags = -1; +static int hf_bit12nflags = -1; +static int hf_bit13nflags = -1; +static int hf_bit14nflags = -1; +static int hf_bit15nflags = -1; +static int hf_bit16nflags = -1; +static int hf_bit1rflags = -1; +static int hf_bit2rflags = -1; +static int hf_bit3rflags = -1; +static int hf_bit4rflags = -1; +static int hf_bit5rflags = -1; +static int hf_bit6rflags = -1; +static int hf_bit7rflags = -1; +static int hf_bit8rflags = -1; +static int hf_bit9rflags = -1; +static int hf_bit10rflags = -1; +static int hf_bit11rflags = -1; +static int hf_bit12rflags = -1; +static int hf_bit13rflags = -1; +static int hf_bit14rflags = -1; +static int hf_bit15rflags = -1; +static int hf_bit16rflags = -1; +static int hf_cflags = -1; +static int hf_bit1cflags = -1; +static int hf_bit2cflags = -1; +static int hf_bit3cflags = -1; +static int hf_bit4cflags = -1; +static int hf_bit5cflags = -1; +static int hf_bit6cflags = -1; +static int hf_bit7cflags = -1; +static int hf_bit8cflags = -1; +static int hf_bit9cflags = -1; +static int hf_bit10cflags = -1; +static int hf_bit11cflags = -1; +static int hf_bit12cflags = -1; +static int hf_bit13cflags = -1; +static int hf_bit14cflags = -1; +static int hf_bit15cflags = -1; +static int hf_bit16cflags = -1; +static int hf_bit1acflags = -1; +static int hf_bit2acflags = -1; +static int hf_bit3acflags = -1; +static int hf_bit4acflags = -1; +static int hf_bit5acflags = -1; +static int hf_bit6acflags = -1; +static int hf_bit7acflags = -1; +static int hf_bit8acflags = -1; +static int hf_bit9acflags = -1; +static int hf_bit10acflags = -1; +static int hf_bit11acflags = -1; +static int hf_bit12acflags = -1; +static int hf_bit13acflags = -1; +static int hf_bit14acflags = -1; +static int hf_bit15acflags = -1; +static int hf_bit16acflags = -1; +static int hf_vflags = -1; +static int hf_bit1vflags = -1; +static int hf_bit2vflags = -1; +static int hf_bit3vflags = -1; +static int hf_bit4vflags = -1; +static int hf_bit5vflags = -1; +static int hf_bit6vflags = -1; +static int hf_bit7vflags = -1; +static int hf_bit8vflags = -1; +static int hf_bit9vflags = -1; +static int hf_bit10vflags = -1; +static int hf_bit11vflags = -1; +static int hf_bit12vflags = -1; +static int hf_bit13vflags = -1; +static int hf_bit14vflags = -1; +static int hf_bit15vflags = -1; +static int hf_bit16vflags = -1; +static int hf_eflags = -1; +static int hf_bit1eflags = -1; +static int hf_bit2eflags = -1; +static int hf_bit3eflags = -1; +static int hf_bit4eflags = -1; +static int hf_bit5eflags = -1; +static int hf_bit6eflags = -1; +static int hf_bit7eflags = -1; +static int hf_bit8eflags = -1; +static int hf_bit9eflags = -1; +static int hf_bit10eflags = -1; +static int hf_bit11eflags = -1; +static int hf_bit12eflags = -1; +static int 
hf_bit13eflags = -1; +static int hf_bit14eflags = -1; +static int hf_bit15eflags = -1; +static int hf_bit16eflags = -1; +static int hf_infoflagsl = -1; +static int hf_retinfoflagsl = -1; +static int hf_bit1infoflagsl = -1; +static int hf_bit2infoflagsl = -1; +static int hf_bit3infoflagsl = -1; +static int hf_bit4infoflagsl = -1; +static int hf_bit5infoflagsl = -1; +static int hf_bit6infoflagsl = -1; +static int hf_bit7infoflagsl = -1; +static int hf_bit8infoflagsl = -1; +static int hf_bit9infoflagsl = -1; +static int hf_bit10infoflagsl = -1; +static int hf_bit11infoflagsl = -1; +static int hf_bit12infoflagsl = -1; +static int hf_bit13infoflagsl = -1; +static int hf_bit14infoflagsl = -1; +static int hf_bit15infoflagsl = -1; +static int hf_bit16infoflagsl = -1; +static int hf_infoflagsh = -1; +static int hf_bit1infoflagsh = -1; +static int hf_bit2infoflagsh = -1; +static int hf_bit3infoflagsh = -1; +static int hf_bit4infoflagsh = -1; +static int hf_bit5infoflagsh = -1; +static int hf_bit6infoflagsh = -1; +static int hf_bit7infoflagsh = -1; +static int hf_bit8infoflagsh = -1; +static int hf_bit9infoflagsh = -1; +static int hf_bit10infoflagsh = -1; +static int hf_bit11infoflagsh = -1; +static int hf_bit12infoflagsh = -1; +static int hf_bit13infoflagsh = -1; +static int hf_bit14infoflagsh = -1; +static int hf_bit15infoflagsh = -1; +static int hf_bit16infoflagsh = -1; +static int hf_retinfoflagsh = -1; +static int hf_bit1retinfoflagsh = -1; +static int hf_bit2retinfoflagsh = -1; +static int hf_bit3retinfoflagsh = -1; +static int hf_bit4retinfoflagsh = -1; +static int hf_bit5retinfoflagsh = -1; +static int hf_bit6retinfoflagsh = -1; +static int hf_bit7retinfoflagsh = -1; +static int hf_bit8retinfoflagsh = -1; +static int hf_bit9retinfoflagsh = -1; +static int hf_bit10retinfoflagsh = -1; +static int hf_bit11retinfoflagsh = -1; +static int hf_bit12retinfoflagsh = -1; +static int hf_bit13retinfoflagsh = -1; +static int hf_bit14retinfoflagsh = -1; +static int hf_bit15retinfoflagsh = -1; +static int hf_bit16retinfoflagsh = -1; +static int hf_bit1lflags = -1; +static int hf_bit2lflags = -1; +static int hf_bit3lflags = -1; +static int hf_bit4lflags = -1; +static int hf_bit5lflags = -1; +static int hf_bit6lflags = -1; +static int hf_bit7lflags = -1; +static int hf_bit8lflags = -1; +static int hf_bit9lflags = -1; +static int hf_bit10lflags = -1; +static int hf_bit11lflags = -1; +static int hf_bit12lflags = -1; +static int hf_bit13lflags = -1; +static int hf_bit14lflags = -1; +static int hf_bit15lflags = -1; +static int hf_bit16lflags = -1; +static int hf_l1flagsl = -1; +static int hf_l1flagsh = -1; +static int hf_bit1l1flagsl = -1; +static int hf_bit2l1flagsl = -1; +static int hf_bit3l1flagsl = -1; +static int hf_bit4l1flagsl = -1; +static int hf_bit5l1flagsl = -1; +static int hf_bit6l1flagsl = -1; +static int hf_bit7l1flagsl = -1; +static int hf_bit8l1flagsl = -1; +static int hf_bit9l1flagsl = -1; +static int hf_bit10l1flagsl = -1; +static int hf_bit11l1flagsl = -1; +static int hf_bit12l1flagsl = -1; +static int hf_bit13l1flagsl = -1; +static int hf_bit14l1flagsl = -1; +static int hf_bit15l1flagsl = -1; +static int hf_bit16l1flagsl = -1; +static int hf_bit1l1flagsh = -1; +static int hf_bit2l1flagsh = -1; +static int hf_bit3l1flagsh = -1; +static int hf_bit4l1flagsh = -1; +static int hf_bit5l1flagsh = -1; +static int hf_bit6l1flagsh = -1; +static int hf_bit7l1flagsh = -1; +static int hf_bit8l1flagsh = -1; +static int hf_bit9l1flagsh = -1; +static int hf_bit10l1flagsh = -1; +static int hf_bit11l1flagsh = 
-1; +static int hf_bit12l1flagsh = -1; +static int hf_bit13l1flagsh = -1; +static int hf_bit14l1flagsh = -1; +static int hf_bit15l1flagsh = -1; +static int hf_bit16l1flagsh = -1; +static int hf_nds_tree_name = -1; +static int hf_nds_reply_error = -1; +static int hf_nds_net = -1; +static int hf_nds_node = -1; +static int hf_nds_socket = -1; +static int hf_add_ref_ip = -1; +static int hf_add_ref_udp = -1; +static int hf_add_ref_tcp = -1; +static int hf_referral_record = -1; +static int hf_referral_addcount = -1; +static int hf_nds_port = -1; +static int hf_mv_string = -1; +static int hf_nds_syntax = -1; +static int hf_value_string = -1; +static int hf_nds_buffer_size = -1; +static int hf_nds_ver = -1; +static int hf_nds_nflags = -1; +static int hf_nds_scope = -1; +static int hf_nds_name = -1; +static int hf_nds_comm_trans = -1; +static int hf_nds_tree_trans = -1; +static int hf_nds_iteration = -1; +static int hf_nds_eid = -1; +static int hf_nds_info_type = -1; +static int hf_nds_all_attr = -1; +static int hf_nds_req_flags = -1; +static int hf_nds_attr = -1; +static int hf_nds_crc = -1; +static int hf_nds_referrals = -1; +static int hf_nds_result_flags = -1; +static int hf_nds_tag_string = -1; +static int hf_value_bytes = -1; +static int hf_replica_type = -1; +static int hf_replica_state = -1; +static int hf_replica_number = -1; +static int hf_min_nds_ver = -1; +static int hf_nds_ver_include = -1; +static int hf_nds_ver_exclude = -1; +/* static int hf_nds_es = -1; */ +static int hf_es_type = -1; +/* static int hf_delim_string = -1; */ +static int hf_rdn_string = -1; +static int hf_nds_revent = -1; +static int hf_nds_rnum = -1; +static int hf_nds_name_type = -1; +static int hf_nds_rflags = -1; +static int hf_nds_eflags = -1; +static int hf_nds_depth = -1; +static int hf_nds_class_def_type = -1; +static int hf_nds_classes = -1; +static int hf_nds_return_all_classes = -1; +static int hf_nds_stream_flags = -1; +static int hf_nds_stream_name = -1; +static int hf_nds_file_handle = -1; +static int hf_nds_file_size = -1; +static int hf_nds_dn_output_type = -1; +static int hf_nds_nested_output_type = -1; +static int hf_nds_output_delimiter = -1; +static int hf_nds_output_entry_specifier = -1; +static int hf_es_value = -1; +static int hf_es_rdn_count = -1; +static int hf_nds_replica_num = -1; +static int hf_nds_event_num = -1; +static int hf_es_seconds = -1; +static int hf_nds_compare_results = -1; +static int hf_nds_parent = -1; +static int hf_nds_name_filter = -1; +static int hf_nds_class_filter = -1; +static int hf_nds_time_filter = -1; +static int hf_nds_partition_root_id = -1; +static int hf_nds_replicas = -1; +static int hf_nds_purge = -1; +static int hf_nds_local_partition = -1; +static int hf_partition_busy = -1; +static int hf_nds_number_of_changes = -1; +static int hf_sub_count = -1; +static int hf_nds_revision = -1; +static int hf_nds_base_class = -1; +static int hf_nds_relative_dn = -1; +/* static int hf_nds_root_dn = -1; */ +/* static int hf_nds_parent_dn = -1; */ +static int hf_deref_base = -1; +/* static int hf_nds_entry_info = -1; */ +static int hf_nds_base = -1; +static int hf_nds_privileges = -1; +static int hf_nds_vflags = -1; +static int hf_nds_value_len = -1; +static int hf_nds_cflags = -1; +static int hf_nds_acflags = -1; +static int hf_nds_asn1 = -1; +static int hf_nds_upper = -1; +static int hf_nds_lower = -1; +static int hf_nds_trustee_dn = -1; +static int hf_nds_attribute_dn = -1; +static int hf_nds_acl_add = -1; +static int hf_nds_acl_del = -1; +static int hf_nds_att_add = 
-1; +static int hf_nds_att_del = -1; +static int hf_nds_keep = -1; +static int hf_nds_new_rdn = -1; +static int hf_nds_time_delay = -1; +static int hf_nds_root_name = -1; +static int hf_nds_new_part_id = -1; +static int hf_nds_child_part_id = -1; +static int hf_nds_master_part_id = -1; +static int hf_nds_target_name = -1; +static int hf_nds_super = -1; +static int hf_pingflags2 = -1; +static int hf_bit1pingflags2 = -1; +static int hf_bit2pingflags2 = -1; +static int hf_bit3pingflags2 = -1; +static int hf_bit4pingflags2 = -1; +static int hf_bit5pingflags2 = -1; +static int hf_bit6pingflags2 = -1; +static int hf_bit7pingflags2 = -1; +static int hf_bit8pingflags2 = -1; +static int hf_bit9pingflags2 = -1; +static int hf_bit10pingflags2 = -1; +static int hf_bit11pingflags2 = -1; +static int hf_bit12pingflags2 = -1; +static int hf_bit13pingflags2 = -1; +static int hf_bit14pingflags2 = -1; +static int hf_bit15pingflags2 = -1; +static int hf_bit16pingflags2 = -1; +static int hf_pingflags1 = -1; +static int hf_bit1pingflags1 = -1; +static int hf_bit2pingflags1 = -1; +static int hf_bit3pingflags1 = -1; +static int hf_bit4pingflags1 = -1; +static int hf_bit5pingflags1 = -1; +static int hf_bit6pingflags1 = -1; +static int hf_bit7pingflags1 = -1; +static int hf_bit8pingflags1 = -1; +static int hf_bit9pingflags1 = -1; +static int hf_bit10pingflags1 = -1; +static int hf_bit11pingflags1 = -1; +static int hf_bit12pingflags1 = -1; +static int hf_bit13pingflags1 = -1; +static int hf_bit14pingflags1 = -1; +static int hf_bit15pingflags1 = -1; +static int hf_bit16pingflags1 = -1; +static int hf_pingpflags1 = -1; +static int hf_bit1pingpflags1 = -1; +static int hf_bit2pingpflags1 = -1; +static int hf_bit3pingpflags1 = -1; +static int hf_bit4pingpflags1 = -1; +static int hf_bit5pingpflags1 = -1; +static int hf_bit6pingpflags1 = -1; +static int hf_bit7pingpflags1 = -1; +static int hf_bit8pingpflags1 = -1; +static int hf_bit9pingpflags1 = -1; +static int hf_bit10pingpflags1 = -1; +static int hf_bit11pingpflags1 = -1; +static int hf_bit12pingpflags1 = -1; +static int hf_bit13pingpflags1 = -1; +static int hf_bit14pingpflags1 = -1; +static int hf_bit15pingpflags1 = -1; +static int hf_bit16pingpflags1 = -1; +static int hf_pingvflags1 = -1; +static int hf_bit1pingvflags1 = -1; +static int hf_bit2pingvflags1 = -1; +static int hf_bit3pingvflags1 = -1; +static int hf_bit4pingvflags1 = -1; +static int hf_bit5pingvflags1 = -1; +static int hf_bit6pingvflags1 = -1; +static int hf_bit7pingvflags1 = -1; +static int hf_bit8pingvflags1 = -1; +static int hf_bit9pingvflags1 = -1; +static int hf_bit10pingvflags1 = -1; +static int hf_bit11pingvflags1 = -1; +static int hf_bit12pingvflags1 = -1; +static int hf_bit13pingvflags1 = -1; +static int hf_bit14pingvflags1 = -1; +static int hf_bit15pingvflags1 = -1; +static int hf_bit16pingvflags1 = -1; +static int hf_nds_letter_ver = -1; +static int hf_nds_os_majver = -1; +static int hf_nds_os_minver = -1; +static int hf_nds_lic_flags = -1; +static int hf_nds_ds_time = -1; +static int hf_nds_ping_version = -1; +static int hf_nds_search_scope = -1; +static int hf_nds_num_objects = -1; +static int hf_siflags = -1; +static int hf_bit1siflags = -1; +static int hf_bit2siflags = -1; +static int hf_bit3siflags = -1; +static int hf_bit4siflags = -1; +static int hf_bit5siflags = -1; +static int hf_bit6siflags = -1; +static int hf_bit7siflags = -1; +static int hf_bit8siflags = -1; +static int hf_bit9siflags = -1; +static int hf_bit10siflags = -1; +static int hf_bit11siflags = -1; +static int 
hf_bit12siflags = -1; +static int hf_bit13siflags = -1; +static int hf_bit14siflags = -1; +static int hf_bit15siflags = -1; +static int hf_bit16siflags = -1; +static int hf_nds_segments = -1; +static int hf_nds_segment = -1; +static int hf_nds_segment_overlap = -1; +static int hf_nds_segment_overlap_conflict = -1; +static int hf_nds_segment_multiple_tails = -1; +static int hf_nds_segment_too_long_segment = -1; +static int hf_nds_segment_error = -1; +static int hf_nds_segment_count = -1; +static int hf_nds_reassembled_length = -1; +static int hf_nds_verb2b_req_flags = -1; +static int hf_ncp_ip_address = -1; +static int hf_ncp_copyright = -1; +static int hf_ndsprot1flag = -1; +static int hf_ndsprot2flag = -1; +static int hf_ndsprot3flag = -1; +static int hf_ndsprot4flag = -1; +static int hf_ndsprot5flag = -1; +static int hf_ndsprot6flag = -1; +static int hf_ndsprot7flag = -1; +static int hf_ndsprot8flag = -1; +static int hf_ndsprot9flag = -1; +static int hf_ndsprot10flag = -1; +static int hf_ndsprot11flag = -1; +static int hf_ndsprot12flag = -1; +static int hf_ndsprot13flag = -1; +static int hf_ndsprot14flag = -1; +static int hf_ndsprot15flag = -1; +static int hf_ndsprot16flag = -1; +static int hf_nds_svr_dst_name = -1; +static int hf_nds_tune_mark = -1; +/* static int hf_nds_create_time = -1; */ +static int hf_srvr_param_number = -1; +static int hf_srvr_param_boolean = -1; +static int hf_srvr_param_string = -1; +static int hf_nds_svr_time = -1; +static int hf_nds_crt_time = -1; +static int hf_nds_number_of_items = -1; +static int hf_nds_compare_attributes = -1; +static int hf_nds_read_attribute = -1; +static int hf_nds_write_add_delete_attribute = -1; +static int hf_nds_add_delete_self = -1; +static int hf_nds_privilege_not_defined = -1; +static int hf_nds_supervisor = -1; +static int hf_nds_inheritance_control = -1; +static int hf_nds_browse_entry = -1; +static int hf_nds_add_entry = -1; +static int hf_nds_delete_entry = -1; +static int hf_nds_rename_entry = -1; +static int hf_nds_supervisor_entry = -1; +static int hf_nds_entry_privilege_not_defined = -1; +static int hf_nds_iterator = -1; +static int hf_ncp_nds_iterverb = -1; +static int hf_iter_completion_code = -1; +/* static int hf_nds_iterobj = -1; */ +static int hf_iter_verb_completion_code = -1; +static int hf_iter_ans = -1; +static int hf_positionable = -1; +static int hf_num_skipped = -1; +static int hf_num_to_skip = -1; +static int hf_timelimit = -1; +static int hf_iter_index = -1; +static int hf_num_to_get = -1; +/* static int hf_ret_info_type = -1; */ +static int hf_data_size = -1; +static int hf_this_count = -1; +static int hf_max_entries = -1; +static int hf_move_position = -1; +static int hf_iter_copy = -1; +static int hf_iter_position = -1; +static int hf_iter_search = -1; +static int hf_iter_other = -1; +static int hf_nds_oid = -1; +static int hf_ncp_bytes_actually_trans_64 = -1; +static int hf_sap_name = -1; +static int hf_os_name = -1; +static int hf_vendor_name = -1; +static int hf_hardware_name = -1; +static int hf_no_request_record_found = -1; +static int hf_search_modifier = -1; +static int hf_search_pattern = -1; +static int hf_nds_acl_protected_attribute = -1; +static int hf_nds_acl_subject = -1; +static int hf_nds_acl_privileges = -1; + +static expert_field ei_ncp_file_rights_change = EI_INIT; +static expert_field ei_ncp_completion_code = EI_INIT; +static expert_field ei_nds_reply_error = EI_INIT; +static expert_field ei_ncp_destroy_connection = EI_INIT; +static expert_field ei_nds_iteration = EI_INIT; +static 
expert_field ei_ncp_eid = EI_INIT; +static expert_field ei_ncp_file_handle = EI_INIT; +static expert_field ei_ncp_connection_destroyed = EI_INIT; +static expert_field ei_ncp_no_request_record_found = EI_INIT; +static expert_field ei_ncp_file_rights = EI_INIT; +static expert_field ei_iter_verb_completion_code = EI_INIT; +static expert_field ei_ncp_connection_request = EI_INIT; +static expert_field ei_ncp_connection_status = EI_INIT; +static expert_field ei_ncp_op_lock_handle = EI_INIT; +static expert_field ei_ncp_effective_rights = EI_INIT; +static expert_field ei_ncp_server = EI_INIT; +static expert_field ei_ncp_invalid_offset = EI_INIT; +static expert_field ei_ncp_address_type = EI_INIT; +static expert_field ei_ncp_value_too_large = EI_INIT; +""") + + # Look at all packet types in the packets collection, and cull information + # from them. + errors_used_list = [] + errors_used_hash = {} + groups_used_list = [] + groups_used_hash = {} + variables_used_hash = {} + structs_used_hash = {} + + for pkt in packets: + # Determine which error codes are used. + codes = pkt.CompletionCodes() + for code in codes.Records(): + if code not in errors_used_hash: + errors_used_hash[code] = len(errors_used_list) + errors_used_list.append(code) + + # Determine which groups are used. + group = pkt.Group() + if group not in groups_used_hash: + groups_used_hash[group] = len(groups_used_list) + groups_used_list.append(group) + + + + + # Determine which variables are used. + vars = pkt.Variables() + ExamineVars(vars, structs_used_hash, variables_used_hash) + + + # Print the hf variable declarations + sorted_vars = list(variables_used_hash.values()) + sorted_vars.sort() + for var in sorted_vars: + print("static int " + var.HFName() + " = -1;") + + + # Print the value_string's + for var in sorted_vars: + if isinstance(var, val_string): + print("") + print(var.Code()) + + # Determine which error codes are not used + errors_not_used = {} + # Copy the keys from the error list... + for code in list(errors.keys()): + errors_not_used[code] = 1 + # ... and remove the ones that *were* used. + for code in errors_used_list: + del errors_not_used[code] + + # Print a remark showing errors not used + list_errors_not_used = list(errors_not_used.keys()) + list_errors_not_used.sort() + for code in list_errors_not_used: + print("/* Error 0x%04x not used: %s */" % (code, errors[code])) + print("\n") + + # Print the errors table + print("/* Error strings. */") + print("static const char *ncp_errors[] = {") + for code in errors_used_list: + print(' /* %02d (0x%04x) */ "%s",' % (errors_used_hash[code], code, errors[code])) + print("};\n") + + + + + # Determine which groups are not used + groups_not_used = {} + # Copy the keys from the group list... + for group in list(groups.keys()): + groups_not_used[group] = 1 + # ... and remove the ones that *were* used. + for group in groups_used_list: + del groups_not_used[group] + + # Print a remark showing groups not used + list_groups_not_used = list(groups_not_used.keys()) + list_groups_not_used.sort() + for group in list_groups_not_used: + print("/* Group not used: %s = %s */" % (group, groups[group])) + print("\n") + + # Print the groups table + print("/* Group strings. 
*/") + print("static const char *ncp_groups[] = {") + for group in groups_used_list: + print(' /* %02d (%s) */ "%s",' % (groups_used_hash[group], group, groups[group])) + print("};\n") + + # Print the group macros + for group in groups_used_list: + name = str.upper(group) + print("#define NCP_GROUP_%s %d" % (name, groups_used_hash[group])) + print("\n") + + + # Print the conditional_records for all Request Conditions. + num = 0 + print("/* Request-Condition dfilter records. The NULL pointer") + print(" is replaced by a pointer to the created dfilter_t. */") + if len(global_req_cond) == 0: + print("static conditional_record req_conds = NULL;") + else: + print("static conditional_record req_conds[] = {") + req_cond_l = list(global_req_cond.keys()) + req_cond_l.sort() + for req_cond in req_cond_l: + print(" { \"%s\", NULL }," % (req_cond,)) + global_req_cond[req_cond] = num + num = num + 1 + print("};") + print("#define NUM_REQ_CONDS %d" % (num,)) + print("#define NO_REQ_COND NUM_REQ_CONDS\n\n") + + + + # Print PTVC's for bitfields + ett_list = [] + print("/* PTVC records for bit-fields. */") + for var in sorted_vars: + if isinstance(var, bitfield): + sub_vars_ptvc = var.SubVariablesPTVC() + print("/* %s */" % (sub_vars_ptvc.Name())) + print(sub_vars_ptvc.Code()) + ett_list.append(sub_vars_ptvc.ETTName()) + + + # Print the PTVC's for structures + print("/* PTVC records for structs. */") + # Sort them + svhash = {} + for svar in list(structs_used_hash.values()): + svhash[svar.HFName()] = svar + if svar.descr: + ett_list.append(svar.ETTName()) + + struct_vars = list(svhash.keys()) + struct_vars.sort() + for varname in struct_vars: + var = svhash[varname] + print(var.Code()) + + ett_list.sort() + + # Print info string structures + print("/* Info Strings */") + for pkt in packets: + if pkt.req_info_str: + name = pkt.InfoStrName() + "_req" + var = pkt.req_info_str[0] + print("static const info_string_t %s = {" % (name,)) + print(" &%s," % (var.HFName(),)) + print(' "%s",' % (pkt.req_info_str[1],)) + print(' "%s"' % (pkt.req_info_str[2],)) + print("};\n") + + # Print regular PTVC's + print("/* PTVC records. These are re-used to save space. */") + for ptvc in ptvc_lists.Members(): + if not ptvc.Null() and not ptvc.Empty(): + print(ptvc.Code()) + + # Print error_equivalency tables + print("/* Error-Equivalency Tables. These are re-used to save space. */") + for compcodes in compcode_lists.Members(): + errors = compcodes.Records() + # Make sure the record for error = 0x00 comes last. + print("static const error_equivalency %s[] = {" % (compcodes.Name())) + for error in errors: + error_in_packet = error >> 8; + ncp_error_index = errors_used_hash[error] + print(" { 0x%02x, %d }, /* 0x%04x */" % (error_in_packet, + ncp_error_index, error)) + print(" { 0x00, -1 }\n};\n") + + + + # Print integer arrays for all ncp_records that need + # a list of req_cond_indexes. 
Do it "uniquely" to save space; + # if multiple packets share the same set of req_cond's, + # then they'll share the same integer array + print("/* Request Condition Indexes */") + # First, make them unique + req_cond_collection = UniqueCollection("req_cond_collection") + for pkt in packets: + req_conds = pkt.CalculateReqConds() + if req_conds: + unique_list = req_cond_collection.Add(req_conds) + pkt.SetReqConds(unique_list) + else: + pkt.SetReqConds(None) + + # Print them + for req_cond in req_cond_collection.Members(): + sys.stdout.write("static const int %s[] = {" % (req_cond.Name())) + sys.stdout.write(" ") + vals = [] + for text in req_cond.Records(): + vals.append(global_req_cond[text]) + vals.sort() + for val in vals: + sys.stdout.write("%s, " % (val,)) + + print("-1 };") + print("") + + + + # Functions without length parameter + funcs_without_length = {} + + print("/* Forward declaration of expert info functions defined in ncp2222.inc */") + for expert in expert_hash: + print("static void %s_expert_func(ptvcursor_t *ptvc, packet_info *pinfo, const ncp_record *ncp_rec, bool request);" % expert) + + # Print ncp_record packet records + print("#define SUBFUNC_WITH_LENGTH 0x02") + print("#define SUBFUNC_NO_LENGTH 0x01") + print("#define NO_SUBFUNC 0x00") + + print("/* ncp_record structs for packets */") + print("static const ncp_record ncp_packets[] = {") + for pkt in packets: + if pkt.HasSubFunction(): + func = pkt.FunctionCode('high') + if pkt.HasLength(): + subfunc_string = "SUBFUNC_WITH_LENGTH" + # Ensure that the function either has a length param or not + if func in funcs_without_length: + sys.exit("Function 0x%04x sometimes has length param, sometimes not." \ + % (pkt.FunctionCode(),)) + else: + subfunc_string = "SUBFUNC_NO_LENGTH" + funcs_without_length[func] = 1 + else: + subfunc_string = "NO_SUBFUNC" + sys.stdout.write(' { 0x%02x, 0x%02x, %s, "%s",' % (pkt.FunctionCode('high'), + pkt.FunctionCode('low'), subfunc_string, pkt.Description())) + + print(' %d /* %s */,' % (groups_used_hash[pkt.Group()], pkt.Group())) + + ptvc = pkt.PTVCRequest() + if not ptvc.Null() and not ptvc.Empty(): + ptvc_request = ptvc.Name() + else: + ptvc_request = 'NULL' + + ptvc = pkt.PTVCReply() + if not ptvc.Null() and not ptvc.Empty(): + ptvc_reply = ptvc.Name() + else: + ptvc_reply = 'NULL' + + errors = pkt.CompletionCodes() + + req_conds_obj = pkt.GetReqConds() + if req_conds_obj: + req_conds = req_conds_obj.Name() + else: + req_conds = "NULL" + + if not req_conds_obj: + req_cond_size = "NO_REQ_COND_SIZE" + else: + req_cond_size = pkt.ReqCondSize() + if req_cond_size is None: + msg.write("NCP packet %s needs a ReqCondSize*() call\n" \ + % (pkt.CName(),)) + sys.exit(1) + + if pkt.expert_func: + expert_func = "&" + pkt.expert_func + "_expert_func" + else: + expert_func = "NULL" + + print(' %s, %s, %s, %s, %s, %s },\n' % \ + (ptvc_request, ptvc_reply, errors.Name(), req_conds, + req_cond_size, expert_func)) + + print(' { 0, 0, 0, NULL, 0, NULL, NULL, NULL, NULL, NO_REQ_COND_SIZE, NULL }') + print("};\n") + + print("/* ncp funcs that require a subfunc */") + print("static const uint8_t ncp_func_requires_subfunc[] = {") + hi_seen = {} + for pkt in packets: + if pkt.HasSubFunction(): + hi_func = pkt.FunctionCode('high') + if hi_func not in hi_seen: + print(" 0x%02x," % (hi_func)) + hi_seen[hi_func] = 1 + print(" 0") + print("};\n") + + + print("/* ncp funcs that have no length parameter */") + print("static const uint8_t ncp_func_has_no_length_parameter[] = {") + funcs = list(funcs_without_length.keys()) 
+ funcs.sort() + for func in funcs: + print(" 0x%02x," % (func,)) + print(" 0") + print("};\n") + + print("") + + # proto_register_ncp2222() + print(""" +static const value_string connection_status_vals[] = { + { 0x00, "Ok" }, + { 0x01, "Bad Service Connection" }, + { 0x10, "File Server is Down" }, + { 0x40, "Broadcast Message Pending" }, + { 0, NULL } +}; + +#include "packet-ncp2222.inc" + +void +proto_register_ncp2222(void) +{ + + static hf_register_info hf[] = { + { &hf_ncp_number_of_data_streams_long, + { "Number of Data Streams", "ncp.number_of_data_streams_long", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_func, + { "Function", "ncp.func", FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_length, + { "Packet Length", "ncp.length", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_subfunc, + { "SubFunction", "ncp.subfunc", FT_UINT8, BASE_DEC_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_completion_code, + { "Completion Code", "ncp.completion_code", FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_group, + { "NCP Group Type", "ncp.group", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_fragment_handle, + { "NDS Fragment Handle", "ncp.ndsfrag", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_fragment_size, + { "NDS Fragment Size", "ncp.ndsfragsize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_message_size, + { "Message Size", "ncp.ndsmessagesize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_nds_flag, + { "NDS Protocol Flags", "ncp.ndsflag", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_nds_verb, + { "NDS Verb", "ncp.ndsverb", FT_UINT8, BASE_HEX, VALS(ncp_nds_verb_vals), 0x0, NULL, HFILL }}, + + { &hf_ping_version, + { "NDS Version", "ncp.ping_version", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? */ + { &hf_nds_version, + { "NDS Version", "ncp.nds_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_nds_tree_name, + { "NDS Tree Name", "ncp.nds_tree_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + /* + * XXX - the page at + * + * https://web.archive.org/web/20030629082113/http://www.odyssea.com/whats_new/tcpipnet/tcpipnet.html + * + * says of the connection status "The Connection Code field may + * contain values that indicate the status of the client host to + * server connection. A value of 1 in the fourth bit of this data + * byte indicates that the server is unavailable (server was + * downed). + * + * The page at + * + * https://web.archive.org/web/20090809191415/http://www.unm.edu/~network/presentations/course/appendix/appendix_f/tsld088.htm + * + * says that bit 0 is "bad service", bit 2 is "no connection + * available", bit 4 is "service down", and bit 6 is "server + * has a broadcast message waiting for the client". + * + * Should it be displayed in hex, and should those bits (and any + * other bits with significance) be displayed as bitfields + * underneath it? + */ + { &hf_ncp_connection_status, + { "Connection Status", "ncp.connection_status", FT_UINT8, BASE_DEC, VALS(connection_status_vals), 0x0, NULL, HFILL }}, + + { &hf_ncp_req_frame_num, + { "Response to Request in Frame Number", "ncp.req_frame_num", FT_FRAMENUM, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_req_frame_time, + { "Time from Request", "ncp.time", FT_RELATIVE_TIME, BASE_NONE, NULL, 0x0, "Time between request and response in seconds", HFILL }}, + +#if 0 /* Unused ? 
*/ + { &hf_nds_flags, + { "NDS Return Flags", "ncp.nds_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_nds_reply_depth, + { "Distance from Root", "ncp.ndsdepth", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_reply_rev, + { "NDS Revision", "ncp.ndsrev", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_reply_flags, + { "Flags", "ncp.ndsflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_p1type, + { "NDS Parameter Type", "ncp.p1type", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_uint32value, + { "NDS Value", "ncp.uint32value", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_bit1, + { "Typeless", "ncp.nds_bit1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_nds_bit2, + { "All Containers", "ncp.nds_bit2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_nds_bit3, + { "Slashed", "ncp.nds_bit3", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_nds_bit4, + { "Dotted", "ncp.nds_bit4", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_nds_bit5, + { "Tuned", "ncp.nds_bit5", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_nds_bit6, + { "Not Defined", "ncp.nds_bit6", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_nds_bit7, + { "Not Defined", "ncp.nds_bit7", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_nds_bit8, + { "Not Defined", "ncp.nds_bit8", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_nds_bit9, + { "Not Defined", "ncp.nds_bit9", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_nds_bit10, + { "Not Defined", "ncp.nds_bit10", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_nds_bit11, + { "Not Defined", "ncp.nds_bit11", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_nds_bit12, + { "Not Defined", "ncp.nds_bit12", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_nds_bit13, + { "Not Defined", "ncp.nds_bit13", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_nds_bit14, + { "Not Defined", "ncp.nds_bit14", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_nds_bit15, + { "Not Defined", "ncp.nds_bit15", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_nds_bit16, + { "Not Defined", "ncp.nds_bit16", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_outflags, + { "Output Flags", "ncp.outflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1outflags, + { "Output Flags", "ncp.bit1outflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2outflags, + { "Entry ID", "ncp.bit2outflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3outflags, + { "Replica State", "ncp.bit3outflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4outflags, + { "Modification Timestamp", "ncp.bit4outflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5outflags, + { "Purge Time", "ncp.bit5outflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6outflags, + { "Local Partition ID", "ncp.bit6outflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7outflags, + { "Distinguished Name", "ncp.bit7outflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8outflags, + { "Replica Type", "ncp.bit8outflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9outflags, + { "Partition Busy", "ncp.bit9outflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10outflags, + { "Not Defined", 
"ncp.bit10outflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11outflags, + { "Not Defined", "ncp.bit11outflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12outflags, + { "Not Defined", "ncp.bit12outflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13outflags, + { "Not Defined", "ncp.bit13outflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14outflags, + { "Not Defined", "ncp.bit14outflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15outflags, + { "Not Defined", "ncp.bit15outflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16outflags, + { "Not Defined", "ncp.bit16outflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_bit1nflags, + { "Entry ID", "ncp.bit1nflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2nflags, + { "Readable", "ncp.bit2nflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3nflags, + { "Writeable", "ncp.bit3nflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4nflags, + { "Master", "ncp.bit4nflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5nflags, + { "Create ID", "ncp.bit5nflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6nflags, + { "Walk Tree", "ncp.bit6nflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7nflags, + { "Dereference Alias", "ncp.bit7nflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8nflags, + { "Not Defined", "ncp.bit8nflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9nflags, + { "Not Defined", "ncp.bit9nflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10nflags, + { "Not Defined", "ncp.bit10nflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11nflags, + { "Not Defined", "ncp.bit11nflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12nflags, + { "Not Defined", "ncp.bit12nflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13nflags, + { "Not Defined", "ncp.bit13nflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14nflags, + { "Prefer Referrals", "ncp.bit14nflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15nflags, + { "Prefer Only Referrals", "ncp.bit15nflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16nflags, + { "Not Defined", "ncp.bit16nflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_bit1rflags, + { "Typeless", "ncp.bit1rflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2rflags, + { "Slashed", "ncp.bit2rflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3rflags, + { "Dotted", "ncp.bit3rflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4rflags, + { "Tuned", "ncp.bit4rflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5rflags, + { "Not Defined", "ncp.bit5rflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6rflags, + { "Not Defined", "ncp.bit6rflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7rflags, + { "Not Defined", "ncp.bit7rflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8rflags, + { "Not Defined", "ncp.bit8rflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9rflags, + { "Not Defined", "ncp.bit9rflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10rflags, + { "Not Defined", "ncp.bit10rflags", 
FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11rflags, + { "Not Defined", "ncp.bit11rflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12rflags, + { "Not Defined", "ncp.bit12rflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13rflags, + { "Not Defined", "ncp.bit13rflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14rflags, + { "Not Defined", "ncp.bit14rflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15rflags, + { "Not Defined", "ncp.bit15rflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16rflags, + { "Not Defined", "ncp.bit16rflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_eflags, + { "Entry Flags", "ncp.eflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1eflags, + { "Alias Entry", "ncp.bit1eflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2eflags, + { "Partition Root", "ncp.bit2eflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3eflags, + { "Container Entry", "ncp.bit3eflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4eflags, + { "Container Alias", "ncp.bit4eflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5eflags, + { "Matches List Filter", "ncp.bit5eflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6eflags, + { "Reference Entry", "ncp.bit6eflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7eflags, + { "40x Reference Entry", "ncp.bit7eflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8eflags, + { "Back Linked", "ncp.bit8eflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9eflags, + { "New Entry", "ncp.bit9eflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10eflags, + { "Temporary Reference", "ncp.bit10eflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11eflags, + { "Audited", "ncp.bit11eflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12eflags, + { "Entry Not Present", "ncp.bit12eflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13eflags, + { "Entry Verify CTS", "ncp.bit13eflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14eflags, + { "Entry Damaged", "ncp.bit14eflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15eflags, + { "Not Defined", "ncp.bit15eflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16eflags, + { "Not Defined", "ncp.bit16eflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_infoflagsl, + { "Information Flags (low) Byte", "ncp.infoflagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_retinfoflagsl, + { "Return Information Flags (low) Byte", "ncp.retinfoflagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1infoflagsl, + { "Output Flags", "ncp.bit1infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2infoflagsl, + { "Entry ID", "ncp.bit2infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3infoflagsl, + { "Entry Flags", "ncp.bit3infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4infoflagsl, + { "Subordinate Count", "ncp.bit4infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5infoflagsl, + { "Modification Time", "ncp.bit5infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6infoflagsl, + { "Modification Timestamp", "ncp.bit6infoflagsl", 
FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7infoflagsl, + { "Creation Timestamp", "ncp.bit7infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8infoflagsl, + { "Partition Root ID", "ncp.bit8infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9infoflagsl, + { "Parent ID", "ncp.bit9infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10infoflagsl, + { "Revision Count", "ncp.bit10infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11infoflagsl, + { "Replica Type", "ncp.bit11infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12infoflagsl, + { "Base Class", "ncp.bit12infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13infoflagsl, + { "Relative Distinguished Name", "ncp.bit13infoflagsl", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14infoflagsl, + { "Distinguished Name", "ncp.bit14infoflagsl", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15infoflagsl, + { "Root Distinguished Name", "ncp.bit15infoflagsl", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16infoflagsl, + { "Parent Distinguished Name", "ncp.bit16infoflagsl", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_infoflagsh, + { "Information Flags (high) Byte", "ncp.infoflagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1infoflagsh, + { "Purge Time", "ncp.bit1infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2infoflagsh, + { "Dereference Base Class", "ncp.bit2infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3infoflagsh, + { "Not Defined", "ncp.bit3infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4infoflagsh, + { "Not Defined", "ncp.bit4infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5infoflagsh, + { "Not Defined", "ncp.bit5infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6infoflagsh, + { "Not Defined", "ncp.bit6infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7infoflagsh, + { "Not Defined", "ncp.bit7infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8infoflagsh, + { "Not Defined", "ncp.bit8infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9infoflagsh, + { "Not Defined", "ncp.bit9infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10infoflagsh, + { "Not Defined", "ncp.bit10infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11infoflagsh, + { "Not Defined", "ncp.bit11infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12infoflagsh, + { "Not Defined", "ncp.bit12infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13infoflagsh, + { "Not Defined", "ncp.bit13infoflagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14infoflagsh, + { "Not Defined", "ncp.bit14infoflagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15infoflagsh, + { "Not Defined", "ncp.bit15infoflagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16infoflagsh, + { "Not Defined", "ncp.bit16infoflagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_retinfoflagsh, + { "Return Information Flags (high) Byte", "ncp.retinfoflagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1retinfoflagsh, + { "Purge Time", "ncp.bit1retinfoflagsh", FT_BOOLEAN, 16, NULL, 
0x00000001, NULL, HFILL }}, + + { &hf_bit2retinfoflagsh, + { "Dereference Base Class", "ncp.bit2retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3retinfoflagsh, + { "Replica Number", "ncp.bit3retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4retinfoflagsh, + { "Replica State", "ncp.bit4retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5retinfoflagsh, + { "Federation Boundary", "ncp.bit5retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6retinfoflagsh, + { "Schema Boundary", "ncp.bit6retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7retinfoflagsh, + { "Federation Boundary ID", "ncp.bit7retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8retinfoflagsh, + { "Schema Boundary ID", "ncp.bit8retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9retinfoflagsh, + { "Current Subcount", "ncp.bit9retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10retinfoflagsh, + { "Local Entry Flags", "ncp.bit10retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11retinfoflagsh, + { "Not Defined", "ncp.bit11retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12retinfoflagsh, + { "Not Defined", "ncp.bit12retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13retinfoflagsh, + { "Not Defined", "ncp.bit13retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14retinfoflagsh, + { "Not Defined", "ncp.bit14retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15retinfoflagsh, + { "Not Defined", "ncp.bit15retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16retinfoflagsh, + { "Not Defined", "ncp.bit16retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_bit1lflags, + { "List Typeless", "ncp.bit1lflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2lflags, + { "List Containers", "ncp.bit2lflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3lflags, + { "List Slashed", "ncp.bit3lflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4lflags, + { "List Dotted", "ncp.bit4lflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5lflags, + { "Dereference Alias", "ncp.bit5lflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6lflags, + { "List All Containers", "ncp.bit6lflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7lflags, + { "List Obsolete", "ncp.bit7lflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8lflags, + { "List Tuned Output", "ncp.bit8lflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9lflags, + { "List External Reference", "ncp.bit9lflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10lflags, + { "Not Defined", "ncp.bit10lflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11lflags, + { "Not Defined", "ncp.bit11lflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12lflags, + { "Not Defined", "ncp.bit12lflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13lflags, + { "Not Defined", "ncp.bit13lflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14lflags, + { "Not Defined", "ncp.bit14lflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15lflags, + { "Not 
Defined", "ncp.bit15lflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16lflags, + { "Not Defined", "ncp.bit16lflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_l1flagsl, + { "Information Flags (low) Byte", "ncp.l1flagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_l1flagsh, + { "Information Flags (high) Byte", "ncp.l1flagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1l1flagsl, + { "Output Flags", "ncp.bit1l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2l1flagsl, + { "Entry ID", "ncp.bit2l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3l1flagsl, + { "Replica State", "ncp.bit3l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4l1flagsl, + { "Modification Timestamp", "ncp.bit4l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5l1flagsl, + { "Purge Time", "ncp.bit5l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6l1flagsl, + { "Local Partition ID", "ncp.bit6l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7l1flagsl, + { "Distinguished Name", "ncp.bit7l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8l1flagsl, + { "Replica Type", "ncp.bit8l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9l1flagsl, + { "Partition Busy", "ncp.bit9l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10l1flagsl, + { "Not Defined", "ncp.bit10l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11l1flagsl, + { "Not Defined", "ncp.bit11l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12l1flagsl, + { "Not Defined", "ncp.bit12l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13l1flagsl, + { "Not Defined", "ncp.bit13l1flagsl", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14l1flagsl, + { "Not Defined", "ncp.bit14l1flagsl", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15l1flagsl, + { "Not Defined", "ncp.bit15l1flagsl", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16l1flagsl, + { "Not Defined", "ncp.bit16l1flagsl", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_bit1l1flagsh, + { "Not Defined", "ncp.bit1l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2l1flagsh, + { "Not Defined", "ncp.bit2l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3l1flagsh, + { "Not Defined", "ncp.bit3l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4l1flagsh, + { "Not Defined", "ncp.bit4l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5l1flagsh, + { "Not Defined", "ncp.bit5l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6l1flagsh, + { "Not Defined", "ncp.bit6l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7l1flagsh, + { "Not Defined", "ncp.bit7l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8l1flagsh, + { "Not Defined", "ncp.bit8l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9l1flagsh, + { "Not Defined", "ncp.bit9l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10l1flagsh, + { "Not Defined", "ncp.bit10l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11l1flagsh, + { "Not Defined", "ncp.bit11l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12l1flagsh, + 
{ "Not Defined", "ncp.bit12l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13l1flagsh, + { "Not Defined", "ncp.bit13l1flagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14l1flagsh, + { "Not Defined", "ncp.bit14l1flagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15l1flagsh, + { "Not Defined", "ncp.bit15l1flagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16l1flagsh, + { "Not Defined", "ncp.bit16l1flagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_vflags, + { "Value Flags", "ncp.vflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1vflags, + { "Naming", "ncp.bit1vflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2vflags, + { "Base Class", "ncp.bit2vflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3vflags, + { "Present", "ncp.bit3vflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4vflags, + { "Value Damaged", "ncp.bit4vflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5vflags, + { "Not Defined", "ncp.bit5vflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6vflags, + { "Not Defined", "ncp.bit6vflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7vflags, + { "Not Defined", "ncp.bit7vflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8vflags, + { "Not Defined", "ncp.bit8vflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9vflags, + { "Not Defined", "ncp.bit9vflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10vflags, + { "Not Defined", "ncp.bit10vflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11vflags, + { "Not Defined", "ncp.bit11vflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12vflags, + { "Not Defined", "ncp.bit12vflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13vflags, + { "Not Defined", "ncp.bit13vflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14vflags, + { "Not Defined", "ncp.bit14vflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15vflags, + { "Not Defined", "ncp.bit15vflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16vflags, + { "Not Defined", "ncp.bit16vflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_cflags, + { "Class Flags", "ncp.cflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1cflags, + { "Container", "ncp.bit1cflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2cflags, + { "Effective", "ncp.bit2cflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3cflags, + { "Class Definition Cannot be Removed", "ncp.bit3cflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4cflags, + { "Ambiguous Naming", "ncp.bit4cflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5cflags, + { "Ambiguous Containment", "ncp.bit5cflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6cflags, + { "Auxiliary", "ncp.bit6cflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7cflags, + { "Operational", "ncp.bit7cflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8cflags, + { "Sparse Required", "ncp.bit8cflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9cflags, + { "Sparse Operational", "ncp.bit9cflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10cflags, + { "Not 
Defined", "ncp.bit10cflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11cflags, + { "Not Defined", "ncp.bit11cflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12cflags, + { "Not Defined", "ncp.bit12cflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13cflags, + { "Not Defined", "ncp.bit13cflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14cflags, + { "Not Defined", "ncp.bit14cflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15cflags, + { "Not Defined", "ncp.bit15cflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16cflags, + { "Not Defined", "ncp.bit16cflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_bit1acflags, + { "Single Valued", "ncp.bit1acflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2acflags, + { "Sized", "ncp.bit2acflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3acflags, + { "Non-Removable", "ncp.bit3acflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4acflags, + { "Read Only", "ncp.bit4acflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5acflags, + { "Hidden", "ncp.bit5acflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6acflags, + { "String", "ncp.bit6acflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7acflags, + { "Synchronize Immediate", "ncp.bit7acflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8acflags, + { "Public Read", "ncp.bit8acflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9acflags, + { "Server Read", "ncp.bit9acflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10acflags, + { "Write Managed", "ncp.bit10acflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11acflags, + { "Per Replica", "ncp.bit11acflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12acflags, + { "Never Schedule Synchronization", "ncp.bit12acflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13acflags, + { "Operational", "ncp.bit13acflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14acflags, + { "Not Defined", "ncp.bit14acflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15acflags, + { "Not Defined", "ncp.bit15acflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16acflags, + { "Not Defined", "ncp.bit16acflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + + { &hf_nds_reply_error, + { "NDS Error", "ncp.ndsreplyerror", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_net, + { "Network","ncp.ndsnet", FT_IPXNET, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_node, + { "Node", "ncp.ndsnode", FT_ETHER, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_socket, + { "Socket", "ncp.ndssocket", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_add_ref_ip, + { "Address Referral", "ncp.ipref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_add_ref_udp, + { "Address Referral", "ncp.udpref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_add_ref_tcp, + { "Address Referral", "ncp.tcpref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_referral_record, + { "Referral Record", "ncp.ref_rec", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_referral_addcount, + { "Number of Addresses in Referral", "ncp.ref_addcount", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_port, + { 
"Port", "ncp.ndsport", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_mv_string, + { "Attribute Name", "ncp.mv_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_syntax, + { "Attribute Syntax", "ncp.nds_syntax", FT_UINT32, BASE_DEC, VALS(nds_syntax), 0x0, NULL, HFILL }}, + + { &hf_value_string, + { "Value", "ncp.value_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_stream_name, + { "Stream Name", "ncp.nds_stream_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_buffer_size, + { "NDS Reply Buffer Size", "ncp.nds_reply_buf", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_ver, + { "NDS Version", "ncp.nds_ver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_nflags, + { "Flags", "ncp.nds_nflags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_rflags, + { "Request Flags", "ncp.nds_rflags", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_eflags, + { "Entry Flags", "ncp.nds_eflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_scope, + { "Scope", "ncp.nds_scope", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_name, + { "Name", "ncp.nds_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_name_type, + { "Name Type", "ncp.nds_name_type", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_comm_trans, + { "Communications Transport", "ncp.nds_comm_trans", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_tree_trans, + { "Tree Walker Transport", "ncp.nds_tree_trans", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_iteration, + { "Iteration Handle", "ncp.nds_iteration", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_iterator, + { "Iterator", "ncp.nds_iterator", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_file_handle, + { "File Handle", "ncp.nds_file_handle", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_file_size, + { "File Size", "ncp.nds_file_size", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_eid, + { "NDS EID", "ncp.nds_eid", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_depth, + { "Distance object is from Root", "ncp.nds_depth", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_info_type, + { "Info Type", "ncp.nds_info_type", FT_UINT32, BASE_RANGE_STRING|BASE_DEC, RVALS(nds_info_type), 0x0, NULL, HFILL }}, + + { &hf_nds_class_def_type, + { "Class Definition Type", "ncp.nds_class_def_type", FT_UINT32, BASE_DEC, VALS(class_def_type), 0x0, NULL, HFILL }}, + + { &hf_nds_all_attr, + { "All Attributes", "ncp.nds_all_attr", FT_UINT32, BASE_DEC, NULL, 0x0, "Return all Attributes?", HFILL }}, + + { &hf_nds_return_all_classes, + { "All Classes", "ncp.nds_return_all_classes", FT_UINT32, BASE_DEC, NULL, 0x0, "Return all Classes?", HFILL }}, + + { &hf_nds_req_flags, + { "Request Flags", "ncp.nds_req_flags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_attr, + { "Attributes", "ncp.nds_attributes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_classes, + { "Classes", "ncp.nds_classes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_crc, + { "CRC", "ncp.nds_crc", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_referrals, + { "Referrals", "ncp.nds_referrals", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_result_flags, + { "Result Flags", "ncp.nds_result_flags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { 
&hf_nds_stream_flags, + { "Streams Flags", "ncp.nds_stream_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_tag_string, + { "Tags", "ncp.nds_tags", FT_UINT32, BASE_DEC, VALS(nds_tags), 0x0, NULL, HFILL }}, + + { &hf_value_bytes, + { "Bytes", "ncp.value_bytes", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_replica_type, + { "Replica Type", "ncp.rtype", FT_UINT32, BASE_DEC, VALS(nds_replica_type), 0x0, NULL, HFILL }}, + + { &hf_replica_state, + { "Replica State", "ncp.rstate", FT_UINT16, BASE_DEC, VALS(nds_replica_state), 0x0, NULL, HFILL }}, + + { &hf_nds_rnum, + { "Replica Number", "ncp.rnum", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_revent, + { "Event", "ncp.revent", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_replica_number, + { "Replica Number", "ncp.rnum", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_min_nds_ver, + { "Minimum NDS Version", "ncp.min_nds_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_ver_include, + { "Include NDS Version", "ncp.inc_nds_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_ver_exclude, + { "Exclude NDS Version", "ncp.exc_nds_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? */ + { &hf_nds_es, + { "Input Entry Specifier", "ncp.nds_es", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_es_type, + { "Entry Specifier Type", "ncp.nds_es_type", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_rdn_string, + { "RDN", "ncp.nds_rdn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? */ + { &hf_delim_string, + { "Delimiter", "ncp.nds_delim", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_nds_dn_output_type, + { "Output Entry Specifier Type", "ncp.nds_out_es_type", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_nested_output_type, + { "Nested Output Entry Specifier Type", "ncp.nds_nested_out_es", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_output_delimiter, + { "Output Delimiter", "ncp.nds_out_delimiter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_output_entry_specifier, + { "Output Entry Specifier", "ncp.nds_out_es", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_es_value, + { "Entry Specifier Value", "ncp.nds_es_value", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_es_rdn_count, + { "RDN Count", "ncp.nds_es_rdn_count", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_replica_num, + { "Replica Number", "ncp.nds_replica_num", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_es_seconds, + { "Seconds", "ncp.nds_es_seconds", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_event_num, + { "Event Number", "ncp.nds_event_num", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_compare_results, + { "Compare Values Returned", "ncp.nds_compare_results", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_parent, + { "Parent ID", "ncp.nds_parent", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_name_filter, + { "Name Filter", "ncp.nds_name_filter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_class_filter, + { "Class Filter", "ncp.nds_class_filter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_time_filter, + { "Time Filter", "ncp.nds_time_filter", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_partition_root_id, + { "Partition Root ID", 
"ncp.nds_partition_root_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_replicas, + { "Replicas", "ncp.nds_replicas", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_purge, + { "Purge Time", "ncp.nds_purge", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_local_partition, + { "Local Partition ID", "ncp.nds_local_partition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_partition_busy, + { "Partition Busy", "ncp.nds_partition_busy", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_number_of_changes, + { "Number of Attribute Changes", "ncp.nds_number_of_changes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_sub_count, + { "Subordinate Count", "ncp.sub_count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_revision, + { "Revision Count", "ncp.nds_rev_count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_base_class, + { "Base Class", "ncp.nds_base_class", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_relative_dn, + { "Relative Distinguished Name", "ncp.nds_relative_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? */ + { &hf_nds_root_dn, + { "Root Distinguished Name", "ncp.nds_root_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, +#endif + +#if 0 /* Unused ? */ + { &hf_nds_parent_dn, + { "Parent Distinguished Name", "ncp.nds_parent_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_deref_base, + { "Dereference Base Class", "ncp.nds_deref_base", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_base, + { "Base Class", "ncp.nds_base", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_super, + { "Super Class", "ncp.nds_super", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? 
*/ + { &hf_nds_entry_info, + { "Entry Information", "ncp.nds_entry_info", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_nds_privileges, + { "Privileges", "ncp.nds_privileges", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_compare_attributes, + { "Compare Attributes?", "ncp.nds_compare_attributes", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_nds_read_attribute, + { "Read Attribute?", "ncp.nds_read_attribute", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_nds_write_add_delete_attribute, + { "Write, Add, Delete Attribute?", "ncp.nds_write_add_delete_attribute", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_nds_add_delete_self, + { "Add/Delete Self?", "ncp.nds_add_delete_self", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_nds_privilege_not_defined, + { "Privilege Not defined", "ncp.nds_privilege_not_defined", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_nds_supervisor, + { "Supervisor?", "ncp.nds_supervisor", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_nds_inheritance_control, + { "Inheritance?", "ncp.nds_inheritance_control", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_nds_browse_entry, + { "Browse Entry?", "ncp.nds_browse_entry", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_nds_add_entry, + { "Add Entry?", "ncp.nds_add_entry", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_nds_delete_entry, + { "Delete Entry?", "ncp.nds_delete_entry", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_nds_rename_entry, + { "Rename Entry?", "ncp.nds_rename_entry", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_nds_supervisor_entry, + { "Supervisor?", "ncp.nds_supervisor_entry", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_nds_entry_privilege_not_defined, + { "Privilege Not Defined", "ncp.nds_entry_privilege_not_defined", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_nds_vflags, + { "Value Flags", "ncp.nds_vflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_value_len, + { "Value Length", "ncp.nds_vlength", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_cflags, + { "Class Flags", "ncp.nds_cflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_asn1, + { "ASN.1 ID", "ncp.nds_asn1", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_acflags, + { "Attribute Constraint Flags", "ncp.nds_acflags", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_upper, + { "Upper Limit Value", "ncp.nds_upper", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_lower, + { "Lower Limit Value", "ncp.nds_lower", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_trustee_dn, + { "Trustee Distinguished Name", "ncp.nds_trustee_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_attribute_dn, + { "Attribute Name", "ncp.nds_attribute_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_acl_add, + { "ACL Templates to Add", "ncp.nds_acl_add", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_acl_del, + { "Access Control Lists to Delete", "ncp.nds_acl_del", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_att_add, + { "Attribute to Add", "ncp.nds_att_add", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_att_del, + { "Attribute Names to Delete", "ncp.nds_att_del", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_keep, + { "Delete Original RDN", 
"ncp.nds_keep", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_new_rdn, + { "New Relative Distinguished Name", "ncp.nds_new_rdn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_time_delay, + { "Time Delay", "ncp.nds_time_delay", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_root_name, + { "Root Most Object Name", "ncp.nds_root_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_new_part_id, + { "New Partition Root ID", "ncp.nds_new_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_child_part_id, + { "Child Partition Root ID", "ncp.nds_child_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_master_part_id, + { "Master Partition Root ID", "ncp.nds_master_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_target_name, + { "Target Server Name", "ncp.nds_target_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_pingflags1, + { "Ping (low) Request Flags", "ncp.pingflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1pingflags1, + { "Supported Fields", "ncp.bit1pingflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2pingflags1, + { "Depth", "ncp.bit2pingflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3pingflags1, + { "Build Number", "ncp.bit3pingflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4pingflags1, + { "Flags", "ncp.bit4pingflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5pingflags1, + { "Verification Flags", "ncp.bit5pingflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6pingflags1, + { "Letter Version", "ncp.bit6pingflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7pingflags1, + { "OS Version", "ncp.bit7pingflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8pingflags1, + { "Not Defined", "ncp.bit8pingflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9pingflags1, + { "License Flags", "ncp.bit9pingflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10pingflags1, + { "DS Time", "ncp.bit10pingflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11pingflags1, + { "Server Time", "ncp.bit11pingflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12pingflags1, + { "Create Time", "ncp.bit12pingflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13pingflags1, + { "Not Defined", "ncp.bit13pingflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14pingflags1, + { "Not Defined", "ncp.bit14pingflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15pingflags1, + { "Not Defined", "ncp.bit15pingflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16pingflags1, + { "Not Defined", "ncp.bit16pingflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_pingflags2, + { "Ping (high) Request Flags", "ncp.pingflags2", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1pingflags2, + { "Sap Name", "ncp.bit1pingflags2", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2pingflags2, + { "Tree Name", "ncp.bit2pingflags2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3pingflags2, + { "OS Name", "ncp.bit3pingflags2", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4pingflags2, + { "Hardware Name", "ncp.bit4pingflags2", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { 
&hf_bit5pingflags2, + { "Vendor Name", "ncp.bit5pingflags2", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6pingflags2, + { "Not Defined", "ncp.bit6pingflags2", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7pingflags2, + { "Not Defined", "ncp.bit7pingflags2", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8pingflags2, + { "Not Defined", "ncp.bit8pingflags2", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9pingflags2, + { "Not Defined", "ncp.bit9pingflags2", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10pingflags2, + { "Not Defined", "ncp.bit10pingflags2", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11pingflags2, + { "Not Defined", "ncp.bit11pingflags2", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12pingflags2, + { "Not Defined", "ncp.bit12pingflags2", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13pingflags2, + { "Not Defined", "ncp.bit13pingflags2", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14pingflags2, + { "Not Defined", "ncp.bit14pingflags2", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15pingflags2, + { "Not Defined", "ncp.bit15pingflags2", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16pingflags2, + { "Not Defined", "ncp.bit16pingflags2", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_pingpflags1, + { "Ping Data Flags", "ncp.pingpflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1pingpflags1, + { "Root Most Master Replica", "ncp.bit1pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2pingpflags1, + { "Is Time Synchronized?", "ncp.bit2pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3pingpflags1, + { "Is Time Valid?", "ncp.bit3pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4pingpflags1, + { "Is DS Time Synchronized?", "ncp.bit4pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5pingpflags1, + { "Does Agent Have All Replicas?", "ncp.bit5pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6pingpflags1, + { "Not Defined", "ncp.bit6pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7pingpflags1, + { "Not Defined", "ncp.bit7pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8pingpflags1, + { "Not Defined", "ncp.bit8pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9pingpflags1, + { "Not Defined", "ncp.bit9pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10pingpflags1, + { "Not Defined", "ncp.bit10pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11pingpflags1, + { "Not Defined", "ncp.bit11pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12pingpflags1, + { "Not Defined", "ncp.bit12pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13pingpflags1, + { "Not Defined", "ncp.bit13pingpflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14pingpflags1, + { "Not Defined", "ncp.bit14pingpflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15pingpflags1, + { "Not Defined", "ncp.bit15pingpflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16pingpflags1, + { "Not Defined", "ncp.bit16pingpflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_pingvflags1, + { "Verification Flags", 
"ncp.pingvflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1pingvflags1, + { "Checksum", "ncp.bit1pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2pingvflags1, + { "CRC32", "ncp.bit2pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_bit3pingvflags1, + { "Not Defined", "ncp.bit3pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4pingvflags1, + { "Not Defined", "ncp.bit4pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5pingvflags1, + { "Not Defined", "ncp.bit5pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6pingvflags1, + { "Not Defined", "ncp.bit6pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7pingvflags1, + { "Not Defined", "ncp.bit7pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8pingvflags1, + { "Not Defined", "ncp.bit8pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9pingvflags1, + { "Not Defined", "ncp.bit9pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10pingvflags1, + { "Not Defined", "ncp.bit10pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11pingvflags1, + { "Not Defined", "ncp.bit11pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12pingvflags1, + { "Not Defined", "ncp.bit12pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13pingvflags1, + { "Not Defined", "ncp.bit13pingvflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14pingvflags1, + { "Not Defined", "ncp.bit14pingvflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15pingvflags1, + { "Not Defined", "ncp.bit15pingvflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16pingvflags1, + { "Not Defined", "ncp.bit16pingvflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_nds_letter_ver, + { "Letter Version", "ncp.nds_letter_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_os_majver, + { "OS Major Version", "ncp.nds_os_majver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_os_minver, + { "OS Minor Version", "ncp.nds_os_minver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_lic_flags, + { "License Flags", "ncp.nds_lic_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_ds_time, + { "DS Time", "ncp.nds_ds_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_svr_time, + { "Server Time", "ncp.nds_svr_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_crt_time, + { "Agent Create Time", "ncp.nds_crt_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_ping_version, + { "Ping Version", "ncp.nds_ping_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_search_scope, + { "Search Scope", "ncp.nds_search_scope", FT_UINT32, BASE_DEC|BASE_RANGE_STRING, RVALS(nds_search_scope), 0x0, NULL, HFILL }}, + + { &hf_nds_num_objects, + { "Number of Objects to Search", "ncp.nds_num_objects", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_siflags, + { "Information Types", "ncp.siflags", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_bit1siflags, + { "Names", "ncp.bit1siflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + { &hf_bit2siflags, + { "Names and Values", "ncp.bit2siflags", FT_BOOLEAN, 16, NULL, 0x00000002, 
NULL, HFILL }}, + + { &hf_bit3siflags, + { "Effective Privileges", "ncp.bit3siflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_bit4siflags, + { "Value Info", "ncp.bit4siflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_bit5siflags, + { "Abbreviated Value", "ncp.bit5siflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_bit6siflags, + { "Not Defined", "ncp.bit6siflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_bit7siflags, + { "Not Defined", "ncp.bit7siflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_bit8siflags, + { "Not Defined", "ncp.bit8siflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_bit9siflags, + { "Expanded Class", "ncp.bit9siflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_bit10siflags, + { "Not Defined", "ncp.bit10siflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_bit11siflags, + { "Not Defined", "ncp.bit11siflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_bit12siflags, + { "Not Defined", "ncp.bit12siflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_bit13siflags, + { "Not Defined", "ncp.bit13siflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_bit14siflags, + { "Not Defined", "ncp.bit14siflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_bit15siflags, + { "Not Defined", "ncp.bit15siflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_bit16siflags, + { "Not Defined", "ncp.bit16siflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_nds_segment_overlap, + { "Segment overlap", "nds.segment.overlap", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Segment overlaps with other segments", HFILL }}, + + { &hf_nds_segment_overlap_conflict, + { "Conflicting data in segment overlap", "nds.segment.overlap.conflict", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Overlapping segments contained conflicting data", HFILL }}, + + { &hf_nds_segment_multiple_tails, + { "Multiple tail segments found", "nds.segment.multipletails", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Several tails were found when desegmenting the packet", HFILL }}, + + { &hf_nds_segment_too_long_segment, + { "Segment too long", "nds.segment.toolongsegment", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Segment contained data past end of packet", HFILL }}, + + { &hf_nds_segment_error, + { "Desegmentation error", "nds.segment.error", FT_FRAMENUM, BASE_NONE, NULL, 0x0, "Desegmentation error due to illegal segments", HFILL }}, + + { &hf_nds_segment_count, + { "Segment count", "nds.segment.count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_reassembled_length, + { "Reassembled NDS length", "nds.reassembled.length", FT_UINT32, BASE_DEC, NULL, 0x0, "The total length of the reassembled payload", HFILL }}, + + { &hf_nds_segment, + { "NDS Fragment", "nds.fragment", FT_FRAMENUM, BASE_NONE, NULL, 0x0, "NDS Fragment", HFILL }}, + + { &hf_nds_segments, + { "NDS Fragments", "nds.fragments", FT_NONE, BASE_NONE, NULL, 0x0, "NDS Fragments", HFILL }}, + + { &hf_nds_verb2b_req_flags, + { "Flags", "ncp.nds_verb2b_flags", FT_UINT32, BASE_HEX, VALS(nds_verb2b_flag_vals), 0x0, NULL, HFILL }}, + + { &hf_ncp_ip_address, + { "IP Address", "ncp.ip_addr", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_copyright, + { "Copyright", "ncp.copyright", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_ndsprot1flag, + { "Not Defined", "ncp.nds_prot_bit1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }}, + + {
&hf_ndsprot2flag, + { "Not Defined", "ncp.nds_prot_bit2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }}, + + { &hf_ndsprot3flag, + { "Not Defined", "ncp.nds_prot_bit3", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }}, + + { &hf_ndsprot4flag, + { "Not Defined", "ncp.nds_prot_bit4", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }}, + + { &hf_ndsprot5flag, + { "Not Defined", "ncp.nds_prot_bit5", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }}, + + { &hf_ndsprot6flag, + { "Not Defined", "ncp.nds_prot_bit6", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }}, + + { &hf_ndsprot7flag, + { "Not Defined", "ncp.nds_prot_bit7", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }}, + + { &hf_ndsprot8flag, + { "Not Defined", "ncp.nds_prot_bit8", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }}, + + { &hf_ndsprot9flag, + { "Not Defined", "ncp.nds_prot_bit9", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }}, + + { &hf_ndsprot10flag, + { "Not Defined", "ncp.nds_prot_bit10", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }}, + + { &hf_ndsprot11flag, + { "Not Defined", "ncp.nds_prot_bit11", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }}, + + { &hf_ndsprot12flag, + { "Not Defined", "ncp.nds_prot_bit12", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }}, + + { &hf_ndsprot13flag, + { "Not Defined", "ncp.nds_prot_bit13", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }}, + + { &hf_ndsprot14flag, + { "Not Defined", "ncp.nds_prot_bit14", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }}, + + { &hf_ndsprot15flag, + { "Include CRC in NDS Header", "ncp.nds_prot_bit15", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }}, + + { &hf_ndsprot16flag, + { "Client is a Server", "ncp.nds_prot_bit16", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }}, + + { &hf_nds_svr_dst_name, + { "Server Distinguished Name", "ncp.nds_svr_dist_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_tune_mark, + { "Tune Mark", "ncp.ndstunemark", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? */ + { &hf_nds_create_time, + { "NDS Creation Time", "ncp.ndscreatetime", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_srvr_param_string, + { "Set Parameter Value", "ncp.srvr_param_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_srvr_param_number, + { "Set Parameter Value", "ncp.srvr_param_number", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_srvr_param_boolean, + { "Set Parameter Value", "ncp.srvr_param_boolean", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_number_of_items, + { "Number of Items", "ncp.ndsitems", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_nds_iterverb, + { "NDS Iteration Verb", "ncp.ndsiterverb", FT_UINT32, BASE_DEC_HEX, VALS(iterator_subverbs), 0x0, NULL, HFILL }}, + + { &hf_iter_completion_code, + { "Iteration Completion Code", "ncp.iter_completion_code", FT_UINT32, BASE_HEX, VALS(nds_reply_errors), 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? 
*/ + { &hf_nds_iterobj, + { "Iterator Object", "ncp.ndsiterobj", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_iter_verb_completion_code, + { "Completion Code", "ncp.iter_verb_completion_code", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_iter_ans, + { "Iterator Answer", "ncp.iter_answer", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_positionable, + { "Positionable", "ncp.iterpositionable", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_num_skipped, + { "Number Skipped", "ncp.iternumskipped", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_num_to_skip, + { "Number to Skip", "ncp.iternumtoskip", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_timelimit, + { "Time Limit", "ncp.itertimelimit", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_iter_index, + { "Iterator Index", "ncp.iterindex", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_num_to_get, + { "Number to Get", "ncp.iternumtoget", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + +#if 0 /* Unused ? */ + { &hf_ret_info_type, + { "Return Information Type", "ncp.iterretinfotype", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, +#endif + + { &hf_data_size, + { "Data Size", "ncp.iterdatasize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_this_count, + { "Number of Items", "ncp.itercount", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_max_entries, + { "Maximum Entries", "ncp.itermaxentries", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_move_position, + { "Move Position", "ncp.itermoveposition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_iter_copy, + { "Iterator Copy", "ncp.itercopy", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_iter_position, + { "Iteration Position", "ncp.iterposition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_iter_search, + { "Search Filter", "ncp.iter_search", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_iter_other, + { "Other Iteration", "ncp.iterother", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_oid, + { "Object ID", "ncp.nds_oid", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_ncp_bytes_actually_trans_64, + { "Bytes Actually Transferred", "ncp.bytes_actually_trans_64", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL }}, + + { &hf_sap_name, + { "SAP Name", "ncp.sap_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_os_name, + { "OS Name", "ncp.os_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_vendor_name, + { "Vendor Name", "ncp.vendor_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_hardware_name, + { "Hardware Name", "ncp.hardware_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_no_request_record_found, + { "No request record found.
Parsing is impossible.", "ncp.no_request_record_found", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_search_modifier, + { "Search Modifier", "ncp.search_modifier", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + + { &hf_search_pattern, + { "Search Pattern", "ncp.search_pattern", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_acl_protected_attribute, + { "Protected Attribute", "ncp.nds_acl_protected_attribute", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_acl_subject, + { "Subject", "ncp.nds_acl_subject", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, + + { &hf_nds_acl_privileges, + { "Subject", "ncp.nds_acl_privileges", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, + +""") + # Print the registration code for the hf variables + for var in sorted_vars: + print(" { &%s," % (var.HFName())) + print(" { \"%s\", \"%s\", %s, %s, %s, 0x%x, NULL, HFILL }},\n" % \ + (var.Description(), var.DFilter(), + var.WiresharkFType(), var.Display(), var.ValuesName(), + var.Mask())) + + print(" };\n") + + if ett_list: + print(" static int *ett[] = {") + + for ett in ett_list: + print(" &%s," % (ett,)) + + print(" };\n") + + print(""" + static ei_register_info ei[] = { + { &ei_ncp_file_handle, { "ncp.file_handle.expert", PI_REQUEST_CODE, PI_CHAT, "Close file handle", EXPFILL }}, + { &ei_ncp_file_rights, { "ncp.file_rights", PI_REQUEST_CODE, PI_CHAT, "File rights", EXPFILL }}, + { &ei_ncp_op_lock_handle, { "ncp.op_lock_handle", PI_REQUEST_CODE, PI_CHAT, "Op-lock on handle", EXPFILL }}, + { &ei_ncp_file_rights_change, { "ncp.file_rights.change", PI_REQUEST_CODE, PI_CHAT, "Change handle rights", EXPFILL }}, + { &ei_ncp_effective_rights, { "ncp.effective_rights.expert", PI_RESPONSE_CODE, PI_CHAT, "Handle effective rights", EXPFILL }}, + { &ei_ncp_server, { "ncp.server", PI_RESPONSE_CODE, PI_CHAT, "Server info", EXPFILL }}, + { &ei_iter_verb_completion_code, { "ncp.iter_verb_completion_code.expert", PI_RESPONSE_CODE, PI_ERROR, "Iteration Verb Error", EXPFILL }}, + { &ei_ncp_connection_request, { "ncp.connection_request", PI_RESPONSE_CODE, PI_CHAT, "Connection Request", EXPFILL }}, + { &ei_ncp_destroy_connection, { "ncp.destroy_connection", PI_RESPONSE_CODE, PI_CHAT, "Destroy Connection Request", EXPFILL }}, + { &ei_nds_reply_error, { "ncp.ndsreplyerror.expert", PI_RESPONSE_CODE, PI_ERROR, "NDS Error", EXPFILL }}, + { &ei_nds_iteration, { "ncp.nds_iteration.error", PI_RESPONSE_CODE, PI_ERROR, "NDS Iteration Error", EXPFILL }}, + { &ei_ncp_eid, { "ncp.eid", PI_RESPONSE_CODE, PI_CHAT, "EID", EXPFILL }}, + { &ei_ncp_completion_code, { "ncp.completion_code.expert", PI_RESPONSE_CODE, PI_ERROR, "Code Completion Error", EXPFILL }}, + { &ei_ncp_connection_status, { "ncp.connection_status.bad", PI_RESPONSE_CODE, PI_ERROR, "Error: Bad Connection Status", EXPFILL }}, + { &ei_ncp_connection_destroyed, { "ncp.connection_destroyed", PI_RESPONSE_CODE, PI_CHAT, "Connection Destroyed", EXPFILL }}, + { &ei_ncp_no_request_record_found, { "ncp.no_request_record_found", PI_SEQUENCE, PI_NOTE, "No request record found.", EXPFILL }}, + { &ei_ncp_invalid_offset, { "ncp.invalid_offset", PI_MALFORMED, PI_ERROR, "Invalid offset", EXPFILL }}, + { &ei_ncp_address_type, { "ncp.address_type.unknown", PI_PROTOCOL, PI_WARN, "Unknown Address Type", EXPFILL }}, + { &ei_ncp_value_too_large, { "ncp.value_too_large", PI_MALFORMED, PI_ERROR, "Length value goes past the end of the packet", EXPFILL }}, + }; + + expert_module_t* expert_ncp; + + proto_register_field_array(proto_ncp, hf, array_length(hf));""") + 
+ if ett_list: + print(""" + proto_register_subtree_array(ett, array_length(ett));""") + + print(""" + expert_ncp = expert_register_protocol(proto_ncp); + expert_register_field_array(expert_ncp, ei, array_length(ei)); + register_init_routine(&ncp_init_protocol); + /* fragment */ + reassembly_table_register(&nds_reassembly_table, + &addresses_reassembly_table_functions); + + ncp_req_hash = wmem_map_new_autoreset(wmem_epan_scope(), wmem_file_scope(), ncp_hash, ncp_equal); + ncp_req_eid_hash = wmem_map_new_autoreset(wmem_epan_scope(), wmem_file_scope(), ncp_eid_hash, ncp_eid_equal); + + """) + + # End of proto_register_ncp2222() + print("}") + +def usage(): + print("Usage: ncp2222.py -o output_file") + sys.exit(1) + +def main(): + global compcode_lists + global ptvc_lists + global msg + + optstring = "o:" + out_filename = None + + try: + opts, args = getopt.getopt(sys.argv[1:], optstring) + except getopt.error: + usage() + + for opt, arg in opts: + if opt == "-o": + out_filename = arg + else: + usage() + + if len(args) != 0: + usage() + + if not out_filename: + usage() + + # Create the output file + try: + out_file = open(out_filename, "w") + except IOError as err: + sys.exit("Could not open %s for writing: %s" % (out_filename, + err)) + + # Set msg to current stdout + msg = sys.stdout + + # Set stdout to the output file + sys.stdout = out_file + + msg.write("Processing NCP definitions...\n") + # Run the code, and if we catch any exception, + # erase the output file. + try: + compcode_lists = UniqueCollection('Completion Code Lists') + ptvc_lists = UniqueCollection('PTVC Lists') + + define_errors() + define_groups() + + define_ncp2222() + + msg.write("Defined %d NCP types.\n" % (len(packets),)) + produce_code() + except Exception: + traceback.print_exc(20, msg) + try: + out_file.close() + except IOError as err: + msg.write("Could not close %s: %s\n" % (out_filename, err)) + + try: + if os.path.exists(out_filename): + os.remove(out_filename) + except OSError as err: + msg.write("Could not remove %s: %s\n" % (out_filename, err)) + + sys.exit(1) + + + +def define_ncp2222(): + ############################################################################## + # NCP Packets. Here I list functions and subfunctions in hexadecimal like the + # NCP book (and I believe LanAlyzer does this too). + # However, Novell lists these in decimal in their on-line documentation.
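+ # (Worked example of this numbering: the packet labeled "2222/1100, 17/00" + # below is function 0x11, i.e. decimal 17, subfunction 0x00, which Novell's + # documentation lists as 17/00.) + # Each rec() entry below reads (offset, length, field type, ...); length may + # be a (min, max) tuple for variable-length fields, and the optional + # arguments seen below (ENC_BIG_ENDIAN, var=/repeat= for counted repeating + # fields, info_str= for Info-column text) modify how the field is decoded.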
+ ############################################################################## + # 2222/01 + pkt = NCP(0x01, "File Set Lock", 'sync') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/02 + pkt = NCP(0x02, "File Release Lock", 'sync') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff00]) + # 2222/03 + pkt = NCP(0x03, "Log File Exclusive", 'sync') + pkt.Request( (12, 267), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, LockFlag ), + rec( 9, 2, TimeoutLimit, ENC_BIG_ENDIAN ), + rec( 11, (1, 256), FilePath ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8200, 0x9600, 0xfe0d, 0xff01]) + # 2222/04 + pkt = NCP(0x04, "Lock File Set", 'sync') + pkt.Request( 9, [ + rec( 7, 2, TimeoutLimit ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfe0d, 0xff01]) + ## 2222/05 + pkt = NCP(0x05, "Release File", 'sync') + pkt.Request( (9, 264), [ + rec( 7, 1, DirHandle ), + rec( 8, (1, 256), FilePath ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9b00, 0x9c03, 0xff1a]) + # 2222/06 + pkt = NCP(0x06, "Release File Set", 'sync') + pkt.Request( 8, [ + rec( 7, 1, LockFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/07 + pkt = NCP(0x07, "Clear File", 'sync') + pkt.Request( (9, 264), [ + rec( 7, 1, DirHandle ), + rec( 8, (1, 256), FilePath ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, + 0xa100, 0xfd00, 0xff1a]) + # 2222/08 + pkt = NCP(0x08, "Clear File Set", 'sync') + pkt.Request( 8, [ + rec( 7, 1, LockFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/09 + pkt = NCP(0x09, "Log Logical Record", 'sync') + pkt.Request( (11, 138), [ + rec( 7, 1, LockFlag ), + rec( 8, 2, TimeoutLimit, ENC_BIG_ENDIAN ), + rec( 10, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Log Logical Record: %s", ", %s")), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xfe0d, 0xff1a]) + # 2222/0A, 10 + pkt = NCP(0x0A, "Lock Logical Record Set", 'sync') + pkt.Request( 10, [ + rec( 7, 1, LockFlag ), + rec( 8, 2, TimeoutLimit ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfe0d, 0xff1a]) + # 2222/0B, 11 + pkt = NCP(0x0B, "Clear Logical Record", 'sync') + pkt.Request( (8, 135), [ + rec( 7, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Clear Logical Record: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff1a]) + # 2222/0C, 12 + pkt = NCP(0x0C, "Release Logical Record", 'sync') + pkt.Request( (8, 135), [ + rec( 7, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Release Logical Record: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff1a]) + # 2222/0D, 13 + pkt = NCP(0x0D, "Release Logical Record Set", 'sync') + pkt.Request( 8, [ + rec( 7, 1, LockFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/0E, 14 + pkt = NCP(0x0E, "Clear Logical Record Set", 'sync') + pkt.Request( 8, [ + rec( 7, 1, LockFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/1100, 17/00 + pkt = NCP(0x1100, "Write to Spool File", 'print') + pkt.Request( (11, 16), [ + rec( 10, ( 1, 6 ), Data, info_str=(Data, "Write to Spool File: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0104, 0x8000, 0x8101, 0x8701, 0x8800, + 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9400, 0x9500, + 0x9600, 0x9804, 0x9900, 0xa100, 0xa201, 0xff19]) + # 2222/1101, 17/01 + pkt = NCP(0x1101, "Close Spool File", 'print') + pkt.Request( 11, [ + rec( 10, 1, AbortQueueFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 
0x8701, 0x8800, 0x8d00, + 0x8e00, 0x8f00, 0x9001, 0x9300, 0x9400, 0x9500, + 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, 0x9d00, + 0xa100, 0xd000, 0xd100, 0xd202, 0xd300, 0xd400, + 0xda01, 0xe800, 0xea00, 0xeb00, 0xec00, 0xfc06, + 0xfd00, 0xfe07, 0xff06]) + # 2222/1102, 17/02 + pkt = NCP(0x1102, "Set Spool File Flags", 'print') + pkt.Request( 30, [ + rec( 10, 1, PrintFlags ), + rec( 11, 1, TabSize ), + rec( 12, 1, TargetPrinter ), + rec( 13, 1, Copies ), + rec( 14, 1, FormType ), + rec( 15, 1, Reserved ), + rec( 16, 14, BannerName ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xd202, 0xd300, 0xe800, 0xea00, + 0xeb00, 0xec00, 0xfc06, 0xfe07, 0xff06]) + + # 2222/1103, 17/03 + pkt = NCP(0x1103, "Spool A Disk File", 'print') + pkt.Request( (12, 23), [ + rec( 10, 1, DirHandle ), + rec( 11, (1, 12), Data, info_str=(Data, "Spool a Disk File: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8701, 0x8800, 0x8d00, + 0x8e00, 0x8f00, 0x9001, 0x9300, 0x9400, 0x9500, + 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, 0x9d00, + 0xa100, 0xd000, 0xd100, 0xd202, 0xd300, 0xd400, + 0xda01, 0xe800, 0xea00, 0xeb00, 0xec00, 0xfc06, + 0xfd00, 0xfe07, 0xff06]) + + # 2222/1106, 17/06 + pkt = NCP(0x1106, "Get Printer Status", 'print') + pkt.Request( 11, [ + rec( 10, 1, TargetPrinter ), + ]) + pkt.Reply(12, [ + rec( 8, 1, PrinterHalted ), + rec( 9, 1, PrinterOffLine ), + rec( 10, 1, CurrentFormType ), + rec( 11, 1, RedirectedPrinter ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xfb05, 0xfd00, 0xff06]) + + # 2222/1109, 17/09 + pkt = NCP(0x1109, "Create Spool File", 'print') + pkt.Request( (12, 23), [ + rec( 10, 1, DirHandle ), + rec( 11, (1, 12), Data, info_str=(Data, "Create Spool File: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8400, 0x8701, 0x8d00, + 0x8f00, 0x9001, 0x9400, 0x9600, 0x9804, 0x9900, + 0x9b03, 0x9c03, 0xa100, 0xd000, 0xd100, 0xd202, + 0xd300, 0xd400, 0xda01, 0xe800, 0xea00, 0xeb00, + 0xec00, 0xfc06, 0xfd00, 0xfe07, 0xff06]) + + # 2222/110A, 17/10 + pkt = NCP(0x110A, "Get Printer's Queue", 'print') + pkt.Request( 11, [ + rec( 10, 1, TargetPrinter ), + ]) + pkt.Reply( 12, [ + rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xff06]) + + # 2222/12, 18 + pkt = NCP(0x12, "Get Volume Info with Number", 'file') + pkt.Request( 8, [ + rec( 7, 1, VolumeNumber,info_str=(VolumeNumber, "Get Volume Information for Volume %d", ", %d") ) + ]) + pkt.Reply( 36, [ + rec( 8, 2, SectorsPerCluster, ENC_BIG_ENDIAN ), + rec( 10, 2, TotalVolumeClusters, ENC_BIG_ENDIAN ), + rec( 12, 2, AvailableClusters, ENC_BIG_ENDIAN ), + rec( 14, 2, TotalDirectorySlots, ENC_BIG_ENDIAN ), + rec( 16, 2, AvailableDirectorySlots, ENC_BIG_ENDIAN ), + rec( 18, 16, VolumeName ), + rec( 34, 2, RemovableFlag, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9804]) + + # 2222/13, 19 + pkt = NCP(0x13, "Get Station Number", 'connection') + pkt.Request(7) + pkt.Reply(11, [ + rec( 8, 3, StationNumber ) + ]) + pkt.CompletionCodes([0x0000, 0xff00]) + + # 2222/14, 20 + pkt = NCP(0x14, "Get File Server Date And Time", 'fileserver') + pkt.Request(7) + pkt.Reply(15, [ + rec( 8, 1, Year ), + rec( 9, 1, Month ), + rec( 10, 1, Day ), + rec( 11, 1, Hour ), + rec( 12, 1, Minute ), + rec( 13, 1, Second ), + rec( 14, 1, DayOfWeek ), + ]) + pkt.CompletionCodes([0x0000]) + + # 2222/1500, 21/00 + pkt = NCP(0x1500, "Send Broadcast Message", 'message') + pkt.Request((13, 70), [ + rec( 10, 1, ClientListLen, var="x" ), + rec( 11, 1, TargetClientList, 
repeat="x" ), + rec( 12, (1, 58), TargetMessage, info_str=(TargetMessage, "Send Broadcast Message: %s", ", %s") ), + ]) + pkt.Reply(10, [ + rec( 8, 1, ClientListLen, var="x" ), + rec( 9, 1, SendStatus, repeat="x" ) + ]) + pkt.CompletionCodes([0x0000, 0xfd00]) + + # 2222/1501, 21/01 + pkt = NCP(0x1501, "Get Broadcast Message", 'message') + pkt.Request(10) + pkt.Reply((9,66), [ + rec( 8, (1, 58), TargetMessage ) + ]) + pkt.CompletionCodes([0x0000, 0xfd00]) + + # 2222/1502, 21/02 + pkt = NCP(0x1502, "Disable Broadcasts", 'message') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfb0a]) + + # 2222/1503, 21/03 + pkt = NCP(0x1503, "Enable Broadcasts", 'message') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + + # 2222/1509, 21/09 + pkt = NCP(0x1509, "Broadcast To Console", 'message') + pkt.Request((11, 68), [ + rec( 10, (1, 58), TargetMessage, info_str=(TargetMessage, "Broadcast to Console: %s", ", %s") ) + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + + # 2222/150A, 21/10 + pkt = NCP(0x150A, "Send Broadcast Message", 'message') + pkt.Request((17, 74), [ + rec( 10, 2, ClientListCount, ENC_LITTLE_ENDIAN, var="x" ), + rec( 12, 4, ClientList, ENC_LITTLE_ENDIAN, repeat="x" ), + rec( 16, (1, 58), TargetMessage, info_str=(TargetMessage, "Send Broadcast Message: %s", ", %s") ), + ]) + pkt.Reply(14, [ + rec( 8, 2, ClientListCount, ENC_LITTLE_ENDIAN, var="x" ), + rec( 10, 4, ClientCompFlag, ENC_LITTLE_ENDIAN, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0xfd00]) + + # 2222/150B, 21/11 + pkt = NCP(0x150B, "Get Broadcast Message", 'message') + pkt.Request(10) + pkt.Reply((9,66), [ + rec( 8, (1, 58), TargetMessage ) + ]) + pkt.CompletionCodes([0x0000, 0xfd00]) + + # 2222/150C, 21/12 + pkt = NCP(0x150C, "Connection Message Control", 'message') + pkt.Request(22, [ + rec( 10, 1, ConnectionControlBits ), + rec( 11, 3, Reserved3 ), + rec( 14, 4, ConnectionListCount, ENC_LITTLE_ENDIAN, var="x" ), + rec( 18, 4, ConnectionList, ENC_LITTLE_ENDIAN, repeat="x" ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff00]) + + # 2222/1600, 22/0 + pkt = NCP(0x1600, "Set Directory Handle", 'file') + pkt.Request((13,267), [ + rec( 10, 1, TargetDirHandle ), + rec( 11, 1, DirHandle ), + rec( 12, (1, 255), Path, info_str=(Path, "Set Directory Handle to: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00, + 0xfd00, 0xff00]) + + + # 2222/1601, 22/1 + pkt = NCP(0x1601, "Get Directory Path", 'file') + pkt.Request(11, [ + rec( 10, 1, DirHandle,info_str=(DirHandle, "Get Directory Path for Directory Handle %d", ", %d") ), + ]) + pkt.Reply((9,263), [ + rec( 8, (1,255), Path ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9b00, 0x9c00, 0xa100]) + + # 2222/1602, 22/2 + pkt = NCP(0x1602, "Scan Directory Information", 'file') + pkt.Request((14,268), [ + rec( 10, 1, DirHandle ), + rec( 11, 2, StartingSearchNumber, ENC_BIG_ENDIAN ), + rec( 13, (1, 255), Path, info_str=(Path, "Scan Directory Information: %s", ", %s") ), + ]) + pkt.Reply(36, [ + rec( 8, 16, DirectoryPath ), + rec( 24, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 26, 2, CreationTime, ENC_BIG_ENDIAN ), + rec( 28, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 32, 1, AccessRightsMask ), + rec( 33, 1, Reserved ), + rec( 34, 2, NextSearchNumber, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00, + 0xfd00, 0xff00]) + + # 2222/1603, 22/3 + pkt = NCP(0x1603, "Get Effective Directory Rights", 'file') + pkt.Request((12,266), [ + rec( 
10, 1, DirHandle ), + rec( 11, (1, 255), Path, info_str=(Path, "Get Effective Directory Rights: %s", ", %s") ), + ]) + pkt.Reply(9, [ + rec( 8, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00, + 0xfd00, 0xff00]) + + # 2222/1604, 22/4 + pkt = NCP(0x1604, "Modify Maximum Rights Mask", 'file') + pkt.Request((14,268), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, RightsGrantMask ), + rec( 12, 1, RightsRevokeMask ), + rec( 13, (1, 255), Path, info_str=(Path, "Modify Maximum Rights Mask: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00, + 0xfd00, 0xff00]) + + # 2222/1605, 22/5 + pkt = NCP(0x1605, "Get Volume Number", 'file') + pkt.Request((11, 265), [ + rec( 10, (1,255), VolumeNameLen, info_str=(VolumeNameLen, "Get Volume Number for: %s", ", %s") ), + ]) + pkt.Reply(9, [ + rec( 8, 1, VolumeNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804]) + + # 2222/1606, 22/6 + pkt = NCP(0x1606, "Get Volume Name", 'file') + pkt.Request(11, [ + rec( 10, 1, VolumeNumber,info_str=(VolumeNumber, "Get Name for Volume %d", ", %d") ), + ]) + pkt.Reply((9, 263), [ + rec( 8, (1,255), VolumeNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0xff00]) + + # 2222/160A, 22/10 + pkt = NCP(0x160A, "Create Directory", 'file') + pkt.Request((13,267), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, AccessRightsMask ), + rec( 12, (1, 255), Path, info_str=(Path, "Create Directory: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8400, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, + 0x9e00, 0xa100, 0xfd00, 0xff00]) + + # 2222/160B, 22/11 + pkt = NCP(0x160B, "Delete Directory", 'file') + pkt.Request((13,267), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, Reserved ), + rec( 12, (1, 255), Path, info_str=(Path, "Delete Directory: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8a00, 0x9600, 0x9804, 0x9b03, 0x9c03, + 0x9f00, 0xa000, 0xa100, 0xfd00, 0xff00]) + + # 2222/160C, 22/12 + pkt = NCP(0x160C, "Scan Directory for Trustees", 'file') + pkt.Request((13,267), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, TrusteeSetNumber ), + rec( 12, (1, 255), Path, info_str=(Path, "Scan Directory for Trustees: %s", ", %s") ), + ]) + pkt.Reply(57, [ + rec( 8, 16, DirectoryPath ), + rec( 24, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 26, 2, CreationTime, ENC_BIG_ENDIAN ), + rec( 28, 4, CreatorID ), + rec( 32, 4, TrusteeID, ENC_BIG_ENDIAN ), + rec( 36, 4, TrusteeID, ENC_BIG_ENDIAN ), + rec( 40, 4, TrusteeID, ENC_BIG_ENDIAN ), + rec( 44, 4, TrusteeID, ENC_BIG_ENDIAN ), + rec( 48, 4, TrusteeID, ENC_BIG_ENDIAN ), + rec( 52, 1, AccessRightsMask ), + rec( 53, 1, AccessRightsMask ), + rec( 54, 1, AccessRightsMask ), + rec( 55, 1, AccessRightsMask ), + rec( 56, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c03, + 0xa100, 0xfd00, 0xff00]) + + # 2222/160D, 22/13 + pkt = NCP(0x160D, "Add Trustee to Directory", 'file') + pkt.Request((17,271), [ + rec( 10, 1, DirHandle ), + rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ), + rec( 15, 1, AccessRightsMask ), + rec( 16, (1, 255), Path, info_str=(Path, "Add Trustee to Directory: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, + 0xa100, 0xfc06, 0xfd00, 0xff00]) + + # 2222/160E, 22/14 + pkt = NCP(0x160E, "Delete Trustee from Directory", 'file') + pkt.Request((17,271), [ + rec( 10, 1, DirHandle ), + rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ), + 
rec( 15, 1, Reserved ), + rec( 16, (1, 255), Path, info_str=(Path, "Delete Trustee from Directory: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, + 0xa100, 0xfc06, 0xfd00, 0xfe07, 0xff00]) + + # 2222/160F, 22/15 + pkt = NCP(0x160F, "Rename Directory", 'file') + pkt.Request((13, 521), [ + rec( 10, 1, DirHandle ), + rec( 11, (1, 255), Path, info_str=(Path, "Rename Directory: %s", ", %s") ), + rec( -1, (1, 255), NewPath ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8b00, 0x9200, 0x9600, 0x9804, 0x9b03, 0x9c03, + 0x9e00, 0xa100, 0xef00, 0xfd00, 0xff00]) + + # 2222/1610, 22/16 + pkt = NCP(0x1610, "Purge Erased Files", 'file') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8100, 0x9600, 0x9804, 0xa100, 0xff00]) + + # 2222/1611, 22/17 + pkt = NCP(0x1611, "Recover Erased File", 'file') + pkt.Request(11, [ + rec( 10, 1, DirHandle,info_str=(DirHandle, "Recover Erased File from Directory Handle %d", ", %d") ), + ]) + pkt.Reply(38, [ + rec( 8, 15, OldFileName ), + rec( 23, 15, NewFileName ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, + 0xa100, 0xfd00, 0xff00]) + # 2222/1612, 22/18 + pkt = NCP(0x1612, "Alloc Permanent Directory Handle", 'file') + pkt.Request((13, 267), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, DirHandleName ), + rec( 12, (1,255), Path, info_str=(Path, "Allocate Permanent Directory Handle: %s", ", %s") ), + ]) + pkt.Reply(10, [ + rec( 8, 1, DirHandle ), + rec( 9, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9b00, 0x9c03, 0x9d00, + 0xa100, 0xfd00, 0xff00]) + # 2222/1613, 22/19 + pkt = NCP(0x1613, "Alloc Temporary Directory Handle", 'file') + pkt.Request((13, 267), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, DirHandleName ), + rec( 12, (1,255), Path, info_str=(Path, "Allocate Temporary Directory Handle: %s", ", %s") ), + ]) + pkt.Reply(10, [ + rec( 8, 1, DirHandle ), + rec( 9, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9c03, 0x9d00, + 0xa100, 0xfd00, 0xff00]) + # 2222/1614, 22/20 + pkt = NCP(0x1614, "Deallocate Directory Handle", 'file') + pkt.Request(11, [ + rec( 10, 1, DirHandle,info_str=(DirHandle, "Deallocate Directory Handle %d", ", %d") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9b03]) + # 2222/1615, 22/21 + pkt = NCP(0x1615, "Get Volume Info with Handle", 'file') + pkt.Request( 11, [ + rec( 10, 1, DirHandle,info_str=(DirHandle, "Get Volume Information with Handle %d", ", %d") ) + ]) + pkt.Reply( 36, [ + rec( 8, 2, SectorsPerCluster, ENC_BIG_ENDIAN ), + rec( 10, 2, TotalVolumeClusters, ENC_BIG_ENDIAN ), + rec( 12, 2, AvailableClusters, ENC_BIG_ENDIAN ), + rec( 14, 2, TotalDirectorySlots, ENC_BIG_ENDIAN ), + rec( 16, 2, AvailableDirectorySlots, ENC_BIG_ENDIAN ), + rec( 18, 16, VolumeName ), + rec( 34, 2, RemovableFlag, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0xff00]) + # 2222/1616, 22/22 + pkt = NCP(0x1616, "Alloc Special Temporary Directory Handle", 'file') + pkt.Request((13, 267), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, DirHandleName ), + rec( 12, (1,255), Path, info_str=(Path, "Allocate Special Temporary Directory Handle: %s", ", %s") ), + ]) + pkt.Reply(10, [ + rec( 8, 1, DirHandle ), + rec( 9, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9b00, 0x9c03, 0x9d00, + 0xa100, 0xfd00, 0xff00]) + # 2222/1617, 22/23 + pkt = NCP(0x1617, "Extract a Base Handle", 'file') + pkt.Request(11, [ + rec( 10, 1, DirHandle, 
info_str=(DirHandle, "Extract a Base Handle from Directory Handle %d", ", %d") ), + ]) + pkt.Reply(22, [ + rec( 8, 10, ServerNetworkAddress ), + rec( 18, 4, DirHandleLong ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9b03]) + # 2222/1618, 22/24 + pkt = NCP(0x1618, "Restore an Extracted Base Handle", 'file') + pkt.Request(24, [ + rec( 10, 10, ServerNetworkAddress ), + rec( 20, 4, DirHandleLong ), + ]) + pkt.Reply(10, [ + rec( 8, 1, DirHandle ), + rec( 9, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c00, 0x9d00, 0xa100, + 0xfd00, 0xff00]) + # 2222/1619, 22/25 + pkt = NCP(0x1619, "Set Directory Information", 'file') + pkt.Request((21, 275), [ + rec( 10, 1, DirHandle ), + rec( 11, 2, CreationDate ), + rec( 13, 2, CreationTime ), + rec( 15, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 19, 1, AccessRightsMask ), + rec( 20, (1,255), Path, info_str=(Path, "Set Directory Information: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c00, 0xa100, + 0xff16]) + # 2222/161A, 22/26 + pkt = NCP(0x161A, "Get Path Name of a Volume-Directory Number Pair", 'file') + pkt.Request(13, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 2, DirectoryEntryNumberWord ), + ]) + pkt.Reply((9,263), [ + rec( 8, (1,255), Path ), + ]) + pkt.CompletionCodes([0x0000, 0x9804, 0x9c00, 0xa100]) + # 2222/161B, 22/27 + pkt = NCP(0x161B, "Scan Salvageable Files", 'file') + pkt.Request(15, [ + rec( 10, 1, DirHandle ), + rec( 11, 4, SequenceNumber ), + ]) + pkt.Reply(140, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 2, Subdirectory ), + rec( 14, 2, Reserved2 ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 1, UniqueID ), + rec( 21, 1, FlagsDef ), + rec( 22, 1, DestNameSpace ), + rec( 23, 1, FileNameLen ), + rec( 24, 12, FileName12 ), + rec( 36, 2, CreationTime ), + rec( 38, 2, CreationDate ), + rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 44, 2, ArchivedTime ), + rec( 46, 2, ArchivedDate ), + rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ), + rec( 52, 2, UpdateTime ), + rec( 54, 2, UpdateDate ), + rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ), + rec( 60, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 64, 44, Reserved44 ), + rec( 108, 2, InheritedRightsMask ), + rec( 110, 2, LastAccessedDate ), + rec( 112, 4, DeletedFileTime ), + rec( 116, 2, DeletedTime ), + rec( 118, 2, DeletedDate ), + rec( 120, 4, DeletedID, ENC_BIG_ENDIAN ), + rec( 124, 16, Reserved16 ), + ]) + pkt.CompletionCodes([0x0000, 0xfb01, 0x9801, 0xff1d]) + # 2222/161C, 22/28 + pkt = NCP(0x161C, "Recover Salvageable File", 'file') + pkt.Request((17,525), [ + rec( 10, 1, DirHandle ), + rec( 11, 4, SequenceNumber ), + rec( 15, (1, 255), FileName, info_str=(FileName, "Recover File: %s", ", %s") ), + rec( -1, (1, 255), NewFileNameLen ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8401, 0x9c03, 0xfe02]) + # 2222/161D, 22/29 + pkt = NCP(0x161D, "Purge Salvageable File", 'file') + pkt.Request(15, [ + rec( 10, 1, DirHandle ), + rec( 11, 4, SequenceNumber ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8500, 0x9c03]) + # 2222/161E, 22/30 + pkt = NCP(0x161E, "Scan a Directory", 'file') + pkt.Request((17, 271), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, DOSFileAttributes ), + rec( 12, 4, SequenceNumber ), + rec( 16, (1, 255), SearchPattern, info_str=(SearchPattern, "Scan a Directory: %s", ", %s") ), + ]) + pkt.Reply(140, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 4, Subdirectory ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 1, UniqueID, ENC_LITTLE_ENDIAN ), + rec( 21, 1, PurgeFlags ), + rec( 22, 1, 
DestNameSpace ), + rec( 23, 1, NameLen ), + rec( 24, 12, Name12 ), + rec( 36, 2, CreationTime ), + rec( 38, 2, CreationDate ), + rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 44, 2, ArchivedTime ), + rec( 46, 2, ArchivedDate ), + rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ), + rec( 52, 2, UpdateTime ), + rec( 54, 2, UpdateDate ), + rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ), + rec( 60, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 64, 44, Reserved44 ), + rec( 108, 2, InheritedRightsMask ), + rec( 110, 2, LastAccessedDate ), + rec( 112, 28, Reserved28 ), + ]) + pkt.CompletionCodes([0x0000, 0x8500, 0x9c03]) + # 2222/161F, 22/31 + pkt = NCP(0x161F, "Get Directory Entry", 'file') + pkt.Request(11, [ + rec( 10, 1, DirHandle ), + ]) + pkt.Reply(136, [ + rec( 8, 4, Subdirectory ), + rec( 12, 4, AttributesDef32 ), + rec( 16, 1, UniqueID, ENC_LITTLE_ENDIAN ), + rec( 17, 1, PurgeFlags ), + rec( 18, 1, DestNameSpace ), + rec( 19, 1, NameLen ), + rec( 20, 12, Name12 ), + rec( 32, 2, CreationTime ), + rec( 34, 2, CreationDate ), + rec( 36, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 40, 2, ArchivedTime ), + rec( 42, 2, ArchivedDate ), + rec( 44, 4, ArchiverID, ENC_BIG_ENDIAN ), + rec( 48, 2, UpdateTime ), + rec( 50, 2, UpdateDate ), + rec( 52, 4, NextTrusteeEntry, ENC_BIG_ENDIAN ), + rec( 56, 48, Reserved48 ), + rec( 104, 2, MaximumSpace ), + rec( 106, 2, InheritedRightsMask ), + rec( 108, 28, Undefined28 ), + ]) + pkt.CompletionCodes([0x0000, 0x8900, 0xbf00, 0xfb00]) + # 2222/1620, 22/32 + pkt = NCP(0x1620, "Scan Volume's User Disk Restrictions", 'file') + pkt.Request(15, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, SequenceNumber ), + ]) + pkt.Reply(17, [ + rec( 8, 1, NumberOfEntries, var="x" ), + rec( 9, 8, ObjectIDStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9800]) + # 2222/1621, 22/33 + pkt = NCP(0x1621, "Add User Disk Space Restriction", 'file') + pkt.Request(19, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, ObjectID ), + rec( 15, 4, DiskSpaceLimit ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9800]) + # 2222/1622, 22/34 + pkt = NCP(0x1622, "Remove User Disk Space Restrictions", 'file') + pkt.Request(15, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, ObjectID ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0xfe0e]) + # 2222/1623, 22/35 + pkt = NCP(0x1623, "Get Directory Disk Space Restriction", 'file') + pkt.Request(11, [ + rec( 10, 1, DirHandle ), + ]) + pkt.Reply(18, [ + rec( 8, 1, NumberOfEntries ), + rec( 9, 1, Level ), + rec( 10, 4, MaxSpace ), + rec( 14, 4, CurrentSpace ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/1624, 22/36 + pkt = NCP(0x1624, "Set Directory Disk Space Restriction", 'file') + pkt.Request(15, [ + rec( 10, 1, DirHandle ), + rec( 11, 4, DiskSpaceLimit ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0101, 0x8c00, 0xbf00]) + # 2222/1625, 22/37 + pkt = NCP(0x1625, "Set Directory Entry Information", 'file') + pkt.Request(NO_LENGTH_CHECK, [ + # + # XXX - this didn't match what was in the spec for 22/37 + # on the Novell Web site. 
+ # + rec( 10, 1, DirHandle ), + rec( 11, 1, SearchAttributes ), + rec( 12, 4, SequenceNumber ), + rec( 16, 2, ChangeBits ), + rec( 18, 2, Reserved2 ), + rec( 20, 4, Subdirectory ), + #srec(DOSDirectoryEntryStruct, req_cond="ncp.search_att_sub == TRUE"), + srec(DOSFileEntryStruct, req_cond="ncp.search_att_sub == FALSE"), + ]) + pkt.Reply(8) + pkt.ReqCondSizeConstant() + pkt.CompletionCodes([0x0000, 0x0106, 0x8c00, 0xbf00]) + # 2222/1626, 22/38 + pkt = NCP(0x1626, "Scan File or Directory for Extended Trustees", 'file') + pkt.Request((13,267), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, SequenceByte ), + rec( 12, (1, 255), Path, info_str=(Path, "Scan for Extended Trustees: %s", ", %s") ), + ]) + pkt.Reply(91, [ + rec( 8, 1, NumberOfEntries, var="x" ), + rec( 9, 4, ObjectID ), + rec( 13, 4, ObjectID ), + rec( 17, 4, ObjectID ), + rec( 21, 4, ObjectID ), + rec( 25, 4, ObjectID ), + rec( 29, 4, ObjectID ), + rec( 33, 4, ObjectID ), + rec( 37, 4, ObjectID ), + rec( 41, 4, ObjectID ), + rec( 45, 4, ObjectID ), + rec( 49, 4, ObjectID ), + rec( 53, 4, ObjectID ), + rec( 57, 4, ObjectID ), + rec( 61, 4, ObjectID ), + rec( 65, 4, ObjectID ), + rec( 69, 4, ObjectID ), + rec( 73, 4, ObjectID ), + rec( 77, 4, ObjectID ), + rec( 81, 4, ObjectID ), + rec( 85, 4, ObjectID ), + rec( 89, 2, AccessRightsMaskWord, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9800, 0x9b00, 0x9c00]) + # 2222/1627, 22/39 + pkt = NCP(0x1627, "Add Extended Trustee to Directory or File", 'file') + pkt.Request((18,272), [ + rec( 10, 1, DirHandle ), + rec( 11, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 15, 2, TrusteeRights ), + rec( 17, (1, 255), Path, info_str=(Path, "Add Extended Trustee: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9000]) + # 2222/1628, 22/40 + pkt = NCP(0x1628, "Scan Directory Disk Space", 'file') + pkt.Request((17,271), [ + rec( 10, 1, DirHandle ), + rec( 11, 1, SearchAttributes ), + rec( 12, 4, SequenceNumber ), + rec( 16, (1, 255), SearchPattern, info_str=(SearchPattern, "Scan Directory Disk Space: %s", ", %s") ), + ]) + pkt.Reply((148), [ + rec( 8, 4, SequenceNumber ), + rec( 12, 4, Subdirectory ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 1, UniqueID ), + rec( 21, 1, PurgeFlags ), + rec( 22, 1, DestNameSpace ), + rec( 23, 1, NameLen ), + rec( 24, 12, Name12 ), + rec( 36, 2, CreationTime ), + rec( 38, 2, CreationDate ), + rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 44, 2, ArchivedTime ), + rec( 46, 2, ArchivedDate ), + rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ), + rec( 52, 2, UpdateTime ), + rec( 54, 2, UpdateDate ), + rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ), + rec( 60, 4, DataForkSize, ENC_BIG_ENDIAN ), + rec( 64, 4, DataForkFirstFAT, ENC_BIG_ENDIAN ), + rec( 68, 4, NextTrusteeEntry, ENC_BIG_ENDIAN ), + rec( 72, 36, Reserved36 ), + rec( 108, 2, InheritedRightsMask ), + rec( 110, 2, LastAccessedDate ), + rec( 112, 4, DeletedFileTime ), + rec( 116, 2, DeletedTime ), + rec( 118, 2, DeletedDate ), + rec( 120, 4, DeletedID, ENC_BIG_ENDIAN ), + rec( 124, 8, Undefined8 ), + rec( 132, 4, PrimaryEntry, ENC_LITTLE_ENDIAN ), + rec( 136, 4, NameList, ENC_LITTLE_ENDIAN ), + rec( 140, 4, OtherFileForkSize, ENC_BIG_ENDIAN ), + rec( 144, 4, OtherFileForkFAT, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8900, 0x9c03, 0xfb01, 0xff00]) + # 2222/1629, 22/41 + pkt = NCP(0x1629, "Get Object Disk Usage and Restrictions", 'file') + pkt.Request(15, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, ObjectID, ENC_LITTLE_ENDIAN ), + ]) + pkt.Reply(16, [ + rec( 8, 4, Restriction ), + rec( 12, 4, InUse 
), + ]) + pkt.CompletionCodes([0x0000, 0x9802]) + # 2222/162A, 22/42 + pkt = NCP(0x162A, "Get Effective Rights for Directory Entry", 'file') + pkt.Request((12,266), [ + rec( 10, 1, DirHandle ), + rec( 11, (1, 255), Path, info_str=(Path, "Get Effective Rights: %s", ", %s") ), + ]) + pkt.Reply(10, [ + rec( 8, 2, AccessRightsMaskWord ), + ]) + pkt.CompletionCodes([0x0000, 0x9804, 0x9c03]) + # 2222/162B, 22/43 + pkt = NCP(0x162B, "Remove Extended Trustee from Dir or File", 'file') + pkt.Request((17,271), [ + rec( 10, 1, DirHandle ), + rec( 11, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 15, 1, Unused ), + rec( 16, (1, 255), Path, info_str=(Path, "Remove Extended Trustee from %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9002, 0x9c03, 0xfe0f, 0xff09]) + # 2222/162C, 22/44 + pkt = NCP(0x162C, "Get Volume and Purge Information", 'file') + pkt.Request( 11, [ + rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Volume and Purge Information for Volume %d", ", %d") ) + ]) + pkt.Reply( (38,53), [ + rec( 8, 4, TotalBlocks ), + rec( 12, 4, FreeBlocks ), + rec( 16, 4, PurgeableBlocks ), + rec( 20, 4, NotYetPurgeableBlocks ), + rec( 24, 4, TotalDirectoryEntries ), + rec( 28, 4, AvailableDirEntries ), + rec( 32, 4, Reserved4 ), + rec( 36, 1, SectorsPerBlock ), + rec( 37, (1,16), VolumeNameLen ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/162D, 22/45 + pkt = NCP(0x162D, "Get Directory Information", 'file') + pkt.Request( 11, [ + rec( 10, 1, DirHandle ) + ]) + pkt.Reply( (30, 45), [ + rec( 8, 4, TotalBlocks ), + rec( 12, 4, AvailableBlocks ), + rec( 16, 4, TotalDirectoryEntries ), + rec( 20, 4, AvailableDirEntries ), + rec( 24, 4, Reserved4 ), + rec( 28, 1, SectorsPerBlock ), + rec( 29, (1,16), VolumeNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x9b03]) + # 2222/162E, 22/46 + pkt = NCP(0x162E, "Rename Or Move", 'file') + pkt.Request( (17,525), [ + rec( 10, 1, SourceDirHandle ), + rec( 11, 1, SearchAttributes ), + rec( 12, 1, SourcePathComponentCount ), + rec( 13, (1,255), SourcePath, info_str=(SourcePath, "Rename or Move: %s", ", %s") ), + rec( -1, 1, DestDirHandle ), + rec( -1, 1, DestPathComponentCount ), + rec( -1, (1,255), DestPath ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8701, 0x8b00, 0x8d00, 0x8e00, + 0x8f00, 0x9001, 0x9101, 0x9201, 0x9a00, 0x9b03, + 0x9c03, 0xa400, 0xff17]) + # 2222/162F, 22/47 + pkt = NCP(0x162F, "Get Name Space Information", 'file') + pkt.Request( 11, [ + rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Name Space Information for Volume %d", ", %d") ) + ]) + pkt.Reply( (15,523), [ + # + # XXX - why does this not display anything at all + # if the stuff after the first IndexNumber is + # un-commented? That stuff really is there.... 
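+ # (Editor's sketch of the var/repeat convention used below: a rec tagged + # var="v" saves its decoded value as the counter v, and any later rec + # tagged repeat="v" is dissected v times in sequence; an offset of -1 + # means "immediately after the previous variable-length field".  So a + # DefinedNameSpaces value of 3 should yield three consecutive + # NameSpaceName strings.)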
+ # + rec( 8, 1, DefinedNameSpaces, var="v" ), + rec( 9, (1,255), NameSpaceName, repeat="v" ), + rec( -1, 1, DefinedDataStreams, var="w" ), + rec( -1, (2,256), DataStreamInfo, repeat="w" ), + rec( -1, 1, LoadedNameSpaces, var="x" ), + rec( -1, 1, IndexNumber, repeat="x" ), +# rec( -1, 1, VolumeNameSpaces, var="y" ), +# rec( -1, 1, IndexNumber, repeat="y" ), +# rec( -1, 1, VolumeDataStreams, var="z" ), +# rec( -1, 1, IndexNumber, repeat="z" ), + ]) + pkt.CompletionCodes([0x0000, 0x9802, 0xff00]) + # 2222/1630, 22/48 + pkt = NCP(0x1630, "Get Name Space Directory Entry", 'file') + pkt.Request( 16, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, DOSSequence ), + rec( 15, 1, SrcNameSpace ), + ]) + pkt.Reply( 112, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 4, Subdirectory ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 1, UniqueID ), + rec( 21, 1, Flags ), + rec( 22, 1, SrcNameSpace ), + rec( 23, 1, NameLength ), + rec( 24, 12, Name12 ), + rec( 36, 2, CreationTime ), + rec( 38, 2, CreationDate ), + rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 44, 2, ArchivedTime ), + rec( 46, 2, ArchivedDate ), + rec( 48, 4, ArchiverID ), + rec( 52, 2, UpdateTime ), + rec( 54, 2, UpdateDate ), + rec( 56, 4, UpdateID ), + rec( 60, 4, FileSize ), + rec( 64, 44, Reserved44 ), + rec( 108, 2, InheritedRightsMask ), + rec( 110, 2, LastAccessedDate ), + ]) + pkt.CompletionCodes([0x0000, 0x8900, 0x9802, 0xbf00]) + # 2222/1631, 22/49 + pkt = NCP(0x1631, "Open Data Stream", 'file') + pkt.Request( (15,269), [ + rec( 10, 1, DataStream ), + rec( 11, 1, DirHandle ), + rec( 12, 1, AttributesDef ), + rec( 13, 1, OpenRights ), + rec( 14, (1, 255), FileName, info_str=(FileName, "Open Data Stream: %s", ", %s") ), + ]) + pkt.Reply( 12, [ + rec( 8, 4, CCFileHandle, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8200, 0x9002, 0xbe00, 0xff00]) + # 2222/1632, 22/50 + pkt = NCP(0x1632, "Get Object Effective Rights for Directory Entry", 'file') + pkt.Request( (16,270), [ + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 14, 1, DirHandle ), + rec( 15, (1, 255), Path, info_str=(Path, "Get Object Effective Rights: %s", ", %s") ), + ]) + pkt.Reply( 10, [ + rec( 8, 2, TrusteeRights ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x9b00, 0x9c03, 0xfc06]) + # 2222/1633, 22/51 + pkt = NCP(0x1633, "Get Extended Volume Information", 'file') + pkt.Request( 11, [ + rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Extended Volume Information for Volume %d", ", %d") ), + ]) + pkt.Reply( (139,266), [ + rec( 8, 2, VolInfoReplyLen ), + rec( 10, 128, VolInfoStructure), + rec( 138, (1,128), VolumeNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x9804, 0xfb08, 0xff00]) + pkt.MakeExpert("ncp1633_reply") + # 2222/1634, 22/52 + pkt = NCP(0x1634, "Get Mount Volume List", 'file') + pkt.Request( 22, [ + rec( 10, 4, StartVolumeNumber ), + rec( 14, 4, VolumeRequestFlags, ENC_LITTLE_ENDIAN ), + rec( 18, 4, SrcNameSpace ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 4, ItemsInPacket, var="x" ), + rec( 12, 4, NextVolumeNumber ), + srec( VolumeStruct, req_cond="ncp.volume_request_flags==0x0000", repeat="x" ), + srec( VolumeWithNameStruct, req_cond="ncp.volume_request_flags==0x0001", repeat="x" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x9802]) + # 2222/1635, 22/53 + pkt = NCP(0x1635, "Get Volume Capabilities", 'file') + pkt.Request( 18, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, VersionNumberLong ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 4, VolumeCapabilities ), + rec( 12, 28, Reserved28 ), + rec( 
40, 64, VolumeNameStringz ), + rec( 104, 128, VolumeGUID ), + rec( 232, 256, PoolName ), + rec( 488, PROTO_LENGTH_UNKNOWN, VolumeMountPoint ), + ]) + pkt.CompletionCodes([0x0000, 0x7700, 0x9802, 0xfb01]) + # 2222/1636, 22/54 + pkt = NCP(0x1636, "Add User Disk Space Restriction 64 Bit Aware", 'file') + pkt.Request(26, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, ObjectID, ENC_LITTLE_ENDIAN ), + rec( 18, 8, DiskSpaceLimit64 ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9800]) + # 2222/1637, 22/55 + pkt = NCP(0x1637, "Get Object Disk Usage and Restrictions 64 Bit Aware", 'file') + pkt.Request(18, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, ObjectID, ENC_LITTLE_ENDIAN ), + ]) + pkt.Reply(24, [ + rec( 8, 8, RestrictionQuad ), + rec( 16, 8, InUse64 ), + ]) + pkt.CompletionCodes([0x0000, 0x9802]) + # 2222/1638, 22/56 + pkt = NCP(0x1638, "Scan Volume's User Disk Restrictions 64 Bit Aware", 'file') + pkt.Request(18, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, SequenceNumber ), + ]) + pkt.Reply(24, [ + rec( 8, 4, NumberOfEntriesLong, var="x" ), + rec( 12, 12, ObjectIDStruct64, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9800]) + # 2222/1639, 22/57 + pkt = NCP(0x1639, "Set Directory Disk Space Restriction 64 Bit Aware", 'file') + pkt.Request(26, [ + rec( 10, 8, DirHandle64 ), + rec( 18, 8, DiskSpaceLimit64 ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0101, 0x8c00, 0xbf00]) + # 2222/163A, 22/58 + pkt = NCP(0x163A, "Get Directory Information 64 Bit Aware", 'file') + pkt.Request( 18, [ + rec( 10, 8, DirHandle64 ) + ]) + pkt.Reply( (49, 64), [ + rec( 8, 8, TotalBlocks64 ), + rec( 16, 8, AvailableBlocks64 ), + rec( 24, 8, TotalDirEntries64 ), + rec( 32, 8, AvailableDirEntries64 ), + rec( 40, 4, Reserved4 ), + rec( 44, 4, SectorsPerBlockLong ), + rec( 48, (1,16), VolumeNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x9b03]) + # 2222/1641, 22/59 +# pkt = NCP(0x1641, "Scan Volume's User Disk Restrictions 64-bit Aware", 'file') +# pkt.Request(18, [ +# rec( 10, 4, VolumeNumberLong ), +# rec( 14, 4, SequenceNumber ), +# ]) +# pkt.Reply(24, [ +# rec( 8, 4, NumberOfEntriesLong, var="x" ), +# rec( 12, 12, ObjectIDStruct64, repeat="x" ), +# ]) +# pkt.CompletionCodes([0x0000, 0x9800]) + # 2222/1700, 23/00 + pkt = NCP(0x1700, "Login User", 'connection') + pkt.Request( (12, 58), [ + rec( 10, (1,16), UserName, info_str=(UserName, "Login User: %s", ", %s") ), + rec( -1, (1,32), Password ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc501, 0xd700, + 0xd900, 0xda00, 0xdb00, 0xde00, 0xdf00, 0xe800, + 0xec00, 0xed00, 0xef00, 0xf001, 0xf100, 0xf200, + 0xf600, 0xfb00, 0xfc06, 0xfe07, 0xff00]) + # 2222/1701, 23/01 + pkt = NCP(0x1701, "Change User Password", 'bindery') + pkt.Request( (13, 90), [ + rec( 10, (1,16), UserName, info_str=(UserName, "Change Password for User: %s", ", %s") ), + rec( -1, (1,32), Password ), + rec( -1, (1,32), NewPassword ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xd600, 0xf001, 0xf101, 0xf501, + 0xfc06, 0xfe07, 0xff00]) + # 2222/1702, 23/02 + pkt = NCP(0x1702, "Get User Connection List", 'connection') + pkt.Request( (11, 26), [ + rec( 10, (1,16), UserName, info_str=(UserName, "Get User Connection: %s", ", %s") ), + ]) + pkt.Reply( (9, 136), [ + rec( 8, (1, 128), ConnectionNumberList ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00]) + # 2222/1703, 23/03 + pkt = NCP(0x1703, "Get User Number", 'bindery') + pkt.Request( (11, 26), [ + rec( 10, (1,16), UserName, 
info_str=(UserName, "Get User Number: %s", ", %s") ), + ]) + pkt.Reply( 12, [ + rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00]) + # 2222/1705, 23/05 + pkt = NCP(0x1705, "Get Station's Logged Info", 'connection') + pkt.Request( 11, [ + rec( 10, 1, TargetConnectionNumber, info_str=(TargetConnectionNumber, "Get Station's Logged Information on Connection %d", ", %d") ), + ]) + pkt.Reply( 266, [ + rec( 8, 16, UserName16 ), + rec( 24, 7, LoginTime ), + rec( 31, 39, FullName ), + rec( 70, 4, UserID, ENC_BIG_ENDIAN ), + rec( 74, 128, SecurityEquivalentList ), + rec( 202, 64, Reserved64 ), + ]) + pkt.CompletionCodes([0x0000, 0x9602, 0xfc06, 0xfd00, 0xfe07, 0xff00]) + # 2222/1707, 23/07 + pkt = NCP(0x1707, "Get Group Number", 'bindery') + pkt.Request( 14, [ + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.Reply( 62, [ + rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, 48, ObjectNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x9602, 0xf101, 0xfc06, 0xfe07, 0xff00]) + # 2222/170C, 23/12 + pkt = NCP(0x170C, "Verify Serialization", 'fileserver') + pkt.Request( 14, [ + rec( 10, 4, ServerSerialNumber ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff00]) + # 2222/170D, 23/13 + pkt = NCP(0x170D, "Log Network Message", 'file') + pkt.Request( (11, 68), [ + rec( 10, (1, 58), TargetMessage, info_str=(TargetMessage, "Log Network Message: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8100, 0x8800, 0x8d00, 0x8e00, 0x8f00, + 0x9001, 0x9400, 0x9600, 0x9804, 0x9900, 0x9b00, 0xa100, + 0xa201, 0xff00]) + # 2222/170E, 23/14 + pkt = NCP(0x170E, "Get Disk Utilization", 'fileserver') + pkt.Request( 15, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ), + ]) + pkt.Reply( 19, [ + rec( 8, 1, VolumeNumber ), + rec( 9, 4, TrusteeID, ENC_BIG_ENDIAN ), + rec( 13, 2, DirectoryCount, ENC_BIG_ENDIAN ), + rec( 15, 2, FileCount, ENC_BIG_ENDIAN ), + rec( 17, 2, ClusterCount, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0xa100, 0xf200]) + # 2222/170F, 23/15 + pkt = NCP(0x170F, "Scan File Information", 'file') + pkt.Request((15,269), [ + rec( 10, 2, LastSearchIndex ), + rec( 12, 1, DirHandle ), + rec( 13, 1, SearchAttributes ), + rec( 14, (1, 255), FileName, info_str=(FileName, "Scan File Information: %s", ", %s") ), + ]) + pkt.Reply( 102, [ + rec( 8, 2, NextSearchIndex ), + rec( 10, 14, FileName14 ), + rec( 24, 2, AttributesDef16 ), + rec( 26, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 30, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 32, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 34, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 36, 2, ModifiedTime, ENC_BIG_ENDIAN ), + rec( 38, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 42, 2, ArchivedDate, ENC_BIG_ENDIAN ), + rec( 44, 2, ArchivedTime, ENC_BIG_ENDIAN ), + rec( 46, 56, Reserved56 ), + ]) + pkt.CompletionCodes([0x0000, 0x8800, 0x8900, 0x9300, 0x9400, 0x9804, 0x9b00, 0x9c00, + 0xa100, 0xfd00, 0xff17]) + # 2222/1710, 23/16 + pkt = NCP(0x1710, "Set File Information", 'file') + pkt.Request((91,345), [ + rec( 10, 2, AttributesDef16 ), + rec( 12, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 16, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 18, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 20, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 22, 2, ModifiedTime, ENC_BIG_ENDIAN ), + rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 28, 2, ArchivedDate, ENC_BIG_ENDIAN ), + rec( 30, 2, ArchivedTime, ENC_BIG_ENDIAN ), + rec( 32, 
56, Reserved56 ), + rec( 88, 1, DirHandle ), + rec( 89, 1, SearchAttributes ), + rec( 90, (1, 255), FileName, info_str=(FileName, "Set Information for File: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x8c00, 0x8e00, 0x9400, 0x9600, 0x9804, + 0x9b03, 0x9c00, 0xa100, 0xa201, 0xfc06, 0xfd00, 0xfe07, + 0xff17]) + # 2222/1711, 23/17 + pkt = NCP(0x1711, "Get File Server Information", 'fileserver') + pkt.Request(10) + pkt.Reply(136, [ + rec( 8, 48, ServerName ), + rec( 56, 1, OSMajorVersion ), + rec( 57, 1, OSMinorVersion ), + rec( 58, 2, ConnectionsSupportedMax, ENC_BIG_ENDIAN ), + rec( 60, 2, ConnectionsInUse, ENC_BIG_ENDIAN ), + rec( 62, 2, VolumesSupportedMax, ENC_BIG_ENDIAN ), + rec( 64, 1, OSRevision ), + rec( 65, 1, SFTSupportLevel ), + rec( 66, 1, TTSLevel ), + rec( 67, 2, ConnectionsMaxUsed, ENC_BIG_ENDIAN ), + rec( 69, 1, AccountVersion ), + rec( 70, 1, VAPVersion ), + rec( 71, 1, QueueingVersion ), + rec( 72, 1, PrintServerVersion ), + rec( 73, 1, VirtualConsoleVersion ), + rec( 74, 1, SecurityRestrictionVersion ), + rec( 75, 1, InternetBridgeVersion ), + rec( 76, 1, MixedModePathFlag ), + rec( 77, 1, LocalLoginInfoCcode ), + rec( 78, 2, ProductMajorVersion, ENC_BIG_ENDIAN ), + rec( 80, 2, ProductMinorVersion, ENC_BIG_ENDIAN ), + rec( 82, 2, ProductRevisionVersion, ENC_BIG_ENDIAN ), + rec( 84, 1, OSLanguageID, ENC_LITTLE_ENDIAN ), + rec( 85, 1, SixtyFourBitOffsetsSupportedFlag ), + rec( 86, 1, OESServer ), + rec( 87, 1, OESLinuxOrNetWare ), + rec( 88, 48, Reserved48 ), + ]) + pkt.MakeExpert("ncp1711_reply") + pkt.CompletionCodes([0x0000, 0x9600]) + # 2222/1712, 23/18 + pkt = NCP(0x1712, "Get Network Serial Number", 'fileserver') + pkt.Request(10) + pkt.Reply(14, [ + rec( 8, 4, ServerSerialNumber ), + rec( 12, 2, ApplicationNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x9600]) + # 2222/1713, 23/19 + pkt = NCP(0x1713, "Get Internet Address", 'connection') + pkt.Request(11, [ + rec( 10, 1, TargetConnectionNumber, info_str=(TargetConnectionNumber, "Get Internet Address for Connection %d", ", %d") ), + ]) + pkt.Reply(20, [ + rec( 8, 4, NetworkAddress, ENC_BIG_ENDIAN ), + rec( 12, 6, NetworkNodeAddress ), + rec( 18, 2, NetworkSocket, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0xff00]) + # 2222/1714, 23/20 + pkt = NCP(0x1714, "Login Object", 'connection') + pkt.Request( (14, 60), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,16), ClientName, info_str=(ClientName, "Login Object: %s", ", %s") ), + rec( -1, (1,32), Password ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc501, 0xd600, 0xd700, + 0xd900, 0xda00, 0xdb00, 0xde00, 0xdf00, 0xe800, 0xec00, + 0xed00, 0xef00, 0xf001, 0xf100, 0xf200, 0xf600, 0xfb00, + 0xfc06, 0xfe07, 0xff00]) + # 2222/1715, 23/21 + pkt = NCP(0x1715, "Get Object Connection List", 'connection') + pkt.Request( (13, 28), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,16), ObjectName, info_str=(ObjectName, "Get Object Connection List: %s", ", %s") ), + ]) + pkt.Reply( (9, 136), [ + rec( 8, (1, 128), ConnectionNumberList ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00]) + # 2222/1716, 23/22 + pkt = NCP(0x1716, "Get Station's Logged Info", 'connection') + pkt.Request( 11, [ + rec( 10, 1, TargetConnectionNumber ), + ]) + pkt.Reply( 70, [ + rec( 8, 4, UserID, ENC_BIG_ENDIAN ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, 48, ObjectNameLen ), + rec( 62, 7, LoginTime ), + rec( 69, 1, Reserved ), + ]) + pkt.CompletionCodes([0x0000, 0x9602, 
0xfb0a, 0xfc06, 0xfd00, 0xfe07, 0xff00]) + # 2222/1717, 23/23 + pkt = NCP(0x1717, "Get Login Key", 'connection') + pkt.Request(10) + pkt.Reply( 16, [ + rec( 8, 8, LoginKey ), + ]) + pkt.CompletionCodes([0x0000, 0x9602]) + # 2222/1718, 23/24 + pkt = NCP(0x1718, "Keyed Object Login", 'connection') + pkt.Request( (21, 68), [ + rec( 10, 8, LoginKey ), + rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Object Login: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc500, 0xd904, 0xda00, + 0xdb00, 0xdc00, 0xde00, 0xff00]) + # 2222/171A, 23/26 + pkt = NCP(0x171A, "Get Internet Address", 'connection') + pkt.Request(12, [ + rec( 10, 2, TargetConnectionNumber ), + ]) +# Dissect reply in packet-ncp2222.inc + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/171B, 23/27 + pkt = NCP(0x171B, "Get Object Connection List", 'connection') + pkt.Request( (17,64), [ + rec( 10, 4, SearchConnNumber ), + rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Get Object Connection List: %s", ", %s") ), + ]) + pkt.Reply( (13), [ + rec( 8, 1, ConnListLen, var="x" ), + rec( 9, 4, ConnectionNumber, ENC_LITTLE_ENDIAN, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00]) + # 2222/171C, 23/28 + pkt = NCP(0x171C, "Get Station's Logged Info", 'connection') + pkt.Request( 14, [ + rec( 10, 4, TargetConnectionNumber ), + ]) + pkt.Reply( 70, [ + rec( 8, 4, UserID, ENC_BIG_ENDIAN ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, 48, ObjectNameLen ), + rec( 62, 7, LoginTime ), + rec( 69, 1, Reserved ), + ]) + pkt.CompletionCodes([0x0000, 0x7d00, 0x9602, 0xfb02, 0xfc06, 0xfd00, 0xfe07, 0xff00]) + # 2222/171D, 23/29 + pkt = NCP(0x171D, "Change Connection State", 'connection') + pkt.Request( 11, [ + rec( 10, 1, RequestCode ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0109, 0x7a00, 0x7b00, 0x7c00, 0xe000, 0xfb06, 0xfd00]) + # 2222/171E, 23/30 + pkt = NCP(0x171E, "Set Watchdog Delay Interval", 'connection') + pkt.Request( 14, [ + rec( 10, 4, NumberOfMinutesToDelay ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0107]) + # 2222/171F, 23/31 + pkt = NCP(0x171F, "Get Connection List From Object", 'connection') + pkt.Request( 18, [ + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 14, 4, ConnectionNumber ), + ]) + pkt.Reply( (9, 136), [ + rec( 8, (1, 128), ConnectionNumberList ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00]) + # 2222/1720, 23/32 + pkt = NCP(0x1720, "Scan Bindery Object (List)", 'bindery') + pkt.Request((23,70), [ + rec( 10, 4, NextObjectID, ENC_BIG_ENDIAN ), + rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 16, 2, Reserved2 ), + rec( 18, 4, InfoFlags ), + rec( 22, (1,48), ObjectName, info_str=(ObjectName, "Scan Bindery Object: %s", ", %s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, ObjectInfoReturnCount ), + rec( 12, 4, NextObjectID, ENC_BIG_ENDIAN ), + rec( 16, 4, ObjectID ), + srec(ObjectTypeStruct, req_cond="ncp.info_flags_type == TRUE"), + srec(ObjectSecurityStruct, req_cond="ncp.info_flags_security == TRUE"), + srec(ObjectFlagsStruct, req_cond="ncp.info_flags_flags == TRUE"), + srec(ObjectNameStruct, req_cond="ncp.info_flags_name == TRUE"), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xfc02, 0xfe01, 0xff00]) + # 2222/1721, 23/33 + pkt = NCP(0x1721, "Generate GUIDs", 'connection') + pkt.Request( 14, [ + rec( 10, 4, ReturnInfoCount ), + ]) + 
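# (Editor's note: the reply echoes the requested ReturnInfoCount and then + # repeats a 16-byte GUID that many times via var="x"/repeat="x"; the + # declared reply size of 28 appears to cover the single-GUID case.) + 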
pkt.Reply(28, [ + rec( 8, 4, ReturnInfoCount, var="x" ), + rec( 12, 16, GUID, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01]) +# 2222/1722, 23/34 + pkt = NCP(0x1722, "Set Connection Language Encoding", 'connection') + pkt.Request( 22, [ + rec( 10, 4, SetMask ), + rec( 14, 4, NCPEncodedStringsBits ), + rec( 18, 4, CodePage ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/1732, 23/50 + pkt = NCP(0x1732, "Create Bindery Object", 'bindery') + pkt.Request( (15,62), [ + rec( 10, 1, ObjectFlags ), + rec( 11, 1, ObjectSecurity ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, (1,48), ObjectName, info_str=(ObjectName, "Create Bindery Object: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xe700, 0xee00, 0xef00, 0xf101, 0xf501, + 0xfc06, 0xfe07, 0xff00]) + # 2222/1733, 23/51 + pkt = NCP(0x1733, "Delete Bindery Object", 'bindery') + pkt.Request( (13,60), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Delete Bindery Object: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf200, 0xf400, 0xf600, 0xfb00, + 0xfc06, 0xfe07, 0xff00]) + # 2222/1734, 23/52 + pkt = NCP(0x1734, "Rename Bindery Object", 'bindery') + pkt.Request( (14,108), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Rename Bindery Object: %s", ", %s") ), + rec( -1, (1,48), NewObjectName ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xee00, 0xf000, 0xf300, 0xfc06, 0xfe07, 0xff00]) + # 2222/1735, 23/53 + pkt = NCP(0x1735, "Get Bindery Object ID", 'bindery') + pkt.Request((13,60), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Get Bindery Object: %s", ", %s") ), + ]) + pkt.Reply(62, [ + rec( 8, 4, ObjectID, ENC_LITTLE_ENDIAN ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, 48, ObjectNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xf000, 0xfc02, 0xfe01, 0xff00]) + # 2222/1736, 23/54 + pkt = NCP(0x1736, "Get Bindery Object Name", 'bindery') + pkt.Request( 14, [ + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.Reply( 62, [ + rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, 48, ObjectNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xf101, 0xfc02, 0xfe01, 0xff00]) + # 2222/1737, 23/55 + pkt = NCP(0x1737, "Scan Bindery Object", 'bindery') + pkt.Request((17,64), [ + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Scan Bindery Object: %s", ", %s") ), + ]) + pkt.Reply(65, [ + rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, 48, ObjectNameLen ), + rec( 62, 1, ObjectFlags ), + rec( 63, 1, ObjectSecurity ), + rec( 64, 1, ObjectHasProperties ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xfc02, + 0xfe01, 0xff00]) + # 2222/1738, 23/56 + pkt = NCP(0x1738, "Change Bindery Object Security", 'bindery') + pkt.Request((14,61), [ + rec( 10, 1, ObjectSecurity ), + rec( 11, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 13, (1,48), ObjectName, info_str=(ObjectName, "Change Bindery Object Security: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf501, 0xfc02, 0xfe01, 0xff00]) + # 2222/1739, 23/57 + pkt = NCP(0x1739, "Create Property", 'bindery') + pkt.Request((16,78), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName 
), + rec( -1, 1, PropertyType ), + rec( -1, 1, ObjectSecurity ), + rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Create Property: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xed00, 0xef00, 0xf000, 0xf101, + 0xf200, 0xf600, 0xf700, 0xfb00, 0xfc02, 0xfe01, + 0xff00]) + # 2222/173A, 23/58 + pkt = NCP(0x173A, "Delete Property", 'bindery') + pkt.Request((14,76), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Delete Property: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf600, 0xfb00, 0xfc02, + 0xfe01, 0xff00]) + # 2222/173B, 23/59 + pkt = NCP(0x173B, "Change Property Security", 'bindery') + pkt.Request((15,77), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, 1, ObjectSecurity ), + rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Change Property Security: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf200, 0xf600, 0xfb00, + 0xfc02, 0xfe01, 0xff00]) + # 2222/173C, 23/60 + pkt = NCP(0x173C, "Scan Property", 'bindery') + pkt.Request((18,80), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, 4, LastInstance, ENC_BIG_ENDIAN ), + rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Scan Property: %s", ", %s") ), + ]) + pkt.Reply( 32, [ + rec( 8, 16, PropertyName16 ), + rec( 24, 1, ObjectFlags ), + rec( 25, 1, ObjectSecurity ), + rec( 26, 4, SearchInstance, ENC_BIG_ENDIAN ), + rec( 30, 1, ValueAvailable ), + rec( 31, 1, MoreProperties ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf200, 0xf600, 0xfb00, + 0xfc02, 0xfe01, 0xff00]) + # 2222/173D, 23/61 + pkt = NCP(0x173D, "Read Property Value", 'bindery') + pkt.Request((15,77), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, 1, PropertySegment ), + rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Read Property Value: %s", ", %s") ), + ]) + pkt.Reply(138, [ + rec( 8, 128, PropertyData ), + rec( 136, 1, PropertyHasMoreSegments ), + rec( 137, 1, PropertyType ), + ]) + pkt.CompletionCodes([0x0000, 0x8800, 0x9300, 0x9600, 0xec01, + 0xf000, 0xf100, 0xf900, 0xfb02, 0xfc02, + 0xfe01, 0xff00]) + # 2222/173E, 23/62 + pkt = NCP(0x173E, "Write Property Value", 'bindery') + pkt.Request((144,206), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, 1, PropertySegment ), + rec( -1, 1, MoreFlag ), + rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Write Property Value: %s", ", %s") ), + # + # XXX - don't show this if MoreFlag isn't set? + # In at least some packets where it's not set, + # PropertyValue appears to be garbage.
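+ # (Editor's note: bindery property values travel in fixed 128-byte + # segments -- compare the 128-byte PropertyData in Read Property Value + # above; PropertySegment selects the segment being written and MoreFlag + # presumably marks that further segments follow.)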
+ # + rec( -1, 128, PropertyValue ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xec01, 0xf000, 0xf800, + 0xfb02, 0xfc03, 0xfe01, 0xff00 ]) + # 2222/173F, 23/63 + pkt = NCP(0x173F, "Verify Bindery Object Password", 'bindery') + pkt.Request((14,92), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Verify Bindery Object Password: %s", ", %s") ), + rec( -1, (1,32), Password ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xec01, 0xf000, 0xf101, + 0xfb02, 0xfc03, 0xfe01, 0xff00 ]) + # 2222/1740, 23/64 + pkt = NCP(0x1740, "Change Bindery Object Password", 'bindery') + pkt.Request((15,124), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Change Bindery Object Password: %s", ", %s") ), + rec( -1, (1,32), Password ), + rec( -1, (1,32), NewPassword ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xc501, 0xd701, 0xe800, 0xec01, 0xf001, + 0xf100, 0xf800, 0xfb02, 0xfc03, 0xfe01, 0xff00]) + # 2222/1741, 23/65 + pkt = NCP(0x1741, "Add Bindery Object To Set", 'bindery') + pkt.Request((17,126), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, (1,16), PropertyName ), + rec( -1, 2, MemberType, ENC_BIG_ENDIAN ), + rec( -1, (1,48), MemberName, info_str=(MemberName, "Add Bindery Object to Set: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xe900, 0xea00, 0xeb00, + 0xec01, 0xf000, 0xf800, 0xfb02, 0xfc03, 0xfe01, + 0xff00]) + # 2222/1742, 23/66 + pkt = NCP(0x1742, "Delete Bindery Object From Set", 'bindery') + pkt.Request((17,126), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, (1,16), PropertyName ), + rec( -1, 2, MemberType, ENC_BIG_ENDIAN ), + rec( -1, (1,48), MemberName, info_str=(MemberName, "Delete Bindery Object from Set: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xeb00, 0xf000, 0xf800, 0xfb02, + 0xfc03, 0xfe01, 0xff00]) + # 2222/1743, 23/67 + pkt = NCP(0x1743, "Is Bindery Object In Set", 'bindery') + pkt.Request((17,126), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName ), + rec( -1, (1,16), PropertyName ), + rec( -1, 2, MemberType, ENC_BIG_ENDIAN ), + rec( -1, (1,48), MemberName, info_str=(MemberName, "Is Bindery Object in Set: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xea00, 0xeb00, 0xec01, 0xf000, + 0xfb02, 0xfc03, 0xfe01, 0xff00]) + # 2222/1744, 23/68 + pkt = NCP(0x1744, "Close Bindery", 'bindery') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff00]) + # 2222/1745, 23/69 + pkt = NCP(0x1745, "Open Bindery", 'bindery') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff00]) + # 2222/1746, 23/70 + pkt = NCP(0x1746, "Get Bindery Access Level", 'bindery') + pkt.Request(10) + pkt.Reply(13, [ + rec( 8, 1, ObjectSecurity ), + rec( 9, 4, LoggedObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9600]) + # 2222/1747, 23/71 + pkt = NCP(0x1747, "Scan Bindery Object Trustee Paths", 'bindery') + pkt.Request(17, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 2, LastSequenceNumber, ENC_BIG_ENDIAN ), + rec( 13, 4, ObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.Reply((16,270), [ + rec( 8, 2, LastSequenceNumber, ENC_BIG_ENDIAN), + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + rec( 14, 1, ObjectSecurity ), + rec( 15, (1,255), Path ), + ]) + pkt.CompletionCodes([0x0000, 0x9300, 0x9600, 0xa100, 0xf000, 0xf100, + 
0xf200, 0xfc02, 0xfe01, 0xff00]) + # 2222/1748, 23/72 + pkt = NCP(0x1748, "Get Bindery Object Access Level", 'bindery') + pkt.Request(14, [ + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(9, [ + rec( 8, 1, ObjectSecurity ), + ]) + pkt.CompletionCodes([0x0000, 0x9600]) + # 2222/1749, 23/73 + pkt = NCP(0x1749, "Is Calling Station a Manager", 'bindery') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0003, 0xff1e]) + # 2222/174A, 23/74 + pkt = NCP(0x174A, "Keyed Verify Password", 'bindery') + pkt.Request((21,68), [ + rec( 10, 8, LoginKey ), + rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Verify Password: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc500, 0xfe01, 0xff0c]) + # 2222/174B, 23/75 + pkt = NCP(0x174B, "Keyed Change Password", 'bindery') + pkt.Request((22,100), [ + rec( 10, 8, LoginKey ), + rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Change Password: %s", ", %s") ), + rec( -1, (1,32), Password ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc500, 0xfe01, 0xff0c]) + # 2222/174C, 23/76 + pkt = NCP(0x174C, "List Relations Of an Object", 'bindery') + pkt.Request((18,80), [ + rec( 10, 4, LastSeen, ENC_BIG_ENDIAN ), + rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 16, (1,48), ObjectName, info_str=(ObjectName, "List Relations of an Object: %s", ", %s") ), + rec( -1, (1,16), PropertyName ), + ]) + pkt.Reply(14, [ + rec( 8, 2, RelationsCount, ENC_BIG_ENDIAN, var="x" ), + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0xf000, 0xf200, 0xfe01, 0xff00]) + # 2222/1764, 23/100 + pkt = NCP(0x1764, "Create Queue", 'qms') + pkt.Request((15,316), [ + rec( 10, 2, QueueType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), QueueName, info_str=(QueueName, "Create Queue: %s", ", %s") ), + rec( -1, 1, PathBase ), + rec( -1, (1,255), Path ), + ]) + pkt.Reply(12, [ + rec( 8, 4, QueueID ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9900, 0xd000, 0xd100, + 0xd200, 0xd300, 0xd400, 0xd500, 0xd601, + 0xd703, 0xd800, 0xd902, 0xda01, 0xdb02, + 0xee00, 0xff00]) + # 2222/1765, 23/101 + pkt = NCP(0x1765, "Destroy Queue", 'qms') + pkt.Request(14, [ + rec( 10, 4, QueueID ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1766, 23/102 + pkt = NCP(0x1766, "Read Queue Current Status", 'qms') + pkt.Request(14, [ + rec( 10, 4, QueueID ), + ]) + pkt.Reply(20, [ + rec( 8, 4, QueueID ), + rec( 12, 1, QueueStatus ), + rec( 13, 1, CurrentEntries ), + rec( 14, 1, CurrentServers, var="x" ), + rec( 15, 4, ServerID, repeat="x" ), + rec( 19, 1, ServerStationList, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1767, 23/103 + pkt = NCP(0x1767, "Set Queue Current Status", 'qms') + pkt.Request(15, [ + rec( 10, 4, QueueID ), + rec( 14, 1, QueueStatus ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, + 0xff00]) + # 2222/1768, 23/104 + pkt = NCP(0x1768, "Create Queue Job And File", 'qms') + pkt.Request(264, [ + rec( 10, 4, QueueID ), + rec( 14, 250, JobStruct ), + ]) + pkt.Reply(62, [ + rec( 8, 1, ClientStation ), + rec( 9, 1, ClientTaskNumber ), + rec( 10, 4, 
ClientIDNumber, ENC_BIG_ENDIAN ), + rec( 14, 4, TargetServerIDNumber, ENC_BIG_ENDIAN ), + rec( 18, 6, TargetExecutionTime ), + rec( 24, 6, JobEntryTime ), + rec( 30, 2, JobNumber, ENC_BIG_ENDIAN ), + rec( 32, 2, JobType, ENC_BIG_ENDIAN ), + rec( 34, 1, JobPosition ), + rec( 35, 1, JobControlFlags ), + rec( 36, 14, JobFileName ), + rec( 50, 6, JobFileHandle ), + rec( 56, 1, ServerStation ), + rec( 57, 1, ServerTaskNumber ), + rec( 58, 4, ServerID, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, + 0xff00]) + # 2222/1769, 23/105 + pkt = NCP(0x1769, "Close File And Start Queue Job", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/176A, 23/106 + pkt = NCP(0x176A, "Remove Job From Queue", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/176B, 23/107 + pkt = NCP(0x176B, "Get Queue Job List", 'qms') + pkt.Request(14, [ + rec( 10, 4, QueueID ), + ]) + pkt.Reply(12, [ + rec( 8, 2, JobCount, ENC_BIG_ENDIAN, var="x" ), + rec( 10, 2, JobNumber, ENC_BIG_ENDIAN, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/176C, 23/108 + pkt = NCP(0x176C, "Read Queue Job Entry", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(258, [ + rec( 8, 250, JobStruct ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/176D, 23/109 + pkt = NCP(0x176D, "Change Queue Job Entry", 'qms') + pkt.Request(260, [ + rec( 14, 250, JobStruct ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff18]) + # 2222/176E, 23/110 + pkt = NCP(0x176E, "Change Queue Job Position", 'qms') + pkt.Request(17, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + rec( 16, 1, NewPosition ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xd000, 0xd100, 0xd300, 0xd500, + 0xd601, 0xfe07, 0xff1f]) + # 2222/176F, 23/111 + pkt = NCP(0x176F, "Attach Queue Server To Queue", 'qms') + pkt.Request(14, [ + rec( 10, 4, QueueID ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xea00, + 0xfc06, 0xff00]) + # 2222/1770, 23/112 + pkt = NCP(0x1770, "Detach Queue Server From Queue", 'qms') + pkt.Request(14, [ + rec( 10, 4, QueueID ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1771, 23/113 + pkt = NCP(0x1771, "Service Queue Job", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, ServiceType, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(62, [ + rec( 8, 1, ClientStation ), + rec( 9, 1, ClientTaskNumber ), + rec( 10, 4, ClientIDNumber, 
ENC_BIG_ENDIAN ), + rec( 14, 4, TargetServerIDNumber, ENC_BIG_ENDIAN ), + rec( 18, 6, TargetExecutionTime ), + rec( 24, 6, JobEntryTime ), + rec( 30, 2, JobNumber, ENC_BIG_ENDIAN ), + rec( 32, 2, JobType, ENC_BIG_ENDIAN ), + rec( 34, 1, JobPosition ), + rec( 35, 1, JobControlFlags ), + rec( 36, 14, JobFileName ), + rec( 50, 6, JobFileHandle ), + rec( 56, 1, ServerStation ), + rec( 57, 1, ServerTaskNumber ), + rec( 58, 4, ServerID, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1772, 23/114 + pkt = NCP(0x1772, "Finish Servicing Queue Job", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + rec( 16, 2, ChargeInformation, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00]) + # 2222/1773, 23/115 + pkt = NCP(0x1773, "Abort Servicing Queue Job", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff18]) + # 2222/1774, 23/116 + pkt = NCP(0x1774, "Change To Client Rights", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff18]) + # 2222/1775, 23/117 + pkt = NCP(0x1775, "Restore Queue Server Rights", 'qms') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1776, 23/118 + pkt = NCP(0x1776, "Read Queue Server Current Status", 'qms') + pkt.Request(19, [ + rec( 10, 4, QueueID ), + rec( 14, 4, ServerID, ENC_BIG_ENDIAN ), + rec( 18, 1, ServerStation ), + ]) + pkt.Reply(72, [ + rec( 8, 64, ServerStatusRecord ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1777, 23/119 + pkt = NCP(0x1777, "Set Queue Server Current Status", 'qms') + pkt.Request(78, [ + rec( 10, 4, QueueID ), + rec( 14, 64, ServerStatusRecord ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1778, 23/120 + pkt = NCP(0x1778, "Get Queue Job File Size", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(18, [ + rec( 8, 4, QueueID ), + rec( 12, 2, JobNumber, ENC_BIG_ENDIAN ), + rec( 14, 4, FileSize, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00]) + # 2222/1779, 23/121 + pkt = NCP(0x1779, "Create Queue Job And File", 'qms') + pkt.Request(264, [ + rec( 10, 4, QueueID ), + rec( 14, 250, JobStruct3x ), + ]) + pkt.Reply(94, [ + rec( 8, 86, JobStructNew ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00]) + # 2222/177A, 23/122 + pkt = NCP(0x177A, 
"Read Queue Job Entry", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + ]) + pkt.Reply(258, [ + rec( 8, 250, JobStruct3x ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/177B, 23/123 + pkt = NCP(0x177B, "Change Queue Job Entry", 'qms') + pkt.Request(264, [ + rec( 10, 4, QueueID ), + rec( 14, 250, JobStruct ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xea02, 0xfc07, 0xff00]) + # 2222/177C, 23/124 + pkt = NCP(0x177C, "Service Queue Job", 'qms') + pkt.Request(16, [ + rec( 10, 4, QueueID ), + rec( 14, 2, ServiceType ), + ]) + pkt.Reply(94, [ + rec( 8, 86, JobStructNew ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00]) + # 2222/177D, 23/125 + pkt = NCP(0x177D, "Read Queue Current Status", 'qms') + pkt.Request(14, [ + rec( 10, 4, QueueID ), + ]) + pkt.Reply(32, [ + rec( 8, 4, QueueID ), + rec( 12, 1, QueueStatus ), + rec( 13, 3, Reserved3 ), + rec( 16, 4, CurrentEntries ), + rec( 20, 4, CurrentServers, var="x" ), + rec( 24, 4, ServerID, repeat="x" ), + rec( 28, 4, ServerStationLong, ENC_LITTLE_ENDIAN, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/177E, 23/126 + pkt = NCP(0x177E, "Set Queue Current Status", 'qms') + pkt.Request(15, [ + rec( 10, 4, QueueID ), + rec( 14, 1, QueueStatus ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/177F, 23/127 + pkt = NCP(0x177F, "Close File And Start Queue Job", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00]) + # 2222/1780, 23/128 + pkt = NCP(0x1780, "Remove Job From Queue", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1781, 23/129 + pkt = NCP(0x1781, "Get Queue Job List", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + ]) + pkt.Reply(20, [ + rec( 8, 4, TotalQueueJobs ), + rec( 12, 4, ReplyQueueJobNumbers, var="x" ), + rec( 16, 4, JobNumberLong, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1782, 23/130 + pkt = NCP(0x1782, "Change Job Priority", 'qms') + pkt.Request(22, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + rec( 18, 4, Priority ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1783, 23/131 + pkt = NCP(0x1783, "Finish Servicing Queue Job", 'qms') + pkt.Request(22, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + rec( 18, 4, ChargeInformation ), + ]) + pkt.Reply(8) + 
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00]) + # 2222/1784, 23/132 + pkt = NCP(0x1784, "Abort Servicing Queue Job", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff18]) + # 2222/1785, 23/133 + pkt = NCP(0x1785, "Change To Client Rights", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff18]) + # 2222/1786, 23/134 + pkt = NCP(0x1786, "Read Queue Server Current Status", 'qms') + pkt.Request(22, [ + rec( 10, 4, QueueID ), + rec( 14, 4, ServerID, ENC_BIG_ENDIAN ), + rec( 18, 4, ServerStation ), + ]) + pkt.Reply(72, [ + rec( 8, 64, ServerStatusRecord ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00]) + # 2222/1787, 23/135 + pkt = NCP(0x1787, "Get Queue Job File Size", 'qms') + pkt.Request(18, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + ]) + pkt.Reply(20, [ + rec( 8, 4, QueueID ), + rec( 12, 4, JobNumberLong ), + rec( 16, 4, FileSize, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200, + 0xd300, 0xd400, 0xd500, 0xd601, 0xd703, + 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00]) + # 2222/1788, 23/136 + pkt = NCP(0x1788, "Move Queue Job From Src Q to Dst Q", 'qms') + pkt.Request(22, [ + rec( 10, 4, QueueID ), + rec( 14, 4, JobNumberLong ), + rec( 18, 4, DstQueueID ), + ]) + pkt.Reply(12, [ + rec( 8, 4, JobNumberLong ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfc06]) + # 2222/1789, 23/137 + pkt = NCP(0x1789, "Get Queue Jobs From Form List", 'qms') + pkt.Request(24, [ + rec( 10, 4, QueueID ), + rec( 14, 4, QueueStartPosition ), + rec( 18, 4, FormTypeCnt, ENC_LITTLE_ENDIAN, var="x" ), + rec( 22, 2, FormType, repeat="x" ), + ]) + pkt.Reply(20, [ + rec( 8, 4, TotalQueueJobs ), + rec( 12, 4, JobCount, var="x" ), + rec( 16, 4, JobNumberLong, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xd300, 0xfc06]) + # 2222/178A, 23/138 + pkt = NCP(0x178A, "Service Queue Job By Form List", 'qms') + pkt.Request(24, [ + rec( 10, 4, QueueID ), + rec( 14, 4, QueueStartPosition ), + rec( 18, 4, FormTypeCnt, ENC_LITTLE_ENDIAN, var= "x" ), + rec( 22, 2, FormType, repeat="x" ), + ]) + pkt.Reply(94, [ + rec( 8, 86, JobStructNew ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xd902, 0xfc06, 0xff00]) + # 2222/1796, 23/150 + pkt = NCP(0x1796, "Get Current Account Status", 'accounting') + pkt.Request((13,60), [ + rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Get Current Account Status: %s", ", %s") ), + ]) + pkt.Reply(264, [ + rec( 8, 4, AccountBalance, ENC_BIG_ENDIAN ), + rec( 12, 4, CreditLimit, ENC_BIG_ENDIAN ), + rec( 16, 120, Reserved120 ), + rec( 136, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 140, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 144, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 148, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 152, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 156, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 160, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 164, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 168, 4, 
HolderID, ENC_BIG_ENDIAN ), + rec( 172, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 176, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 180, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 184, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 188, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 192, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 196, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 200, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 204, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 208, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 212, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 216, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 220, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 224, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 228, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 232, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 236, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 240, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 244, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 248, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 252, 4, HoldAmount, ENC_BIG_ENDIAN ), + rec( 256, 4, HolderID, ENC_BIG_ENDIAN ), + rec( 260, 4, HoldAmount, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc000, 0xc101, 0xc400, 0xe800, + 0xea00, 0xeb00, 0xec00, 0xfc06, 0xfe07, 0xff00]) + # 2222/1797, 23/151 + pkt = NCP(0x1797, "Submit Account Charge", 'accounting') + pkt.Request((26,327), [ + rec( 10, 2, ServiceType, ENC_BIG_ENDIAN ), + rec( 12, 4, ChargeAmount, ENC_BIG_ENDIAN ), + rec( 16, 4, HoldCancelAmount, ENC_BIG_ENDIAN ), + rec( 20, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 22, 2, CommentType, ENC_BIG_ENDIAN ), + rec( 24, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Charge: %s", ", %s") ), + rec( -1, (1,255), Comment ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8800, 0x9400, 0x9600, 0xa201, + 0xc000, 0xc101, 0xc200, 0xc400, 0xe800, 0xea00, + 0xeb00, 0xec00, 0xfe07, 0xff00]) + # 2222/1798, 23/152 + pkt = NCP(0x1798, "Submit Account Hold", 'accounting') + pkt.Request((17,64), [ + rec( 10, 4, HoldCancelAmount, ENC_BIG_ENDIAN ), + rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Hold: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8800, 0x9400, 0x9600, 0xa201, + 0xc000, 0xc101, 0xc200, 0xc400, 0xe800, 0xea00, + 0xeb00, 0xec00, 0xfe07, 0xff00]) + # 2222/1799, 23/153 + pkt = NCP(0x1799, "Submit Account Note", 'accounting') + pkt.Request((18,319), [ + rec( 10, 2, ServiceType, ENC_BIG_ENDIAN ), + rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ), + rec( 14, 2, CommentType, ENC_BIG_ENDIAN ), + rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Note: %s", ", %s") ), + rec( -1, (1,255), Comment ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x9600, 0xc000, 0xc101, 0xc400, + 0xe800, 0xea00, 0xeb00, 0xec00, 0xf000, 0xfc06, + 0xff00]) + # 2222/17c8, 23/200 + pkt = NCP(0x17c8, "Check Console Privileges", 'fileserver') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601]) + # 2222/17c9, 23/201 + pkt = NCP(0x17c9, "Get File Server Description Strings", 'fileserver') + pkt.Request(10) + pkt.Reply(108, [ + rec( 8, 100, DescriptionStrings ), + ]) + pkt.CompletionCodes([0x0000, 0x9600]) + # 2222/17CA, 23/202 + pkt = NCP(0x17CA, "Set File Server Date And Time", 'fileserver') + pkt.Request(16, [ + rec( 10, 1, Year ), + rec( 11, 1, Month ), + rec( 12, 1, Day ), + rec( 13, 1, Hour ), + rec( 14, 1, Minute ), + rec( 15, 1, Second ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601]) + # 2222/17CB, 23/203 + pkt = NCP(0x17CB, "Disable File Server Login", 'fileserver') + 
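# (Editor's note: a bare Request(10)/Reply(8) pair, as in this run of + # console calls, carries no payload; the sizes cover only the fixed NCP + # request and reply headers.) + 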
pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601]) + # 2222/17CC, 23/204 + pkt = NCP(0x17CC, "Enable File Server Login", 'fileserver') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601]) + # 2222/17CD, 23/205 + pkt = NCP(0x17CD, "Get File Server Login Status", 'fileserver') + pkt.Request(10) + pkt.Reply(9, [ + rec( 8, 1, UserLoginAllowed ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xfb01]) + # 2222/17CF, 23/207 + pkt = NCP(0x17CF, "Disable Transaction Tracking", 'fileserver') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601]) + # 2222/17D0, 23/208 + pkt = NCP(0x17D0, "Enable Transaction Tracking", 'fileserver') + pkt.Request(10) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601]) + # 2222/17D1, 23/209 + pkt = NCP(0x17D1, "Send Console Broadcast", 'fileserver') + pkt.Request((13,267), [ + rec( 10, 1, NumberOfStations, var="x" ), + rec( 11, 1, StationList, repeat="x" ), + rec( 12, (1, 255), TargetMessage, info_str=(TargetMessage, "Send Console Broadcast: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601, 0xfd00]) + # 2222/17D2, 23/210 + pkt = NCP(0x17D2, "Clear Connection Number", 'fileserver') + pkt.Request(11, [ + rec( 10, 1, ConnectionNumber, info_str=(ConnectionNumber, "Clear Connection Number %d", ", %d") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601, 0xfd00]) + # 2222/17D3, 23/211 + pkt = NCP(0x17D3, "Down File Server", 'fileserver') + pkt.Request(11, [ + rec( 10, 1, ForceFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601, 0xff00]) + # 2222/17D4, 23/212 + pkt = NCP(0x17D4, "Get File System Statistics", 'fileserver') + pkt.Request(10) + pkt.Reply(50, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 2, ConfiguredMaxOpenFiles ), + rec( 14, 2, ActualMaxOpenFiles ), + rec( 16, 2, CurrentOpenFiles ), + rec( 18, 4, TotalFilesOpened ), + rec( 22, 4, TotalReadRequests ), + rec( 26, 4, TotalWriteRequests ), + rec( 30, 2, CurrentChangedFATs ), + rec( 32, 4, TotalChangedFATs ), + rec( 36, 2, FATWriteErrors ), + rec( 38, 2, FatalFATWriteErrors ), + rec( 40, 2, FATScanErrors ), + rec( 42, 2, ActualMaxIndexedFiles ), + rec( 44, 2, ActiveIndexedFiles ), + rec( 46, 2, AttachedIndexedFiles ), + rec( 48, 2, AvailableIndexedFiles ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17D5, 23/213 + pkt = NCP(0x17D5, "Get Transaction Tracking Statistics", 'fileserver') + pkt.Request((13,267), [ + rec( 10, 2, LastRecordSeen ), + rec( 12, (1,255), SemaphoreName ), + ]) + pkt.Reply(53, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 1, TransactionTrackingSupported ), + rec( 13, 1, TransactionTrackingEnabled ), + rec( 14, 2, TransactionVolumeNumber ), + rec( 16, 2, ConfiguredMaxSimultaneousTransactions ), + rec( 18, 2, ActualMaxSimultaneousTransactions ), + rec( 20, 2, CurrentTransactionCount ), + rec( 22, 4, TotalTransactionsPerformed ), + rec( 26, 4, TotalWriteTransactionsPerformed ), + rec( 30, 4, TotalTransactionsBackedOut ), + rec( 34, 2, TotalUnfilledBackoutRequests ), + rec( 36, 2, TransactionDiskSpace ), + rec( 38, 4, TransactionFATAllocations ), + rec( 42, 4, TransactionFileSizeChanges ), + rec( 46, 4, TransactionFilesTruncated ), + rec( 50, 1, NumberOfEntries, var="x" ), + rec( 51, 2, ConnTaskStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17D6, 23/214 + pkt = NCP(0x17D6, "Read Disk Cache Statistics", 'fileserver') + pkt.Request(10) + pkt.Reply(86, [ + rec( 8, 4, SystemIntervalMarker, 
ENC_BIG_ENDIAN ), + rec( 12, 2, CacheBufferCount ), + rec( 14, 2, CacheBufferSize ), + rec( 16, 2, DirtyCacheBuffers ), + rec( 18, 4, CacheReadRequests ), + rec( 22, 4, CacheWriteRequests ), + rec( 26, 4, CacheHits ), + rec( 30, 4, CacheMisses ), + rec( 34, 4, PhysicalReadRequests ), + rec( 38, 4, PhysicalWriteRequests ), + rec( 42, 2, PhysicalReadErrors ), + rec( 44, 2, PhysicalWriteErrors ), + rec( 46, 4, CacheGetRequests ), + rec( 50, 4, CacheFullWriteRequests ), + rec( 54, 4, CachePartialWriteRequests ), + rec( 58, 4, BackgroundDirtyWrites ), + rec( 62, 4, BackgroundAgedWrites ), + rec( 66, 4, TotalCacheWrites ), + rec( 70, 4, CacheAllocations ), + rec( 74, 2, ThrashingCount ), + rec( 76, 2, LRUBlockWasDirty ), + rec( 78, 2, ReadBeyondWrite ), + rec( 80, 2, FragmentWriteOccurred ), + rec( 82, 2, CacheHitOnUnavailableBlock ), + rec( 84, 2, CacheBlockScrapped ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17D7, 23/215 + pkt = NCP(0x17D7, "Get Drive Mapping Table", 'fileserver') + pkt.Request(10) + pkt.Reply(184, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 1, SFTSupportLevel ), + rec( 13, 1, LogicalDriveCount ), + rec( 14, 1, PhysicalDriveCount ), + rec( 15, 1, DiskChannelTable ), + rec( 16, 4, Reserved4 ), + rec( 20, 2, PendingIOCommands, ENC_BIG_ENDIAN ), + rec( 22, 32, DriveMappingTable ), + rec( 54, 32, DriveMirrorTable ), + rec( 86, 32, DeadMirrorTable ), + rec( 118, 1, ReMirrorDriveNumber ), + rec( 119, 1, Filler ), + rec( 120, 4, ReMirrorCurrentOffset, ENC_BIG_ENDIAN ), + rec( 124, 60, SFTErrorTable ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17D8, 23/216 + pkt = NCP(0x17D8, "Read Physical Disk Statistics", 'fileserver') + pkt.Request(11, [ + rec( 10, 1, PhysicalDiskNumber ), + ]) + pkt.Reply(101, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 1, PhysicalDiskChannel ), + rec( 13, 1, DriveRemovableFlag ), + rec( 14, 1, PhysicalDriveType ), + rec( 15, 1, ControllerDriveNumber ), + rec( 16, 1, ControllerNumber ), + rec( 17, 1, ControllerType ), + rec( 18, 4, DriveSize ), + rec( 22, 2, DriveCylinders ), + rec( 24, 1, DriveHeads ), + rec( 25, 1, SectorsPerTrack ), + rec( 26, 64, DriveDefinitionString ), + rec( 90, 2, IOErrorCount ), + rec( 92, 4, HotFixTableStart ), + rec( 96, 2, HotFixTableSize ), + rec( 98, 2, HotFixBlocksAvailable ), + rec( 100, 1, HotFixDisabled ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17D9, 23/217 + pkt = NCP(0x17D9, "Get Disk Channel Statistics", 'fileserver') + pkt.Request(11, [ + rec( 10, 1, DiskChannelNumber ), + ]) + pkt.Reply(192, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 2, ChannelState, ENC_BIG_ENDIAN ), + rec( 14, 2, ChannelSynchronizationState, ENC_BIG_ENDIAN ), + rec( 16, 1, SoftwareDriverType ), + rec( 17, 1, SoftwareMajorVersionNumber ), + rec( 18, 1, SoftwareMinorVersionNumber ), + rec( 19, 65, SoftwareDescription ), + rec( 84, 8, IOAddressesUsed ), + rec( 92, 10, SharedMemoryAddresses ), + rec( 102, 4, InterruptNumbersUsed ), + rec( 106, 4, DMAChannelsUsed ), + rec( 110, 1, FlagBits ), + rec( 111, 1, Reserved ), + rec( 112, 80, ConfigurationDescription ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17DB, 23/219 + pkt = NCP(0x17DB, "Get Connection's Open Files", 'fileserver') + pkt.Request(14, [ + rec( 10, 2, ConnectionNumber ), + rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(32, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 1, NumberOfRecords, var="x" ), + 
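# (Editor's note: ConnStruct is a 21-byte sub-structure declared + # elsewhere in this file; repeat="x" dissects one copy per + # NumberOfRecords.) + 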
rec( 11, 21, ConnStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17DC, 23/220 + pkt = NCP(0x17DC, "Get Connection Using A File", 'fileserver') + pkt.Request((14,268), [ + rec( 10, 2, LastRecordSeen, ENC_BIG_ENDIAN ), + rec( 12, 1, DirHandle ), + rec( 13, (1,255), Path, info_str=(Path, "Get Connection Using File: %s", ", %s") ), + ]) + pkt.Reply(30, [ + rec( 8, 2, UseCount, ENC_BIG_ENDIAN ), + rec( 10, 2, OpenCount, ENC_BIG_ENDIAN ), + rec( 12, 2, OpenForReadCount, ENC_BIG_ENDIAN ), + rec( 14, 2, OpenForWriteCount, ENC_BIG_ENDIAN ), + rec( 16, 2, DenyReadCount, ENC_BIG_ENDIAN ), + rec( 18, 2, DenyWriteCount, ENC_BIG_ENDIAN ), + rec( 20, 2, NextRequestRecord, ENC_BIG_ENDIAN ), + rec( 22, 1, Locked ), + rec( 23, 1, NumberOfRecords, var="x" ), + rec( 24, 6, ConnFileStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17DD, 23/221 + pkt = NCP(0x17DD, "Get Physical Record Locks By Connection And File", 'fileserver') + pkt.Request(31, [ + rec( 10, 2, TargetConnectionNumber ), + rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ), + rec( 14, 1, VolumeNumber ), + rec( 15, 2, DirectoryID ), + rec( 17, 14, FileName14, info_str=(FileName14, "Get Physical Record Locks by Connection and File: %s", ", %s") ), + ]) + pkt.Reply(22, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 1, NumberOfLocks, var="x" ), + rec( 11, 1, Reserved ), + rec( 12, 10, LockStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17DE, 23/222 + pkt = NCP(0x17DE, "Get Physical Record Locks By File", 'fileserver') + pkt.Request((14,268), [ + rec( 10, 2, TargetConnectionNumber ), + rec( 12, 1, DirHandle ), + rec( 13, (1,255), Path, info_str=(Path, "Get Physical Record Locks by File: %s", ", %s") ), + ]) + pkt.Reply(28, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 1, NumberOfLocks, var="x" ), + rec( 11, 1, Reserved ), + rec( 12, 16, PhyLockStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17DF, 23/223 + pkt = NCP(0x17DF, "Get Logical Records By Connection", 'fileserver') + pkt.Request(14, [ + rec( 10, 2, TargetConnectionNumber ), + rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ), + ]) + pkt.Reply((14,268), [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 1, NumberOfRecords, var="x" ), + rec( 11, (3, 257), LogLockStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E0, 23/224 + pkt = NCP(0x17E0, "Get Logical Record Information", 'fileserver') + pkt.Request((13,267), [ + rec( 10, 2, LastRecordSeen ), + rec( 12, (1,255), LogicalRecordName, info_str=(LogicalRecordName, "Get Logical Record Information: %s", ", %s") ), + ]) + pkt.Reply(20, [ + rec( 8, 2, UseCount, ENC_BIG_ENDIAN ), + rec( 10, 2, ShareableLockCount, ENC_BIG_ENDIAN ), + rec( 12, 2, NextRequestRecord ), + rec( 14, 1, Locked ), + rec( 15, 1, NumberOfRecords, var="x" ), + rec( 16, 4, LogRecStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E1, 23/225 + pkt = NCP(0x17E1, "Get Connection's Semaphores", 'fileserver') + pkt.Request(14, [ + rec( 10, 2, ConnectionNumber ), + rec( 12, 2, LastRecordSeen ), + ]) + pkt.Reply((18,272), [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, NumberOfSemaphores, var="x" ), + rec( 12, (6,260), SemaStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E2, 23/226 + pkt = NCP(0x17E2, "Get Semaphore Information", 'fileserver') + 
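# Every verb in this table, including the 0x17E2 definition that
+ # continues below, follows the same four-step recipe: construct
+ # NCP(function, name, group), describe the request layout, describe the
+ # reply layout, then list the permitted completion codes. A minimal
+ # illustrative skeleton (the function number and sizes are made up, not
+ # a real verb):
+ #   pkt = NCP(0x1234, "Example Call", 'fileserver')
+ #   pkt.Request(12, [ rec( 10, 2, LastRecordSeen ), ])
+ #   pkt.Reply(10, [ rec( 8, 2, OpenCount ), ])
+ #   pkt.CompletionCodes([0x0000, 0x9600])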
pkt.Request((13,267), [ + rec( 10, 2, LastRecordSeen ), + rec( 12, (1,255), SemaphoreName, info_str=(SemaphoreName, "Get Semaphore Information: %s", ", %s") ), + ]) + pkt.Reply(17, [ + rec( 8, 2, NextRequestRecord, ENC_BIG_ENDIAN ), + rec( 10, 2, OpenCount, ENC_BIG_ENDIAN ), + rec( 12, 1, SemaphoreValue ), + rec( 13, 1, NumberOfRecords, var="x" ), + rec( 14, 3, SemaInfoStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E3, 23/227 + pkt = NCP(0x17E3, "Get LAN Driver Configuration Information", 'fileserver') + pkt.Request(11, [ + rec( 10, 1, LANDriverNumber ), + ]) + pkt.Reply(180, [ + rec( 8, 4, NetworkAddress, ENC_BIG_ENDIAN ), + rec( 12, 6, HostAddress ), + rec( 18, 1, BoardInstalled ), + rec( 19, 1, OptionNumber ), + rec( 20, 160, ConfigurationText ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E5, 23/229 + pkt = NCP(0x17E5, "Get Connection Usage Statistics", 'fileserver') + pkt.Request(12, [ + rec( 10, 2, ConnectionNumber ), + ]) + pkt.Reply(26, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 6, BytesRead ), + rec( 16, 6, BytesWritten ), + rec( 22, 4, TotalRequestPackets ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E6, 23/230 + pkt = NCP(0x17E6, "Get Object's Remaining Disk Space", 'fileserver') + pkt.Request(14, [ + rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(21, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 4, ObjectID ), + rec( 16, 4, UnusedDiskBlocks, ENC_BIG_ENDIAN ), + rec( 20, 1, RestrictionsEnforced ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E7, 23/231 + pkt = NCP(0x17E7, "Get File Server LAN I/O Statistics", 'fileserver') + pkt.Request(10) + pkt.Reply(74, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 2, ConfiguredMaxRoutingBuffers ), + rec( 14, 2, ActualMaxUsedRoutingBuffers ), + rec( 16, 2, CurrentlyUsedRoutingBuffers ), + rec( 18, 4, TotalFileServicePackets ), + rec( 22, 2, TurboUsedForFileService ), + rec( 24, 2, PacketsFromInvalidConnection ), + rec( 26, 2, BadLogicalConnectionCount ), + rec( 28, 2, PacketsReceivedDuringProcessing ), + rec( 30, 2, RequestsReprocessed ), + rec( 32, 2, PacketsWithBadSequenceNumber ), + rec( 34, 2, DuplicateRepliesSent ), + rec( 36, 2, PositiveAcknowledgesSent ), + rec( 38, 2, PacketsWithBadRequestType ), + rec( 40, 2, AttachDuringProcessing ), + rec( 42, 2, AttachWhileProcessingAttach ), + rec( 44, 2, ForgedDetachedRequests ), + rec( 46, 2, DetachForBadConnectionNumber ), + rec( 48, 2, DetachDuringProcessing ), + rec( 50, 2, RepliesCancelled ), + rec( 52, 2, PacketsDiscardedByHopCount ), + rec( 54, 2, PacketsDiscardedUnknownNet ), + rec( 56, 2, IncomingPacketDiscardedNoDGroup ), + rec( 58, 2, OutgoingPacketDiscardedNoTurboBuffer ), + rec( 60, 2, IPXNotMyNetwork ), + rec( 62, 4, NetBIOSBroadcastWasPropagated ), + rec( 66, 4, TotalOtherPackets ), + rec( 70, 4, TotalRoutedPackets ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E8, 23/232 + pkt = NCP(0x17E8, "Get File Server Misc Information", 'fileserver') + pkt.Request(10) + pkt.Reply(40, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 1, ProcessorType ), + rec( 13, 1, Reserved ), + rec( 14, 1, NumberOfServiceProcesses ), + rec( 15, 1, ServerUtilizationPercentage ), + rec( 16, 2, ConfiguredMaxBinderyObjects ), + rec( 18, 2, ActualMaxBinderyObjects ), + rec( 20, 2, CurrentUsedBinderyObjects ), + rec( 22, 2, TotalServerMemory ), + 
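# Offsets in rec() rows are absolute byte positions within the packet:
+ # reply payloads start at offset 8, just past the 8-byte NCP reply
+ # header, and each row must begin where the previous one ends. Here
+ # TotalServerMemory at offset 22 plus its 2-byte length places the next
+ # field at offset 24.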
rec( 24, 2, WastedServerMemory ), + rec( 26, 2, NumberOfDynamicMemoryAreas, var="x" ), + rec( 28, 12, DynMemStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17E9, 23/233 + pkt = NCP(0x17E9, "Get Volume Information", 'fileserver') + pkt.Request(11, [ + rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Information on Volume %d", ", %d") ), + ]) + pkt.Reply(48, [ + rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ), + rec( 12, 1, VolumeNumber ), + rec( 13, 1, LogicalDriveNumber ), + rec( 14, 2, BlockSize ), + rec( 16, 2, StartingBlock ), + rec( 18, 2, TotalBlocks ), + rec( 20, 2, FreeBlocks ), + rec( 22, 2, TotalDirectoryEntries ), + rec( 24, 2, FreeDirectoryEntries ), + rec( 26, 2, ActualMaxUsedDirectoryEntries ), + rec( 28, 1, VolumeHashedFlag ), + rec( 29, 1, VolumeCachedFlag ), + rec( 30, 1, VolumeRemovableFlag ), + rec( 31, 1, VolumeMountedFlag ), + rec( 32, 16, VolumeName ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17EA, 23/234 + pkt = NCP(0x17EA, "Get Connection's Task Information", 'fileserver') + pkt.Request(12, [ + rec( 10, 2, ConnectionNumber ), + ]) + pkt.Reply(13, [ + rec( 8, 1, ConnLockStatus ), + rec( 9, 1, NumberOfActiveTasks, var="x" ), + rec( 10, 3, TaskStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17EB, 23/235 + pkt = NCP(0x17EB, "Get Connection's Open Files", 'fileserver') + pkt.Request(14, [ + rec( 10, 2, ConnectionNumber ), + rec( 12, 2, LastRecordSeen ), + ]) + pkt.Reply((29,283), [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, NumberOfRecords, var="x" ), + rec( 12, (17, 271), OpnFilesStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17EC, 23/236 + pkt = NCP(0x17EC, "Get Connection Using A File", 'fileserver') + pkt.Request(18, [ + rec( 10, 1, DataStreamNumber ), + rec( 11, 1, VolumeNumber ), + rec( 12, 4, DirectoryBase, ENC_LITTLE_ENDIAN ), + rec( 16, 2, LastRecordSeen ), + ]) + pkt.Reply(33, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, FileUseCount ), + rec( 12, 2, OpenCount ), + rec( 14, 2, OpenForReadCount ), + rec( 16, 2, OpenForWriteCount ), + rec( 18, 2, DenyReadCount ), + rec( 20, 2, DenyWriteCount ), + rec( 22, 1, Locked ), + rec( 23, 1, ForkCount ), + rec( 24, 2, NumberOfRecords, var="x" ), + rec( 26, 7, ConnFileStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00]) + # 2222/17ED, 23/237 + pkt = NCP(0x17ED, "Get Physical Record Locks By Connection And File", 'fileserver') + pkt.Request(20, [ + rec( 10, 2, TargetConnectionNumber ), + rec( 12, 1, DataStreamNumber ), + rec( 13, 1, VolumeNumber ), + rec( 14, 4, DirectoryBase, ENC_LITTLE_ENDIAN ), + rec( 18, 2, LastRecordSeen ), + ]) + pkt.Reply(23, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, NumberOfLocks, ENC_LITTLE_ENDIAN, var="x" ), + rec( 12, 11, LockStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17EE, 23/238 + pkt = NCP(0x17EE, "Get Physical Record Locks By File", 'fileserver') + pkt.Request(18, [ + rec( 10, 1, DataStreamNumber ), + rec( 11, 1, VolumeNumber ), + rec( 12, 4, DirectoryBase ), + rec( 16, 2, LastRecordSeen ), + ]) + pkt.Reply(30, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, NumberOfLocks, ENC_LITTLE_ENDIAN, var="x" ), + rec( 12, 18, PhyLockStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17EF, 23/239 + pkt = NCP(0x17EF, "Get Logical Records By Connection", 
'fileserver') + pkt.Request(14, [ + rec( 10, 2, TargetConnectionNumber ), + rec( 12, 2, LastRecordSeen ), + ]) + pkt.Reply((16,270), [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, NumberOfRecords, var="x" ), + rec( 12, (4, 258), LogLockStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17F0, 23/240 + pkt = NCP(0x17F0, "Get Logical Record Information (old)", 'fileserver') + pkt.Request((13,267), [ + rec( 10, 2, LastRecordSeen ), + rec( 12, (1,255), LogicalRecordName ), + ]) + pkt.Reply(22, [ + rec( 8, 2, ShareableLockCount ), + rec( 10, 2, UseCount ), + rec( 12, 1, Locked ), + rec( 13, 2, NextRequestRecord ), + rec( 15, 2, NumberOfRecords, var="x" ), + rec( 17, 5, LogRecStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17F1, 23/241 + pkt = NCP(0x17F1, "Get Connection's Semaphores", 'fileserver') + pkt.Request(14, [ + rec( 10, 2, ConnectionNumber ), + rec( 12, 2, LastRecordSeen ), + ]) + pkt.Reply((19,273), [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, NumberOfSemaphores, var="x" ), + rec( 12, (7, 261), SemaStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17F2, 23/242 + pkt = NCP(0x17F2, "Get Semaphore Information", 'fileserver') + pkt.Request((13,267), [ + rec( 10, 2, LastRecordSeen ), + rec( 12, (1,255), SemaphoreName, info_str=(SemaphoreName, "Get Semaphore Information: %s", ", %s") ), + ]) + pkt.Reply(20, [ + rec( 8, 2, NextRequestRecord ), + rec( 10, 2, OpenCount ), + rec( 12, 2, SemaphoreValue ), + rec( 14, 2, NumberOfRecords, var="x" ), + rec( 16, 4, SemaInfoStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17F3, 23/243 + pkt = NCP(0x17F3, "Map Directory Number to Path", 'file') + pkt.Request(16, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, DirectoryNumber ), + rec( 15, 1, NameSpace ), + ]) + pkt.Reply((9,263), [ + rec( 8, (1,255), Path ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9c00, 0xc601, 0xfd00, 0xff00]) + # 2222/17F4, 23/244 + pkt = NCP(0x17F4, "Convert Path to Dir Entry", 'file') + pkt.Request((12,266), [ + rec( 10, 1, DirHandle ), + rec( 11, (1,255), Path, info_str=(Path, "Convert Path to Directory Entry: %s", ", %s") ), + ]) + pkt.Reply(13, [ + rec( 8, 1, VolumeNumber ), + rec( 9, 4, DirectoryNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00]) + # 2222/17FD, 23/253 + pkt = NCP(0x17FD, "Send Console Broadcast", 'fileserver') + pkt.Request((16, 270), [ + rec( 10, 1, NumberOfStations, var="x" ), + rec( 11, 4, StationList, repeat="x" ), + rec( 15, (1, 255), TargetMessage, info_str=(TargetMessage, "Send Console Broadcast: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601, 0xfd00]) + # 2222/17FE, 23/254 + pkt = NCP(0x17FE, "Clear Connection Number", 'fileserver') + pkt.Request(14, [ + rec( 10, 4, ConnectionNumber ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xc601, 0xfd00]) + # 2222/18, 24 + pkt = NCP(0x18, "End of Job", 'connection') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/19, 25 + pkt = NCP(0x19, "Logout", 'connection') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/1A, 26 + pkt = NCP(0x1A, "Log Physical Record", 'sync') + pkt.Request(24, [ + rec( 7, 1, LockFlag ), + rec( 8, 6, FileHandle ), + rec( 14, 4, LockAreasStartOffset, ENC_BIG_ENDIAN ), + rec( 18, 4, LockAreaLen, ENC_BIG_ENDIAN, info_str=(LockAreaLen, "Lock Record - Length of %d", "%d") ), + 
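# The info_str triple above reads (field, first_format, repeat_format):
+ # the first occurrence of the field is written into the Info column with
+ # the first format string, and later occurrences append the second.
+ # Presumed expansion, roughly (illustrative, not this file's real code):
+ #   info  = "Lock Record - Length of %d" % first_length
+ #   info += "%d" % next_length    # appended per additional occurrence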
rec( 22, 2, LockTimeout ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01]) + # 2222/1B, 27 + pkt = NCP(0x1B, "Lock Physical Record Set", 'sync') + pkt.Request(10, [ + rec( 7, 1, LockFlag ), + rec( 8, 2, LockTimeout ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01]) + # 2222/1C, 28 + pkt = NCP(0x1C, "Release Physical Record", 'sync') + pkt.Request(22, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle ), + rec( 14, 4, LockAreasStartOffset ), + rec( 18, 4, LockAreaLen, info_str=(LockAreaLen, "Release Lock Record - Length of %d", "%d") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03]) + # 2222/1D, 29 + pkt = NCP(0x1D, "Release Physical Record Set", 'sync') + pkt.Request(8, [ + rec( 7, 1, LockFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03]) + # 2222/1E, 30 #Tested and fixed 6-14-02 GM + pkt = NCP(0x1E, "Clear Physical Record", 'sync') + pkt.Request(22, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle ), + rec( 14, 4, LockAreasStartOffset, ENC_BIG_ENDIAN ), + rec( 18, 4, LockAreaLen, ENC_BIG_ENDIAN, info_str=(LockAreaLen, "Clear Lock Record - Length of %d", "%d") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03]) + # 2222/1F, 31 + pkt = NCP(0x1F, "Clear Physical Record Set", 'sync') + pkt.Request(8, [ + rec( 7, 1, LockFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03]) + # 2222/2000, 32/00 + pkt = NCP(0x2000, "Open Semaphore", 'sync', has_length=0) + pkt.Request((10,264), [ + rec( 8, 1, InitialSemaphoreValue ), + rec( 9, (1,255), SemaphoreName, info_str=(SemaphoreName, "Open Semaphore: %s", ", %s") ), + ]) + pkt.Reply(13, [ + rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ), + rec( 12, 1, SemaphoreOpenCount ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xff01]) + # 2222/2001, 32/01 + pkt = NCP(0x2001, "Examine Semaphore", 'sync', has_length=0) + pkt.Request(12, [ + rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(10, [ + rec( 8, 1, SemaphoreValue ), + rec( 9, 1, SemaphoreOpenCount ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xff01]) + # 2222/2002, 32/02 + pkt = NCP(0x2002, "Wait On Semaphore", 'sync', has_length=0) + pkt.Request(14, [ + rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ), + rec( 12, 2, SemaphoreTimeOut, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xff01]) + # 2222/2003, 32/03 + pkt = NCP(0x2003, "Signal Semaphore", 'sync', has_length=0) + pkt.Request(12, [ + rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xff01]) + # 2222/2004, 32/04 + pkt = NCP(0x2004, "Close Semaphore", 'sync', has_length=0) + pkt.Request(12, [ + rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xff01]) + # 2222/21, 33 + pkt = NCP(0x21, "Negotiate Buffer Size", 'connection') + pkt.Request(9, [ + rec( 7, 2, BufferSize, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(10, [ + rec( 8, 2, BufferSize, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/2200, 34/00 + pkt = NCP(0x2200, "TTS Is Available", 'tts', has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0001, 0xfd03, 0xff12]) + # 2222/2201, 34/01 + pkt = NCP(0x2201, "TTS Begin Transaction", 'tts', has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/2202, 34/02 + pkt = 
NCP(0x2202, "TTS End Transaction", 'tts', has_length=0) + pkt.Request(8) + pkt.Reply(12, [ + rec( 8, 4, TransactionNumber, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0xff01]) + # 2222/2203, 34/03 + pkt = NCP(0x2203, "TTS Abort Transaction", 'tts', has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfd03, 0xfe0b, 0xff01]) + # 2222/2204, 34/04 + pkt = NCP(0x2204, "TTS Transaction Status", 'tts', has_length=0) + pkt.Request(12, [ + rec( 8, 4, TransactionNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/2205, 34/05 + pkt = NCP(0x2205, "TTS Get Application Thresholds", 'tts', has_length=0) + pkt.Request(8) + pkt.Reply(10, [ + rec( 8, 1, LogicalLockThreshold ), + rec( 9, 1, PhysicalLockThreshold ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/2206, 34/06 + pkt = NCP(0x2206, "TTS Set Application Thresholds", 'tts', has_length=0) + pkt.Request(10, [ + rec( 8, 1, LogicalLockThreshold ), + rec( 9, 1, PhysicalLockThreshold ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600]) + # 2222/2207, 34/07 + pkt = NCP(0x2207, "TTS Get Workstation Thresholds", 'tts', has_length=0) + pkt.Request(8) + pkt.Reply(10, [ + rec( 8, 1, LogicalLockThreshold ), + rec( 9, 1, PhysicalLockThreshold ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/2208, 34/08 + pkt = NCP(0x2208, "TTS Set Workstation Thresholds", 'tts', has_length=0) + pkt.Request(10, [ + rec( 8, 1, LogicalLockThreshold ), + rec( 9, 1, PhysicalLockThreshold ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/2209, 34/09 + pkt = NCP(0x2209, "TTS Get Transaction Bits", 'tts', has_length=0) + pkt.Request(8) + pkt.Reply(9, [ + rec( 8, 1, ControlFlags ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/220A, 34/10 + pkt = NCP(0x220A, "TTS Set Transaction Bits", 'tts', has_length=0) + pkt.Request(9, [ + rec( 8, 1, ControlFlags ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/2301, 35/01 + pkt = NCP(0x2301, "AFP Create Directory", 'afp') + pkt.Request((49, 303), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID ), + rec( 15, 1, Reserved ), + rec( 16, 4, CreatorID ), + rec( 20, 4, Reserved4 ), + rec( 24, 2, FinderAttr ), + rec( 26, 2, HorizLocation ), + rec( 28, 2, VertLocation ), + rec( 30, 2, FileDirWindow ), + rec( 32, 16, Reserved16 ), + rec( 48, (1,255), Path, info_str=(Path, "AFP Create Directory: %s", ", %s") ), + ]) + pkt.Reply(12, [ + rec( 8, 4, NewDirectoryID ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8400, 0x8800, 0x9300, 0x9600, 0x9804, + 0x9900, 0x9c03, 0x9e02, 0xa100, 0xa201, 0xfd00, 0xff18]) + # 2222/2302, 35/02 + pkt = NCP(0x2302, "AFP Create File", 'afp') + pkt.Request((49, 303), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID ), + rec( 15, 1, DeleteExistingFileFlag ), + rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 20, 4, Reserved4 ), + rec( 24, 2, FinderAttr ), + rec( 26, 2, HorizLocation, ENC_BIG_ENDIAN ), + rec( 28, 2, VertLocation, ENC_BIG_ENDIAN ), + rec( 30, 2, FileDirWindow, ENC_BIG_ENDIAN ), + rec( 32, 16, Reserved16 ), + rec( 48, (1,255), Path, info_str=(Path, "AFP Create File: %s", ", %s") ), + ]) + pkt.Reply(12, [ + rec( 8, 4, NewDirectoryID ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8400, 0x8701, 0x8800, + 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9300, 0x9600, 0x9804, + 0x9900, 0x9b03, 0x9c03, 0x9e02, 0xa100, 0xa201, 0xfd00, + 0xff18]) + # 2222/2303, 35/03 + pkt = NCP(0x2303, "AFP Delete", 'afp') + pkt.Request((16,270), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID 
), + rec( 15, (1,255), Path, info_str=(Path, "AFP Delete: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x8a00, 0x8d00, 0x8e00, 0x8f00, + 0x9000, 0x9300, 0x9600, 0x9804, 0x9b03, 0x9c03, 0x9e02, + 0xa000, 0xa100, 0xa201, 0xfd00, 0xff19]) + # 2222/2304, 35/04 + pkt = NCP(0x2304, "AFP Get Entry ID From Name", 'afp') + pkt.Request((16,270), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID ), + rec( 15, (1,255), Path, info_str=(Path, "AFP Get Entry from Name: %s", ", %s") ), + ]) + pkt.Reply(12, [ + rec( 8, 4, TargetEntryID, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03, + 0xa100, 0xa201, 0xfd00, 0xff19]) + # 2222/2305, 35/05 + pkt = NCP(0x2305, "AFP Get File Information", 'afp') + pkt.Request((18,272), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID ), + rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ), + rec( 17, (1,255), Path, info_str=(Path, "AFP Get File Information: %s", ", %s") ), + ]) + pkt.Reply(121, [ + rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ), + rec( 12, 4, ParentID, ENC_BIG_ENDIAN ), + rec( 16, 2, AttributesDef16, ENC_LITTLE_ENDIAN ), + rec( 18, 4, DataForkLen, ENC_BIG_ENDIAN ), + rec( 22, 4, ResourceForkLen, ENC_BIG_ENDIAN ), + rec( 26, 2, TotalOffspring, ENC_BIG_ENDIAN ), + rec( 28, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 30, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 32, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 34, 2, ModifiedTime, ENC_BIG_ENDIAN ), + rec( 36, 2, ArchivedDate, ENC_BIG_ENDIAN ), + rec( 38, 2, ArchivedTime, ENC_BIG_ENDIAN ), + rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 44, 4, Reserved4 ), + rec( 48, 2, FinderAttr ), + rec( 50, 2, HorizLocation ), + rec( 52, 2, VertLocation ), + rec( 54, 2, FileDirWindow ), + rec( 56, 16, Reserved16 ), + rec( 72, 32, LongName ), + rec( 104, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 108, 12, ShortName ), + rec( 120, 1, AccessPrivileges ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03, + 0xa100, 0xa201, 0xfd00, 0xff19]) + # 2222/2306, 35/06 + pkt = NCP(0x2306, "AFP Get Entry ID From NetWare Handle", 'afp') + pkt.Request(16, [ + rec( 10, 6, FileHandle ), + ]) + pkt.Reply(14, [ + rec( 8, 1, VolumeID ), + rec( 9, 4, TargetEntryID, ENC_BIG_ENDIAN ), + rec( 13, 1, ForkIndicator ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0xa201]) + # 2222/2307, 35/07 + pkt = NCP(0x2307, "AFP Rename", 'afp') + pkt.Request((21, 529), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, MacSourceBaseID, ENC_BIG_ENDIAN ), + rec( 15, 4, MacDestinationBaseID, ENC_BIG_ENDIAN ), + rec( 19, (1,255), Path, info_str=(Path, "AFP Rename: %s", ", %s") ), + rec( -1, (1,255), NewFileNameLen ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8301, 0x8401, 0x8800, 0x8b00, 0x8e00, + 0x9001, 0x9201, 0x9300, 0x9600, 0x9804, 0x9900, + 0x9c03, 0x9e00, 0xa100, 0xa201, 0xfd00, 0xff0a]) + # 2222/2308, 35/08 + pkt = NCP(0x2308, "AFP Open File Fork", 'afp') + pkt.Request((18, 272), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, MacBaseDirectoryID ), + rec( 15, 1, ForkIndicator ), + rec( 16, 1, AccessMode ), + rec( 17, (1,255), Path, info_str=(Path, "AFP Open File Fork: %s", ", %s") ), + ]) + pkt.Reply(22, [ + rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ), + rec( 12, 4, DataForkLen, ENC_BIG_ENDIAN ), + rec( 16, 6, NetWareAccessHandle ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8800, 0x9300, + 0x9400, 0x9600, 0x9804, 0x9900, 0x9c03, 0xa100, + 0xa201, 0xfd00, 0xff16]) + # 2222/2309, 35/09 + 
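# A reminder on the "# 2222/xxxx, ff/ss" headers used throughout this
+ # table: 2222 is the NCP request packet type, the first pair is the
+ # function/subfunction in hex, and the second is the same pair in
+ # decimal. For the verb below, 0x23 == 35 and 0x09 == 9, hence "35/09".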
pkt = NCP(0x2309, "AFP Set File Information", 'afp') + pkt.Request((64, 318), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, MacBaseDirectoryID ), + rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ), + rec( 17, 2, MacAttr, ENC_BIG_ENDIAN ), + rec( 19, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 21, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 23, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 25, 2, ModifiedTime, ENC_BIG_ENDIAN ), + rec( 27, 2, ArchivedDate, ENC_BIG_ENDIAN ), + rec( 29, 2, ArchivedTime, ENC_BIG_ENDIAN ), + rec( 31, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 35, 4, Reserved4 ), + rec( 39, 2, FinderAttr ), + rec( 41, 2, HorizLocation ), + rec( 43, 2, VertLocation ), + rec( 45, 2, FileDirWindow ), + rec( 47, 16, Reserved16 ), + rec( 63, (1,255), Path, info_str=(Path, "AFP Set File Information: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400, + 0x9500, 0x9600, 0x9804, 0x9c03, 0xa100, 0xa201, + 0xfd00, 0xff16]) + # 2222/230A, 35/10 + pkt = NCP(0x230A, "AFP Scan File Information", 'afp') + pkt.Request((26, 280), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, MacBaseDirectoryID ), + rec( 15, 4, MacLastSeenID, ENC_BIG_ENDIAN ), + rec( 19, 2, DesiredResponseCount, ENC_BIG_ENDIAN ), + rec( 21, 2, SearchBitMap, ENC_BIG_ENDIAN ), + rec( 23, 2, RequestBitMap, ENC_BIG_ENDIAN ), + rec( 25, (1,255), Path, info_str=(Path, "AFP Scan File Information: %s", ", %s") ), + ]) + pkt.Reply(123, [ + rec( 8, 2, ActualResponseCount, ENC_BIG_ENDIAN, var="x" ), + rec( 10, 113, AFP10Struct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, + 0x9c03, 0xa100, 0xa201, 0xfd00, 0xff16]) + # 2222/230B, 35/11 + pkt = NCP(0x230B, "AFP Alloc Temporary Directory Handle", 'afp') + pkt.Request((16,270), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, MacBaseDirectoryID ), + rec( 15, (1,255), Path, info_str=(Path, "AFP Allocate Temporary Directory Handle: %s", ", %s") ), + ]) + pkt.Reply(10, [ + rec( 8, 1, DirHandle ), + rec( 9, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0x9d00, 0xa100, + 0xa201, 0xfd00, 0xff00]) + # 2222/230C, 35/12 + pkt = NCP(0x230C, "AFP Get Entry ID From Path Name", 'afp') + pkt.Request((12,266), [ + rec( 10, 1, DirHandle ), + rec( 11, (1,255), Path, info_str=(Path, "AFP Get Entry ID from Path Name: %s", ", %s") ), + ]) + pkt.Reply(12, [ + rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa100, 0xa201, + 0xfd00, 0xff00]) + # 2222/230D, 35/13 + pkt = NCP(0x230D, "AFP 2.0 Create Directory", 'afp') + pkt.Request((55,309), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID ), + rec( 15, 1, Reserved ), + rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 20, 4, Reserved4 ), + rec( 24, 2, FinderAttr ), + rec( 26, 2, HorizLocation ), + rec( 28, 2, VertLocation ), + rec( 30, 2, FileDirWindow ), + rec( 32, 16, Reserved16 ), + rec( 48, 6, ProDOSInfo ), + rec( 54, (1,255), Path, info_str=(Path, "AFP 2.0 Create Directory: %s", ", %s") ), + ]) + pkt.Reply(12, [ + rec( 8, 4, NewDirectoryID ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8400, 0x8800, 0x9300, + 0x9600, 0x9804, 0x9900, 0x9c03, 0x9e00, + 0xa100, 0xa201, 0xfd00, 0xff00]) + # 2222/230E, 35/14 + pkt = NCP(0x230E, "AFP 2.0 Create File", 'afp') + pkt.Request((55,309), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID ), + rec( 15, 1, DeleteExistingFileFlag ), + rec( 16, 4, CreatorID, 
ENC_BIG_ENDIAN ), + rec( 20, 4, Reserved4 ), + rec( 24, 2, FinderAttr ), + rec( 26, 2, HorizLocation ), + rec( 28, 2, VertLocation ), + rec( 30, 2, FileDirWindow ), + rec( 32, 16, Reserved16 ), + rec( 48, 6, ProDOSInfo ), + rec( 54, (1,255), Path, info_str=(Path, "AFP 2.0 Create File: %s", ", %s") ), + ]) + pkt.Reply(12, [ + rec( 8, 4, NewDirectoryID ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8400, + 0x8701, 0x8800, 0x8a00, 0x8d00, 0x8e00, + 0x8f00, 0x9001, 0x9300, 0x9600, 0x9804, + 0x9900, 0x9b03, 0x9c03, 0x9e00, 0xa100, + 0xa201, 0xfd00, 0xff00]) + # 2222/230F, 35/15 + pkt = NCP(0x230F, "AFP 2.0 Get File Or Directory Information", 'afp') + pkt.Request((18,272), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, BaseDirectoryID ), + rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ), + rec( 17, (1,255), Path, info_str=(Path, "AFP 2.0 Get Information: %s", ", %s") ), + ]) + pkt.Reply(128, [ + rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ), + rec( 12, 4, ParentID, ENC_BIG_ENDIAN ), + rec( 16, 2, AttributesDef16 ), + rec( 18, 4, DataForkLen, ENC_BIG_ENDIAN ), + rec( 22, 4, ResourceForkLen, ENC_BIG_ENDIAN ), + rec( 26, 2, TotalOffspring, ENC_BIG_ENDIAN ), + rec( 28, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 30, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 32, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 34, 2, ModifiedTime, ENC_BIG_ENDIAN ), + rec( 36, 2, ArchivedDate, ENC_BIG_ENDIAN ), + rec( 38, 2, ArchivedTime, ENC_BIG_ENDIAN ), + rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 44, 4, Reserved4 ), + rec( 48, 2, FinderAttr ), + rec( 50, 2, HorizLocation ), + rec( 52, 2, VertLocation ), + rec( 54, 2, FileDirWindow ), + rec( 56, 16, Reserved16 ), + rec( 72, 32, LongName ), + rec( 104, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 108, 12, ShortName ), + rec( 120, 1, AccessPrivileges ), + rec( 121, 1, Reserved ), + rec( 122, 6, ProDOSInfo ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03, + 0xa100, 0xa201, 0xfd00, 0xff19]) + # 2222/2310, 35/16 + pkt = NCP(0x2310, "AFP 2.0 Set File Information", 'afp') + pkt.Request((70, 324), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, MacBaseDirectoryID ), + rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ), + rec( 17, 2, AttributesDef16 ), + rec( 19, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 21, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 23, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 25, 2, ModifiedTime, ENC_BIG_ENDIAN ), + rec( 27, 2, ArchivedDate, ENC_BIG_ENDIAN ), + rec( 29, 2, ArchivedTime, ENC_BIG_ENDIAN ), + rec( 31, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 35, 4, Reserved4 ), + rec( 39, 2, FinderAttr ), + rec( 41, 2, HorizLocation ), + rec( 43, 2, VertLocation ), + rec( 45, 2, FileDirWindow ), + rec( 47, 16, Reserved16 ), + rec( 63, 6, ProDOSInfo ), + rec( 69, (1,255), Path, info_str=(Path, "AFP 2.0 Set File Information: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400, + 0x9500, 0x9600, 0x9804, 0x9c03, 0xa100, 0xa201, + 0xfd00, 0xff16]) + # 2222/2311, 35/17 + pkt = NCP(0x2311, "AFP 2.0 Scan File Information", 'afp') + pkt.Request((26, 280), [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, MacBaseDirectoryID ), + rec( 15, 4, MacLastSeenID, ENC_BIG_ENDIAN ), + rec( 19, 2, DesiredResponseCount, ENC_BIG_ENDIAN ), + rec( 21, 2, SearchBitMap, ENC_BIG_ENDIAN ), + rec( 23, 2, RequestBitMap, ENC_BIG_ENDIAN ), + rec( 25, (1,255), Path, info_str=(Path, "AFP 2.0 Scan File Information: %s", ", %s") ), + ]) + pkt.Reply(14, [ + rec( 8, 2, ActualResponseCount, var="x" ), + rec( 10, 4, 
AFP20Struct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, + 0x9c03, 0xa100, 0xa201, 0xfd00, 0xff16]) + # 2222/2312, 35/18 + pkt = NCP(0x2312, "AFP Get DOS Name From Entry ID", 'afp') + pkt.Request(15, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, AFPEntryID, ENC_BIG_ENDIAN ), + ]) + pkt.Reply((9,263), [ + rec( 8, (1,255), Path ), + ]) + pkt.CompletionCodes([0x0000, 0x8900, 0x9600, 0xbf00]) + # 2222/2313, 35/19 + pkt = NCP(0x2313, "AFP Get Macintosh Info On Deleted File", 'afp') + pkt.Request(15, [ + rec( 10, 1, VolumeNumber ), + rec( 11, 4, DirectoryNumber, ENC_BIG_ENDIAN ), + ]) + pkt.Reply((51,305), [ + rec( 8, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 12, 4, Reserved4 ), + rec( 16, 2, FinderAttr ), + rec( 18, 2, HorizLocation ), + rec( 20, 2, VertLocation ), + rec( 22, 2, FileDirWindow ), + rec( 24, 16, Reserved16 ), + rec( 40, 6, ProDOSInfo ), + rec( 46, 4, ResourceForkSize, ENC_BIG_ENDIAN ), + rec( 50, (1,255), FileName ), + ]) + pkt.CompletionCodes([0x0000, 0x9c03, 0xbf00]) + # 2222/2400, 36/00 + pkt = NCP(0x2400, "Get NCP Extension Information", 'extension') + pkt.Request(14, [ + rec( 10, 4, NCPextensionNumber, ENC_LITTLE_ENDIAN ), + ]) + pkt.Reply((16,270), [ + rec( 8, 4, NCPextensionNumber ), + rec( 12, 1, NCPextensionMajorVersion ), + rec( 13, 1, NCPextensionMinorVersion ), + rec( 14, 1, NCPextensionRevisionNumber ), + rec( 15, (1, 255), NCPextensionName ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20]) + # 2222/2401, 36/01 + pkt = NCP(0x2401, "Get NCP Extension Maximum Data Size", 'extension') + pkt.Request(10) + pkt.Reply(10, [ + rec( 8, 2, NCPdataSize ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20]) + # 2222/2402, 36/02 + pkt = NCP(0x2402, "Get NCP Extension Information by Name", 'extension') + pkt.Request((11, 265), [ + rec( 10, (1,255), NCPextensionName, info_str=(NCPextensionName, "Get NCP Extension Information by Name: %s", ", %s") ), + ]) + pkt.Reply((16,270), [ + rec( 8, 4, NCPextensionNumber ), + rec( 12, 1, NCPextensionMajorVersion ), + rec( 13, 1, NCPextensionMinorVersion ), + rec( 14, 1, NCPextensionRevisionNumber ), + rec( 15, (1, 255), NCPextensionName ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20]) + # 2222/2403, 36/03 + pkt = NCP(0x2403, "Get Number of Registered NCP Extensions", 'extension') + pkt.Request(10) + pkt.Reply(12, [ + rec( 8, 4, NumberOfNCPExtensions ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20]) + # 2222/2404, 36/04 + pkt = NCP(0x2404, "Get NCP Extension Registered Verbs List", 'extension') + pkt.Request(14, [ + rec( 10, 4, StartingNumber ), + ]) + pkt.Reply(20, [ + rec( 8, 4, ReturnedListCount, var="x" ), + rec( 12, 4, nextStartingNumber ), + rec( 16, 4, NCPExtensionNumbers, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20]) + # 2222/2405, 36/05 + pkt = NCP(0x2405, "Return NCP Extension Information", 'extension') + pkt.Request(14, [ + rec( 10, 4, NCPextensionNumber ), + ]) + pkt.Reply((16,270), [ + rec( 8, 4, NCPextensionNumber ), + rec( 12, 1, NCPextensionMajorVersion ), + rec( 13, 1, NCPextensionMinorVersion ), + rec( 14, 1, NCPextensionRevisionNumber ), + rec( 15, (1, 255), NCPextensionName ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20]) + # 2222/2406, 36/06 + pkt = NCP(0x2406, "Return NCP Extension Maximum Data Size", 'extension') + pkt.Request(10) + pkt.Reply(12, [ + rec( 8, 4, NCPdataSize ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20]) + # 2222/25, 37 + pkt = NCP(0x25, "Execute NCP 
Extension", 'extension') + pkt.Request(11, [ + rec( 7, 4, NCPextensionNumber ), + # The following value is Unicode + #rec[ 13, (1,255), RequestData ], + ]) + pkt.Reply(8) + # The following value is Unicode + #[ 8, (1, 255), ReplyBuffer ], + pkt.CompletionCodes([0x0000, 0x7e01, 0xf000, 0x9c00, 0xd504, 0xee00, 0xfe00, 0xff20]) + # 2222/3B, 59 + pkt = NCP(0x3B, "Commit File", 'file', has_length=0 ) + pkt.Request(14, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle, info_str=(FileHandle, "Commit File - 0x%s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9804, 0xff00]) + # 2222/3D, 61 + pkt = NCP(0x3D, "Commit File", 'file', has_length=0 ) + pkt.Request(14, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle, info_str=(FileHandle, "Commit File - 0x%s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9804, 0xff00]) + # 2222/3E, 62 + pkt = NCP(0x3E, "File Search Initialize", 'file', has_length=0 ) + pkt.Request((9, 263), [ + rec( 7, 1, DirHandle ), + rec( 8, (1,255), Path, info_str=(Path, "Initialize File Search: %s", ", %s") ), + ]) + pkt.Reply(14, [ + rec( 8, 1, VolumeNumber ), + rec( 9, 2, DirectoryID ), + rec( 11, 2, SequenceNumber, ENC_BIG_ENDIAN ), + rec( 13, 1, AccessRightsMask ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, + 0xfd00, 0xff16]) + # 2222/3F, 63 + pkt = NCP(0x3F, "File Search Continue", 'file', has_length=0 ) + pkt.Request((14, 268), [ + rec( 7, 1, VolumeNumber ), + rec( 8, 2, DirectoryID ), + rec( 10, 2, SequenceNumber, ENC_BIG_ENDIAN ), + rec( 12, 1, SearchAttributes ), + rec( 13, (1,255), Path, info_str=(Path, "File Search Continue: %s", ", %s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + # + # XXX - don't show this if we got back a non-zero + # completion code? For example, 255 means "No + # matching files or directories were found", so + # presumably it can't show you a matching file or + # directory instance - it appears to just leave crap + # there. 
+ # + srec( DirectoryInstance, req_cond="ncp.sattr_sub==TRUE"), + srec( FileInstance, req_cond="ncp.sattr_sub!=TRUE"), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0xff16]) + # 2222/40, 64 + pkt = NCP(0x40, "Search for a File", 'file') + pkt.Request((12, 266), [ + rec( 7, 2, SequenceNumber, ENC_BIG_ENDIAN ), + rec( 9, 1, DirHandle ), + rec( 10, 1, SearchAttributes ), + rec( 11, (1,255), FileName, info_str=(FileName, "Search for File: %s", ", %s") ), + ]) + pkt.Reply(40, [ + rec( 8, 2, SequenceNumber, ENC_BIG_ENDIAN ), + rec( 10, 2, Reserved2 ), + rec( 12, 14, FileName14 ), + rec( 26, 1, AttributesDef ), + rec( 27, 1, FileExecuteType ), + rec( 28, 4, FileSize ), + rec( 32, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 34, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 36, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 38, 2, ModifiedTime, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8900, 0x9600, 0x9804, 0x9b03, + 0x9c03, 0xa100, 0xfd00, 0xff16]) + # 2222/41, 65 + pkt = NCP(0x41, "Open File", 'file') + pkt.Request((10, 264), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, SearchAttributes ), + rec( 9, (1,255), FileName, info_str=(FileName, "Open File: %s", ", %s") ), + ]) + pkt.Reply(44, [ + rec( 8, 6, FileHandle ), + rec( 14, 2, Reserved2 ), + rec( 16, 14, FileName14 ), + rec( 30, 1, AttributesDef ), + rec( 31, 1, FileExecuteType ), + rec( 32, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8200, 0x9400, + 0x9600, 0x9804, 0x9c03, 0xa100, 0xfd00, + 0xff16]) + # 2222/42, 66 + pkt = NCP(0x42, "Close File", 'file') + pkt.Request(14, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle, info_str=(FileHandle, "Close File - 0x%s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0xff1a]) + pkt.MakeExpert("ncp42_request") + # 2222/43, 67 + pkt = NCP(0x43, "Create File", 'file') + pkt.Request((10, 264), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, AttributesDef ), + rec( 9, (1,255), FileName, info_str=(FileName, "Create File: %s", ", %s") ), + ]) + pkt.Reply(44, [ + rec( 8, 6, FileHandle ), + rec( 14, 2, Reserved2 ), + rec( 16, 14, FileName14 ), + rec( 30, 1, AttributesDef ), + rec( 31, 1, FileExecuteType ), + rec( 32, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9900, 0x9b03, 0x9c03, 0xfd00, + 0xff00]) + # 2222/44, 68 + pkt = NCP(0x44, "Erase File", 'file') + pkt.Request((10, 264), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, SearchAttributes ), + rec( 9, (1,255), FileName, info_str=(FileName, "Erase File: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8a00, 0x8d00, 0x8e00, 0x8f00, + 0x9001, 0x9600, 0x9804, 0x9b03, 0x9c03, + 0xa100, 0xfd00, 0xff00]) + # 2222/45, 69 + pkt = NCP(0x45, "Rename File", 'file') + pkt.Request((12, 520), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, SearchAttributes ), + rec( 9, (1,255), FileName, info_str=(FileName, "Rename File: %s", ", %s") ), + rec( -1, 1, TargetDirHandle ), + rec( -1, (1, 255), NewFileNameLen ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8701, 0x8b00, 0x8d00, 0x8e00, + 0x8f00, 0x9001, 
0x9101, 0x9201, 0x9600, + 0x9804, 0x9a00, 0x9b03, 0x9c03, 0xa100, + 0xfd00, 0xff16]) + # 2222/46, 70 + pkt = NCP(0x46, "Set File Attributes", 'file') + pkt.Request((11, 265), [ + rec( 7, 1, AttributesDef ), + rec( 8, 1, DirHandle ), + rec( 9, 1, SearchAttributes ), + rec( 10, (1,255), FileName, info_str=(FileName, "Set File Attributes: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x8d00, 0x8e00, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfd00, + 0xff16]) + # 2222/47, 71 + pkt = NCP(0x47, "Get Current Size of File", 'file') + pkt.Request(14, [ + rec(7, 1, Reserved ), + rec( 8, 6, FileHandle, info_str=(FileHandle, "Get Current Size of File - 0x%s", ", %s") ), + ]) + pkt.Reply(12, [ + rec( 8, 4, FileSize, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8800]) + # 2222/48, 72 + pkt = NCP(0x48, "Read From A File", 'file') + pkt.Request(20, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle, info_str=(FileHandle, "Read From File - 0x%s", ", %s") ), + rec( 14, 4, FileOffset, ENC_BIG_ENDIAN ), + rec( 18, 2, MaxBytes, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(10, [ + rec( 8, 2, NumBytes, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0xff1b]) + # 2222/49, 73 + pkt = NCP(0x49, "Write to a File", 'file') + pkt.Request(20, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle, info_str=(FileHandle, "Write to a File - 0x%s", ", %s") ), + rec( 14, 4, FileOffset, ENC_BIG_ENDIAN ), + rec( 18, 2, MaxBytes, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0104, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xff1b]) + # 2222/4A, 74 + pkt = NCP(0x4A, "Copy from One File to Another", 'file') + pkt.Request(30, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle ), + rec( 14, 6, TargetFileHandle ), + rec( 20, 4, FileOffset, ENC_BIG_ENDIAN ), + rec( 24, 4, TargetFileOffset, ENC_BIG_ENDIAN ), + rec( 28, 2, BytesToCopy, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(12, [ + rec( 8, 4, BytesActuallyTransferred, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x0104, 0x8300, 0x8800, 0x9300, 0x9400, + 0x9500, 0x9600, 0xa201, 0xff1b]) + # 2222/4B, 75 + pkt = NCP(0x4B, "Set File Time Date Stamp", 'file') + pkt.Request(18, [ + rec( 7, 1, Reserved ), + rec( 8, 6, FileHandle, info_str=(FileHandle, "Set Time and Date Stamp for File - 0x%s", ", %s") ), + rec( 14, 2, FileTime, ENC_BIG_ENDIAN ), + rec( 16, 2, FileDate, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8800, 0x9400, 0x9600, 0xfb08]) + # 2222/4C, 76 + pkt = NCP(0x4C, "Open File", 'file') + pkt.Request((11, 265), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, SearchAttributes ), + rec( 9, 1, AccessRightsMask ), + rec( 10, (1,255), FileName, info_str=(FileName, "Open File: %s", ", %s") ), + ]) + pkt.Reply(44, [ + rec( 8, 6, FileHandle ), + rec( 14, 2, Reserved2 ), + rec( 16, 14, FileName14 ), + rec( 30, 1, AttributesDef ), + rec( 31, 1, FileExecuteType ), + rec( 32, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8200, 0x9400, + 0x9600, 0x9804, 0x9c03, 0xa100, 0xfd00, + 0xff16]) + # 2222/4D, 77 + pkt = NCP(0x4D, "Create File", 'file') + pkt.Request((10, 264), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, AttributesDef ), + rec( 9, (1,255), FileName, info_str=(FileName, "Create File: %s", ", %s") ), + ]) + pkt.Reply(44, [ + rec( 8, 6, FileHandle ), + rec( 14, 2, 
Reserved2 ), + rec( 16, 14, FileName14 ), + rec( 30, 1, AttributesDef ), + rec( 31, 1, FileExecuteType ), + rec( 32, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9900, 0x9b03, 0x9c03, 0xfd00, + 0xff00]) + # 2222/4F, 79 + pkt = NCP(0x4F, "Set File Extended Attributes", 'file') + pkt.Request((11, 265), [ + rec( 7, 1, AttributesDef ), + rec( 8, 1, DirHandle ), + rec( 9, 1, AccessRightsMask ), + rec( 10, (1,255), FileName, info_str=(FileName, "Set File Extended Attributes: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8c00, 0x8d00, 0x8e00, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfd00, + 0xff16]) + # 2222/54, 84 + pkt = NCP(0x54, "Open/Create File", 'file') + pkt.Request((12, 266), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, AttributesDef ), + rec( 9, 1, AccessRightsMask ), + rec( 10, 1, ActionFlag ), + rec( 11, (1,255), FileName, info_str=(FileName, "Open/Create File: %s", ", %s") ), + ]) + pkt.Reply(44, [ + rec( 8, 6, FileHandle ), + rec( 14, 2, Reserved2 ), + rec( 16, 14, FileName14 ), + rec( 30, 1, AttributesDef ), + rec( 31, 1, FileExecuteType ), + rec( 32, 4, FileSize, ENC_BIG_ENDIAN ), + rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ), + rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ), + rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ), + rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16]) + # 2222/55, 85 + pkt = NCP(0x55, "Get Sparse File Data Block Bit Map", 'file', has_length=1) + pkt.Request(19, [ + rec( 7, 2, SubFuncStrucLen, ENC_BIG_ENDIAN ), + rec( 9, 6, FileHandle, info_str=(FileHandle, "Get Sparse File Data Block Bitmap for File - 0x%s", ", %s") ), + rec( 15, 4, FileOffset ), + ]) + pkt.Reply(528, [ + rec( 8, 4, AllocationBlockSize ), + rec( 12, 4, Reserved4 ), + rec( 16, 512, BitMap ), + ]) + pkt.CompletionCodes([0x0000, 0x8800]) + # 2222/5601, 86/01 + pkt = NCP(0x5601, "Close Extended Attribute Handle", 'extended', has_length=0 ) + pkt.Request(14, [ + rec( 8, 2, Reserved2 ), + rec( 10, 4, EAHandle ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xcf00, 0xd301]) + # 2222/5602, 86/02 + pkt = NCP(0x5602, "Write Extended Attribute", 'extended', has_length=0 ) + pkt.Request((35,97), [ + rec( 8, 2, EAFlags ), + rec( 10, 4, EAHandleOrNetWareHandleOrVolume, ENC_BIG_ENDIAN ), + rec( 14, 4, ReservedOrDirectoryNumber ), + rec( 18, 4, TtlWriteDataSize ), + rec( 22, 4, FileOffset ), + rec( 26, 4, EAAccessFlag ), + rec( 30, 2, EAValueLength, var='x' ), + rec( 32, (2,64), EAKey, info_str=(EAKey, "Write Extended Attribute: %s", ", %s") ), + rec( -1, 1, EAValueRep, repeat='x' ), + ]) + pkt.Reply(20, [ + rec( 8, 4, EAErrorCodes ), + rec( 12, 4, EABytesWritten ), + rec( 16, 4, NewEAHandle ), + ]) + pkt.CompletionCodes([0x0000, 0xc800, 0xc900, 0xcb00, 0xce00, 0xcf00, 0xd101, + 0xd203, 0xd301, 0xd402, 0xda02, 0xdc01, 0xef00, 0xff00]) + # 2222/5603, 86/03 + pkt = NCP(0x5603, "Read Extended Attribute", 'extended', has_length=0 ) + pkt.Request((28,538), [ + rec( 8, 2, EAFlags ), + rec( 10, 4, EAHandleOrNetWareHandleOrVolume ), + rec( 14, 4, ReservedOrDirectoryNumber ), + rec( 18, 4, FileOffset ), + rec( 22, 4, InspectSize ), + rec( 26, (2,512), 
EAKey, info_str=(EAKey, "Read Extended Attribute: %s", ", %s") ), + ]) + pkt.Reply((26,536), [ + rec( 8, 4, EAErrorCodes ), + rec( 12, 4, TtlValuesLength ), + rec( 16, 4, NewEAHandle ), + rec( 20, 4, EAAccessFlag ), + rec( 24, (2,512), EAValue ), + ]) + pkt.CompletionCodes([0x0000, 0x8800, 0x9c03, 0xc900, 0xce00, 0xcf00, 0xd101, + 0xd301, 0xd503]) + # 2222/5604, 86/04 + pkt = NCP(0x5604, "Enumerate Extended Attribute", 'extended', has_length=0 ) + pkt.Request((26,536), [ + rec( 8, 2, EAFlags ), + rec( 10, 4, EAHandleOrNetWareHandleOrVolume ), + rec( 14, 4, ReservedOrDirectoryNumber ), + rec( 18, 4, InspectSize ), + rec( 22, 2, SequenceNumber ), + rec( 24, (2,512), EAKey, info_str=(EAKey, "Enumerate Extended Attribute: %s", ", %s") ), + ]) + pkt.Reply(28, [ + rec( 8, 4, EAErrorCodes ), + rec( 12, 4, TtlEAs ), + rec( 16, 4, TtlEAsDataSize ), + rec( 20, 4, TtlEAsKeySize ), + rec( 24, 4, NewEAHandle ), + ]) + pkt.CompletionCodes([0x0000, 0x8800, 0x8c01, 0xc800, 0xc900, 0xce00, 0xcf00, 0xd101, + 0xd301, 0xd503, 0xfb08, 0xff00]) + # 2222/5605, 86/05 + pkt = NCP(0x5605, "Duplicate Extended Attributes", 'extended', has_length=0 ) + pkt.Request(28, [ + rec( 8, 2, EAFlags ), + rec( 10, 2, DstEAFlags ), + rec( 12, 4, EAHandleOrNetWareHandleOrVolume ), + rec( 16, 4, ReservedOrDirectoryNumber ), + rec( 20, 4, EAHandleOrNetWareHandleOrVolume ), + rec( 24, 4, ReservedOrDirectoryNumber ), + ]) + pkt.Reply(20, [ + rec( 8, 4, EADuplicateCount ), + rec( 12, 4, EADataSizeDuplicated ), + rec( 16, 4, EAKeySizeDuplicated ), + ]) + pkt.CompletionCodes([0x0000, 0x8800, 0xd101]) + # 2222/5701, 87/01 + pkt = NCP(0x5701, "Open/Create File or Subdirectory", 'file', has_length=0) + pkt.Request((30, 284), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, OpenCreateMode ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 2, DesiredAccessRights ), + rec( 22, 1, VolumeNumber ), + rec( 23, 4, DirectoryBase ), + rec( 27, 1, HandleFlag ), + rec( 28, 1, PathCount, var="x" ), + rec( 29, (1,255), Path, repeat="x", info_str=(Path, "Open or Create: %s", "/%s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle ), + rec( 12, 1, OpenCreateAction ), + rec( 13, 1, Reserved ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, 
req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, 
req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8001, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9400, 0x9600, + 0x9804, 0x9900, 0x9b03, 0x9c03, 0xa500, 0xa802, 0xa901, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/5702, 87/02 + pkt = NCP(0x5702, "Initialize Search", 'file', has_length=0) + pkt.Request( (18,272), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 1, VolumeNumber ), + rec( 11, 4, DirectoryBase ), + rec( 15, 1, HandleFlag ), + rec( 16, 1, PathCount, var="x" ), + rec( 17, (1,255), Path, repeat="x", info_str=(Path, "Set Search Pointer to: %s", "/%s") ), + ]) + pkt.Reply(17, [ + rec( 8, 1, VolumeNumber ), + rec( 9, 4, DirectoryNumber ), + rec( 13, 4, DirectoryEntryNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5703, 87/03 + pkt = NCP(0x5703, "Search for File or Subdirectory", 'file', has_length=0) + pkt.Request((26, 280), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 9, SeachSequenceStruct ), + rec( 25, (1,255), SearchPattern, info_str=(SearchPattern, "Search for: %s", "/%s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 9, SeachSequenceStruct ), + rec( 17, 1, Reserved ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), 
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + srec( DStreamActual, req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamLogical, req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5704, 87/04 + pkt = NCP(0x5704, "Rename Or 
Move a File or Subdirectory", 'file', has_length=0) + pkt.Request((28, 536), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, RenameFlag ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, 1, VolumeNumber ), + rec( 20, 4, DirectoryBase ), + rec( 24, 1, HandleFlag ), + rec( 25, 1, PathCount, var="y" ), + rec( 26, (1, 255), Path, repeat="x", info_str=(Path, "Rename or Move: %s", "/%s") ), + rec( -1, (1,255), DestPath, repeat="y" ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9100, 0x9200, 0x9600, + 0x9804, 0x9a00, 0x9b03, 0x9c03, 0x9e00, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5705, 87/05 + pkt = NCP(0x5705, "Scan File or Subdirectory for Trustees", 'file', has_length=0) + pkt.Request((24, 278), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 4, SequenceNumber ), + rec( 16, 1, VolumeNumber ), + rec( 17, 4, DirectoryBase ), + rec( 21, 1, HandleFlag ), + rec( 22, 1, PathCount, var="x" ), + rec( 23, (1, 255), Path, repeat="x", info_str=(Path, "Scan Trustees for: %s", "/%s") ), + ]) + pkt.Reply(20, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 2, ObjectIDCount, var="x" ), + rec( 14, 6, TrusteeStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c04, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5706, 87/06 + pkt = NCP(0x5706, "Obtain File or SubDirectory Information", 'file', has_length=0) + pkt.Request((24,278), [ + rec( 10, 1, SrcNameSpace ), + rec( 11, 1, DestNameSpace ), + rec( 12, 2, SearchAttributesLow ), + rec( 14, 2, ReturnInfoMask, ENC_LITTLE_ENDIAN ), + rec( 16, 2, ExtendedInfo ), + rec( 18, 1, VolumeNumber ), + rec( 19, 4, DirectoryBase ), + rec( 23, 1, HandleFlag ), + rec( 24, 1, PathCount, var="x" ), + rec( 25, (1,255), Path, repeat="x", info_str=(Path, "Obtain Info for: %s", "/%s")), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 
0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ), + srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8700, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9802, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5707, 87/07 + pkt = NCP(0x5707, "Modify File or Subdirectory DOS Information", 'file', has_length=0) + pkt.Request((62,316), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved 
), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ModifyDOSInfoMask ), + rec( 14, 2, Reserved2 ), + rec( 16, 2, AttributesDef16 ), + rec( 18, 1, FileMode ), + rec( 19, 1, FileExtendedAttributes ), + rec( 20, 2, CreationDate ), + rec( 22, 2, CreationTime ), + rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 28, 2, ModifiedDate ), + rec( 30, 2, ModifiedTime ), + rec( 32, 4, ModifierID, ENC_BIG_ENDIAN ), + rec( 36, 2, ArchivedDate ), + rec( 38, 2, ArchivedTime ), + rec( 40, 4, ArchiverID, ENC_BIG_ENDIAN ), + rec( 44, 2, LastAccessedDate ), + rec( 46, 2, InheritedRightsMask ), + rec( 48, 2, InheritanceRevokeMask ), + rec( 50, 4, MaxSpace ), + rec( 54, 1, VolumeNumber ), + rec( 55, 4, DirectoryBase ), + rec( 59, 1, HandleFlag ), + rec( 60, 1, PathCount, var="x" ), + rec( 61, (1,255), Path, repeat="x", info_str=(Path, "Modify DOS Information for: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8c01, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5708, 87/08 + pkt = NCP(0x5708, "Delete a File or Subdirectory", 'file', has_length=0) + pkt.Request((20,274), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Delete a File or Subdirectory: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8900, 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5709, 87/09 + pkt = NCP(0x5709, "Set Short Directory Handle", 'file', has_length=0) + pkt.Request((20,274), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 1, DestDirHandle ), + rec( 11, 1, Reserved ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Set Short Directory Handle to: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/570A, 87/10 + pkt = NCP(0x570A, "Add Trustee Set to File or Subdirectory", 'file', has_length=0) + pkt.Request((31,285), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, AccessRightsMaskWord ), + rec( 14, 2, ObjectIDCount, var="y" ), + rec( 16, 1, VolumeNumber ), + rec( 17, 4, DirectoryBase ), + rec( 21, 1, HandleFlag ), + rec( 22, 1, PathCount, var="x" ), + rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Add Trustee Set to: %s", "/%s") ), + rec( -1, 7, TrusteeStruct, repeat="y" ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xbf00, 0xfc01, 0xfd00, 0xff16]) + # 2222/570B, 87/11 + pkt = NCP(0x570B, "Delete Trustee Set from File or SubDirectory", 'file', has_length=0) + pkt.Request((27,281), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, ObjectIDCount, var="y" ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Delete Trustee Set 
from: %s", "/%s") ), + rec( -1, 7, TrusteeStruct, repeat="y" ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/570C, 87/12 + pkt = NCP(0x570C, "Allocate Short Directory Handle", 'file', has_length=0) + pkt.Request((20,274), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, AllocateMode ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Allocate Short Directory Handle to: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + srec( ReplyLevel2Struct, req_cond="ncp.alloc_reply_lvl2 == TRUE" ), + srec( ReplyLevel1Struct, req_cond="ncp.alloc_reply_lvl2 == FALSE" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0x9d00, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5710, 87/16 + pkt = NCP(0x5710, "Scan Salvageable Files", 'file', has_length=0) + pkt.Request((26,280), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, ReturnInfoMask ), + rec( 12, 2, ExtendedInfo ), + rec( 14, 4, SequenceNumber ), + rec( 18, 1, VolumeNumber ), + rec( 19, 4, DirectoryBase ), + rec( 23, 1, HandleFlag ), + rec( 24, 1, PathCount, var="x" ), + rec( 25, (1,255), Path, repeat="x", info_str=(Path, "Scan for Deleted Files in: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 2, DeletedTime ), + rec( 14, 2, DeletedDate ), + rec( 16, 4, DeletedID, ENC_BIG_ENDIAN ), + rec( 20, 4, VolumeID ), + rec( 24, 4, DirectoryBase ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( 
PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5711, 87/17 + pkt = NCP(0x5711, "Recover Salvageable File", 'file', has_length=0) + pkt.Request((23,277), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 4, SequenceNumber ), + rec( 14, 4, VolumeID ), + rec( 18, 4, DirectoryBase ), + rec( 22, (1,255), FileName, info_str=(FileName, "Recover Deleted File: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa802, 0xbf00, 0xfe02, 0xfd00, 0xff16]) + # 2222/5712, 87/18 + pkt = NCP(0x5712, "Purge Salvageable Files", 'file', has_length=0) + pkt.Request(22, [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 4, SequenceNumber ), + rec( 14, 4, VolumeID ), + rec( 18, 4, DirectoryBase ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x010a, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5713, 87/19 + pkt = NCP(0x5713, "Get Name Space Information", 'file', has_length=0) + pkt.Request(18, [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 1, Reserved ), + rec( 11, 1, VolumeNumber ), + rec( 12, 4, DirectoryBase ), + rec( 16, 2, NamesSpaceInfoMask ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + srec( FileNameStruct, req_cond="ncp.ns_info_mask_modify == TRUE" ), + srec( FileAttributesStruct, req_cond="ncp.ns_info_mask_fatt == TRUE" ), + srec( CreationDateStruct, req_cond="ncp.ns_info_mask_cdate == TRUE" ), + srec( CreationTimeStruct, 
req_cond="ncp.ns_info_mask_ctime == TRUE" ), + srec( OwnerIDStruct, req_cond="ncp.ns_info_mask_owner == TRUE" ), + srec( ArchiveDateStruct, req_cond="ncp.ns_info_mask_adate == TRUE" ), + srec( ArchiveTimeStruct, req_cond="ncp.ns_info_mask_atime == TRUE" ), + srec( ArchiveIdStruct, req_cond="ncp.ns_info_mask_aid == TRUE" ), + srec( UpdateDateStruct, req_cond="ncp.ns_info_mask_udate == TRUE" ), + srec( UpdateTimeStruct, req_cond="ncp.ns_info_mask_utime == TRUE" ), + srec( UpdateIDStruct, req_cond="ncp.ns_info_mask_uid == TRUE" ), + srec( LastAccessStruct, req_cond="ncp.ns_info_mask_acc_date == TRUE" ), + srec( RightsInfoStruct, req_cond="ncp.ns_info_mask_max_acc_mask == TRUE" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5714, 87/20 + pkt = NCP(0x5714, "Search for File or Subdirectory Set", 'file', has_length=0) + pkt.Request((27, 27), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 2, ReturnInfoCount ), + rec( 18, 9, SeachSequenceStruct ), +# rec( 27, (1,255), SearchPattern ), + ]) +# The reply packet is dissected in packet-ncp2222.inc + pkt.Reply(NO_LENGTH_CHECK, [ + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16]) + # 2222/5715, 87/21 + pkt = NCP(0x5715, "Get Path String from Short Directory Handle", 'file', has_length=0) + pkt.Request(10, [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DirHandle ), + ]) + pkt.Reply((9,263), [ + rec( 8, (1,255), Path ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16]) + # 2222/5716, 87/22 + pkt = NCP(0x5716, "Generate Directory Base and Volume Number", 'file', has_length=0) + pkt.Request((20,274), [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, dstNSIndicator ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Get Volume and Directory Base from: %s", "/%s") ), + ]) + pkt.Reply(17, [ + rec( 8, 4, DirectoryBase ), + rec( 12, 4, DOSDirectoryBase ), + rec( 16, 1, VolumeNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5717, 87/23 + pkt = NCP(0x5717, "Query Name Space Information Format", 'file', has_length=0) + pkt.Request(10, [ + rec( 8, 1, NameSpace ), + rec( 9, 1, VolumeNumber ), + ]) + pkt.Reply(58, [ + rec( 8, 4, FixedBitMask ), + rec( 12, 4, VariableBitMask ), + rec( 16, 4, HugeBitMask ), + rec( 20, 2, FixedBitsDefined ), + rec( 22, 2, VariableBitsDefined ), + rec( 24, 2, HugeBitsDefined ), + rec( 26, 32, FieldsLenTable ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5718, 87/24 + pkt = NCP(0x5718, "Get Name Spaces Loaded List from Volume Number", 'file', has_length=0) + pkt.Request(11, [ + rec( 8, 2, Reserved2 ), + rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Name Spaces Loaded List from Vol: %d", "/%d") ), + ]) + pkt.Reply(11, [ + 
rec( 8, 2, NumberOfNSLoaded, var="x" ), + rec( 10, 1, NameSpace, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5719, 87/25 + pkt = NCP(0x5719, "Set Name Space Information", 'file', has_length=0) + pkt.Request(531, [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 1, VolumeNumber ), + rec( 11, 4, DirectoryBase ), + rec( 15, 2, NamesSpaceInfoMask ), + rec( 17, 2, Reserved2 ), + rec( 19, 512, NSSpecificInfo ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001, + 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, + 0xff16]) + # 2222/571A, 87/26 + pkt = NCP(0x571A, "Get Huge Name Space Information", 'file', has_length=0) + pkt.Request(34, [ + rec( 8, 1, NameSpace ), + rec( 9, 1, VolumeNumber ), + rec( 10, 4, DirectoryBase ), + rec( 14, 4, HugeBitMask ), + rec( 18, 16, HugeStateInfo ), + ]) + pkt.Reply((25,279), [ + rec( 8, 16, NextHugeStateInfo ), + rec( 24, (1,255), HugeData ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001, + 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, + 0xff16]) + # 2222/571B, 87/27 + pkt = NCP(0x571B, "Set Huge Name Space Information", 'file', has_length=0) + pkt.Request((35,289), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, VolumeNumber ), + rec( 10, 4, DirectoryBase ), + rec( 14, 4, HugeBitMask ), + rec( 18, 16, HugeStateInfo ), + rec( 34, (1,255), HugeData ), + ]) + pkt.Reply(28, [ + rec( 8, 16, NextHugeStateInfo ), + rec( 24, 4, HugeDataUsed ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001, + 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, + 0xff16]) + # 2222/571C, 87/28 + pkt = NCP(0x571C, "Get Full Path String", 'file', has_length=0) + pkt.Request((28,282), [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, PathCookieFlags ), + rec( 12, 4, Cookie1 ), + rec( 16, 4, Cookie2 ), + rec( 20, 1, VolumeNumber ), + rec( 21, 4, DirectoryBase ), + rec( 25, 1, HandleFlag ), + rec( 26, 1, PathCount, var="x" ), + rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Get Full Path from: %s", "/%s") ), + ]) + pkt.Reply((23,277), [ + rec( 8, 2, PathCookieFlags ), + rec( 10, 4, Cookie1 ), + rec( 14, 4, Cookie2 ), + rec( 18, 2, PathComponentSize ), + rec( 20, 2, PathComponentCount, var='x' ), + rec( 22, (1,255), Path, repeat='x' ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001, + 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, + 0xff16]) + # 2222/571D, 87/29 + pkt = NCP(0x571D, "Get Effective Directory Rights", 'file', has_length=0) + pkt.Request((24, 278), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 1, VolumeNumber ), + rec( 17, 4, DirectoryBase ), + rec( 21, 1, HandleFlag ), + rec( 22, 1, PathCount, var="x" ), + rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Get Effective Rights for: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 2, EffectiveRights ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, 
req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/571E, 87/30 + pkt = 
NCP(0x571E, "Open/Create File or Subdirectory", 'file', has_length=0) + pkt.Request((34, 288), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 1, OpenCreateMode ), + rec( 11, 1, Reserved ), + rec( 12, 2, SearchAttributesLow ), + rec( 14, 2, Reserved2 ), + rec( 16, 2, ReturnInfoMask ), + rec( 18, 2, ExtendedInfo ), + rec( 20, 4, AttributesDef32 ), + rec( 24, 2, DesiredAccessRights ), + rec( 26, 1, VolumeNumber ), + rec( 27, 4, DirectoryBase ), + rec( 31, 1, HandleFlag ), + rec( 32, 1, PathCount, var="x" ), + rec( 33, (1,255), Path, repeat="x", info_str=(Path, "Open or Create File: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 1, OpenCreateAction ), + rec( 13, 1, Reserved ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( CreationInfoStruct, 
req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbe00, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/571F, 87/31 + pkt = NCP(0x571F, "Get File Information", 'file', has_length=0) + pkt.Request(15, [ + rec( 8, 6, FileHandle, info_str=(FileHandle, "Get File Information - 0x%s", ", %s") ), + rec( 14, 1, HandleInfoLevel ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, VolumeNumberLong ), + rec( 12, 4, DirectoryBase ), + srec(HandleInfoLevel0, req_cond="ncp.handle_info_level==0x00" ), + srec(HandleInfoLevel1, req_cond="ncp.handle_info_level==0x01" ), + srec(HandleInfoLevel2, req_cond="ncp.handle_info_level==0x02" ), + srec(HandleInfoLevel3, req_cond="ncp.handle_info_level==0x03" ), + srec(HandleInfoLevel4, req_cond="ncp.handle_info_level==0x04" ), + srec(HandleInfoLevel5, req_cond="ncp.handle_info_level==0x05" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5720, 87/32 + pkt = NCP(0x5720, "Open/Create File or Subdirectory with Callback", 'file', has_length=0) + pkt.Request((30, 284), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, OpenCreateMode ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 2, DesiredAccessRights ), + rec( 22, 1, VolumeNumber ), + rec( 23, 4, DirectoryBase ), + rec( 27, 1, HandleFlag ), + rec( 28, 1, PathCount, var="x" ), + rec( 29, (1,255), Path, repeat="x", info_str=(Path, "Open or Create with Op-Lock: %s", "/%s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 1, OpenCreateAction ), + rec( 13, 1, OCRetFlags ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 
0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, 
var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/5721, 87/33 + pkt = NCP(0x5721, "Open/Create File or Subdirectory II with Callback", 'file', has_length=0) + pkt.Request((34, 288), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 1, OpenCreateMode ), + rec( 11, 1, Reserved ), + rec( 12, 2, SearchAttributesLow ), + rec( 14, 2, Reserved2 ), + rec( 16, 2, ReturnInfoMask ), + rec( 18, 2, ExtendedInfo ), + rec( 20, 4, AttributesDef32 ), + rec( 24, 2, DesiredAccessRights ), + rec( 26, 1, VolumeNumber ), + rec( 27, 4, DirectoryBase ), + rec( 31, 1, HandleFlag ), + rec( 32, 1, PathCount, var="x" ), + rec( 33, (1,255), Path, repeat="x", info_str=(Path, "Open or Create II with Op-Lock: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle ), + rec( 12, 1, OpenCreateAction ), + rec( 13, 1, OCRetFlags ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, 
req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileNameStruct, 
req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbe00, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/5722, 87/34 + pkt = NCP(0x5722, "Open CallBack Control (Op-Lock)", 'file', has_length=0) + pkt.Request(13, [ + rec( 10, 4, CCFileHandle, ENC_BIG_ENDIAN ), + rec( 14, 1, CCFunction ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8800, 0xff16]) + pkt.MakeExpert("ncp5722_request") + # 2222/5723, 87/35 + pkt = NCP(0x5723, "Modify DOS Attributes on a File or Subdirectory", 'file', has_length=0) + pkt.Request((28, 282), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Flags ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 1, VolumeNumber ), + rec( 21, 4, DirectoryBase ), + rec( 25, 1, HandleFlag ), + rec( 26, 1, PathCount, var="x" ), + rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Modify DOS Attributes for: %s", "/%s") ), + ]) + pkt.Reply(24, [ + rec( 8, 4, ItemsChecked ), + rec( 12, 4, ItemsChanged ), + rec( 16, 4, AttributeValidFlag ), + rec( 20, 4, AttributesDef32 ), + ]) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5724, 87/36 + pkt = NCP(0x5724, "Log File", 'sync', has_length=0) + pkt.Request((28, 282), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, Reserved2 ), + rec( 12, 1, LogFileFlagLow ), + rec( 13, 1, LogFileFlagHigh ), + rec( 14, 2, Reserved2 ), + rec( 16, 4, WaitTime ), + rec( 20, 1, VolumeNumber ), + rec( 21, 4, DirectoryBase ), + rec( 25, 1, HandleFlag ), + rec( 26, 1, PathCount, var="x" ), + rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Lock File: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5725, 87/37 + pkt = NCP(0x5725, "Release File", 'sync', has_length=0) + pkt.Request((20, 274), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, Reserved2 ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Release Lock on: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5726, 87/38 + pkt = NCP(0x5726, "Clear File", 'sync', has_length=0) + pkt.Request((20, 274), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, Reserved2 ), + rec( 12, 1, VolumeNumber ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, PathCount, var="x" ), + rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Clear File: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5727, 87/39 + pkt = NCP(0x5727, "Get Directory Disk Space Restriction", 'file', has_length=0) + pkt.Request((19, 273), [ + rec( 8, 1, NameSpace ), + rec( 9, 2, Reserved2 ), + rec( 11, 1, VolumeNumber ), + rec( 12, 4, DirectoryBase ), + rec( 16, 1, HandleFlag ), + rec( 17, 1, 
PathCount, var="x" ), + rec( 18, (1,255), Path, repeat="x", info_str=(Path, "Get Disk Space Restriction for: %s", "/%s") ), + ]) + pkt.Reply(18, [ + rec( 8, 1, NumberOfEntries, var="x" ), + rec( 9, 9, SpaceStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, + 0xff16]) + # 2222/5728, 87/40 + pkt = NCP(0x5728, "Search for File or Subdirectory Set (Extended Errors)", 'file', has_length=0) + pkt.Request((28, 282), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 2, ReturnInfoCount ), + rec( 18, 9, SeachSequenceStruct ), + rec( 27, (1,255), SearchPattern, info_str=(SearchPattern, "Search for: %s", ", %s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 9, SeachSequenceStruct ), + rec( 17, 1, MoreFlag ), + rec( 18, 2, InfoCount ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, 
req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/5729, 87/41 + pkt = NCP(0x5729, "Scan Salvageable Files", 'file', has_length=0) + pkt.Request((24,278), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, CtrlFlags, ENC_LITTLE_ENDIAN ), + rec( 12, 4, SequenceNumber ), + rec( 16, 1, VolumeNumber ), + rec( 17, 4, DirectoryBase ), + rec( 21, 1, HandleFlag ), + rec( 22, 1, PathCount, var="x" ), + rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Scan Deleted Files: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 4, DirectoryBase ), + rec( 16, 4, ScanItems, var="x" ), + srec(ScanInfoFileName, req_cond="ncp.ctrl_flags==0x0001", repeat="x" ), + srec(ScanInfoFileNoName, req_cond="ncp.ctrl_flags==0x0000", repeat="x" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/572A, 87/42 + pkt = NCP(0x572A, "Purge Salvageable File List", 'file', has_length=0) + pkt.Request(28, [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, PurgeFlags ), + rec( 12, 4, VolumeNumberLong ), + rec( 16, 4, DirectoryBase ), + rec( 20, 4, PurgeCount, var="x" ), + rec( 24, 4, PurgeList, repeat="x" ), + ]) + pkt.Reply(16, [ + rec( 8, 4, PurgeCount, var="x" ), + rec( 12, 4, PurgeCcode, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/572B, 87/43 + pkt = NCP(0x572B, "Revoke File Handle Rights", 'file', has_length=0) + pkt.Request(17, [ + rec( 8, 3, Reserved3 ), + rec( 11, 1, RevQueryFlag ), + rec( 12, 4, FileHandle ), + rec( 16, 1, RemoveOpenRights ), + ]) + pkt.Reply(13, [ + rec( 8, 4, FileHandle ), + rec( 12, 1, OpenRights ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + # 2222/572C, 87/44 + pkt = NCP(0x572C, "Update File Handle Rights", 'file', has_length=0) + pkt.Request(24, [ + rec( 8, 2, Reserved2 ), + rec( 10, 1, VolumeNumber ), + rec( 11, 1, NameSpace ), + rec( 12, 4, DirectoryNumber ), + rec( 16, 2, AccessRightsMaskWord ), + rec( 18, 2, NewAccessRights ), + rec( 20, 4, FileHandle, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(16, [ + rec( 8, 
4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 4, EffectiveRights, ENC_LITTLE_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("ncp572c") + # 2222/5740, 87/64 + pkt = NCP(0x5740, "Read from File", 'file', has_length=0) + pkt.Request(22, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ), + rec( 20, 2, NumBytes, ENC_BIG_ENDIAN ), +]) + pkt.Reply(10, [ + rec( 8, 2, NumBytes, ENC_BIG_ENDIAN), +]) + pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0x9500, 0xa201, 0xfd00, 0xff1b]) + # 2222/5741, 87/65 + pkt = NCP(0x5741, "Write to File", 'file', has_length=0) + pkt.Request(22, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ), + rec( 20, 2, NumBytes, ENC_BIG_ENDIAN ), +]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xfd00, 0xff1b]) + # 2222/5742, 87/66 + pkt = NCP(0x5742, "Get Current Size of File", 'file', has_length=0) + pkt.Request(12, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), +]) + pkt.Reply(16, [ + rec( 8, 8, FileSize64bit), +]) + pkt.CompletionCodes([0x0000, 0x7f00, 0x8800, 0x9600, 0xfd02, 0xff01]) + # 2222/5743, 87/67 + pkt = NCP(0x5743, "Log Physical Record", 'file', has_length=0) + pkt.Request(36, [ + rec( 8, 4, LockFlag, ENC_BIG_ENDIAN ), + rec(12, 4, FileHandle, ENC_BIG_ENDIAN ), + rec(16, 8, StartOffset64bit, ENC_BIG_ENDIAN ), + rec(24, 8, Length64bit, ENC_BIG_ENDIAN ), + rec(32, 4, LockTimeout, ENC_BIG_ENDIAN), +]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7f00, 0x8800, 0x9600, 0xfb08, 0xfd02, 0xff01]) + # 2222/5744, 87/68 + pkt = NCP(0x5744, "Release Physical Record", 'file', has_length=0) + pkt.Request(28, [ + rec(8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec(12, 8, StartOffset64bit, ENC_BIG_ENDIAN ), + rec(20, 8, Length64bit, ENC_BIG_ENDIAN ), +]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff1a]) + # 2222/5745, 87/69 + pkt = NCP(0x5745, "Clear Physical Record", 'file', has_length=0) + pkt.Request(28, [ + rec(8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec(12, 8, StartOffset64bit, ENC_BIG_ENDIAN ), + rec(20, 8, Length64bit, ENC_BIG_ENDIAN ), +]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff1a]) + # 2222/5746, 87/70 + pkt = NCP(0x5746, "Copy from One File to Another (64 Bit offset capable)", 'file', has_length=0) + pkt.Request(44, [ + rec(8, 6, SourceFileHandle, ENC_BIG_ENDIAN ), + rec(14, 6, TargetFileHandle, ENC_BIG_ENDIAN ), + rec(20, 8, SourceFileOffset, ENC_BIG_ENDIAN ), + rec(28, 8, TargetFileOffset64bit, ENC_BIG_ENDIAN ), + rec(36, 8, BytesToCopy64bit, ENC_BIG_ENDIAN ), +]) + pkt.Reply(16, [ + rec( 8, 8, BytesActuallyTransferred64bit, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400, + 0x9500, 0x9600, 0xa201]) + # 2222/5747, 87/71 + pkt = NCP(0x5747, "Get Sparse File Data Block Bit Map", 'file', has_length=0) + pkt.Request(23, [ + rec(8, 6, SourceFileHandle, ENC_BIG_ENDIAN ), + rec(14, 8, SourceFileOffset, ENC_BIG_ENDIAN ), + rec(22, 1, ExtentListFormat, ENC_BIG_ENDIAN ), +]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 1, ExtentListFormat ), + rec( 9, 1, RetExtentListCount, 
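+        # Editorial note, not part of the upstream file: var="x" binds this
+        # count field so that the records marked repeat="x" below loop that
+        # many times; here RetExtentListCount drives the zFileMap_* lists, and
+        # req_cond picks which extent-list layout to decode based on the
+        # ncp.ext_lst_format value already parsed from this reply.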
var="x" ), + rec( 10, 8, EndingOffset ), + srec(zFileMap_Allocation, req_cond="ncp.ext_lst_format==0", repeat="x" ), + srec(zFileMap_Logical, req_cond="ncp.ext_lst_format==1", repeat="x" ), + srec(zFileMap_Physical, req_cond="ncp.ext_lst_format==2", repeat="x" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8800, 0xff00]) + # 2222/5748, 87/72 + pkt = NCP(0x5748, "Read a File", 'file', has_length=0) + pkt.Request(24, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ), + rec( 20, 4, NumBytesLong, ENC_BIG_ENDIAN ), +]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, NumBytesLong, ENC_BIG_ENDIAN), + rec( 12, PROTO_LENGTH_UNKNOWN, Data64), + #decoded in packet-ncp2222.inc + # rec( NumBytesLong, 4, BytesActuallyTransferred64bit, ENC_BIG_ENDIAN), + ]) + pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0x9500, 0xa201, 0xfd00, 0xff1b]) + + # 2222/5749, 87/73 + pkt = NCP(0x5749, "Write to a File", 'file', has_length=0) + pkt.Request(24, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ), + rec( 20, 4, NumBytesLong, ENC_BIG_ENDIAN ), +]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xfd00, 0xff1b]) + + # 2222/5801, 8801 + pkt = NCP(0x5801, "Query Volume Audit Status", "auditing", has_length=0) + pkt.Request(12, [ + rec( 8, 4, ConnectionNumber ), + ]) + pkt.Reply(40, [ + rec(8, 32, NWAuditStatus ), + ]) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5802, 8802 + pkt = NCP(0x5802, "Add User Audit Property", "auditing", has_length=0) + pkt.Request(25, [ + rec(8, 4, AuditIDType ), + rec(12, 4, AuditID ), + rec(16, 4, AuditHandle ), + rec(20, 4, ObjectID ), + rec(24, 1, AuditFlag ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5803, 8803 + pkt = NCP(0x5803, "Add Auditor Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16]) + # 2222/5804, 8804 + pkt = NCP(0x5804, "Change Auditor Volume Password", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5805, 8805 + pkt = NCP(0x5805, "Check Auditor Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5806, 8806 + pkt = NCP(0x5806, "Delete User Audit Property", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff21]) + # 2222/5807, 8807 + pkt = NCP(0x5807, "Disable Auditing On A Volume", "auditing", 
has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5808, 8808 + pkt = NCP(0x5808, "Enable Auditing On A Volume", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16]) + # 2222/5809, 8809 + pkt = NCP(0x5809, "Query User Being Audited", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/580A, 88,10 + pkt = NCP(0x580A, "Read Audit Bit Map", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/580B, 88,11 + pkt = NCP(0x580B, "Read Audit File Configuration Header", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/580D, 88,13 + pkt = NCP(0x580D, "Remove Auditor Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/580E, 88,14 + pkt = NCP(0x580E, "Reset Audit File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + + # 2222/580F, 88,15 + pkt = NCP(0x580F, "Auditing NCP", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfb00, 0xfd00, 0xff16]) + # 2222/5810, 88,16 + pkt = NCP(0x5810, "Write Audit Bit Map", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5811, 88,17 + pkt = NCP(0x5811, "Write Audit File Configuration Header", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5812, 88,18 + pkt = NCP(0x5812, "Change Auditor Volume Password2", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 
0xbe00, 0xfd00, 0xff16]) + # 2222/5813, 88,19 + pkt = NCP(0x5813, "Return Audit Flags", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5814, 88,20 + pkt = NCP(0x5814, "Close Old Audit File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5816, 88,22 + pkt = NCP(0x5816, "Check Level Two Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16]) + # 2222/5817, 88,23 + pkt = NCP(0x5817, "Return Old Audit File List", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5818, 88,24 + pkt = NCP(0x5818, "Init Audit File Reads", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5819, 88,25 + pkt = NCP(0x5819, "Read Auditing File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/581A, 88,26 + pkt = NCP(0x581A, "Delete Old Audit File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/581E, 88,30 + pkt = NCP(0x581E, "Restart Volume auditing", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/581F, 88,31 + pkt = NCP(0x581F, "Set Volume Password", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16]) + # 2222/5901, 89,01 + pkt = NCP(0x5901, "Open/Create File or Subdirectory", "enhanced", has_length=0) + pkt.Request((37,290), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, OpenCreateMode ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 2, DesiredAccessRights ), + rec( 22, 4, DirectoryBase ), + rec( 26, 1, VolumeNumber ), + rec( 27, 1, HandleFlag ), + rec( 28, 1, DataTypeFlag ), + rec( 29, 5, Reserved5 ), + rec( 34, 1, PathCount, var="x" ), + rec( 35, (2,255), Path16, 
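+        # Editorial note, not part of the upstream file: a (min,max) size such
+        # as (2,255) marks a variable-length field, and info_str=(Field,
+        # first_fmt, next_fmt) feeds the Info column -- the first path
+        # component is rendered with first_fmt and each further repeat is
+        # appended with next_fmt.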
repeat="x", info_str=(Path16, "Open or Create File or Subdirectory: %s", "/%s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 1, OpenCreateAction ), + rec( 13, 1, Reserved ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, 
req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ), + srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9900, 0x9b03, 0x9c03, 0xa901, 0xa500, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/5902, 89/02 + pkt = NCP(0x5902, "Initialize Search", 'enhanced', has_length=0) + pkt.Request( (25,278), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 4, DirectoryBase ), + rec( 14, 1, VolumeNumber ), + rec( 15, 1, HandleFlag ), + rec( 16, 1, DataTypeFlag ), + rec( 17, 5, Reserved5 ), + rec( 22, 1, PathCount, var="x" ), + rec( 23, (2,255), Path16, repeat="x", info_str=(Path16, "Set Search Pointer to: %s", "/%s") ), + ]) + pkt.Reply(17, [ + rec( 8, 1, VolumeNumber ), + rec( 9, 4, DirectoryNumber ), + rec( 13, 4, DirectoryEntryNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5903, 89/03 + pkt = NCP(0x5903, "Search for File or Subdirectory", 'enhanced', has_length=0) + pkt.Request(26, [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 9, SeachSequenceStruct ), + rec( 25, 1, DataTypeFlag ), + # next field is dissected in packet-ncp2222.inc + #rec( 26, (2,255), SearchPattern16, info_str=(SearchPattern16, "Search for: %s", "/%s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, 
[ + rec( 8, 9, SeachSequenceStruct ), + rec( 17, 1, Reserved ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( 
ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ), + srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5904, 89/04 + pkt = NCP(0x5904, "Rename Or Move a File or Subdirectory", 'enhanced', has_length=0) + pkt.Request((42, 548), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, RenameFlag ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 12, SrcEnhNWHandlePathS1 ), + rec( 24, 1, PathCount, var="x" ), + rec( 25, 12, DstEnhNWHandlePathS1 ), + rec( 37, 1, PathCount, var="y" ), + rec( 38, (2, 255), Path16, repeat="x", info_str=(Path16, "Rename or Move: %s", "/%s") ), + rec( -1, (2,255), DestPath16, repeat="y" ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9200, 0x9600, + 0x9804, 0x9a00, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5905, 89/05 + pkt = NCP(0x5905, "Scan File or Subdirectory for Trustees", 'enhanced', has_length=0) + pkt.Request((31, 284), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, MaxReplyObjectIDCount ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 4, SequenceNumber ), + rec( 16, 4, DirectoryBase ), + rec( 20, 1, VolumeNumber ), + rec( 21, 1, HandleFlag ), + rec( 22, 1, DataTypeFlag ), + rec( 23, 5, Reserved5 ), + rec( 28, 1, PathCount, var="x" ), + rec( 29, (2, 255), Path16, repeat="x", info_str=(Path16, "Scan Trustees for: %s", "/%s") ), + ]) + pkt.Reply(20, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 2, ObjectIDCount, var="x" ), + rec( 14, 6, TrusteeStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 
0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5906, 89/06 + pkt = NCP(0x5906, "Obtain File or SubDirectory Information", 'enhanced', has_length=0) + pkt.Request((22), [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask, ENC_LITTLE_ENDIAN ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, DirectoryBase ), + rec( 20, 1, VolumeNumber ), + rec( 21, 1, HandleFlag ), + # + # Move to packet-ncp2222.inc + # The datatype flag indicates if the path is represented as ASCII or UTF8 + # ASCII has a 1 byte count field whereas UTF8 has a two byte count field. + # + #rec( 22, 1, DataTypeFlag ), + #rec( 23, 5, Reserved5 ), + #rec( 28, 1, PathCount, var="x" ), + #rec( 29, (2,255), Path16, repeat="x", info_str=(Path16, "Obtain Info for: %s", "/%s")), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + 
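+        # Editorial note, not part of the upstream file: replies in this family
+        # describe the same optional structs twice. The ncp.ext_info_newstyle == 0
+        # branch pads every masked-out struct with an explicit Pad* record,
+        # while this newstyle == 1 branch emits only the structs whose
+        # ReturnInfoMask bit is set. Each req_cond is an ordinary display-filter
+        # expression tested against fields already decoded from the matching
+        # request, e.g. the EAInfoStruct entry that follows:
+        #   srec( EAInfoStruct,
+        #         req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" )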
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ), + srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8700, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5907, 89/07 + pkt = NCP(0x5907, "Modify File or Subdirectory DOS Information", 'enhanced', has_length=0) + pkt.Request((69,322), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ModifyDOSInfoMask ), + rec( 14, 2, Reserved2 ), + rec( 16, 2, AttributesDef16 ), + rec( 18, 1, FileMode ), + rec( 19, 1, FileExtendedAttributes ), + rec( 20, 2, CreationDate ), + rec( 22, 2, CreationTime ), + rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ), + rec( 28, 2, ModifiedDate ), + rec( 30, 2, ModifiedTime ), + rec( 32, 4, ModifierID, ENC_BIG_ENDIAN ), + rec( 36, 2, ArchivedDate ), + rec( 38, 2, ArchivedTime ), + rec( 40, 4, ArchiverID, ENC_BIG_ENDIAN ), + rec( 44, 2, LastAccessedDate ), + rec( 46, 2, InheritedRightsMask ), + rec( 48, 2, InheritanceRevokeMask ), + rec( 50, 4, MaxSpace ), + rec( 54, 4, DirectoryBase ), + rec( 58, 1, VolumeNumber ), + rec( 59, 1, HandleFlag ), + rec( 60, 1, DataTypeFlag ), + rec( 61, 5, Reserved5 ), + rec( 66, 1, PathCount, var="x" ), + rec( 67, (2,255), Path16, repeat="x", info_str=(Path16, "Modify DOS Information for: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x7902, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8c01, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5908, 89/08 + pkt = NCP(0x5908, 
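+    # Editorial note, not part of the upstream file: headers such as
+    # "# 2222/5908, 89/08" give the NCP request type (0x2222) and then the
+    # function/subfunction pair, first in hex and then in decimal;
+    # has_length=0 declares the verb without the usual subfunction length word.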
"Delete a File or Subdirectory", 'enhanced', has_length=0) + pkt.Request((27,280), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 4, DirectoryBase ), + rec( 16, 1, VolumeNumber ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, DataTypeFlag ), + rec( 19, 5, Reserved5 ), + rec( 24, 1, PathCount, var="x" ), + rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Delete a File or Subdirectory: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8900, 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5909, 89/09 + pkt = NCP(0x5909, "Set Short Directory Handle", 'enhanced', has_length=0) + pkt.Request((27,280), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 1, DestDirHandle ), + rec( 11, 1, Reserved ), + rec( 12, 4, DirectoryBase ), + rec( 16, 1, VolumeNumber ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, DataTypeFlag ), + rec( 19, 5, Reserved5 ), + rec( 24, 1, PathCount, var="x" ), + rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Set Short Directory Handle to: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/590A, 89/10 + pkt = NCP(0x590A, "Add Trustee Set to File or Subdirectory", 'enhanced', has_length=0) + pkt.Request((37,290), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, AccessRightsMaskWord ), + rec( 14, 2, ObjectIDCount, var="y" ), + rec( -1, 6, TrusteeStruct, repeat="y" ), + rec( -1, 4, DirectoryBase ), + rec( -1, 1, VolumeNumber ), + rec( -1, 1, HandleFlag ), + rec( -1, 1, DataTypeFlag ), + rec( -1, 5, Reserved5 ), + rec( -1, 1, PathCount, var="x" ), + rec( -1, (2,255), Path16, repeat="x", info_str=(Path16, "Add Trustee Set to: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfc01, 0xfd00, 0xff16]) + # 2222/590B, 89/11 + pkt = NCP(0x590B, "Delete Trustee Set from File or SubDirectory", 'enhanced', has_length=0) + pkt.Request((34,287), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 2, ObjectIDCount, var="y" ), + rec( 12, 7, TrusteeStruct, repeat="y" ), + rec( 19, 4, DirectoryBase ), + rec( 23, 1, VolumeNumber ), + rec( 24, 1, HandleFlag ), + rec( 25, 1, DataTypeFlag ), + rec( 26, 5, Reserved5 ), + rec( 31, 1, PathCount, var="x" ), + rec( 32, (2,255), Path16, repeat="x", info_str=(Path16, "Delete Trustee Set from: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/590C, 89/12 + pkt = NCP(0x590C, "Allocate Short Directory Handle", 'enhanced', has_length=0) + pkt.Request((27,280), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, AllocateMode ), + rec( 12, 4, DirectoryBase ), + rec( 16, 1, VolumeNumber ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, DataTypeFlag ), + rec( 19, 5, Reserved5 ), + rec( 24, 1, PathCount, var="x" ), + rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Allocate Short Directory Handle to: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + srec( ReplyLevel2Struct, 
req_cond="ncp.alloc_reply_lvl2 == TRUE" ), + srec( ReplyLevel1Struct, req_cond="ncp.alloc_reply_lvl2 == FALSE" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) +# 2222/5910, 89/16 + pkt = NCP(0x5910, "Scan Salvageable Files", 'enhanced', has_length=0) + pkt.Request((33,286), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, ReturnInfoMask ), + rec( 12, 2, ExtendedInfo ), + rec( 14, 4, SequenceNumber ), + rec( 18, 4, DirectoryBase ), + rec( 22, 1, VolumeNumber ), + rec( 23, 1, HandleFlag ), + rec( 24, 1, DataTypeFlag ), + rec( 25, 5, Reserved5 ), + rec( 30, 1, PathCount, var="x" ), + rec( 31, (2,255), Path16, repeat="x", info_str=(Path16, "Scan for Deleted Files in: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, SequenceNumber ), + rec( 12, 2, DeletedTime ), + rec( 14, 2, DeletedDate ), + rec( 16, 4, DeletedID, ENC_BIG_ENDIAN ), + rec( 20, 4, VolumeID ), + rec( 24, 4, DirectoryBase ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( 
AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( ReferenceIDStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_id == 1)" ), + srec( NSAttributeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns_attr == 1)" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5911, 89/17 + pkt = NCP(0x5911, "Recover Salvageable File", 'enhanced', has_length=0) + pkt.Request((24,278), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 4, SequenceNumber ), + rec( 14, 4, VolumeID ), + rec( 18, 4, DirectoryBase ), + rec( 22, 1, DataTypeFlag ), + rec( 23, (1,255), FileName16, info_str=(FileName16, "Recover Deleted File: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5913, 89/19 + pkt = NCP(0x5913, "Get Name Space Information", 'enhanced', has_length=0) + pkt.Request(18, [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 1, DataTypeFlag ), + rec( 11, 1, VolumeNumber ), + rec( 12, 4, DirectoryBase ), + rec( 16, 2, NamesSpaceInfoMask ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + srec( FileName16Struct, req_cond="ncp.ns_info_mask_modify == TRUE" ), 
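+        # Editorial note, not part of the upstream file: for Get Name Space
+        # Information the request's NamesSpaceInfoMask bits (ncp.ns_info_mask_*)
+        # each gate one optional struct, so the reply is assembled from
+        # whichever bits were set.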
+ srec( FileAttributesStruct, req_cond="ncp.ns_info_mask_fatt == TRUE" ), + srec( CreationDateStruct, req_cond="ncp.ns_info_mask_cdate == TRUE" ), + srec( CreationTimeStruct, req_cond="ncp.ns_info_mask_ctime == TRUE" ), + srec( OwnerIDStruct, req_cond="ncp.ns_info_mask_owner == TRUE" ), + srec( ArchiveDateStruct, req_cond="ncp.ns_info_mask_adate == TRUE" ), + srec( ArchiveTimeStruct, req_cond="ncp.ns_info_mask_atime == TRUE" ), + srec( ArchiveIdStruct, req_cond="ncp.ns_info_mask_aid == TRUE" ), + srec( UpdateDateStruct, req_cond="ncp.ns_info_mask_udate == TRUE" ), + srec( UpdateTimeStruct, req_cond="ncp.ns_info_mask_utime == TRUE" ), + srec( UpdateIDStruct, req_cond="ncp.ns_info_mask_uid == TRUE" ), + srec( LastAccessStruct, req_cond="ncp.ns_info_mask_acc_date == TRUE" ), + srec( RightsInfoStruct, req_cond="ncp.ns_info_mask_max_acc_mask == TRUE" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5914, 89/20 + pkt = NCP(0x5914, "Search for File or Subdirectory Set", 'enhanced', has_length=0) + pkt.Request((28, 28), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 2, ReturnInfoCount ), + rec( 18, 9, SeachSequenceStruct ), + rec( 27, 1, DataTypeFlag ), + # next field is dissected in packet-ncp2222.inc + #rec( 28, (2,255), SearchPattern16 ), + ]) +# The reply packet is dissected in packet-ncp2222.inc + pkt.Reply(NO_LENGTH_CHECK, [ + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5916, 89/22 + pkt = NCP(0x5916, "Generate Directory Base and Volume Number", 'enhanced', has_length=0) + pkt.Request((27,280), [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, dstNSIndicator ), + rec( 12, 4, DirectoryBase ), + rec( 16, 1, VolumeNumber ), + rec( 17, 1, HandleFlag ), + rec( 18, 1, DataTypeFlag ), + rec( 19, 5, Reserved5 ), + rec( 24, 1, PathCount, var="x" ), + rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Get Volume and Directory Base from: %s", "/%s") ), + ]) + pkt.Reply(17, [ + rec( 8, 4, DirectoryBase ), + rec( 12, 4, DOSDirectoryBase ), + rec( 16, 1, VolumeNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5919, 89/25 + pkt = NCP(0x5919, "Set Name Space Information", 'enhanced', has_length=0) + pkt.Request(530, [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 1, VolumeNumber ), + rec( 11, 4, DirectoryBase ), + rec( 15, 2, NamesSpaceInfoMask ), + rec( 17, 1, DataTypeFlag ), + rec( 18, 512, NSSpecificInfo ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001, + 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, + 0xff16]) + # 2222/591C, 89/28 + pkt = NCP(0x591C, "Get Full Path String", 'enhanced', has_length=0) + pkt.Request((35,288), [ + rec( 8, 1, SrcNameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, PathCookieFlags ), + rec( 12, 4, Cookie1 ), + rec( 16, 4, Cookie2 ), + rec( 20, 4, DirectoryBase ), + rec( 24, 1, VolumeNumber ), + rec( 25, 1, HandleFlag ), + rec( 
26, 1, DataTypeFlag ), + rec( 27, 5, Reserved5 ), + rec( 32, 1, PathCount, var="x" ), + rec( 33, (2,255), Path16, repeat="x", info_str=(Path16, "Get Full Path from: %s", "/%s") ), + ]) + pkt.Reply((24,277), [ + rec( 8, 2, PathCookieFlags ), + rec( 10, 4, Cookie1 ), + rec( 14, 4, Cookie2 ), + rec( 18, 2, PathComponentSize ), + rec( 20, 2, PathComponentCount, var='x' ), + rec( 22, (2,255), Path16, repeat='x' ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001, + 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, + 0xff16]) + # 2222/591D, 89/29 + pkt = NCP(0x591D, "Get Effective Directory Rights", 'enhanced', has_length=0) + pkt.Request((31, 284), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DestNameSpace ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, DirectoryBase ), + rec( 20, 1, VolumeNumber ), + rec( 21, 1, HandleFlag ), + rec( 22, 1, DataTypeFlag ), + rec( 23, 5, Reserved5 ), + rec( 28, 1, PathCount, var="x" ), + rec( 29, (2,255), Path16, repeat="x", info_str=(Path16, "Get Effective Rights for: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 2, EffectiveRights, ENC_LITTLE_ENDIAN ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && 
(ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/591E, 89/30 + pkt = NCP(0x591E, "Open/Create File or Subdirectory", 'enhanced', has_length=0) + pkt.Request((41, 294), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 1, OpenCreateMode ), + rec( 11, 1, Reserved ), + rec( 12, 2, SearchAttributesLow ), + rec( 14, 2, Reserved2 ), + rec( 16, 2, ReturnInfoMask ), + rec( 18, 2, ExtendedInfo ), + rec( 20, 4, AttributesDef32 ), + rec( 24, 2, DesiredAccessRights ), + rec( 26, 4, DirectoryBase ), + rec( 30, 1, VolumeNumber ), + rec( 31, 1, HandleFlag ), + rec( 32, 1, DataTypeFlag ), + rec( 33, 5, Reserved5 ), + rec( 38, 1, PathCount, var="x" ), + rec( 39, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create File: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 1, OpenCreateAction ), + rec( 13, 1, Reserved ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && 
(ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/5920, 89/32 + pkt = NCP(0x5920, "Open/Create File or Subdirectory with Callback", 'enhanced', has_length=0) + pkt.Request((37, 290), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, OpenCreateMode ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 2, DesiredAccessRights ), + rec( 22, 4, DirectoryBase ), + rec( 26, 1, VolumeNumber ), + rec( 27, 1, HandleFlag ), + rec( 28, 1, DataTypeFlag ), + rec( 29, 5, Reserved5 ), + rec( 34, 1, PathCount, var="x" ), + rec( 35, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create with Op-Lock: %s", "/%s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ), + rec( 12, 
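+        # Editorial note, not part of the upstream file: the with-callback
+        # variants return OCRetFlags (op-lock grant state) where the plain
+        # 0x5901 open returns a Reserved byte, and MakeExpert("file_rights")
+        # below routes the decoded rights to the expert-info hook in
+        # packet-ncp2222.inc.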
1, OpenCreateAction ), + rec( 13, 1, OCRetFlags ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( 
ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ), + srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9400, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/5921, 89/33 + pkt = NCP(0x5921, "Open/Create File or Subdirectory II with Callback", 'enhanced', has_length=0) + pkt.Request((41, 294), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 1, OpenCreateMode ), + rec( 11, 1, Reserved ), + rec( 12, 2, SearchAttributesLow ), + rec( 14, 2, Reserved2 ), + rec( 16, 2, ReturnInfoMask ), + rec( 18, 2, ExtendedInfo ), + rec( 20, 4, AttributesDef32 ), + rec( 24, 2, DesiredAccessRights ), + rec( 26, 4, DirectoryBase ), + rec( 30, 1, VolumeNumber ), + rec( 31, 1, HandleFlag ), + rec( 32, 1, DataTypeFlag ), + rec( 33, 5, Reserved5 ), + rec( 38, 1, PathCount, var="x" ), + rec( 39, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create II with Op-Lock: %s", "/%s") ), + ]) + pkt.Reply( NO_LENGTH_CHECK, [ + rec( 8, 4, FileHandle ), + rec( 12, 1, OpenCreateAction ), + rec( 13, 1, OCRetFlags ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, 
req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( ReferenceIDStruct, 
req_cond="ncp.ret_info_mask_id == 1" ), + srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ), + rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ), + srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ), + rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ), + srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ), + srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ), + srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ), + srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ), + srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ), + srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ), + srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ), + srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ), + srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ), + srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ), + srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ), + srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + pkt.MakeExpert("file_rights") + # 2222/5923, 89/35 + pkt = NCP(0x5923, "Modify DOS Attributes on a File or Subdirectory", 'enhanced', has_length=0) + pkt.Request((35, 288), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Flags ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 4, AttributesDef32 ), + rec( 20, 4, DirectoryBase ), + rec( 24, 1, VolumeNumber ), + rec( 25, 1, HandleFlag ), + rec( 26, 1, DataTypeFlag ), + rec( 27, 5, Reserved5 ), + rec( 32, 1, PathCount, var="x" ), + rec( 33, (2,255), Path16, repeat="x", info_str=(Path16, "Modify DOS Attributes for: %s", "/%s") ), + ]) + pkt.Reply(24, [ + rec( 8, 4, ItemsChecked ), + rec( 12, 4, ItemsChanged ), + rec( 16, 4, AttributeValidFlag ), + rec( 20, 4, AttributesDef32 ), + ]) + pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5927, 89/39 + pkt = NCP(0x5927, "Get Directory Disk Space Restriction", 'enhanced', has_length=0) + pkt.Request((26, 279), [ + rec( 8, 1, NameSpace ), + rec( 9, 2, Reserved2 ), + rec( 11, 4, DirectoryBase ), + rec( 15, 1, VolumeNumber ), + rec( 16, 1, HandleFlag ), + rec( 17, 1, DataTypeFlag ), + rec( 18, 5, Reserved5 ), + rec( 23, 1, PathCount, var="x" ), + rec( 24, (2,255), Path16, repeat="x", info_str=(Path16, "Get Disk Space Restriction for: %s", "/%s") ), + ]) + pkt.Reply(18, [ + rec( 8, 1, NumberOfEntries, var="x" ), + rec( 9, 9, SpaceStruct, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, + 0xff16]) + # 2222/5928, 89/40 + pkt = NCP(0x5928, "Search for File or Subdirectory Set (Extended Errors)", 'enhanced', has_length=0) + pkt.Request((30, 283), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, DataStream ), + rec( 10, 2, SearchAttributesLow ), + rec( 12, 2, ReturnInfoMask ), + rec( 14, 2, ExtendedInfo ), + rec( 16, 2, ReturnInfoCount ), + rec( 18, 9, SeachSequenceStruct ), + rec( 27, 1, DataTypeFlag 
), + rec( 28, (2,255), SearchPattern16, info_str=(SearchPattern16, "Search for: %s", ", %s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( 8, 9, SeachSequenceStruct ), + rec( 17, 1, MoreFlag ), + rec( 18, 2, InfoCount ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ), + srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ), + srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ), + srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ), + srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ), + srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ), + srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ), + srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ), + srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ), + srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ), + srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ), + srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ), + srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ), + srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ), + srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ), + srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ), + srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ), + srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ), + srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ), + srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ), + srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ), + srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 
1) && (ncp.ret_info_mask_eattr == 1)" ), + srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ), + srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ), + srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5929, 89/41 + pkt = NCP(0x5929, "Get Directory Disk Space Restriction 64 Bit Aware", 'enhanced', has_length=0) + pkt.Request((26, 279), [ + rec( 8, 1, NameSpace ), + rec( 9, 1, Reserved ), + rec( 10, 1, InfoLevelNumber ), + rec( 11, 4, DirectoryBase ), + rec( 15, 1, VolumeNumber ), + rec( 16, 1, HandleFlag ), + rec( 17, 1, DataTypeFlag ), + rec( 18, 5, Reserved5 ), + rec( 23, 1, PathCount, var="x" ), + rec( 24, (2,255), Path16, repeat="x", info_str=(Path16, "Get Disk Space Restriction for: %s", "/%s") ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec( -1, 8, MaxSpace64, req_cond="(ncp.info_level_num == 0)" ), + rec( -1, 8, MinSpaceLeft64, req_cond="(ncp.info_level_num == 0)" ), + rec( -1, 1, NumberOfEntries, var="x", req_cond="(ncp.info_level_num == 1)" ), + srec( DirDiskSpaceRest64bit, repeat="x", req_cond="(ncp.info_level_num == 1)" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, + 0xff16]) + # 2222/5932, 89/50 + pkt = NCP(0x5932, "Get Object Effective Rights", "enhanced", has_length=0) + pkt.Request(25, [ + rec( 8, 1, NameSpace ), + rec( 9, 4, ObjectID ), + rec( 13, 4, DirectoryBase ), + rec( 17, 1, VolumeNumber ), + rec( 18, 1, HandleFlag ), + rec( 19, 1, DataTypeFlag ), + rec( 20, 5, Reserved5 ), + ]) + pkt.Reply( 10, [ + rec( 8, 2, TrusteeRights ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x9b00, 0x9c03, 0xa901, 0xaa00]) + # 2222/5934, 89/52 + pkt = NCP(0x5934, "Write Extended Attribute", 'enhanced', has_length=0 ) + pkt.Request((36,98), [ + rec( 8, 2, EAFlags ), + rec( 10, 4, EAHandleOrNetWareHandleOrVolume ), + rec( 14, 4, ReservedOrDirectoryNumber ), + rec( 18, 4, TtlWriteDataSize ), + rec( 22, 4, FileOffset ), + rec( 26, 4, EAAccessFlag ), + rec( 30, 1, DataTypeFlag ), + rec( 31, 2, EAValueLength, var='x' ), + rec( 33, (2,64), EAKey, info_str=(EAKey, "Write Extended Attribute: %s", ", %s") ), + rec( -1, 1, EAValueRep, repeat='x' ), + ]) + pkt.Reply(20, [ + rec( 8, 4, EAErrorCodes ), + rec( 12, 4, EABytesWritten ), + rec( 16, 4, NewEAHandle ), + ]) + pkt.CompletionCodes([0x0000, 0xc800, 0xc900, 0xcb00, 0xce00, 0xcf00, 0xd101, + 0xd203, 0xa901, 0xaa00, 0xd301, 0xd402]) + # 2222/5935, 89/53 + pkt = NCP(0x5935, "Read Extended Attribute", 'enhanced', has_length=0 ) + pkt.Request((31,541), [ + rec( 8, 2, EAFlags ), + rec( 10, 4, EAHandleOrNetWareHandleOrVolume ), + rec( 14, 4, ReservedOrDirectoryNumber ), + rec( 18, 4, FileOffset ), + rec( 22, 4, InspectSize ), + rec( 26, 1, DataTypeFlag ), + rec( 27, 2, MaxReadDataReplySize ), + rec( 29, (2,512), EAKey, info_str=(EAKey, "Read Extended Attribute: %s", ", %s") ), + ]) + pkt.Reply((26,536), [ + rec( 8, 4, EAErrorCodes ), + rec( 12, 4, TtlValuesLength ), + rec( 16, 4, NewEAHandle ), + rec( 20, 4, EAAccessFlag ), + rec( 24, (2,512), EAValue ), + ]) + pkt.CompletionCodes([0x0000, 0xa901, 0xaa00, 0xc900, 0xce00, 0xcf00, 0xd101, + 0xd301]) +
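# The EA verbs above (0x5934-0x5935) appear to share one multiplexed handle: + # EAFlags selects whether EAHandleOrNetWareHandleOrVolume carries an open EA + # handle, a NetWare file handle or a bare volume number (in which case + # ReservedOrDirectoryNumber holds the directory entry), and each reply returns + # NewEAHandle for the caller to pass back on its next EA call. +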
# 2222/5936, 89/54 + pkt = NCP(0x5936, "Enumerate Extended Attribute", 'enhanced', has_length=0 ) + pkt.Request((27,537), [ + rec( 8, 2, EAFlags ), + rec( 10, 4, EAHandleOrNetWareHandleOrVolume ), + rec( 14, 4, ReservedOrDirectoryNumber ), + rec( 18, 4, InspectSize ), + rec( 22, 2, SequenceNumber ), + rec( 24, 1, DataTypeFlag ), + rec( 25, (2,512), EAKey, info_str=(EAKey, "Enumerate Extended Attribute: %s", ", %s") ), + ]) + pkt.Reply(28, [ + rec( 8, 4, EAErrorCodes ), + rec( 12, 4, TtlEAs ), + rec( 16, 4, TtlEAsDataSize ), + rec( 20, 4, TtlEAsKeySize ), + rec( 24, 4, NewEAHandle ), + ]) + pkt.CompletionCodes([0x0000, 0x8800, 0xa901, 0xaa00, 0xc900, 0xce00, 0xcf00, 0xd101, + 0xd301]) + # 2222/5947, 89/71 + pkt = NCP(0x5947, "Scan Volume Trustee Object Paths", 'enhanced', has_length=0) + pkt.Request(21, [ + rec( 8, 4, VolumeID ), + rec( 12, 4, ObjectID ), + rec( 16, 4, SequenceNumber ), + rec( 20, 1, DataTypeFlag ), + ]) + pkt.Reply((20,273), [ + rec( 8, 4, SequenceNumber ), + rec( 12, 4, ObjectID ), + rec( 16, 1, TrusteeAccessMask ), + rec( 17, 1, PathCount, var="x" ), + rec( 18, (2,255), Path16, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16]) + # 2222/5A00, 90/00 + pkt = NCP(0x5A00, "Parse Tree", 'file') + pkt.Request(46, [ + rec( 10, 4, InfoMask ), + rec( 14, 4, Reserved4 ), + rec( 18, 4, Reserved4 ), + rec( 22, 4, limbCount ), + rec( 26, 4, limbFlags ), + rec( 30, 4, VolumeNumberLong ), + rec( 34, 4, DirectoryBase ), + rec( 38, 4, limbScanNum ), + rec( 42, 4, NameSpace ), + ]) + pkt.Reply(32, [ + rec( 8, 4, limbCount ), + rec( 12, 4, ItemsCount ), + rec( 16, 4, nextLimbScanNum ), + rec( 20, 4, CompletionCode ), + rec( 24, 1, FolderFlag ), + rec( 25, 3, Reserved ), + rec( 28, 4, DirectoryBase ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16]) + # 2222/5A0A, 90/10 + pkt = NCP(0x5A0A, "Get Reference Count from Dir Entry Number", 'file') + pkt.Request(19, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, DirectoryBase ), + rec( 18, 1, NameSpace ), + ]) + pkt.Reply(12, [ + rec( 8, 4, ReferenceCount ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16]) + # 2222/5A0B, 90/11 + pkt = NCP(0x5A0B, "Get Reference Count from Dir Handle", 'file') + pkt.Request(14, [ + rec( 10, 4, DirHandle ), + ]) + pkt.Reply(12, [ + rec( 8, 4, ReferenceCount ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16]) + # 2222/5A0C, 90/12 + pkt = NCP(0x5A0C, "Set Compressed File Size", 'file') + pkt.Request(20, [ + rec( 10, 6, FileHandle ), + rec( 16, 4, SuggestedFileSize ), + ]) + pkt.Reply(16, [ + rec( 8, 4, OldFileSize ), + rec( 12, 4, NewFileSize ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16]) + # 2222/5A80, 90/128 + pkt = NCP(0x5A80, "Move File Data To Data Migration", 'migration') + pkt.Request(27, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, DirectoryEntryNumber ), + rec( 18, 1, NameSpace ), + rec( 19, 3, Reserved ), + rec( 22, 4, SupportModuleID ), + rec( 26, 1, DMFlags ), + ]) + pkt.Reply(8) + 
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A81, 90/129 + pkt = NCP(0x5A81, "Data Migration File Information", 'migration') + pkt.Request(19, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, DirectoryEntryNumber ), + rec( 18, 1, NameSpace ), + ]) + pkt.Reply(24, [ + rec( 8, 4, SupportModuleID ), + rec( 12, 4, RestoreTime ), + rec( 16, 4, DMInfoEntries, var="x" ), + rec( 20, 4, DataSize, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A82, 90/130 + pkt = NCP(0x5A82, "Volume Data Migration Status", 'migration') + pkt.Request(18, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, SupportModuleID ), + ]) + pkt.Reply(32, [ + rec( 8, 4, NumOfFilesMigrated ), + rec( 12, 4, TtlMigratedSize ), + rec( 16, 4, SpaceUsed ), + rec( 20, 4, LimboUsed ), + rec( 24, 4, SpaceMigrated ), + rec( 28, 4, FileLimbo ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A83, 90/131 + pkt = NCP(0x5A83, "Migrator Status Info", 'migration') + pkt.Request(10) + pkt.Reply(20, [ + rec( 8, 1, DMPresentFlag ), + rec( 9, 3, Reserved3 ), + rec( 12, 4, DMmajorVersion ), + rec( 16, 4, DMminorVersion ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A84, 90/132 + pkt = NCP(0x5A84, "Data Migration Support Module Information", 'migration') + pkt.Request(18, [ + rec( 10, 1, DMInfoLevel ), + rec( 11, 3, Reserved3), + rec( 14, 4, SupportModuleID ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + srec( DMInfoLevel0, req_cond="ncp.dm_info_level == 0x00" ), + srec( DMInfoLevel1, req_cond="ncp.dm_info_level == 0x01" ), + srec( DMInfoLevel2, req_cond="ncp.dm_info_level == 0x02" ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A85, 90/133 + pkt = NCP(0x5A85, "Move File Data From Data Migration", 'migration') + pkt.Request(19, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, DirectoryEntryNumber ), + rec( 18, 1, NameSpace ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A86, 90/134 + pkt = NCP(0x5A86, "Get/Set Default Read-Write Support Module ID", 'migration') + pkt.Request(18, [ + rec( 10, 1, GetSetFlag ), + rec( 11, 3, Reserved3 ), + rec( 14, 4, SupportModuleID ), + ]) + pkt.Reply(12, [ + rec( 8, 4, SupportModuleID ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A87, 90/135 + pkt = NCP(0x5A87, "Data Migration Support Module Capacity Request", 'migration') + pkt.Request(22, [ + rec( 10, 4, SupportModuleID ), + rec( 14, 4, VolumeNumberLong ), + rec( 18, 4, DirectoryBase ), + ]) + pkt.Reply(20, [ + rec( 8, 4, BlockSizeInSectors ), + rec( 12, 4, TotalBlocks ), + rec( 16, 4, UsedBlocks ), + ]) + pkt.CompletionCodes([0x0000, 0x7e01, 
0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A88, 90/136 + pkt = NCP(0x5A88, "RTDM Request", 'migration') + pkt.Request(15, [ + rec( 10, 4, Verb ), + rec( 14, 1, VerbData ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5A96, 90/150 + pkt = NCP(0x5A96, "File Migration Request", 'file') + pkt.Request(22, [ + rec( 10, 4, VolumeNumberLong ), + rec( 14, 4, DirectoryBase ), + rec( 18, 4, FileMigrationState ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfb00, 0xff16]) + # 2222/5B, 91 + pkt = NCP(0x5B, "NMAS Graded Authentication", 'nmas') + #Need info on this packet structure + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # SecretStore data is dissected by packet-ncp-sss.c + # 2222/5C01, 92/01 + pkt = NCP(0x5C01, "SecretStore Services (Ping Server)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C02, 92/02 + pkt = NCP(0x5C02, "SecretStore Services (Fragment)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C03, 92/03 + pkt = NCP(0x5C03, "SecretStore Services (Write App Secrets)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C04, 92/04 + pkt = NCP(0x5C04, "SecretStore Services (Add Secret ID)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C05, 92/05 + pkt = NCP(0x5C05, "SecretStore Services (Remove Secret ID)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C06, 92/06 + pkt = NCP(0x5C06, "SecretStore Services (Remove SecretStore)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C07, 92/07 + pkt = NCP(0x5C07, "SecretStore Services (Enumerate Secret IDs)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C08, 92/08 + pkt = NCP(0x5C08, "SecretStore Services (Unlock Store)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 
0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C09, 92/09 + pkt = NCP(0x5C09, "SecretStore Services (Set Master Password)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # 2222/5C0a, 92/10 + pkt = NCP(0x5C0a, "SecretStore Services (Get Service Information)", 'sss', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501, + 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b, + 0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16]) + # NMAS packets are dissected in packet-ncp-nmas.c + # 2222/5E01, 94/01 + pkt = NCP(0x5E01, "NMAS Communications Packet (Ping)", 'nmas', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfb09, 0xff08]) + # 2222/5E02, 94/02 + pkt = NCP(0x5E02, "NMAS Communications Packet (Fragment)", 'nmas', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfb09, 0xff08]) + # 2222/5E03, 94/03 + pkt = NCP(0x5E03, "NMAS Communications Packet (Abort)", 'nmas', 0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfb09, 0xff08]) + # 2222/61, 97 + pkt = NCP(0x61, "Get Big Packet NCP Max Packet Size", 'connection') + pkt.Request(10, [ + rec( 7, 2, ProposedMaxSize, ENC_BIG_ENDIAN, info_str=(ProposedMaxSize, "Get Big Max Packet Size - %d", ", %d") ), + rec( 9, 1, SecurityFlag ), + ]) + pkt.Reply(13, [ + rec( 8, 2, AcceptedMaxSize, ENC_BIG_ENDIAN ), + rec( 10, 2, EchoSocket, ENC_BIG_ENDIAN ), + rec( 12, 1, SecurityFlag ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/62, 98 + pkt = NCP(0x62, "Negotiate NDS connection buffer size", 'connection') + pkt.Request(15, [ + rec( 7, 8, ProposedMaxSize64, ENC_BIG_ENDIAN, info_str=(ProposedMaxSize64, "Negotiate NDS connection - %d", ", %d") ), + ]) + pkt.Reply(18, [ + rec( 8, 8, AcceptedMaxSize64, ENC_BIG_ENDIAN ), + rec( 16, 2, EchoSocket, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/63, 99 + pkt = NCP(0x63, "Undocumented Packet Burst", 'pburst') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/64, 100 + pkt = NCP(0x64, "Undocumented Packet Burst", 'pburst') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/65, 101 + pkt = NCP(0x65, "Packet Burst Connection Request", 'pburst') + pkt.Request(25, [ + rec( 7, 4, LocalConnectionID, ENC_BIG_ENDIAN ), + rec( 11, 4, LocalMaxPacketSize, ENC_BIG_ENDIAN ), + rec( 15, 2, LocalTargetSocket, ENC_BIG_ENDIAN ), + rec( 17, 4, LocalMaxSendSize, ENC_BIG_ENDIAN ), + rec( 21, 4, LocalMaxRecvSize, ENC_BIG_ENDIAN ), + ]) + pkt.Reply(16, [ + rec( 8, 4, RemoteTargetID, ENC_BIG_ENDIAN ), + rec( 12, 4, RemoteMaxPacketSize, ENC_BIG_ENDIAN ), + ]) + pkt.CompletionCodes([0x0000]) + # 2222/66, 102 + pkt = NCP(0x66, "Undocumented Packet Burst", 'pburst') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/67, 103 + pkt = NCP(0x67, "Undocumented Packet Burst", 'pburst') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000]) + # 2222/6801, 104/01 + pkt = NCP(0x6801, "Ping for NDS NCP", "nds", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x8100, 0xfb04, 0xfe0c]) + # 2222/6802, 104/02 + # + # XXX - if FraggerHandle is not 0xffffffff, this is not the + # first fragment, so we can only dissect this by reassembling; + # the fields after "Fragment Handle" are bogus for 
non-0xffffffff + # fragments, so we shouldn't dissect them. This is all handled in packet-ncp2222.inc. + # + pkt = NCP(0x6802, "Send NDS Fragmented Request/Reply", "nds", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0xac00, 0xfd01]) + # 2222/6803, 104/03 + pkt = NCP(0x6803, "Fragment Close", "nds", has_length=0) + pkt.Request(12, [ + rec( 8, 4, FraggerHandle ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xff00]) + # 2222/6804, 104/04 + pkt = NCP(0x6804, "Return Bindery Context", "nds", has_length=0) + pkt.Request(8) + pkt.Reply((9, 263), [ + rec( 8, (1,255), binderyContext ), + ]) + pkt.CompletionCodes([0x0000, 0xfe0c, 0xff00]) + # 2222/6805, 104/05 + pkt = NCP(0x6805, "Monitor NDS Connection", "nds", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/6806, 104/06 + pkt = NCP(0x6806, "Return NDS Statistics", "nds", has_length=0) + pkt.Request(10, [ + rec( 8, 2, NDSRequestFlags ), + ]) + pkt.Reply(8) + #Need to investigate how to decode Statistics Return Value + pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00]) + # 2222/6807, 104/07 + pkt = NCP(0x6807, "Clear NDS Statistics", "nds", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00]) + # 2222/6808, 104/08 + pkt = NCP(0x6808, "Reload NDS Software", "nds", has_length=0) + pkt.Request(8) + pkt.Reply(12, [ + rec( 8, 4, NDSStatus ), + ]) + pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68C8, 104/200 + pkt = NCP(0x68C8, "Query Container Audit Status", "auditing", has_length=0) + pkt.Request(12, [ + rec( 8, 4, ConnectionNumber ), + ]) + pkt.Reply(40, [ + rec(8, 32, NWAuditStatus ), + ]) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68CA, 104/202 + pkt = NCP(0x68CA, "Add Auditor Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68CB, 104/203 + pkt = NCP(0x68CB, "Change Auditor Container Password", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68CC, 104/204 + pkt = NCP(0x68CC, "Check Auditor Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68CE, 104/206 + pkt = NCP(0x68CE, "Disable Container Auditing", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68CF, 104/207 + pkt = NCP(0x68CF, "Enable Container Auditing", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68D1, 104/209 + pkt = NCP(0x68D1, "Read Audit File Header", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68D3, 104/211 + pkt = NCP(0x68D3, "Remove Auditor Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68D4, 104/212 + pkt = NCP(0x68D4, "Reset Audit File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68D6, 104/214 + pkt = NCP(0x68D6, "Write Audit File Configuration Header", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 
0xfe0c, 0xff00]) + # 2222/68D7, 104/215 + pkt = NCP(0x68D7, "Change Auditor Container Password2", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68D8, 104/216 + pkt = NCP(0x68D8, "Return Audit Flags", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68D9, 104/217 + pkt = NCP(0x68D9, "Close Old Audit File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68DB, 104/219 + pkt = NCP(0x68DB, "Check Level Two Access", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68DC, 104/220 + pkt = NCP(0x68DC, "Check Object Audited", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68DD, 104/221 + pkt = NCP(0x68DD, "Change Object Audited", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68DE, 104/222 + pkt = NCP(0x68DE, "Return Old Audit File List", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68DF, 104/223 + pkt = NCP(0x68DF, "Init Audit File Reads", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68E0, 104/224 + pkt = NCP(0x68E0, "Read Auditing File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68E1, 104/225 + pkt = NCP(0x68E1, "Delete Old Audit File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68E5, 104/229 + pkt = NCP(0x68E5, "Set Audit Password", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/68E7, 104/231 + pkt = NCP(0x68E7, "External Audit Append To File", "auditing", has_length=0) + pkt.Request(8) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00]) + # 2222/69, 105 + pkt = NCP(0x69, "Log File", 'sync') + pkt.Request( (12, 267), [ + rec( 7, 1, DirHandle ), + rec( 8, 1, LockFlag ), + rec( 9, 2, TimeoutLimit ), + rec( 11, (1, 256), FilePath, info_str=(FilePath, "Log File: %s", "/%s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x9600, 0xfe0d, 0xff01]) + # 2222/6A, 106 + pkt = NCP(0x6A, "Lock File Set", 'sync') + pkt.Request( 9, [ + rec( 7, 2, TimeoutLimit ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x9600, 0xfe0d, 0xff01]) + # 2222/6B, 107 + pkt = NCP(0x6B, "Log Logical Record", 'sync') + pkt.Request( (11, 266), [ + rec( 7, 1, LockFlag ), + rec( 8, 2, TimeoutLimit ), + rec( 10, (1, 256), SynchName, info_str=(SynchName, "Log Logical Record: %s", ", %s") ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7f00, 0x9600, 0xfe0d, 0xff01]) + # 2222/6C, 108 + pkt = NCP(0x6C, "Lock Logical Record Set", 'sync') + pkt.Request( 10, [ + rec( 7, 1, LockFlag ), + rec( 8, 2, TimeoutLimit ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7f00, 0x9600, 0xfe0d, 0xff01]) + # 2222/6D, 109 + pkt = NCP(0x6D, "Log Physical Record", 'sync') + pkt.Request(24, [ + rec( 7, 1, LockFlag ), + rec( 8, 6, FileHandle ), + rec( 14, 4, 
LockAreasStartOffset ), + rec( 18, 4, LockAreaLen ), + rec( 22, 2, LockTimeout ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01]) + # 2222/6E, 110 + pkt = NCP(0x6E, "Lock Physical Record Set", 'sync') + pkt.Request(10, [ + rec( 7, 1, LockFlag ), + rec( 8, 2, LockTimeout ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01]) + # 2222/6F00, 111/00 + pkt = NCP(0x6F00, "Open/Create a Semaphore", 'sync', has_length=0) + pkt.Request((10,521), [ + rec( 8, 1, InitialSemaphoreValue ), + rec( 9, (1, 512), SemaphoreName, info_str=(SemaphoreName, "Open/Create Semaphore: %s", ", %s") ), + ]) + pkt.Reply(13, [ + rec( 8, 4, SemaphoreHandle ), + rec( 12, 1, SemaphoreOpenCount ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xff01]) + # 2222/6F01, 111/01 + pkt = NCP(0x6F01, "Examine Semaphore", 'sync', has_length=0) + pkt.Request(12, [ + rec( 8, 4, SemaphoreHandle ), + ]) + pkt.Reply(10, [ + rec( 8, 1, SemaphoreValue ), + rec( 9, 1, SemaphoreOpenCount ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xff01]) + # 2222/6F02, 111/02 + pkt = NCP(0x6F02, "Wait On (P) Semaphore", 'sync', has_length=0) + pkt.Request(14, [ + rec( 8, 4, SemaphoreHandle ), + rec( 12, 2, LockTimeout ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01]) + # 2222/6F03, 111/03 + pkt = NCP(0x6F03, "Signal (V) Semaphore", 'sync', has_length=0) + pkt.Request(12, [ + rec( 8, 4, SemaphoreHandle ), + ]) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01]) + # 2222/6F04, 111/04 + pkt = NCP(0x6F04, "Close Semaphore", 'sync', has_length=0) + pkt.Request(12, [ + rec( 8, 4, SemaphoreHandle ), + ]) + pkt.Reply(10, [ + rec( 8, 1, SemaphoreOpenCount ), + rec( 9, 1, SemaphoreShareCount ), + ]) + pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01]) + # 2222/70, 112 + pkt = NCP(0x70, "Clear and Get Waiting Lock Completion", 'sync') + pkt.Request(7) + pkt.Reply(8) + pkt.CompletionCodes([0x0000, 0x9b00, 0x9c03, 0xff1a]) + # 2222/7201, 114/01 + pkt = NCP(0x7201, "Timesync Get Time", 'tsync') + pkt.Request(10) + pkt.Reply(32, [ + rec( 8, 12, theTimeStruct ), + rec( 20, 8, eventOffset ), + rec( 28, 4, eventTime ), + ]) + pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00]) + # 2222/7202, 114/02 + pkt = NCP(0x7202, "Timesync Exchange Time", 'tsync') + pkt.Request((63,112), [ + rec( 10, 4, protocolFlags ), + rec( 14, 4, nodeFlags ), + rec( 18, 8, sourceOriginateTime ), + rec( 26, 8, targetReceiveTime ), + rec( 34, 8, targetTransmitTime ), + rec( 42, 8, sourceReturnTime ), + rec( 50, 8, eventOffset ), + rec( 58, 4, eventTime ), + rec( 62, (1,50), ServerNameLen, info_str=(ServerNameLen, "Timesync Exchange Time: %s", ", %s") ), + ]) + pkt.Reply((64,113), [ + rec( 8, 3, Reserved3 ), + rec( 11, 4, protocolFlags ), + rec( 15, 4, nodeFlags ), + rec( 19, 8, sourceOriginateTime ), + rec( 27, 8, targetReceiveTime ), + rec( 35, 8, targetTransmitTime ), + rec( 43, 8, sourceReturnTime ), + rec( 51, 8, eventOffset ), + rec( 59, 4, eventTime ), + rec( 63, (1,50), ServerNameLen ), + ]) + pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00]) + # 2222/7205, 114/05 + pkt = NCP(0x7205, "Timesync Get Server List", 'tsync') + pkt.Request(14, [ + rec( 10, 4, StartNumber ), + ]) + pkt.Reply(66, [ + rec( 8, 4, nameType ), + rec( 12, 48, ServerName ), + rec( 60, 4, serverListFlags ), + rec( 64, 2, startNumberFlag ), + ]) + pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00]) + # 2222/7206, 114/06 + pkt = NCP(0x7206, "Timesync Set 
Server List", 'tsync') + pkt.Request(14, [ + rec( 10, 4, StartNumber ), + ]) + pkt.Reply(66, [ + rec( 8, 4, nameType ), + rec( 12, 48, ServerName ), + rec( 60, 4, serverListFlags ), + rec( 64, 2, startNumberFlag ), + ]) + pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00]) + # 2222/720C, 114/12 + pkt = NCP(0x720C, "Timesync Get Version", 'tsync') + pkt.Request(10) + pkt.Reply(12, [ + rec( 8, 4, version ), + ]) + pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00]) + # 2222/7B01, 123/01 + pkt = NCP(0x7B01, "Get Cache Information", 'stats') + pkt.Request(10) + pkt.Reply(288, [ + rec(8, 4, CurrentServerTime, ENC_LITTLE_ENDIAN), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 104, Counters ), + rec(120, 40, ExtraCacheCntrs ), + rec(160, 40, MemoryCounters ), + rec(200, 48, TrendCounters ), + rec(248, 40, CacheInfo ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xff00]) + # 2222/7B02, 123/02 + pkt = NCP(0x7B02, "Get File Server Information", 'stats') + pkt.Request(10) + pkt.Reply(150, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, NCPStaInUseCnt ), + rec(20, 4, NCPPeakStaInUse ), + rec(24, 4, NumOfNCPReqs ), + rec(28, 4, ServerUtilization ), + rec(32, 96, ServerInfo ), + rec(128, 22, FileServerCounters ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B03, 123/03 + pkt = NCP(0x7B03, "NetWare File System Information", 'stats') + pkt.Request(11, [ + rec(10, 1, FileSystemID ), + ]) + pkt.Reply(68, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 52, FileSystemInfo ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B04, 123/04 + pkt = NCP(0x7B04, "User Information", 'stats') + pkt.Request(14, [ + rec(10, 4, ConnectionNumber, ENC_LITTLE_ENDIAN ), + ]) + pkt.Reply((85, 132), [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 68, UserInformation ), + rec(84, (1, 48), UserName ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B05, 123/05 + pkt = NCP(0x7B05, "Packet Burst Information", 'stats') + pkt.Request(10) + pkt.Reply(216, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 200, PacketBurstInformation ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B06, 123/06 + pkt = NCP(0x7B06, "IPX SPX Information", 'stats') + pkt.Request(10) + pkt.Reply(94, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 34, IPXInformation ), + rec(50, 44, SPXInformation ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B07, 123/07 + pkt = NCP(0x7B07, "Garbage Collection Information", 'stats') + pkt.Request(10) + pkt.Reply(40, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, FailedAllocReqCnt ), + rec(20, 4, NumberOfAllocs ), + rec(24, 4, NoMoreMemAvlCnt ), + rec(28, 4, NumOfGarbageColl ), + rec(32, 4, FoundSomeMem ), + rec(36, 4, NumOfChecks ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B08, 123/08 + pkt = NCP(0x7B08, "CPU Information", 'stats') + 
pkt.Request(14, [ + rec(10, 4, CPUNumber ), + ]) + pkt.Reply(51, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, NumberOfCPUs ), + rec(20, 31, CPUInformation ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B09, 123/09 + pkt = NCP(0x7B09, "Volume Switch Information", 'stats') + pkt.Request(14, [ + rec(10, 4, StartNumber ) + ]) + pkt.Reply(28, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, TotalLFSCounters ), + rec(20, 4, CurrentLFSCounters, var="x"), + rec(24, 4, LFSCounters, repeat="x"), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B0A, 123/10 + pkt = NCP(0x7B0A, "Get NLM Loaded List", 'stats') + pkt.Request(14, [ + rec(10, 4, StartNumber ) + ]) + pkt.Reply(28, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, NLMcount ), + rec(20, 4, NLMsInList, var="x" ), + rec(24, 4, NLMNumbers, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B0B, 123/11 + pkt = NCP(0x7B0B, "NLM Information", 'stats') + pkt.Request(14, [ + rec(10, 4, NLMNumber ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 60, NLMInformation ), + # The remainder of this dissection is manually decoded in packet-ncp2222.inc + #rec(-1, (1,255), FileName ), + #rec(-1, (1,255), Name ), + #rec(-1, (1,255), Copyright ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B0C, 123/12 + pkt = NCP(0x7B0C, "Get Directory Cache Information", 'stats') + pkt.Request(10) + pkt.Reply(72, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 56, DirCacheInfo ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B0D, 123/13 + pkt = NCP(0x7B0D, "Get Operating System Version Information", 'stats') + pkt.Request(10) + pkt.Reply(70, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 1, OSMajorVersion ), + rec(17, 1, OSMinorVersion ), + rec(18, 1, OSRevision ), + rec(19, 1, AccountVersion ), + rec(20, 1, VAPVersion ), + rec(21, 1, QueueingVersion ), + rec(22, 1, SecurityRestrictionVersion ), + rec(23, 1, InternetBridgeVersion ), + rec(24, 4, MaxNumOfVol ), + rec(28, 4, MaxNumOfConn ), + rec(32, 4, MaxNumOfUsers ), + rec(36, 4, MaxNumOfNmeSps ), + rec(40, 4, MaxNumOfLANS ), + rec(44, 4, MaxNumOfMedias ), + rec(48, 4, MaxNumOfStacks ), + rec(52, 4, MaxDirDepth ), + rec(56, 4, MaxDataStreams ), + rec(60, 4, MaxNumOfSpoolPr ), + rec(64, 4, ServerSerialNumber ), + rec(68, 2, ServerAppNumber ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B0E, 123/14 + pkt = NCP(0x7B0E, "Get Active Connection List by Type", 'stats') + pkt.Request(15, [ + rec(10, 4, StartConnNumber ), + rec(14, 1, ConnectionType ), + ]) + pkt.Reply(528, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 512, ActiveConnBitList ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfd01, 0xff00]) + # 2222/7B0F, 123/15 + pkt = 
NCP(0x7B0F, "Get NLM Resource Tag List", 'stats') + pkt.Request(18, [ + rec(10, 4, NLMNumber ), + rec(14, 4, NLMStartNumber ), + ]) + pkt.Reply(37, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, TtlNumOfRTags ), + rec(20, 4, CurNumOfRTags ), + rec(24, 13, RTagStructure ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B10, 123/16 + pkt = NCP(0x7B10, "Enumerate Connection Information from Connection List", 'stats') + pkt.Request(22, [ + rec(10, 1, EnumInfoMask), + rec(11, 3, Reserved3), + rec(14, 4, itemsInList, var="x"), + rec(18, 4, connList, repeat="x"), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, ItemsInPacket ), + srec(netAddr, req_cond="ncp.enum_info_transport==TRUE"), + srec(timeInfo, req_cond="ncp.enum_info_time==TRUE"), + srec(nameInfo, req_cond="ncp.enum_info_name==TRUE"), + srec(lockInfo, req_cond="ncp.enum_info_lock==TRUE"), + srec(printInfo, req_cond="ncp.enum_info_print==TRUE"), + srec(statsInfo, req_cond="ncp.enum_info_stats==TRUE"), + srec(acctngInfo, req_cond="ncp.enum_info_account==TRUE"), + srec(authInfo, req_cond="ncp.enum_info_auth==TRUE"), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B11, 123/17 + pkt = NCP(0x7B11, "Enumerate NCP Service Network Addresses", 'stats') + pkt.Request(14, [ + rec(10, 4, SearchNumber ), + ]) + pkt.Reply(36, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, ServerInfoFlags ), + rec(16, 16, GUID ), + rec(32, 4, NextSearchNum ), + # The following two items are dissected in packet-ncp2222.inc + #rec(36, 4, ItemsInPacket, var="x"), + #rec(40, 20, NCPNetworkAddress, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb01, 0xff00]) + # 2222/7B14, 123/20 + pkt = NCP(0x7B14, "Active LAN Board List", 'stats') + pkt.Request(14, [ + rec(10, 4, StartNumber ), + ]) + pkt.Reply(28, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, MaxNumOfLANS ), + rec(20, 4, ItemsInPacket, var="x"), + rec(24, 4, BoardNumbers, repeat="x"), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B15, 123/21 + pkt = NCP(0x7B15, "LAN Configuration Information", 'stats') + pkt.Request(14, [ + rec(10, 4, BoardNumber ), + ]) + pkt.Reply(152, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16,136, LANConfigInfo ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B16, 123/22 + pkt = NCP(0x7B16, "LAN Common Counters Information", 'stats') + pkt.Request(18, [ + rec(10, 4, BoardNumber ), + rec(14, 4, BlockNumber ), + ]) + pkt.Reply(86, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 1, StatMajorVersion ), + rec(15, 1, StatMinorVersion ), + rec(16, 4, TotalCommonCnts ), + rec(20, 4, TotalCntBlocks ), + rec(24, 4, CustomCounters ), + rec(28, 4, NextCntBlock ), + rec(32, 54, CommonLanStruc ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B17, 123/23 + pkt = NCP(0x7B17, "LAN Custom Counters Information", 'stats') + pkt.Request(18, [ + rec(10, 4, BoardNumber ), + rec(14, 4, StartNumber 
), + ]) + pkt.Reply(25, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, NumOfCCinPkt, var="x"), + rec(20, 5, CustomCntsInfo, repeat="x"), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B18, 123/24 + pkt = NCP(0x7B18, "LAN Name Information", 'stats') + pkt.Request(14, [ + rec(10, 4, BoardNumber ), + ]) + pkt.Reply(NO_LENGTH_CHECK, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, PROTO_LENGTH_UNKNOWN, DriverBoardName ), + rec(-1, PROTO_LENGTH_UNKNOWN, DriverShortName ), + rec(-1, PROTO_LENGTH_UNKNOWN, DriverLogicalName ), + ]) + pkt.ReqCondSizeVariable() + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B19, 123/25 + pkt = NCP(0x7B19, "LSL Information", 'stats') + pkt.Request(10) + pkt.Reply(90, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 74, LSLInformation ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B1A, 123/26 + pkt = NCP(0x7B1A, "LSL Logical Board Statistics", 'stats') + pkt.Request(14, [ + rec(10, 4, BoardNumber ), + ]) + pkt.Reply(28, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, LogTtlTxPkts ), + rec(20, 4, LogTtlRxPkts ), + rec(24, 4, UnclaimedPkts ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B1B, 123/27 + pkt = NCP(0x7B1B, "MLID Board Information", 'stats') + pkt.Request(14, [ + rec(10, 4, BoardNumber ), + ]) + pkt.Reply(44, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 1, Reserved ), + rec(15, 1, NumberOfProtocols ), + rec(16, 28, MLIDBoardInfo ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B1E, 123/30 + pkt = NCP(0x7B1E, "Get Media Manager Object Information", 'stats') + pkt.Request(14, [ + rec(10, 4, ObjectNumber ), + ]) + pkt.Reply(212, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 196, GenericInfoDef ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B1F, 123/31 + pkt = NCP(0x7B1F, "Get Media Manager Objects List", 'stats') + pkt.Request(15, [ + rec(10, 4, StartNumber ), + rec(14, 1, MediaObjectType ), + ]) + pkt.Reply(28, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, nextStartingNumber ), + rec(20, 4, ObjectCount, var="x"), + rec(24, 4, ObjectID, repeat="x"), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B20, 123/32 + pkt = NCP(0x7B20, "Get Media Manager Object Childrens List", 'stats') + pkt.Request(22, [ + rec(10, 4, StartNumber ), + rec(14, 1, MediaObjectType ), + rec(15, 3, Reserved3 ), + rec(18, 4, ParentObjectNumber ), + ]) + pkt.Reply(28, [ + rec(8, 4, CurrentServerTime ), + rec(12, 1, VConsoleVersion ), + rec(13, 1, VConsoleRevision ), + rec(14, 2, Reserved2 ), + rec(16, 4, nextStartingNumber ), + rec(20, 4, ObjectCount, var="x" ), + rec(24, 4, ObjectID, repeat="x" ), + ]) + pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00]) + # 2222/7B21, 123/33 + pkt = NCP(0x7B21, "Get Volume Segment List", 'stats') + pkt.Request(14, [ + rec(10, 4, 
+    # 2222/7B21, 123/33
+    pkt = NCP(0x7B21, "Get Volume Segment List", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, VolumeNumberLong ),
+    ])
+    pkt.Reply(32, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, NumOfSegments, var="x" ),
+        rec(20, 12, Segments, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0x9801, 0xfb06, 0xff00])
+    # 2222/7B22, 123/34
+    pkt = NCP(0x7B22, "Get Volume Information by Level", 'stats')
+    pkt.Request(15, [
+        rec(10, 4, VolumeNumberLong ),
+        rec(14, 1, InfoLevelNumber ),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 1, InfoLevelNumber ),
+        rec(17, 3, Reserved3 ),
+        srec(VolInfoStructure, req_cond="ncp.info_level_num==0x01"),
+        srec(VolInfo2Struct, req_cond="ncp.info_level_num==0x02"),
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B23, 123/35
+    pkt = NCP(0x7B23, "Get Volume Information by Level 64 Bit Aware", 'stats')
+    pkt.Request(22, [
+        rec(10, 4, InpInfotype ),
+        rec(14, 4, Inpld ),
+        rec(18, 4, VolInfoReturnInfoMask),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, VolInfoReturnInfoMask),
+        srec(VolInfoStructure64, req_cond="ncp.vinfo_info64==0x00000001"),
+        rec( -1, (1,255), VolumeNameLen, req_cond="ncp.vinfo_volname==0x00000002" ),
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B28, 123/40
+    pkt = NCP(0x7B28, "Active Protocol Stacks", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, StartNumber ),
+    ])
+    pkt.Reply(48, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, MaxNumOfLANS ),
+        rec(20, 4, StackCount, var="x" ),
+        rec(24, 4, nextStartingNumber ),
+        rec(28, 20, StackInfo, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B29, 123/41
+    pkt = NCP(0x7B29, "Get Protocol Stack Configuration Information", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, StackNumber ),
+    ])
+    pkt.Reply((37,164), [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 1, ConfigMajorVN ),
+        rec(17, 1, ConfigMinorVN ),
+        rec(18, 1, StackMajorVN ),
+        rec(19, 1, StackMinorVN ),
+        rec(20, 16, ShortStkName ),
+        rec(36, (1,128), StackFullNameStr ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B2A, 123/42
+    pkt = NCP(0x7B2A, "Get Protocol Stack Statistics Information", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, StackNumber ),
+    ])
+    pkt.Reply(38, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 1, StatMajorVersion ),
+        rec(17, 1, StatMinorVersion ),
+        rec(18, 2, ComCnts ),
+        rec(20, 4, CounterMask ),
+        rec(24, 4, TotalTxPkts ),
+        rec(28, 4, TotalRxPkts ),
+        rec(32, 4, IgnoredRxPkts ),
+        rec(36, 2, CustomCnts ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
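+    # Conditional sub-records (sketch of the mechanism as used above):
+    # srec(..., req_cond="...") emits a sub-structure only when the given
+    # filter expression is true for the matched request, e.g. when the
+    # corresponding bit of the request's return-info mask was set, and
+    # pkt.ReqCondSizeVariable() records that the reply size varies with
+    # those conditions, so no fixed-size check is generated:
+    #
+    #   srec(VolInfoStructure64, req_cond="ncp.vinfo_info64==0x00000001"),
+    #   pkt.ReqCondSizeVariable()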
+    # 2222/7B2B, 123/43
+    pkt = NCP(0x7B2B, "Get Protocol Stack Custom Information", 'stats')
+    pkt.Request(18, [
+        rec(10, 4, StackNumber ),
+        rec(14, 4, StartNumber ),
+    ])
+    pkt.Reply(25, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, CustomCount, var="x" ),
+        rec(20, 5, CustomCntsInfo, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B2C, 123/44
+    pkt = NCP(0x7B2C, "Get Protocol Stack Numbers by Media Number", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, MediaNumber ),
+    ])
+    pkt.Reply(24, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, StackCount, var="x" ),
+        rec(20, 4, StackNumber, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B2D, 123/45
+    pkt = NCP(0x7B2D, "Get Protocol Stack Numbers by LAN Board Number", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, BoardNumber ),
+    ])
+    pkt.Reply(24, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, StackCount, var="x" ),
+        rec(20, 4, StackNumber, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B2E, 123/46
+    pkt = NCP(0x7B2E, "Get Media Name by Media Number", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, MediaNumber ),
+    ])
+    pkt.Reply((17,144), [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, (1,128), MediaName ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B2F, 123/47
+    pkt = NCP(0x7B2F, "Get Loaded Media Number", 'stats')
+    pkt.Request(10)
+    pkt.Reply(28, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, MaxNumOfMedias ),
+        rec(20, 4, MediaListCount, var="x" ),
+        rec(24, 4, MediaList, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B32, 123/50
+    pkt = NCP(0x7B32, "Get General Router and SAP Information", 'stats')
+    pkt.Request(10)
+    pkt.Reply(37, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 2, RIPSocketNumber ),
+        rec(18, 2, Reserved2 ),
+        rec(20, 1, RouterDownFlag ),
+        rec(21, 3, Reserved3 ),
+        rec(24, 1, TrackOnFlag ),
+        rec(25, 3, Reserved3 ),
+        rec(28, 1, ExtRouterActiveFlag ),
+        rec(29, 3, Reserved3 ),
+        rec(32, 2, SAPSocketNumber ),
+        rec(34, 2, Reserved2 ),
+        rec(36, 1, RpyNearestSrvFlag ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B33, 123/51
+    pkt = NCP(0x7B33, "Get Network Router Information", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, NetworkNumber ),
+    ])
+    pkt.Reply(26, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 10, KnownRoutes ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B34, 123/52
+    pkt = NCP(0x7B34, "Get Network Routers Information", 'stats')
+    pkt.Request(18, [
+        rec(10, 4, NetworkNumber),
+        rec(14, 4, StartNumber ),
+    ])
+    pkt.Reply(34, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, NumOfEntries, var="x" ),
+        rec(20, 14, RoutersInfo, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
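+    # Variable-length sizes (illustrative): wherever a single number would
+    # give a fixed size, a (min,max) tuple declares an allowed range, for
+    # whole PDUs and for single fields alike. 0x7B2E above reads:
+    #
+    #   pkt.Reply((17,144), [                  # reply is 17..144 bytes
+    #       rec(16, (1,128), MediaName ),      # counted string, 1..128 bytes
+    #   ])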
+    # 2222/7B35, 123/53
+    pkt = NCP(0x7B35, "Get Known Networks Information", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, StartNumber ),
+    ])
+    pkt.Reply(30, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, NumOfEntries, var="x" ),
+        rec(20, 10, KnownRoutes, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B36, 123/54
+    pkt = NCP(0x7B36, "Get Server Information", 'stats')
+    pkt.Request((15,64), [
+        rec(10, 2, ServerType ),
+        rec(12, 2, Reserved2 ),
+        rec(14, (1,50), ServerNameLen, info_str=(ServerNameLen, "Get Server Information: %s", ", %s") ),
+    ])
+    pkt.Reply(30, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 12, ServerAddress ),
+        rec(28, 2, HopsToNet ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B37, 123/55
+    pkt = NCP(0x7B37, "Get Server Sources Information", 'stats')
+    pkt.Request((19,68), [
+        rec(10, 4, StartNumber ),
+        rec(14, 2, ServerType ),
+        rec(16, 2, Reserved2 ),
+        rec(18, (1,50), ServerNameLen, info_str=(ServerNameLen, "Get Server Sources Info: %s", ", %s") ),
+    ])
+    pkt.Reply(32, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, NumOfEntries, var="x" ),
+        rec(20, 12, ServersSrcInfo, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B38, 123/56
+    pkt = NCP(0x7B38, "Get Known Servers Information", 'stats')
+    pkt.Request(16, [
+        rec(10, 4, StartNumber ),
+        rec(14, 2, ServerType ),
+    ])
+    pkt.Reply(35, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, NumOfEntries, var="x" ),
+        rec(20, 15, KnownServStruc, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B3C, 123/60
+    pkt = NCP(0x7B3C, "Get Server Set Commands Information", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, StartNumber ),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, TtlNumOfSetCmds ),
+        rec(20, 4, nextStartingNumber ),
+        rec(24, 1, SetCmdType ),
+        rec(25, 3, Reserved3 ),
+        rec(28, 1, SetCmdCategory ),
+        rec(29, 3, Reserved3 ),
+        rec(32, 1, SetCmdFlags ),
+        rec(33, 3, Reserved3 ),
+        rec(36, PROTO_LENGTH_UNKNOWN, SetCmdName ),
+        rec(-1, 4, SetCmdValueNum ),
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+    # 2222/7B3D, 123/61
+    pkt = NCP(0x7B3D, "Get Server Set Categories", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, StartNumber ),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, NumberOfSetCategories ),
+        rec(20, 4, nextStartingNumber ),
+        rec(24, PROTO_LENGTH_UNKNOWN, CategoryName ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
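+    # info_str tuples (a sketch; the exact INFO-column rendering is done by
+    # the generated dissector): info_str=(field, first_fmt, next_fmt) puts
+    # the field's value into the packet-list INFO column, formatting the
+    # first occurrence in a packet with first_fmt and any further
+    # occurrences with next_fmt, as in 0x7B36 above:
+    #
+    #   rec(14, (1,50), ServerNameLen,
+    #       info_str=(ServerNameLen, "Get Server Information: %s", ", %s") ),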
+    # 2222/7B3E, 123/62
+    pkt = NCP(0x7B3E, "Get Server Set Commands Information By Name", 'stats')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, PROTO_LENGTH_UNKNOWN, SetParmName, info_str=(SetParmName, "Get Server Set Command Info for: %s", ", %s") ),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+        rec(8, 4, CurrentServerTime ),
+        rec(12, 1, VConsoleVersion ),
+        rec(13, 1, VConsoleRevision ),
+        rec(14, 2, Reserved2 ),
+        rec(16, 4, TtlNumOfSetCmds ),
+        rec(20, 4, nextStartingNumber ),
+        rec(24, 1, SetCmdType ),
+        rec(25, 3, Reserved3 ),
+        rec(28, 1, SetCmdCategory ),
+        rec(29, 3, Reserved3 ),
+        rec(32, 1, SetCmdFlags ),
+        rec(33, 3, Reserved3 ),
+        rec(36, PROTO_LENGTH_UNKNOWN, SetCmdName ),
+        # The value of the set command is decoded in packet-ncp2222.inc
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x7e01, 0xc600, 0xfb06, 0xff22])
+    # 2222/7B46, 123/70
+    pkt = NCP(0x7B46, "Get Current Compressing File", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, VolumeNumberLong ),
+    ])
+    pkt.Reply(56, [
+        rec(8, 4, ParentID ),
+        rec(12, 4, DirectoryEntryNumber ),
+        rec(16, 4, compressionStage ),
+        rec(20, 4, ttlIntermediateBlks ),
+        rec(24, 4, ttlCompBlks ),
+        rec(28, 4, curIntermediateBlks ),
+        rec(32, 4, curCompBlks ),
+        rec(36, 4, curInitialBlks ),
+        rec(40, 4, fileFlags ),
+        rec(44, 4, projectedCompSize ),
+        rec(48, 4, originalSize ),
+        rec(52, 4, compressVolume ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0x7901, 0x9801, 0xfb06, 0xff00])
+    # 2222/7B47, 123/71
+    pkt = NCP(0x7B47, "Get Current DeCompressing File Info List", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, VolumeNumberLong ),
+    ])
+    pkt.Reply(24, [
+        #rec(8, 4, FileListCount ),
+        rec(8, 16, FileInfoStruct ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0x9801, 0xfb06, 0xff00])
+    # 2222/7B48, 123/72
+    pkt = NCP(0x7B48, "Get Compression and Decompression Time and Counts", 'stats')
+    pkt.Request(14, [
+        rec(10, 4, VolumeNumberLong ),
+    ])
+    pkt.Reply(64, [
+        rec(8, 56, CompDeCompStat ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0x9801, 0xfb06, 0xff00])
+    # 2222/7BF9, 123/249
+    pkt = NCP(0x7BF9, "Set Alert Notification", 'stats')
+    pkt.Request(10)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+    # 2222/7BFB, 123/251
+    pkt = NCP(0x7BFB, "Get Item Configuration Information", 'stats')
+    pkt.Request(10)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+    # 2222/7BFC, 123/252
+    pkt = NCP(0x7BFC, "Get Subject Item ID List", 'stats')
+    pkt.Request(10)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+    # 2222/7BFD, 123/253
+    pkt = NCP(0x7BFD, "Get Subject Item List Count", 'stats')
+    pkt.Request(10)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+    # 2222/7BFE, 123/254
+    pkt = NCP(0x7BFE, "Get Subject ID List", 'stats')
+    pkt.Request(10)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+    # 2222/7BFF, 123/255
+    pkt = NCP(0x7BFF, "Get Number of NetMan Subjects", 'stats')
+    pkt.Request(10)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+    # 2222/8301, 131/01
+    pkt = NCP(0x8301, "RPC Load an NLM", 'remote')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, 4, NLMLoadOptions ),
+        rec(14, 16, Reserved16 ),
+        rec(30, PROTO_LENGTH_UNKNOWN, PathAndName, info_str=(PathAndName, "RPC Load NLM: %s", ", %s") ),
+    ])
+    pkt.Reply(12, [
+        rec(8, 4, RPCccode ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7c00, 0x7e00, 0xfb07, 0xff00])
+    # 2222/8302, 131/02
+    pkt = NCP(0x8302, "RPC Unload an NLM", 'remote')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, 20, Reserved20 ),
+        rec(30, PROTO_LENGTH_UNKNOWN, NLMName, info_str=(NLMName, "RPC Unload NLM: %s", ", %s") ),
+    ])
+    pkt.Reply(12, [
+        rec(8, 4, RPCccode ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7c00, 0x7e00, 0xfb07, 0xff00])
+    # 2222/8303, 131/03
+    pkt = NCP(0x8303, "RPC Mount Volume", 'remote')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, 20, Reserved20 ),
+        rec(30, PROTO_LENGTH_UNKNOWN, VolumeNameStringz, info_str=(VolumeNameStringz, "RPC Mount Volume: %s", ", %s") ),
+    ])
+    pkt.Reply(32, [
+        rec(8, 4, RPCccode),
+        rec(12, 16, Reserved16 ),
+        rec(28, 4, VolumeNumberLong ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
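+    # The 131/xx RPC calls above and below share one shape: a request that
+    # carries a reserved pad plus a NUL-terminated name or path whose
+    # length is unknown until dissection, and a reply whose first field is
+    # the RPC completion code. A generic member of the family (hypothetical
+    # opcode, for illustration only) would be declared as:
+    #
+    #   pkt = NCP(0x83xx, "RPC Example", 'remote')
+    #   pkt.Request(NO_LENGTH_CHECK, [
+    #       rec(10, 20, Reserved20 ),
+    #       rec(30, PROTO_LENGTH_UNKNOWN, PathAndName ),
+    #   ])
+    #   pkt.Reply(12, [
+    #       rec(8, 4, RPCccode ),
+    #   ])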
+    # 2222/8304, 131/04
+    pkt = NCP(0x8304, "RPC Dismount Volume", 'remote')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, 20, Reserved20 ),
+        rec(30, PROTO_LENGTH_UNKNOWN, VolumeNameStringz, info_str=(VolumeNameStringz, "RPC Dismount Volume: %s", ", %s") ),
+    ])
+    pkt.Reply(12, [
+        rec(8, 4, RPCccode ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+    # 2222/8305, 131/05
+    pkt = NCP(0x8305, "RPC Add Name Space To Volume", 'remote')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, 20, Reserved20 ),
+        rec(30, PROTO_LENGTH_UNKNOWN, AddNameSpaceAndVol, info_str=(AddNameSpaceAndVol, "RPC Add Name Space to Volume: %s", ", %s") ),
+    ])
+    pkt.Reply(12, [
+        rec(8, 4, RPCccode ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+    # 2222/8306, 131/06
+    pkt = NCP(0x8306, "RPC Set Command Value", 'remote')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, 1, SetCmdType ),
+        rec(11, 3, Reserved3 ),
+        rec(14, 4, SetCmdValueNum ),
+        rec(18, 12, Reserved12 ),
+        rec(30, PROTO_LENGTH_UNKNOWN, SetCmdName, info_str=(SetCmdName, "RPC Set Command Value: %s", ", %s") ),
+        #
+        # XXX - optional string, if SetCmdType is 0
+        #
+    ])
+    pkt.Reply(12, [
+        rec(8, 4, RPCccode ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+    # 2222/8307, 131/07
+    pkt = NCP(0x8307, "RPC Execute NCF File", 'remote')
+    pkt.Request(NO_LENGTH_CHECK, [
+        rec(10, 20, Reserved20 ),
+        rec(30, PROTO_LENGTH_UNKNOWN, PathAndName, info_str=(PathAndName, "RPC Execute NCF File: %s", ", %s") ),
+    ])
+    pkt.Reply(12, [
+        rec(8, 4, RPCccode ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+if __name__ == '__main__':
+# import profile
+# filename = "ncp.pstats"
+# profile.run("main()", filename)
+#
+# import pstats
+# sys.stdout = msg
+# p = pstats.Stats(filename)
+#
+# print "Stats sorted by cumulative time"
+# p.strip_dirs().sort_stats('cumulative').print_stats()
+#
+# print "Function callees"
+# p.print_callees()
+    main()
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/netscreen2dump.py b/tools/netscreen2dump.py
new file mode 100755
index 0000000..7aaac94
--- /dev/null
+++ b/tools/netscreen2dump.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+"""
+Converts netscreen snoop hex-dumps to a hex-dump that text2pcap can read.
+
+Copyright (c) 2004 by Gilbert Ramirez
+
+SPDX-License-Identifier: GPL-2.0-or-later
+"""
+
+import sys
+import re
+import os
+import stat
+import time
+
+
+class OutputFile:
+    TIMER_MAX = 99999.9
+
+    def __init__(self, name, base_time):
+        try:
+            self.fh = open(name, "w")
+        except IOError as err:
+            sys.exit(err)
+
+        self.base_time = base_time
+        self.prev_timestamp = 0.0
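+
+    # The lines PrintPacket writes below follow text2pcap's expected input:
+    # a timestamp line, "offset hex-bytes" dump lines, and a blank line
+    # between packets. A sketch of converting the result (file names are
+    # examples; -t takes the strftime format used below):
+    #
+    #   text2pcap -t "%Y-%m-%d %H:%M:%S." netscreen.txt netscreen.pcap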
+
+    def PrintPacket(self, timestamp, datalines):
+        # What to do with the timestamp? I need more data about what
+        # the netscreen timestamp is, then I can generate one for the text file.
+        # print("TS:", timestamp.group("time"))
+        try:
+            timestamp = float(timestamp.group("time"))
+        except ValueError:
+            sys.exit("Unable to convert '%s' to floating point." %
+                     (timestamp,))
+
+        # Did we wrap around the timer max?
+        if timestamp < self.prev_timestamp:
+            self.base_time += self.TIMER_MAX
+
+        self.prev_timestamp = timestamp
+
+        packet_timestamp = self.base_time + timestamp
+
+        # Determine the time string to print
+        gmtime = time.gmtime(packet_timestamp)
+        subsecs = packet_timestamp - int(packet_timestamp)
+        assert 0.0 <= subsecs < 1.0
+        subsecs = int(subsecs * 10)
+
+        print("%s.%d" % (time.strftime("%Y-%m-%d %H:%M:%S", gmtime),
+                         subsecs), file=self.fh)
+
+        # Print the packet data
+        offset = 0
+        for lineno, hexgroup in datalines:
+            hexline = hexgroup.group("hex")
+            hexpairs = hexline.split()
+            print("%08x %s" % (offset, hexline), file=self.fh)
+            offset += len(hexpairs)
+
+        # Blank line between packets
+        print(file=self.fh)
+
+
+# Find a timestamp line
+re_timestamp = re.compile(r"^(?P