summaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-10 20:34:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-10 20:34:10 +0000
commite4ba6dbc3f1e76890b22773807ea37fe8fa2b1bc (patch)
tree68cb5ef9081156392f1dd62a00c6ccc1451b93df /tools
parentInitial commit. (diff)
downloadwireshark-e4ba6dbc3f1e76890b22773807ea37fe8fa2b1bc.tar.xz
wireshark-e4ba6dbc3f1e76890b22773807ea37fe8fa2b1bc.zip
Adding upstream version 4.2.2.upstream/4.2.2
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--tools/Get-HardenFlags.ps1146
-rw-r--r--tools/SkinnyProtocolOptimized.xml4190
-rwxr-xr-xtools/WiresharkXML.py312
-rwxr-xr-xtools/alpine-setup.sh129
-rwxr-xr-xtools/arch-setup.sh136
-rwxr-xr-xtools/asn2deb179
-rwxr-xr-xtools/asn2wrs.py8242
-rw-r--r--tools/asterix/README.md51
-rw-r--r--tools/asterix/packet-asterix-template.c867
-rwxr-xr-xtools/asterix/update-specs.py829
-rwxr-xr-xtools/bsd-setup.sh202
-rwxr-xr-xtools/checkAPIs.pl1303
-rwxr-xr-xtools/check_dissector.py133
-rwxr-xr-xtools/check_dissector_urls.py291
-rwxr-xr-xtools/check_help_urls.py46
-rwxr-xr-xtools/check_spelling.py493
-rwxr-xr-xtools/check_static.py326
-rwxr-xr-xtools/check_tfs.py595
-rwxr-xr-xtools/check_typed_item_calls.py1775
-rwxr-xr-xtools/check_val_to_str.py230
-rwxr-xr-xtools/checkfiltername.pl790
-rwxr-xr-xtools/checkhf.pl700
-rwxr-xr-xtools/checklicenses.py262
-rw-r--r--tools/colorfilters2js.py85
-rwxr-xr-xtools/commit-msg7
-rwxr-xr-xtools/compress-pngs.py89
-rwxr-xr-xtools/convert-glib-types.py124
-rwxr-xr-xtools/convert_expert_add_info_format.pl417
-rwxr-xr-xtools/convert_proto_tree_add_text.pl759
-rwxr-xr-xtools/cppcheck/cppcheck.sh158
-rw-r--r--tools/cppcheck/includes7
-rw-r--r--tools/cppcheck/suppressions7
-rwxr-xr-xtools/debian-nightly-package.sh24
-rwxr-xr-xtools/debian-setup.sh300
-rw-r--r--tools/debug-alloc.env33
-rwxr-xr-xtools/delete_includes.py427
-rw-r--r--tools/detect_bad_alloc_patterns.py120
-rwxr-xr-xtools/eti2wireshark.py1166
-rwxr-xr-xtools/extract_asn1_from_spec.pl125
-rwxr-xr-xtools/fix-encoding-args.pl698
-rwxr-xr-xtools/fuzz-test.sh317
-rwxr-xr-xtools/gen-bugnote54
-rwxr-xr-xtools/generate-bacnet-vendors.py47
-rwxr-xr-xtools/generate-dissector.py158
-rwxr-xr-xtools/generate-nl80211-fields.py373
-rwxr-xr-xtools/generate-sysdig-event.py412
-rwxr-xr-xtools/generate_authors.py144
-rwxr-xr-xtools/generate_cbor_pcap.py69
-rwxr-xr-xtools/html2text.py249
-rwxr-xr-xtools/idl2deb141
-rwxr-xr-xtools/idl2wrs114
-rwxr-xr-xtools/indexcap.py283
-rwxr-xr-xtools/json2pcap/json2pcap.py686
-rw-r--r--tools/lemon/CMakeLists.txt46
-rw-r--r--tools/lemon/README52
-rwxr-xr-xtools/lemon/apply-patches.sh16
-rw-r--r--tools/lemon/lemon.c5893
-rw-r--r--tools/lemon/lempar.c1068
-rw-r--r--tools/lex.py1074
-rwxr-xr-xtools/licensecheck.pl874
-rwxr-xr-xtools/list_protos_in_cap.sh96
-rwxr-xr-xtools/macos-setup-brew.sh173
-rwxr-xr-xtools/macos-setup.sh3865
-rwxr-xr-xtools/make-authors-csv.py63
-rwxr-xr-xtools/make-enterprises.py196
-rwxr-xr-xtools/make-enums.py102
-rw-r--r--tools/make-isobus.py223
-rwxr-xr-xtools/make-manuf.py401
-rwxr-xr-xtools/make-no-reassembly-profile.py69
-rwxr-xr-xtools/make-packet-dcm.py247
-rwxr-xr-xtools/make-pci-ids.py252
-rwxr-xr-xtools/make-plugin-reg.py197
-rwxr-xr-xtools/make-regs.py157
-rwxr-xr-xtools/make-services.py292
-rwxr-xr-xtools/make-tls-ct-logids.py126
-rwxr-xr-xtools/make-usb.py164
-rwxr-xr-xtools/make-version.py459
-rw-r--r--tools/make_charset_table.c125
-rwxr-xr-xtools/mingw-rpm-setup.sh70
-rwxr-xr-xtools/msnchat315
-rw-r--r--tools/msys2-setup.sh129
-rw-r--r--tools/msys2checkdeps.py177
-rwxr-xr-xtools/ncp2222.py16921
-rwxr-xr-xtools/netscreen2dump.py137
-rwxr-xr-xtools/oss-fuzzshark/build.sh22
-rwxr-xr-xtools/parse_xml2skinny_dissector.py1073
-rw-r--r--tools/pidl/MANIFEST41
-rw-r--r--tools/pidl/META.yml18
-rwxr-xr-xtools/pidl/Makefile.PL17
-rw-r--r--tools/pidl/README64
-rw-r--r--tools/pidl/TODO44
-rw-r--r--tools/pidl/expr.yp202
-rw-r--r--tools/pidl/idl.yp696
-rw-r--r--tools/pidl/lib/Parse/Pidl.pm44
-rw-r--r--tools/pidl/lib/Parse/Pidl/CUtil.pm52
-rw-r--r--tools/pidl/lib/Parse/Pidl/Compat.pm168
-rw-r--r--tools/pidl/lib/Parse/Pidl/Dump.pm294
-rw-r--r--tools/pidl/lib/Parse/Pidl/Expr.pm1444
-rw-r--r--tools/pidl/lib/Parse/Pidl/IDL.pm2664
-rw-r--r--tools/pidl/lib/Parse/Pidl/NDR.pm1472
-rw-r--r--tools/pidl/lib/Parse/Pidl/ODL.pm130
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba3/ClientNDR.pm409
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba3/ServerNDR.pm322
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4.pm133
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/COM/Header.pm160
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/COM/Proxy.pm225
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/COM/Stub.pm327
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/Header.pm537
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/NDR/Client.pm884
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/NDR/Parser.pm3224
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/NDR/Server.pm342
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/Python.pm2425
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/TDR.pm283
-rw-r--r--tools/pidl/lib/Parse/Pidl/Samba4/Template.pm92
-rw-r--r--tools/pidl/lib/Parse/Pidl/Typelist.pm354
-rw-r--r--tools/pidl/lib/Parse/Pidl/Util.pm197
-rw-r--r--tools/pidl/lib/Parse/Pidl/Wireshark/Conformance.pm509
-rw-r--r--tools/pidl/lib/Parse/Pidl/Wireshark/NDR.pm1401
-rw-r--r--tools/pidl/lib/Parse/Yapp/Driver.pm471
-rw-r--r--tools/pidl/lib/wscript_build37
-rwxr-xr-xtools/pidl/pidl804
-rw-r--r--tools/pidl/tests/Util.pm181
-rwxr-xr-xtools/pidl/tests/cutil.pl21
-rwxr-xr-xtools/pidl/tests/dump.pl15
-rwxr-xr-xtools/pidl/tests/header.pl108
-rwxr-xr-xtools/pidl/tests/ndr.pl561
-rwxr-xr-xtools/pidl/tests/ndr_align.pl143
-rwxr-xr-xtools/pidl/tests/ndr_alloc.pl118
-rwxr-xr-xtools/pidl/tests/ndr_array.pl37
-rwxr-xr-xtools/pidl/tests/ndr_compat.pl21
-rwxr-xr-xtools/pidl/tests/ndr_deprecations.pl26
-rwxr-xr-xtools/pidl/tests/ndr_fullptr.pl44
-rwxr-xr-xtools/pidl/tests/ndr_refptr.pl526
-rwxr-xr-xtools/pidl/tests/ndr_represent.pl71
-rwxr-xr-xtools/pidl/tests/ndr_simple.pl28
-rwxr-xr-xtools/pidl/tests/ndr_string.pl192
-rwxr-xr-xtools/pidl/tests/ndr_tagtype.pl66
-rwxr-xr-xtools/pidl/tests/parse_idl.pl243
-rwxr-xr-xtools/pidl/tests/samba-ndr.pl300
-rwxr-xr-xtools/pidl/tests/samba3-cli.pl236
-rwxr-xr-xtools/pidl/tests/samba3-srv.pl18
-rwxr-xr-xtools/pidl/tests/tdr.pl49
-rwxr-xr-xtools/pidl/tests/test_util.pl21
-rwxr-xr-xtools/pidl/tests/typelist.pl93
-rwxr-xr-xtools/pidl/tests/util.pl115
-rwxr-xr-xtools/pidl/tests/wireshark-conf.pl205
-rwxr-xr-xtools/pidl/tests/wireshark-ndr.pl274
-rw-r--r--tools/pidl/wscript103
-rwxr-xr-xtools/pkt-from-core.py477
-rwxr-xr-xtools/pre-commit135
-rw-r--r--tools/pre-commit-ignore.conf27
-rwxr-xr-xtools/pre-commit-ignore.py59
-rwxr-xr-xtools/process-x11-fields.pl165
-rwxr-xr-xtools/process-x11-xcb.pl1946
-rw-r--r--tools/radiotap-gen/CMakeLists.txt8
-rw-r--r--tools/radiotap-gen/radiotap-gen.c182
-rwxr-xr-xtools/randpkt-test.sh171
-rwxr-xr-xtools/rdps.py142
-rwxr-xr-xtools/release-update-debian-soversions.sh23
-rwxr-xr-xtools/rpm-setup.sh358
-rwxr-xr-xtools/sharkd_shell.py311
-rwxr-xr-xtools/test-captures.sh85
-rwxr-xr-xtools/test-common.sh160
-rwxr-xr-xtools/update-appdata.py99
-rwxr-xr-xtools/update-tools-help.py82
-rwxr-xr-xtools/update-tx72
-rwxr-xr-xtools/valgrind-wireshark.sh123
-rwxr-xr-xtools/validate-clang-check.sh57
-rwxr-xr-xtools/validate-commit.py274
-rwxr-xr-xtools/validate-diameter-xml.sh91
-rw-r--r--tools/vg-suppressions119
-rw-r--r--tools/win-setup.ps1331
-rwxr-xr-xtools/wireshark_be.py260
-rwxr-xr-xtools/wireshark_gen.py2789
-rw-r--r--tools/wireshark_words.txt1857
-rw-r--r--tools/ws-coding-style.cfg370
-rw-r--r--tools/yacc.py3448
177 files changed, 103416 insertions, 0 deletions
diff --git a/tools/Get-HardenFlags.ps1 b/tools/Get-HardenFlags.ps1
new file mode 100644
index 0000000..c078565
--- /dev/null
+++ b/tools/Get-HardenFlags.ps1
@@ -0,0 +1,146 @@
+#
+# Get-HardenFlags - Checks hardening flags on the binaries.
+#
+# Copyright 2015 Graham Bloice <graham.bloice@trihedral.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+#requires -version 2
+
+# Get-HardenFlags does:
+# call the dumpbin utility to get the binary header flags
+# on all the binaries in the distribution, and then filters
+# for the NXCOMPAT and DYNAMICBASE flags.
+
+# This script will probably fail for the foreseeable future.
+#
+# Many of our third-party libraries are compiled using MinGW-w64. Its version
+# of `ld` doesn't enable the dynamicbase, nxcompat, or high-entropy-va flags
+# by default. When you *do* pass --dynamicbase it strips the relocation
+# section of the executable:
+#
+# https://sourceware.org/bugzilla/show_bug.cgi?id=19011
+#
+# As a result, none of the distributions that produce Windows applications
+# and libraries have any sort of hardening flags enabled:
+#
+# https://mingw-w64.org/doku.php/download
+#
+
+<#
+.SYNOPSIS
+Checks the NXCOMPAT and DYNAMICBASE flags on all the binaries.
+
+.DESCRIPTION
+This script checks that every binary in the distribution has the NXCOMPAT and
+DYNAMICBASE hardening flags set, and reports any binaries that do not.
+
+.PARAMETER BinaryDir
+Specifies the directory where the binaries may be found.
+
+.INPUTS
+-BinaryDir Directory containing the binaries to be checked.
+
+.OUTPUTS
+Any binary that doesn't have the flags is written to the error stream
+
+.EXAMPLE
+C:\PS> .\tools\Get-HardenFlags.ps1 -BinaryDir run\RelWithDebInfo
+#>
+
+Param(
+ [Parameter(Mandatory=$true, Position=0)]
+ [String]
+ $BinaryDir
+)
+
+# Create a list of 3rd party binaries that are not hardened
+$SoftBins = (
+ "libpixmap.dll",
+ "libwimp.dll",
+ "libgail.dll",
+ "airpcap.dll",
+ "comerr32.dll",
+ "k5sprt32.dll",
+ "krb5_32.dll",
+ "libatk-1.0-0.dll",
+ "libcairo-2.dll",
+ "libffi-6.dll",
+ "libfontconfig-1.dll",
+ "libfreetype-6.dll",
+ "libgcc_s_sjlj-1.dll",
+ "libgcrypt-20.dll",
+ "libgdk-win32-2.0-0.dll",
+ "libgdk_pixbuf-2.0-0.dll",
+ "libgio-2.0-0.dll",
+ "libglib-2.0-0.dll",
+ "libgmodule-2.0-0.dll",
+ "libgmp-10.dll",
+ "libgnutls-28.dll",
+ "libgobject-2.0-0.dll",
+ "libgpg-error-0.dll",
+ "libgtk-win32-2.0-0.dll",
+ "libharfbuzz-0.dll",
+ "libhogweed-2-4.dll",
+ "libintl-8.dll",
+ "libjasper-1.dll",
+ "libjpeg-8.dll",
+ "liblzma-5.dll",
+ "libmaxminddb.dll",
+ "libnettle-4-6.dll",
+ "libp11-kit-0.dll",
+ "libpango-1.0-0.dll",
+ "libpangocairo-1.0-0.dll",
+ "libpangoft2-1.0-0.dll",
+ "libpangowin32-1.0-0.dll",
+ "libpixman-1-0.dll",
+ "libpng15-15.dll",
+ "libtasn1-6.dll",
+ "libtiff-5.dll",
+ "libxml2-2.dll",
+# The x64 ones that are different
+ "comerr64.dll",
+ "k5sprt64.dll",
+ "krb5_64.dll",
+ "libgcc_s_seh-1.dll",
+ "libgpg-error6-0.dll",
+ "libpng16-16.dll",
+# Unfortunately the nsis uninstaller is not hardened.
+ "uninstall.exe"
+)
+
+# CD into the bindir, allows Resolve-Path to work in relative mode.
+Push-Location $BinaryDir
+[Console]::Error.WriteLine("Checking in $BinaryDir for unhardened binaries:")
+
+# Retrieve the list of binaries. -Filter is quicker than -Include, but can only handle one item
+$Binaries = Get-ChildItem -Path $BinaryDir -Recurse -Include *.exe,*.dll
+
+# Number of "soft" binaries found
+$Count = 0;
+
+# Iterate over the list
+$Binaries | ForEach-Object {
+
+ # Get the flags
+ $flags = dumpbin $_ /HEADERS;
+
+ # Check for the required flags
+ $match = $flags | Select-String -Pattern "NX compatible", "Dynamic base"
+ if ($match.Count -ne 2) {
+
+ # Write-Error outputs error records, we simply want the filename
+ [Console]::Error.WriteLine((Resolve-Path $_ -Relative))
+
+ # Don't count files that won't ever be OK
+ if ($SoftBins -notcontains (Split-Path $_ -Leaf)) {
+ $Count++
+ }
+ }
+}
+
+exit $Count
diff --git a/tools/SkinnyProtocolOptimized.xml b/tools/SkinnyProtocolOptimized.xml
new file mode 100644
index 0000000..3eb5f83
--- /dev/null
+++ b/tools/SkinnyProtocolOptimized.xml
@@ -0,0 +1,4190 @@
+<?xml version="1.0"?>
+<messages>
+ <bitfield name="Generic_Bitfield_8">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x01"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x02"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x04"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x08"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x10"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x20"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x40"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x80"/>
+ </entries>
+ </bitfield>
+ <bitfield name="Generic_Bitfield_16">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x0010"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x0080"/>
+ <entry comment="" name="Generic_Bitfield_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="" name="Generic_Bitfield_Bit10" text="Bit10" value="0x0200"/>
+ <entry comment="" name="Generic_Bitfield_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="Generic_Bitfield_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="Generic_Bitfield_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="Generic_Bitfield_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="Generic_Bitfield_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="" name="Generic_Bitfield_Bit16" text="Bit16" value="0x8000"/>
+ </entries>
+ </bitfield>
+ <bitfield name="Generic_Bitfield_32">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x00000001"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x00000002"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x00000004"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x00000008"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x00000010"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x00000020"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x00000040"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x00000080"/>
+ <entry comment="" name="Generic_Bitfield_Bit9" text="Bit9" value="0x00000100"/>
+ <entry comment="" name="Generic_Bitfield_Bit10" text="Bit10" value="0x00000200"/>
+ <entry comment="" name="Generic_Bitfield_Bit11" text="Bit11" value="0x00000400"/>
+ <entry comment="" name="Generic_Bitfield_Bit12" text="Bit12" value="0x00000800"/>
+ <entry comment="" name="Generic_Bitfield_Bit13" text="Bit13" value="0x00001000"/>
+ <entry comment="" name="Generic_Bitfield_Bit14" text="Bit14" value="0x00002000"/>
+ <entry comment="" name="Generic_Bitfield_Bit15" text="Bit15" value="0x00004000"/>
+ <entry comment="" name="Generic_Bitfield_Bit16" text="Bit16" value="0x00008000"/>
+ <entry comment="" name="Generic_Bitfield_Bit17" text="Bit17" value="0x00010000"/>
+ <entry comment="" name="Generic_Bitfield_Bit18" text="Bit18" value="0x00020000"/>
+ <entry comment="" name="Generic_Bitfield_Bit19" text="Bit19" value="0x00040000"/>
+ <entry comment="" name="Generic_Bitfield_Bit20" text="Bit20" value="0x00080000"/>
+ <entry comment="" name="Generic_Bitfield_Bit21" text="Bit21" value="0x00100000"/>
+ <entry comment="" name="Generic_Bitfield_Bit22" text="Bit22" value="0x00200000"/>
+ <entry comment="" name="Generic_Bitfield_Bit23" text="Bit23" value="0x00400000"/>
+ <entry comment="" name="Generic_Bitfield_Bit24" text="Bit24" value="0x00800000"/>
+ <entry comment="" name="Generic_Bitfield_Bit25" text="Bit25" value="0x01000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit26" text="Bit26" value="0x02000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit27" text="Bit27" value="0x04000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit28" text="Bit28" value="0x08000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit29" text="Bit29" value="0x10000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit30" text="Bit30" value="0x20000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit31" text="Bit31" value="0x40000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit32" text="Bit32" value="0x80000000"/>
+ </entries>
+ </bitfield>
+ <enum name="DisplayLabels_36">
+ <entries>
+ <entry name="DisplayLabel_Empty" text="Empty" value="0o000"/>
+ <entry name="DisplayLabel_Acct" text="Acct" value="0o002"/>
+ <entry name="DisplayLabel_Flash" text="Flash" value="0o003"/>
+ <entry name="DisplayLabel_Login" text="Login" value="0o004"/>
+ <entry name="DisplayLabel_Device_In_Home_Location" text="Device In Home Location" value="0o005"/>
+ <entry name="DisplayLabel_Device_In_Roaming_Location" text="Device In Roaming Location" value="0o006"/>
+ <entry name="DisplayLabel_Enter_Authorization_Code" text="Enter Authorization Code" value="0o007"/>
+ <entry name="DisplayLabel_Enter_Client_Matter_Code" text="Enter Client Matter Code" value="0o010"/>
+ <entry name="DisplayLabel_Calls_Available_For_Pickup" text="Calls Available For Pickup" value="0o011"/>
+ <entry name="DisplayLabel_Cm_Fallback_Service_Operating" text="Cm Fallback Service Operating" value="0o012"/>
+ <entry name="DisplayLabel_Max_Phones_Exceeded" text="Max Phones Exceeded" value="0o013"/>
+ <entry name="DisplayLabel_Waiting_To_Rehome" text="Waiting To Rehome" value="0o014"/>
+ <entry name="DisplayLabel_Please_End_Call" text="Please End Call" value="0o015"/>
+ <entry name="DisplayLabel_Paging" text="Paging" value="0o016"/>
+ <entry name="DisplayLabel_Select_Line" text="Select Line" value="0o017"/>
+ <entry name="DisplayLabel_Transfer_Destination_Is_Busy" text="Transfer Destination Is Busy" value="0o020"/>
+ <entry name="DisplayLabel_Select_A_Service" text="Select A Service" value="0o021"/>
+ <entry name="DisplayLabel_Local_Services" text="Local Services" value="0o022"/>
+ <entry name="DisplayLabel_Enter_Search_Criteria" text="Enter Search Criteria" value="0o023"/>
+ <entry name="DisplayLabel_Night_Service" text="Night Service" value="0o024"/>
+ <entry name="DisplayLabel_Night_Service_Active" text="Night Service Active" value="0o025"/>
+ <entry name="DisplayLabel_Night_Service_Disabled" text="Night Service Disabled" value="0o026"/>
+ <entry name="DisplayLabel_Login_Successful" text="Login Successful" value="0o027"/>
+ <entry name="DisplayLabel_Wrong_Pin" text="Wrong Pin" value="0o030"/>
+ <entry name="DisplayLabel_Please_Enter_Pin" text="Please Enter Pin" value="0o031"/>
+ <entry name="DisplayLabel_Of" text="Of" value="0o032"/>
+ <entry name="DisplayLabel_Records_1_To" text="Records 1 To" value="0o033"/>
+ <entry name="DisplayLabel_No_Record_Found" text="No Record Found" value="0o034"/>
+ <entry name="DisplayLabel_Search_Results" text="Search Results" value="0o035"/>
+ <entry name="DisplayLabel_Calls_In_Queue" text="Calls In Queue" value="0o036"/>
+ <entry name="DisplayLabel_Join_To_Hunt_Group" text="Join To Hunt Group" value="0o037"/>
+ <entry name="DisplayLabel_Ready" text="Ready" value="0o040"/>
+ <entry name="DisplayLabel_Notready" text="Notready" value="0o041"/>
+ <entry name="DisplayLabel_Call_On_Hold" text="Call On Hold" value="0o042"/>
+ <entry name="DisplayLabel_Hold_Reversion" text="Hold Reversion" value="0o043"/>
+ <entry name="DisplayLabel_Setup_Failed" text="Setup Failed" value="0o044"/>
+ <entry name="DisplayLabel_No_Resources" text="No Resources" value="0o045"/>
+ <entry name="DisplayLabel_Device_Not_Authorized" text="Device Not Authorized" value="0o046"/>
+ <entry name="DisplayLabel_Monitoring" text="Monitoring" value="0o047"/>
+ <entry name="DisplayLabel_Recording_Awaiting_Call_To_Be_Active" text="Recording Awaiting Call To Be Active" value="0o050"/>
+ <entry name="DisplayLabel_Recording_Already_In_Progress" text="Recording Already In Progress" value="0o051"/>
+ <entry name="DisplayLabel_Inactive_Recording_Session" text="Inactive Recording Session" value="0o052"/>
+ <entry name="DisplayLabel_Mobility" text="Mobility" value="0o053"/>
+ <entry name="DisplayLabel_Whisper" text="Whisper" value="0o054"/>
+ <entry name="DisplayLabel_Forward_All" text="Forward All" value="0o055"/>
+ <entry name="DisplayLabel_Malicious_Call_Id" text="Malicious Call Id" value="0o056"/>
+ <entry name="DisplayLabel_Group_Pickup" text="Group Pickup" value="0o057"/>
+ <entry name="DisplayLabel_Remove_Last_Participant" text="Remove Last Participant" value="0o060"/>
+ <entry name="DisplayLabel_Other_Pickup" text="Other Pickup" value="0o061"/>
+ <entry name="DisplayLabel_Video" text="Video" value="0o062"/>
+ <entry name="DisplayLabel_End_Call" text="End Call" value="0o063"/>
+ <entry name="DisplayLabel_Conference_List" text="Conference List" value="0o064"/>
+ <entry name="DisplayLabel_Quality_Reporting_Tool" text="Quality Reporting Tool" value="0o065"/>
+ <entry name="DisplayLabel_Hunt_Group" text="Hunt Group" value="0o066"/>
+ <entry name="DisplayLabel_Use_Line_Or_Join_To_Complete" text="Use Line Or Join To Complete" value="0o067"/>
+ <entry name="DisplayLabel_Do_Not_Disturb" text="Do Not Disturb" value="0o070"/>
+ <entry name="DisplayLabel_Do_Not_Disturb_Is_Active" text="Do Not Disturb Is Active" value="0o071"/>
+ <entry name="DisplayLabel_Cfwdall_Loop_Detected" text="Cfwdall Loop Detected" value="0o072"/>
+ <entry name="DisplayLabel_Cfwdall_Hops_Exceeded" text="Cfwdall Hops Exceeded" value="0o073"/>
+ <entry name="DisplayLabel_Abbrdial" text="Abbrdial" value="0o074"/>
+ <entry name="DisplayLabel_Pickup_Is_Unavailable" text="Pickup Is Unavailable" value="0o075"/>
+ <entry name="DisplayLabel_Conference_Is_Unavailable" text="Conference Is Unavailable" value="0o076"/>
+ <entry name="DisplayLabel_Meetme_Is_Unavailable" text="Meetme Is Unavailable" value="0o077"/>
+ <entry name="DisplayLabel_Cannot_Retrieve_Parked_Call" text="Cannot Retrieve Parked Call" value="0o0100"/>
+ <entry name="DisplayLabel_Cannot_Send_Call_To_Mobile" text="Cannot Send Call To Mobile" value="0o0101"/>
+ <entry name="DisplayLabel_Record" text="Record" value="0o0103"/>
+ <entry name="DisplayLabel_Cannot_Move_Conversation" text="Cannot Move Conversation" value="0o0104"/>
+ <entry name="DisplayLabel_Cw_Off" text="Cw Off" value="0o0105"/>
+ <entry name="DisplayLabel_Coaching" text="Coaching" value="0o0106"/>
+ <entry name="DisplayLabel_Recording" text="Recording" value="0o0117"/>
+ <entry name="DisplayLabel_Recording_Failed" text="Recording Failed" value="0o0120"/>
+ <entry name="DisplayLabel_Connecting" text="Connecting" value="0o0121"/>
+ </entries>
+ </enum>
+ <enum name="DisplayLabels_200">
+ <entries>
+ <entry name="DisplayLabel_Redial" text="Redial" value="0o01"/>
+ <entry name="DisplayLabel_Newcall" text="Newcall" value="0o02"/>
+ <entry name="DisplayLabel_Hold" text="Hold" value="0o03"/>
+ <entry name="DisplayLabel_Transfer" text="Transfer" value="0o04"/>
+ <entry name="DisplayLabel_Cfwdall" text="Cfwdall" value="0o05"/>
+ <entry name="DisplayLabel_Cfwdbusy" text="Cfwdbusy" value="0o06"/>
+ <entry name="DisplayLabel_Cfwdnoanswer" text="Cfwdnoanswer" value="0o07"/>
+ <entry name="DisplayLabel_Backspace" text="Backspace" value="0o010"/>
+ <entry name="DisplayLabel_Endcall" text="Endcall" value="0o011"/>
+ <entry name="DisplayLabel_Resume" text="Resume" value="0o012"/>
+ <entry name="DisplayLabel_Answer" text="Answer" value="0o013"/>
+ <entry name="DisplayLabel_Info" text="Info" value="0o014"/>
+ <entry name="DisplayLabel_Confrn" text="Confrn" value="0o015"/>
+ <entry name="DisplayLabel_Park" text="Park" value="0o016"/>
+ <entry name="DisplayLabel_Join" text="Join" value="0o017"/>
+ <entry name="DisplayLabel_Meetme" text="Meetme" value="0o020"/>
+ <entry name="DisplayLabel_Pickup" text="Pickup" value="0o021"/>
+ <entry name="DisplayLabel_Gpickup" text="Gpickup" value="0o022"/>
+ <entry name="DisplayLabel_Your_Current_Options" text="Your Current Options" value="0o023"/>
+ <entry name="DisplayLabel_Off_Hook" text="Off Hook" value="0o024"/>
+ <entry name="DisplayLabel_On_Hook" text="On Hook" value="0o025"/>
+ <entry name="DisplayLabel_Ring_Out" text="Ring Out" value="0o026"/>
+ <entry name="DisplayLabel_From" text="From " value="0o027"/>
+ <entry name="DisplayLabel_Connected" text="Connected" value="0o030"/>
+ <entry name="DisplayLabel_Busy" text="Busy" value="0o031"/>
+ <entry name="DisplayLabel_Line_In_Use" text="Line In Use" value="0o032"/>
+ <entry name="DisplayLabel_Call_Waiting" text="Call Waiting" value="0o033"/>
+ <entry name="DisplayLabel_Call_Transfer" text="Call Transfer" value="0o034"/>
+ <entry name="DisplayLabel_Call_Park" text="Call Park" value="0o035"/>
+ <entry name="DisplayLabel_Call_Proceed" text="Call Proceed" value="0o036"/>
+ <entry name="DisplayLabel_In_Use_Remote" text="In Use Remote" value="0o037"/>
+ <entry name="DisplayLabel_Enter_Number" text="Enter Number" value="0o040"/>
+ <entry name="DisplayLabel_Call_Park_At" text="Call Park At" value="0o041"/>
+ <entry name="DisplayLabel_Primary_Only" text="Primary Only" value="0o042"/>
+ <entry name="DisplayLabel_Temp_Fail" text="Temp Fail" value="0o043"/>
+ <entry name="DisplayLabel_You_Have_Voicemail" text="You Have Voicemail" value="0o044"/>
+ <entry name="DisplayLabel_Forwarded_To" text="Forwarded To" value="0o045"/>
+ <entry name="DisplayLabel_Can_Not_Complete_Conference" text="Can Not Complete Conference" value="0o046"/>
+ <entry name="DisplayLabel_No_Conference_Bridge" text="No Conference Bridge" value="0o047"/>
+ <entry name="DisplayLabel_Can_Not_Hold_Primary_Control" text="Can Not Hold Primary Control" value="0o050"/>
+ <entry name="DisplayLabel_Invalid_Conference_Participant" text="Invalid Conference Participant" value="0o051"/>
+ <entry name="DisplayLabel_In_Conference_Already" text="In Conference Already" value="0o052"/>
+ <entry name="DisplayLabel_No_Participant_Info" text="No Participant Info" value="0o053"/>
+ <entry name="DisplayLabel_Exceed_Maximum_Parties" text="Exceed Maximum Parties" value="0o054"/>
+ <entry name="DisplayLabel_Key_Is_Not_Active" text="Key Is Not Active" value="0o055"/>
+ <entry name="DisplayLabel_Error_No_License" text="Error No License" value="0o056"/>
+ <entry name="DisplayLabel_Error_Dbconfig" text="Error Dbconfig" value="0o057"/>
+ <entry name="DisplayLabel_Error_Database" text="Error Database" value="0o060"/>
+ <entry name="DisplayLabel_Error_Pass_Limit" text="Error Pass Limit" value="0o061"/>
+ <entry name="DisplayLabel_Error_Unknown" text="Error Unknown" value="0o062"/>
+ <entry name="DisplayLabel_Error_Mismatch" text="Error Mismatch" value="0o063"/>
+ <entry name="DisplayLabel_Conference" text="Conference" value="0o064"/>
+ <entry name="DisplayLabel_Park_Number" text="Park Number" value="0o065"/>
+ <entry name="DisplayLabel_Private" text="Private" value="0o066"/>
+ <entry name="DisplayLabel_Not_Enough_Bandwidth" text="Not Enough Bandwidth" value="0o067"/>
+ <entry name="DisplayLabel_Unknown_Number" text="Unknown Number" value="0o070"/>
+ <entry name="DisplayLabel_Rmlstc" text="Rmlstc" value="0o071"/>
+ <entry name="DisplayLabel_Voicemail" text="Voicemail" value="0o072"/>
+ <entry name="DisplayLabel_Immdiv" text="Immdiv" value="0o073"/>
+ <entry name="DisplayLabel_Intrcpt" text="Intrcpt" value="0o074"/>
+ <entry name="DisplayLabel_Setwtch" text="Setwtch" value="0o075"/>
+ <entry name="DisplayLabel_Trnsfvm" text="Trnsfvm" value="0o076"/>
+ <entry name="DisplayLabel_Dnd" text="Dnd" value="0o077"/>
+ <entry name="DisplayLabel_Divall" text="Divall" value="0o0100"/>
+ <entry name="DisplayLabel_Callback" text="Callback" value="0o0101"/>
+ <entry name="DisplayLabel_Network_Congestion_Rerouting" text="Network Congestion Rerouting" value="0o0102"/>
+ <entry name="DisplayLabel_Barge" text="Barge" value="0o0103"/>
+ <entry name="DisplayLabel_Failed_To_Setup_Barge" text="Failed To Setup Barge" value="0o0104"/>
+ <entry name="DisplayLabel_Another_Barge_Exists" text="Another Barge Exists" value="0o0105"/>
+ <entry name="DisplayLabel_Incompatible_Device_Type" text="Incompatible Device Type" value="0o0106"/>
+ <entry name="DisplayLabel_No_Park_Number_Available" text="No Park Number Available" value="0o0107"/>
+ <entry name="DisplayLabel_Callpark_Reversion" text="Callpark Reversion" value="0o0110"/>
+ <entry name="DisplayLabel_Service_Is_Not_Active" text="Service Is Not Active" value="0o0111"/>
+ <entry name="DisplayLabel_High_Traffic_Try_Again_Later" text="High Traffic Try Again Later" value="0o0112"/>
+ <entry name="DisplayLabel_Qrt" text="Qrt" value="0o0113"/>
+ <entry name="DisplayLabel_Mcid" text="Mcid" value="0o0114"/>
+ <entry name="DisplayLabel_Dirtrfr" text="Dirtrfr" value="0o0115"/>
+ <entry name="DisplayLabel_Select" text="Select" value="0o0116"/>
+ <entry name="DisplayLabel_Conflist" text="Conflist" value="0o0117"/>
+ <entry name="DisplayLabel_Idivert" text="Idivert" value="0o0120"/>
+ <entry name="DisplayLabel_Cbarge" text="Cbarge" value="0o0121"/>
+ <entry name="DisplayLabel_Can_Not_Complete_Transfer" text="Can Not Complete Transfer" value="0o0122"/>
+ <entry name="DisplayLabel_Can_Not_Join_Calls" text="Can Not Join Calls" value="0o0123"/>
+ <entry name="DisplayLabel_Mcid_Successful" text="Mcid Successful" value="0o0124"/>
+ <entry name="DisplayLabel_Number_Not_Configured" text="Number Not Configured" value="0o0125"/>
+ <entry name="DisplayLabel_Security_Error" text="Security Error" value="0o0126"/>
+ <entry name="DisplayLabel_Video_Bandwidth_Unavailable" text="Video Bandwidth Unavailable" value="0o0127"/>
+ <entry name="DisplayLabel_Vidmode" text="Vidmode" value="0o0130"/>
+ <entry name="DisplayLabel_Max_Call_Duration_Timeout" text="Max Call Duration Timeout" value="0o0131"/>
+ <entry name="DisplayLabel_Max_Hold_Duration_Timeout" text="Max Hold Duration Timeout" value="0o0132"/>
+ <entry name="DisplayLabel_Opickup" text="Opickup" value="0o0133"/>
+ <entry name="DisplayLabel_Hlog" text="Hlog" value="0o0134"/>
+ <entry name="DisplayLabel_Logged_Out_Of_Hunt_Group" text="Logged Out Of Hunt Group" value="0o0135"/>
+ <entry name="DisplayLabel_Park_Slot_Unavailable" text="Park Slot Unavailable" value="0o0136"/>
+ <entry name="DisplayLabel_No_Call_Available_For_Pickup" text="No Call Available For Pickup" value="0o0137"/>
+ <entry name="DisplayLabel_External_Transfer_Restricted" text="External Transfer Restricted" value="0o0141"/>
+ <entry name="DisplayLabel_No_Line_Available_For_Pickup" text="No Line Available For Pickup" value="0o0142"/>
+ <entry name="DisplayLabel_Path_Replacement_In_Progress" text="Path Replacement In Progress" value="0o0143"/>
+ <entry name="DisplayLabel_Unknown_2" text="Unknown 2" value="0o0144"/>
+ <entry name="DisplayLabel_Mac_Address" text="Mac Address" value="0o0145"/>
+ <entry name="DisplayLabel_Host_Name" text="Host Name" value="0o0146"/>
+ <entry name="DisplayLabel_Domain_Name" text="Domain Name" value="0o0147"/>
+ <entry name="DisplayLabel_Ip_Address" text="Ip Address" value="0o0150"/>
+ <entry name="DisplayLabel_Subnet_Mask" text="Subnet Mask" value="0o0151"/>
+ <entry name="DisplayLabel_Tftp_Server_1" text="Tftp Server 1" value="0o0152"/>
+ <entry name="DisplayLabel_Default_Router_1" text="Default Router 1" value="0o0153"/>
+ <entry name="DisplayLabel_Default_Router_2" text="Default Router 2" value="0o0154"/>
+ <entry name="DisplayLabel_Default_Router_3" text="Default Router 3" value="0o0155"/>
+ <entry name="DisplayLabel_Default_Router_4" text="Default Router 4" value="0o0156"/>
+ <entry name="DisplayLabel_Default_Router_5" text="Default Router 5" value="0o0157"/>
+ <entry name="DisplayLabel_Dns_Server_1" text="Dns Server 1" value="0o0160"/>
+ <entry name="DisplayLabel_Dns_Server_2" text="Dns Server 2" value="0o0161"/>
+ <entry name="DisplayLabel_Dns_Server_3" text="Dns Server 3" value="0o0162"/>
+ <entry name="DisplayLabel_Dns_Server_4" text="Dns Server 4" value="0o0163"/>
+ <entry name="DisplayLabel_Dns_Server_5" text="Dns Server 5" value="0o0164"/>
+ <entry name="DisplayLabel_Operational_Vlan_Id" text="Operational Vlan Id" value="0o0165"/>
+ <entry name="DisplayLabel_Admin_Vlan_Id" text="Admin Vlan Id" value="0o0166"/>
+ <entry name="DisplayLabel_Call_Manager_1" text="Call Manager 1" value="0o0167"/>
+ <entry name="DisplayLabel_Call_Manager_2" text="Call Manager 2" value="0o0170"/>
+ <entry name="DisplayLabel_Call_Manager_3" text="Call Manager 3" value="0o0171"/>
+ <entry name="DisplayLabel_Call_Manager_4" text="Call Manager 4" value="0o0172"/>
+ <entry name="DisplayLabel_Call_Manager_5" text="Call Manager 5" value="0o0173"/>
+ <entry name="DisplayLabel_Information_Url" text="Information Url" value="0o0174"/>
+ <entry name="DisplayLabel_Directories_Url" text="Directories Url" value="0o0175"/>
+ <entry name="DisplayLabel_Messages_Url" text="Messages Url" value="0o0176"/>
+ <entry name="DisplayLabel_Services_Url" text="Services Url" value="0o0177"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="KeepAliveReqMessage" opcode="0x0000" type="RegistrationAndManagement"/>
+ <enum name="DeviceType">
+ <entries>
+ <entry comment="" name="DeviceType_Station30SPplus" text="Station30SPplus" value="0x00001"/>
+ <entry comment="" name="DeviceType_Station12SPplus" text="Station12SPplus" value="0x00002"/>
+ <entry comment="" name="DeviceType_Station12SP" text="Station12SP" value="0x00003"/>
+ <entry comment="" name="DeviceType_Station12" text="Station12" value="0x00004"/>
+ <entry comment="" name="DeviceType_Station30VIP" text="Station30VIP" value="0x00005"/>
+ <entry comment="" name="DeviceType_StationTelecasterMgr" text="StationTelecasterMgr" value="0x00007"/>
+ <entry comment="" name="DeviceType_StationVGC" text="StationVGC" value="0x0000a"/>
+ <entry comment="" name="DeviceType_VGCVirtualPhone" text="VGCVirtualPhone" value="0x0000b"/>
+ <entry comment="" name="DeviceType_StationATA186" text="StationATA186" value="0x0000c"/>
+ <entry comment="" name="DeviceType_StationATA188" text="StationATA188" value="0x0000d"/>
+ <entry comment="" name="DeviceType_EmccBase" text="EmccBase" value="0x0000f"/>
+ <entry comment="" name="DeviceType_Virtual30SPplus" text="Virtual30SPplus" value="0x00014"/>
+ <entry comment="" name="DeviceType_StationPhoneApplication" text="StationPhoneApplication" value="0x00015"/>
+ <entry comment="" name="DeviceType_AnalogAccess" text="AnalogAccess" value="0x0001e"/>
+ <entry comment="" name="DeviceType_DigitalAccessTitan1" text="DigitalAccessTitan1" value="0x00028"/>
+ <entry comment="Digital Access T1" name="DeviceType_DigitalAccessT1" text="Digital Access T1" value="0x00029"/>
+ <entry comment="" name="DeviceType_DigitalAccessTitan2" text="DigitalAccessTitan2" value="0x0002a"/>
+ <entry comment="" name="DeviceType_DigitalAccessLennon" text="DigitalAccessLennon" value="0x0002b"/>
+ <entry comment="" name="DeviceType_AnalogAccessElvis" text="AnalogAccessElvis" value="0x0002f"/>
+ <entry comment="" name="DeviceType_VGCGateway" text="VGCGateway" value="0x00030"/>
+ <entry comment="" name="DeviceType_ConferenceBridge" text="ConferenceBridge" value="0x00032"/>
+ <entry comment="" name="DeviceType_ConferenceBridgeYoko" text="ConferenceBridgeYoko" value="0x00033"/>
+ <entry comment="" name="DeviceType_ConferenceBridgeDixieLand" text="ConferenceBridgeDixieLand" value="0x00034"/>
+ <entry comment="" name="DeviceType_ConferenceBridgeSummit" text="ConferenceBridgeSummit" value="0x00035"/>
+ <entry comment="" name="DeviceType_H225" text="H225" value="0x0003c"/>
+ <entry comment="" name="DeviceType_H323Phone" text="H323Phone" value="0x0003d"/>
+ <entry comment="" name="DeviceType_H323Gateway" text="H323Gateway" value="0x0003e"/>
+ <entry comment="" name="DeviceType_MusicOnHold" text="MusicOnHold" value="0x00046"/>
+ <entry comment="" name="DeviceType_Pilot" text="Pilot" value="0x00047"/>
+ <entry comment="" name="DeviceType_TapiPort" text="TapiPort" value="0x00048"/>
+ <entry comment="" name="DeviceType_TapiRoutePoint" text="TapiRoutePoint" value="0x00049"/>
+ <entry comment="" name="DeviceType_VoiceInBox" text="VoiceInBox" value="0x00050"/>
+ <entry comment="" name="DeviceType_VoiceInboxAdmin" text="VoiceInboxAdmin" value="0x00051"/>
+ <entry comment="" name="DeviceType_LineAnnunciator" text="LineAnnunciator" value="0x00052"/>
+ <entry comment="" name="DeviceType_SoftwareMtpDixieLand" text="SoftwareMtpDixieLand" value="0x00053"/>
+ <entry comment="" name="DeviceType_CiscoMediaServer" text="CiscoMediaServer" value="0x00054"/>
+ <entry comment="" name="DeviceType_ConferenceBridgeFlint" text="ConferenceBridgeFlint" value="0x00055"/>
+ <entry comment="" name="DeviceType_ConferenceBridgeHetroGen" text="ConferenceBridgeHetroGen" value="0x00056"/>
+ <entry comment="" name="DeviceType_ConferenceBridgeAudVid" text="ConferenceBridgeAudVid" value="0x00057"/>
+ <entry comment="" name="DeviceType_ConferenceHVideoBridge" text="ConferenceHVideoBridge" value="0x00058"/>
+ <entry comment="" name="DeviceType_RouteList" text="RouteList" value="0x0005a"/>
+ <entry comment="" name="DeviceType_LoadSimulator" text="LoadSimulator" value="0x00064"/>
+ <entry comment="" name="DeviceType_MediaTerminationPoint" text="MediaTerminationPoint" value="0x0006e"/>
+ <entry comment="" name="DeviceType_MediaTerminationPointYoko" text="MediaTerminationPointYoko" value="0x0006f"/>
+ <entry comment="" name="DeviceType_MediaTerminationPointDixieLand" text="MediaTerminationPointDixieLand" value="0x00070"/>
+ <entry comment="" name="DeviceType_MediaTerminationPointSummit" text="MediaTerminationPointSummit" value="0x00071"/>
+ <entry comment="" name="DeviceType_Cisco_7941G" text="7941G" value="0x00073"/>
+ <entry comment="" name="DeviceType_Cisco_7971" text="7971" value="0x00077"/>
+ <entry comment="" name="DeviceType_MGCPStation" text="MGCPStation" value="0x00078"/>
+ <entry comment="" name="DeviceType_MGCPTrunk" text="MGCPTrunk" value="0x00079"/>
+ <entry comment="" name="DeviceType_RASProxy" text="RASProxy" value="0x0007a"/>
+ <entry comment="" name="DeviceType_Trunk" text="Trunk" value="0x0007d"/>
+ <entry comment="" name="DeviceType_Annunciator" text="Annunciator" value="0x0007e"/>
+ <entry comment="" name="DeviceType_MonitorBridge" text="MonitorBridge" value="0x0007f"/>
+ <entry comment="" name="DeviceType_Recorder" text="Recorder" value="0x00080"/>
+ <entry comment="" name="DeviceType_MonitorBridgeYoko" text="MonitorBridgeYoko" value="0x00081"/>
+ <entry comment="" name="DeviceType_SipTrunk" text="SipTrunk" value="0x00083"/>
+ <entry comment="" name="DeviceType_SipGateway" text="SipGateway" value="0x00084"/>
+ <entry comment="" name="DeviceType_WsmTrunk" text="WsmTrunk" value="0x00085"/>
+ <entry comment="" name="DeviceType_RemoteDestination" text="RemoteDestination" value="0x00086"/>
+ <entry comment="" name="DeviceType_GenericDevice" text="GenericDevice" value="0x000fd"/>
+ <entry comment="" name="DeviceType_UnknownMGCPGateway" text="UnknownMGCPGateway" value="0x000fe"/>
+ <entry comment="" name="DeviceType_NotDefined" text="NotDefined" value="0x000ff"/>
+ <entry comment="" name="DeviceType_Cisco_7911" text="7911" value="0x00133"/>
+ <entry comment="" name="DeviceType_MotorolaCN622" text="MotorolaCN622" value="0x0014f"/>
+ <entry comment="" name="DeviceType_ThirdPartySipBasic" text="3rdPartySipBasic" value="0x00150"/>
+ <entry comment="" name="DeviceType_UnifiedCommunicator" text="UnifiedCommunicator" value="0x00166"/>
+ <entry comment="" name="DeviceType_Cisco_7921" text="7921" value="0x0016d"/>
+ <entry comment="" name="DeviceType_Cisco_7906" text="7906" value="0x00171"/>
+ <entry comment="" name="DeviceType_ThirdPartySipAdv" text="3rdPartySipAdv" value="0x00176"/>
+ <entry comment="" name="DeviceType_Telepresence" text="Telepresence" value="0x00177"/>
+ <entry comment="" name="DeviceType_Cisco_7962" text="7962" value="0x00194"/>
+ <entry comment="" name="DeviceType_Cisco_3951" text="3951" value="0x0019c"/>
+ <entry comment="" name="DeviceType_Cisco_7937" text="7937" value="0x001af"/>
+ <entry comment="" name="DeviceType_Cisco_7942" text="7942" value="0x001b2"/>
+ <entry comment="" name="DeviceType_Cisco_7945" text="7945" value="0x001b3"/>
+ <entry comment="" name="DeviceType_Cisco_7965" text="7965" value="0x001b4"/>
+ <entry comment="" name="DeviceType_Cisco_7975" text="7975" value="0x001b5"/>
+ <entry comment="" name="DeviceType_Cisco_9971_CE" text="9971_CE" value="0x001ed"/>
+ <entry comment="" name="DeviceType_UnifiedMobileCommunicator" text="UnifiedMobileCommunicator" value="0x001d4"/>
+ <entry comment="" name="DeviceType_CSF" text="CSF" value="0x001f7"/>
+ <entry comment="" name="DeviceType_CiscoTelepresenceMcu" text="CiscoTelepresenceMcu" value="0x00255"/>
+ <entry comment="" name="DeviceType_CiscoTelePresenceConductor" text="CiscoTelePresenceConductor" value="0x08cc9"/>
+ <entry comment="" name="DeviceType_CiscoTelePresenceExchange" text="CiscoTelePresenceExchange" value="0x00257"/>
+ <entry comment="" name="DeviceType_CiscoTelePresenceSoftwareConferenceBridge" text="CiscoTelePresenceSoftwareConferenceBridge" value="0x00258"/>
+ <entry comment="" name="DeviceType_ASSip" text="ASSip" value="0x00277"/>
+ <entry comment="" name="DeviceType_CtiRemoteDevice" text="CtiRemoteDevice" value="0x0027b"/>
+ <entry comment="" name="DeviceType_Cisco_7905" text="7905" value="0x04e20"/>
+ <entry comment="" name="DeviceType_Cisco_7920" text="7920" value="0x07532"/>
+ <entry comment="" name="DeviceType_Cisco_7970" text="7970" value="0x07536"/>
+ <entry comment="" name="DeviceType_Cisco_7912" text="7912" value="0x07537"/>
+ <entry comment="" name="DeviceType_Cisco_7902" text="7902" value="0x07538"/>
+ <entry comment="" name="DeviceType_Cisco_7961G" text="7961G" value="0x07542"/>
+ <entry comment="" name="DeviceType_Cisco_7936" text="7936" value="0x07543"/>
+ <entry comment="" name="DeviceType_AnalogPhone" text="AnalogPhone" value="0x0754b"/>
+ <entry comment="" name="DeviceType_ISDNBRIPhone" text="ISDNBRIPhone" value="0x0754c"/>
+ <entry comment="" name="DeviceType_SCCPGwVirtualPhone" text="SCCPGwVirtualPhone" value="0x07550"/>
+ <entry comment="" name="DeviceType_IP_STE" text="IP_STE" value="0x07553"/>
+ <entry comment="" name="DeviceType_InteractiveVoiceResponse" text="InteractiveVoiceResponse" value="0x8d7b"/>
+ <entry comment="Cisco 7910" name="DeviceType_Cisco_7910" text="Cisco 7910" value="0x00006"/>
+ <entry comment="Cisco 7925" name="DeviceType_Cisco_7925" text="Cisco 7925" value="0x001e4"/>
+ <entry comment="Cisco 7931" name="DeviceType_Cisco_7931" text="Cisco 7931" value="0x0015c"/>
+ <entry comment="Cisco 7935" name="DeviceType_Cisco_7935" text="Cisco 7935" value="0x00009"/>
+ <entry comment="Cisco 7940" name="DeviceType_Cisco_7940" text="Cisco 7940" value="0x00008"/>
+ <entry comment="Cisco 7961 GE" name="DeviceType_Cisco_7961_GE" text="Cisco 7961 GE" value="0x00134"/>
+ <entry comment="" name="DeviceType_Cisco_7961G_GE" text="7961G_GE" value="0x00135"/>
+ <entry comment="Cisco 7985" name="DeviceType_Cisco_7985" text="Cisco 7985" value="0x0012e"/>
+ <entry comment="Nokia E Series" name="DeviceType_Nokia_E_Series" text="Nokia E Series" value="0x00113"/>
+ <entry comment="Cisco IP Communicator" name="DeviceType_Cisco_IP_Communicator" text="Cisco IP Communicator" value="0x07540"/>
+ <entry comment="Nokia ICC client" name="DeviceType_Nokia_ICC_client" text="Nokia ICC client" value="0x00178"/>
+ <entry comment="Cisco 6901" name="DeviceType_Cisco_6901" text="Cisco 6901" value="0x00223"/>
+ <entry comment="Cisco 6911" name="DeviceType_Cisco_6911" text="Cisco 6911" value="0x00224"/>
+ <entry comment="Cisco 6921" name="DeviceType_Cisco_6921" text="Cisco 6921" value="0x001ef"/>
+ <entry comment="Cisco 6941" name="DeviceType_Cisco_6941" text="Cisco 6941" value="0x001f0"/>
+ <entry comment="Cisco 6945" name="DeviceType_Cisco_6945" text="Cisco 6945" value="0x00234"/>
+ <entry comment="Cisco 6961" name="DeviceType_Cisco_6961" text="Cisco 6961" value="0x001f1"/>
+ <entry comment="Cisco 8941" name="DeviceType_Cisco_8941" text="Cisco 8941" value="0x0024a"/>
+ <entry comment="Cisco 8945" name="DeviceType_Cisco_8945" text="Cisco 8945" value="0x00249"/>
+ <entry comment="Cisco SPA 303G (1 line)" name="DeviceType_Cisco_SPA_303G" text="Cisco SPA 303G" value="0x1388b"/>
+ <entry comment="Cisco SPA 502G (1 line)" name="DeviceType_Cisco_SPA_502G" text="Cisco SPA 502G" value="0x13883"/>
+ <entry comment="Cisco SPA 504G (4 lines)" name="DeviceType_Cisco_SPA_504G" text="Cisco SPA 504G" value="0x13884"/>
+ <entry comment="Cisco SPA 509G (12 lines)" name="DeviceType_Cisco_SPA_509G" text="Cisco SPA 509G" value="0x13887"/>
+ <entry comment="Cisco SPA 521S" name="DeviceType_Cisco_SPA_521S" text="Cisco SPA 521S" value="0x13880"/>
+ <entry comment="Cisco SPA 525G (5 lines / color / wifi / bluetooth)" name="DeviceType_Cisco_SPA_525G" text="Cisco SPA 525G" value="0x13885"/>
+ <entry comment="Cisco SPA 525G2 (5 lines / color / wifi / bluetooth)" name="DeviceType_Cisco_SPA_525G2" text="Cisco SPA 525G2" value="0x13889"/>
+ <entry comment="Cisco 7914 AddOn" name="DeviceType_Cisco_7914_AddOn" text="Cisco 7914 AddOn" value="0x0007c"/>
+ <entry comment="Cisco 7915 AddOn (12 Buttons)" name="DeviceType_Cisco_7915_AddOn" text="Cisco 7915 AddOn" value="0x000e3"/>
+ <entry comment="Cisco 7915 AddOn (24 Buttons)" name="DeviceType_Cisco_7915_AddOn_24" text="Cisco 7915 AddOn 24" value="0x000e4"/>
+ <entry comment="Cisco 7916 AddOn (12 Buttons)" name="DeviceType_Cisco_7916_AddOn" text="Cisco 7916 AddOn" value="0x000e5"/>
+ <entry comment="Cisco 7916 AddOn (24 Buttons)" name="DeviceType_Cisco_7916_AddOn_24" text="Cisco 7916 AddOn 24" value="0x000e6"/>
+ </entries>
+ </enum>
+ <bitfield name="PhoneFeatures">
+ <entries>
+ <entry comment="" name="PhoneFeatures_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="PhoneFeatures_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="PhoneFeatures_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="PhoneFeatures_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="Supports UTF-8" name="PhoneFeatures_UTF8" text="UTF8Bit5" value="0x0010"/>
+ <entry comment="" name="PhoneFeatures_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="PhoneFeatures_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="Support Dynamic Messages" name="PhoneFeatures_DynamicMessages" text="DynamicMessages" value="0x0080"/>
+ <entry comment="" name="PhoneFeatures_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="Supports DTMF Type RFC2833" name="PhoneFeatures_RFC2833" text="RFC2833" value="0x0200"/>
+ <entry comment="" name="PhoneFeatures_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="PhoneFeatures_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="PhoneFeatures_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="PhoneFeatures_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="PhoneFeatures_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="Abbreviated Dial" name="PhoneFeatures_Abbreviated_Dial" text="AbbrevDial" value="0x8000"/>
+ </entries>
+ </bitfield>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="RegisterReqMessage" opcode="0x0001" type="RegistrationAndManagement">
+ <fields>
+ <struct comment="Station Identifier" longcomment="Device Name of this phone / appliance" name="sid" type="struct">
+ <fields>
+ <string comment="Device Name" name="DeviceName" size="16" type="char"/>
+ <integer comment="User Id" name="reserved_for_future_use" type="uint32"/>
+ <integer comment="Device Instance" name="instance" type="uint32"/>
+ </fields>
+ </struct>
+ <ip comment="IPv4 Address" name="stationIpAddr" type="ipv4"/>
+ <enum comment="Device Type" longcomment="Device Type of this phone / appliance" name="deviceType" subtype="DeviceType" type="uint32"/>
+ <integer comment="Maximum Number of Concurrent RTP Streams" longcomment="Indicates the maximum number of simultaneous RTP duplex streams, which this client/appliance can handle." name="maxStreams" type="uint32"/>
+ </fields>
+ <fields size_gt="52">
+ <integer comment="Active RTP Streams" longcomment="Active RTP Streams at Registration" name="activeStreams" type="uint32"/>
+ <integer comment="Protocol Version" longcomment="Maximum Supported Protocol Version" name="protocolVer" type="uint8"/>
+ <integer comment="unknown" longcomment="unknown (Part of ProtocolVer)" name="unknown" type="uint8"/>
+ <bitfield comment="Features this device supports" name="phoneFeatures" size="uint16" subtype="PhoneFeatures" type="bitfield">
+ <entries>
+ <entry comment="" name="PhoneFeatures_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="PhoneFeatures_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="PhoneFeatures_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="PhoneFeatures_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="Supports UTF-8" name="PhoneFeatures_UTF8" text="UTF8Bit5" value="0x0010"/>
+ <entry comment="" name="PhoneFeatures_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="PhoneFeatures_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="Support Dynamic Messages" name="PhoneFeatures_DynamicMessages" text="DynamicMessages" value="0x0080"/>
+ <entry comment="" name="PhoneFeatures_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="Supports DTMF Type RFC2833" name="PhoneFeatures_RFC2833" text="RFC2833" value="0x0200"/>
+ <entry comment="" name="PhoneFeatures_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="PhoneFeatures_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="PhoneFeatures_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="PhoneFeatures_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="PhoneFeatures_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="Abbreviated Dial" name="PhoneFeatures_Abbreviated_Dial" text="AbbrevDial" value="0x8000"/>
+ </entries>
+ </bitfield>
+ <integer comment="Maximum Number of Concurrent Conferences" longcomment="Indicates the maximum number of simultaneous Conferences, which this client/appliance can handle" name="maxConferences" type="uint32"/>
+ </fields>
+ <fields size_gt="100">
+ <integer comment="Active Conferences" longcomment="Active Conferences at Registration" name="activeConferences" type="uint32"/>
+ <ether comment="Mac Address" longcomment="Ethernet/Mac Address" name="macAddress" size="12" type="ether"/>
+ <integer comment="IPv4 Address Scope" name="ipV4AddressScope" type="uint32"/>
+ <integer comment="Maximum number of lines" name="maxNumberOfLines" type="uint32"/>
+ <ip comment="IPv6 Address" endianness="big" name="stationIpV6Addr" type="ipv6"/>
+ <integer comment="IPv6 Address Scope" name="ipV6AddressScope" type="uint32"/>
+ <string comment="Firmware Load Name" name="firmwareLoadName" size="32" type="char"/>
+ </fields>
+ <fields beginversion="0" endversion="22" size_gt="191">
+ <string comment="" name="configVersionStamp" size="48" type="char"/>
+ </fields>
+ </message>
+ <enum name="KeyPadButton">
+ <entries>
+ <entry comment="" name="KeyPadButton_Zero" text="Zero" value="0x0000"/>
+ <entry comment="" name="KeyPadButton_One" text="One" value="0x0001"/>
+ <entry comment="" name="KeyPadButton_Two" text="Two" value="0x0002"/>
+ <entry comment="" name="KeyPadButton_Three" text="Three" value="0x0003"/>
+ <entry comment="" name="KeyPadButton_Four" text="Four" value="0x0004"/>
+ <entry comment="" name="KeyPadButton_Five" text="Five" value="0x0005"/>
+ <entry comment="" name="KeyPadButton_Six" text="Six" value="0x0006"/>
+ <entry comment="" name="KeyPadButton_Seven" text="Seven" value="0x0007"/>
+ <entry comment="" name="KeyPadButton_Eight" text="Eight" value="0x0008"/>
+ <entry comment="" name="KeyPadButton_Nine" text="Nine" value="0x0009"/>
+ <entry comment="" name="KeyPadButton_A" text="A" value="0x000a"/>
+ <entry comment="" name="KeyPadButton_B" text="B" value="0x000b"/>
+ <entry comment="" name="KeyPadButton_C" text="C" value="0x000c"/>
+ <entry comment="" name="KeyPadButton_D" text="D" value="0x000d"/>
+ <entry comment="" name="KeyPadButton_Star" text="Star" value="0x000e"/>
+ <entry comment="" name="KeyPadButton_Pound" text="Pound" value="0x000f"/>
+ <entry comment="" name="KeyPadButton_Plus" text="Plus" value="0x0010"/>
+ </entries>
+ </enum>
+ <enum name="KeyPadButton_short">
+ <entries>
+ <entry comment="" name="KeyPadButton_Zero" text="0" value="0x0000"/>
+ <entry comment="" name="KeyPadButton_One" text="1" value="0x0001"/>
+ <entry comment="" name="KeyPadButton_Two" text="2" value="0x0002"/>
+ <entry comment="" name="KeyPadButton_Three" text="3" value="0x0003"/>
+ <entry comment="" name="KeyPadButton_Four" text="4" value="0x0004"/>
+ <entry comment="" name="KeyPadButton_Five" text="5" value="0x0005"/>
+ <entry comment="" name="KeyPadButton_Six" text="6" value="0x0006"/>
+ <entry comment="" name="KeyPadButton_Seven" text="7" value="0x0007"/>
+ <entry comment="" name="KeyPadButton_Eight" text="8" value="0x0008"/>
+ <entry comment="" name="KeyPadButton_Nine" text="9" value="0x0009"/>
+ <entry comment="" name="KeyPadButton_A" text="A" value="0x000a"/>
+ <entry comment="" name="KeyPadButton_B" text="B" value="0x000b"/>
+ <entry comment="" name="KeyPadButton_C" text="C" value="0x000c"/>
+ <entry comment="" name="KeyPadButton_D" text="D" value="0x000d"/>
+ <entry comment="" name="KeyPadButton_Star" text="*" value="0x000e"/>
+ <entry comment="" name="KeyPadButton_Pound" text="#" value="0x000f"/>
+ <entry comment="" name="KeyPadButton_Plus" text="+" value="0x0010"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="IpPortMessage" opcode="0x0002" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="RTP Media Port" name="rtpMediaPort" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="KeypadButtonMessage" opcode="0x0003" type="CallControl">
+ <fields>
+ <enum comment="KeyPad Button which was Pressed" name="kpButton" subtype="KeyPadButton" type="uint32" make_additional_info_short="yes"/>
+ </fields>
+ <fields beginversion="0" endversion="22" size_gt="8">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="EnblocCallMessage" opcode="0x0004" type="CallControl">
+ <fields>
+ <string comment="CalledPartyNumber" declare="yes" name="calledParty" size="VariableDirnumSize" type="char"/>
+ </fields>
+ <fields size_gt="28">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="DeviceStimulus">
+ <entries>
+ <entry comment="" name="DeviceStimulus_LastNumberRedial" text="LastNumberRedial" value="0x0001"/>
+ <entry comment="" name="DeviceStimulus_SpeedDial" text="SpeedDial" value="0x0002"/>
+ <entry comment="" name="DeviceStimulus_Hold" text="Hold" value="0x0003"/>
+ <entry comment="" name="DeviceStimulus_Transfer" text="Transfer" value="0x0004"/>
+ <entry comment="" name="DeviceStimulus_ForwardAll" text="ForwardAll" value="0x0005"/>
+ <entry comment="" name="DeviceStimulus_ForwardBusy" text="ForwardBusy" value="0x0006"/>
+ <entry comment="" name="DeviceStimulus_ForwardNoAnswer" text="ForwardNoAnswer" value="0x0007"/>
+ <entry comment="" name="DeviceStimulus_Display" text="Display" value="0x0008"/>
+ <entry comment="" name="DeviceStimulus_Line" text="Line" value="0x0009"/>
+ <entry comment="" name="DeviceStimulus_T120Chat" text="T120Chat" value="0x000a"/>
+ <entry comment="" name="DeviceStimulus_T120Whiteboard" text="T120Whiteboard" value="0x000b"/>
+ <entry comment="" name="DeviceStimulus_T120ApplicationSharing" text="T120ApplicationSharing" value="0x000c"/>
+ <entry comment="" name="DeviceStimulus_T120FileTransfer" text="T120FileTransfer" value="0x000d"/>
+ <entry comment="" name="DeviceStimulus_Video" text="Video" value="0x000e"/>
+ <entry comment="" name="DeviceStimulus_VoiceMail" text="VoiceMail" value="0x000f"/>
+ <entry comment="" name="DeviceStimulus_AnswerRelease" text="AnswerRelease" value="0x0010"/>
+ <entry comment="" name="DeviceStimulus_AutoAnswer" text="AutoAnswer" value="0x0011"/>
+ <entry comment="" name="DeviceStimulus_Select" text="Select" value="0x0012"/>
+ <entry comment="" name="DeviceStimulus_Privacy" text="Privacy" value="0x0013"/>
+ <entry comment="" name="DeviceStimulus_ServiceURL" text="ServiceURL" value="0x0014"/>
+ <entry comment="" name="DeviceStimulus_BLFSpeedDial" text="BLFSpeedDial" value="0x0015"/>
+ <entry comment="" name="DeviceStimulus_DPark" text="DPark" value="0x0016"/>
+ <entry comment="" name="DeviceStimulus_Intercom" text="Intercom" value="0x0017"/>
+ <entry comment="" name="DeviceStimulus_MaliciousCall" text="MaliciousCall" value="0x001b"/>
+ <entry comment="" name="DeviceStimulus_GenericAppB1" text="GenericAppB1" value="0x0021"/>
+ <entry comment="" name="DeviceStimulus_GenericAppB2" text="GenericAppB2" value="0x0022"/>
+ <entry comment="" name="DeviceStimulus_GenericAppB3" text="GenericAppB3" value="0x0023"/>
+ <entry comment="" name="DeviceStimulus_GenericAppB4" text="GenericAppB4" value="0x0024"/>
+ <entry comment="" name="DeviceStimulus_GenericAppB5" text="GenericAppB5" value="0x0025"/>
+ <entry comment="" name="DeviceStimulus_MeetMeConference" text="MeetMeConference" value="0x007b"/>
+ <entry comment="" name="DeviceStimulus_Conference" text="Conference" value="0x007d"/>
+ <entry comment="" name="DeviceStimulus_CallPark" text="CallPark" value="0x007e"/>
+ <entry comment="" name="DeviceStimulus_CallPickUp" text="CallPickUp" value="0x007f"/>
+ <entry comment="" name="DeviceStimulus_GroupCallPickUp" text="GroupCallPickUp" value="0x0080"/>
+ <entry comment="" name="DeviceStimulus_Mobility" text="Mobility" value="0x0081"/>
+ <entry comment="" name="DeviceStimulus_DoNotDisturb" text="DoNotDisturb" value="0x0082"/>
+ <entry comment="" name="DeviceStimulus_ConfList" text="ConfList" value="0x0083"/>
+ <entry comment="" name="DeviceStimulus_RemoveLastParticipant" text="RemoveLastParticipant" value="0x0084"/>
+ <entry comment="" name="DeviceStimulus_QRT" text="QRT" value="0x0085"/>
+ <entry comment="" name="DeviceStimulus_CallBack" text="CallBack" value="0x0086"/>
+ <entry comment="" name="DeviceStimulus_OtherPickup" text="OtherPickup" value="0x0087"/>
+ <entry comment="" name="DeviceStimulus_VideoMode" text="VideoMode" value="0x0088"/>
+ <entry comment="" name="DeviceStimulus_NewCall" text="NewCall" value="0x0089"/>
+ <entry comment="" name="DeviceStimulus_EndCall" text="EndCall" value="0x008a"/>
+ <entry comment="" name="DeviceStimulus_HLog" text="HLog" value="0x008b"/>
+ <entry comment="" name="DeviceStimulus_Queuing" text="Queuing" value="0x008f"/>
+ <entry boundscheck="max" comment="" name="DeviceStimulus_MaxStimulusValue" text="MaxStimulusValue" value="0x00ff"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="StimulusMessage" opcode="0x0005" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="Device Stimulus" name="stimulus" subtype="DeviceStimulus" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="Stimulus Status" name="stimulusStatus" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="OffHookMessage" opcode="0x0006" type="CallControl">
+ <fields beginversion="0" endversion="22" size_gt="4">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="OnHookMessage" opcode="0x0007" type="CallControl">
+ <fields beginversion="0" endversion="22" size_gt="4">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="HookFlashMessage" opcode="0x0008" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="ForwardStatReqMessage" opcode="0x0009" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="lineNumber" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="SpeedDialStatReqMessage" opcode="0x000a" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="speedDialNumber" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="LineStatReqMessage" opcode="0x000b" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="lineNumber" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="ConfigStatReqMessage" opcode="0x000c" type="RegistrationAndManagement"/>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="TimeDateReqMessage" opcode="0x000d" type="RegistrationAndManagement"/>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="ButtonTemplateReqMessage" opcode="0x000e" type="RegistrationAndManagement"/>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="VersionReqMessage" opcode="0x000f" type="RegistrationAndManagement"/>
+ <enum define="yes" name="Media_PayloadType">
+ <entries type="audio">
+ <entry comment="" name="Media_Payload_G711Alaw64k" text="Media_Payload_G711Alaw64k" value="0x0002"/>
+ <entry comment="" name="Media_Payload_G711Alaw56k" text="Media_Payload_G711Alaw56k" value="0x0003"/>
+ <entry comment="" name="Media_Payload_G711Ulaw64k" text="Media_Payload_G711Ulaw64k" value="0x0004"/>
+ <entry comment="" name="Media_Payload_G711Ulaw56k" text="Media_Payload_G711Ulaw56k" value="0x0005"/>
+ <entry comment="" name="Media_Payload_G722_64k" text="Media_Payload_G722_64k" value="0x0006"/>
+ <entry comment="" name="Media_Payload_G722_56k" text="Media_Payload_G722_56k" value="0x0007"/>
+ <entry comment="" name="Media_Payload_G722_48k" text="Media_Payload_G722_48k" value="0x0008"/>
+ <entry comment="" name="Media_Payload_G7231" text="Media_Payload_G7231" value="0x0009"/>
+ <entry comment="" name="Media_Payload_G728" text="Media_Payload_G728" value="0x000a"/>
+ <entry comment="" name="Media_Payload_G729" text="Media_Payload_G729" value="0x000b"/>
+ <entry comment="" name="Media_Payload_G729AnnexA" text="Media_Payload_G729AnnexA" value="0x000c"/>
+ <entry comment="" name="Media_Payload_G729AnnexB" text="Media_Payload_G729AnnexB" value="0x000f"/>
+ <entry comment="" name="Media_Payload_G729AnnexAwAnnexB" text="Media_Payload_G729AnnexAwAnnexB" value="0x0010"/>
+ <entry comment="" name="Media_Payload_GSM_Full_Rate" text="Media_Payload_GSM_Full_Rate" value="0x0012"/>
+ <entry comment="" name="Media_Payload_GSM_Half_Rate" text="Media_Payload_GSM_Half_Rate" value="0x0013"/>
+ <entry comment="" name="Media_Payload_GSM_Enhanced_Full_Rate" text="Media_Payload_GSM_Enhanced_Full_Rate" value="0x0014"/>
+ <entry comment="" name="Media_Payload_Wide_Band_256k" text="Media_Payload_Wide_Band_256k" value="0x0019"/>
+ <entry comment="" name="Media_Payload_Data64" text="Media_Payload_Data64" value="0x0020"/>
+ <entry comment="" name="Media_Payload_Data56" text="Media_Payload_Data56" value="0x0021"/>
+ <entry comment="" name="Media_Payload_G7221_32K" text="Media_Payload_G7221_32K" value="0x0028"/>
+ <entry comment="" name="Media_Payload_G7221_24K" text="Media_Payload_G7221_24K" value="0x0029"/>
+ <entry comment="" name="Media_Payload_AAC" text="Media_Payload_AAC" value="0x002a"/>
+ <entry comment="" name="Media_Payload_MP4ALATM_128" text="Media_Payload_MP4ALATM_128" value="0x002b"/>
+ <entry comment="" name="Media_Payload_MP4ALATM_64" text="Media_Payload_MP4ALATM_64" value="0x002c"/>
+ <entry comment="" name="Media_Payload_MP4ALATM_56" text="Media_Payload_MP4ALATM_56" value="0x002d"/>
+ <entry comment="" name="Media_Payload_MP4ALATM_48" text="Media_Payload_MP4ALATM_48" value="0x002e"/>
+ <entry comment="" name="Media_Payload_MP4ALATM_32" text="Media_Payload_MP4ALATM_32" value="0x002f"/>
+ <entry comment="" name="Media_Payload_MP4ALATM_24" text="Media_Payload_MP4ALATM_24" value="0x0030"/>
+ <entry comment="" name="Media_Payload_MP4ALATM_NA" text="Media_Payload_MP4ALATM_NA" value="0x0031"/>
+ <entry comment="" name="Media_Payload_GSM" text="Media_Payload_GSM" value="0x0050"/>
+ <entry comment="" name="Media_Payload_G726_32K" text="Media_Payload_G726_32K" value="0x0052"/>
+ <entry comment="" name="Media_Payload_G726_24K" text="Media_Payload_G726_24K" value="0x0053"/>
+ <entry comment="" name="Media_Payload_G726_16K" text="Media_Payload_G726_16K" value="0x0054"/>
+ <entry comment="" name="Media_Payload_ILBC" text="Media_Payload_ILBC" value="0x0056"/>
+ <entry comment="" name="Media_Payload_ISAC" text="Media_Payload_ISAC" value="0x0059"/>
+ <entry comment="" name="Media_Payload_OPUS" text="Media_Payload_OPUS" value="0x005a"/>
+ <entry comment="" name="Media_Payload_AMR" text="Media_Payload_AMR" value="0x0061"/>
+ <entry comment="" name="Media_Payload_AMR_WB" text="Media_Payload_AMR_WB" value="0x0062"/>
+ </entries>
+ <entries type="video">
+ <entry comment="" name="Media_Payload_H261" text="Media_Payload_H261" value="0x0064"/>
+ <entry comment="" name="Media_Payload_H263" text="Media_Payload_H263" value="0x0065"/>
+ <entry comment="" name="Media_Payload_Vieo" text="Media_Payload_Vieo" value="0x0066"/>
+ <entry comment="" name="Media_Payload_H264" text="Media_Payload_H264" value="0x0067"/>
+ <entry comment="" name="Media_Payload_H264_SVC" text="Media_Payload_H264_SVC" value="0x0068"/>
+ <entry comment="" name="Media_Payload_T120" text="Media_Payload_T120" value="0x0069"/>
+ <entry comment="" name="Media_Payload_H224" text="Media_Payload_H224" value="0x006a"/>
+ <entry comment="" name="Media_Payload_T38Fax" text="Media_Payload_T38Fax" value="0x006b"/>
+ <entry comment="" name="Media_Payload_TOTE" text="Media_Payload_TOTE" value="0x006c"/>
+ <entry comment="" name="Media_Payload_H265" text="Media_Payload_H265" value="0x006d"/>
+ <entry comment="" name="Media_Payload_H264_UC" text="Media_Payload_H264_UC" value="0x006e"/>
+ <entry comment="" name="Media_Payload_XV150_MR_711U" text="Media_Payload_XV150_MR_711U" value="0x006f"/>
+ <entry comment="" name="Media_Payload_NSE_VBD_711U" text="Media_Payload_NSE_VBD_711U" value="0x0070"/>
+ <entry comment="" name="Media_Payload_XV150_MR_729A" text="Media_Payload_XV150_MR_729A" value="0x0071"/>
+ <entry comment="" name="Media_Payload_NSE_VBD_729A" text="Media_Payload_NSE_VBD_729A" value="0x0072"/>
+ <entry comment="" name="Media_Payload_H264_FEC" text="Media_Payload_H264_FEC" value="0x0073"/>
+ </entries>
+ <entries type="data">
+ <entry comment="" name="Media_Payload_Clear_Chan" text="Media_Payload_Clear_Chan" value="0x0078"/>
+ <entry comment="" name="Media_Payload_Universal_Xcoder" text="Media_Payload_Universal_Xcoder" value="0x00de"/>
+ <entry comment="" name="Media_Payload_RFC2833_DynPayload" text="Media_Payload_RFC2833_DynPayload" value="0x0101"/>
+ <entry comment="" name="Media_Payload_PassThrough" text="Media_Payload_PassThrough" value="0x0102"/>
+ <entry comment="" name="Media_Payload_Dynamic_Payload_PassThru" text="Media_Payload_Dynamic_Payload_PassThru" value="0x0103"/>
+ <entry comment="" name="Media_Payload_DTMF_OOB" text="Media_Payload_DTMF_OOB" value="0x0104"/>
+ <entry comment="" name="Media_Payload_Inband_DTMF_RFC2833" text="Media_Payload_Inband_DTMF_RFC2833" value="0x0105"/>
+ <entry comment="" name="Media_Payload_CFB_Tones" text="Media_Payload_CFB_Tones" value="0x0106"/>
+ <entry comment="" name="Media_Payload_NoAudio" text="Media_Payload_NoAudio" value="0x012b"/>
+ <entry comment="" name="Media_Payload_v150_LC_ModemRelay" text="Media_Payload_v150_LC_ModemRelay" value="0x012c"/>
+ <entry comment="" name="Media_Payload_v150_LC_SPRT" text="Media_Payload_v150_LC_SPRT" value="0x012d"/>
+ <entry comment="" name="Media_Payload_v150_LC_SSE" text="Media_Payload_v150_LC_SSE" value="0x012e"/>
+ <entry comment="" name="Media_Payload_Max" text="Media_Payload_Max" value="0x012f"/>
+ </entries>
+ </enum>
+ <enum name="Media_G723BitRate">
+ <entries>
+ <entry comment="" name="Media_G723BRate_5_3" text="Media_G723BRate_5_3" value="0x0001"/>
+ <entry comment="" name="Media_G723BRate_6_3" text="Media_G723BRate_6_3" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="CapabilitiesResMessage" opcode="0x0010" request="0x009b" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="capCount" type="uint32"/>
+ <struct comment="" maxsize="18" name="caps" size_fieldname="capCount" type="struct">
+ <fields>
+ <enum comment="" declare="yes" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="" name="maxFramesPerPacket" type="uint32"/>
+ <union comment="" lookup_guide="payloadCapability" name="PAYLOADS" subtype="MediaCapabilityUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_ModemRelay" name="modemRelay" type="struct">
+ <fields>
+ <integer comment="" name="capAndVer" type="uint32"/>
+ <integer comment="" name="modAnd2833" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SPRT" name="sprtPayload" type="struct">
+ <fields>
+ <integer comment="" name="chan0MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxPayload" type="uint16"/>
+ <integer comment="" name="chan3MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxWindow" type="uint16"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SSE" name="sse" type="struct">
+ <fields>
+ <integer comment="" name="standard" type="uint32"/>
+ <integer comment="" name="vendor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="ServerReqMessage" opcode="0x0012" type="RegistrationAndManagement"/>
+ <enum name="DeviceAlarmSeverity">
+ <entries>
+ <entry comment="" name="DeviceAlarmSeverity_Critical" text="Critical" value="0x0000"/>
+ <entry comment="" name="DeviceAlarmSeverity_Major" text="Major" value="0x0007"/>
+ <entry comment="" name="DeviceAlarmSeverity_Minor" text="Minor" value="0x0008"/>
+ <entry comment="" name="DeviceAlarmSeverity_Warning" text="Warning" value="0x0001"/>
+ <entry comment="" name="DeviceAlarmSeverity_Marginal" text="Marginal" value="0x000a"/>
+ <entry comment="" name="DeviceAlarmSeverity_Unknown" text="Unknown" value="0x0004"/>
+ <entry comment="" name="DeviceAlarmSeverity_Informational" text="Informational" value="0x0002"/>
+ <entry comment="" name="DeviceAlarmSeverity_TraceInfo" text="TraceInfo" value="0x0014"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="AlarmMessage" opcode="0x0020" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="alarmSeverity" subtype="DeviceAlarmSeverity" type="uint32"/>
+ <string comment="" name="text" size="80" type="char"/>
+ <integer comment="" name="parm1" type="uint32"/>
+ <integer comment="" name="parm2" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="MulticastMediaReceptionStatus">
+ <entries>
+ <entry comment="" name="MulticastMediaReceptionStatus_Ok" text="Ok" value="0x0000"/>
+ <entry comment="" name="MulticastMediaReceptionStatus_Error" text="Error" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="MulticastMediaReceptionAckMessage" opcode="0x0021" request="0x0101" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="multicastReceptionStatus" subtype="MulticastMediaReceptionStatus" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="MediaStatus">
+ <entries>
+ <entry comment="" name="MediaStatus_Ok" text="Ok" value="0x0000"/>
+ <entry comment="" name="MediaStatus_Unknown" text="Unknown" value="0x0001"/>
+ <entry comment="" name="MediaStatus_NotEnoughChannels" text="NotEnoughChannels" value="0x0002"/>
+ <entry comment="" name="MediaStatus_CodecTooComplex" text="CodecTooComplex" value="0x0003"/>
+ <entry comment="" name="MediaStatus_InvalidPartyID" text="InvalidPartyID" value="0x0004"/>
+ <entry comment="" name="MediaStatus_InvalidCallRef" text="InvalidCallRef" value="0x0005"/>
+ <entry comment="" name="MediaStatus_InvalidCodec" text="InvalidCodec" value="0x0006"/>
+ <entry comment="" name="MediaStatus_InvalidPacketSize" text="InvalidPacketSize" value="0x0007"/>
+ <entry comment="" name="MediaStatus_OutOfSockets" text="OutOfSockets" value="0x0008"/>
+ <entry comment="" name="MediaStatus_EncoderOrDecoderFailed" text="EncoderOrDecoderFailed" value="0x0009"/>
+ <entry comment="" name="MediaStatus_InvalidDynamicPayloadType" text="InvalidDynamicPayloadType" value="0x000a"/>
+ <entry comment="" name="MediaStatus_RequestedIpAddrTypeUnAvailable" text="RequestedIpAddrTypeUnAvailable" value="0x000b"/>
+ <entry comment="" name="MediaStatus_DeviceOnHook" text="DeviceOnHook" value="0x00ff"/>
+ </entries>
+ </enum>
+ <enum define="yes" name="IpAddrType">
+ <entries>
+ <entry comment="" name="IpAddrType_Ipv4" text="v4" value="0x0000"/>
+ <entry comment="" name="IpAddrType_Ipv6" text="v6" value="0x0001"/>
+ <entry comment="" name="IpAddrType_Ipv4_v6" text="v4_v6" value="0x0002"/>
+ <entry comment="" name="IpAddrType_Ip_Invalid" text="_Invalid" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="OpenReceiveChannelAckMessage" opcode="0x0022" request="0x0105" type="MediaControl">
+ <fields>
+ <enum comment="" name="mediaReceptionStatus" subtype="MediaStatus" type="uint32"/>
+ <ipv4or6 comment="" name="ipAddr" subtype="IPV4orV6Address" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" declare="yes" name="portNumber" subtype="uint32" type="ipport" use_param="ipAddr" make_additional_info="yes"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ </fields>
+ <fields beginversion="0" endversion="22" size_gt="20">
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="StatsProcessingType">
+ <entries>
+ <entry comment="" name="StatsProcessingType_clearStats" text="clearStats" value="0x0000"/>
+ <entry comment="" name="StatsProcessingType_doNotClearStats" text="doNotClearStats" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="ConnectionStatisticsResMessage" opcode="0x0023" request="0x0107" type="CallControl">
+ <fields endversion="17" fixed="yes">
+ <string comment="" name="directoryNum" size="24" type="char"/>
+ <integer comment="CallId" declare="yes" name="callReference" req_resp_key="1" type="uint32"/>
+ <enum comment="Stats Processing Mode" longcomment="What to do after you send the stats" name="statsProcessingMode" subtype="StatsProcessingType" type="uint32"/>
+ </fields>
+ <fields beginversion="18" endversion="22" fixed="yes">
+ <string comment="" name="directoryNum" size="28" type="char"/>
+ <integer comment="CallId" declare="yes" name="callReference" req_resp_key="1" type="uint32"/>
+ <integer comment="Stats Processing Mode" longcomment="What to do after you send the stats" name="statsProcessingMode" subtype="StatsProcessingType" type="uint8"/>
+ </fields>
+ <fields>
+ <integer comment="Number of Packets Sent" name="numberPacketsSent" type="uint32"/>
+ <integer comment="Number of Octets Sent" name="numberOctetsSent" type="uint32"/>
+ <integer comment="Number of Packets Received" name="numberPacketsReceived" type="uint32"/>
+ <integer comment="Number of Octets Received" name="numberOctetsReceived" type="uint32"/>
+ <integer comment="Number of Packets Lost" name="numberPacketsLost" type="uint32"/>
+ <integer comment="Amount of Jitter" name="jitter" type="uint32"/>
+ <integer comment="Amount of Latency" name="latency" type="uint32"/>
+ </fields>
+ <fields size_gt="64">
+ <integer comment="Data Size" declare="yes" name="dataSize" type="uint32"/>
+ <string comment="Statistics" longcomment="variable field size (max: 600)" maxsize="600" name="data" size_fieldname="dataSize" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="OffHookWithCallingPartyNumberMessage" opcode="0x0024" type="CallControl">
+ <fields>
+ <string comment="Calling Party Number" declare="yes" name="callingPartyNumber" size="VariableDirnumSize" type="char"/>
+ <string comment="Calling Party Voicemail Box Number" declare="yes" name="cgpnVoiceMailbox" size="VariableDirnumSize" type="char"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="SoftKeySetReqMessage" opcode="0x0025" type="RegistrationAndManagement"/>
+ <enum name="SoftKeySet">
+ <entries>
+ <entry name="SoftKeySet_OnHook" text="On Hook" value="0"/>
+ <entry name="SoftKeySet_Connected" text="Connected" value="1"/>
+ <entry name="SoftKeySet_OnHold" text="On Hold" value="2"/>
+ <entry name="SoftKeySet_Ringin" text="Ring In" value="3"/>
+ <entry name="SoftKeySet_OffHook" text="Off Hook" value="4"/>
+ <entry name="SoftKeySet_ConnTrans" text="Connected Transferable" value="5"/>
+ <entry name="SoftKeySet_DigitsFoll" text="Digits Following" value="6"/>
+ <entry name="SoftKeySet_ConnConf" text="Connected Conference" value="7"/>
+ <entry name="SoftKeySet_RingOut" text="Ring Out" value="8"/>
+ <entry name="SoftKeySet_OffHookFeat" text="OffHook with Features" value="9"/>
+ <entry name="SoftKeySet_InUseHint" text="In Use Hint" value="10"/>
+ <entry name="SoftKeySet_OnHookStealable" text="On Hook with Stealable Call" value="11"/>
+ </entries>
+ </enum>
+ <enum name="SoftKeyEvent">
+ <entries>
+ <entry name="SoftKeyEvent_Redial" text="Redial" value="1"/>
+ <entry name="SoftKeyEvent_NewCall" text="NewCall" value="2"/>
+ <entry name="SoftKeyEvent_Hold" text="Hold" value="3"/>
+ <entry name="SoftKeyEvent_Transfer" text="Transfer" value="4"/>
+ <entry name="SoftKeyEvent_CfwdAll" text="CfwdAll" value="5"/>
+ <entry name="SoftKeyEvent_CfwdBusy" text="CfwdBusy" value="6"/>
+ <entry name="SoftKeyEvent_CfwdNoAnswer" text="CfwdNoAnswer" value="7"/>
+ <entry name="SoftKeyEvent_BackSpace" text="BackSpace" value="8"/>
+ <entry name="SoftKeyEvent_EndCall" text="EndCall" value="9"/>
+ <entry name="SoftKeyEvent_Resume" text="Resume" value="10"/>
+ <entry name="SoftKeyEvent_Answer" text="Answer" value="11"/>
+ <entry name="SoftKeyEvent_Info" text="Info" value="12"/>
+ <entry name="SoftKeyEvent_Confrn" text="Confrn" value="13"/>
+ <entry name="SoftKeyEvent_Park" text="Park" value="14"/>
+ <entry name="SoftKeyEvent_Join" text="Join" value="15"/>
+ <entry name="SoftKeyEvent_MeetMe" text="MeetMe" value="16"/>
+ <entry name="SoftKeyEvent_PickUp" text="PickUp" value="17"/>
+ <entry name="SoftKeyEvent_GrpPickup" text="GrpPickup" value="18"/>
+ <entry name="SoftKeyEvent_YourCurrentOptions" text="Your current options" value="19"/>
+ <entry name="SoftKeyEvent_OffHook" text="Off Hook" value="20"/>
+ <entry name="SoftKeyEvent_OnHook" text="On Hook" value="21"/>
+ <entry name="SoftKeyEvent_RingOut" text="Ring out" value="22"/>
+ <entry name="SoftKeyEvent_From " text="From " value="23"/>
+ <entry name="SoftKeyEvent_Connected" text="Connected" value="24"/>
+ <entry name="SoftKeyEvent_Busy" text="Busy" value="25"/>
+ <entry name="SoftKeyEvent_LineInUse" text="Line In Use" value="26"/>
+ <entry name="SoftKeyEvent_CallWaiting" text="Call Waiting" value="27"/>
+ <entry name="SoftKeyEvent_CallTransfer" text="Call Transfer" value="28"/>
+ <entry name="SoftKeyEvent_CallPark" text="Call Park" value="29"/>
+ <entry name="SoftKeyEvent_CallProceed" text="Call Proceed" value="30"/>
+ <entry name="SoftKeyEvent_InUseRemote" text="In Use Remote" value="31"/>
+ <entry name="SoftKeyEvent_EnterNumber" text="Enter number" value="32"/>
+ <entry name="SoftKeyEvent_CallParkAt" text="Call park At" value="33"/>
+ <entry name="SoftKeyEvent_PrimaryOnly" text="Primary Only" value="34"/>
+ <entry name="SoftKeyEvent_TempFail" text="Temp Fail" value="35"/>
+ <entry name="SoftKeyEvent_YouHaveAVoiceMail" text="You Have a VoiceMail" value="36"/>
+ <entry name="SoftKeyEvent_ForwardedTo" text="Forwarded to" value="37"/>
+ <entry name="SoftKeyEvent_CanNotCompleteConference" text="Can Not Complete Conference" value="38"/>
+ <entry name="SoftKeyEvent_NoConferenceBridge" text="No Conference Bridge" value="39"/>
+ <entry name="SoftKeyEvent_CanNotHoldPrimaryControl" text="Can Not Hold Primary Control" value="40"/>
+ <entry name="SoftKeyEvent_InvalidConferenceParticipant" text="Invalid Conference Participant" value="41"/>
+ <entry name="SoftKeyEvent_InConferenceAlready" text="In Conference Already" value="42"/>
+ <entry name="SoftKeyEvent_NoParticipantInfo" text="No Participant Info" value="43"/>
+ <entry name="SoftKeyEvent_ExceedMaximumParties" text="Exceed Maximum Parties" value="44"/>
+ <entry name="SoftKeyEvent_KeyIsNotActive" text="Key Is Not Active" value="45"/>
+ <entry name="SoftKeyEvent_ErrorNoLicense" text="Error No License" value="46"/>
+ <entry name="SoftKeyEvent_ErrorDBConfig" text="Error DBConfig" value="47"/>
+ <entry name="SoftKeyEvent_ErrorDatabase" text="Error Database" value="48"/>
+ <entry name="SoftKeyEvent_ErrorPassLimit" text="Error Pass Limit" value="49"/>
+ <entry name="SoftKeyEvent_ErrorUnknown" text="Error Unknown" value="50"/>
+ <entry name="SoftKeyEvent_ErrorMismatch" text="Error Mismatch" value="51"/>
+ <entry name="SoftKeyEvent_Conference" text="Conference" value="52"/>
+ <entry name="SoftKeyEvent_ParkNumber" text="Park Number" value="53"/>
+ <entry name="SoftKeyEvent_Private" text="Private" value="54"/>
+ <entry name="SoftKeyEvent_NotEnoughBandwidth" text="Not Enough Bandwidth" value="55"/>
+ <entry name="SoftKeyEvent_UnknownNumber" text="Unknown Number" value="56"/>
+ <entry name="SoftKeyEvent_RmLstC" text="RmLstC" value="57"/>
+ <entry name="SoftKeyEvent_Voicemail" text="Voicemail" value="58"/>
+ <entry name="SoftKeyEvent_ImmDiv" text="ImmDiv" value="59"/>
+ <entry name="SoftKeyEvent_Intrcpt" text="Intrcpt" value="60"/>
+ <entry name="SoftKeyEvent_SetWtch" text="SetWtch" value="61"/>
+ <entry name="SoftKeyEvent_TrnsfVM" text="TrnsfVM" value="62"/>
+ <entry name="SoftKeyEvent_DND" text="DND" value="63"/>
+ <entry name="SoftKeyEvent_DivAll" text="DivAll" value="64"/>
+ <entry name="SoftKeyEvent_CallBack" text="CallBack" value="65"/>
+ <entry name="SoftKeyEvent_NetworkCongestionRerouting" text="Network congestion,rerouting" value="66"/>
+ <entry name="SoftKeyEvent_Barge" text="Barge" value="67"/>
+ <entry name="SoftKeyEvent_FailedToSetupBarge" text="Failed to setup Barge" value="68"/>
+ <entry name="SoftKeyEvent_AnotherBargeExists" text="Another Barge exists" value="69"/>
+ <entry name="SoftKeyEvent_IncompatibleDeviceType" text="Incompatible device type" value="70"/>
+ <entry name="SoftKeyEvent_NoParkNumberAvailable" text="No Park Number Available" value="71"/>
+ <entry name="SoftKeyEvent_CallParkReversion" text="CallPark Reversion" value="72"/>
+ <entry name="SoftKeyEvent_ServiceIsNotActive" text="Service is not Active" value="73"/>
+ <entry name="SoftKeyEvent_HighTrafficTryAgainLater" text="High Traffic Try Again Later" value="74"/>
+ <entry name="SoftKeyEvent_QRT" text="QRT" value="75"/>
+ <entry name="SoftKeyEvent_MCID" text="MCID" value="76"/>
+ <entry name="SoftKeyEvent_DirTrfr" text="DirTrfr" value="77"/>
+ <entry name="SoftKeyEvent_Select" text="Select" value="78"/>
+ <entry name="SoftKeyEvent_ConfList" text="ConfList" value="79"/>
+ <entry name="SoftKeyEvent_iDivert" text="iDivert" value="80"/>
+ <entry name="SoftKeyEvent_cBarge" text="cBarge" value="81"/>
+ <entry name="SoftKeyEvent_CanNotCompleteTransfer" text="Can Not Complete Transfer" value="82"/>
+ <entry name="SoftKeyEvent_CanNotJoinCalls" text="Can Not Join Calls" value="83"/>
+ <entry name="SoftKeyEvent_McidSuccessful" text="Mcid Successful" value="84"/>
+ <entry name="SoftKeyEvent_NumberNotConfigured" text="Number Not Configured" value="85"/>
+ <entry name="SoftKeyEvent_SecurityError" text="Security Error" value="86"/>
+ <entry name="SoftKeyEvent_VideoBandwidthUnavailable" text="Video Bandwidth Unavailable" value="87"/>
+ <entry name="SoftKeyEvent_VideoMode" text="Video Mode" value="88"/>
+ <entry name="SoftKeyEvent_Record" text="Record" value="202"/>
+ <entry name="SoftKeyEvent_Dial" text="Dial" value="201"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="SoftKeyEventMessage" opcode="0x0026" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <enum comment="SoftKey Event" name="softKeyEvent" subtype="SoftKeyEvent" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="UnRegReasonCode">
+ <entries>
+ <entry comment="" name="UnRegReasonCode_Unknown" text="Unknown" value="0x0000"/>
+ <entry comment="" name="UnRegReasonCode_PowerSaveMode" text="PowerSaveMode" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="UnregisterReqMessage" opcode="0x0027" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22" size_gt="12">
+ <enum comment="" name="unRegReasonCode" subtype="UnRegReasonCode" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="SoftKeyTemplateReqMessage" opcode="0x0028" type="RegistrationAndManagement"/>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="RegisterTokenReq" opcode="0x0029" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="sid" type="struct">
+ <fields>
+ <string comment="Device Name" name="DeviceName" size="16" type="char"/>
+ <integer comment="User Id" name="reserved_for_future_use" type="uint32"/>
+ <integer comment="Device Instance" name="instance" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="stationIpAddr" type="uint32"/>
+ <enum comment="" name="deviceType" subtype="DeviceType" type="uint32"/>
+ <ipv4or6 comment="" endianness="big" name="stationIpV6Addr" size="16" subtype="uint8" type="ipaddr"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="MediaTransmissionFailureMessage" opcode="0x002a" request="0x008a" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="IpAddress" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="HeadsetMode">
+ <entries>
+ <entry comment="" name="HeadsetMode_On" text="On" value="0x0001"/>
+ <entry comment="" name="HeadsetMode_Off" text="Off" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="HeadsetStatusMessage" opcode="0x002b" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="headsetStatus" subtype="HeadsetMode" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="MediaResourceNotificationMessage" opcode="0x002c" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="deviceType" subtype="DeviceType" type="uint32"/>
+ <integer comment="" name="numberOfInServiceStreams" type="uint32"/>
+ <integer comment="" name="maxStreamsPerConf" type="uint32"/>
+ <integer comment="" name="numberOfOutOfServiceStreams" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="RegisterAvailableLinesMessage" opcode="0x002d" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="maxNumOfAvailLines" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="SequenceFlag">
+ <entries>
+ <entry comment="" name="Sequence_First" text="First" value="0x0000"/>
+ <entry comment="" name="Sequence_More" text="More" value="0x0001"/>
+ <entry comment="" name="Sequence_Last" text="Last" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="DeviceToUserDataMessage" opcode="0x002e" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="deviceToUserData" subtype="UserAndDeviceData" type="struct">
+ <fields>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="transactionId" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <xml comment="" maxsize="2000" name="xmldata" size_fieldname="dataLength" type="xml"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="DeviceToUserDataResponseMessage" opcode="0x002f" request="0x002e" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="deviceToUserData" subtype="UserAndDeviceData" type="struct">
+ <fields>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="transactionId" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <xml comment="" maxsize="2000" name="xmldata" size_fieldname="dataLength" type="xml"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="Layout">
+ <entries>
+ <entry comment="" name="Layout_NoLayout" text="NoLayout" value="0x0000"/>
+ <entry comment="" name="Layout_OneByOne" text="OneByOne" value="0x0001"/>
+ <entry comment="" name="Layout_OneByTwo" text="OneByTwo" value="0x0002"/>
+ <entry comment="" name="Layout_TwoByTwo" text="TwoByTwo" value="0x0003"/>
+ <entry comment="" name="Layout_TwoByTwo3Alt1" text="TwoByTwo3Alt1" value="0x0004"/>
+ <entry comment="" name="Layout_TwoByTwo3Alt2" text="TwoByTwo3Alt2" value="0x0005"/>
+ <entry comment="" name="Layout_ThreeByThree" text="ThreeByThree" value="0x0006"/>
+ <entry comment="" name="Layout_ThreeByThree6Alt1" text="ThreeByThree6Alt1" value="0x0007"/>
+ <entry comment="" name="Layout_ThreeByThree6Alt2" text="ThreeByThree6Alt2" value="0x0008"/>
+ <entry comment="" name="Layout_ThreeByThree4Alt1" text="ThreeByThree4Alt1" value="0x0009"/>
+ <entry comment="" name="Layout_ThreeByThree4Alt2" text="ThreeByThree4Alt2" value="0x000a"/>
+ </entries>
+ </enum>
+ <enum name="TransmitOrReceive">
+ <entries>
+ <entry comment="" name="TransmitOrReceive_None" text="None" value="0x0000"/>
+ <entry comment="" name="TransmitOrReceive_ReceiveOnly" text="ReceiveOnly" value="0x0001"/>
+ <entry comment="" name="TransmitOrReceive_TransmitOnly" text="TransmitOnly" value="0x0002"/>
+ <entry comment="" name="TransmitOrReceive_Both" text="Both" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="UpdateCapabilitiesMessage" opcode="0x0030" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="audioCapCount" type="uint32"/>
+ <integer comment="" declare="yes" name="videoCapCount" type="uint32"/>
+ <integer comment="" declare="yes" name="dataCapCount" type="uint32"/>
+ <integer comment="" name="rtpPayloadFormat" type="uint32"/>
+ <integer comment="" declare="yes" name="customPictureFormatCount" type="uint32"/>
+ <struct comment="" maxsize="6" name="customPictureFormat" size_fieldname="customPictureFormatCount" subtype="CustomPictureFormat" type="struct">
+ <fields>
+ <integer comment="" name="pictureWidth" type="uint32"/>
+ <integer comment="" name="pictureHeight" type="uint32"/>
+ <integer comment="" name="pixelAspectRatio" type="uint32"/>
+ <integer comment="" name="clockConversionCode" type="uint32"/>
+ <integer comment="" name="clockDivisor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="confResources" subtype="ConfResources" type="struct">
+ <fields>
+ <integer comment="" name="activeStreamsOnRegistration" type="uint32"/>
+ <integer comment="" name="maxBW" type="uint32"/>
+ <integer comment="" declare="yes" name="serviceResourceCount" type="uint32"/>
+ <struct comment="" maxsize="4" name="serviceResource" size_fieldname="serviceResourceCount" subtype="ServiceResource" type="struct">
+ <fields>
+ <integer comment="" declare="yes" name="layoutCount" type="uint32"/>
+ <enum comment="" maxsize="5" name="layouts" size_fieldname="layoutCount" subtype="Layout" type="uint32"/>
+ <integer comment="" name="serviceNum" type="uint32"/>
+ <integer comment="" name="maxStreams" type="uint32"/>
+ <integer comment="" name="maxConferences" type="uint32"/>
+ <integer comment="Active Conference" longcomment="Active conference at Registration" name="activeConferenceOnRegistration" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="18" name="audiocaps" size_fieldname="audioCapCount" type="struct">
+ <fields>
+ <enum comment="" declare="yes" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="" name="maxFramesPerPacket" type="uint32"/>
+ <union comment="" lookup_guide="payloadCapability" name="PAYLOADS" subtype="MediaCapabilityUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_ModemRelay" name="modemRelay" type="struct">
+ <fields>
+ <integer comment="" name="capAndVer" type="uint32"/>
+ <integer comment="" name="modAnd2833" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SPRT" name="sprtPayload" type="struct">
+ <fields>
+ <integer comment="" name="chan0MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxPayload" type="uint16"/>
+ <integer comment="" name="chan3MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxWindow" type="uint16"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SSE" name="sse" type="struct">
+ <fields>
+ <integer comment="" name="standard" type="uint32"/>
+ <integer comment="" name="vendor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="10" name="vidCaps" size_fieldname="videoCapCount" subtype="VideoCapability" type="struct">
+ <fields>
+ <enum comment="" declare="yes" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <enum comment="" name="videoCapabilityDirection" subtype="TransmitOrReceive" type="uint32"/>
+ <integer comment="" declare="yes" name="levelPreferenceCount" type="uint32"/>
+ <struct comment="" maxsize="4" name="levelPreference" size_fieldname="levelPreferenceCount" subtype="LevelPreference" type="struct">
+ <fields>
+ <integer comment="" name="transmitPreference" type="uint32"/>
+ <integer comment="" name="format" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ <integer comment="" name="minBitRate" type="uint32"/>
+ <integer comment="" name="MPI" type="uint32"/>
+ <integer comment="" name="serviceNumber" type="uint32"/>
+ </fields>
+ </struct>
+ <union comment="" lookup_guide="payloadCapability" name="capability" subtype="VideoCapabilityUnion" type="union">
+ <fields>
+ <struct comment="" lookup_eq="Media_Payload_H261" name="h261VideoCapability" subtype="H261VideoCapability" type="struct">
+ <fields>
+ <integer comment="Temporal spatial trade off capability" name="temporalSpatialTradeOffCapability" type="uint32"/>
+ <integer comment="Still Image Transmission" name="stillImageTransmission" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H263" name="h263VideoCapability" subtype="H263VideoCapability" type="struct">
+ <fields>
+ <bitfield comment="H263 Capability BitField" name="h263_capability_bitfield" size="uint32" subtype="Generic_Bitfield_32" type="bitfield">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x00000001"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x00000002"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x00000004"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x00000008"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x00000010"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x00000020"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x00000040"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x00000080"/>
+ <entry comment="" name="Generic_Bitfield_Bit9" text="Bit9" value="0x00000100"/>
+ <entry comment="" name="Generic_Bitfield_Bit10" text="Bit10" value="0x00000200"/>
+ <entry comment="" name="Generic_Bitfield_Bit11" text="Bit11" value="0x00000400"/>
+ <entry comment="" name="Generic_Bitfield_Bit12" text="Bit12" value="0x00000800"/>
+ <entry comment="" name="Generic_Bitfield_Bit13" text="Bit13" value="0x00001000"/>
+ <entry comment="" name="Generic_Bitfield_Bit14" text="Bit14" value="0x00002000"/>
+ <entry comment="" name="Generic_Bitfield_Bit15" text="Bit15" value="0x00004000"/>
+ <entry comment="" name="Generic_Bitfield_Bit16" text="Bit16" value="0x00008000"/>
+ <entry comment="" name="Generic_Bitfield_Bit17" text="Bit17" value="0x00010000"/>
+ <entry comment="" name="Generic_Bitfield_Bit18" text="Bit18" value="0x00020000"/>
+ <entry comment="" name="Generic_Bitfield_Bit19" text="Bit19" value="0x00040000"/>
+ <entry comment="" name="Generic_Bitfield_Bit20" text="Bit20" value="0x00080000"/>
+ <entry comment="" name="Generic_Bitfield_Bit21" text="Bit21" value="0x00100000"/>
+ <entry comment="" name="Generic_Bitfield_Bit22" text="Bit22" value="0x00200000"/>
+ <entry comment="" name="Generic_Bitfield_Bit23" text="Bit23" value="0x00400000"/>
+ <entry comment="" name="Generic_Bitfield_Bit24" text="Bit24" value="0x00800000"/>
+ <entry comment="" name="Generic_Bitfield_Bit25" text="Bit25" value="0x01000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit26" text="Bit26" value="0x02000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit27" text="Bit27" value="0x04000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit28" text="Bit28" value="0x08000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit29" text="Bit29" value="0x10000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit30" text="Bit30" value="0x20000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit31" text="Bit31" value="0x40000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit32" text="Bit32" value="0x80000000"/>
+ </entries>
+ </bitfield>
+ <integer comment="" name="annexNandWFutureUse" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_Vieo" name="vieoVideoCapability" subtype="VieoVideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="modelNumber" type="uint32"/>
+ <integer comment="" name="bandwidth" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="5" name="dataCaps" size_fieldname="dataCapCount" subtype="DataApplicationCapability" type="struct">
+ <fields>
+ <enum comment="" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <enum comment="" name="dataCapabilityDirection" subtype="TransmitOrReceive" type="uint32"/>
+ <integer comment="" name="protocolDependentData" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="OpenReceiveChanStatus">
+ <entries>
+ <entry comment="" name="OpenReceiveChanStatus_Ok" text="Ok" value="0x0000"/>
+ <entry comment="" name="OpenReceiveChanStatus_Error" text="Error" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="OpenMultiMediaReceiveChannelAckMessage" opcode="0x0031" request="0x0131" type="MediaControl">
+ <fields>
+ <enum comment="" name="multimediaReceptionStatus" subtype="OpenReceiveChanStatus" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="ipAddr" subtype="IpAddress" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" declare="yes" name="portNumber" subtype="uint32" type="ipport" use_param="ipAddr" make_additional_info="yes"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="ClearConferenceMessage" opcode="0x0032" type="Conference">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="serviceNum" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="ServiceURLStatReqMessage" opcode="0x0033" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="serviceURLIndex" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="FeatureStatReqMessage" opcode="0x0034" type="RegistrationAndManagement">
+ <fields>
+ <integer comment="" declare="yes" name="featureIndex" req_resp_key="1" type="uint32"/>
+ </fields>
+ <fields beginversion="0" endversion="22" size_gt="16">
+ <integer comment="" name="featureCapabilities" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="CreateConfResult">
+ <entries>
+ <entry comment="" name="CreateConfResult_OK" text="OK" value="0x0000"/>
+ <entry comment="" name="CreateConfResult_ResourceNotAvailable" text="ResourceNotAvailable" value="0x0001"/>
+ <entry comment="" name="CreateConfResult_ConferenceAlreadyExist" text="ConferenceAlreadyExist" value="0x0002"/>
+ <entry comment="" name="CreateConfResult_SystemErr" text="SystemErr" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="CreateConferenceResMessage" opcode="0x0035" request="0x0137" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <enum comment="" name="result" subtype="CreateConfResult" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <string comment="variable field size (max: 2000)" maxsize="2000" name="passThruData" size_fieldname="dataLength" type="char"/>
+ </fields>
+ </message>
+ <enum name="DeleteConfResult">
+ <entries>
+ <entry comment="" name="DeleteConfResult_OK" text="OK" value="0x0000"/>
+ <entry comment="" name="DeleteConfResult_ConferenceNotExist" text="ConferenceNotExist" value="0x0001"/>
+ <entry comment="" name="DeleteConfResult_SystemErr" text="SystemErr" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="DeleteConferenceResMessage" opcode="0x0036" request="0x0138" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <enum comment="" name="delete_conf_result" subtype="DeleteConfResult" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="ModifyConfResult">
+ <entries>
+ <entry comment="" name="ModifyConfResult_OK" text="OK" value="0x0000"/>
+ <entry comment="" name="ModifyConfResult_ResourceNotAvailable" text="ResourceNotAvailable" value="0x0001"/>
+ <entry comment="" name="ModifyConfResult_ConferenceNotExist" text="ConferenceNotExist" value="0x0002"/>
+ <entry comment="" name="ModifyConfResult_InvalidParameter" text="InvalidParameter" value="0x0003"/>
+ <entry comment="" name="ModifyConfResult_MoreActiveCallsThanReserved" text="MoreActiveCallsThanReserved" value="0x0004"/>
+ <entry comment="" name="ModifyConfResult_InvalidResourceType" text="InvalidResourceType" value="0x0005"/>
+ <entry comment="" name="ModifyConfResult_SystemErr" text="SystemErr" value="0x0006"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="ModifyConferenceResMessage" opcode="0x0037" request="0x0139" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <enum comment="" name="modify_conf_result" subtype="ModifyConfResult" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <string comment="variable field size (max: 2000)" maxsize="2000" name="passThruData" size_fieldname="dataLength" type="char"/>
+ </fields>
+ </message>
+ <enum name="AddParticipantResult">
+ <entries>
+ <entry comment="" name="AddParticipantResult_OK" text="OK" value="0x0000"/>
+ <entry comment="" name="AddParticipantResult_ResourceNotAvailable" text="ResourceNotAvailable" value="0x0001"/>
+ <entry comment="" name="AddParticipantResult_ConferenceNotExist" text="ConferenceNotExist" value="0x0002"/>
+ <entry comment="" name="AddParticipantResult_DuplicateCallRef" text="DuplicateCallRef" value="0x0003"/>
+ <entry comment="" name="AddParticipantResult_SystemErr" text="SystemErr" value="0x0004"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="AddParticipantResMessage" opcode="0x0038" request="0x013a" type="IntraCCM">
+ <fields alignment="4" beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="add_participant_result" subtype="AddParticipantResult" type="uint32"/>
+ <string comment="" name="bridgeParticipantId" size="257" type="char"/>
+ </fields>
+ </message>
+ <enum name="ResourceType">
+ <entries>
+ <entry comment="" name="ResourceType_Conference" text="Conference" value="0x0000"/>
+ <entry comment="" name="ResourceType_IVR" text="IVR" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="AuditConferenceResMessage" opcode="0x0039" request="0x013c" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="last" type="uint32"/>
+ <integer comment="" declare="yes" name="numberOfEntries" type="uint32"/>
+ <struct comment="" maxsize="32" name="conferenceEntry" size_fieldname="numberOfEntries" subtype="AuditConferenceEntry" type="struct">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <enum comment="" name="resourceType" subtype="ResourceType" type="uint32"/>
+ <integer comment="" name="numberOfReservedParticipants" type="uint32"/>
+ <integer comment="" name="numberOfActiveParticipants" type="uint32"/>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <string comment="" name="appConfID" size="32" type="char"/>
+ <string comment="" name="appData" size="24" type="char"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="AuditParticipantResult">
+ <entries>
+ <entry comment="" name="AuditParticipantResult_OK" text="OK" value="0x0000"/>
+ <entry comment="" name="AuditParticipantResult_ConferenceNotExist" text="ConferenceNotExist" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="AuditParticipantResMessage" opcode="0x0040" request="0x013d" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="audit_participant_result" subtype="AuditParticipantResult" type="uint32"/>
+ <integer comment="" name="last" type="uint32"/>
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <integer comment="" declare="yes" name="numberOfEntries" type="uint32"/>
+ <integer comment="" maxsize="256" name="participantEntry" size_fieldname="numberOfEntries" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="DeviceToUserDataMessageVersion1" opcode="0x0041" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="deviceToUserDataVersion1" subtype="UserAndDeviceDataVersion1" type="struct">
+ <fields>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="transactionId" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <enum comment="" name="sequenceFlag" subtype="SequenceFlag" type="uint32"/>
+ <integer comment="" name="displayPriority" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="appInstanceID" type="uint32"/>
+ <integer comment="" name="routingID" type="uint32"/>
+ <xml comment="" maxsize="2000" name="xmldata" size_fieldname="dataLength" type="xml"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="DeviceToUserDataResponseMessageVersion1" opcode="0x0042" request="0x0041" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="deviceToUserDataVersion1" subtype="UserAndDeviceDataVersion1" type="struct">
+ <fields>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="transactionId" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <enum comment="" name="sequenceFlag" subtype="SequenceFlag" type="uint32"/>
+ <integer comment="" name="displayPriority" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="appInstanceID" type="uint32"/>
+ <integer comment="" name="routingID" type="uint32"/>
+ <xml comment="" maxsize="2000" name="xmldata" size_fieldname="dataLength" type="xml"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="CapabilitiesV2ResMessage" opcode="0x0043" request="0x009b" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="audioCapCount" type="uint32"/>
+ <integer comment="" declare="yes" name="videoCapCount" type="uint32"/>
+ <integer comment="" declare="yes" name="dataCapCount" type="uint32"/>
+ <integer comment="" name="rtpPayloadFormat" type="uint32"/>
+ <integer comment="" declare="yes" name="customPictureFormatCount" type="uint32"/>
+ <struct comment="" maxsize="6" name="customPictureFormat" size_fieldname="customPictureFormatCount" subtype="CustomPictureFormat" type="struct">
+ <fields>
+ <integer comment="" name="pictureWidth" type="uint32"/>
+ <integer comment="" name="pictureHeight" type="uint32"/>
+ <integer comment="" name="pixelAspectRatio" type="uint32"/>
+ <integer comment="" name="clockConversionCode" type="uint32"/>
+ <integer comment="" name="clockDivisor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="confResources" subtype="ConfResources" type="struct">
+ <fields>
+ <integer comment="" name="activeStreamsOnRegistration" type="uint32"/>
+ <integer comment="" name="maxBW" type="uint32"/>
+ <integer comment="" declare="yes" name="serviceResourceCount" type="uint32"/>
+ <struct comment="" maxsize="4" name="serviceResource" size_fieldname="serviceResourceCount" subtype="ServiceResource" type="struct">
+ <fields>
+ <integer comment="" declare="yes" name="layoutCount" type="uint32"/>
+ <enum comment="" maxsize="5" name="layouts" size_fieldname="layoutCount" subtype="Layout" type="uint32"/>
+ <integer comment="" name="serviceNum" type="uint32"/>
+ <integer comment="" name="maxStreams" type="uint32"/>
+ <integer comment="" name="maxConferences" type="uint32"/>
+ <integer comment="Active Conference" longcomment="Active conference at Registration" name="activeConferenceOnRegistration" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="18" name="audiocaps" size_fieldname="audioCapCount" type="struct">
+ <fields>
+ <enum comment="" declare="yes" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="" name="maxFramesPerPacket" type="uint32"/>
+ <union comment="" lookup_guide="payloadCapability" name="PAYLOADS" subtype="MediaCapabilityUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_ModemRelay" name="modemRelay" type="struct">
+ <fields>
+ <integer comment="" name="capAndVer" type="uint32"/>
+ <integer comment="" name="modAnd2833" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SPRT" name="sprtPayload" type="struct">
+ <fields>
+ <integer comment="" name="chan0MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxPayload" type="uint16"/>
+ <integer comment="" name="chan3MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxWindow" type="uint16"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SSE" name="sse" type="struct">
+ <fields>
+ <integer comment="" name="standard" type="uint32"/>
+ <integer comment="" name="vendor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="10" name="vidCaps" size_fieldname="videoCapCount" subtype="VideoCapabilityV2" type="struct">
+ <fields>
+ <enum comment="" declare="yes" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <enum comment="" name="videoCapabilityDirection" subtype="TransmitOrReceive" type="uint32"/>
+ <integer comment="" declare="yes" name="levelPreferenceCount" type="uint32"/>
+ <struct comment="" maxsize="4" name="levelPreference" size_fieldname="levelPreferenceCount" subtype="LevelPreference" type="struct">
+ <fields>
+ <integer comment="" name="transmitPreference" type="uint32"/>
+ <integer comment="" name="format" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ <integer comment="" name="minBitRate" type="uint32"/>
+ <integer comment="" name="MPI" type="uint32"/>
+ <integer comment="" name="serviceNumber" type="uint32"/>
+ </fields>
+ </struct>
+ <union comment="" lookup_guide="payloadCapability" name="capability" subtype="VideoCapabilityV2Union" type="union">
+ <fields>
+ <struct comment="" lookup_eq="Media_Payload_H261" name="h261VideoCapability" subtype="H261VideoCapability" type="struct">
+ <fields>
+ <integer comment="Temporal spatial trade off capability" name="temporalSpatialTradeOffCapability" type="uint32"/>
+ <integer comment="Still Image Transmission" name="stillImageTransmission" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H263" name="h263VideoCapability" subtype="H263VideoCapability" type="struct">
+ <fields>
+ <bitfield comment="H263 Capability BitField" name="h263_capability_bitfield" size="uint32" subtype="Generic_Bitfield_32" type="bitfield">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x0010"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x0080"/>
+ <entry comment="" name="Generic_Bitfield_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="" name="Generic_Bitfield_Bit10" text="Bit10" value="0x0200"/>
+ <entry comment="" name="Generic_Bitfield_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="Generic_Bitfield_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="Generic_Bitfield_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="Generic_Bitfield_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="Generic_Bitfield_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="" name="Generic_Bitfield_Bit16" text="Bit16" value="0x8000"/>
+ <entry comment="" name="Generic_Bitfield_Bit17" text="Bit17" value="0x10000"/>
+ <entry comment="" name="Generic_Bitfield_Bit18" text="Bit18" value="0x20000"/>
+ <entry comment="" name="Generic_Bitfield_Bit19" text="Bit19" value="0x40000"/>
+ <entry comment="" name="Generic_Bitfield_Bit20" text="Bit20" value="0x80000"/>
+ <entry comment="" name="Generic_Bitfield_Bit21" text="Bit21" value="0x100000"/>
+ <entry comment="" name="Generic_Bitfield_Bit22" text="Bit22" value="0x200000"/>
+ <entry comment="" name="Generic_Bitfield_Bit23" text="Bit23" value="0x400000"/>
+ <entry comment="" name="Generic_Bitfield_Bit24" text="Bit24" value="0x800000"/>
+ <entry comment="" name="Generic_Bitfield_Bit25" text="Bit25" value="0x1000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit26" text="Bit26" value="0x2000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit27" text="Bit27" value="0x4000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit28" text="Bit28" value="0x8000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit29" text="Bit29" value="0x10000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit30" text="Bit30" value="0x20000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit31" text="Bit31" value="0x40000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit32" text="Bit32" value="0x80000000"/>
+ </entries>
+ </bitfield>
+ <integer comment="" name="annexNandWFutureUse" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H264" name="h264VideoCapability" subtype="H264VideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="profile" type="uint32"/>
+ <integer comment="" name="level" type="uint32"/>
+ <integer comment="" name="customMaxMBPS" type="uint32"/>
+ <integer comment="" name="customMaxFS" type="uint32"/>
+ <integer comment="" name="customMaxDPB" type="uint32"/>
+ <integer comment="" name="customMaxBRandCPB" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_Vieo" name="vieoVideoCapability" subtype="VieoVideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="modelNumber" type="uint32"/>
+ <integer comment="" name="bandwidth" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="5" name="dataCaps" size_fieldname="dataCapCount" subtype="DataApplicationCapability" type="struct">
+ <fields>
+ <enum comment="" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <enum comment="" name="dataCapabilityDirection" subtype="TransmitOrReceive" type="uint32"/>
+ <integer comment="" name="protocolDependentData" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="Media_Encryption_Capability">
+ <entries>
+ <entry comment="" name="Media_Encryption_Capability_NotEncryptionCapable" text="NotEncryptionCapable" value="0x0000"/>
+ <entry comment="" name="Media_Encryption_Capability_EncryptionCapable" text="EncryptionCapable" value="0x0001"/>
+ </entries>
+ </enum>
+ <enum name="IpAddrMode">
+ <entries>
+ <entry comment="" name="IpAddrMode_ModeIpv4" text="ModeIpv4" value="0x0000"/>
+ <entry comment="" name="IpAddrMode_ModeIpv6" text="ModeIpv6" value="0x0001"/>
+ <entry comment="" name="IpAddrMode_ModeIpv4AndIpv6" text="ModeIpv4AndIpv6" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="yes" msgtype="response" name="CapabilitiesV3ResMessage" opcode="0x0044" request="0x009b" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="audioCapCount" type="uint32"/>
+ <integer comment="" declare="yes" name="videoCapCount" type="uint32"/>
+ <integer comment="" declare="yes" name="dataCapCount" type="uint32"/>
+ <integer comment="" name="rtpPayloadFormat" type="uint32"/>
+ <integer comment="" declare="yes" name="customPictureFormatCount" type="uint32"/>
+ <struct comment="" maxsize="6" name="customPictureFormat" size_fieldname="customPictureFormatCount" subtype="CustomPictureFormat" type="struct">
+ <fields>
+ <integer comment="" name="pictureWidth" type="uint32"/>
+ <integer comment="" name="pictureHeight" type="uint32"/>
+ <integer comment="" name="pixelAspectRatio" type="uint32"/>
+ <integer comment="" name="clockConversionCode" type="uint32"/>
+ <integer comment="" name="clockDivisor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="confResources" subtype="ConfResources" type="struct">
+ <fields>
+ <integer comment="" name="activeStreamsOnRegistration" type="uint32"/>
+ <integer comment="" name="maxBW" type="uint32"/>
+ <integer comment="" declare="yes" name="serviceResourceCount" type="uint32"/>
+ <struct comment="" maxsize="4" name="serviceResource" size_fieldname="serviceResourceCount" subtype="ServiceResource" type="struct">
+ <fields>
+ <integer comment="" declare="yes" name="layoutCount" type="uint32"/>
+ <enum comment="" maxsize="5" name="layouts" size_fieldname="layoutCount" subtype="Layout" type="uint32"/>
+ <integer comment="" name="serviceNum" type="uint32"/>
+ <integer comment="" name="maxStreams" type="uint32"/>
+ <integer comment="" name="maxConferences" type="uint32"/>
+ <integer comment="Active Conference" longcomment="Active conference at Registration" name="activeConferenceOnRegistration" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="18" name="audiocaps" size_fieldname="audioCapCount" type="struct">
+ <fields>
+ <enum comment="" declare="yes" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="" name="maxFramesPerPacket" type="uint32"/>
+ <union comment="" lookup_guide="payloadCapability" name="PAYLOADS" subtype="MediaCapabilityUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_ModemRelay" name="modemRelay" type="struct">
+ <fields>
+ <integer comment="" name="capAndVer" type="uint32"/>
+ <integer comment="" name="modAnd2833" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SPRT" name="sprtPayload" type="struct">
+ <fields>
+ <integer comment="" name="chan0MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxPayload" type="uint16"/>
+ <integer comment="" name="chan3MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxWindow" type="uint16"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_v150_LC_SSE" name="sse" type="struct">
+ <fields>
+ <integer comment="" name="standard" type="uint32"/>
+ <integer comment="" name="vendor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="10" name="vidCaps" size_fieldname="videoCapCount" subtype="VideoCapabilityV3" type="struct">
+ <fields>
+ <enum comment="" declare="yes" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <enum comment="" name="videoCapabilityDirection" subtype="TransmitOrReceive" type="uint32"/>
+ <integer comment="" declare="yes" name="levelPreferenceCount" type="uint32"/>
+ <struct comment="" maxsize="4" name="levelPreference" size_fieldname="levelPreferenceCount" subtype="LevelPreference" type="struct">
+ <fields>
+ <integer comment="" name="transmitPreference" type="uint32"/>
+ <integer comment="" name="format" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ <integer comment="" name="minBitRate" type="uint32"/>
+ <integer comment="" name="MPI" type="uint32"/>
+ <integer comment="" name="serviceNumber" type="uint32"/>
+ </fields>
+ </struct>
+ <enum comment="" name="encryptionCapability" subtype="Media_Encryption_Capability" type="uint32"/>
+ <union comment="" lookup_guide="payloadCapability" name="capability" subtype="VideoCapabilityV3Union" type="union">
+ <fields>
+ <struct comment="" lookup_eq="Media_Payload_H261" name="h261VideoCapability" subtype="H261VideoCapability" type="struct">
+ <fields>
+ <integer comment="Temporal spatial trade off capability" name="temporalSpatialTradeOffCapability" type="uint32"/>
+ <integer comment="Still Image Transmission" name="stillImageTransmission" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H263" name="h263VideoCapability" subtype="H263VideoCapability" type="struct">
+ <fields>
+ <bitfield comment="H263 Capability BitField" name="h263_capability_bitfield" size="uint32" subtype="Generic_Bitfield_32" type="bitfield">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x0010"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x0080"/>
+ <entry comment="" name="Generic_Bitfield_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="" name="Generic_Bitfield_Bit10" text="Bit10" value="0x0200"/>
+ <entry comment="" name="Generic_Bitfield_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="Generic_Bitfield_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="Generic_Bitfield_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="Generic_Bitfield_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="Generic_Bitfield_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="" name="Generic_Bitfield_Bit16" text="Bit16" value="0x8000"/>
+ <entry comment="" name="Generic_Bitfield_Bit17" text="Bit17" value="0x10000"/>
+ <entry comment="" name="Generic_Bitfield_Bit18" text="Bit18" value="0x20000"/>
+ <entry comment="" name="Generic_Bitfield_Bit19" text="Bit19" value="0x40000"/>
+ <entry comment="" name="Generic_Bitfield_Bit20" text="Bit20" value="0x80000"/>
+ <entry comment="" name="Generic_Bitfield_Bit21" text="Bit21" value="0x100000"/>
+ <entry comment="" name="Generic_Bitfield_Bit22" text="Bit22" value="0x200000"/>
+ <entry comment="" name="Generic_Bitfield_Bit23" text="Bit23" value="0x400000"/>
+ <entry comment="" name="Generic_Bitfield_Bit24" text="Bit24" value="0x800000"/>
+ <entry comment="" name="Generic_Bitfield_Bit25" text="Bit25" value="0x1000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit26" text="Bit26" value="0x2000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit27" text="Bit27" value="0x4000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit28" text="Bit28" value="0x8000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit29" text="Bit29" value="0x10000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit30" text="Bit30" value="0x20000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit31" text="Bit31" value="0x40000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit32" text="Bit32" value="0x80000000"/>
+ </entries>
+ </bitfield>
+ <integer comment="" name="annexNandWFutureUse" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H264" name="h264VideoCapability" subtype="H264VideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="profile" type="uint32"/>
+ <integer comment="" name="level" type="uint32"/>
+ <integer comment="" name="customMaxMBPS" type="uint32"/>
+ <integer comment="" name="customMaxFS" type="uint32"/>
+ <integer comment="" name="customMaxDPB" type="uint32"/>
+ <integer comment="" name="customMaxBRandCPB" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_Vieo" name="vieoVideoCapability" subtype="VieoVideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="modelNumber" type="uint32"/>
+ <integer comment="" name="bandwidth" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ <enum comment="" name="ipAddressingMode" subtype="IpAddrMode" type="uint32"/>
+ </fields>
+ <fields beginversion="16" endversion="22">
+ <enum comment="" name="ipAddressingMode" subtype="IpAddrMode" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" maxsize="5" name="dataCaps" size_fieldname="dataCapCount" subtype="DataApplicationCapabilityV2" type="struct">
+ <fields>
+ <enum comment="" name="payloadCapability" subtype="Media_PayloadType" type="uint32"/>
+ <enum comment="" name="dataCapabilityDirection" subtype="TransmitOrReceive" type="uint32"/>
+ <integer comment="" name="protocolDependentData" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ <enum comment="" name="encryptionCapability" subtype="Media_Encryption_Capability" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="MediaType">
+ <entries>
+ <entry comment="" name="MediaType_Invalid" text="MediaType_Invalid" value="0x0000"/>
+ <entry comment="" name="MediaType_Audio" text="MediaType_Audio" value="0x0001"/>
+ <entry comment="" name="MediaType_Main_Video" text="MediaType_Main_Video" value="0x0002"/>
+ <entry comment="" name="MediaType_FECC" text="MediaType_FECC" value="0x0003"/>
+ <entry comment="" name="MediaType_Presentation_Video" text="MediaType_Presentation_Video" value="0x0004"/>
+ <entry comment="" name="MediaType_DataApp_BFCP" text="MediaType_DataApp_BFCP" value="0x0005"/>
+ <entry comment="" name="MediaType_DataApp_IXChannel" text="MediaType_DataApp_IXChannel" value="0x0006"/>
+ <entry comment="" name="MediaType_T38" text="MediaType_T38" value="0x0007"/>
+ <entry comment="" name="MediaType_Max" text="MediaType_Max" value="0x0008"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="response" name="PortResMessage" opcode="0x0045" request="0x014b" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" declare="yes" name="callReference" req_resp_key="1" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="ipAddr" subtype="IpAddress" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="portNumber" subtype="uint32" type="ipport" use_param="ipAddr" make_additional_info="yes"/>
+ <integer comment="" name="RTCPPortNumber" type="uint32"/>
+ </fields>
+ <fields beginversion="19" endversion="22">
+ <enum comment="" name="mediaType" subtype="MediaType" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="RSVPDirection">
+ <entries>
+ <entry comment="" name="RSVPDirection_SEND" text="SEND" value="0x0001"/>
+ <entry comment="" name="RSVPDirection_RECV" text="RECV" value="0x0002"/>
+ <entry comment="" name="RSVPDirection_SENDRECV" text="SENDRECV" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="QoSResvNotifyMessage" opcode="0x0046" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="direction" subtype="RSVPDirection" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="QoSErrorCode">
+ <entries>
+ <entry comment="" name="QOS_CAUSE_RESERVATION_TIMEOUT" text="QOS_CAUSE_RESERVATION_TIMEOUT" value="0x0000"/>
+ <entry comment="" name="QOS_CAUSE_PATH_FAIL" text="QOS_CAUSE_PATH_FAIL" value="0x0001"/>
+ <entry comment="" name="QOS_CAUSE_RESV_FAIL" text="QOS_CAUSE_RESV_FAIL" value="0x0002"/>
+ <entry comment="" name="QOS_CAUSE_LISTEN_FAIL" text="QOS_CAUSE_LISTEN_FAIL" value="0x0003"/>
+ <entry comment="" name="QOS_CAUSE_RESOURCE_UNAVAILABLE" text="QOS_CAUSE_RESOURCE_UNAVAILABLE" value="0x0004"/>
+ <entry comment="" name="QOS_CAUSE_LISTEN_TIMEOUT" text="QOS_CAUSE_LISTEN_TIMEOUT" value="0x0005"/>
+ <entry comment="" name="QOS_CAUSE_RESV_RETRIES_FAIL" text="QOS_CAUSE_RESV_RETRIES_FAIL" value="0x0006"/>
+ <entry comment="" name="QOS_CAUSE_PATH_RETRIES_FAIL" text="QOS_CAUSE_PATH_RETRIES_FAIL" value="0x0007"/>
+ <entry comment="" name="QOS_CAUSE_RESV_PREEMPTION" text="QOS_CAUSE_RESV_PREEMPTION" value="0x0008"/>
+ <entry comment="" name="QOS_CAUSE_PATH_PREEMPTION" text="QOS_CAUSE_PATH_PREEMPTION" value="0x0009"/>
+ <entry comment="" name="QOS_CAUSE_RESV_MODIFY_FAIL" text="QOS_CAUSE_RESV_MODIFY_FAIL" value="0x000a"/>
+ <entry comment="" name="QOS_CAUSE_PATH_MODIFY_FAIL" text="QOS_CAUSE_PATH_MODIFY_FAIL" value="0x000b"/>
+ <entry comment="" name="QOS_CAUSE_RESV_TEAR" text="QOS_CAUSE_RESV_TEAR" value="0x000c"/>
+ </entries>
+ </enum>
+ <enum name="RSVPErrorCode">
+ <entries>
+ <entry comment="" name="RSVPErrorCode_CONFIRM" text="CONFIRM" value="0x0000"/>
+ <entry comment="" name="RSVPErrorCode_ADMISSION" text="ADMISSION" value="0x0001"/>
+ <entry comment="" name="RSVPErrorCode_ADMINISTRATIVE" text="ADMINISTRATIVE" value="0x0002"/>
+ <entry comment="" name="RSVPErrorCode_NO_PATH_INFORMATION" text="NO_PATH_INFORMATION" value="0x0003"/>
+ <entry comment="" name="RSVPErrorCode_NO_SENDER_INFORMATION" text="NO_SENDER_INFORMATION" value="0x0004"/>
+ <entry comment="" name="RSVPErrorCode_CONFLICTING_STYLE" text="CONFLICTING_STYLE" value="0x0005"/>
+ <entry comment="" name="RSVPErrorCode_UNKNOWN_STYLE" text="UNKNOWN_STYLE" value="0x0006"/>
+ <entry comment="" name="RSVPErrorCode_CONFLICTING_DST_PORTS" text="CONFLICTING_DST_PORTS" value="0x0007"/>
+ <entry comment="" name="RSVPErrorCode_CONFLICTING_SRC_PORTS" text="CONFLICTING_SRC_PORTS" value="0x0008"/>
+ <entry comment="" name="RSVPErrorCode_SERVICE_PREEMPTED" text="SERVICE_PREEMPTED" value="0x000c"/>
+ <entry comment="" name="RSVPErrorCode_UNKNOWN_OBJECT_CLASS" text="UNKNOWN_OBJECT_CLASS" value="0x000d"/>
+ <entry comment="" name="RSVPErrorCode_UNKNOWN_CLASS_TYPE" text="UNKNOWN_CLASS_TYPE" value="0x000e"/>
+ <entry comment="" name="RSVPErrorCode_API" text="API" value="0x0014"/>
+ <entry comment="" name="RSVPErrorCode_TRAFFIC" text="TRAFFIC" value="0x0015"/>
+ <entry comment="" name="RSVPErrorCode_TRAFFIC_SYSTEM" text="TRAFFIC_SYSTEM" value="0x0016"/>
+ <entry comment="" name="RSVPErrorCode_SYSTEM" text="SYSTEM" value="0x0017"/>
+ <entry comment="" name="RSVPErrorCode_ROUTING_PROBLEM" text="ROUTING_PROBLEM" value="0x0018"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="QoSErrorNotifyMessage" opcode="0x0047" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="direction" subtype="RSVPDirection" type="uint32"/>
+ <enum comment="" name="errorCode" subtype="QoSErrorCode" type="uint32"/>
+ <integer comment="" name="failureNodeIpAddr" type="uint32"/>
+ <enum comment="" name="rsvpErrorCode" subtype="RSVPErrorCode" type="uint32"/>
+ <integer comment="" name="rsvpErrorSubCodeVal" type="uint32"/>
+ <integer comment="" name="rsvpErrorFlag" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="SubscriptionFeatureID">
+ <entries>
+ <entry comment="" name="SubscriptionFeatureID_BLF" text="BLF" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="SubscriptionStatReqMessage" opcode="0x0048" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="transactionId" req_resp_key="1" type="uint32"/>
+ <enum comment="" name="subscriptionFeatureID" subtype="SubscriptionFeatureID" type="uint32"/>
+ <integer comment="" name="timer" type="uint32"/>
+ <string comment="" name="subscriptionID" size="64" type="char"/>
+ </fields>
+ </message>
+ <enum name="MediaPathID">
+ <entries>
+ <entry comment="" name="MediaPathID_Headset" text="Headset" value="0x0001"/>
+ <entry comment="" name="MediaPathID_Handset" text="Handset" value="0x0002"/>
+ <entry comment="" name="MediaPathID_Speaker" text="Speaker" value="0x0003"/>
+ </entries>
+ </enum>
+ <enum name="MediaPathEvent">
+ <entries>
+ <entry comment="" name="MediaPathEvent_On" text="On" value="0x0001"/>
+ <entry comment="" name="MediaPathEvent_Off" text="Off" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="MediaPathEventMessage" opcode="0x0049" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="mediaPathID" subtype="MediaPathID" type="uint32"/>
+ <enum comment="" name="mediaPathEvent" subtype="MediaPathEvent" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="MediaPathCapabilities">
+ <entries>
+ <entry comment="" name="MediaPathCapabilities_Enable" text="Enable" value="0x0001"/>
+ <entry comment="" name="MediaPathCapabilities_Disable" text="Disable" value="0x0002"/>
+ <entry comment="" name="MediaPathCapabilities_Monitor" text="Monitor" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="MediaPathCapabilityMessage" opcode="0x004a" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="mediaPathID" subtype="MediaPathID" type="uint32"/>
+ <enum comment="" name="mediaPathCapabilities" subtype="MediaPathCapabilities" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="MwiNotificationMessage" opcode="0x004c" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <string comment="" name="mwiTargetNumber" size="25" type="char"/>
+ <string comment="" name="mwiControlNumber" size="25" type="char"/>
+ <integer comment="" name="areMessagesWaiting" type="uint32"/>
+ <struct comment="" name="totalVmCounts" subtype="MwiMessageCounts" type="struct">
+ <fields>
+ <integer comment="" name="numNewMsgs" type="uint32"/>
+ <integer comment="" name="numOldMsgs" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="priorityVmCounts" subtype="MwiMessageCounts" type="struct">
+ <fields>
+ <integer comment="" name="numNewMsgs" type="uint32"/>
+ <integer comment="" name="numOldMsgs" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="totalFaxCounts" subtype="MwiMessageCounts" type="struct">
+ <fields>
+ <integer comment="" name="numNewMsgs" type="uint32"/>
+ <integer comment="" name="numOldMsgs" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="priorityFaxCounts" subtype="MwiMessageCounts" type="struct">
+ <fields>
+ <integer comment="" name="numNewMsgs" type="uint32"/>
+ <integer comment="" name="numOldMsgs" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="RegisterAckMessage" opcode="0x0081" request="0x0001" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="keepAliveInterval" type="uint32"/>
+ <string comment="" name="dateTemplate" size="6" type="char"/>
+ <integer comment="" name="alignmentPadding" type="uint16"/>
+ <integer comment="" name="secondaryKeepAliveInterval" type="uint32"/>
+ <integer comment="" name="maxProtocolVer" type="uint8"/>
+ <integer comment="unknown" longcomment="unknown (Part of ProtocolVer)" name="unknown" type="uint8"/>
+ <bitfield comment="Features this device supports" name="phoneFeatures" size="uint16" subtype="PhoneFeatures" type="bitfield">
+ <entries>
+ <entry comment="" name="PhoneFeatures_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="PhoneFeatures_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="PhoneFeatures_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="PhoneFeatures_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="Supports UTF-8" name="PhoneFeatures_UTF8" text="UTF8Bit5" value="0x0010"/>
+ <entry comment="" name="PhoneFeatures_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="PhoneFeatures_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="Support Dynamic Messages" name="PhoneFeatures_DynamicMessages" text="DynamicMessages" value="0x0080"/>
+ <entry comment="" name="PhoneFeatures_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="Supports DTMF Type RFC2833" name="PhoneFeatures_RFC2833" text="RFC2833" value="0x0200"/>
+ <entry comment="" name="PhoneFeatures_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="PhoneFeatures_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="PhoneFeatures_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="PhoneFeatures_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="PhoneFeatures_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="Abbreviated Dial" name="PhoneFeatures_Abbreviated_Dial" text="AbbrevDial" value="0x8000"/>
+ </entries>
+ </bitfield>
+ </fields>
+ </message>
+ <enum name="DeviceTone">
+ <entries>
+ <entry comment="" name="DeviceTone_Silence" text="Silence" value="0x0000"/>
+ <entry comment="" name="DeviceTone_Dtmf1" text="Dtmf1" value="0x0001"/>
+ <entry comment="" name="DeviceTone_Dtmf2" text="Dtmf2" value="0x0002"/>
+ <entry comment="" name="DeviceTone_Dtmf3" text="Dtmf3" value="0x0003"/>
+ <entry comment="" name="DeviceTone_Dtmf4" text="Dtmf4" value="0x0004"/>
+ <entry comment="" name="DeviceTone_Dtmf5" text="Dtmf5" value="0x0005"/>
+ <entry comment="" name="DeviceTone_Dtmf6" text="Dtmf6" value="0x0006"/>
+ <entry comment="" name="DeviceTone_Dtmf7" text="Dtmf7" value="0x0007"/>
+ <entry comment="" name="DeviceTone_Dtmf8" text="Dtmf8" value="0x0008"/>
+ <entry comment="" name="DeviceTone_Dtmf9" text="Dtmf9" value="0x0009"/>
+ <entry comment="" name="DeviceTone_Dtmf0" text="Dtmf0" value="0x000a"/>
+ <entry comment="" name="DeviceTone_DtmfStar" text="DtmfStar" value="0x000e"/>
+ <entry comment="" name="DeviceTone_DtmfPound" text="DtmfPound" value="0x000f"/>
+ <entry comment="" name="DeviceTone_DtmfA" text="DtmfA" value="0x0010"/>
+ <entry comment="" name="DeviceTone_DtmfB" text="DtmfB" value="0x0011"/>
+ <entry comment="" name="DeviceTone_DtmfC" text="DtmfC" value="0x0012"/>
+ <entry comment="" name="DeviceTone_DtmfD" text="DtmfD" value="0x0013"/>
+ <entry comment="" name="DeviceTone_InsideDialTone" text="InsideDialTone" value="0x0021"/>
+ <entry comment="" name="DeviceTone_OutsideDialTone" text="OutsideDialTone" value="0x0022"/>
+ <entry comment="" name="DeviceTone_LineBusyTone" text="LineBusyTone" value="0x0023"/>
+ <entry comment="" name="DeviceTone_AlertingTone" text="AlertingTone" value="0x0024"/>
+ <entry comment="" name="DeviceTone_ReorderTone" text="ReorderTone" value="0x0025"/>
+ <entry comment="" name="DeviceTone_RecorderWarningTone" text="RecorderWarningTone" value="0x0026"/>
+ <entry comment="" name="DeviceTone_RecorderDetectedTone" text="RecorderDetectedTone" value="0x0027"/>
+ <entry comment="" name="DeviceTone_RevertingTone" text="RevertingTone" value="0x0028"/>
+ <entry comment="" name="DeviceTone_ReceiverOffHookTone" text="ReceiverOffHookTone" value="0x0029"/>
+ <entry comment="" name="DeviceTone_MessageWaitingIndicatorTone" text="MessageWaitingIndicatorTone" value="0x002a"/>
+ <entry comment="" name="DeviceTone_NoSuchNumberTone" text="NoSuchNumberTone" value="0x002b"/>
+ <entry comment="" name="DeviceTone_BusyVerificationTone" text="BusyVerificationTone" value="0x002c"/>
+ <entry comment="" name="DeviceTone_CallWaitingTone" text="CallWaitingTone" value="0x002d"/>
+ <entry comment="" name="DeviceTone_ConfirmationTone" text="ConfirmationTone" value="0x002e"/>
+ <entry comment="" name="DeviceTone_CampOnIndicationTone" text="CampOnIndicationTone" value="0x002f"/>
+ <entry comment="" name="DeviceTone_RecallDialTone" text="RecallDialTone" value="0x0030"/>
+ <entry comment="" name="DeviceTone_ZipZip" text="ZipZip" value="0x0031"/>
+ <entry comment="" name="DeviceTone_Zip" text="Zip" value="0x0032"/>
+ <entry comment="" name="DeviceTone_BeepBonk" text="BeepBonk" value="0x0033"/>
+ <entry comment="" name="DeviceTone_MusicTone" text="MusicTone" value="0x0034"/>
+ <entry comment="" name="DeviceTone_HoldTone" text="HoldTone" value="0x0035"/>
+ <entry comment="" name="DeviceTone_TestTone" text="TestTone" value="0x0036"/>
+ <entry comment="" name="DeviceTone_MonitorWarningTone" text="MonitorWarningTone" value="0x0038"/>
+ <entry comment="" name="DeviceTone_SecureWarningTone" text="SecureWarningTone" value="0x0039"/>
+ <entry comment="" name="DeviceTone_AddCallWaiting" text="AddCallWaiting" value="0x0040"/>
+ <entry comment="" name="DeviceTone_PriorityCallWait" text="PriorityCallWait" value="0x0041"/>
+ <entry comment="" name="DeviceTone_RecallDial" text="RecallDial" value="0x0042"/>
+ <entry comment="" name="DeviceTone_BargIn" text="BargIn" value="0x0043"/>
+ <entry comment="" name="DeviceTone_DistinctAlert" text="DistinctAlert" value="0x0044"/>
+ <entry comment="" name="DeviceTone_PriorityAlert" text="PriorityAlert" value="0x0045"/>
+ <entry comment="" name="DeviceTone_ReminderRing" text="ReminderRing" value="0x0046"/>
+ <entry comment="" name="DeviceTone_PrecedenceRingBack" text="PrecedenceRingBack" value="0x0047"/>
+ <entry comment="" name="DeviceTone_PreemptionTone" text="PreemptionTone" value="0x0048"/>
+ <entry comment="" name="DeviceTone_NonSecureWarningTone" text="NonSecureWarningTone" value="0x0049"/>
+ <entry comment="" name="DeviceTone_MF1" text="MF1" value="0x0050"/>
+ <entry comment="" name="DeviceTone_MF2" text="MF2" value="0x0051"/>
+ <entry comment="" name="DeviceTone_MF3" text="MF3" value="0x0052"/>
+ <entry comment="" name="DeviceTone_MF4" text="MF4" value="0x0053"/>
+ <entry comment="" name="DeviceTone_MF5" text="MF5" value="0x0054"/>
+ <entry comment="" name="DeviceTone_MF6" text="MF6" value="0x0055"/>
+ <entry comment="" name="DeviceTone_MF7" text="MF7" value="0x0056"/>
+ <entry comment="" name="DeviceTone_MF8" text="MF8" value="0x0057"/>
+ <entry comment="" name="DeviceTone_MF9" text="MF9" value="0x0058"/>
+ <entry comment="" name="DeviceTone_MF0" text="MF0" value="0x0059"/>
+ <entry comment="" name="DeviceTone_MFKP1" text="MFKP1" value="0x005a"/>
+ <entry comment="" name="DeviceTone_MFST" text="MFST" value="0x005b"/>
+ <entry comment="" name="DeviceTone_MFKP2" text="MFKP2" value="0x005c"/>
+ <entry comment="" name="DeviceTone_MFSTP" text="MFSTP" value="0x005d"/>
+ <entry comment="" name="DeviceTone_MFST3P" text="MFST3P" value="0x005e"/>
+ <entry comment="" name="DeviceTone_MILLIWATT" text="MILLIWATT" value="0x005f"/>
+ <entry comment="" name="DeviceTone_MILLIWATTTEST" text="MILLIWATTTEST" value="0x0060"/>
+ <entry comment="" name="DeviceTone_HIGHTONE" text="HIGHTONE" value="0x0061"/>
+ <entry comment="" name="DeviceTone_FLASHOVERRIDE" text="FLASHOVERRIDE" value="0x0062"/>
+ <entry comment="" name="DeviceTone_FLASH" text="FLASH" value="0x0063"/>
+ <entry comment="" name="DeviceTone_PRIORITY" text="PRIORITY" value="0x0064"/>
+ <entry comment="" name="DeviceTone_IMMEDIATE" text="IMMEDIATE" value="0x0065"/>
+ <entry comment="" name="DeviceTone_PREAMPWARN" text="PREAMPWARN" value="0x0066"/>
+ <entry comment="" name="DeviceTone_Tone2105HZ" text="2105HZ" value="0x0067"/>
+ <entry comment="" name="DeviceTone_Tone2600HZ" text="2600HZ" value="0x0068"/>
+ <entry comment="" name="DeviceTone_Tone440HZ" text="440HZ" value="0x0069"/>
+ <entry comment="" name="DeviceTone_Tone300HZ" text="300HZ" value="0x006a"/>
+ <entry comment="" name="DeviceTone_Mobility_WP" text="Mobility_WP" value="0x006b"/>
+ <entry comment="" name="DeviceTone_Mobility_UAC" text="Mobility_UAC" value="0x006c"/>
+ <entry comment="" name="DeviceTone_Mobility_WTDN" text="Mobility_WTDN" value="0x006d"/>
+ <entry comment="" name="DeviceTone_Mobility_MON" text="Mobility_MON" value="0x006e"/>
+ <entry comment="" name="DeviceTone_Mobility_MOFF" text="Mobility_MOFF" value="0x006f"/>
+ <entry comment="" name="DeviceTone_Mobility_UKC" text="Mobility_UKC" value="0x0070"/>
+ <entry comment="" name="DeviceTone_Mobility_VMA" text="Mobility_VMA" value="0x0071"/>
+ <entry comment="" name="DeviceTone_Mobility_FAC" text="Mobility_FAC" value="0x0072"/>
+ <entry comment="" name="DeviceTone_Mobility_CMC" text="Mobility_CMC" value="0x0073"/>
+ <entry comment="" name="DeviceTone_MLPP_PALA" text="MLPP_PALA" value="0x0077"/>
+ <entry comment="" name="DeviceTone_MLPP_ICA" text="MLPP_ICA" value="0x0078"/>
+ <entry comment="" name="DeviceTone_MLPP_VCA" text="MLPP_VCA" value="0x0079"/>
+ <entry comment="" name="DeviceTone_MLPP_BPA" text="MLPP_BPA" value="0x007a"/>
+ <entry comment="" name="DeviceTone_MLPP_BNEA" text="MLPP_BNEA" value="0x007b"/>
+ <entry comment="" name="DeviceTone_MLPP_UPA" text="MLPP_UPA" value="0x007c"/>
+ <entry comment="" name="DeviceTone_TUA" text="TUA" value="0x007d"/>
+ <entry comment="" name="DeviceTone_GONE" text="GONE" value="0x007e"/>
+ <entry comment="" name="DeviceTone_NoTone" text="NoTone" value="0x007f"/>
+ <entry comment="" name="DeviceTone_MeetMe_Greeting" text="MeetMe_Greeting" value="0x0080"/>
+ <entry comment="" name="DeviceTone_MeetMe_NumberInvalid" text="MeetMe_NumberInvalid" value="0x0081"/>
+ <entry comment="" name="DeviceTone_MeetMe_NumberFailed" text="MeetMe_NumberFailed" value="0x0082"/>
+ <entry comment="" name="DeviceTone_MeetMe_EnterPIN" text="MeetMe_EnterPIN" value="0x0083"/>
+ <entry comment="" name="DeviceTone_MeetMe_InvalidPIN" text="MeetMe_InvalidPIN" value="0x0084"/>
+ <entry comment="" name="DeviceTone_MeetMe_FailedPIN" text="MeetMe_FailedPIN" value="0x0085"/>
+ <entry comment="" name="DeviceTone_MeetMe_CFB_Failed" text="MeetMe_CFB_Failed" value="0x0086"/>
+ <entry comment="" name="DeviceTone_MeetMe_EnterAccessCode" text="MeetMe_EnterAccessCode" value="0x0087"/>
+ <entry comment="" name="DeviceTone_MeetMe_AccessCodeInvalid" text="MeetMe_AccessCodeInvalid" value="0x0088"/>
+ <entry comment="" name="DeviceTone_MeetMe_AccessCodeFailed" text="MeetMe_AccessCodeFailed" value="0x0089"/>
+ <entry comment="" name="DeviceTone_MAX" text="MAX" value="0x008a"/>
+ </entries>
+ </enum>
+ <enum name="ToneOutputDirection">
+ <entries>
+ <entry comment="" name="ToneOutputDirection_User" text="User" value="0x0000"/>
+ <entry comment="" name="ToneOutputDirection_Network" text="Network" value="0x0001"/>
+ <entry comment="" name="ToneOutputDirection_All" text="All" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="StartToneMessage" opcode="0x0082" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="tone" subtype="DeviceTone" type="uint32" make_additional_info="yes"/>
+ <enum comment="" name="tone_output_direction" subtype="ToneOutputDirection" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="StopToneMessage" opcode="0x0083" type="MediaControl">
+ <fields>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <enum comment="" name="tone" subtype="DeviceTone" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="RingMode">
+ <entries>
+ <entry comment="" name="RingMode_RingOff" text="RingOff" value="0x0001"/>
+ <entry comment="" name="RingMode_InsideRing" text="InsideRing" value="0x0002"/>
+ <entry comment="" name="RingMode_OutsideRing" text="OutsideRing" value="0x0003"/>
+ <entry comment="" name="RingMode_FeatureRing" text="FeatureRing" value="0x0004"/>
+ <entry comment="" name="RingMode_FlashOnly" text="FlashOnly" value="0x0005"/>
+ <entry comment="" name="RingMode_PrecedenceRing" text="PrecedenceRing" value="0x0006"/>
+ </entries>
+ </enum>
+ <enum name="RingDuration">
+ <entries>
+ <entry comment="" name="RingDuration_NormalRing" text="NormalRing" value="0x0001"/>
+ <entry comment="" name="RingDuration_SingleRing" text="SingleRing" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="SetRingerMessage" opcode="0x0085" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="ringMode" subtype="RingMode" type="uint32"/>
+ <enum comment="" name="ringDuration" subtype="RingDuration" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="LampMode">
+ <entries>
+ <entry comment="" name="LampMode_Off" text="Off" value="0x0001"/>
+ <entry comment="" name="LampMode_On" text="On" value="0x0002"/>
+ <entry comment="" name="LampMode_Wink" text="Wink" value="0x0003"/>
+ <entry comment="" name="LampMode_Flash" text="Flash" value="0x0004"/>
+ <entry comment="" name="LampMode_Blink" text="Blink" value="0x0005"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="SetLampMessage" opcode="0x0086" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="stimulus" subtype="DeviceStimulus" type="uint32"/>
+ <integer comment="" name="stimulusInstance" type="uint32"/>
+ <enum comment="" name="lampMode" subtype="LampMode" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="SetHookFlashDetectMessage" opcode="0x0087" type="CallControl"/>
+ <enum name="SpeakerMode">
+ <entries>
+ <entry comment="" name="SpeakerMode_On" text="On" value="0x0001"/>
+ <entry comment="" name="SpeakerMode_Off" text="Off" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="SetSpeakerModeMessage" opcode="0x0088" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="speakerMode" subtype="SpeakerMode" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="MicrophoneMode">
+ <entries>
+ <entry comment="" name="MicrophoneMode_On" text="On" value="0x0001"/>
+ <entry comment="" name="MicrophoneMode_Off" text="Off" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="SetMicroModeMessage" opcode="0x0089" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="micMode" subtype="MicrophoneMode" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="Media_SilenceSuppression">
+ <entries>
+ <entry comment="" name="Media_SilenceSuppression_Off" text="Media_SilenceSuppression_Off" value="0x0000"/>
+ <entry comment="" name="Media_SilenceSuppression_On" text="Media_SilenceSuppression_On" value="0x0001"/>
+ </entries>
+ </enum>
+ <enum name="MediaEncryptionAlgorithmType">
+ <entries>
+ <entry comment="" name="MediaEncryptionAlgorithmType_NO_ENCRYPTION" text="NO_ENCRYPTION" value="0x0000"/>
+ <entry comment="" name="MediaEncryptionAlgorithmType_CCM_AES_CM_128_HMAC_SHA1_32" text="CCM_AES_CM_128_HMAC_SHA1_32" value="0x0001"/>
+ <entry comment="" name="MediaEncryptionAlgorithmType_CCM_AES_CM_128_HMAC_SHA1_80" text="CCM_AES_CM_128_HMAC_SHA1_80" value="0x0002"/>
+ <entry comment="" name="MediaEncryptionAlgorithmType_CCM_F8_128_HMAC_SHA1_32" text="CCM_F8_128_HMAC_SHA1_32" value="0x0003"/>
+ <entry comment="" name="MediaEncryptionAlgorithmType_CCM_F8_128_HMAC_SHA1_80" text="CCM_F8_128_HMAC_SHA1_80" value="0x0004"/>
+ <entry comment="" name="MediaEncryptionAlgorithmType_CCM_AEAD_AES_128_GCM" text="CCM_AEAD_AES_128_GCM" value="0x0005"/>
+ <entry comment="" name="MediaEncryptionAlgorithmType_CCM_AEAD_AES_256_GCM" text="CCM_AEAD_AES_256_GCM" value="0x0006"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="StartMediaTransmissionMessage" opcode="0x008a" priority="send_immediate" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <ipv4or6 comment="" name="remoteIpAddr" subtype="IPV4orV6Address" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <integer comment="" name="milliSecondPacketSize" type="uint32"/>
+ <enum comment="" declare="yes" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <struct comment="" name="qualifierOut" subtype="Media_QualifierOutgoing" type="struct">
+ <fields>
+ <integer comment="" name="precedenceValue" type="uint32"/>
+ <enum comment="" name="ssValue" subtype="Media_SilenceSuppression" type="uint32"/>
+ <integer comment="" name="maxFramesPerPacket" type="uint16"/>
+ <integer comment="Unused/Padding" name="padding" type="uint16"/>
+ </fields>
+ <fields beginversion="0" endversion="10">
+ <enum comment="" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <union comment="" lookup_guide="compressionType" name="codecParamsUnion" subtype="Media_QualifierOutgoingUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <struct comment="" name="mTxMediaEncryptionKeyInfo" subtype="MediaEncryptionKeyInfo" type="struct">
+ <fields>
+ <enum comment="" name="algorithmID" subtype="MediaEncryptionAlgorithmType" type="uint32"/>
+ <integer comment="" declare="yes" name="keylen" type="uint16"/>
+ <integer comment="" declare="yes" name="saltlen" type="uint16"/>
+ <integer comment="" maxsize="16" name="key" size_fieldname="keylen" type="uint8"/>
+ <integer comment="" maxsize="16" name="salt" size_fieldname="saltlen" type="uint8"/>
+ <integer comment="" name="isMKIPresent" type="uint32"/>
+ <integer comment="" name="keyDerivationRate" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="streamPassThroughId" type="uint32"/>
+ <integer comment="" name="associatedStreamId" type="uint32"/>
+ <integer comment="" name="RFC2833PayloadType" type="uint32"/>
+ <integer comment="" name="dtmfType" type="uint32"/>
+ <integer comment="" name="mixingMode" type="uint32"/>
+ </fields>
+ <fields beginversion="15">
+ <integer comment="" name="partyDirection" type="uint32"/>
+ </fields>
+ <fields beginversion="21" endversion="22">
+ <struct comment="" name="latentCapsInfo" type="struct">
+ <fields>
+ <integer comment="" name="active" type="uint32"/>
+ <struct comment="" name="modemRelay" type="struct">
+ <fields>
+ <integer comment="" name="capAndVer" type="uint32"/>
+ <integer comment="" name="modAnd2833" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="sprtPayload" type="struct">
+ <fields>
+ <integer comment="" name="chan0MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxPayload" type="uint16"/>
+ <integer comment="" name="chan3MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxWindow" type="uint16"/>
+ </fields>
+ </struct>
+ <struct comment="" name="sse" type="struct">
+ <fields>
+ <integer comment="" name="standard" type="uint32"/>
+ <integer comment="" name="vendor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="payloadParam" type="struct">
+ <fields>
+ <integer comment="" name="nse" type="uint8"/>
+ <integer comment="" name="rfc2833" type="uint8"/>
+ <integer comment="" name="sse" type="uint8"/>
+ <integer comment="" name="v150sprt" type="uint8"/>
+ <integer comment="" name="noaudio" type="uint8"/>
+ <integer comment="" name="FutureUse1" type="uint8"/>
+ <integer comment="" name="FutureUse2" type="uint8"/>
+ <integer comment="" name="FutureUse3" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="PortHandling">
+ <entries>
+ <entry comment="" name="PortHandling_CLOSE_PORT" text="CLOSE_PORT" value="0x0000"/>
+ <entry comment="" name="PortHandling_KEEP_PORT" text="KEEP_PORT" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="StopMediaTransmissionMessage" opcode="0x008b" priority="send_immediate" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="portHandlingFlag" subtype="PortHandling" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="CallType">
+ <entries>
+ <entry comment="" name="CallType_InBoundCall" text="InBoundCall" value="0x0001"/>
+ <entry comment="" name="CallType_OutBoundCall" text="OutBoundCall" value="0x0002"/>
+ <entry comment="" name="CallType_ForwardCall" text="ForwardCall" value="0x0003"/>
+ </entries>
+ </enum>
+ <enum name="CallSecurityStatusType">
+ <entries>
+ <entry comment="" name="CallSecurityStatusType_Unknown" text="Unknown" value="0x0000"/>
+ <entry comment="" name="CallSecurityStatusType_NotAuthenticated" text="NotAuthenticated" value="0x0001"/>
+ <entry comment="" name="CallSecurityStatusType_Authenticated" text="Authenticated" value="0x0002"/>
+ <entry comment="" name="CallSecurityStatusType_Encrypted" text="Encrypted" value="0x0003"/>
+ <entry comment="" name="CallSecurityStatusType_Max" text="Max" value="0x0004"/>
+ </entries>
+ </enum>
+ <bitfield name="RestrictInformationType">
+ <entries>
+ <entry comment="" name="RestrictInformationType_CallingPartyName" text="CallingPartyName" value="0x00000001"/>
+ <entry comment="" name="RestrictInformationType_CallingPartyNumber" text="CallingPartyNumber" value="0x00000002"/>
+ <entry comment="" name="RestrictInformationType_CallingParty" text="CallingParty" value="0x00000003"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyName" text="CalledPartyName" value="0x00000004"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyNumber" text="CalledPartyNumber" value="0x00000008"/>
+ <entry comment="" name="RestrictInformationType_CalledParty" text="CalledParty" value="0x0000000c"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyName" text="OriginalCalledPartyName" value="0x00000010"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyNumber" text="OriginalCalledPartyNumber" value="0x00000020"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledParty" text="OriginalCalledParty" value="0x00000030"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyName" text="LastRedirectPartyName" value="0x00000040"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyNumber" text="LastRedirectPartyNumber" value="0x00000080"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectParty" text="LastRedirectParty" value="0x000000c0"/>
+ <entry comment="" name="RestrictInformationType_BitsReserved" text="BitsReserved" value="0xffffff00"/>
+ </entries>
+ </bitfield>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="CallInfoMessage" opcode="0x008f" priority="send_immediate" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <string comment="Calling Party Name" name="callingPartyName" size="40" type="char"/>
+ <string comment="Calling Party Number" name="callingParty" size="24" type="char"/>
+ <string comment="Called Party Name" name="calledPartyName" size="40" type="char"/>
+ <string comment="CalledPartyNumber" name="calledParty" size="24" type="char"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="callType" subtype="CallType" type="uint32"/>
+ <string comment="Original Called Party Name" name="originalCalledPartyName" size="40" type="char"/>
+ <string comment="Original Called Party Number" name="originalCalledParty" size="24" type="char"/>
+ <string comment="Last Redirecting Party Name" name="lastRedirectingPartyName" size="40" type="char"/>
+ <string comment="Last Redirecting Party Number" name="lastRedirectingParty" size="24" type="char"/>
+ <integer comment="Original Called Party Redirect Reason" name="originalCdpnRedirectReason" type="uint32"/>
+ <integer comment="Last Redirecting Reason" name="lastRedirectingReason" type="uint32"/>
+ <string comment="Calling Party Voicemail Box Number" name="cgpnVoiceMailbox" size="24" type="char"/>
+ <string comment="Called Party Voicemail Box Number" name="cdpnVoiceMailbox" size="24" type="char"/>
+ <string comment="Original Called Party Voicemail Box Number" name="originalCdpnVoiceMailbox" size="24" type="char"/>
+ <string comment="Last Redirecting Parties Voicemail Box Number" name="lastRedirectingVoiceMailbox" size="24" type="char"/>
+ <integer comment="CallId" name="callInstance" type="uint32"/>
+ <enum comment="" name="callSecurityStatus" subtype="CallSecurityStatusType" type="uint32"/>
+ <bitfield comment="" name="partyPIRestrictionBits" size="uint32" subtype="RestrictInformationType" type="bitfield">
+ <entries>
+ <entry comment="" name="RestrictInformationType_CallingPartyName" text="CallingPartyName" value="0x00000001"/>
+ <entry comment="" name="RestrictInformationType_CallingPartyNumber" text="CallingPartyNumber" value="0x00000002"/>
+ <entry comment="" name="RestrictInformationType_CallingParty" text="CallingParty" value="0x00000003"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyName" text="CalledPartyName" value="0x00000004"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyNumber" text="CalledPartyNumber" value="0x00000008"/>
+ <entry comment="" name="RestrictInformationType_CalledParty" text="CalledParty" value="0x0000000c"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyName" text="OriginalCalledPartyName" value="0x00000010"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyNumber" text="OriginalCalledPartyNumber" value="0x00000020"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledParty" text="OriginalCalledParty" value="0x00000030"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyName" text="LastRedirectPartyName" value="0x00000040"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyNumber" text="LastRedirectPartyNumber" value="0x00000080"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectParty" text="LastRedirectParty" value="0x000000c0"/>
+ <entry comment="" name="RestrictInformationType_BitsReserved" text="BitsReserved" value="0xffffff00"/>
+ </entries>
+ </bitfield>
+ <code type="calling_and_called_party" use_param="callingParty,calledParty"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="ForwardStatResMessage" opcode="0x0090" priority="send_immediate" request="0x0009" type="RegistrationAndManagement">
+ <fields>
+ <integer comment="" name="activeForward" type="uint32"/>
+ <integer comment="" declare="yes" name="lineNumber" req_resp_key="1" type="uint32"/>
+ <integer comment="" name="forwardAllActive" type="uint32"/>
+ <string comment="" declare="yes" name="forwardAllDirnum" size="VariableDirnumSize" type="char"/>
+ <integer comment="" name="forwardBusyActive" type="uint32"/>
+ <string comment="" declare="yes" name="forwardBusyDirnum" size="VariableDirnumSize" type="char"/>
+ <integer comment="" name="forwardNoAnswerActive" type="uint32"/>
+ <string comment="" declare="yes" name="forwardNoAnswerlDirnum" size="VariableDirnumSize" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SpeedDialStatResMessage" opcode="0x0091" priority="send_immediate" request="0x000a" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="speedDialNumber" req_resp_key="1" type="uint32"/>
+ <string comment="" name="speedDialDirNumber" size="24" type="char"/>
+ <string comment="" name="speedDialDisplayName" size="40" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="LineStatResMessage" opcode="0x0092" priority="send_immediate" request="0x000b" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="lineNumber" req_resp_key="1" type="uint32"/>
+ <string comment="" name="lineDirNumber" size="24" type="char"/>
+ <string comment="" name="lineFullyQualifiedDisplayName" size="40" type="char"/>
+ <string comment="" name="lineTextLabel" size="40" type="char"/>
+ <integer comment="" name="lineDisplayOptions" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="ConfigStatResMessage" opcode="0x0093" priority="send_immediate" request="0x000c" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="sid" type="struct">
+ <fields>
+ <string comment="Device Name" name="DeviceName" size="16" type="char"/>
+ <integer comment="User Id" name="reserved_for_future_use" type="uint32"/>
+ <integer comment="Device Instance" name="instance" type="uint32"/>
+ </fields>
+ </struct>
+ <string comment="" name="userName" size="40" type="char"/>
+ <string comment="" name="serverName" size="40" type="char"/>
+ <integer comment="" name="numberOfLines" type="uint32"/>
+ <integer comment="" name="numberOfSpeedDials" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="TimeDateResMessage" opcode="0x0094" priority="send_immediate" request="0x000d" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="timeDataInfo" subtype="Time" type="struct">
+ <fields>
+ <integer comment="" name="wYear" type="uint32"/>
+ <integer comment="" name="wMonth" type="uint32"/>
+ <integer comment="" name="wDayOfWeek" type="uint32"/>
+ <integer comment="" name="wDay" type="uint32"/>
+ <integer comment="" name="wHour" type="uint32"/>
+ <integer comment="" name="wMinute" type="uint32"/>
+ <integer comment="" name="wSecond" type="uint32"/>
+ <integer comment="" name="wMilliseconds" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="systemTime" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="SessionType">
+ <entries>
+ <entry comment="" name="SessionType_Chat" text="Chat" value="0x0001"/>
+ <entry comment="" name="SessionType_Whiteboard" text="Whiteboard" value="0x0002"/>
+ <entry comment="" name="SessionType_ApplicationSharing" text="ApplicationSharing" value="0x0004"/>
+ <entry comment="" name="SessionType_FileTransfer" text="FileTransfer" value="0x0008"/>
+ <entry comment="" name="SessionType_Video" text="Video" value="0x0010"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="StartSessionTransmissionMessage" opcode="0x0095" priority="send_immediate" type="IntraCCM">
+ <fields>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="IpAddress" type="ipaddr"/>
+ <enum comment="" name="sessionType" subtype="SessionType" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="StopSessionTransmissionMessage" opcode="0x0096" type="IntraCCM">
+ <fields>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="IpAddress" type="ipaddr"/>
+ <enum comment="" name="sessionType" subtype="SessionType" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="ButtonType">
+ <entries>
+ <entry comment="" name="ButtonType_Unused" text="Unused" value="0x00"/>
+ <entry comment="" name="ButtonType_LastNumberRedial" text="Last Number Redial" value="0x01"/>
+ <entry comment="" name="ButtonType_SpeedDial" text="SpeedDial" value="0x02"/>
+ <entry comment="" name="ButtonType_Hold" text="Hold" value="0x03"/>
+ <entry comment="" name="ButtonType_Transfer" text="Transfer" value="0x04"/>
+ <entry comment="" name="ButtonType_ForwardAll" text="Forward All" value="0x05"/>
+ <entry comment="" name="ButtonType_ForwardBusy" text="Forward Busy" value="0x06"/>
+ <entry comment="" name="ButtonType_ForwardNoAnswer" text="Forward No Answer" value="0x07"/>
+ <entry comment="" name="ButtonType_Display" text="Display" value="0x08"/>
+ <entry comment="" name="ButtonType_Line" text="Line" value="0x09"/>
+ <entry comment="" name="ButtonType_T120Chat" text="T120 Chat" value="0x0A"/>
+ <entry comment="" name="ButtonType_T120Whiteboard" text="T120 Whiteboard" value="0x0B"/>
+ <entry comment="" name="ButtonType_T120ApplicationSharing" text="T120 Application Sharing" value="0x0C"/>
+ <entry comment="" name="ButtonType_T120FileTransfer" text="T120 File Transfer" value="0x0D"/>
+ <entry comment="" name="ButtonType_Video" text="Video" value="0x0E"/>
+ <entry comment="" name="ButtonType_Voicemail" text="Voicemail" value="0x0F"/>
+ <entry comment="" name="ButtonType_AnswerRelease" text="Answer Release" value="0x10"/>
+ <entry comment="" name="ButtonType_AutoAnswer" text="Auto Answer" value="0x11"/>
+ <entry comment="" name="ButtonType_Select" text="Select" value="0x12"/>
+ <entry comment="" name="ButtonType_Feature" text="Feature" value="0x13"/>
+ <entry comment="" name="ButtonType_ServiceURL" text="ServiceURL" value="0x14"/>
+ <entry comment="" name="ButtonType_BusyLampFieldSpeeddial" text="BusyLampField Speeddial" value="0x15"/>
+ <entry comment="" name="ButtonType_MaliciousCall" text="Malicious Call" value="0x1B"/>
+ <entry comment="" name="ButtonType_GenericAppB1" text="Generic App B1" value="0x21"/>
+ <entry comment="" name="ButtonType_GenericAppB2" text="Generic App B2" value="0x22"/>
+ <entry comment="" name="ButtonType_GenericAppB3" text="Generic App B3" value="0x23"/>
+ <entry comment="" name="ButtonType_GenericAppB4" text="Generic App B4" value="0x24"/>
+ <entry comment="" name="ButtonType_GenericAppB5" text="Generic App B5" value="0x25"/>
+ <entry comment="" name="ButtonType_MonitorMultiblink" text="Monitor/Multiblink" value="0x26"/>
+ <entry comment="" name="ButtonType_MeetMeConference" text="Meet Me Conference" value="0x7B"/>
+ <entry comment="" name="ButtonType_Conference" text="Conference" value="0x7D"/>
+ <entry comment="" name="ButtonType_CallPark" text="Call Park" value="0x7E"/>
+ <entry comment="" name="ButtonType_CallPickup" text="Call Pickup" value="0x7F"/>
+ <entry comment="" name="ButtonType_GroupCallPickup" text="Group Call Pickup" value="0x80"/>
+ <entry comment="" name="ButtonType_Mobility" text="Mobility" value="0x81"/>
+ <entry comment="" name="ButtonType_DoNotDisturb" text="DoNotDisturb" value="0x82"/>
+ <entry comment="" name="ButtonType_ConfList" text="ConfList" value="0x83"/>
+ <entry comment="" name="ButtonType_RemoveLastParticipant" text="RemoveLastParticipant" value="0x84"/>
+ <entry comment="" name="ButtonType_QRT" text="QRT" value="0x85"/>
+ <entry comment="" name="ButtonType_CallBack" text="CallBack" value="0x86"/>
+ <entry comment="" name="ButtonType_OtherPickup" text="OtherPickup" value="0x87"/>
+ <entry comment="" name="ButtonType_VideoMode" text="VideoMode" value="0x88"/>
+ <entry comment="" name="ButtonType_NewCall" text="NewCall" value="0x89"/>
+ <entry comment="" name="ButtonType_EndCall" text="EndCall" value="0x8A"/>
+ <entry comment="" name="ButtonType_HLog" text="HLog" value="0x8B"/>
+ <entry comment="" name="ButtonType_Queuing" text="Queuing" value="0x8F"/>
+ <entry comment="" name="ButtonType_TestE" text="Test E" value="0xC0"/>
+ <entry comment="" name="ButtonType_TestF" text="Test F" value="0xC1"/>
+ <entry comment="" name="ButtonType_TestI" text="Test I" value="0xC4"/>
+ <entry comment="" name="ButtonType_Messages" text="Messages" value="0xC2"/>
+ <entry comment="" name="ButtonType_Directory" text="Directory" value="0xC3"/>
+ <entry comment="" name="ButtonType_Application" text="Application" value="0xC5"/>
+ <entry comment="" name="ButtonType_Headset" text="Headset" value="0xC6"/>
+ <entry comment="" name="ButtonType_Keypad" text="Keypad" value="0xF0"/>
+ <entry comment="" name="ButtonType_Aec" text="Aec" value="0xFD"/>
+ <entry comment="" name="ButtonType_Undefined" text="Undefined" value="0xFF"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="ButtonTemplateResMessage" opcode="0x0097" request="0x000e" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="buttonTemplate" subtype="ButtonTemplate" type="struct">
+ <fields>
+ <integer comment="" name="buttonOffset" type="uint32"/>
+ <integer comment="" name="buttonCount" type="uint32"/>
+ <integer comment="" declare="yes" name="totalButtonCount" type="uint32"/>
+ <struct comment="" maxsize="42" name="definition" size_fieldname="totalButtonCount" subtype="ButtonDefinition" type="struct">
+ <fields>
+ <integer comment="" name="instanceNumber" type="uint8"/>
+ <enum comment="" name="buttonDefinition" subtype="ButtonType" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="VersionResMessage" opcode="0x0098" request="0x000f" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <string comment="" name="versionStr" size="16" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="DisplayTextMessage" opcode="0x0099" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <string comment="" name="text" size="32" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="ClearDisplay" opcode="0x009a" type="CallControl"/>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="CapabilitiesReq" opcode="0x009b" type="RegistrationAndManagement"/>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="RegisterRejectMessage" opcode="0x009d" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <string comment="" name="text" size="32" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="ServerResMessage" opcode="0x009e" request="0x0012" type="RegistrationAndManagement">
+ <fields>
+ <struct comment="" name="server" size="5" subtype="ServerIdentifier" type="struct">
+ <fields>
+ <string comment="" name="ServerName" size="48" type="char"/>
+ </fields>
+ </struct>
+ <integer comment="" name="serverTcpListenPort" size="5" type="uint32"/>
+ </fields>
+ <fields size_lt="293">
+ <struct comment="Server IPv4 Address" name="serverIpAddr" size="5" subtype="IPv4Address" type="struct">
+ <fields>
+ <ip comment="ipaddress in big endian" endianness="big" name="stationIpAddr" type="ipv4"/>
+ </fields>
+ </struct>
+ </fields>
+ <fields beginversion="0" endversion="22" size_gt="292">
+ <struct comment="Server IP Address (IPv4or6)" name="serverIpAddr" size="5" subtype="IPV4orV6Address" type="struct">
+ <fields>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="stationIpAddr" size="16" subtype="uint8" type="ipaddr"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="DeviceResetType">
+ <entries>
+ <entry comment="" name="DeviceResetType_RESET" text="RESET" value="0x0001"/>
+ <entry comment="" name="DeviceResetType_RESTART" text="RESTART" value="0x0002"/>
+ <entry comment="" name="DeviceResetType_APPLY_CONFIG" text="APPLY_CONFIG" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="Reset" opcode="0x009f" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="resetType" subtype="DeviceResetType" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="KeepAliveAckMessage" opcode="0x0100" request="0x0000" type="RegistrationAndManagement"/>
+ <enum name="Media_EchoCancellation">
+ <entries>
+ <entry comment="" name="Media_EchoCancellation_Off" text="Media_EchoCancellation_Off" value="0x0000"/>
+ <entry comment="" name="Media_EchoCancellation_On" text="Media_EchoCancellation_On" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="StartMulticastMediaReceptionMessage" opcode="0x0101" priority="send_immediate" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="multicastIpAddr" subtype="IpAddress" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="multicastPortNumber" subtype="uint32" type="ipport" use_param="multicastIpAddr" make_additional_info="yes"/>
+ <integer comment="" name="milliSecondPacketSize" type="uint32"/>
+ <enum comment="" declare="yes" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <struct comment="" name="qualifierIn" subtype="Media_QualifierIncoming" type="struct">
+ <fields>
+ <enum comment="" name="ecValue" subtype="Media_EchoCancellation" type="uint32"/>
+ </fields>
+ <fields beginversion="0" endversion="10">
+ <enum comment="" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <union comment="" lookup_guide="compressionType" name="codecParamsUnion" subtype="Media_QualifierIncomingUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="StartMulticastMediaTransmissionMessage" opcode="0x0102" priority="send_immediate" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="multicastIpAddr" subtype="IpAddress" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="multicastPortNumber" subtype="uint32" type="ipport" use_param="multicastIpAddr" make_additional_info="yes"/>
+ <integer comment="" name="milliSecondPacketSize" type="uint32"/>
+ <enum comment="" declare="yes" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <struct comment="" name="qualifierOut" subtype="Media_QualifierOutgoing" type="struct">
+ <fields>
+ <integer comment="" name="precedenceValue" type="uint32"/>
+ <enum comment="" name="ssValue" subtype="Media_SilenceSuppression" type="uint32"/>
+ <integer comment="" name="maxFramesPerPacket" type="uint16"/>
+ <integer comment="Unused/Padding" name="padding" type="uint16"/>
+ </fields>
+ <fields beginversion="0" endversion="10">
+ <enum comment="" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <union comment="" lookup_guide="compressionType" name="codecParamsUnion" subtype="Media_QualifierOutgoingUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="StopMulticastMediaReceptionMessage" opcode="0x0103" priority="send_immediate" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="StopMulticastMediaTransmissionMessage" opcode="0x0104" priority="send_immediate" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="OpenReceiveChannelMessage" opcode="0x0105" priority="send_immediate" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <integer comment="" name="milliSecondPacketSize" type="uint32"/>
+ <enum comment="" declare="yes" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <struct comment="" name="qualifierIn" subtype="Media_QualifierIncoming" type="struct">
+ <fields>
+ <enum comment="" name="ecValue" subtype="Media_EchoCancellation" type="uint32"/>
+ </fields>
+ <fields beginversion="0" endversion="10">
+ <enum comment="" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <union comment="" lookup_guide="compressionType" name="codecParamsUnion" subtype="Media_QualifierIncomingUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <struct comment="" name="mRxMediaEncryptionKeyInfo" subtype="MediaEncryptionKeyInfo" type="struct">
+ <fields>
+ <enum comment="" name="algorithmID" subtype="MediaEncryptionAlgorithmType" type="uint32"/>
+ <integer comment="" declare="yes" name="keylen" type="uint16"/>
+ <integer comment="" declare="yes" name="saltlen" type="uint16"/>
+ <integer comment="" maxsize="16" name="key" size_fieldname="keylen" type="uint8"/>
+ <integer comment="" maxsize="16" name="salt" size_fieldname="saltlen" type="uint8"/>
+ <integer comment="" name="isMKIPresent" type="uint32"/>
+ <integer comment="" name="keyDerivationRate" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="streamPassThroughId" type="uint32"/>
+ <integer comment="" name="associatedStreamId" type="uint32"/>
+ <integer comment="" name="RFC2833PayloadType" type="uint32"/>
+ <integer comment="" name="dtmfType" type="uint32"/>
+ </fields>
+ <fields beginversion="11">
+ <integer comment="" name="mixingMode" type="uint32"/>
+ <integer comment="" name="partyDirection" type="uint32"/>
+ <ipv4or6 comment="" name="sourceIpAddr" subtype="IPV4orV6Address" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="sourcePortNumber" subtype="uint32" type="ipport" use_param="sourceIpAddr" make_additional_info="yes"/>
+ </fields>
+ <fields beginversion="16">
+ <enum comment="" name="requestedIpAddrType" subtype="IpAddrType" type="uint32"/>
+ </fields>
+ <fields beginversion="17" size_gt="132">
+ <integer comment="" name="audioLevelAdjustment" type="int32"/>
+ </fields>
+ <fields beginversion="21" endversion="22" size_gt="132">
+ <struct comment="" name="latentCapsInfo" type="struct">
+ <fields>
+ <integer comment="" name="active" type="uint32"/>
+ <struct comment="" name="modemRelay" type="struct">
+ <fields>
+ <integer comment="" name="capAndVer" type="uint32"/>
+ <integer comment="" name="modAnd2833" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="sprtPayload" type="struct">
+ <fields>
+ <integer comment="" name="chan0MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxPayload" type="uint16"/>
+ <integer comment="" name="chan3MaxPayload" type="uint16"/>
+ <integer comment="" name="chan2MaxWindow" type="uint16"/>
+ </fields>
+ </struct>
+ <struct comment="" name="sse" type="struct">
+ <fields>
+ <integer comment="" name="standard" type="uint32"/>
+ <integer comment="" name="vendor" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" name="payloadParam" type="struct">
+ <fields>
+ <integer comment="" name="nse" type="uint8"/>
+ <integer comment="" name="rfc2833" type="uint8"/>
+ <integer comment="" name="sse" type="uint8"/>
+ <integer comment="" name="v150sprt" type="uint8"/>
+ <integer comment="" name="noaudio" type="uint8"/>
+ <integer comment="" name="FutureUse1" type="uint8"/>
+ <integer comment="" name="FutureUse2" type="uint8"/>
+ <integer comment="" name="FutureUse3" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="CloseReceiveChannelMessage" opcode="0x0106" priority="send_immediate" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="portHandlingFlag" subtype="PortHandling" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="ConnectionStatisticsReqMessage" opcode="0x0107" type="CallControl">
+ <fields endversion="17" fixed="yes">
+ <string comment="" name="directoryNum" size="24" type="char"/>
+ </fields>
+ <fields beginversion="18" endversion="22" fixed="yes">
+ <string comment="" name="directoryNum" size="28" type="char"/>
+ </fields>
+ <fields>
+ <integer comment="CallId" declare="yes" name="callReference" req_resp_key="1" type="uint32"/>
+ <enum comment="" name="statsProcessingMode" subtype="StatsProcessingType" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SoftKeyTemplateResMessage" opcode="0x0108" request="0x0028" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="softKeyTemplate" subtype="SoftKeyTemplate" type="struct">
+ <fields>
+ <integer comment="" name="softKeyOffset" type="uint32"/>
+ <integer comment="" name="softKeyCount" type="uint32"/>
+ <integer comment="" declare="yes" name="totalSoftKeyCount" type="uint32"/>
+ <struct comment="" maxsize="32" name="definition" size_fieldname="totalSoftKeyCount" subtype="SoftKeyDefinition" type="struct">
+ <fields>
+ <string comment="" name="softKeyLabel" size="16" subtype="DisplayLabel" type="char"/>
+ <enum comment="" name="softKeyEvent" subtype="SoftKeyEvent" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="SoftKeyTemplateIndex">
+ <entries>
+ <entry name="SoftKeyTemplateIndex_Redial" text="Redial" value="1"/>
+ <entry name="SoftKeyTemplateIndex_NewCall" text="NewCall" value="2"/>
+ <entry name="SoftKeyTemplateIndex_Hold" text="Hold" value="3"/>
+ <entry name="SoftKeyTemplateIndex_Transfer" text="Transfer" value="4"/>
+ <entry name="SoftKeyTemplateIndex_CfwdAll" text="CfwdAll" value="5"/>
+ <entry name="SoftKeyTemplateIndex_CfwdBusy" text="CfwdBusy" value="6"/>
+ <entry name="SoftKeyTemplateIndex_CfwdNoAnswer" text="CfwdNoAnswer" value="7"/>
+ <entry name="SoftKeyTemplateIndex_BackSpace" text="BackSpace" value="8"/>
+ <entry name="SoftKeyTemplateIndex_EndCall" text="EndCall" value="9"/>
+ <entry name="SoftKeyTemplateIndex_Resume" text="Resume" value="10"/>
+ <entry name="SoftKeyTemplateIndex_Answer" text="Answer" value="11"/>
+ <entry name="SoftKeyTemplateIndex_Info" text="Info" value="12"/>
+ <entry name="SoftKeyTemplateIndex_Confrn" text="Confrn" value="13"/>
+ <entry name="SoftKeyTemplateIndex_Park" text="Park" value="14"/>
+ <entry name="SoftKeyTemplateIndex_Join" text="Join" value="15"/>
+ <entry name="SoftKeyTemplateIndex_MeetMe" text="MeetMe" value="16"/>
+ <entry name="SoftKeyTemplateIndex_PickUp" text="PickUp" value="17"/>
+ <entry name="SoftKeyTemplateIndex_GrpPickup" text="GrpPickup" value="18"/>
+ <entry name="SoftKeyTemplateIndex_Monitor" text="Monitor" value="19"/>
+ <entry name="SoftKeyTemplateIndex_CallBack" text="CallBack" value="20"/>
+ <entry name="SoftKeyTemplateIndex_Barge" text="Barge" value="21"/>
+ <entry name="SoftKeyTemplateIndex_DND" text="DND" value="22"/>
+ <entry name="SoftKeyTemplateIndex_ConfList" text="ConfList" value="23"/>
+ <entry name="SoftKeyTemplateIndex_Select" text="Select" value="24"/>
+ <entry name="SoftKeyTemplateIndex_Private" text="Private" value="25"/>
+ <entry name="SoftKeyTemplateIndex_Trnsfvm" text="Transfer Voicemail" value="26"/>
+ <entry name="SoftKeyTemplateIndex_DirTrfr" text="Direct Transfer" value="27"/>
+ <entry name="SoftKeyTemplateIndex_IDivert" text="Immediate Divert" value="28"/>
+ <entry name="SoftKeyTemplateIndex_VideoMode" text="Video Mode" value="29"/>
+ <entry name="SoftKeyTemplateIndex_Intrcpt" text="Intercept" value="30"/>
+ <entry name="SoftKeyTemplateIndex_Empty" text="Empty" value="31"/>
+ <entry name="SoftKeyTemplateIndex_Dial" text="Dial" value="32"/>
+ <entry name="SoftKeyTemplateIndex_CBarge" text="Conference Barge" value="33"/>
+ </entries>
+ </enum>
+ <enum name="SoftKeyInfoIndex">
+ <entries>
+ <entry name="SoftKeyInfoIndex_Redial" text="Redial" value="301"/>
+ <entry name="SoftKeyInfoIndex_NewCall" text="NewCall" value="302"/>
+ <entry name="SoftKeyInfoIndex_Hold" text="Hold" value="303"/>
+ <entry name="SoftKeyInfoIndex_Transfer" text="Transfer" value="304"/>
+ <entry name="SoftKeyInfoIndex_CfwdAll" text="CfwdAll" value="305"/>
+ <entry name="SoftKeyInfoIndex_CfwdBusy" text="CfwdBusy" value="306"/>
+ <entry name="SoftKeyInfoIndex_CfwdNoAnswer" text="CfwdNoAnswer" value="307"/>
+ <entry name="SoftKeyInfoIndex_BackSpace" text="BackSpace" value="308"/>
+ <entry name="SoftKeyInfoIndex_EndCall" text="EndCall" value="309"/>
+ <entry name="SoftKeyInfoIndex_Resume" text="Resume" value="310"/>
+ <entry name="SoftKeyInfoIndex_Answer" text="Answer" value="311"/>
+ <entry name="SoftKeyInfoIndex_Info" text="Info" value="312"/>
+ <entry name="SoftKeyInfoIndex_Confrn" text="Confrn" value="313"/>
+ <entry name="SoftKeyInfoIndex_Park" text="Park" value="314"/>
+ <entry name="SoftKeyInfoIndex_Join" text="Join" value="315"/>
+ <entry name="SoftKeyInfoIndex_MeetMe" text="MeetMe" value="316"/>
+ <entry name="SoftKeyInfoIndex_PickUp" text="PickUp" value="317"/>
+ <entry name="SoftKeyInfoIndex_GrpPickup" text="GrpPickup" value="318"/>
+ <entry name="SoftKeyInfoIndex_Monitor" text="Monitor" value="319"/>
+ <entry name="SoftKeyInfoIndex_CallBack" text="CallBack" value="320"/>
+ <entry name="SoftKeyInfoIndex_Barge" text="Barge" value="321"/>
+ <entry name="SoftKeyInfoIndex_DND" text="DND" value="322"/>
+ <entry name="SoftKeyInfoIndex_ConfList" text="ConfList" value="323"/>
+ <entry name="SoftKeyInfoIndex_Select" text="Select" value="324"/>
+ <entry name="SoftKeyInfoIndex_Private" text="Private" value="325"/>
+ <entry name="SoftKeyInfoIndex_Trnsfvm" text="Transfer Voicemail" value="326"/>
+ <entry name="SoftKeyInfoIndex_DirTrfr" text="Direct Transfer" value="327"/>
+ <entry name="SoftKeyInfoIndex_IDivert" text="Immediate Divert" value="328"/>
+ <entry name="SoftKeyInfoIndex_VideoMode" text="Video Mode" value="329"/>
+ <entry name="SoftKeyInfoIndex_Intrcpt" text="Intercept" value="330"/>
+ <entry name="SoftKeyInfoIndex_Empty" text="Empty" value="331"/>
+ <entry name="SoftKeyInfoIndex_Dial" text="Dial" value="332"/>
+ <entry name="SoftKeyInfoIndex_CBarge" text="Conference Barge" value="333"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SoftKeySetResMessage" opcode="0x0109" request="0x0025" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="softKeySets" subtype="SoftKeySets" type="struct">
+ <fields>
+ <integer comment="" name="softKeySetOffset" type="uint32"/>
+ <integer comment="" name="softKeySetCount" type="uint32"/>
+ <integer comment="" declare="yes" name="totalSoftKeySetCount" type="uint32"/>
+ <struct comment="" maxsize="16" name="definition" size_fieldname="totalSoftKeySetCount" subtype="SoftKeySetDefinition" type="struct">
+ <fields>
+ <enum comment="" name="softKeyTemplateIndex" size="16" subtype="SoftKeyTemplateIndex" type="uint8"/>
+ <enum comment="" name="softKeyInfoIndex" size="16" subtype="SoftKeyInfoIndex" type="uint16"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <bitfield name="SoftKeyMask">
+ <entries>
+ <entry comment="" name="SoftKeyMask_SoftKey1" text="SoftKey1" value="0x0001"/>
+ <entry comment="" name="SoftKeyMask_SoftKey2" text="SoftKey2" value="0x0002"/>
+ <entry comment="" name="SoftKeyMask_SoftKey3" text="SoftKey3" value="0x0004"/>
+ <entry comment="" name="SoftKeyMask_SoftKey4" text="SoftKey4" value="0x0008"/>
+ <entry comment="" name="SoftKeyMask_SoftKey5" text="SoftKey5" value="0x0010"/>
+ <entry comment="" name="SoftKeyMask_SoftKey6" text="SoftKey6" value="0x0020"/>
+ <entry comment="" name="SoftKeyMask_SoftKey7" text="SoftKey7" value="0x0040"/>
+ <entry comment="" name="SoftKeyMask_SoftKey8" text="SoftKey8" value="0x0080"/>
+ <entry comment="" name="SoftKeyMask_SoftKey9" text="SoftKey9" value="0x0100"/>
+ <entry comment="" name="SoftKeyMask_SoftKey10" text="SoftKey10" value="0x0200"/>
+ <entry comment="" name="SoftKeyMask_SoftKey11" text="SoftKey11" value="0x0400"/>
+ <entry comment="" name="SoftKeyMask_SoftKey12" text="SoftKey12" value="0x0800"/>
+ <entry comment="" name="SoftKeyMask_SoftKey13" text="SoftKey13" value="0x1000"/>
+ <entry comment="" name="SoftKeyMask_SoftKey14" text="SoftKey14" value="0x2000"/>
+ <entry comment="" name="SoftKeyMask_SoftKey15" text="SoftKey15" value="0x4000"/>
+ <entry comment="" name="SoftKeyMask_SoftKey16" text="SoftKey16" value="0x8000"/>
+ </entries>
+ </bitfield>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="SelectSoftKeysMessage" opcode="0x0110" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="softKeySetIndex" subtype="SoftKeySet" type="uint32"/>
+ <bitfield comment="" name="validKeyMask" size="uint32" subtype="SoftKeyMask" type="bitfield">
+ <entries>
+ <entry comment="" name="SoftKeyMask_SoftKey1" text="SoftKey1" value="0x00000001"/>
+ <entry comment="" name="SoftKeyMask_SoftKey2" text="SoftKey2" value="0x00000002"/>
+ <entry comment="" name="SoftKeyMask_SoftKey3" text="SoftKey3" value="0x00000004"/>
+ <entry comment="" name="SoftKeyMask_SoftKey4" text="SoftKey4" value="0x00000008"/>
+ <entry comment="" name="SoftKeyMask_SoftKey5" text="SoftKey5" value="0x00000010"/>
+ <entry comment="" name="SoftKeyMask_SoftKey6" text="SoftKey6" value="0x00000020"/>
+ <entry comment="" name="SoftKeyMask_SoftKey7" text="SoftKey7" value="0x00000040"/>
+ <entry comment="" name="SoftKeyMask_SoftKey8" text="SoftKey8" value="0x00000080"/>
+ <entry comment="" name="SoftKeyMask_SoftKey9" text="SoftKey9" value="0x00000100"/>
+ <entry comment="" name="SoftKeyMask_SoftKey10" text="SoftKey10" value="0x00000200"/>
+ <entry comment="" name="SoftKeyMask_SoftKey11" text="SoftKey11" value="0x00000400"/>
+ <entry comment="" name="SoftKeyMask_SoftKey12" text="SoftKey12" value="0x00000800"/>
+ <entry comment="" name="SoftKeyMask_SoftKey13" text="SoftKey13" value="0x00001000"/>
+ <entry comment="" name="SoftKeyMask_SoftKey14" text="SoftKey14" value="0x00002000"/>
+ <entry comment="" name="SoftKeyMask_SoftKey15" text="SoftKey15" value="0x00004000"/>
+ <entry comment="" name="SoftKeyMask_SoftKey16" text="SoftKey16" value="0x00008000"/>
+ </entries>
+ </bitfield>
+ </fields>
+ </message>
+ <enum name="DCallState">
+ <entries>
+ <entry comment="" name="DCallState_Idle" text="Idle" value="0x0000"/>
+ <entry comment="" name="DCallState_OffHook" text="OffHook" value="0x0001"/>
+ <entry comment="" name="DCallState_OnHook" text="OnHook" value="0x0002"/>
+ <entry comment="" name="DCallState_RingOut" text="RingOut" value="0x0003"/>
+ <entry comment="" name="DCallState_RingIn" text="RingIn" value="0x0004"/>
+ <entry comment="" name="DCallState_Connected" text="Connected" value="0x0005"/>
+ <entry comment="" name="DCallState_Busy" text="Busy" value="0x0006"/>
+ <entry comment="" name="DCallState_Congestion" text="Congestion" value="0x0007"/>
+ <entry comment="" name="DCallState_Hold" text="Hold" value="0x0008"/>
+ <entry comment="" name="DCallState_CallWaiting" text="CallWaiting" value="0x0009"/>
+ <entry comment="" name="DCallState_CallTransfer" text="CallTransfer" value="0x000a"/>
+ <entry comment="" name="DCallState_CallPark" text="CallPark" value="0x000b"/>
+ <entry comment="" name="DCallState_Proceed" text="Proceed" value="0x000c"/>
+ <entry comment="" name="DCallState_CallRemoteMultiline" text="CallRemoteMultiline" value="0x000d"/>
+ <entry comment="" name="DCallState_InvalidNumber" text="InvalidNumber" value="0x000e"/>
+ <entry comment="" name="DCallState_HoldRevert" text="HoldRevert" value="0x000f"/>
+ <entry comment="" name="DCallState_Whisper" text="Whisper" value="0x0010"/>
+ <entry comment="" name="DCallState_RemoteHold" text="RemoteHold" value="0x0011"/>
+ <entry comment="" name="DCallState_MaxState" text="MaxState" value="0x0012"/>
+ </entries>
+ </enum>
+ <enum name="CallPrivacy">
+ <entries>
+ <entry comment="" name="CallPrivacy_None" text="None" value="0x0000"/>
+ <entry comment="" name="CallPrivacy_Limited" text="Limited" value="0x0001"/>
+ <entry comment="" name="CallPrivacy_Full" text="Full" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="CallStateMessage" opcode="0x0111" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="callState" subtype="DCallState" type="uint32" make_additional_info="yes"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="privacy" subtype="CallPrivacy" type="uint32"/>
+ <struct comment="" name="precedence" type="struct">
+ <fields>
+ <integer comment="Precedence Level, MLPP priorities" name="precedenceLevel" type="uint32"/>
+ <integer comment="Precedence Domain" name="precedenceDomain" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="DisplayPromptStatusMessage" opcode="0x0112" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="timeOutValue" type="uint32"/>
+ <string comment="" name="promptStatus" size="32" subtype="DisplayLabel" type="char"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="ClearPromptStatusMessage" opcode="0x0113" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="DisplayNotifyMessage" opcode="0x0114" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="timeOutValue" type="uint32"/>
+ <string comment="" name="notify" size="32" subtype="DisplayLabel" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="ClearNotifyMessage" opcode="0x0115" type="CallControl"/>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="ActivateCallPlaneMessage" opcode="0x0116" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="DeactivateCallPlaneMessage" opcode="0x0117" type="CallControl"/>
+ <enum name="DeviceUnregisterStatus">
+ <entries>
+ <entry comment="" name="DeviceUnregisterStatus_Ok" text="Ok" value="0x0000"/>
+ <entry comment="" name="DeviceUnregisterStatus_Error" text="Error" value="0x0001"/>
+ <entry comment="" name="DeviceUnregisterStatus_NAK" text="NAK" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="UnregisterAckMessage" opcode="0x0118" request="0x0027" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="status" subtype="DeviceUnregisterStatus" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="BackSpaceResMessage" opcode="0x0119" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="RegisterTokenAck" opcode="0x011a" request="0x0029" type="RegistrationAndManagement"/>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="RegisterTokenReject" opcode="0x011b" request="0x0029" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="waitTimeBeforeNextReq" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="StartMediaFailureDetectionMessage" opcode="0x011c" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <integer comment="" name="milliSecondPacketSize" type="uint32"/>
+ <enum comment="" declare="yes" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <struct comment="" name="qualifierIn" subtype="Media_QualifierIncoming" type="struct">
+ <fields>
+ <enum comment="" name="ecValue" subtype="Media_EchoCancellation" type="uint32"/>
+ </fields>
+ <fields beginversion="0" endversion="10">
+ <enum comment="" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <union comment="" lookup_guide="compressionType" name="codecParamsUnion" subtype="Media_QualifierIncomingUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="DialedNumberMessage" opcode="0x011d" type="CallControl">
+ <fields endversion="17" fixed="yes">
+ <string comment="" name="dialedNumber" size="24" type="char" make_additional_info="yes"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ <fields beginversion="18" endversion="22" fixed="yes">
+ <string comment="" declare="yes" name="dialedNumber" size="VariableDirnumSize" type="char" make_additional_info="yes"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="UserToDeviceDataMessage" opcode="0x011e" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="userToDeviceData" subtype="UserAndDeviceData" type="struct">
+ <fields>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="transactionId" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <xml comment="" maxsize="2000" name="xmldata" size_fieldname="dataLength" type="xml"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="FeatureStatResMessage" opcode="0x011f" request="0x0034" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="featureIndex" req_resp_key="1" type="uint32"/>
+ <enum comment="" name="featureID" subtype="ButtonType" type="uint32"/>
+ <string comment="" name="featureTextLabel" size="40" type="char"/>
+ <integer comment="" name="featureStatus" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="DisplayPriNotifyMessage" opcode="0x0120" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="timeOutValue" type="uint32"/>
+ <integer comment="" name="priority" type="uint32"/>
+ <string comment="" name="notify" size="32" subtype="DisplayLabel" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="ClearPriNotifyMessage" opcode="0x0121" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="priority" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="EndOfAnnAck">
+ <entries>
+ <entry comment="" name="EndOfAnnAck_NoAnnAckRequired" text="NoAnnAckRequired" value="0x0000"/>
+ <entry comment="" name="EndOfAnnAck_AnnAckRequired" text="AnnAckRequired" value="0x0001"/>
+ </entries>
+ </enum>
+ <enum name="AnnPlayMode">
+ <entries>
+ <entry comment="" name="AnnPlayMode_XmlConfigMode" text="XmlConfigMode" value="0x0000"/>
+ <entry comment="" name="AnnPlayMode_OneShotMode" text="OneShotMode" value="0x0001"/>
+ <entry comment="" name="AnnPlayMode_ContinuousMode" text="ContinuousMode" value="0x0002"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="StartAnnouncementMessage" opcode="0x0122" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="AnnList" size="32" type="struct">
+ <fields>
+ <integer comment="" name="locale" type="uint32"/>
+ <integer comment="" name="country" type="uint32"/>
+ <enum comment="" name="toneAnnouncement" subtype="DeviceTone" type="uint32"/>
+ </fields>
+ </struct>
+ <enum comment="" name="annAckReq" subtype="EndOfAnnAck" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="matrixConfPartyID" size="16" type="uint32"/>
+ <integer comment="" name="hearingConfPartyMask" type="uint32"/>
+ <enum comment="" name="annPlayMode" subtype="AnnPlayMode" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="StopAnnouncementMessage" opcode="0x0123" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="PlayAnnStatus">
+ <entries>
+ <entry comment="" name="PlayAnnStatus_OK" text="OK" value="0x0000"/>
+ <entry comment="" name="PlayAnnStatus_Err" text="Err" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="AnnouncementFinishMessage" opcode="0x0124" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <enum comment="" name="annStatus" subtype="PlayAnnStatus" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="NotifyDtmfToneMessage" opcode="0x0127" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="tone" subtype="DeviceTone" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="passthruPartyID" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="SendDtmfToneMessage" opcode="0x0128" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="tone" subtype="DeviceTone" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="passthruPartyID" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="SubscribeDtmfPayloadReqMessage" opcode="0x0129" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="payloadDtmf" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="passthruPartyID" type="uint32"/>
+ <integer comment="" name="dtmfType" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SubscribeDtmfPayloadResMessage" opcode="0x012a" request="0x0129" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="payloadDtmf" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" declare="yes" name="passthruPartyID" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SubscribeDtmfPayloadErrMessage" opcode="0x012b" request="0x0129" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="payloadDtmf" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" declare="yes" name="passthruPartyID" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="UnSubscribeDtmfPayloadReqMessage" opcode="0x012c" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="payloadDtmf" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" declare="yes" name="passthruPartyID" req_resp_key="1" type="uint32"/>
+ <integer comment="" name="dtmfType" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="UnSubscribeDtmfPayloadResMessage" opcode="0x012d" request="0x012d" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="payloadDtmf" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" declare="yes" name="passthruPartyID" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="UnSubscribeDtmfPayloadErrMessage" opcode="0x012e" request="0x012d" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="payloadDtmf" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" declare="yes" name="passthruPartyID" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="ServiceURLStatResMessage" opcode="0x012f" request="0x0033" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="serviceURLIndex" req_resp_key="1" type="uint32"/>
+ <string comment="" name="serviceURL" size="256" type="char"/>
+ <string comment="" name="serviceURLDisplayName" size="40" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="CallSelectStatResMessage" opcode="0x0130" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="callSelectStat" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="OpenMultiMediaReceiveChannelMessage" opcode="0x0131" priority="send_immediate" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <enum comment="" declare="yes" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <struct comment="" declare="yes" name="payloadType" subtype="RTPPayloadType" type="struct">
+ <fields>
+ <integer comment="" name="payload_rfc_number" type="uint32"/>
+ <integer comment="" declare="yes" name="payloadType" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="isConferenceCreator" type="uint32"/>
+ <union comment="" lookup_guide="payloadType" name="capability" subtype="OpenMultiMediaReceiveChannelMessageUnion" type="union">
+ <fields>
+ <struct comment="" lookup_le="Media_Payload_AMR_WB" name="audioParameters" subtype="Media_AudioIncomingParameters" type="struct">
+ <fields>
+ <integer comment="" name="milliSecondPacketSize" type="uint32"/>
+ <struct comment="" name="qualifierIn" subtype="Media_QualifierIncoming" type="struct">
+ <fields>
+ <enum comment="" name="ecValue" subtype="Media_EchoCancellation" type="uint32"/>
+ </fields>
+ <fields beginversion="0" endversion="10">
+ <enum comment="" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <union comment="" lookup_guide="compressionType" name="codecParamsUnion" subtype="Media_QualifierIncomingUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ <struct comment="" lookup_ge="Media_Payload_H261" lookup_le="Media_Payload_H264_FEC" name="vidParameters" subtype="ChannelVideoParameters" type="struct">
+ <fields>
+ <integer comment="" name="bitRate" type="uint32"/>
+ <integer comment="" declare="yes" name="pictureFormatCount" type="uint32"/>
+ <struct comment="" maxsize="5" name="pictureFormat" size_fieldname="pictureFormatCount" subtype="PictureFormatSupport" type="struct">
+ <fields>
+ <integer comment="" name="format" type="uint32"/>
+ <integer comment="" name="MPI" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="confServiceNum" type="uint32"/>
+ <union comment="" lookup_guide="payloadType" name="capability" subtype="ChannelVideoParametersUnion" type="union">
+ <fields>
+ <struct comment="" lookup_eq="Media_Payload_H261" name="h261VideoCapability" subtype="H261VideoCapability" type="struct">
+ <fields>
+ <integer comment="Temporal spatial trade off capability" name="temporalSpatialTradeOffCapability" type="uint32"/>
+ <integer comment="Still Image Transmission" name="stillImageTransmission" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H263" name="h263VideoCapability" subtype="H263VideoCapability" type="struct">
+ <fields>
+ <bitfield comment="H263 Capability BitField" name="h263_capability_bitfield" size="uint32" subtype="Generic_Bitfield_32" type="bitfield">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x0010"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x0080"/>
+ <entry comment="" name="Generic_Bitfield_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="" name="Generic_Bitfield_Bit10" text="Bit10" value="0x0200"/>
+ <entry comment="" name="Generic_Bitfield_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="Generic_Bitfield_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="Generic_Bitfield_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="Generic_Bitfield_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="Generic_Bitfield_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="" name="Generic_Bitfield_Bit16" text="Bit16" value="0x8000"/>
+ <entry comment="" name="Generic_Bitfield_Bit17" text="Bit17" value="0x10000"/>
+ <entry comment="" name="Generic_Bitfield_Bit18" text="Bit18" value="0x20000"/>
+ <entry comment="" name="Generic_Bitfield_Bit19" text="Bit19" value="0x40000"/>
+ <entry comment="" name="Generic_Bitfield_Bit20" text="Bit20" value="0x80000"/>
+ <entry comment="" name="Generic_Bitfield_Bit21" text="Bit21" value="0x100000"/>
+ <entry comment="" name="Generic_Bitfield_Bit22" text="Bit22" value="0x200000"/>
+ <entry comment="" name="Generic_Bitfield_Bit23" text="Bit23" value="0x400000"/>
+ <entry comment="" name="Generic_Bitfield_Bit24" text="Bit24" value="0x800000"/>
+ <entry comment="" name="Generic_Bitfield_Bit25" text="Bit25" value="0x1000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit26" text="Bit26" value="0x2000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit27" text="Bit27" value="0x4000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit28" text="Bit28" value="0x8000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit29" text="Bit29" value="0x10000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit30" text="Bit30" value="0x20000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit31" text="Bit31" value="0x40000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit32" text="Bit32" value="0x80000000"/>
+ </entries>
+ </bitfield>
+ <integer comment="" name="annexNandWFutureUse" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H264" name="h264VideoCapability" subtype="H264VideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="profile" type="uint32"/>
+ <integer comment="" name="level" type="uint32"/>
+ <integer comment="" name="customMaxMBPS" type="uint32"/>
+ <integer comment="" name="customMaxFS" type="uint32"/>
+ <integer comment="" name="customMaxDPB" type="uint32"/>
+ <integer comment="" name="customMaxBRandCPB" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_Vieo" name="vieoVideoCapability" subtype="VieoVideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="modelNumber" type="uint32"/>
+ <integer comment="" name="bandwidth" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <struct comment="" lookup_ge="Media_Payload_Clear_Chan" name="dataParameters" subtype="ChannelDataParameters" type="struct">
+ <fields>
+ <integer comment="" name="protocolDependentData" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ <struct comment="" name="mRxMediaEncryptionKeyInfo" subtype="MediaEncryptionKeyInfo" type="struct">
+ <fields>
+ <enum comment="" name="algorithmID" subtype="MediaEncryptionAlgorithmType" type="uint32"/>
+ <integer comment="" declare="yes" name="keylen" type="uint16"/>
+ <integer comment="" declare="yes" name="saltlen" type="uint16"/>
+ <integer comment="" maxsize="16" name="key" size_fieldname="keylen" type="uint8"/>
+ <integer comment="" maxsize="16" name="salt" size_fieldname="saltlen" type="uint8"/>
+ <integer comment="" name="isMKIPresent" type="uint32"/>
+ <integer comment="" name="keyDerivationRate" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="streamPassThroughId" type="uint32"/>
+ <integer comment="" name="associatedStreamId" type="uint32"/>
+ </fields>
+ <fields beginversion="11">
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="sourceIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="sourcePortNumber" subtype="uint32" type="ipport" use_param="sourceIpAddr" make_additional_info="yes"/>
+ </fields>
+ <fields beginversion="16" endversion="22">
+ <enum comment="" name="requestedIpAddrType" subtype="IpAddrType" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="StartMultiMediaTransmissionMessage" opcode="0x0132" priority="send_immediate" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" declare="yes" name="passthruPartyID" req_resp_key="1" type="uint32"/>
+ <enum comment="" declare="yes" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <struct comment="" declare="yes" name="payloadType" subtype="RTPPayloadType" type="struct">
+ <fields>
+ <integer comment="" name="payload_rfc_number" type="uint32"/>
+ <integer comment="" declare="yes" name="payloadType" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="DSCPValue" type="uint32"/>
+ <union comment="" lookup_guide="payloadType" name="capability" subtype="StartMultiMediaTransmissionMessageUnion" type="union">
+ <fields>
+ <struct comment="" lookup_le="Media_Payload_AMR_WB" name="audioParameters" subtype="Media_AudioIncomingParameters" type="struct">
+ <fields>
+ <integer comment="" name="milliSecondPacketSize" type="uint32"/>
+ <struct comment="" name="qualifierIn" subtype="Media_QualifierIncoming" type="struct">
+ <fields>
+ <enum comment="" name="ecValue" subtype="Media_EchoCancellation" type="uint32"/>
+ </fields>
+ <fields beginversion="0" endversion="10">
+ <enum comment="" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ </fields>
+ <fields beginversion="11" endversion="22">
+ <union comment="" lookup_guide="compressionType" name="codecParamsUnion" subtype="Media_QualifierIncomingUnion" type="union">
+ <fields>
+ <enum comment="" lookup_eq="Media_Payload_G7231" name="g723BitRate" subtype="Media_G723BitRate" type="uint32"/>
+ <struct comment="" lookup_eq="*" name="codecParams" subtype="CodecParameters" type="struct">
+ <fields>
+ <integer comment="" name="codecMode" type="uint8"/>
+ <integer comment="" name="dynamicPayload" type="uint8"/>
+ <integer comment="" name="codecParam1" type="uint8"/>
+ <integer comment="" name="codecParam2" type="uint8"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ <struct comment="" lookup_ge="Media_Payload_H261" lookup_le="Media_Payload_H264_FEC" name="vidParameters" subtype="ChannelVideoParameters" type="struct">
+ <fields>
+ <integer comment="" name="bitRate" type="uint32"/>
+ <integer comment="" declare="yes" name="pictureFormatCount" type="uint32"/>
+ <struct comment="" maxsize="5" name="pictureFormat" size_fieldname="pictureFormatCount" subtype="PictureFormatSupport" type="struct">
+ <fields>
+ <integer comment="" name="format" type="uint32"/>
+ <integer comment="" name="MPI" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="confServiceNum" type="uint32"/>
+ <union comment="" lookup_guide="payloadType" name="capability" subtype="ChannelVideoParametersUnion" type="union">
+ <fields>
+ <struct comment="" lookup_eq="Media_Payload_H261" name="h261VideoCapability" subtype="H261VideoCapability" type="struct">
+ <fields>
+ <integer comment="Temporal spatial trade off capability" name="temporalSpatialTradeOffCapability" type="uint32"/>
+ <integer comment="Still Image Transmission" name="stillImageTransmission" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H263" name="h263VideoCapability" subtype="H263VideoCapability" type="struct">
+ <fields>
+ <bitfield comment="H263 Capability BitField" name="h263_capability_bitfield" size="uint32" subtype="Generic_Bitfield_32" type="bitfield">
+ <entries>
+ <entry comment="" name="Generic_Bitfield_Bit1" text="Bit1" value="0x0001"/>
+ <entry comment="" name="Generic_Bitfield_Bit2" text="Bit2" value="0x0002"/>
+ <entry comment="" name="Generic_Bitfield_Bit3" text="Bit3" value="0x0004"/>
+ <entry comment="" name="Generic_Bitfield_Bit4" text="Bit4" value="0x0008"/>
+ <entry comment="" name="Generic_Bitfield_Bit5" text="Bit5" value="0x0010"/>
+ <entry comment="" name="Generic_Bitfield_Bit6" text="Bit6" value="0x0020"/>
+ <entry comment="" name="Generic_Bitfield_Bit7" text="Bit7" value="0x0040"/>
+ <entry comment="" name="Generic_Bitfield_Bit8" text="Bit8" value="0x0080"/>
+ <entry comment="" name="Generic_Bitfield_Bit9" text="Bit9" value="0x0100"/>
+ <entry comment="" name="Generic_Bitfield_Bit10" text="Bit10" value="0x0200"/>
+ <entry comment="" name="Generic_Bitfield_Bit11" text="Bit11" value="0x0400"/>
+ <entry comment="" name="Generic_Bitfield_Bit12" text="Bit12" value="0x0800"/>
+ <entry comment="" name="Generic_Bitfield_Bit13" text="Bit13" value="0x1000"/>
+ <entry comment="" name="Generic_Bitfield_Bit14" text="Bit14" value="0x2000"/>
+ <entry comment="" name="Generic_Bitfield_Bit15" text="Bit15" value="0x4000"/>
+ <entry comment="" name="Generic_Bitfield_Bit16" text="Bit16" value="0x8000"/>
+ <entry comment="" name="Generic_Bitfield_Bit17" text="Bit17" value="0x10000"/>
+ <entry comment="" name="Generic_Bitfield_Bit18" text="Bit18" value="0x20000"/>
+ <entry comment="" name="Generic_Bitfield_Bit19" text="Bit19" value="0x40000"/>
+ <entry comment="" name="Generic_Bitfield_Bit20" text="Bit20" value="0x80000"/>
+ <entry comment="" name="Generic_Bitfield_Bit21" text="Bit21" value="0x100000"/>
+ <entry comment="" name="Generic_Bitfield_Bit22" text="Bit22" value="0x200000"/>
+ <entry comment="" name="Generic_Bitfield_Bit23" text="Bit23" value="0x400000"/>
+ <entry comment="" name="Generic_Bitfield_Bit24" text="Bit24" value="0x800000"/>
+ <entry comment="" name="Generic_Bitfield_Bit25" text="Bit25" value="0x1000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit26" text="Bit26" value="0x2000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit27" text="Bit27" value="0x4000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit28" text="Bit28" value="0x8000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit29" text="Bit29" value="0x10000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit30" text="Bit30" value="0x20000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit31" text="Bit31" value="0x40000000"/>
+ <entry comment="" name="Generic_Bitfield_Bit32" text="Bit32" value="0x80000000"/>
+ </entries>
+ </bitfield>
+ <integer comment="" name="annexNandWFutureUse" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_H264" name="h264VideoCapability" subtype="H264VideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="profile" type="uint32"/>
+ <integer comment="" name="level" type="uint32"/>
+ <integer comment="" name="customMaxMBPS" type="uint32"/>
+ <integer comment="" name="customMaxFS" type="uint32"/>
+ <integer comment="" name="customMaxDPB" type="uint32"/>
+ <integer comment="" name="customMaxBRandCPB" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="Media_Payload_Vieo" name="vieoVideoCapability" subtype="VieoVideoCapability" type="struct">
+ <fields>
+ <integer comment="" name="modelNumber" type="uint32"/>
+ <integer comment="" name="bandwidth" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ </fields>
+ </struct>
+ <struct comment="" lookup_ge="Media_Payload_Clear_Chan" name="dataParameters" subtype="ChannelDataParameters" type="struct">
+ <fields>
+ <integer comment="" name="protocolDependentData" type="uint32"/>
+ <integer comment="" name="maxBitRate" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </union>
+ <struct comment="" name="mTxMediaEncryptionKeyInfo" subtype="MediaEncryptionKeyInfo" type="struct">
+ <fields>
+ <enum comment="" name="algorithmID" subtype="MediaEncryptionAlgorithmType" type="uint32"/>
+ <integer comment="" declare="yes" name="keylen" type="uint16"/>
+ <integer comment="" declare="yes" name="saltlen" type="uint16"/>
+ <integer comment="" maxsize="16" name="key" size_fieldname="keylen" type="uint8"/>
+ <integer comment="" maxsize="16" name="salt" size_fieldname="saltlen" type="uint8"/>
+ <integer comment="" name="isMKIPresent" type="uint32"/>
+ <integer comment="" name="keyDerivationRate" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="streamPassThroughId" type="uint32"/>
+ <integer comment="" name="associatedStreamId" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="StopMultiMediaTransmissionMessage" opcode="0x0133" priority="send_immediate" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="portHandlingFlag" subtype="PortHandling" type="uint32"/>
+ </fields>
+ </message>
+ <enum define="yes" name="MiscCommandType">
+ <entries>
+ <entry comment="" name="MiscCommandType_videoFreezePicture" text="videoFreezePicture" value="0x0000"/>
+ <entry comment="" name="MiscCommandType_videoFastUpdatePicture" text="videoFastUpdatePicture" value="0x0001"/>
+ <entry comment="" name="MiscCommandType_videoFastUpdateGOB" text="videoFastUpdateGOB" value="0x0002"/>
+ <entry comment="" name="MiscCommandType_videoFastUpdateMB" text="videoFastUpdateMB" value="0x0003"/>
+ <entry comment="" name="MiscCommandType_lostPicture" text="lostPicture" value="0x0004"/>
+ <entry comment="" name="MiscCommandType_lostPartialPicture" text="lostPartialPicture" value="0x0005"/>
+ <entry comment="" name="MiscCommandType_recoveryReferencePicture" text="recoveryReferencePicture" value="0x0006"/>
+ <entry comment="" name="MiscCommandType_temporalSpatialTradeOff" text="temporalSpatialTradeOff" value="0x0007"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="MiscellaneousCommandMessage" opcode="0x0134" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="passthruPartyID" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" declare="yes" name="command" subtype="MiscCommandType" type="uint32"/>
+ <union comment="" lookup_guide="command" name="u" subtype="MiscellaneousCommandMessageUnion" type="union">
+ <fields>
+ <struct comment="" lookup_eq="MiscCommandType_videoFastUpdatePicture" name="videoFastUpdatePicture" subtype="VideoFastUpdateGOB" type="struct">
+ <fields>
+ <integer comment="" name="firstGOB" type="uint32"/>
+ <integer comment="" name="numberOfGOBs" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="MiscCommandType_videoFastUpdateGOB" name="videoFastUpdateGOB" subtype="VideoFastUpdateGOB" type="struct">
+ <fields>
+ <integer comment="" name="firstGOB" type="uint32"/>
+ <integer comment="" name="numberOfGOBs" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="MiscCommandType_videoFastUpdateMB" name="videoFastUpdateMB" subtype="VideoFastUpdateMB" type="struct">
+ <fields>
+ <integer comment="" name="firstGOB" type="uint32"/>
+ <integer comment="" name="firstMB" type="uint32"/>
+ <integer comment="" name="numberOfMBs" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="MiscCommandType_lostPicture" name="lostPicture" subtype="PictureReference" type="struct">
+ <fields>
+ <integer comment="" name="pictureNumber" type="uint32"/>
+ <integer comment="" name="longTermPictureIndex" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="MiscCommandType_lostPartialPicture" name="lostPartialPicture" type="struct">
+ <fields>
+ <struct comment="" name="pictureReference" subtype="PictureReference" type="struct">
+ <fields>
+ <integer comment="" name="pictureNumber" type="uint32"/>
+ <integer comment="" name="longTermPictureIndex" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="firstMB" type="uint32"/>
+ <integer comment="" name="numberOfMBs" type="uint32"/>
+ </fields>
+ </struct>
+ <struct comment="" lookup_eq="MiscCommandType_recoveryReferencePicture" name="recoveryReferencePictureValue" subtype="PictureReferenceValues" type="struct">
+ <fields>
+ <integer comment="" declare="yes" name="recoveryReferencePictureCount" type="uint32"/>
+ <struct comment="" maxsize="4" name="recoveryReferencePicture" size_fieldname="recoveryReferencePictureCount" subtype="PictureReference" type="struct">
+ <fields>
+ <integer comment="" name="pictureNumber" type="uint32"/>
+ <integer comment="" name="longTermPictureIndex" type="uint32"/>
+ </fields>
+ </struct>
+ </fields>
+ </struct>
+ <integer comment="" lookup_eq="MiscCommandType_temporalSpatialTradeOff" name="temporalSpatialTradeOff" type="uint32"/>
+ <integer comment="" lookup_eq="*" name="none" type="uint32"/>
+ </fields>
+ </union>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="FlowControlCommandMessage" opcode="0x0135" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="passthruPartyID" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="maximumBitRate" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="CloseMultiMediaReceiveChannelMessage" opcode="0x0136" priority="send_immediate" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="portHandlingFlag" subtype="PortHandling" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="CreateConferenceReqMessage" opcode="0x0137" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <integer comment="" name="numberOfReservedParticipants" type="uint32"/>
+ <enum comment="" name="resourceType" subtype="ResourceType" type="uint32"/>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <string comment="" name="appConfID" size="32" type="char"/>
+ <string comment="" name="appData" size="24" type="char"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <string comment="variable field size (max: 2000)" maxsize="2000" name="passThruData" size_fieldname="dataLength" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="DeleteConferenceReqMessage" opcode="0x0138" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="ModifyConferenceReqMessage" opcode="0x0139" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <integer comment="" name="numberOfReservedParticipants" type="uint32"/>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <string comment="" name="appConfID" size="32" type="char"/>
+ <string comment="" name="appData" size="24" type="char"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <string comment="variable field size (max: 2000)" maxsize="2000" name="passThruData" size_fieldname="dataLength" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="AddParticipantReqMessage" opcode="0x013a" type="IntraCCM">
+ <fields>
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <bitfield comment="" name="partyPIRestrictionBits" size="uint32" subtype="RestrictInformationType" type="bitfield">
+ <entries>
+ <entry comment="" name="RestrictInformationType_CallingPartyName" text="CallingPartyName" value="0x00000001"/>
+ <entry comment="" name="RestrictInformationType_CallingPartyNumber" text="CallingPartyNumber" value="0x00000002"/>
+ <entry comment="" name="RestrictInformationType_CallingParty" text="CallingParty" value="0x00000003"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyName" text="CalledPartyName" value="0x00000004"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyNumber" text="CalledPartyNumber" value="0x00000008"/>
+ <entry comment="" name="RestrictInformationType_CalledParty" text="CalledParty" value="0x0000000c"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyName" text="OriginalCalledPartyName" value="0x00000010"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyNumber" text="OriginalCalledPartyNumber" value="0x00000020"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledParty" text="OriginalCalledParty" value="0x00000030"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyName" text="LastRedirectPartyName" value="0x00000040"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyNumber" text="LastRedirectPartyNumber" value="0x00000080"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectParty" text="LastRedirectParty" value="0x000000c0"/>
+ <entry comment="" name="RestrictInformationType_BitsReserved" text="BitsReserved" value="0xffffff00"/>
+ </entries>
+ </bitfield>
+ <string comment="" name="participantName" size="40" type="char"/>
+ <string comment="" name="participantNumber" size="24" type="char"/>
+ <string comment="" name="conferenceName" size="32" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="DropParticipantReqMessage" opcode="0x013b" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="AuditConferenceReqMessage" opcode="0x013c" type="IntraCCM"/>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="AuditParticipantReqMessage" opcode="0x013d" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="ChangeParticipantReqMessage" opcode="0x013e" type="IntraCCM">
+ <fields>
+ <integer comment="Conference ID" declare="yes" name="conferenceId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <bitfield comment="" name="partyPIRestrictionBits" size="uint32" subtype="RestrictInformationType" type="bitfield">
+ <entries>
+ <entry comment="" name="RestrictInformationType_CallingPartyName" text="CallingPartyName" value="0x0001"/>
+ <entry comment="" name="RestrictInformationType_CallingPartyNumber" text="CallingPartyNumber" value="0x0002"/>
+ <entry comment="" name="RestrictInformationType_CallingParty" text="CallingParty" value="0x0003"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyName" text="CalledPartyName" value="0x0004"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyNumber" text="CalledPartyNumber" value="0x0008"/>
+ <entry comment="" name="RestrictInformationType_CalledParty" text="CalledParty" value="0x000c"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyName" text="OriginalCalledPartyName" value="0x0010"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyNumber" text="OriginalCalledPartyNumber" value="0x0020"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledParty" text="OriginalCalledParty" value="0x0030"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyName" text="LastRedirectPartyName" value="0x0040"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyNumber" text="LastRedirectPartyNumber" value="0x0080"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectParty" text="LastRedirectParty" value="0x00c0"/>
+ <entry comment="" name="RestrictInformationType_BitsReserved" text="BitsReserved" value="0xffffff00"/>
+ </entries>
+ </bitfield>
+ <string comment="" name="participantName" size="40" type="char"/>
+ <string comment="" name="participantNumber" size="24" type="char"/>
+ <string comment="" name="conferenceName" size="32" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="UserToDeviceDataMessageVersion1" opcode="0x013f" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="userToDeviceDataVersion1" subtype="UserAndDeviceDataVersion1" type="struct">
+ <fields>
+ <integer comment="" name="applicationId" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="transactionId" type="uint32"/>
+ <integer comment="" declare="yes" name="dataLength" type="uint32"/>
+ <enum comment="" name="sequenceFlag" subtype="SequenceFlag" type="uint32"/>
+ <integer comment="" name="displayPriority" type="uint32"/>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="appInstanceID" type="uint32"/>
+ <integer comment="" name="routingID" type="uint32"/>
+ <xml comment="" maxsize="2000" name="xmldata" size_fieldname="dataLength" type="xml"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="VideoDisplayCommandMessage" opcode="0x0140" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="layoutID" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="FlowControlNotifyMessage" opcode="0x0141" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="" name="passthruPartyID" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="" name="maximumBitRate" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="response" name="ConfigStatV2ResMessage" opcode="0x0142" request="0x000c" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="sid" type="struct">
+ <fields>
+ <string comment="Device Name" name="DeviceName" size="16" type="char"/>
+ <integer comment="User Id" name="reserved_for_future_use" type="uint32"/>
+ <integer comment="Device Instance" name="instance" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="numberOfLines" type="uint32"/>
+ <integer comment="" name="numberOfSpeedDials" type="uint32"/>
+ <string comment="" name="userName" size="121" type="char"/>
+ <string comment="" name="serverName" size="121" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="event" name="DisplayNotifyV2Message" opcode="0x0143" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="timeOutValue" type="uint32"/>
+ <string comment="" name="notify" size="97" subtype="DisplayLabel" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="event" name="DisplayPriNotifyV2Message" opcode="0x0144" type="RegistrationAndManagement">
+ <fields alignment="4" beginversion="0" endversion="22">
+ <integer comment="" name="timeOutValue" type="uint32"/>
+ <integer comment="" name="priority" type="uint32"/>
+ <string comment="" name="notify" size="97" subtype="DisplayLabel" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="event" name="DisplayPromptStatusV2Message" opcode="0x0145" type="CallControl">
+ <fields alignment="4" beginversion="0" endversion="22">
+ <integer comment="" name="timeOutValue" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <string comment="" name="promptStatus" size="97" subtype="DisplayLabel" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="response" name="FeatureStatV2ResMessage" opcode="0x0146" request="0x0034" type="RegistrationAndManagement">
+ <fields alignment="4" beginversion="0" endversion="22">
+ <integer comment="" name="featureIndex" type="uint32"/>
+ <enum comment="" name="featureID" subtype="ButtonType" type="uint32"/>
+ <integer comment="" name="featureStatus" type="uint32"/>
+ <string comment="" name="featureTextLabel" size="121" type="char"/>
+ </fields>
+ </message>
+ <bitfield name="LineTypeBits">
+ <entries>
+ <entry comment="" name="OrigDialed" text="Originally Dialed" value="0x00000001"/>
+ <entry comment="" name="RedirDialed" text="Redirected Dialed" value="0x00000002"/>
+ <entry comment="" name="CallingPartyNumber" text="CallingNum" value="0x00000004"/>
+ <entry comment="" name="CallingPartyName" text="CallingName" value="0x00000008"/>
+ </entries>
+ </bitfield>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="response" name="LineStatV2ResMessage" opcode="0x0147" request="0x000b" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="lineNumber" req_resp_key="1" type="uint32"/>
+ <bitfield comment="" name="lineType" size="uint32" subtype="LineTypeBits" type="bitfield">
+ <entries>
+ <entry comment="" name="OrigDialed" text="Originally Dialed" value="0x00000001"/>
+ <entry comment="" name="RedirDialed" text="Redirected Dialed" value="0x00000002"/>
+ <entry comment="" name="CallingPartyNumber" text="CallingNum" value="0x00000004"/>
+ <entry comment="" name="CallingPartyName" text="CallingName" value="0x00000008"/>
+ </entries>
+ </bitfield>
+ <string comment="" name="lineDirNumber" size="25" type="char"/>
+ <string comment="" name="lineFullyQualifiedDisplayName" size="40" type="char"/>
+ <string comment="" name="lineTextLabel" size="40" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="response" name="ServiceURLStatV2ResMessage" opcode="0x0148" request="0x0033" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="serviceURLIndex" req_resp_key="1" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="response" name="SpeedDialStatV2ResMessage" opcode="0x0149" request="0x000a" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="speedDialNumber" req_resp_key="1" type="uint32"/>
+ <string comment="" name="speedDialDirNumber" size="24" type="char"/>
+ <string comment="" name="speedDialDisplayName" size="40" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="yes" msgtype="event" name="CallInfoV2Message" opcode="0x014a" type="CallControl">
+ <fields alignment="2">
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="callType" subtype="CallType" type="uint32"/>
+ <integer comment="Original Called Party Redirect Reason" name="originalCdpnRedirectReason" type="uint32"/>
+ <integer comment="Last Redirecting Reason" name="lastRedirectingReason" type="uint32"/>
+ <integer comment="CallId" name="callInstance" type="uint32"/>
+ <enum comment="" name="callSecurityStatus" subtype="CallSecurityStatusType" type="uint32"/>
+ <bitfield comment="" name="partyPIRestrictionBits" size="uint32" subtype="RestrictInformationType" type="bitfield">
+ <entries>
+ <entry comment="" name="RestrictInformationType_CallingPartyName" text="CallingPartyName" value="0x0001"/>
+ <entry comment="" name="RestrictInformationType_CallingPartyNumber" text="CallingPartyNumber" value="0x0002"/>
+ <entry comment="" name="RestrictInformationType_CallingParty" text="CallingParty" value="0x0003"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyName" text="CalledPartyName" value="0x0004"/>
+ <entry comment="" name="RestrictInformationType_CalledPartyNumber" text="CalledPartyNumber" value="0x0008"/>
+ <entry comment="" name="RestrictInformationType_CalledParty" text="CalledParty" value="0x000c"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyName" text="OriginalCalledPartyName" value="0x0010"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledPartyNumber" text="OriginalCalledPartyNumber" value="0x0020"/>
+ <entry comment="" name="RestrictInformationType_OriginalCalledParty" text="OriginalCalledParty" value="0x0030"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyName" text="LastRedirectPartyName" value="0x0040"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectPartyNumber" text="LastRedirectPartyNumber" value="0x0080"/>
+ <entry comment="" name="RestrictInformationType_LastRedirectParty" text="LastRedirectParty" value="0x00c0"/>
+ <entry comment="" name="RestrictInformationType_BitsReserved" text="BitsReserved" value="0xffffff00"/>
+ </entries>
+ </bitfield>
+ <string comment="Calling Party Number" name="callingParty" size="VariableDirnumSize" type="char"/>
+ <string comment="Alternate Calling Party Number" name="AlternateCallingParty" size="VariableDirnumSize" type="char"/>
+ <string comment="CalledPartyNumber" name="calledParty" size="VariableDirnumSize" type="char"/>
+ <string comment="Original Called Party Number" name="originalCalledParty" size="VariableDirnumSize" type="char"/>
+ <string comment="Last Redirecting Party Number" name="lastRedirectingParty" size="VariableDirnumSize" type="char"/>
+ <string comment="Calling Party Voicemail Box Number" name="cgpnVoiceMailbox" size="VariableDirnumSize" type="char"/>
+ <string comment="Called Party Voicemail Box Number" name="cdpnVoiceMailbox" size="VariableDirnumSize" type="char"/>
+ <string comment="Original Called Party Voicemail Box Number" name="originalCdpnVoiceMailbox" size="VariableDirnumSize" type="char"/>
+ <string comment="Last Redirecting Parties Voicemail Box Number" name="lastRedirectingVoiceMailbox" size="VariableDirnumSize" type="char"/>
+ <string comment="Calling Party Name" name="callingPartyName" size="121" type="char"/>
+ <string comment="Called Party Name" name="calledPartyName" size="121" type="char"/>
+ <string comment="Original Called Party Name" name="originalCalledPartyName" size="121" type="char"/>
+ <string comment="Last Redirecting Party Name" name="lastRedirectingPartyName" size="121" type="char"/>
+ </fields>
+ <fields beginversion="17" endversion="22">
+ <string comment="" name="HuntPilotNumber" size="VariableDirnumSize" type="char"/>
+ <string comment="" name="HuntPilotName" size="121" type="char"/>
+ </fields>
+ <fields>
+ <code type="calling_and_called_party" use_param="callingParty,calledParty"/>
+ </fields>
+ </message>
+ <enum name="MediaTransportType">
+ <entries>
+ <entry comment="" name="MediaTransportType_RTP" text="RTP" value="0x0001"/>
+ <entry comment="" name="MediaTransportType_UDP" text="UDP" value="0x0002"/>
+ <entry comment="" name="MediaTransportType_TCP" text="TCP" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="request" name="PortReqMessage" opcode="0x014b" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <enum comment="" name="mediaTransportType" subtype="MediaTransportType" type="uint32"/>
+ <enum comment="" name="ipAddressType" subtype="IpAddrType" type="uint32"/>
+ <enum comment="" name="mediaType" subtype="MediaType" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="PortCloseMessage" opcode="0x014c" type="MediaControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <enum comment="" name="mediaType" subtype="MediaType" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="ResvStyle">
+ <entries>
+ <entry comment="" name="ResvStyle_FF" text="FF" value="0x0001"/>
+ <entry comment="" name="ResvStyle_SE" text="SE" value="0x0002"/>
+ <entry comment="" name="ResvStyle_WF" text="WF" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="QoSListenMessage" opcode="0x014d" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="resvStyle" subtype="ResvStyle" type="uint32"/>
+ <integer comment="" name="maxRetryNumber" type="int32"/>
+ <integer comment="" name="retryTimer" type="uint32"/>
+ <integer comment="" name="confirmRequired" type="uint32"/>
+ <integer comment="" name="preemptionPriority" type="uint32"/>
+ <integer comment="" name="defendingPriority" type="uint32"/>
+ <enum comment="" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="" name="averageBitRate" type="uint32"/>
+ <integer comment="" name="burstSize" type="uint32"/>
+ <integer comment="" name="peakRate" type="uint32"/>
+ <struct comment="" name="applicationID" subtype="RSVPApplicationID" type="struct">
+ <fields>
+ <string comment="" name="vendorID" size="32" type="char"/>
+ <string comment="" name="version" size="16" type="char"/>
+ <string comment="" name="appName" size="32" type="char"/>
+ <string comment="" name="subAppID" size="32" type="char"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="QoSPathMessage" opcode="0x014e" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="resvStyle" subtype="ResvStyle" type="uint32"/>
+ <integer comment="" name="maxRetryNumber" type="int32"/>
+ <integer comment="" name="retryTimer" type="uint32"/>
+ <integer comment="" name="preemptionPriority" type="uint32"/>
+ <integer comment="" name="defendingPriority" type="uint32"/>
+ <enum comment="" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="" name="averageBitRate" type="uint32"/>
+ <integer comment="" name="burstSize" type="uint32"/>
+ <integer comment="" name="peakRate" type="uint32"/>
+ <struct comment="" name="applicationID" subtype="RSVPApplicationID" type="struct">
+ <fields>
+ <string comment="" name="vendorID" size="32" type="char"/>
+ <string comment="" name="version" size="16" type="char"/>
+ <string comment="" name="appName" size="32" type="char"/>
+ <string comment="" name="subAppID" size="32" type="char"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="QoSTeardownMessage" opcode="0x014f" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="direction" subtype="RSVPDirection" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="UpdateDSCPMessage" opcode="0x0150" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <integer comment="" name="DSCPValue" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="event" name="QoSModifyMessage" opcode="0x0151" type="IntraCCM">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <integer comment="PassThrough PartyId" name="passThroughPartyId" type="uint32"/>
+ <ipv4or6 comment="ipaddress in big endian" endianness="big" name="remoteIpAddr" subtype="uint32" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="" name="remotePortNumber" subtype="uint32" type="ipport" use_param="remoteIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="direction" subtype="RSVPDirection" type="uint32"/>
+ <enum comment="" name="compressionType" subtype="Media_PayloadType" type="uint32"/>
+ <integer comment="" name="averageBitRate" type="uint32"/>
+ <integer comment="" name="burstSize" type="uint32"/>
+ <integer comment="" name="peakRate" type="uint32"/>
+ <struct comment="" name="applicationID" subtype="RSVPApplicationID" type="struct">
+ <fields>
+ <string comment="" name="vendorID" size="32" type="char"/>
+ <string comment="" name="version" size="16" type="char"/>
+ <string comment="" name="appName" size="32" type="char"/>
+ <string comment="" name="subAppID" size="32" type="char"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="SubscribeCause">
+ <entries>
+ <entry comment="" name="SubscribeCause_OK" text="OK" value="0x0000"/>
+ <entry comment="" name="SubscribeCause_RouteFail" text="RouteFail" value="0x0001"/>
+ <entry comment="" name="SubscribeCause_AuthFail" text="AuthFail" value="0x0002"/>
+ <entry comment="" name="SubscribeCause_Timeout" text="Timeout" value="0x0003"/>
+ <entry comment="" name="SubscribeCause_TrunkTerm" text="TrunkTerm" value="0x0004"/>
+ <entry comment="" name="SubscribeCause_TrunkForbidden" text="TrunkForbidden" value="0x0005"/>
+ <entry comment="" name="SubscribeCause_Throttle" text="Throttle" value="0x0006"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SubscriptionStatResMessage" opcode="0x0152" request="0x0048" status="result" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" declare="yes" name="transactionId" req_resp_key="1" type="uint32"/>
+ <enum comment="" name="subscriptionFeatureID" subtype="SubscriptionFeatureID" type="uint32"/>
+ <integer comment="" name="timer" type="uint32"/>
+ <enum comment="" name="cause" subtype="SubscribeCause" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="NotificationMessage" opcode="0x0153" type="RegistrationAndManagement">
+ <fields alignment="4" beginversion="0" endversion="22">
+ <integer comment="" name="transactionId" type="uint32"/>
+ <enum comment="" name="subscriptionFeatureID" subtype="SubscriptionFeatureID" type="uint32"/>
+ <integer comment="" name="notificationStatus" type="uint32"/>
+ <string comment="" name="text" size="97" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="StartMediaTransmissionAckMessage" opcode="0x0154" request="0x008a" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+        <ipv4or6 comment="ipaddress in big endian" endianness="big" name="transmitIpAddr" subtype="IpAddress" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="transmit Port" declare="yes" name="portNumber" subtype="uint32" type="ipport" use_param="transmitIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="mediaTransmissionStatus" subtype="MediaStatus" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="StartMultiMediaTransmissionAckMessage" opcode="0x0155" request="0x0132" type="MediaControl">
+ <fields>
+ <integer comment="Conference ID" name="conferenceId" type="uint32"/>
+ <integer comment="PassThrough PartyId" declare="yes" name="passThroughPartyId" req_resp_key="1" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+        <ipv4or6 comment="ipaddress in big endian" endianness="big" name="transmitIpAddr" subtype="IpAddress" type="ipaddr" make_additional_info="yes"/>
+ <integer comment="transmit Port" declare="yes" name="portNumber" subtype="uint32" type="ipport" use_param="transmitIpAddr" make_additional_info="yes"/>
+ <enum comment="" name="multimediaTransmissionStatus" subtype="MediaStatus" type="uint32"/>
+ </fields>
+ </message>
+ <enum name="CallHistoryDisposition">
+ <entries>
+ <entry comment="" name="CallHistoryDisposition_UnknownDisp" text="UnknownDisp" value="0xffff"/>
+ <entry comment="" name="CallHistoryDisposition_Ignore" text="Ignore" value="0x0000"/>
+ <entry comment="" name="CallHistoryDisposition_PlacedCalls" text="PlacedCalls" value="0x0001"/>
+ <entry comment="" name="CallHistoryDisposition_ReceivedCalls" text="ReceivedCalls" value="0x0002"/>
+ <entry comment="" name="CallHistoryDisposition_MissedCalls" text="MissedCalls" value="0x0003"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="CallHistoryInfoMessage" opcode="0x0156" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <enum comment="" name="callHistoryDisposition" subtype="CallHistoryDisposition" type="uint32"/>
+ <integer comment="LineId" name="lineInstance" type="uint32"/>
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="Sent by wifi devices, contains xml information about connected SSID" direction="pbx2dev" dynamic="no" msgtype="event" name="LocationInfoMessage" opcode="0x0157" type="RegistrationAndManagement">
+ <fields alignment="4" beginversion="0" endversion="22">
+ <string comment="" name="locationInfo" size="2401" type="char"/>
+ </fields>
+ </message>
+ <enum name="MwiNotificationResult">
+ <entries>
+ <entry comment="" name="MwiNotificationResult_Ok" text="Ok" value="0x0000"/>
+ <entry comment="" name="MwiNotificationResult_GeneralError" text="GeneralError" value="0x0001"/>
+ <entry comment="" name="MwiNotificationResult_RequestRejected" text="RequestRejected" value="0x0002"/>
+ <entry comment="" name="MwiNotificationResult_VmCountOutOfBounds" text="VmCountOutOfBounds" value="0x0003"/>
+ <entry comment="" name="MwiNotificationResult_FaxCountOutOfBounds" text="FaxCountOutOfBounds" value="0x0004"/>
+ <entry comment="" name="MwiNotificationResult_InvalidPriorityVmCount" text="InvalidPriorityVmCount" value="0x0005"/>
+ <entry comment="" name="MwiNotificationResult_InvalidPriorityFaxCount" text="InvalidPriorityFaxCount" value="0x0006"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="MwiResMessage" opcode="0x0158" request="0x004c" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <string comment="" name="mwiTargetNumber" size="25" type="char"/>
+ <enum comment="" name="mwi_notification_result" subtype="MwiNotificationResult" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="event" name="AddOnDeviceCapabilitiesMessage" opcode="0x0159" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer name="unknown1_0159" type="uint32"/>
+ <integer name="unknown2_0159" type="uint32"/>
+ <integer name="unknown3_0159" type="uint32"/>
+ <string name="unknownString_0159" size="152" type="char"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="EnhancedAlarmMessage" opcode="0x015a" type="RegistrationAndManagement">
+ <fields alignment="4" beginversion="0" endversion="22">
+ <xml comment="" name="alarmInfo" size="2048" type="xml"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="request" name="CallCountReqMessage" opcode="0x015e" type="CallControl"/>
+ <message comment="" direction="pbx2pbx" dynamic="no" msgtype="response" name="CallCountRespMessage" opcode="0x015f" request="0x015e" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="Total Number of Configured Lines" name="totalNumOfConfiguredLines" type="uint32"/>
+ <integer comment="Starting Line Instance" name="startingLineInstance" type="uint32"/>
+ <integer comment="Number of Line Data Entries" declare="yes" name="lineDataEntries" type="uint32"/>
+ <struct comment="" maxsize="42" name="lineData" size_fieldname="lineDataEntries" subtype="LineData" type="struct">
+ <fields>
+ <integer comment="" name="maxNumCalls" type="uint16"/>
+ <integer comment="" name="busyTrigger" type="uint16"/>
+ </fields>
+ </struct>
+ </fields>
+ </message>
+ <enum name="RecordingStatus">
+ <entries>
+ <entry comment="" name="RecordingStatus__OFF" text="_OFF" value="0x0000"/>
+ <entry comment="" name="RecordingStatus__ON" text="_ON" value="0x0001"/>
+ </entries>
+ </enum>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="event" name="RecordingStatusMessage" opcode="0x0160" type="CallControl">
+ <fields beginversion="0" endversion="22">
+ <integer comment="CallId" name="callReference" type="uint32"/>
+ <enum comment="" name="recording_status" subtype="RecordingStatus" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="dev2pbx" dynamic="no" msgtype="request" name="SPCPRegisterTokenReq" opcode="0x8000" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <struct comment="" name="sid" type="struct">
+ <fields>
+ <string comment="Device Name" name="DeviceName" size="16" type="char"/>
+ <integer comment="User Id" name="reserved_for_future_use" type="uint32"/>
+ <integer comment="Device Instance" name="instance" type="uint32"/>
+ </fields>
+ </struct>
+ <integer comment="" name="stationIpAddr" type="uint32"/>
+ <enum comment="" name="deviceType" subtype="DeviceType" type="uint32"/>
+ <integer comment="" name="maxStreams" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SPCPRegisterTokenAck" opcode="0x8100" request="0x8000" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="features" type="uint32"/>
+ </fields>
+ </message>
+ <message comment="" direction="pbx2dev" dynamic="no" msgtype="response" name="SPCPRegisterTokenReject" opcode="0x8101" request="0x8000" type="RegistrationAndManagement">
+ <fields beginversion="0" endversion="22">
+ <integer comment="" name="waitTimeBeforeNextReq" type="uint32"/>
+ </fields>
+ </message>
+</messages>
diff --git a/tools/WiresharkXML.py b/tools/WiresharkXML.py
new file mode 100755
index 0000000..02d2cad
--- /dev/null
+++ b/tools/WiresharkXML.py
@@ -0,0 +1,312 @@
+"""
+Routines for reading PDML produced from TShark.
+
+Copyright (c) 2003, 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
+
+SPDX-License-Identifier: GPL-2.0-or-later
+"""
+
+import sys
+import xml.sax
+from xml.sax.saxutils import quoteattr
+import cStringIO as StringIO
+
+class CaptureFile:
+ pass
+
+class FoundItException(Exception):
+ """Used internally for exiting a tree search"""
+ pass
+
+class PacketList:
+ """Holds Packet objects, and has methods for finding
+ items within it."""
+
+ def __init__(self, children=None):
+ if children is None:
+ self.children = []
+ else:
+ self.children = children
+
+ def __getitem__(self, index):
+ """We act like a list."""
+ return self.children[index]
+
+ def __len__(self):
+ return len(self.children)
+
+ def item_exists(self, name):
+ """Does an item with name 'name' exist in this
+ PacketList? Returns True or False."""
+ for child in self.children:
+ if child.name == name:
+ return True
+
+ try:
+ for child in self.children:
+ child._item_exists(name)
+
+ except FoundItException:
+ return True
+
+ return False
+
+ def _item_exists(self, name):
+ for child in self.children:
+ if child.name == name:
+ raise FoundItException
+ child._item_exists(name)
+
+
+ def get_items(self, name, items=None):
+ """Return all items that match the name 'name'.
+ They are returned in order of a depth-first-search."""
+ if items is None:
+ top_level = 1
+ items = []
+ else:
+ top_level = 0
+
+ for child in self.children:
+ if child.name == name:
+ items.append(child)
+ child.get_items(name, items)
+
+ if top_level:
+ return PacketList(items)
+
+ def get_items_before(self, name, before_item, items=None):
+ """Return all items that match the name 'name' that
+ exist before the before_item. The before_item is an object.
+ They results are returned in order of a depth-first-search.
+ This function allows you to find fields from protocols that occur
+ before other protocols. For example, if you have an HTTP
+ protocol, you can find all tcp.dstport fields *before* that HTTP
+ protocol. This helps analyze in the presence of tunneled protocols."""
+ if items is None:
+ top_level = 1
+ items = []
+ else:
+ top_level = 0
+
+ for child in self.children:
+ if top_level == 1 and child == before_item:
+ break
+ if child.name == name:
+ items.append(child)
+ # Call get_items because the 'before_item' applies
+ # only to the top level search.
+ child.get_items(name, items)
+
+ if top_level:
+ return PacketList(items)
+
+
+class ProtoTreeItem(PacketList):
+ def __init__(self, xmlattrs):
+ PacketList.__init__(self)
+
+ self.name = xmlattrs.get("name", "")
+ self.showname = xmlattrs.get("showname", "")
+ self.pos = xmlattrs.get("pos", "")
+ self.size = xmlattrs.get("size", "")
+ self.value = xmlattrs.get("value", "")
+ self.show = xmlattrs.get("show", "")
+ self.hide = xmlattrs.get("hide", "")
+
+ def add_child(self, child):
+ self.children.append(child)
+
+ def get_name(self):
+ return self.name
+
+ def get_showname(self):
+ return self.showname
+
+ def get_pos(self):
+ return self.pos
+
+ def get_size(self):
+ return self.size
+
+ def get_value(self):
+ return self.value
+
+ def get_show(self):
+ return self.show
+
+ def get_hide(self):
+ return self.hide
+
+ def dump(self, fh=sys.stdout):
+ if self.name:
+ print >> fh, " name=%s" % (quoteattr(self.name),),
+
+ if self.showname:
+ print >> fh, "showname=%s" % (quoteattr(self.showname),),
+
+ if self.pos:
+ print >> fh, "pos=%s" % (quoteattr(self.pos),),
+
+ if self.size:
+ print >> fh, "size=%s" % (quoteattr(self.size),),
+
+ if self.value:
+ print >> fh, "value=%s" % (quoteattr(self.value),),
+
+ if self.show:
+ print >> fh, "show=%s" % (quoteattr(self.show),),
+
+ if self.hide:
+ print >> fh, "hide=%s" % (quoteattr(self.hide),),
+
+class Packet(ProtoTreeItem, PacketList):
+ def dump(self, fh=sys.stdout, indent=0):
+ print >> fh, " " * indent, "<packet>"
+ indent += 1
+ for child in self.children:
+ child.dump(fh, indent)
+ print >> fh, " " * indent, "</packet>"
+
+
+class Protocol(ProtoTreeItem):
+
+ def dump(self, fh=sys.stdout, indent=0):
+ print >> fh, "%s<proto " % (" " * indent,),
+
+ ProtoTreeItem.dump(self, fh)
+
+ print >> fh, '>'
+
+ indent += 1
+ for child in self.children:
+ child.dump(fh, indent)
+ print >> fh, " " * indent, "</proto>"
+
+
+class Field(ProtoTreeItem):
+
+ def dump(self, fh=sys.stdout, indent=0):
+ print >> fh, "%s<field " % (" " * indent,),
+
+ ProtoTreeItem.dump(self, fh)
+
+ if self.children:
+ print >> fh, ">"
+ indent += 1
+ for child in self.children:
+ child.dump(fh, indent)
+ print >> fh, " " * indent, "</field>"
+
+ else:
+ print >> fh, "/>"
+
+
+class ParseXML(xml.sax.handler.ContentHandler):
+
+ ELEMENT_FILE = "pdml"
+ ELEMENT_FRAME = "packet"
+ ELEMENT_PROTOCOL = "proto"
+ ELEMENT_FIELD = "field"
+
+ def __init__(self, cb):
+ self.cb = cb
+ self.chars = ""
+ self.element_stack = []
+
+ def startElement(self, name, xmlattrs):
+ self.chars = ""
+
+ if name == self.ELEMENT_FILE:
+ # Eventually, we should check version number of pdml here
+ elem = CaptureFile()
+
+ elif name == self.ELEMENT_FRAME:
+ elem = Packet(xmlattrs)
+
+ elif name == self.ELEMENT_PROTOCOL:
+ elem = Protocol(xmlattrs)
+
+ elif name == self.ELEMENT_FIELD:
+ elem = Field(xmlattrs)
+
+ else:
+ sys.exit("Unknown element: %s" % (name,))
+
+ self.element_stack.append(elem)
+
+
+ def endElement(self, name):
+ elem = self.element_stack.pop()
+
+# if isinstance(elem, Field):
+# if elem.get_name() == "frame.number":
+# print >> sys.stderr, "Packet:", elem.get_show()
+
+ # Add element as child to previous element as long
+ # as there is more than 1 element in the stack. Only
+ # one element in the stack means that the element in
+ # the stack is the single CaptureFile element, and we don't
+ # want to add this element to that, as we only want one
+ # Packet element in memory at a time.
+ if len(self.element_stack) > 1:
+ parent_elem = self.element_stack[-1]
+ parent_elem.add_child(elem)
+
+ self.chars = ""
+
+ # If we just finished a Packet element, hand it to the
+ # user's callback.
+ if isinstance(elem, Packet):
+ self.cb(elem)
+
+ def characters(self, chars):
+ self.chars = self.chars + chars
+
+
+def _create_parser(cb):
+ """Internal function for setting up the SAX parser."""
+
+ # Create a parser
+ parser = xml.sax.make_parser()
+
+ # Create the handler
+ handler = ParseXML(cb)
+
+ # Tell the parser to use our handler
+ parser.setContentHandler(handler)
+
+ # Don't fetch the DTD, in case it is listed
+ parser.setFeature(xml.sax.handler.feature_external_ges, False)
+
+ return parser
+
+def parse_fh(fh, cb):
+ """Parse a PDML file, given filehandle, and call the callback function (cb),
+ once for each Packet object."""
+
+ parser = _create_parser(cb)
+
+ # Parse the file
+ parser.parse(fh)
+
+ # Close the parser ; this is erroring out, but I'm not sure why.
+ #parser.close()
+
+def parse_string(text, cb):
+ """Parse the PDML contained in a string."""
+ stream = StringIO.StringIO(text)
+ parse_fh(stream, cb)
+
+def _test():
+ import sys
+
+ def test_cb(obj):
+ pass
+
+ filename = sys.argv[1]
+ fh = open(filename, "r")
+ parse_fh(fh, test_cb)
+
+if __name__ == '__main__':
+ _test()
diff --git a/tools/alpine-setup.sh b/tools/alpine-setup.sh
new file mode 100755
index 0000000..4622035
--- /dev/null
+++ b/tools/alpine-setup.sh
@@ -0,0 +1,129 @@
#!/bin/bash
# Setup development environment on alpine systems
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#

# Abort on any error or use of an unset variable; fail a pipeline if any
# stage of it fails.
set -e -u -o pipefail

function print_usage() {
    printf "\\nUtility to setup a alpine system for Wireshark Development.\\n"
    printf "The basic usage installs the needed software\\n\\n"
    printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
    printf "\\t--install-optional: install optional software as well\\n"
    printf "\\t--install-all: install everything\\n"
    printf "\\t[other]: other options are passed as-is to apk\\n"
}

# Parse command-line flags; anything unrecognized is collected in OPTIONS
# and forwarded verbatim to apk.
ADDITIONAL=0
OPTIONS=
for arg; do
    case $arg in
        --help)
            print_usage
            exit 0
            ;;
        --install-optional)
            ADDITIONAL=1
            ;;
        --install-all)
            ADDITIONAL=1
            ;;
        *)
            OPTIONS="$OPTIONS $arg"
            ;;
    esac
done

# Check if the user is root (apk needs root to install packages)
if [ "$(id -u)" -ne 0 ]
then
    echo "You must be root."
    exit 1
fi

# Packages required for a minimal Wireshark build.
BASIC_LIST="
    cmake
    ninja
    gcc
    g++
    glib-dev
    libgcrypt-dev
    flex
    tiff-dev
    c-ares-dev
    pcre2-dev
    qt5-qtbase-dev
    qt5-qttools-dev
    qt5-qtmultimedia-dev
    qt5-qtsvg-dev
    speexdsp-dev
    python3
    "

# Optional capture/codec/doc packages, installed with --install-optional.
ADDITIONAL_LIST="
    git
    asciidoctor
    libssh-dev
    spandsp-dev
    libcap-dev
    libpcap-dev
    libxml2-dev
    libmaxminddb-dev
    krb5-dev
    lz4-dev
    gnutls-dev
    snappy-dev
    nghttp2-dev
    nghttp3-dev
    lua5.2-dev
    libnl3-dev
    sbc-dev
    minizip-dev
    brotli-dev
    perl
    py3-pytest
    py3-pytest-xdist
    "

# Uncomment to add PNG compression utilities used by compress-pngs:
# ADDITIONAL_LIST="$ADDITIONAL_LIST \
#     advancecomp \
#     optipng \
#     oxipng \
#     pngcrush"

# Adds package $2 to list variable $1 if the package is found.
# If $3 is given, then this version requirement must be satisfied.
add_package() {
    local list="$1" pkgname="$2"

    # fail if the package is not known
    apk list $pkgname &> /dev/null || return 1

    # package is found, append it to list
    eval "${list}=\"\${${list}} \${pkgname}\""
}

ACTUAL_LIST=$BASIC_LIST

# Now arrange for optional support libraries
if [ $ADDITIONAL -ne 0 ]
then
    ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi

# Refresh the package index, then install; word splitting of the list
# variables is intentional here.
apk update || exit 2
apk add $ACTUAL_LIST $OPTIONS || exit 2

if [ $ADDITIONAL -eq 0 ]
then
    printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi
diff --git a/tools/arch-setup.sh b/tools/arch-setup.sh
new file mode 100755
index 0000000..1443c52
--- /dev/null
+++ b/tools/arch-setup.sh
@@ -0,0 +1,136 @@
#!/bin/bash
# Setup development environment on Arch Linux
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#

# Abort on any error or use of an unset variable; fail a pipeline if any
# stage of it fails.
set -e -u -o pipefail

function print_usage() {
    printf "\\nUtility to setup a pacman-based system for Wireshark development.\\n"
    printf "The basic usage installs the needed software\\n\\n"
    printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
    printf "\\t--install-optional: install optional software as well\\n"
    printf "\\t--install-test-deps: install packages required to run all tests\\n"
    printf "\\t--install-all: install everything\\n"
    printf "\\t[other]: other options are passed as-is to pacman\\n"
    printf "\\tPass --noconfirm to bypass any \"are you sure?\" messages.\\n"
}

# Parse command-line flags; anything unrecognized is collected in OPTIONS
# and forwarded verbatim to pacman.
ADDITIONAL=0
TESTDEPS=0
AUR=0
OPTIONS=
for arg; do
    case $arg in
        --help)
            print_usage
            exit 0
            ;;
        --install-optional)
            ADDITIONAL=1
            ;;
        --install-test-deps)
            TESTDEPS=1
            ;;
        --install-all)
            ADDITIONAL=1
            TESTDEPS=1
            AUR=1
            ;;
        *)
            OPTIONS="$OPTIONS $arg"
            ;;
    esac
done

# Check if the user is root (pacman needs root to install packages)
if [ "$(id -u)" -ne 0 ]
then
    echo "You must be root."
    exit 1
fi

# Packages required for a standard Wireshark build.
BASIC_LIST="base-devel \
    bcg729 \
    brotli \
    c-ares \
    cmake \
    git \
    glib2 \
    gnutls \
    krb5 \
    libcap \
    libgcrypt \
    libilbc \
    libmaxminddb \
    libnghttp2 \
    libnghttp3 \
    libnl \
    libpcap \
    libssh \
    libxml2 \
    lua52 \
    lz4 \
    minizip \
    ninja \
    pcre2 \
    python \
    qt6-base \
    qt6-multimedia \
    qt6-tools \
    qt6-5compat \
    sbc \
    snappy \
    spandsp \
    speexdsp \
    zlib \
    zstd"

# Documentation/tooling extras, installed with --install-optional.
ADDITIONAL_LIST="asciidoctor \
    ccache \
    docbook-xml \
    docbook-xsl \
    doxygen \
    libxslt \
    perl"

# Needed only to run the test suite (--install-test-deps).
TESTDEPS_LIST="python-pytest \
    python-pytest-xdist"

ACTUAL_LIST=$BASIC_LIST

if [ $ADDITIONAL -ne 0 ]
then
    ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi

if [ $TESTDEPS -ne 0 ]
then
    ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST"
fi

# Partial upgrades are unsupported.
pacman --sync --refresh --sysupgrade --needed $ACTUAL_LIST $OPTIONS || exit 2

if [ $ADDITIONAL -eq 0 ]
then
    printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi

if [ $TESTDEPS -eq 0 ]
then
    printf "\n*** Test deps not installed. Rerun with --install-test-deps to have them.\n"
fi

if [ $AUR -ne 0 ]
then
    printf "\n*** These and other packages may also be found in the AUR: libsmi.\n"
fi
diff --git a/tools/asn2deb b/tools/asn2deb
new file mode 100755
index 0000000..926d34e
--- /dev/null
+++ b/tools/asn2deb
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+
+# asn2deb - quick hack by W. Borgert <debacle@debian.org> to create
+# Debian GNU/Linux packages from ASN.1 files for Wireshark.
+# Copyright 2004, W. Borgert
+
+# ASN.1 module for Wireshark, use of snacc type table:
+# Copyright 2003, Matthijs Melchior <matthijs.melchior@xs4all.nl>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.com>
+# Copyright 1998 Gerald Combs
+
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import getopt, os, string, sys, time
+
# Version banner printed for --version.
scriptinfo = """asn2deb version 2004-02-17
Copyright 2004, W. Borgert
Free software, released under the terms of the GPL."""

# Global option defaults; overwritten in place by process_opts().
options = {'asn': None,
           'dbopts': "",
           'email': "invalid@invalid.invalid",
           'help': 0,
           'name': "No Name",
           'preserve': 0,
           'version': 0}
+
def create_file(filename, content, mode = None):
    """Create *filename* containing *content*.

    If the global --preserve option is set, an existing file is left
    untouched.  When *mode* is given, the file's permissions are set to it
    (e.g. 0o755 for the executable debian/rules).
    """
    global options
    if options['preserve'] and os.path.isfile(filename):
        return
    # Use a context manager so the file is closed even if write() raises
    # (the original open/write/close sequence leaked the handle on error).
    with open(filename, 'w') as f:
        f.write(content)
    if mode:
        os.chmod(filename, mode)
+
def create_files(version, deb, email, asn, name, iso, rfc):
    """Create all files for the .deb build process.

    version, deb -- wireshark-dev version (stripped / full Debian form)
    email, name  -- maintainer identity for control/changelog
    asn          -- ASN.1 file name (must end in '.asn1')
    iso, rfc     -- timestamps as returned by get_time()
    """
    # Package base name: the ASN.1 file name without its '.asn1' suffix.
    base = asn.lower()[:-5]

    if not os.path.isdir("packaging/debian"):
        os.mkdir("packaging/debian")

    create_file("packaging/debian/rules", """#!/usr/bin/make -f

include /usr/share/cdbs/1/rules/debhelper.mk
include /usr/share/cdbs/1/class/autotools.mk

PREFIX=`pwd`/packaging/debian/wireshark-asn1-%s

binary-post-install/wireshark-asn1-%s::
	rm -f $(PREFIX)/usr/lib/wireshark/plugins/%s/*.a
""" % (base, base, version), 0o755)

    create_file("packaging/debian/control", """Source: wireshark-asn1-%s
Section: net
Priority: optional
Maintainer: %s <%s>
Standards-Version: 3.6.1.0
Build-Depends: snacc, autotools-dev, debhelper, cdbs

Package: wireshark-asn1-%s
Architecture: all
Depends: wireshark (= %s)
Description: ASN.1/BER dissector for %s
 This package provides a type table for decoding BER (Basic Encoding
 Rules) data over TCP or UDP, described by an ASN.1 (Abstract Syntax
 Notation 1) file '%s.asn1'.
""" % (base, name, email, base, deb, base, base))

    create_file("packaging/debian/changelog",
            """wireshark-asn1-%s (0.0.1-1) unstable; urgency=low

  * Automatically created package.

 -- %s <%s>  %s
""" % (base, name, email, rfc + "\n   (" + iso + ")"))

    # Typo fixed in the generated text: "be asn2deb" -> "by asn2deb".
    create_file("packaging/debian/copyright",
            """This package has been created automatically by asn2deb on
%s for Debian GNU/Linux.

Wireshark: https://www.wireshark.com/

Copyright:

GPL, as evidenced by existence of GPL license file \"COPYING\".
(the GNU GPL may be viewed on Debian systems in
/usr/share/common-licenses/GPL)
""" % (iso))
+
def get_wrs_version():
    """Detect the version of the installed wireshark-dev package.

    Returns (version, deb): *deb* is the full Debian version string from
    dpkg-query and *version* has any epoch prefix and Debian revision
    suffix stripped.

    Fix: the Python 2 ``string`` module functions (string.find) were
    removed in Python 3; this script's shebang is python3, so the str
    methods are used instead.
    """
    deb = os.popen(
        "dpkg-query -W --showformat='${Version}' wireshark-dev").read()
    debv = deb.find("-")
    if debv == -1:
        debv = len(deb)
    version = deb[deb.find(":") + 1:debv]
    return version, deb
+
def get_time():
    """Return the current UTC time as a pair of strings.

    The first element is ISO-like ("YYYY-MM-DD HH:MM:SS +0000"), the
    second RFC-2822-like ("Day, DD Mon YYYY HH:MM:SS +0000").
    """
    now = time.gmtime()
    iso = time.strftime("%Y-%m-%d %H:%M:%S +0000", now)
    rfc = time.strftime("%a, %d %b %Y %H:%M:%S +0000", now)
    return iso, rfc
+
def main():
    """Entry point: parse options, generate packaging files, build the .deb."""
    global options
    process_opts(sys.argv)
    iso, rfc = get_time()
    version, deb = get_wrs_version()
    create_files(version, deb,
                 options['email'], options['asn'], options['name'],
                 iso, rfc)
    # Hand off to dpkg-buildpackage with any user-supplied --dbopts.
    os.system("dpkg-buildpackage " + options['dbopts'])
+
def process_opts(argv):
    """Parse command line options into the global *options* dict.

    Exits the process for --help, --version, a getopt error, or a
    missing/unreadable ASN.1 file.
    """
    global options
    try:
        opts, args = getopt.getopt(argv[1:], "a:d:e:hn:pv",
                                   ["asn=",
                                    "dbopts=",
                                    "email=",
                                    "help",
                                    "name=",
                                    "preserve",
                                    "version"])
    except getopt.GetoptError:
        usage(argv[0])
        sys.exit(1)
    for opt, arg in opts:
        if opt in ("-a", "--asn"):
            options['asn'] = arg
        elif opt in ("-d", "--dbopts"):
            options['dbopts'] = arg
        elif opt in ("-e", "--email"):
            options['email'] = arg
        elif opt in ("-h", "--help"):
            options['help'] = 1
        elif opt in ("-n", "--name"):
            options['name'] = arg
        elif opt in ("-p", "--preserve"):
            options['preserve'] = 1
        elif opt in ("-v", "--version"):
            options['version'] = 1
    if options['help']:
        usage(argv[0])
        sys.exit(0)
    if options['version']:
        print(scriptinfo)
        sys.exit(0)
    # The ASN.1 file is mandatory and must be readable.
    if not options['asn']:
        print("mandatory ASN.1 file parameter missing")
        sys.exit(1)
    if not os.access(options['asn'], os.R_OK):
        print("ASN.1 file not accessible")
        sys.exit(1)
+
def usage(name):
    """Print usage help."""
    lines = (
        "Usage: " + name + " <parameters>",
        "Parameters are",
        "  --asn -a asn1file, ASN.1 file to use (mandatory)",
        "  --dbopts -d opts, options for dpkg-buildpackage",
        "  --email -e address, use e-mail address",
        "  --help -h, print help and exit",
        "  --name -n name, use user name",
        "  --preserve -p, do not overwrite files",
        "  --version -v, print version and exit",
        "Example:",
        name + " -e me@foo.net -a bar.asn1 -n \"My Name\" "
        "-d \"-rfakeroot -uc -us\"",
    )
    print("\n".join(lines))

if __name__ == '__main__':
    main()
diff --git a/tools/asn2wrs.py b/tools/asn2wrs.py
new file mode 100755
index 0000000..6669be8
--- /dev/null
+++ b/tools/asn2wrs.py
@@ -0,0 +1,8242 @@
+#!/usr/bin/env python3
+
+#
+# asn2wrs.py
+# ASN.1 to Wireshark dissector compiler
+# Copyright 2004 Tomas Kukosa
+#
+# SPDX-License-Identifier: MIT
+#
+
+"""ASN.1 to Wireshark dissector compiler"""
+
+#
+# Compiler from ASN.1 specification to the Wireshark dissector
+#
+# Based on ASN.1 to Python compiler from Aaron S. Lav's PyZ3950 package licensed under the X Consortium license
+# https://www.pobox.com/~asl2/software/PyZ3950/
+# (ASN.1 to Python compiler functionality is broken but not removed, it could be revived if necessary)
+#
+# It requires Dave Beazley's PLY parsing package licensed under the LGPL (tested with version 2.3)
+# https://www.dabeaz.com/ply/
+#
+#
+# ITU-T Recommendation X.680 (07/2002),
+# Information technology - Abstract Syntax Notation One (ASN.1): Specification of basic notation
+#
+# ITU-T Recommendation X.681 (07/2002),
+# Information technology - Abstract Syntax Notation One (ASN.1): Information object specification
+#
+# ITU-T Recommendation X.682 (07/2002),
+# Information technology - Abstract Syntax Notation One (ASN.1): Constraint specification
+#
+# ITU-T Recommendation X.683 (07/2002),
+# Information technology - Abstract Syntax Notation One (ASN.1): Parameterization of ASN.1 specifications
+#
+# ITU-T Recommendation X.880 (07/1994),
+# Information technology - Remote Operations: Concepts, model and notation
+#
+
+import warnings
+
+import re
+import sys
+import os
+import os.path
+import time
+import getopt
+import traceback
+
+try:
+ from ply import lex
+ from ply import yacc
+except ImportError:
+ # Fallback: use lex.py and yacc from the tools directory within the
+ # Wireshark source tree if python-ply is not installed.
+ import lex
+ import yacc
+
+if sys.version_info[0] < 3:
+ from string import maketrans
+
+
# OID name -> number conversion table.
# Keys are either '/name' (a top-level arc under any root) or
# '<parent-arcs>/name' with the parent arcs in dotted-number form; values
# are the arc numbers.  Several aliases may map to the same number.
oid_names = {
    '/itu-t' : 0,
    '/itu' : 0,
    '/ccitt' : 0,
    '/itu-r' : 0,
    '0/recommendation' : 0,
    '0.0/a' : 1,
    '0.0/b' : 2,
    '0.0/c' : 3,
    '0.0/d' : 4,
    '0.0/e' : 5,
    '0.0/f' : 6,
    '0.0/g' : 7,
    '0.0/h' : 8,
    '0.0/i' : 9,
    '0.0/j' : 10,
    '0.0/k' : 11,
    '0.0/l' : 12,
    '0.0/m' : 13,
    '0.0/n' : 14,
    '0.0/o' : 15,
    '0.0/p' : 16,
    '0.0/q' : 17,
    '0.0/r' : 18,
    '0.0/s' : 19,
    '0.0/t' : 20,
    '0.0/tseries' : 20,
    '0.0/u' : 21,
    '0.0/v' : 22,
    '0.0/w' : 23,
    '0.0/x' : 24,
    '0.0/y' : 25,
    '0.0/z' : 26,
    '0/question' : 1,
    '0/administration' : 2,
    '0/network-operator' : 3,
    '0/identified-organization' : 4,
    '0/r-recommendation' : 5,
    '0/data' : 9,
    '/iso' : 1,
    '1/standard' : 0,
    '1/registration-authority' : 1,
    '1/member-body' : 2,
    '1/identified-organization' : 3,
    '/joint-iso-itu-t' : 2,
    '/joint-iso-ccitt' : 2,
    '2/presentation' : 0,
    '2/asn1' : 1,
    '2/association-control' : 2,
    '2/reliable-transfer' : 3,
    '2/remote-operations' : 4,
    '2/ds' : 5,
    '2/directory' : 5,
    '2/mhs' : 6,
    '2/mhs-motis' : 6,
    '2/ccr' : 7,
    '2/oda' : 8,
    '2/ms' : 9,
    '2/osi-management' : 9,
    '2/transaction-processing' : 10,
    '2/dor' : 11,
    '2/distinguished-object-reference' : 11,
    '2/reference-data-transfe' : 12,
    '2/network-layer' : 13,
    '2/network-layer-management' : 13,
    '2/transport-layer' : 14,
    '2/transport-layer-management' : 14,
    '2/datalink-layer' : 15,
    '2/datalink-layer-managemen' : 15,
    '2/datalink-layer-management-information' : 15,
    '2/country' : 16,
    '2/registration-procedures' : 17,
    '2/registration-procedure' : 17,
    '2/physical-layer' : 18,
    '2/physical-layer-management' : 18,
    '2/mheg' : 19,
    '2/genericULS' : 20,
    '2/generic-upper-layers-security' : 20,
    '2/guls' : 20,
    '2/transport-layer-security-protocol' : 21,
    '2/network-layer-security-protocol' : 22,
    '2/international-organizations' : 23,
    '2/internationalRA' : 23,
    '2/sios' : 24,
    '2/uuid' : 25,
    '2/odp' : 26,
    '2/upu' : 40,
}
+
# Synthetic name components used by the compiler for SEQUENCE/SET OF item
# fields and for the untagged variant of a tagged type.
ITEM_FIELD_NAME = '_item'
UNTAG_TYPE_NAME = '_untag'
+
def asn2c(id):
    """Map an ASN.1 identifier to a valid C identifier.

    The characters '-', '.' and '&' are each replaced by '_'.
    """
    return id.translate(str.maketrans('-.&', '___'))
+
# Lexer/parser globals shared across the helper functions below.
input_file = None   # name of the ASN.1 file currently being processed (used in error messages)
g_conform = None    # active conformance-file context
lexer = None        # PLY lexer instance
in_oid = False      # True while lexing inside an OBJECT IDENTIFIER value
+
class LexError(Exception):
    """Raised when the lexer encounters a character it cannot tokenize."""

    def __init__(self, tok, filename=None):
        self.tok = tok
        self.filename = filename
        self.msg = f"Unexpected character {self.tok.value[0]!r}"
        super().__init__(self.msg)

    def __repr__(self):
        return f"{self.filename}:{self.tok.lineno}: {self.msg}"

    def __str__(self):
        return self.__repr__()
+
+
class ParseError(Exception):
    """Raised when the parser encounters an unexpected token."""

    def __init__(self, tok, filename=None):
        self.tok = tok
        self.filename = filename
        self.msg = "Unexpected token %s(%r)" % (self.tok.type, self.tok.value)
        super().__init__(self.msg)

    def __repr__(self):
        return f"{self.filename}:{self.tok.lineno}: {self.msg}"

    def __str__(self):
        return self.__repr__()
+
+
class DuplicateError(Exception):
    """Raised when the same identifier is registered twice for one kind."""

    def __init__(self, type, ident):
        self.type = type
        self.ident = ident
        self.msg = f"Duplicate {self.type} for {self.ident}"
        super().__init__(self.msg)

    def __repr__(self):
        return self.msg

    def __str__(self):
        return self.__repr__()
+
class CompError(Exception):
    """Generic compiler error carrying a plain message."""

    def __init__(self, msg):
        self.msg = msg
        super().__init__(self.msg)

    def __repr__(self):
        return self.msg

    def __str__(self):
        return self.__repr__()
+
+
# Extra lexer state: 'braceignore' skips over a brace-delimited body
# without tokenizing it (exclusive state, see the t_braceignore_* rules).
states = (
  ('braceignore','exclusive'),
)

# Operator precedence for the constraint grammar.
precedence = (
  ('left', 'UNION', 'BAR'),
  ('left', 'INTERSECTION', 'CIRCUMFLEX'),
)
# 11 ASN.1 lexical items

# Fixed lexemes: regex -> token name.  These are installed as PLY t_*
# rules by the loop over static_tokens further below.
static_tokens = {
    r'::=' : 'ASSIGNMENT', # 11.16 Assignment lexical item
    r'\.\.' : 'RANGE', # 11.17 Range separator
    r'\.\.\.' : 'ELLIPSIS', # 11.18 Ellipsis
    r'\[\[' : 'LVERBRACK', # 11.19 Left version brackets
    r'\]\]' : 'RVERBRACK', # 11.20 Right version brackets
    # 11.26 Single character lexical items
    r'\{' : 'LBRACE',
    r'\}' : 'RBRACE',
    r'<' : 'LT',
    #r'>' : 'GT',
    r',' : 'COMMA',
    r'\.' : 'DOT',
    r'\(' : 'LPAREN',
    r'\)' : 'RPAREN',
    r'\[' : 'LBRACK',
    r'\]' : 'RBRACK',
    r'-' : 'MINUS',
    r':' : 'COLON',
    #r'=' : 'EQ',
    #r'"' : 'QUOTATION',
    #r"'" : 'APOSTROPHE',
    r';' : 'SEMICOLON',
    r'@' : 'AT',
    r'\!' : 'EXCLAMATION',
    r'\^' : 'CIRCUMFLEX',
    r'\&' : 'AMPERSAND',
    r'\|' : 'BAR'
}
+
+# 11.27 Reserved words
+
+# all keys in reserved_words must start w/ upper case
# ASN.1 keyword -> token name.  Most map to themselves; hyphens become
# underscores so the token names are valid Python/PLY identifiers.
# Commented-out entries are keywords the grammar does not (yet) use.
reserved_words = {
    'ABSENT' : 'ABSENT',
    'ABSTRACT-SYNTAX' : 'ABSTRACT_SYNTAX',
    'ALL' : 'ALL',
    'APPLICATION' : 'APPLICATION',
    'AUTOMATIC' : 'AUTOMATIC',
    'BEGIN' : 'BEGIN',
    'BIT' : 'BIT',
    'BOOLEAN' : 'BOOLEAN',
    'BY' : 'BY',
    'CHARACTER' : 'CHARACTER',
    'CHOICE' : 'CHOICE',
    'CLASS' : 'CLASS',
    'COMPONENT' : 'COMPONENT',
    'COMPONENTS' : 'COMPONENTS',
    'CONSTRAINED' : 'CONSTRAINED',
    'CONTAINING' : 'CONTAINING',
    'DEFAULT' : 'DEFAULT',
    'DEFINITIONS' : 'DEFINITIONS',
    'EMBEDDED' : 'EMBEDDED',
#    'ENCODED' : 'ENCODED',
    'END' : 'END',
    'ENUMERATED' : 'ENUMERATED',
#    'EXCEPT' : 'EXCEPT',
    'EXPLICIT' : 'EXPLICIT',
    'EXPORTS' : 'EXPORTS',
#    'EXTENSIBILITY' : 'EXTENSIBILITY',
    'EXTERNAL' : 'EXTERNAL',
    'FALSE' : 'FALSE',
    'FROM' : 'FROM',
    'GeneralizedTime' : 'GeneralizedTime',
    'IDENTIFIER' : 'IDENTIFIER',
    'IMPLICIT' : 'IMPLICIT',
#    'IMPLIED' : 'IMPLIED',
    'IMPORTS' : 'IMPORTS',
    'INCLUDES' : 'INCLUDES',
    'INSTANCE' : 'INSTANCE',
    'INTEGER' : 'INTEGER',
    'INTERSECTION' : 'INTERSECTION',
    'MAX' : 'MAX',
    'MIN' : 'MIN',
    'MINUS-INFINITY' : 'MINUS_INFINITY',
    'NULL' : 'NULL',
    'OBJECT' : 'OBJECT',
    'ObjectDescriptor' : 'ObjectDescriptor',
    'OCTET' : 'OCTET',
    'OF' : 'OF',
    'OPTIONAL' : 'OPTIONAL',
    'PATTERN' : 'PATTERN',
    'PDV' : 'PDV',
    'PLUS-INFINITY' : 'PLUS_INFINITY',
    'PRESENT' : 'PRESENT',
    'PRIVATE' : 'PRIVATE',
    'REAL' : 'REAL',
    'RELATIVE-OID' : 'RELATIVE_OID',
    'SEQUENCE' : 'SEQUENCE',
    'SET' : 'SET',
    'SIZE' : 'SIZE',
    'STRING' : 'STRING',
    'SUCCESSORS' : 'SUCCESSORS',
    'SYNTAX' : 'SYNTAX',
    'TAGS' : 'TAGS',
    'TRUE' : 'TRUE',
    'TYPE-IDENTIFIER' : 'TYPE_IDENTIFIER',
    'UNION' : 'UNION',
    'UNIQUE' : 'UNIQUE',
    'UNIVERSAL' : 'UNIVERSAL',
    'UTCTime' : 'UTCTime',
    'WITH' : 'WITH',
# X.208 obsolete but still used
    'ANY' : 'ANY',
    'DEFINED' : 'DEFINED',
}
+
# Safeguard: give any static token without an explicit name its regex as
# the name.  (All entries above are named, so this loop is currently a
# no-op.)
for k in list(static_tokens.keys()):
    if static_tokens [k] is None:
        static_tokens [k] = k

# Prefixes of the ASN.1 restricted character string types; each becomes a
# '<Name>String' reserved word below.
StringTypes = ['Numeric', 'Printable', 'IA5', 'BMP', 'Universal', 'UTF8',
               'Teletex', 'T61', 'Videotex', 'Graphic', 'ISO646', 'Visible',
               'General']

# Effective permitted-alphabet constraints are PER-visible only
# for the known-multiplier character string types (X.691 27.1)
#
# XXX: This should include BMPString (UCS2) and UniversalString (UCS4),
# but asn2wrs only supports the RestrictedCharacterStringValue
# notation of "cstring", but not that of "CharacterStringList",
# "Quadruple", or "Tuple" (See X.680 41.8), and packet-per.c does
# not support members of the permitted-alphabet being outside the
# ASCII range. We don't currently have any ASN.1 modules that need it,
# anyway.
KnownMultiplierStringTypes = ('NumericString', 'PrintableString', 'IA5String',
                              'ISO646String', 'VisibleString')

for s in StringTypes:
    reserved_words[s + 'String'] = s + 'String'

# Complete PLY token list: fixed lexemes, keywords, and the dynamic
# token classes recognized by the t_* functions below.
tokens = list(static_tokens.values()) \
         + list(reserved_words.values()) \
         + ['BSTRING', 'HSTRING', 'QSTRING',
            'UCASE_IDENT', 'LCASE_IDENT', 'LCASE_IDENT_ASSIGNED', 'CLASS_IDENT',
            'REAL_NUMBER', 'NUMBER', 'PYQUOTE']


# Install the static token rules as module-level t_<NAME> attributes,
# which is how PLY discovers simple string rules.
cur_mod = __import__ (__name__) # XXX blech!

for (k, v) in list(static_tokens.items ()):
    cur_mod.__dict__['t_' + v] = k
+
# PLY token rules.  NOTE: each rule's docstring IS the token's regular
# expression (PLY reads it via introspection), and the definition order
# determines matching priority — neither may be changed casually.

# 11.10 Binary strings
def t_BSTRING (t):
    r"'[01]*'B"
    return t

# 11.12 Hexadecimal strings
def t_HSTRING (t):
    r"'[0-9A-Fa-f]*'H"
    return t

# Quoted character strings; doubled quotes escape a quote.
def t_QSTRING (t):
    r'"([^"]|"")*"'
    return t

# Upper-case-initial identifiers: may be promoted to a class identifier,
# a class-syntax word, or a reserved word.
def t_UCASE_IDENT (t):
    r"[A-Z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
    if (is_class_ident(t.value)): t.type = 'CLASS_IDENT'
    if (is_class_syntax(t.value)): t.type = t.value
    t.type = reserved_words.get(t.value, t.type)
    return t

# Lower-case-initial identifiers; outside an OID context, names that have
# a value assignment become LCASE_IDENT_ASSIGNED.
lcase_ident_assigned = {}
def t_LCASE_IDENT (t):
    r"[a-z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
    if (not in_oid and (t.value in lcase_ident_assigned)): t.type = 'LCASE_IDENT_ASSIGNED'
    return t

# 11.9 Real numbers
def t_REAL_NUMBER (t):
    r"[0-9]+\.[0-9]*(?!\.)"
    return t

# 11.8 Numbers
def t_NUMBER (t):
    r"0|([1-9][0-9]*)"
    return t

# 11.6 Comments
# A comment starting with the PYQUOTE marker is passed through to the
# parser as a PYQUOTE token; ordinary comments are discarded.
pyquote_str = 'PYQUOTE'
def t_COMMENT(t):
    r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)"
    if (t.value.find("\n") >= 0) : t.lexer.lineno += 1
    if t.value[2:2+len (pyquote_str)] == pyquote_str:
        t.value = t.value[2+len(pyquote_str):]
        t.value = t.value.lstrip ()
        t.type = pyquote_str
        return t
    return None
+
# Whitespace ignored between tokens in the default state.
t_ignore = " \t\r"

# Track line numbers across blank lines.
def t_NEWLINE(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")

def t_error(t):
    global input_file
    raise LexError(t, input_file)

# state 'braceignore'
# In this exclusive state everything inside a balanced {...} body is
# skipped; only the final closing RBRACE is returned as a token.

def t_braceignore_lbrace(t):
    r'\{'
    t.lexer.level +=1

def t_braceignore_rbrace(t):
    r'\}'
    t.lexer.level -=1
    # If closing brace, return token
    if t.lexer.level == 0:
        t.type = 'RBRACE'
        return t

# Skip quoted strings whole so braces inside them don't affect nesting.
def t_braceignore_QSTRING (t):
    r'"([^"]|"")*"'
    t.lexer.lineno += t.value.count("\n")

# Skip comments for the same reason.
def t_braceignore_COMMENT(t):
    r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)"
    if (t.value.find("\n") >= 0) : t.lexer.lineno += 1

# Consume any other run of characters that cannot start a brace, quote,
# or comment.
def t_braceignore_nonspace(t):
    r'[^\s\{\}\"-]+|-(?!-)'

t_braceignore_ignore = " \t\r"

def t_braceignore_NEWLINE(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")

# Unlexable characters are simply skipped while ignoring a body.
def t_braceignore_error(t):
    t.lexer.skip(1)
+
class Ctx:
    """Output context for the (legacy) ASN.1-to-Python code generation.

    Collects assignments with their dependencies and emits them in an
    order where each definition precedes its uses, reporting cycles.
    """
    def __init__ (self, defined_dict, indent = 0):
        self.tags_def = 'EXPLICIT' # default = explicit
        self.indent_lev = 0
        self.assignments = {}      # ident -> generated value text
        self.dependencies = {}     # ident -> idents it depends on
        self.pyquotes = []         # verbatim PYQUOTE comment payloads
        self.defined_dict = defined_dict  # shared across modules for duplicate detection
        self.name_ctr = 0
    def spaces (self):
        # Current indentation as a string (4 spaces per level).
        return " " * (4 * self.indent_lev)
    def indent (self):
        self.indent_lev += 1
    def outdent (self):
        self.indent_lev -= 1
        assert (self.indent_lev >= 0)
    def register_assignment (self, ident, val, dependencies):
        # Record one assignment; duplicates within or across modules are
        # fatal errors.
        if ident in self.assignments:
            raise DuplicateError("assignment", ident)
        if ident in self.defined_dict:
            raise Exception("cross-module duplicates for %s" % ident)
        self.defined_dict [ident] = 1
        self.assignments[ident] = val
        self.dependencies [ident] = dependencies
        return ""
    #        return "#%s depends on %s" % (ident, str (dependencies))
    def register_pyquote (self, val):
        self.pyquotes.append (val)
        return ""
    def output_assignments (self):
        # Emit assignments in dependency order.  Repeatedly sweep the
        # remaining items, emitting any whose dependencies are already
        # out; if a full sweep emits nothing, the remainder forms one or
        # more cycles, which are reported and then emitted unordered.
        already_output = {}
        text_list = []
        assign_keys = list(self.assignments.keys())
        to_output_count = len (assign_keys)
        while True:
            any_output = 0
            for (ident, val) in list(self.assignments.items ()):
                if ident in already_output:
                    continue
                ok = 1
                for d in self.dependencies [ident]:
                    if ((d not in already_output) and
                        (d in assign_keys)):
                        ok = 0
                if ok:
                    text_list.append ("%s=%s" % (ident,
                                                self.assignments [ident]))
                    already_output [ident] = 1
                    any_output = 1
                    to_output_count -= 1
                    assert (to_output_count >= 0)
            if not any_output:
                if to_output_count == 0:
                    break
                # OK, we detected a cycle
                cycle_list = []
                for ident in list(self.assignments.keys ()):
                    if ident not in already_output:
                        depend_list = [d for d in self.dependencies[ident] if d in assign_keys]
                        cycle_list.append ("%s(%s)" % (ident, ",".join (depend_list)))

                text_list.append ("# Cycle XXX " + ",".join (cycle_list))
                for (ident, val) in list(self.assignments.items ()):
                    if ident not in already_output:
                        text_list.append ("%s=%s" % (ident, self.assignments [ident]))
                break

        return "\n".join (text_list)
    def output_pyquotes (self):
        return "\n".join (self.pyquotes)
    def make_new_name (self):
        # Fresh identifier for compiler-generated helper definitions.
        self.name_ctr += 1
        return "_compiler_generated_name_%d" % (self.name_ctr,)
+
#--- Flags for EXPORT, USER_DEFINED, NO_EMIT, MAKE_ENUM -------------------------------
# Bit flags combined into a mask; they are tested individually by the
# directive-processing code elsewhere in this file.
EF_TYPE    = 0x0001
EF_VALS    = 0x0002
EF_ENUM    = 0x0004
EF_WS_DLL  = 0x0010   # exported from shared library
EF_EXTERN  = 0x0020
EF_NO_PROT = 0x0040
EF_NO_TYPE = 0x0080
EF_UCASE   = 0x0100
EF_TABLE   = 0x0400
EF_DEFINE  = 0x0800
EF_MODULE  = 0x1000
+
+#--- common dependency computation ---
+# Input : list of items
+# dictionary with lists of dependency
+#
+#
+# Output : list of two outputs:
+# [0] list of items in dependency
+# [1] list of cycle dependency cycles
def dependency_compute(items, dependency, map_fn = lambda t: t, ignore_fn = lambda t: False):
    """Topologically order *items* using an explicit-stack DFS.

    dependency maps an item to the items it depends on; map_fn maps an
    item to the name that is emitted/deduplicated; ignore_fn skips items.
    Returns (ordered_items, cycles) where each cycle is the list of items
    forming it.
    """
    item_ord = []
    item_cyc = []
    x = {} # already emitted
    #print '# Dependency computation'
    for t in items:
        if map_fn(t) in x:
            #print 'Continue: %s : %s' % (t, (map_fn(t))
            continue
        # stackx holds, per item on the stack, the dependencies still to
        # be visited (copied so pop(0) doesn't mutate the input dict).
        stack = [t]
        stackx = {t : dependency.get(t, [])[:]}
        #print 'Push: %s : %s' % (t, str(stackx[t]))
        while stack:
            if stackx[stack[-1]]: # has dependencies
                d = stackx[stack[-1]].pop(0)
                if map_fn(d) in x or ignore_fn(d):
                    continue
                if d in stackx: # cyclic dependency
                    # Reconstruct the cycle path from the current stack.
                    c = stack[:]
                    c.reverse()
                    c = [d] + c[0:c.index(d)+1]
                    c.reverse()
                    item_cyc.append(c)
                    #print 'Cyclic: %s ' % (' -> '.join(c))
                    continue
                stack.append(d)
                stackx[d] = dependency.get(d, [])[:]
                #print 'Push: %s : %s' % (d, str(stackx[d]))
            else:
                # All dependencies done: emit this item (post-order).
                #print 'Pop: %s' % (stack[-1])
                del stackx[stack[-1]]
                e = map_fn(stack.pop())
                if e in x:
                    continue
                #print 'Add: %s' % (e)
                item_ord.append(e)
                x[e] = True
    return (item_ord, item_cyc)
+
def relpath(filename):
    """Return *filename* expressed relative to the current directory."""
    rel = os.path.relpath(filename)
    return rel
+
def rel_dissector_path(filename):
    """Return *filename* as a './'-prefixed, '/'-separated path.

    Leading components are stripped until the first component is 'asn1'
    (keeping at least three components), i.e. a path relative to
    epan/dissectors within the source tree.
    """
    parts = os.path.abspath(filename).split(os.sep)
    while len(parts) > 3 and parts[0] != 'asn1':
        del parts[0]
    return '/'.join(['.'] + parts)
+
+
+#--- EthCtx -------------------------------------------------------------------
+class EthCtx:
    def __init__(self, conform, output, indent = 0):
        """Compiler context tying together the conformance-file handler
        (*conform*) and the output-file handler (*output*)."""
        self.conform = conform
        self.output = output
        # Give both helpers a back-reference to this context.
        self.conform.ectx = self
        self.output.ectx = self
        self.encoding = 'per'          # default transfer syntax
        self.aligned = False           # PER variant: aligned vs unaligned
        self.default_oid_variant = ''
        self.default_opentype_variant = ''
        self.default_containing_variant = '_pdu_new'
        self.default_embedded_pdv_cb = None
        self.default_external_type_cb = None
        self.remove_prefix = None
        self.srcdir = None
        self.emitted_pdu = {}
        self.module = {}               # module name -> list of dependency module names
        self.module_ord = []           # module names in registration order
        self.all_type_attr = {}        # per-module exported type attributes
        self.all_tags = {}             # per-module exported type tags
        self.all_vals = {}             # per-module exported value assignments
+
+ def encp(self): # encoding protocol
+ encp = self.encoding
+ return encp
+
    # Encoding predicates: which transfer syntax is being generated, and
    # whether the aligned variant is in effect.
    def Per(self): return self.encoding == 'per'
    def Ber(self): return self.encoding == 'ber'
    def Oer(self): return self.encoding == 'oer'
    def Aligned(self): return self.aligned
    def Unaligned(self): return not self.aligned
    # Tags are always needed for BER, otherwise only when self.tag_opt is set.
    def NeedTags(self): return self.tag_opt or self.Ber()
    def NAPI(self): return False # disable planned features
+
+ def Module(self): # current module name
+ return self.modules[-1][0]
+
+ def groups(self):
+ return self.group_by_prot or (self.conform.last_group > 0)
+
+ def dbg(self, d):
+ if (self.dbgopt.find(d) >= 0):
+ return True
+ else:
+ return False
+
+ def value_max(self, a, b):
+ if (a == 'MAX') or (b == 'MAX'): return 'MAX';
+ if a == 'MIN': return b;
+ if b == 'MIN': return a;
+ try:
+ if (int(a) > int(b)):
+ return a
+ else:
+ return b
+ except (ValueError, TypeError):
+ pass
+ return "MAX((%s),(%s))" % (a, b)
+
+ def value_min(self, a, b):
+ if (a == 'MIN') or (b == 'MIN'): return 'MIN';
+ if a == 'MAX': return b;
+ if b == 'MAX': return a;
+ try:
+ if (int(a) < int(b)):
+ return a
+ else:
+ return b
+ except (ValueError, TypeError):
+ pass
+ return "MIN((%s),(%s))" % (a, b)
+
+ def value_get_eth(self, val):
+ if isinstance(val, Value):
+ return val.to_str(self)
+ ethname = val
+ if val in self.value:
+ ethname = self.value[val]['ethname']
+ return ethname
+
    def value_get_val(self, nm):
        """Resolve value identifier *nm* to its concrete value.

        Imported values are looked up in the exporting module's table;
        unknown or unresolvable identifiers fall back to asn2c(nm) with a
        warning.
        """
        val = asn2c(nm)
        if nm in self.value:
            if self.value[nm]['import']:
                # Imported: fetch from the exporting module's value table.
                v = self.get_val_from_all(nm, self.value[nm]['import'])
                if v is None:
                    msg = 'Need value of imported value identifier %s from %s (%s)' % (nm, self.value[nm]['import'], self.value[nm]['proto'])
                    warnings.warn_explicit(msg, UserWarning, '', 0)
                else:
                    val = v
            else:
                val = self.value[nm]['value']
                if isinstance (val, Value):
                    val = val.to_str(self)
        else:
            msg = 'Need value of unknown value identifier %s' % (nm)
            warnings.warn_explicit(msg, UserWarning, '', 0)
        return val
+
    def eth_get_type_attr(self, type):
        """Collect the effective attributes (TYPE, DISPLAY, STRINGS,
        BITMASK, ...) for *type*.

        Follows the chain of type references (and untagged variants of
        tagged types) to its origin, then merges attributes from the
        origin back down, so more specific settings override inherited
        ones.
        """
        #print "eth_get_type_attr(%s)" % (type)
        types = [type]
        # Walk the reference chain until an imported type or a
        # non-reference definition is reached, recording each step.
        while (not self.type[type]['import']):
            val = self.type[type]['val']
            #print val
            ttype = type
            while (val.type == 'TaggedType'):
                val = val.val
                ttype += '/' + UNTAG_TYPE_NAME
            if (val.type != 'Type_Ref'):
                if (type != ttype):
                    types.append(ttype)
                break
            type = val.val
            types.append(type)
        attr = {}
        #print "  ", types
        # Merge from the end of the chain (origin) back to the start, so
        # later updates (more derived types) win.
        while len(types):
            t = types.pop()
            if (self.type[t]['import']):
                attr.update(self.type[t]['attr'])
                attr.update(self.eth_get_type_attr_from_all(t, self.type[t]['import']))
            elif (self.type[t]['val'].type == 'SelectionType'):
                val = self.type[t]['val']
                (ftype, display) = val.eth_ftype(self)
                attr.update({ 'TYPE' : ftype, 'DISPLAY' : display,
                              'STRINGS' : val.eth_strings(), 'BITMASK' : '0' });
            else:
                attr.update(self.type[t]['attr'])
                attr.update(self.eth_type[self.type[t]['ethname']]['attr'])
        # 64-bit value tables need the matching BASE_ flag in DISPLAY.
        if attr['STRINGS'].startswith('VALS64(') and '|BASE_VAL64_STRING' not in attr['DISPLAY']:
            attr['DISPLAY'] += '|BASE_VAL64_STRING'
        #print "  ", attr
        return attr
+
+ def eth_get_type_attr_from_all(self, type, module):
+ attr = {}
+ if module in self.all_type_attr and type in self.all_type_attr[module]:
+ attr = self.all_type_attr[module][type]
+ return attr
+
+ def get_ttag_from_all(self, type, module):
+ ttag = None
+ if module in self.all_tags and type in self.all_tags[module]:
+ ttag = self.all_tags[module][type]
+ return ttag
+
+ def get_val_from_all(self, nm, module):
+ val = None
+ if module in self.all_vals and nm in self.all_vals[module]:
+ val = self.all_vals[module][nm]
+ return val
+
+ def get_obj_repr(self, ident, flds=[], not_flds=[]):
+ def set_type_fn(cls, field, fnfield):
+ obj[fnfield + '_fn'] = 'NULL'
+ obj[fnfield + '_pdu'] = 'NULL'
+ if field in val and isinstance(val[field], Type_Ref):
+ p = val[field].eth_type_default_pars(self, '')
+ obj[fnfield + '_fn'] = p['TYPE_REF_FN']
+ obj[fnfield + '_fn'] = obj[fnfield + '_fn'] % p # one iteration
+ if (self.conform.check_item('PDU', cls + '.' + field)):
+ obj[fnfield + '_pdu'] = 'dissect_' + self.field[val[field].val]['ethname']
+ return
+ # end of get_type_fn()
+ obj = { '_name' : ident, '_ident' : asn2c(ident)}
+ obj['_class'] = self.oassign[ident].cls
+ obj['_module'] = self.oassign[ident].module
+ val = self.oassign[ident].val
+ for f in flds:
+ if f not in val:
+ return None
+ for f in not_flds:
+ if f in val:
+ return None
+ for f in list(val.keys()):
+ if isinstance(val[f], Node):
+ obj[f] = val[f].fld_obj_repr(self)
+ else:
+ obj[f] = str(val[f])
+ if (obj['_class'] == 'TYPE-IDENTIFIER') or (obj['_class'] == 'ABSTRACT-SYNTAX'):
+ set_type_fn(obj['_class'], '&Type', '_type')
+ if (obj['_class'] == 'OPERATION'):
+ set_type_fn(obj['_class'], '&ArgumentType', '_argument')
+ set_type_fn(obj['_class'], '&ResultType', '_result')
+ if (obj['_class'] == 'ERROR'):
+ set_type_fn(obj['_class'], '&ParameterType', '_parameter')
+ return obj
+
+ #--- eth_reg_module -----------------------------------------------------------
+ def eth_reg_module(self, module):
+ # Register an ASN.1 module: remember its [name, proto] pair and start
+ # an empty per-module dependency list. Raises DuplicateError when the
+ # same module name is registered twice.
+ #print "eth_reg_module(module='%s')" % (module)
+ name = module.get_name()
+ self.modules.append([name, module.get_proto(self)])
+ if name in self.module:
+ raise DuplicateError("module", name)
+ self.module[name] = []
+ self.module_ord.append(name)
+
+ #--- eth_module_dep_add ------------------------------------------------------------
+ def eth_module_dep_add(self, module, dep):
+ # Record that 'module' depends on (imports from) module 'dep'.
+ self.module[module].append(dep)
+
+ #--- eth_exports ------------------------------------------------------------
+ def eth_exports(self, exports):
+ # Process a module's EXPORTS clause. A lone 'ALL' entry switches on
+ # exports_all (later registrations export automatically); otherwise
+ # each symbol is sorted into the type/class/value export list by kind.
+ self.exports_all = False
+ if ((len(exports) == 1) and (exports[0] == 'ALL')):
+ self.exports_all = True
+ return
+ for e in (exports):
+ if isinstance(e, Type_Ref):
+ self.exports.append(e.val)
+ elif isinstance(e, Class_Ref):
+ self.cexports.append(e.val)
+ else:
+ self.vexports.append(e)
+
+ #--- eth_reg_assign ---------------------------------------------------------
+ def eth_reg_assign(self, ident, val, virt=False):
+ # Register the type assignment 'ident ::= val'. 'virt' flags
+ # assignments synthesised internally rather than parsed from input.
+ # Raises DuplicateError on a repeated identifier.
+ #print("eth_reg_assign(ident='%s')" % (ident), 'module=', self.Module())
+ if ident in self.assign:
+ raise DuplicateError("assignment", ident)
+ self.assign[ident] = { 'val' : val , 'virt' : virt }
+ self.assign_ord.append(ident)
+ if (self.exports_all):
+ self.exports.append(ident)
+
+ #--- eth_reg_vassign --------------------------------------------------------
+ def eth_reg_vassign(self, vassign):
+ # Register a value assignment, keyed by its identifier; declaration
+ # order is preserved in vassign_ord. Under EXPORTS ALL the identifier
+ # is exported automatically. Raises DuplicateError on repeats.
+ ident = vassign.ident
+ #print "eth_reg_vassign(ident='%s')" % (ident)
+ if ident in self.vassign:
+ raise DuplicateError("value assignment", ident)
+ self.vassign[ident] = vassign
+ self.vassign_ord.append(ident)
+ if (self.exports_all):
+ self.vexports.append(ident)
+
+ #--- eth_reg_oassign --------------------------------------------------------
+ def eth_reg_oassign(self, oassign):
+ # Register an information object assignment. Re-registering an equal
+ # object is tolerated; a different object under the same identifier
+ # raises DuplicateError. Objects are also indexed by their class in
+ # oassign_cls for later per-class table generation.
+ ident = oassign.ident
+ #print "eth_reg_oassign(ident='%s')" % (ident)
+ if ident in self.oassign:
+ if self.oassign[ident] == oassign:
+ return # OK - already defined
+ else:
+ raise DuplicateError("information object assignment", ident)
+ self.oassign[ident] = oassign
+ self.oassign_ord.append(ident)
+ self.oassign_cls.setdefault(oassign.cls, []).append(ident)
+
+ #--- eth_import_type --------------------------------------------------------
+ def eth_import_type(self, ident, mod, proto):
+ # Register type 'ident' as imported from module 'mod' (protocol
+ # 'proto'). Tolerates a repeat when the type is already defined by or
+ # imported from the same module; otherwise raises DuplicateError.
+ # Default display attributes are applied, then overridden from the
+ # conformance file (module-qualified "$mod$ident" entries win).
+ #print ("eth_import_type(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto))
+ if ident in self.type:
+ #print ("already defined '%s' import=%s, module=%s" % (ident, str(self.type[ident]['import']), self.type[ident].get('module', '-')))
+ if not self.type[ident]['import'] and (self.type[ident]['module'] == mod) :
+ return # OK - already defined
+ elif self.type[ident]['import'] and (self.type[ident]['import'] == mod) :
+ return # OK - already imported
+ else:
+ raise DuplicateError("type", ident)
+ self.type[ident] = {'import' : mod, 'proto' : proto,
+ 'ethname' : '' }
+ self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE',
+ 'STRINGS' : 'NULL', 'BITMASK' : '0' }
+ mident = "$%s$%s" % (mod, ident)
+ if (self.conform.check_item('TYPE_ATTR', mident)):
+ self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', mident))
+ else:
+ self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident))
+ if (self.conform.check_item('IMPORT_TAG', mident)):
+ self.conform.copy_item('IMPORT_TAG', ident, mident)
+ self.type_imp.append(ident)
+
+ #--- dummy_import_type --------------------------------------------------------
+ def dummy_import_type(self, ident):
+ # Fabricate an import entry for a type that was referenced but never
+ # defined or imported, so code generation can proceed; prints a
+ # diagnostic and returns the generated C-safe name. Raises if the
+ # type already exists (this must only be used for missing types).
+ # dummy imported
+ if ident in self.type:
+ raise Exception("Try to dummy import for existing type :%s" % ident)
+ ethtype = asn2c(ident)
+ self.type[ident] = {'import' : 'xxx', 'proto' : 'xxx',
+ 'ethname' : ethtype }
+ self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE',
+ 'STRINGS' : 'NULL', 'BITMASK' : '0' }
+ self.eth_type[ethtype] = { 'import' : 'xxx', 'proto' : 'xxx' , 'attr' : {}, 'ref' : []}
+ print("Dummy imported: %s (%s)" % (ident, ethtype))
+ return ethtype
+
+ #--- eth_import_class --------------------------------------------------------
+ def eth_import_class(self, ident, mod, proto):
+ # Register object class 'ident' as imported from module 'mod'.
+ # Same duplicate-tolerance rules as eth_import_type().
+ #print "eth_import_class(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto)
+ if ident in self.objectclass:
+ #print "already defined import=%s, module=%s" % (str(self.objectclass[ident]['import']), self.objectclass[ident]['module'])
+ if not self.objectclass[ident]['import'] and (self.objectclass[ident]['module'] == mod) :
+ return # OK - already defined
+ elif self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == mod) :
+ return # OK - already imported
+ else:
+ raise DuplicateError("object class", ident)
+ self.objectclass[ident] = {'import' : mod, 'proto' : proto,
+ 'ethname' : '' }
+ self.objectclass_imp.append(ident)
+
+ #--- eth_import_value -------------------------------------------------------
+ def eth_import_value(self, ident, mod, proto):
+ # Register value 'ident' as imported from module 'mod'.
+ # Same duplicate-tolerance rules as eth_import_type().
+ #print "eth_import_value(ident='%s', mod='%s', prot='%s')" % (ident, mod, prot)
+ if ident in self.value:
+ #print "already defined import=%s, module=%s" % (str(self.value[ident]['import']), self.value[ident]['module'])
+ if not self.value[ident]['import'] and (self.value[ident]['module'] == mod) :
+ return # OK - already defined
+ elif self.value[ident]['import'] and (self.value[ident]['import'] == mod) :
+ return # OK - already imported
+ else:
+ raise DuplicateError("value", ident)
+ self.value[ident] = {'import' : mod, 'proto' : proto,
+ 'ethname' : ''}
+ self.value_imp.append(ident)
+
+ #--- eth_sel_req ------------------------------------------------------------
+ def eth_sel_req(self, typ, sel):
+ # Record that selection type 'sel < typ' is required; deduplicated by
+ # the "typ.sel" key, which is also returned to the caller.
+ key = typ + '.' + sel
+ if key not in self.sel_req:
+ self.sel_req[key] = { 'typ' : typ , 'sel' : sel}
+ self.sel_req_ord.append(key)
+ return key
+
+ #--- eth_comp_req ------------------------------------------------------------
+ def eth_comp_req(self, type):
+ # Record that the COMPONENTS OF expansion of 'type' is required.
+ self.comp_req_ord.append(type)
+
+ #--- eth_dep_add ------------------------------------------------------------
+ def eth_dep_add(self, type, dep):
+ # Record that 'type' depends on type 'dep' (used later for the
+ # dependency/cycle computation in eth_prepare()).
+ if type not in self.type_dep:
+ self.type_dep[type] = []
+ self.type_dep[type].append(dep)
+
+ #--- eth_reg_type -----------------------------------------------------------
+ def eth_reg_type(self, ident, val, mod=None):
+ # Register a locally defined type 'ident' with parsed value 'val'.
+ # A previous *import* of the same name from the current module is
+ # replaced; any other duplicate raises DuplicateError. Conformance
+ # directives (EXPORTS, MAKE_ENUM, USE_VALS_EXT, USER_DEFINED, NO_EMIT,
+ # TYPE_RENAME, TYPE_ATTR, PDU) are applied here.
+ #print("eth_reg_type(ident='%s', type='%s')" % (ident, val.type))
+ if ident in self.type:
+ if self.type[ident]['import'] and (self.type[ident]['import'] == self.Module()) :
+ # replace imported type
+ del self.type[ident]
+ self.type_imp.remove(ident)
+ else:
+ #print('DuplicateError: import=', self.type[ident]['import'], 'module=', self.Module())
+ raise DuplicateError("type", ident)
+ val.ident = ident
+ self.type[ident] = { 'val' : val, 'import' : None }
+ self.type[ident]['module'] = self.Module()
+ self.type[ident]['proto'] = self.proto
+ # Nested types (ident contains '/') get a generated type name;
+ # top-level types use the C-mangled identifier.
+ if len(ident.split('/')) > 1:
+ self.type[ident]['tname'] = val.eth_tname()
+ else:
+ self.type[ident]['tname'] = asn2c(ident)
+ if mod :
+ mident = "$%s$%s" % (mod, ident)
+ else:
+ mident = None
+ self.type[ident]['export'] = self.conform.use_item('EXPORTS', ident)
+ self.type[ident]['enum'] = self.conform.use_item('MAKE_ENUM', ident)
+ self.type[ident]['vals_ext'] = self.conform.use_item('USE_VALS_EXT', ident)
+ self.type[ident]['user_def'] = self.conform.use_item('USER_DEFINED', ident)
+ # Module-qualified NO_EMIT takes precedence over the plain entry.
+ if mident and self.conform.check_item('NO_EMIT', mident) :
+ self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', mident)
+ else:
+ self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', ident)
+ self.type[ident]['tname'] = self.conform.use_item('TYPE_RENAME', ident, val_dflt=self.type[ident]['tname'])
+ self.type[ident]['ethname'] = ''
+ # References/tagged/selection types resolve display attributes later;
+ # concrete types compute them from the parsed value now.
+ if (val.type == 'Type_Ref') or (val.type == 'TaggedType') or (val.type == 'SelectionType') :
+ self.type[ident]['attr'] = {}
+ else:
+ (ftype, display) = val.eth_ftype(self)
+ self.type[ident]['attr'] = { 'TYPE' : ftype, 'DISPLAY' : display,
+ 'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }
+ self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident))
+ self.type_ord.append(ident)
+ # PDU
+ if (self.conform.check_item('PDU', ident)):
+ self.eth_reg_field(ident, ident, impl=val.HasImplicitTag(self), pdu=self.conform.use_item('PDU', ident))
+
+ #--- eth_reg_objectclass ----------------------------------------------------------
+ def eth_reg_objectclass(self, ident, val):
+ # Register a locally defined object class. A prior import from the
+ # current module is replaced; a duplicated alias 'CLASS1 ::= CLASS2'
+ # pointing at the same class is ignored; anything else raises.
+ #print "eth_reg_objectclass(ident='%s')" % (ident)
+ if ident in self.objectclass:
+ if self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == self.Module()) :
+ # replace imported object class
+ del self.objectclass[ident]
+ self.objectclass_imp.remove(ident)
+ elif isinstance(self.objectclass[ident]['val'], Class_Ref) and \
+ isinstance(val, Class_Ref) and \
+ (self.objectclass[ident]['val'].val == val.val):
+ pass # ignore duplicated CLASS1 ::= CLASS2
+ else:
+ raise DuplicateError("object class", ident)
+ self.objectclass[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto }
+ self.objectclass[ident]['val'] = val
+ self.objectclass[ident]['export'] = self.conform.use_item('EXPORTS', ident)
+ self.objectclass_ord.append(ident)
+
+ #--- eth_reg_value ----------------------------------------------------------
+ def eth_reg_value(self, ident, type, value, ethname=None):
+ # Register a locally defined value. A prior import from the current
+ # module is replaced; a repeat carrying an explicit 'ethname' merely
+ # updates that name; any other duplicate raises DuplicateError.
+ #print "eth_reg_value(ident='%s')" % (ident)
+ if ident in self.value:
+ if self.value[ident]['import'] and (self.value[ident]['import'] == self.Module()) :
+ # replace imported value
+ del self.value[ident]
+ self.value_imp.remove(ident)
+ elif ethname:
+ self.value[ident]['ethname'] = ethname
+ return
+ else:
+ raise DuplicateError("value", ident)
+ self.value[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto,
+ 'type' : type, 'value' : value,
+ 'no_emit' : False }
+ self.value[ident]['export'] = self.conform.use_item('EXPORTS', ident)
+ self.value[ident]['ethname'] = ''
+ if (ethname): self.value[ident]['ethname'] = ethname
+ self.value_ord.append(ident)
+
+ #--- eth_reg_field ----------------------------------------------------------
+ def eth_reg_field(self, ident, type, idx='', parent=None, impl=False, pdu=None):
+ # Register a protocol field 'ident' of ASN.1 type 'type'. Computes the
+ # default NAME/ABBREV attributes (with special handling for the
+ # synthetic SEQUENCE/SET OF item field), applies FIELD_ATTR overrides
+ # from the conformance file, and files the field into the PDU or plain
+ # field ordering. 'parent' adds a type dependency edge.
+ #print "eth_reg_field(ident='%s', type='%s')" % (ident, type)
+ if ident in self.field:
+ if pdu and (type == self.field[ident]['type']):
+ pass # OK already created PDU
+ else:
+ raise DuplicateError("field", ident)
+ self.field[ident] = {'type' : type, 'idx' : idx, 'impl' : impl, 'pdu' : pdu,
+ 'modified' : '', 'attr' : {} }
+ name = ident.split('/')[-1]
+ if self.remove_prefix and name.startswith(self.remove_prefix):
+ name = name[len(self.remove_prefix):]
+
+ if len(ident.split('/')) > 1 and name == ITEM_FIELD_NAME: # Sequence/Set of type
+ if len(self.field[ident]['type'].split('/')) > 1:
+ self.field[ident]['attr']['NAME'] = '"%s item"' % ident.split('/')[-2]
+ self.field[ident]['attr']['ABBREV'] = asn2c(ident.split('/')[-2] + name)
+ else:
+ self.field[ident]['attr']['NAME'] = '"%s"' % self.field[ident]['type']
+ self.field[ident]['attr']['ABBREV'] = asn2c(self.field[ident]['type'])
+ else:
+ self.field[ident]['attr']['NAME'] = '"%s"' % name
+ self.field[ident]['attr']['ABBREV'] = asn2c(name)
+ if self.conform.check_item('FIELD_ATTR', ident):
+ # Mark the field as modified so hf entries with conformance-tweaked
+ # attributes are not merged with unmodified ones of the same type.
+ self.field[ident]['modified'] = '#' + str(id(self))
+ self.field[ident]['attr'].update(self.conform.use_item('FIELD_ATTR', ident))
+ if (pdu):
+ self.field[ident]['pdu']['export'] = (self.conform.use_item('EXPORTS', ident + '_PDU') != 0)
+ self.pdu_ord.append(ident)
+ else:
+ self.field_ord.append(ident)
+ if parent:
+ self.eth_dep_add(parent, type)
+
+ def eth_dummy_eag_field_required(self):
+ # Request the placeholder extension-addition-group hf field; the name
+ # is expanded to its full 'dummy_hf_...' form in eth_prepare().
+ if (not self.dummy_eag_field):
+ self.dummy_eag_field = 'eag_field'
+
+ #--- eth_clean --------------------------------------------------------------
+ def eth_clean(self):
+ # Reset all per-run registration state (ASN.1 tables, module lists,
+ # generated type/value/field tables and dependency bookkeeping) so the
+ # context can be reused for another compilation pass.
+ self.proto = self.proto_opt;
+ #--- ASN.1 tables ----------------
+ self.assign = {}
+ self.assign_ord = []
+ self.field = {}
+ self.pdu_ord = []
+ self.field_ord = []
+ self.type = {}
+ self.type_ord = []
+ self.type_imp = []
+ self.type_dep = {}
+ self.sel_req = {}
+ self.sel_req_ord = []
+ self.comp_req_ord = []
+ self.vassign = {}
+ self.vassign_ord = []
+ self.value = {}
+ self.value_ord = []
+ self.value_imp = []
+ self.objectclass = {}
+ self.objectclass_ord = []
+ self.objectclass_imp = []
+ self.oassign = {}
+ self.oassign_ord = []
+ self.oassign_cls = {}
+ #--- Modules ------------
+ self.modules = []
+ self.exports_all = False
+ self.exports = []
+ self.cexports = []
+ self.vexports = []
+ #--- types -------------------
+ self.eth_type = {}
+ self.eth_type_ord = []
+ self.eth_export_ord = []
+ self.eth_type_dupl = {}
+ self.named_bit = []
+ #--- value dependencies -------------------
+ self.value_dep = {}
+ #--- values -------------------
+ self.eth_value = {}
+ self.eth_value_ord = []
+ #--- fields -------------------------
+ self.eth_hf = {}
+ self.eth_hf_ord = []
+ self.eth_hfpdu_ord = []
+ self.eth_hf_dupl = {}
+ self.dummy_eag_field = None
+ #--- type dependencies -------------------
+ self.eth_type_ord1 = []
+ self.eth_dep_cycle = []
+ self.dep_cycle_eth_type = {}
+ #--- value dependencies and export -------------------
+ self.eth_value_ord1 = []
+ self.eth_vexport_ord = []
+
+ #--- eth_prepare ------------------------------------------------------------
+ def eth_prepare(self):
+ # Main preparation pass run after all registrations: resolves PDUs,
+ # folds named values into integer types, expands COMPONENTS OF and
+ # selection types, builds the generated (eth_*) type/value/field
+ # tables with duplicate-name disambiguation, computes type dependency
+ # order and cycles, and collects exported tags/attributes/values.
+ self.eproto = asn2c(self.proto)
+
+ #--- dummy types/fields for PDU registration ---
+ nm = 'NULL'
+ if (self.conform.check_item('PDU', nm)):
+ self.eth_reg_type('_dummy/'+nm, NullType())
+ self.eth_reg_field(nm, '_dummy/'+nm, pdu=self.conform.use_item('PDU', nm))
+
+ #--- required PDUs ----------------------------
+ for t in self.type_ord:
+ pdu = self.type[t]['val'].eth_need_pdu(self)
+ if not pdu: continue
+ f = pdu['type']
+ pdu['reg'] = None
+ pdu['hidden'] = False
+ pdu['need_decl'] = True
+ if f not in self.field:
+ self.eth_reg_field(f, f, pdu=pdu)
+
+ #--- values -> named values -------------------
+ # Values typed by a local IntegerType become its named values and are
+ # suppressed from standalone emission; affected types refresh STRINGS.
+ t_for_update = {}
+ for v in self.value_ord:
+ if (self.value[v]['type'].type == 'Type_Ref') or self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
+ if self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
+ tnm = self.conform.use_item('ASSIGN_VALUE_TO_TYPE', v)
+ else:
+ tnm = self.value[v]['type'].val
+ if tnm in self.type \
+ and not self.type[tnm]['import'] \
+ and (self.type[tnm]['val'].type == 'IntegerType'):
+ self.type[tnm]['val'].add_named_value(v, self.value[v]['value'])
+ self.value[v]['no_emit'] = True
+ t_for_update[tnm] = True
+ for t in list(t_for_update.keys()):
+ self.type[t]['attr']['STRINGS'] = self.type[t]['val'].eth_strings()
+ self.type[t]['attr'].update(self.conform.use_item('TYPE_ATTR', t))
+
+ #--- required components of ---------------------------
+ #print "self.comp_req_ord = ", self.comp_req_ord
+ for t in self.comp_req_ord:
+ self.type[t]['val'].eth_reg_sub(t, self, components_available=True)
+
+ #--- required selection types ---------------------------
+ #print "self.sel_req_ord = ", self.sel_req_ord
+ for t in self.sel_req_ord:
+ tt = self.sel_req[t]['typ']
+ if tt not in self.type:
+ self.dummy_import_type(t)
+ elif self.type[tt]['import']:
+ self.eth_import_type(t, self.type[tt]['import'], self.type[tt]['proto'])
+ else:
+ self.type[tt]['val'].sel_req(t, self.sel_req[t]['sel'], self)
+
+ #--- types -------------------
+ for t in self.type_imp: # imported types
+ nm = asn2c(t)
+ self.eth_type[nm] = { 'import' : self.type[t]['import'],
+ 'proto' : asn2c(self.type[t]['proto']),
+ 'attr' : {}, 'ref' : []}
+ self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
+ self.type[t]['ethname'] = nm
+ for t in self.type_ord: # dummy import for missing type reference
+ tp = self.type[t]['val']
+ #print "X : %s %s " % (t, tp.type)
+ if isinstance(tp, TaggedType):
+ #print "%s : %s " % (tp.type, t)
+ tp = tp.val
+ if isinstance(tp, Type_Ref):
+ #print "%s : %s ::= %s " % (tp.type, t, tp.val)
+ if tp.val not in self.type:
+ self.dummy_import_type(tp.val)
+ # Assign a generated (C-safe) name to every local type; nested or
+ # conformance-customised types get T_/derived names, and clashes are
+ # disambiguated with a _NN suffix tracked in eth_type_dupl.
+ for t in self.type_ord:
+ nm = self.type[t]['tname']
+ if ((nm.find('#') >= 0) or
+ ((len(t.split('/'))>1) and
+ (self.conform.get_fn_presence(t) or self.conform.check_item('FN_PARS', t) or
+ self.conform.get_fn_presence('/'.join((t,ITEM_FIELD_NAME))) or self.conform.check_item('FN_PARS', '/'.join((t,ITEM_FIELD_NAME)))) and
+ not self.conform.check_item('TYPE_RENAME', t))):
+ if len(t.split('/')) == 2 and t.split('/')[1] == ITEM_FIELD_NAME: # Sequence of type at the 1st level
+ nm = t.split('/')[0] + t.split('/')[1]
+ elif t.split('/')[-1] == ITEM_FIELD_NAME: # Sequence/Set of type at next levels
+ nm = 'T_' + self.conform.use_item('FIELD_RENAME', '/'.join(t.split('/')[0:-1]), val_dflt=t.split('/')[-2]) + t.split('/')[-1]
+ elif t.split('/')[-1] == UNTAG_TYPE_NAME: # Untagged type
+ nm = self.type['/'.join(t.split('/')[0:-1])]['ethname'] + '_U'
+ else:
+ nm = 'T_' + self.conform.use_item('FIELD_RENAME', t, val_dflt=t.split('/')[-1])
+ nm = asn2c(nm)
+ if nm in self.eth_type:
+ if nm in self.eth_type_dupl:
+ self.eth_type_dupl[nm].append(t)
+ else:
+ self.eth_type_dupl[nm] = [self.eth_type[nm]['ref'][0], t]
+ nm += '_%02d' % (len(self.eth_type_dupl[nm])-1)
+ if nm in self.eth_type:
+ self.eth_type[nm]['ref'].append(t)
+ else:
+ self.eth_type_ord.append(nm)
+ self.eth_type[nm] = { 'import' : None, 'proto' : self.eproto, 'export' : 0, 'enum' : 0, 'vals_ext' : 0,
+ 'user_def' : EF_TYPE|EF_VALS, 'no_emit' : EF_TYPE|EF_VALS,
+ 'val' : self.type[t]['val'],
+ 'attr' : {}, 'ref' : [t]}
+ self.type[t]['ethname'] = nm
+ if (not self.eth_type[nm]['export'] and self.type[t]['export']): # new export
+ self.eth_export_ord.append(nm)
+ # Merge flags across all ASN.1 types that share this generated name.
+ self.eth_type[nm]['export'] |= self.type[t]['export']
+ self.eth_type[nm]['enum'] |= self.type[t]['enum']
+ self.eth_type[nm]['vals_ext'] |= self.type[t]['vals_ext']
+ self.eth_type[nm]['user_def'] &= self.type[t]['user_def']
+ self.eth_type[nm]['no_emit'] &= self.type[t]['no_emit']
+ # '$$' is a placeholder meaning "use the generated value_string".
+ if self.type[t]['attr'].get('STRINGS') == '$$':
+ use_ext = self.type[t]['vals_ext']
+ if (use_ext):
+ self.eth_type[nm]['attr']['STRINGS'] = '&%s_ext' % (self.eth_vals_nm(nm))
+ else:
+ if self.eth_type[nm]['val'].type == 'IntegerType' \
+ and self.eth_type[nm]['val'].HasConstraint() \
+ and self.eth_type[nm]['val'].constr.Needs64b(self):
+ self.eth_type[nm]['attr']['STRINGS'] = 'VALS64(%s)' % (self.eth_vals_nm(nm))
+ else:
+ self.eth_type[nm]['attr']['STRINGS'] = 'VALS(%s)' % (self.eth_vals_nm(nm))
+ self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
+ # Collect named bits (one hf boolean per bit) and decide which types
+ # need an ett_ subtree variable.
+ # NOTE(review): 'old_val' is written but never read in this loop.
+ for t in self.eth_type_ord:
+ bits = self.eth_type[t]['val'].eth_named_bits()
+ if (bits):
+ old_val = 0
+ for (val, id) in bits:
+ self.named_bit.append({'name' : id, 'val' : val,
+ 'ethname' : 'hf_%s_%s_%s' % (self.eproto, t, asn2c(id)),
+ 'ftype' : 'FT_BOOLEAN', 'display' : '8',
+ 'strings' : 'NULL',
+ 'bitmask' : '0x'+('80','40','20','10','08','04','02','01')[val%8]})
+ old_val = val + 1
+ if self.eth_type[t]['val'].eth_need_tree():
+ self.eth_type[t]['tree'] = "ett_%s_%s" % (self.eth_type[t]['proto'], t)
+ else:
+ self.eth_type[t]['tree'] = None
+
+ #--- register values from enums ------------
+ for t in self.eth_type_ord:
+ if (self.eth_type[t]['val'].eth_has_enum(t, self)):
+ self.eth_type[t]['val'].reg_enum_vals(t, self)
+
+ #--- value dependencies -------------------
+ for v in self.value_ord:
+ if isinstance (self.value[v]['value'], Value):
+ dep = self.value[v]['value'].get_dep()
+ else:
+ dep = self.value[v]['value']
+ if dep and dep in self.value:
+ self.value_dep.setdefault(v, []).append(dep)
+
+ #--- exports all necessary values
+ # Transitively mark every non-imported value an exported value
+ # depends on as exported too.
+ for v in self.value_ord:
+ if not self.value[v]['export']: continue
+ deparr = self.value_dep.get(v, [])
+ while deparr:
+ d = deparr.pop()
+ if not self.value[d]['import']:
+ if not self.value[d]['export']:
+ self.value[d]['export'] = EF_TYPE
+ deparr.extend(self.value_dep.get(d, []))
+
+ #--- values -------------------
+ for v in self.value_imp:
+ nm = asn2c(v)
+ self.eth_value[nm] = { 'import' : self.value[v]['import'],
+ 'proto' : asn2c(self.value[v]['proto']),
+ 'ref' : []}
+ self.value[v]['ethname'] = nm
+ for v in self.value_ord:
+ if (self.value[v]['ethname']):
+ continue
+ if (self.value[v]['no_emit']):
+ continue
+ nm = asn2c(v)
+ self.eth_value[nm] = { 'import' : None,
+ 'proto' : asn2c(self.value[v]['proto']),
+ 'export' : self.value[v]['export'], 'ref' : [v] }
+ self.eth_value[nm]['value'] = self.value[v]['value']
+ self.eth_value_ord.append(nm)
+ self.value[v]['ethname'] = nm
+
+ #--- fields -------------------------
+ # Build hf entries; fields with the same name and same (possibly
+ # conformance-modified) type share one hf, others get _NN suffixes
+ # tracked in eth_hf_dupl.
+ for f in (self.pdu_ord + self.field_ord):
+ if len(f.split('/')) > 1 and f.split('/')[-1] == ITEM_FIELD_NAME: # Sequence/Set of type
+ nm = self.conform.use_item('FIELD_RENAME', '/'.join(f.split('/')[0:-1]), val_dflt=f.split('/')[-2]) + f.split('/')[-1]
+ else:
+ nm = f.split('/')[-1]
+ nm = self.conform.use_item('FIELD_RENAME', f, val_dflt=nm)
+ nm = asn2c(nm)
+ if (self.field[f]['pdu']):
+ nm += '_PDU'
+ if (not self.merge_modules or self.field[f]['pdu']['export']):
+ nm = self.eproto + '_' + nm
+ t = self.field[f]['type']
+ if t in self.type:
+ ethtype = self.type[t]['ethname']
+ else: # undefined type
+ ethtype = self.dummy_import_type(t)
+ ethtypemod = ethtype + self.field[f]['modified']
+ if nm in self.eth_hf:
+ if nm in self.eth_hf_dupl:
+ if ethtypemod in self.eth_hf_dupl[nm]:
+ nm = self.eth_hf_dupl[nm][ethtypemod]
+ self.eth_hf[nm]['ref'].append(f)
+ self.field[f]['ethname'] = nm
+ continue
+ else:
+ nmx = nm + ('_%02d' % (len(self.eth_hf_dupl[nm])))
+ self.eth_hf_dupl[nm][ethtype] = nmx
+ nm = nmx
+ else:
+ if (self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified']) == ethtypemod:
+ self.eth_hf[nm]['ref'].append(f)
+ self.field[f]['ethname'] = nm
+ continue
+ else:
+ nmx = nm + '_01'
+ self.eth_hf_dupl[nm] = {self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified'] : nm, \
+ ethtypemod : nmx}
+ nm = nmx
+ if (self.field[f]['pdu']):
+ self.eth_hfpdu_ord.append(nm)
+ else:
+ self.eth_hf_ord.append(nm)
+ fullname = 'hf_%s_%s' % (self.eproto, nm)
+ attr = self.eth_get_type_attr(self.field[f]['type']).copy()
+ attr.update(self.field[f]['attr'])
+ if (self.NAPI() and 'NAME' in attr):
+ attr['NAME'] += self.field[f]['idx']
+ attr.update(self.conform.use_item('EFIELD_ATTR', nm))
+ use_vals_ext = self.eth_type[ethtype].get('vals_ext')
+ if (use_vals_ext):
+ attr['DISPLAY'] += '|BASE_EXT_STRING'
+ self.eth_hf[nm] = {'fullname' : fullname, 'pdu' : self.field[f]['pdu'],
+ 'ethtype' : ethtype, 'modified' : self.field[f]['modified'],
+ 'attr' : attr.copy(),
+ 'ref' : [f]}
+ self.field[f]['ethname'] = nm
+ if (self.dummy_eag_field):
+ # Prepending "dummy_" avoids matching checkhf.pl.
+ self.dummy_eag_field = 'dummy_hf_%s_%s' % (self.eproto, self.dummy_eag_field)
+ #--- type dependencies -------------------
+ (self.eth_type_ord1, self.eth_dep_cycle) = dependency_compute(self.type_ord, self.type_dep, map_fn = lambda t: self.type[t]['ethname'], ignore_fn = lambda t: self.type[t]['import'])
+ i = 0
+ while i < len(self.eth_dep_cycle):
+ t = self.type[self.eth_dep_cycle[i][0]]['ethname']
+ self.dep_cycle_eth_type.setdefault(t, []).append(i)
+ i += 1
+
+ #--- value dependencies and export -------------------
+ for v in self.eth_value_ord:
+ if self.eth_value[v]['export']:
+ self.eth_vexport_ord.append(v)
+ else:
+ self.eth_value_ord1.append(v)
+
+ #--- export tags, values, ... ---
+ # Publish per-module tag, attribute and value tables consumed by the
+ # *_from_all() lookups of importing compilations.
+ for t in self.exports:
+ if t not in self.type:
+ continue
+ if self.type[t]['import']:
+ continue
+ m = self.type[t]['module']
+ if not self.Per() and not self.Oer():
+ if m not in self.all_tags:
+ self.all_tags[m] = {}
+ self.all_tags[m][t] = self.type[t]['val'].GetTTag(self)
+ if m not in self.all_type_attr:
+ self.all_type_attr[m] = {}
+ self.all_type_attr[m][t] = self.eth_get_type_attr(t).copy()
+ for v in self.vexports:
+ if v not in self.value:
+ continue
+ if self.value[v]['import']:
+ continue
+ m = self.value[v]['module']
+ if m not in self.all_vals:
+ self.all_vals[m] = {}
+ vv = self.value[v]['value']
+ if isinstance (vv, Value):
+ vv = vv.to_str(self)
+ self.all_vals[m][v] = vv
+
+ #--- eth_vals_nm ------------------------------------------------------------
+ def eth_vals_nm(self, tname):
+ # Return the C identifier of the value_string array for type 'tname';
+ # prefixed with the protocol name unless exported with EF_NO_PROT.
+ out = ""
+ if (not self.eth_type[tname]['export'] & EF_NO_PROT):
+ out += "%s_" % (self.eproto)
+ out += "%s_vals" % (tname)
+ return out
+
+ #--- eth_vals ---------------------------------------------------------------
+ def eth_vals(self, tname, vals):
+ # Emit the C value_string (or val64_string for 64-bit-constrained
+ # integers) array for 'tname' from (value, name) pairs in 'vals'.
+ # With USE_VALS_EXT the entries are sorted numerically and a
+ # value_string_ext wrapper is appended.
+ out = ""
+ has_enum = self.eth_type[tname]['enum'] & EF_ENUM
+ use_ext = self.eth_type[tname]['vals_ext']
+ if (use_ext):
+ vals.sort(key=lambda vals_entry: int(vals_entry[0]))
+ if (not self.eth_type[tname]['export'] & EF_VALS):
+ out += 'static '
+ if (self.eth_type[tname]['export'] & EF_VALS) and (self.eth_type[tname]['export'] & EF_TABLE):
+ out += 'static '
+ if self.eth_type[tname]['val'].HasConstraint() and self.eth_type[tname]['val'].constr.Needs64b(self) \
+ and self.eth_type[tname]['val'].type == 'IntegerType':
+ out += "const val64_string %s[] = {\n" % (self.eth_vals_nm(tname))
+ else:
+ out += "const value_string %s[] = {\n" % (self.eth_vals_nm(tname))
+ for (val, id) in vals:
+ # With an enum, reference the enum symbol instead of the number.
+ if (has_enum):
+ vval = self.eth_enum_item(tname, id)
+ else:
+ vval = val
+ out += ' { %3s, "%s" },\n' % (vval, id)
+ out += " { 0, NULL }\n};\n"
+ if (use_ext):
+ out += "\nstatic value_string_ext %s_ext = VALUE_STRING_EXT_INIT(%s);\n" % (self.eth_vals_nm(tname), self.eth_vals_nm(tname))
+ return out
+
+ #--- eth_enum_prefix ------------------------------------------------------------
+ def eth_enum_prefix(self, tname, type=False):
+ # Compose the identifier prefix for enum symbols of 'tname',
+ # honouring the EF_NO_PROT / EF_NO_TYPE / EF_UCASE enum flags;
+ # 'type' forces inclusion of the type name (used for the enum tag).
+ out = ""
+ if (self.eth_type[tname]['export'] & EF_ENUM):
+ no_prot = self.eth_type[tname]['export'] & EF_NO_PROT
+ else:
+ no_prot = self.eth_type[tname]['enum'] & EF_NO_PROT
+ if (not no_prot):
+ out += self.eproto
+ if ((not self.eth_type[tname]['enum'] & EF_NO_TYPE) or type):
+ if (out): out += '_'
+ out += tname
+ if (self.eth_type[tname]['enum'] & EF_UCASE):
+ out = out.upper()
+ if (out): out += '_'
+ return out
+
+ #--- eth_enum_nm ------------------------------------------------------------
+ def eth_enum_nm(self, tname):
+ # Return the C typedef name of the enum generated for 'tname'.
+ out = self.eth_enum_prefix(tname, type=True)
+ out += "enum"
+ return out
+
+ #--- eth_enum_item ---------------------------------------------------------------
+ def eth_enum_item(self, tname, ident):
+ # Return the C enum symbol for value 'ident' of type 'tname'
+ # (prefix + C-mangled identifier, upper-cased under EF_UCASE).
+ out = self.eth_enum_prefix(tname)
+ out += asn2c(ident)
+ if (self.eth_type[tname]['enum'] & EF_UCASE):
+ out = out.upper()
+ return out
+
+ #--- eth_enum ---------------------------------------------------------------
+ def eth_enum(self, tname, vals):
+ # Emit the enumerated values of 'tname' as either #define lines
+ # (EF_DEFINE) or a C typedef enum, from (value, name) pairs.
+ out = ""
+ if (self.eth_type[tname]['enum'] & EF_DEFINE):
+ out += "/* enumerated values for %s */\n" % (tname)
+ for (val, id) in vals:
+ out += '#define %-12s %3s\n' % (self.eth_enum_item(tname, id), val)
+ else:
+ out += "typedef enum _%s {\n" % (self.eth_enum_nm(tname))
+ first_line = 1
+ for (val, id) in vals:
+ # Comma-separate entries; no trailing comma after the last one.
+ if (first_line == 1):
+ first_line = 0
+ else:
+ out += ",\n"
+ out += ' %-12s = %3s' % (self.eth_enum_item(tname, id), val)
+ out += "\n} %s;\n" % (self.eth_enum_nm(tname))
+ return out
+
+ #--- eth_bits ---------------------------------------------------------------
+ def eth_bits(self, tname, bits):
+ # Emit a NULL-terminated array of pointers to the per-bit hf variables
+ # of 'tname'; the array name is left as the %(TABLE)s template to be
+ # substituted by the caller.
+ out = ""
+ out += "static int * const "
+ out += "%(TABLE)s[] = {\n"
+ for (val, id) in bits:
+ out += ' &hf_%s_%s_%s,\n' % (self.eproto, tname, asn2c(id))
+ out += " NULL\n};\n"
+ return out
+
+ #--- eth_type_fn_h ----------------------------------------------------------
+ def eth_type_fn_h(self, tname):
+ # Emit the C prototype of the dissector function for 'tname'.
+ # BER dissectors take an extra implicit_tag parameter; PER/OER do not.
+ out = ""
+ if (not self.eth_type[tname]['export'] & EF_TYPE):
+ out += 'static '
+ out += "int "
+ if (self.Ber()):
+ out += "dissect_%s_%s(bool implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
+ elif (self.Per() or self.Oer()):
+ out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
+ out += ";\n"
+ return out
+
+ #--- eth_fn_call ------------------------------------------------------------
+ def eth_fn_call(self, fname, ret=None, indent=2, par=None):
+ # Emit a C call to 'fname'. 'ret' is either 'return' (emit a return
+ # statement) or a variable name to assign to. 'par' is a list of
+ # argument groups (each a list of strings), one group per output line,
+ # aligned under the opening parenthesis.
+ # NOTE(review): the par=None default would crash at len(par); callers
+ # presumably always pass a list - confirm before relying on default.
+ out = indent * ' '
+ if (ret):
+ if (ret == 'return'):
+ out += 'return '
+ else:
+ out += ret + ' = '
+ out += fname + '('
+ ind = len(out)
+ for i in range(len(par)):
+ if (i>0): out += ind * ' '
+ out += ', '.join(par[i])
+ if (i<(len(par)-1)): out += ',\n'
+ out += ');\n'
+ return out
+
+ def output_proto_root(self):
+ # Emit C code adding a hidden root proto item when the conformance
+ # file declared a PROTO_ROOT_NAME; returns '' otherwise.
+ out = ''
+ if self.conform.proto_root_name:
+ out += ' proto_item *prot_ti = proto_tree_add_item(tree, ' + self.conform.proto_root_name + ', tvb, 0, -1, ENC_NA);\n'
+ out += ' proto_item_set_hidden(prot_ti);\n'
+ return out
+
+ #--- eth_type_fn_hdr --------------------------------------------------------
+ def eth_type_fn_hdr(self, tname):
+ # Emit the opening of the dissector function for 'tname': signature
+ # (BER vs PER/OER variant), optional proto-root item, a recursion
+ # depth guard when the type participates in a dependency cycle, and
+ # any FN_HDR text from the conformance file.
+ out = '\n'
+ if (not self.eth_type[tname]['export'] & EF_TYPE):
+ out += 'static '
+ out += "int\n"
+ if (self.Ber()):
+ out += "dissect_%s_%s(bool implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
+ elif (self.Per() or self.Oer()):
+ out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
+ #if self.conform.get_fn_presence(tname):
+ # out += self.conform.get_fn_text(tname, 'FN_HDR')
+ #el
+ if self.conform.check_item('PDU', tname):
+ out += self.output_proto_root()
+
+ # Determine whether this type is part of a dependency cycle; if so,
+ # the generated dissector tracks recursion depth (matching check in
+ # eth_type_fn_ftr()).
+ cycle_size = 0
+ if self.eth_dep_cycle:
+ for cur_cycle in self.eth_dep_cycle:
+ t = self.type[cur_cycle[0]]['ethname']
+ if t == tname:
+ cycle_size = len(cur_cycle)
+ break
+
+ if cycle_size > 0:
+ out += f'''\
+ const int proto_id = GPOINTER_TO_INT(wmem_list_frame_data(wmem_list_tail(actx->pinfo->layers)));
+ const unsigned cycle_size = {cycle_size};
+ unsigned recursion_depth = p_get_proto_depth(actx->pinfo, proto_id);
+ DISSECTOR_ASSERT(recursion_depth <= MAX_RECURSION_DEPTH);
+ p_set_proto_depth(actx->pinfo, proto_id, recursion_depth + cycle_size);
+'''
+
+ if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
+ out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_HDR')
+ return out
+
+ #--- eth_type_fn_ftr --------------------------------------------------------
+ def eth_type_fn_ftr(self, tname):
+ # Emit the closing of the dissector function for 'tname': undo the
+ # recursion-depth bookkeeping added by eth_type_fn_hdr() for cyclic
+ # types, append FN_FTR conformance text, return offset and close.
+ out = '\n'
+ #if self.conform.get_fn_presence(tname):
+ # out += self.conform.get_fn_text(tname, 'FN_FTR')
+ #el
+
+ add_recursion_check = False
+ if self.eth_dep_cycle:
+ for cur_cycle in self.eth_dep_cycle:
+ t = self.type[cur_cycle[0]]['ethname']
+ if t == tname:
+ add_recursion_check = True
+ break
+
+ if add_recursion_check:
+ out += '''\
+ p_set_proto_depth(actx->pinfo, proto_id, recursion_depth - cycle_size);
+'''
+
+ if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
+ out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_FTR')
+ out += " return offset;\n"
+ out += "}\n"
+ return out
+
+ #--- eth_type_fn_body -------------------------------------------------------
+ def eth_type_fn_body(self, tname, body, pars=None):
+ # Return the dissector body for 'tname': the generated 'body' unless
+ # the conformance file supplies FN_BODY text for the first referencing
+ # ASN.1 type; optional %-template parameters are then substituted
+ # (a TypeError from a mismatched template is deliberately ignored).
+ out = body
+ #if self.conform.get_fn_body_presence(tname):
+ # out = self.conform.get_fn_text(tname, 'FN_BODY')
+ #el
+ if self.conform.get_fn_body_presence(self.eth_type[tname]['ref'][0]):
+ out = self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_BODY')
+ if pars:
+ try:
+ out = out % pars
+ except (TypeError):
+ pass
+ return out
+
+ #--- eth_out_pdu_decl ----------------------------------------------------------
+ def eth_out_pdu_decl(self, f):
+ # Emit the forward declaration of the PDU dissector for hf entry 'f'
+ # ('static' unless the PDU is exported).
+ t = self.eth_hf[f]['ethtype']
+ out = ''
+ if (not self.eth_hf[f]['pdu']['export']):
+ out += 'static '
+ out += 'int '
+ out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_);\n'
+ return out
+
+ #--- eth_output_hf ----------------------------------------------------------
+ def eth_output_hf (self):
+ # Write the 'hf' output file: one 'static int hf_... = -1;' line per
+ # field (PDUs first), per named bit, and for the dummy eag field.
+ # Skipped entirely when there is nothing to declare.
+ if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
+ fx = self.output.file_open('hf')
+ for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
+ fx.write("%-50s/* %s */\n" % ("static int %s = -1; " % (self.eth_hf[f]['fullname']), self.eth_hf[f]['ethtype']))
+ if (self.named_bit):
+ fx.write('/* named bits */\n')
+ for nb in self.named_bit:
+ fx.write("static int %s = -1;\n" % (nb['ethname']))
+ if (self.dummy_eag_field):
+ fx.write("static int %s = -1; /* never registered */\n" % (self.dummy_eag_field))
+ self.output.file_close(fx)
+
+ #--- eth_output_hf_arr ------------------------------------------------------
+ def eth_output_hf_arr (self):
+ if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
+ fx = self.output.file_open('hfarr')
+ for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
+ t = self.eth_hf[f]['ethtype']
+ if self.remove_prefix and t.startswith(self.remove_prefix):
+ t = t[len(self.remove_prefix):]
+ name=self.eth_hf[f]['attr']['NAME']
+ try: # Python < 3
+ trantab = maketrans("- ", "__")
+ except Exception:
+ trantab = str.maketrans("- ", "__")
+ name = name.translate(trantab)
+ namelower = name.lower()
+ tquoted_lower = '"' + t.lower() + '"'
+ # Try to avoid giving blurbs that give no more info than the name
+ if tquoted_lower == namelower or \
+ t == "NULL" or \
+ tquoted_lower.replace("t_", "") == namelower:
+ blurb = 'NULL'
+ else:
+ blurb = '"%s"' % (t)
+ attr = self.eth_hf[f]['attr'].copy()
+ if attr['TYPE'] == 'FT_NONE':
+ attr['ABBREV'] = '"%s.%s_element"' % (self.proto, attr['ABBREV'])
+ else:
+ attr['ABBREV'] = '"%s.%s"' % (self.proto, attr['ABBREV'])
+ if 'BLURB' not in attr:
+ attr['BLURB'] = blurb
+ fx.write(' { &%s,\n' % (self.eth_hf[f]['fullname']))
+ fx.write(' { %(NAME)s, %(ABBREV)s,\n' % attr)
+ fx.write(' %(TYPE)s, %(DISPLAY)s, %(STRINGS)s, %(BITMASK)s,\n' % attr)
+ fx.write(' %(BLURB)s, HFILL }},\n' % attr)
+ for nb in self.named_bit:
+ flt_str = nb['ethname']
+ # cut out hf_
+ flt_str = flt_str[3:]
+ flt_str = flt_str.replace('_' , '.')
+ #print("filter string=%s" % (flt_str))
+ fx.write(' { &%s,\n' % (nb['ethname']))
+ fx.write(' { "%s", "%s",\n' % (nb['name'], flt_str))
+ fx.write(' %s, %s, %s, %s,\n' % (nb['ftype'], nb['display'], nb['strings'], nb['bitmask']))
+ fx.write(' NULL, HFILL }},\n')
+ self.output.file_close(fx)
+
+ #--- eth_output_ett ---------------------------------------------------------
+ def eth_output_ett (self):
+ fx = self.output.file_open('ett')
+ fempty = True
+ #fx.write("static gint ett_%s = -1;\n" % (self.eproto))
+ for t in self.eth_type_ord:
+ if self.eth_type[t]['tree']:
+ fx.write("static gint %s = -1;\n" % (self.eth_type[t]['tree']))
+ fempty = False
+ self.output.file_close(fx, discard=fempty)
+
+ #--- eth_output_ett_arr -----------------------------------------------------
+ def eth_output_ett_arr(self):
+ fx = self.output.file_open('ettarr')
+ fempty = True
+ #fx.write(" &ett_%s,\n" % (self.eproto))
+ for t in self.eth_type_ord:
+ if self.eth_type[t]['tree']:
+ fx.write(" &%s,\n" % (self.eth_type[t]['tree']))
+ fempty = False
+ self.output.file_close(fx, discard=fempty)
+
    #--- eth_output_export ------------------------------------------------------
    def eth_output_export(self):
        """Write the 'exp' header file: value_string/val64_string
        declarations (or full tables), enum definitions, exported
        type-dissector prototypes and exported PDU declarations, driven by
        each type's EF_* export-flag bits."""
        fx = self.output.file_open('exp', ext='h')
        for t in self.eth_export_ord: # vals
            if (self.eth_type[t]['export'] & EF_ENUM) and self.eth_type[t]['val'].eth_has_enum(t, self):
                fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
            if (self.eth_type[t]['export'] & EF_VALS) and self.eth_type[t]['val'].eth_has_vals():
                if not self.eth_type[t]['export'] & EF_TABLE:
                    # Declaration only; the table itself is emitted elsewhere.
                    if self.eth_type[t]['export'] & EF_WS_DLL:
                        fx.write("WS_DLL_PUBLIC ")
                    else:
                        fx.write("extern ")
                    # Constrained integers needing 64 bits use val64_string.
                    if self.eth_type[t]['val'].HasConstraint() and self.eth_type[t]['val'].constr.Needs64b(self) \
                       and self.eth_type[t]['val'].type == 'IntegerType':
                        fx.write("const val64_string %s[];\n" % (self.eth_vals_nm(t)))
                    else:
                        fx.write("const value_string %s[];\n" % (self.eth_vals_nm(t)))
                else:
                    # EF_TABLE: emit the whole value table into the header.
                    fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
        for t in self.eth_export_ord: # functions
            if (self.eth_type[t]['export'] & EF_TYPE):
                if self.eth_type[t]['export'] & EF_EXTERN:
                    if self.eth_type[t]['export'] & EF_WS_DLL:
                        fx.write("WS_DLL_PUBLIC ")
                    else:
                        fx.write("extern ")
                fx.write(self.eth_type_fn_h(t))
        for f in self.eth_hfpdu_ord: # PDUs
            if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['export']):
                fx.write(self.eth_out_pdu_decl(f))
        self.output.file_close(fx)
+
    #--- eth_output_expcnf ------------------------------------------------------
    def eth_output_expcnf(self):
        """Write the export conformance file ('exp' with extension .cnf),
        consumed by other protocols that import this one: the module list,
        exported object classes, and (for BER) import tags plus the type
        attributes of every exported type."""
        fx = self.output.file_open('exp', ext='cnf')
        fx.write('#.MODULE\n')
        # Align the module/protocol columns on the longest module name.
        maxw = 0
        for (m, p) in self.modules:
            if (len(m) > maxw): maxw = len(m)
        for (m, p) in self.modules:
            fx.write("%-*s %s\n" % (maxw, m, p))
        fx.write('#.END\n\n')
        for cls in self.objectclass_ord:
            if self.objectclass[cls]['export']:
                cnm = cls
                if self.objectclass[cls]['export'] & EF_MODULE:
                    # Qualify the class name with its module: $Module$Class
                    cnm = "$%s$%s" % (self.objectclass[cls]['module'], cnm)
                fx.write('#.CLASS %s\n' % (cnm))
                maxw = 2
                for fld in self.objectclass[cls]['val'].fields:
                    w = len(fld.fld_repr()[0])
                    if (w > maxw): maxw = w
                for fld in self.objectclass[cls]['val'].fields:
                    repr = fld.fld_repr()
                    fx.write('%-*s %s\n' % (maxw, repr[0], ' '.join(repr[1:])))
                fx.write('#.END\n\n')
        if self.Ber():
            # BER importers need the tag of each exported type.
            fx.write('#.IMPORT_TAG\n')
            for t in self.eth_export_ord: # tags
                if (self.eth_type[t]['export'] & EF_TYPE):
                    fx.write('%-24s ' % self.eth_type[t]['ref'][0])
                    fx.write('%s %s\n' % self.eth_type[t]['val'].GetTag(self))
            fx.write('#.END\n\n')
        fx.write('#.TYPE_ATTR\n')
        for t in self.eth_export_ord: # attributes
            if (self.eth_type[t]['export'] & EF_TYPE):
                tnm = self.eth_type[t]['ref'][0]
                if self.eth_type[t]['export'] & EF_MODULE:
                    tnm = "$%s$%s" % (self.type[tnm]['module'], tnm)
                fx.write('%-24s ' % tnm)
                attr = self.eth_get_type_attr(self.eth_type[t]['ref'][0]).copy()
                fx.write('TYPE = %(TYPE)-9s DISPLAY = %(DISPLAY)-9s STRINGS = %(STRINGS)s BITMASK = %(BITMASK)s\n' % attr)
        fx.write('#.END\n\n')
        # keep_anyway: the .cnf must be written even if otherwise empty.
        self.output.file_close(fx, keep_anyway=True)
+
+ #--- eth_output_val ------------------------------------------------------
+ def eth_output_val(self):
+ fx = self.output.file_open('val', ext='h')
+ for v in self.eth_value_ord1:
+ vv = self.eth_value[v]['value']
+ if isinstance (vv, Value):
+ vv = vv.to_str(self)
+ fx.write("#define %-30s %s\n" % (v, vv))
+ for t in self.eth_type_ord1:
+ if self.eth_type[t]['import']:
+ continue
+ if self.eth_type[t]['val'].eth_has_enum(t, self) and not (self.eth_type[t]['export'] & EF_ENUM):
+ fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
+ self.output.file_close(fx)
+
+ #--- eth_output_valexp ------------------------------------------------------
+ def eth_output_valexp(self):
+ if (not len(self.eth_vexport_ord)): return
+ fx = self.output.file_open('valexp', ext='h')
+ for v in self.eth_vexport_ord:
+ vv = self.eth_value[v]['value']
+ if isinstance (vv, Value):
+ vv = vv.to_str(self)
+ fx.write("#define %-30s %s\n" % (v, vv))
+ self.output.file_close(fx)
+
    #--- eth_output_types -------------------------------------------------------
    def eth_output_types(self):
        """Write the 'fn' output section: forward declarations for PDUs and
        cyclic types, value_string tables, all type-dissector function
        bodies and, last, the PDU wrapper dissectors."""
        def out_pdu(f):
            # Emit the dissect_<f>() wrapper: set up the asn1 context for
            # the encoding in use (BER/PER/OER) and call the generated
            # dissector of the PDU's underlying type.
            t = self.eth_hf[f]['ethtype']
            impl = 'FALSE'
            out = ''
            if (not self.eth_hf[f]['pdu']['export']):
                out += 'static '
            out += 'int '
            out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {\n'
            out += self.output_proto_root()

            out += ' int offset = 0;\n'
            off_par = 'offset'
            ret_par = 'offset'
            if (self.Per()):
                if (self.Aligned()):
                    aligned = 'TRUE'
                else:
                    aligned = 'FALSE'
                out += " asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_PER', aligned, 'pinfo'),))
            if (self.Ber()):
                out += " asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_BER', 'TRUE', 'pinfo'),))
                par=((impl, 'tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            elif (self.Per()):
                par=(('tvb', off_par, '&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            elif (self.Oer()):
                out += " asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_OER', 'TRUE', 'pinfo'),))
                par=(('tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            else:
                par=((),)
            out += self.eth_fn_call('dissect_%s_%s' % (self.eth_type[t]['proto'], t), ret=ret_par, par=par)
            if (self.Per()):
                # PER offsets count bits; round up to whole octets.
                out += ' offset += 7; offset >>= 3;\n'
            out += ' return offset;\n'
            out += '}\n'
            return out
        #end out_pdu()
        fx = self.output.file_open('fn')
        pos = fx.tell()
        if (len(self.eth_hfpdu_ord)):
            first_decl = True
            for f in self.eth_hfpdu_ord:
                if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['need_decl']):
                    if first_decl:
                        fx.write('/*--- PDUs declarations ---*/\n')
                        first_decl = False
                    fx.write(self.eth_out_pdu_decl(f))
            if not first_decl:
                fx.write('\n')

        add_depth_define = False
        if self.eth_dep_cycle:
            # Forward-declare one representative per dependency cycle so
            # the mutually recursive dissectors compile.
            fx.write('/*--- Cyclic dependencies ---*/\n\n')
            i = 0
            while i < len(self.eth_dep_cycle):
                t = self.type[self.eth_dep_cycle[i][0]]['ethname']
                # only handle each Wireshark type at its first cycle entry
                if self.dep_cycle_eth_type[t][0] != i: i += 1; continue
                add_depth_define = True
                fx.write(''.join(['/* %s */\n' % ' -> '.join(self.eth_dep_cycle[i]) for i in self.dep_cycle_eth_type[t]]))
                if not self.eth_type[t]['export'] & EF_TYPE:
                    fx.write(self.eth_type_fn_h(t))
                else:
                    # already declared in the export header; keep a comment
                    fx.write('/*' + self.eth_type_fn_h(t).strip() + '*/\n')
                fx.write('\n')
                i += 1
            fx.write('\n')
        if add_depth_define:
            # Used by the recursion guards emitted in eth_type_fn_hdr/ftr.
            fx.write('#define MAX_RECURSION_DEPTH 100 // Arbitrarily chosen.\n')
        for t in self.eth_type_ord1:
            if self.eth_type[t]['import']:
                continue
            if self.eth_type[t]['val'].eth_has_vals():
                if self.eth_type[t]['no_emit'] & EF_VALS:
                    pass
                elif self.eth_type[t]['user_def'] & EF_VALS:
                    # user supplies the table; declare it extern only
                    if self.eth_type[t]['val'].HasConstraint() and self.eth_type[t]['val'].constr.Needs64b(self) \
                       and self.eth_type[t]['val'].type == 'IntegerType':
                        fx.write("extern const val64_string %s[];\n" % (self.eth_vals_nm(t)))
                    else:
                        fx.write("extern const value_string %s[];\n" % (self.eth_vals_nm(t)))
                elif (self.eth_type[t]['export'] & EF_VALS) and (self.eth_type[t]['export'] & EF_TABLE):
                    # table already emitted into the export header
                    pass
                else:
                    fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
            if self.eth_type[t]['no_emit'] & EF_TYPE:
                pass
            elif self.eth_type[t]['user_def'] & EF_TYPE:
                fx.write(self.eth_type_fn_h(t))
            else:
                fx.write(self.eth_type[t]['val'].eth_type_fn(self.eth_type[t]['proto'], t, self))
            fx.write('\n')
        if (len(self.eth_hfpdu_ord)):
            fx.write('/*--- PDUs ---*/\n\n')
            for f in self.eth_hfpdu_ord:
                if (self.eth_hf[f]['pdu']):
                    if (f in self.emitted_pdu):
                        fx.write(" /* %s already emitted */\n" % (f))
                    else:
                        fx.write(out_pdu(f))
                        self.emitted_pdu[f] = True
            fx.write('\n')
        # discard the file when nothing was written past the opening position
        fempty = pos == fx.tell()
        self.output.file_close(fx, discard=fempty)
+
+ #--- eth_output_dis_hnd -----------------------------------------------------
+ def eth_output_dis_hnd(self):
+ fx = self.output.file_open('dis-hnd')
+ fempty = True
+ for f in self.eth_hfpdu_ord:
+ pdu = self.eth_hf[f]['pdu']
+ if (pdu and pdu['reg'] and not pdu['hidden']):
+ dis = self.proto
+ if (pdu['reg'] != '.'):
+ dis += '.' + pdu['reg']
+ fx.write('static dissector_handle_t %s_handle;\n' % (asn2c(dis)))
+ fempty = False
+ fx.write('\n')
+ self.output.file_close(fx, discard=fempty)
+
+ #--- eth_output_dis_reg -----------------------------------------------------
+ def eth_output_dis_reg(self):
+ fx = self.output.file_open('dis-reg')
+ fempty = True
+ for f in self.eth_hfpdu_ord:
+ pdu = self.eth_hf[f]['pdu']
+ if (pdu and pdu['reg']):
+ new_prefix = ''
+ if (pdu['new']): new_prefix = 'new_'
+ dis = self.proto
+ if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
+ fx.write(' %sregister_dissector("%s", dissect_%s, proto_%s);\n' % (new_prefix, dis, f, self.eproto))
+ if (not pdu['hidden']):
+ fx.write(' %s_handle = find_dissector("%s");\n' % (asn2c(dis), dis))
+ fempty = False
+ fx.write('\n')
+ self.output.file_close(fx, discard=fempty)
+
    #--- eth_output_dis_tab -----------------------------------------------------
    def eth_output_dis_tab(self):
        """Write the 'dis-tab' section: dissector_add_uint/string() calls
        for NUM/STR registrations and register_*_oid_dissector() calls for
        BER/PER/OER OID registrations, from the #.REGISTER entries."""
        fx = self.output.file_open('dis-tab')
        fempty = True
        for k in self.conform.get_order('REGISTER'):
            reg = self.conform.use_item('REGISTER', k)
            if reg['pdu'] not in self.field: continue
            f = self.field[reg['pdu']]['ethname']
            pdu = self.eth_hf[f]['pdu']
            new_prefix = ''
            if (pdu['new']): new_prefix = 'new_'
            if (reg['rtype'] in ('NUM', 'STR')):
                rstr = ''
                if (reg['rtype'] == 'STR'):
                    rstr = 'string'
                else:
                    rstr = 'uint'
                if (pdu['reg']):
                    # PDU has a named registration: reuse its handle (or
                    # look it up when the handle variable is hidden).
                    dis = self.proto
                    if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
                    if (not pdu['hidden']):
                        hnd = '%s_handle' % (asn2c(dis))
                    else:
                        hnd = 'find_dissector("%s")' % (dis)
                else:
                    # anonymous handle created on the spot
                    hnd = '%screate_dissector_handle(dissect_%s, proto_%s)' % (new_prefix, f, self.eproto)
                rport = self.value_get_eth(reg['rport'])
                fx.write(' dissector_add_%s("%s", %s, %s);\n' % (rstr, reg['rtable'], rport, hnd))
            elif (reg['rtype'] in ('BER', 'PER', 'OER')):
                roid = self.value_get_eth(reg['roid'])
                fx.write(' %sregister_%s_oid_dissector(%s, dissect_%s, proto_%s, %s);\n' % (new_prefix, reg['rtype'].lower(), roid, f, self.eproto, reg['roidname']))
            fempty = False
        fx.write('\n')
        self.output.file_close(fx, discard=fempty)
+
+ #--- eth_output_syn_reg -----------------------------------------------------
+ def eth_output_syn_reg(self):
+ fx = self.output.file_open('syn-reg')
+ fempty = True
+ first_decl = True
+ for k in self.conform.get_order('SYNTAX'):
+ reg = self.conform.use_item('SYNTAX', k)
+ if reg['pdu'] not in self.field: continue
+ f = self.field[reg['pdu']]['ethname']
+ pdu = self.eth_hf[f]['pdu']
+ new_prefix = ''
+ if (pdu['new']): new_prefix = 'new_'
+ if first_decl:
+ fx.write(' /*--- Syntax registrations ---*/\n')
+ first_decl = False
+ fx.write(' %sregister_ber_syntax_dissector(%s, proto_%s, dissect_%s_PDU);\n' % (new_prefix, k, self.eproto, reg['pdu']));
+ fempty=False
+ self.output.file_close(fx, discard=fempty)
+
+ #--- eth_output_tables -----------------------------------------------------
+ def eth_output_tables(self):
+ for num in list(self.conform.report.keys()):
+ fx = self.output.file_open('table' + num)
+ for rep in self.conform.report[num]:
+ self.eth_output_table(fx, rep)
+ self.output.file_close(fx)
+
    #--- eth_output_table -----------------------------------------------------
    def eth_output_table(self, fx, rep):
        """Write one #.TABLE report fragment *rep* to *fx*.

        A fragment with a loop variable rep['var'] ('Class.fld...') expands
        rep['text'] once per information object of the class; field names
        prefixed '!' are excluded, '#' selects a numeric sort key and '@' a
        string sort key.  HDR/FTR fragments add surrounding blank lines;
        fragments without a variable are written verbatim.
        """
        if rep['type'] == 'HDR':
            fx.write('\n')
        if rep['var']:
            var = rep['var']
            var_list = var.split('.', 1)
            cls = var_list[0]
            del var_list[0]
            flds = []
            not_flds = []
            sort_flds = []
            for f in var_list:
                if f[0] == '!':
                    not_flds.append(f[1:])
                    continue
                if f[0] == '#':
                    # numeric sort key: keep the '#' so the key fn sees it
                    flds.append(f[1:])
                    sort_flds.append(f)
                    continue
                if f[0] == '@':
                    flds.append(f[1:])
                    sort_flds.append(f[1:])
                    continue
                flds.append(f)
            objs = {}
            objs_ord = []
            if (cls in self.oassign_cls):
                for ident in self.oassign_cls[cls]:
                    obj = self.get_obj_repr(ident, flds, not_flds)
                    if not obj:
                        continue
                    # pseudo-fields available to the template text
                    obj['_LOOP'] = var
                    obj['_DICT'] = str(obj)
                    objs[ident] = obj
                    objs_ord.append(ident)
                if (sort_flds):
                    # Sort identifiers according to the matching object in objs.
                    # The order is determined by sort_flds, keys prefixed by a
                    # '#' are compared numerically.
                    def obj_key_fn(name):
                        obj = objs[name]
                        return list(
                            int(obj[f[1:]]) if f[0] == '#' else obj[f]
                            for f in sort_flds
                        )
                    objs_ord.sort(key=obj_key_fn)
                for ident in objs_ord:
                    obj = objs[ident]
                    try:
                        text = rep['text'] % obj
                    except (KeyError):
                        # re-raise with the conformance-file location and
                        # the offending key for easier diagnosis
                        raise sys.exc_info()[0]("%s:%s invalid key %s for information object %s of %s" % (rep['fn'], rep['lineno'], sys.exc_info()[1], ident, var))
                    fx.write(text)
            else:
                fx.write("/* Unknown or empty loop list %s */\n" % (var))
        else:
            fx.write(rep['text'])
        if rep['type'] == 'FTR':
            fx.write('\n')
+
+ #--- dupl_report -----------------------------------------------------
+ def dupl_report(self):
+ # types
+ tmplist = sorted(self.eth_type_dupl.keys())
+ for t in tmplist:
+ msg = "The same type names for different types. Explicit type renaming is recommended.\n"
+ msg += t + "\n"
+ for tt in self.eth_type_dupl[t]:
+ msg += " %-20s %s\n" % (self.type[tt]['ethname'], tt)
+ warnings.warn_explicit(msg, UserWarning, '', 0)
+ # fields
+ tmplist = list(self.eth_hf_dupl.keys())
+ tmplist.sort()
+ for f in tmplist:
+ msg = "The same field names for different types. Explicit field renaming is recommended.\n"
+ msg += f + "\n"
+ for tt in list(self.eth_hf_dupl[f].keys()):
+ msg += " %-20s %-20s " % (self.eth_hf_dupl[f][tt], tt)
+ msg += ", ".join(self.eth_hf[self.eth_hf_dupl[f][tt]]['ref'])
+ msg += "\n"
+ warnings.warn_explicit(msg, UserWarning, '', 0)
+
    #--- eth_do_output ------------------------------------------------------------
    def eth_do_output(self):
        """Top-level output driver: optionally dump debug listings (debug
        flags 'a' = assignments, 't' = type/field tables), report duplicate
        names, then write all output sections and, if requested, the export
        conformance file."""
        if self.dbg('a'):
            print("\n# Assignments")
            for a in self.assign_ord:
                v = ' '
                if (self.assign[a]['virt']): v = '*'  # '*' marks virtual assignments
                print('{} {}'.format(v, a))
            print("\n# Value assignments")
            for a in self.vassign_ord:
                print(' {}'.format(a))
            print("\n# Information object assignments")
            for a in self.oassign_ord:
                print(" %-12s (%s)" % (a, self.oassign[a].cls))
        if self.dbg('t'):
            print("\n# Imported Types")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.type_imp:
                print("%-40s %-24s %-24s" % (t, self.type[t]['import'], self.type[t]['proto']))
            print("\n# Imported Values")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.value_imp:
                print("%-40s %-24s %-24s" % (t, self.value[t]['import'], self.value[t]['proto']))
            print("\n# Imported Object Classes")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.objectclass_imp:
                print("%-40s %-24s %-24s" % (t, self.objectclass[t]['import'], self.objectclass[t]['proto']))
            print("\n# Exported Types")
            print("%-31s %s" % ("Wireshark type", "Export Flag"))
            print("-" * 100)
            for t in self.eth_export_ord:
                print("%-31s 0x%02X" % (t, self.eth_type[t]['export']))
            print("\n# Exported Values")
            print("%-40s %s" % ("Wireshark name", "Value"))
            print("-" * 100)
            for v in self.eth_vexport_ord:
                vv = self.eth_value[v]['value']
                if isinstance (vv, Value):
                    vv = vv.to_str(self)
                print("%-40s %s" % (v, vv))
            print("\n# ASN.1 Object Classes")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.objectclass_ord:
                print("%-40s " % (t))
            print("\n# ASN.1 Types")
            print("%-49s %-24s %-24s" % ("ASN.1 unique name", "'tname'", "Wireshark type"))
            print("-" * 100)
            for t in self.type_ord:
                print("%-49s %-24s %-24s" % (t, self.type[t]['tname'], self.type[t]['ethname']))
            print("\n# Wireshark Types")
            print("Wireshark type References (ASN.1 types)")
            print("-" * 100)
            for t in self.eth_type_ord:
                sys.stdout.write("%-31s %d" % (t, len(self.eth_type[t]['ref'])))
                print(', '.join(self.eth_type[t]['ref']))
            print("\n# ASN.1 Values")
            print("%-40s %-18s %-20s %s" % ("ASN.1 unique name", "Type", "Value", "Wireshark value"))
            print("-" * 100)
            for v in self.value_ord:
                vv = self.value[v]['value']
                if isinstance (vv, Value):
                    vv = vv.to_str(self)
                print("%-40s %-18s %-20s %s" % (v, self.value[v]['type'].eth_tname(), vv, self.value[v]['ethname']))
            #print "\n# Wireshark Values"
            #print "%-40s %s" % ("Wireshark name", "Value")
            #print "-" * 100
            #for v in self.eth_value_ord:
            #  vv = self.eth_value[v]['value']
            #  if isinstance (vv, Value):
            #    vv = vv.to_str(self)
            #  print "%-40s %s" % (v, vv)
            print("\n# ASN.1 Fields")
            print("ASN.1 unique name Wireshark name ASN.1 type")
            print("-" * 100)
            for f in (self.pdu_ord + self.field_ord):
                print("%-40s %-20s %s" % (f, self.field[f]['ethname'], self.field[f]['type']))
            print("\n# Wireshark Fields")
            print("Wireshark name Wireshark type References (ASN.1 fields)")
            print("-" * 100)
            for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
                sys.stdout.write("%-30s %-20s %s" % (f, self.eth_hf[f]['ethtype'], len(self.eth_hf[f]['ref'])))
                print(', '.join(self.eth_hf[f]['ref']))
            #print "\n# Order after dependencies"
            #print '\n'.join(self.eth_type_ord1)
            print("\n# Cyclic dependencies")
            for c in self.eth_dep_cycle:
                print(' -> '.join(c))
        self.dupl_report()
        # Output base name: -o option if given, else the protocol name with
        # dots (invalid in file names) replaced by '-'.
        self.output.outnm = self.outnm_opt
        if (not self.output.outnm):
            self.output.outnm = self.proto
            self.output.outnm = self.output.outnm.replace('.', '-')
        if not self.justexpcnf:
            self.eth_output_hf()
            self.eth_output_ett()
            self.eth_output_types()
            self.eth_output_hf_arr()
            self.eth_output_ett_arr()
            self.eth_output_export()
            self.eth_output_val()
            self.eth_output_valexp()
            self.eth_output_dis_hnd()
            self.eth_output_dis_reg()
            self.eth_output_dis_tab()
            self.eth_output_syn_reg()
            self.eth_output_tables()
        if self.expcnf:
            self.eth_output_expcnf()
+
+ def dbg_modules(self):
+ def print_mod(m):
+ sys.stdout.write("%-30s " % (m))
+ dep = self.module[m][:]
+ for i in range(len(dep)):
+ if dep[i] not in self.module:
+ dep[i] = '*' + dep[i]
+ print(', '.join(dep))
+ # end of print_mod()
+ (mod_ord, mod_cyc) = dependency_compute(self.module_ord, self.module, ignore_fn = lambda t: t not in self.module)
+ print("\n# ASN.1 Moudules")
+ print("Module name Dependency")
+ print("-" * 100)
+ new_ord = False
+ for m in (self.module_ord):
+ print_mod(m)
+ new_ord = new_ord or (self.module_ord.index(m) != mod_ord.index(m))
+ if new_ord:
+ print("\n# ASN.1 Moudules - in dependency order")
+ print("Module name Dependency")
+ print("-" * 100)
+ for m in (mod_ord):
+ print_mod(m)
+ if mod_cyc:
+ print("\nCyclic dependencies:")
+ for i in (list(range(len(mod_cyc)))):
+ print("%02d: %s" % (i + 1, str(mod_cyc[i])))
+
+
+#--- EthCnf -------------------------------------------------------------------
+class EthCnf:
    def __init__(self):
        """Conformance-file (.cnf) database: per-directive tables, collected
        #.FN_* texts, #.TABLE reports and parsing state."""
        self.ectx = None            # owning context; accessed as self.ectx.conform in omit_assignment()
        self.tblcfg = {}            # per-table config: value-slot name/default, dup/use checks
        self.table = {}             # table name -> {key -> entry dict}
        self.order = {}             # table name -> keys in insertion order
        self.fn = {}                # name -> {'FN_HDR'/'FN_FTR'/'FN_BODY' -> text record}
        self.report = {}            # #.TABLE report fragments keyed by table number
        self.suppress_line = False  # True: omit #line directives in get_fn_text() output
        self.include_path = []      # presumably the search path for included .cnf files -- used by read(); TODO confirm
        self.proto_root_name = None
        # Value name Default value Duplicity check Usage check
        self.tblcfg['EXPORTS'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['MAKE_ENUM'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['USE_VALS_EXT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['PDU'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['SYNTAX'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['REGISTER'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['USER_DEFINED'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['NO_EMIT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['MODULE'] = { 'val_nm' : 'proto', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['OMIT_ASSIGNMENT'] = { 'val_nm' : 'omit', 'val_dflt' : False, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['NO_OMIT_ASSGN'] = { 'val_nm' : 'omit', 'val_dflt' : True, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['VIRTUAL_ASSGN'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['SET_TYPE'] = { 'val_nm' : 'type', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['TYPE_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['FIELD_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['IMPORT_TAG'] = { 'val_nm' : 'ttag', 'val_dflt' : (), 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['FN_PARS'] = { 'val_nm' : 'pars', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['TYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['ETYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['FIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['EFIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['ASSIGNED_ID'] = { 'val_nm' : 'ids', 'val_dflt' : {}, 'chk_dup' : False,'chk_use' : False }
        self.tblcfg['ASSIGN_VALUE_TO_TYPE'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }

        # one empty table + order list per configured directive
        for k in list(self.tblcfg.keys()) :
            self.table[k] = {}
            self.order[k] = []
+
+ def add_item(self, table, key, fn, lineno, **kw):
+ if self.tblcfg[table]['chk_dup'] and key in self.table[table]:
+ warnings.warn_explicit("Duplicated %s for %s. Previous one is at %s:%d" %
+ (table, key, self.table[table][key]['fn'], self.table[table][key]['lineno']),
+ UserWarning, fn, lineno)
+ return
+ self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
+ self.table[table][key].update(kw)
+ self.order[table].append(key)
+
+ def update_item(self, table, key, fn, lineno, **kw):
+ if key not in self.table[table]:
+ self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
+ self.order[table].append(key)
+ self.table[table][key][self.tblcfg[table]['val_nm']] = {}
+ self.table[table][key][self.tblcfg[table]['val_nm']].update(kw[self.tblcfg[table]['val_nm']])
+
+ def get_order(self, table):
+ return self.order[table]
+
+ def check_item(self, table, key):
+ return key in self.table[table]
+
+ def copy_item(self, table, dst_key, src_key):
+ if (src_key in self.table[table]):
+ self.table[table][dst_key] = self.table[table][src_key]
+
+ def check_item_value(self, table, key, **kw):
+ return key in self.table[table] and kw.get('val_nm', self.tblcfg[table]['val_nm']) in self.table[table][key]
+
+ def use_item(self, table, key, **kw):
+ vdflt = kw.get('val_dflt', self.tblcfg[table]['val_dflt'])
+ if key not in self.table[table]: return vdflt
+ vname = kw.get('val_nm', self.tblcfg[table]['val_nm'])
+ #print "use_item() - set used for %s %s" % (table, key)
+ self.table[table][key]['used'] = True
+ return self.table[table][key].get(vname, vdflt)
+
+ def omit_assignment(self, type, ident, module):
+ if self.ectx.conform.use_item('OMIT_ASSIGNMENT', ident):
+ return True
+ if self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*') or \
+ self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type) or \
+ self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*/'+module) or \
+ self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type+'/'+module):
+ return self.ectx.conform.use_item('NO_OMIT_ASSGN', ident)
+ return False
+
+ def add_fn_line(self, name, ctx, line, fn, lineno):
+ if name not in self.fn:
+ self.fn[name] = {'FN_HDR' : None, 'FN_FTR' : None, 'FN_BODY' : None}
+ if (self.fn[name][ctx]):
+ self.fn[name][ctx]['text'] += line
+ else:
+ self.fn[name][ctx] = {'text' : line, 'used' : False,
+ 'fn' : fn, 'lineno' : lineno}
+ def get_fn_presence(self, name):
+ #print "get_fn_presence('%s'):%s" % (name, str(self.fn.has_key(name)))
+ #if self.fn.has_key(name): print self.fn[name]
+ return name in self.fn
+ def get_fn_body_presence(self, name):
+ return name in self.fn and self.fn[name]['FN_BODY']
+ def get_fn_text(self, name, ctx):
+ if (name not in self.fn):
+ return '';
+ if (not self.fn[name][ctx]):
+ return '';
+ self.fn[name][ctx]['used'] = True
+ out = self.fn[name][ctx]['text']
+ if (not self.suppress_line):
+ out = '#line %u "%s"\n%s\n' % (self.fn[name][ctx]['lineno'], rel_dissector_path(self.fn[name][ctx]['fn']), out);
+ return out
+
+ def add_pdu(self, par, fn, lineno):
+ #print "add_pdu(par=%s, %s, %d)" % (str(par), fn, lineno)
+ (reg, hidden) = (None, False)
+ if (len(par) > 1): reg = par[1]
+ if (reg and reg[0]=='@'): (reg, hidden) = (reg[1:], True)
+ attr = {'new' : False, 'reg' : reg, 'hidden' : hidden, 'need_decl' : False, 'export' : False}
+ self.add_item('PDU', par[0], attr=attr, fn=fn, lineno=lineno)
+ return
+
+ def add_syntax(self, par, fn, lineno):
+ #print "add_syntax(par=%s, %s, %d)" % (str(par), fn, lineno)
+ if( (len(par) >=2)):
+ name = par[1]
+ else:
+ name = '"'+par[0]+'"'
+ attr = { 'pdu' : par[0] }
+ self.add_item('SYNTAX', name, attr=attr, fn=fn, lineno=lineno)
+ return
+
+ def add_register(self, pdu, par, fn, lineno):
+ #print "add_register(pdu=%s, par=%s, %s, %d)" % (pdu, str(par), fn, lineno)
+ if (par[0] in ('N', 'NUM')): rtype = 'NUM'; (pmin, pmax) = (2, 2)
+ elif (par[0] in ('S', 'STR')): rtype = 'STR'; (pmin, pmax) = (2, 2)
+ elif (par[0] in ('B', 'BER')): rtype = 'BER'; (pmin, pmax) = (1, 2)
+ elif (par[0] in ('P', 'PER')): rtype = 'PER'; (pmin, pmax) = (1, 2)
+ elif (par[0] in ('O', 'OER')): rtype = 'OER'; (pmin, pmax) = (1, 2)
+ else: warnings.warn_explicit("Unknown registration type '%s'" % (par[2]), UserWarning, fn, lineno); return
+ if ((len(par)-1) < pmin):
+ warnings.warn_explicit("Too few parameters for %s registration type. At least %d parameters are required" % (rtype, pmin), UserWarning, fn, lineno)
+ return
+ if ((len(par)-1) > pmax):
+ warnings.warn_explicit("Too many parameters for %s registration type. Only %d parameters are allowed" % (rtype, pmax), UserWarning, fn, lineno)
+ attr = {'pdu' : pdu, 'rtype' : rtype}
+ if (rtype in ('NUM', 'STR')):
+ attr['rtable'] = par[1]
+ attr['rport'] = par[2]
+ rkey = '/'.join([rtype, attr['rtable'], attr['rport']])
+ elif (rtype in ('BER', 'PER', 'OER')):
+ attr['roid'] = par[1]
+ attr['roidname'] = '""'
+ if (len(par)>=3):
+ attr['roidname'] = par[2]
+ elif attr['roid'][0] != '"':
+ attr['roidname'] = '"' + attr['roid'] + '"'
+ rkey = '/'.join([rtype, attr['roid']])
+ self.add_item('REGISTER', rkey, attr=attr, fn=fn, lineno=lineno)
+
+ def check_par(self, par, pmin, pmax, fn, lineno):
+ for i in range(len(par)):
+ if par[i] == '-':
+ par[i] = None
+ continue
+ if par[i][0] == '#':
+ par[i:] = []
+ break
+ if len(par) < pmin:
+ warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
+ return None
+ if (pmax >= 0) and (len(par) > pmax):
+ warnings.warn_explicit("Too many parameters. Only %d parameters are allowed" % (pmax), UserWarning, fn, lineno)
+ return par[0:pmax]
+ return par
+
+    def read(self, fn):
+        """Parse a conformance (.cnf) file.
+
+        Reads *fn* line by line.  Lines matching '#.NAME' are directives:
+        they either act immediately or switch the current context (ctx).
+        All other non-comment lines are interpreted according to the
+        current context.  '#.INCLUDE' / '#.IMPORT' push the current file
+        onto *stack* and continue reading from the named file.
+        """
+        def get_par(line, pmin, pmax, fn, lineno):
+            # Split a parameter list and validate its arity via check_par().
+            par = line.split(None, pmax)
+            par = self.check_par(par, pmin, pmax, fn, lineno)
+            return par
+
+        def get_par_nm(line, pmin, pmax, fn, lineno):
+            # Like get_par(), but trailing 'ATTR = value' pairs are collected
+            # into a dict that replaces/extends par[pmax].
+            if pmax:
+                par = line.split(None, pmax)
+            else:
+                par = [line,]
+            for i in range(len(par)):
+                if par[i][0] == '#':
+                    # inline comment: drop it and everything after it
+                    par[i:] = []
+                    break
+            if len(par) < pmin:
+                warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
+                return None
+            if len(par) > pmax:
+                nmpar = par[pmax]
+            else:
+                nmpar = ''
+            nmpars = {}
+            # 'ATTR =' introducers; attribute names are upper-case identifiers.
+            nmpar_first = re.compile(r'^\s*(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
+            nmpar_next = re.compile(r'\s+(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
+            nmpar_end = re.compile(r'\s*$')
+            result = nmpar_first.search(nmpar)
+            pos = 0
+            while result:
+                k = result.group('attr')
+                pos = result.end()
+                result = nmpar_next.search(nmpar, pos)
+                p1 = pos
+                if result:
+                    p2 = result.start()
+                else:
+                    p2 = nmpar_end.search(nmpar, pos).start()
+                # value runs from the end of 'ATTR =' to the next introducer
+                v = nmpar[p1:p2]
+                nmpars[k] = v
+            if len(par) > pmax:
+                par[pmax] = nmpars
+            return par
+
+        f = open(fn, "r")
+        lineno = 0
+        is_import = False
+        directive = re.compile(r'^\s*#\.(?P<name>[A-Z_][A-Z_0-9]*)(\s+|$)')
+        cdirective = re.compile(r'^\s*##')
+        report = re.compile(r'^TABLE(?P<num>\d*)_(?P<type>HDR|BODY|FTR)$')
+        comment = re.compile(r'^\s*#[^.#]')
+        empty = re.compile(r'^\s*$')
+        ctx = None          # current directive context; None = outside any block
+        name = ''           # current item name (function, class, table number)
+        default_flags = 0x00
+        stack = []          # saved (fn, f, lineno, is_import) for #.INCLUDE/#.IMPORT
+        while True:
+            if not f.closed:
+                line = f.readline()
+                lineno += 1
+            else:
+                line = None
+            if not line:
+                # EOF (or file closed by #.END_OF_CNF): resume the including
+                # file if any, otherwise we are done.
+                if not f.closed:
+                    f.close()
+                if stack:
+                    frec = stack.pop()
+                    fn, f, lineno, is_import = frec['fn'], frec['f'], frec['lineno'], frec['is_import']
+                    continue
+                else:
+                    break
+            if comment.search(line): continue
+            result = directive.search(line)
+            if result: # directive
+                rep_result = report.search(result.group('name'))
+                if result.group('name') == 'END_OF_CNF':
+                    f.close()
+                elif result.group('name') == 'OPT':
+                    ctx = result.group('name')
+                    par = get_par(line[result.end():], 0, -1, fn=fn, lineno=lineno)
+                    if not par: continue
+                    self.set_opt(par[0], par[1:], fn, lineno)
+                    ctx = None
+                elif result.group('name') in ('PDU', 'REGISTER',
+                                            'MODULE', 'MODULE_IMPORT',
+                                            'OMIT_ASSIGNMENT', 'NO_OMIT_ASSGN',
+                                            'VIRTUAL_ASSGN', 'SET_TYPE', 'ASSIGN_VALUE_TO_TYPE',
+                                            'TYPE_RENAME', 'FIELD_RENAME', 'TF_RENAME', 'IMPORT_TAG',
+                                            'TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR',
+                                            'SYNTAX'):
+                    # These directives only open a context; the following
+                    # lines carry the actual entries.
+                    ctx = result.group('name')
+                elif result.group('name') in ('OMIT_ALL_ASSIGNMENTS', 'OMIT_ASSIGNMENTS_EXCEPT',
+                                              'OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT',
+                                              'OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
+                    ctx = result.group('name')
+                    # Wildcard key: '*' all, '*T' types only, '*V' values only,
+                    # optionally restricted to one module via '/<module>'.
+                    key = '*'
+                    if ctx in ('OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT'):
+                        key += 'T'
+                    if ctx in ('OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
+                        key += 'V'
+                    par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
+                    if par:
+                        key += '/' + par[0]
+                    self.add_item('OMIT_ASSIGNMENT', key, omit=True, fn=fn, lineno=lineno)
+                    if ctx in ('OMIT_ASSIGNMENTS_EXCEPT', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
+                        # the following lines list the exceptions
+                        ctx = 'NO_OMIT_ASSGN'
+                    else:
+                        ctx = None
+                elif result.group('name') in ('EXPORTS', 'MODULE_EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
+                    ctx = result.group('name')
+                    default_flags = EF_TYPE|EF_VALS
+                    if ctx == 'MODULE_EXPORTS':
+                        ctx = 'EXPORTS'
+                        default_flags |= EF_MODULE
+                    if ctx == 'EXPORTS':
+                        par = get_par(line[result.end():], 0, 5, fn=fn, lineno=lineno)
+                    else:
+                        par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
+                    if not par: continue
+                    # par[0] may override the vals policy; remaining words are
+                    # flag modifiers applied to default_flags.
+                    p = 1
+                    if (par[0] == 'WITH_VALS'): default_flags |= EF_TYPE|EF_VALS
+                    elif (par[0] == 'WITHOUT_VALS'): default_flags |= EF_TYPE; default_flags &= ~EF_VALS
+                    elif (par[0] == 'ONLY_VALS'): default_flags &= ~EF_TYPE; default_flags |= EF_VALS
+                    elif (ctx == 'EXPORTS'): p = 0
+                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[0]), UserWarning, fn, lineno)
+                    for i in range(p, len(par)):
+                        if (par[i] == 'ONLY_ENUM'): default_flags &= ~(EF_TYPE|EF_VALS); default_flags |= EF_ENUM
+                        elif (par[i] == 'WITH_ENUM'): default_flags |= EF_ENUM
+                        elif (par[i] == 'VALS_WITH_TABLE'): default_flags |= EF_TABLE
+                        elif (par[i] == 'WS_DLL'): default_flags |= EF_WS_DLL
+                        elif (par[i] == 'EXTERN'): default_flags |= EF_EXTERN
+                        elif (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT
+                        else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
+                elif result.group('name') in ('MAKE_ENUM', 'MAKE_DEFINES'):
+                    ctx = result.group('name')
+                    default_flags = EF_ENUM
+                    if ctx == 'MAKE_ENUM': default_flags |= EF_NO_PROT|EF_NO_TYPE
+                    if ctx == 'MAKE_DEFINES': default_flags |= EF_DEFINE|EF_UCASE|EF_NO_TYPE
+                    par = get_par(line[result.end():], 0, 3, fn=fn, lineno=lineno)
+                    for i in range(0, len(par)):
+                        if (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT
+                        elif (par[i] == 'PROT_PREFIX'): default_flags &= ~ EF_NO_PROT
+                        elif (par[i] == 'NO_TYPE_PREFIX'): default_flags |= EF_NO_TYPE
+                        elif (par[i] == 'TYPE_PREFIX'): default_flags &= ~ EF_NO_TYPE
+                        elif (par[i] == 'UPPER_CASE'): default_flags |= EF_UCASE
+                        elif (par[i] == 'NO_UPPER_CASE'): default_flags &= ~EF_UCASE
+                        else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
+                elif result.group('name') == 'USE_VALS_EXT':
+                    ctx = result.group('name')
+                    default_flags = 0xFF
+                elif result.group('name') == 'FN_HDR':
+                    # Header code for function *name*; the name may be carried
+                    # over from an open FN_PARS context.
+                    minp = 1
+                    if (ctx in ('FN_PARS',)) and name: minp = 0
+                    par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
+                    if (not par) and (minp > 0): continue
+                    ctx = result.group('name')
+                    if par: name = par[0]
+                elif result.group('name') == 'FN_FTR':
+                    minp = 1
+                    if (ctx in ('FN_PARS','FN_HDR')) and name: minp = 0
+                    par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
+                    if (not par) and (minp > 0): continue
+                    ctx = result.group('name')
+                    if par: name = par[0]
+                elif result.group('name') == 'FN_BODY':
+                    par = get_par_nm(line[result.end():], 1, 1, fn=fn, lineno=lineno)
+                    if not par: continue
+                    ctx = result.group('name')
+                    name = par[0]
+                    if len(par) > 1:
+                        # ATTR=value pairs on the directive line become FN_PARS
+                        self.add_item('FN_PARS', name, pars=par[1], fn=fn, lineno=lineno)
+                elif result.group('name') == 'FN_PARS':
+                    par = get_par_nm(line[result.end():], 0, 1, fn=fn, lineno=lineno)
+                    ctx = result.group('name')
+                    if not par:
+                        name = None
+                    elif len(par) == 1:
+                        name = par[0]
+                        self.add_item(ctx, name, pars={}, fn=fn, lineno=lineno)
+                    elif len(par) > 1:
+                        self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
+                        ctx = None
+                elif result.group('name') == 'CLASS':
+                    par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
+                    if not par: continue
+                    ctx = result.group('name')
+                    name = par[0]
+                    add_class_ident(name)
+                    if not name.split('$')[-1].isupper():
+                        warnings.warn_explicit("No lower-case letters shall be included in information object class name (%s)" % (name),
+                                                UserWarning, fn, lineno)
+                elif result.group('name') == 'ASSIGNED_OBJECT_IDENTIFIER':
+                    par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
+                    if not par: continue
+                    self.update_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER', ids={par[0] : par[0]}, fn=fn, lineno=lineno)
+                elif rep_result: # Reports
+                    # TABLE<num>_{HDR,BODY,FTR}: collect report fragments,
+                    # BODY additionally names the variable it iterates over.
+                    num = rep_result.group('num')
+                    type = rep_result.group('type')
+                    if type == 'BODY':
+                        par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
+                        if not par: continue
+                    else:
+                        par = get_par(line[result.end():], 0, 0, fn=fn, lineno=lineno)
+                    rep = { 'type' : type, 'var' : None, 'text' : '', 'fn' : fn, 'lineno' : lineno }
+                    if len(par) > 0:
+                        rep['var'] = par[0]
+                    self.report.setdefault(num, []).append(rep)
+                    ctx = 'TABLE'
+                    name = num
+                elif result.group('name') in ('INCLUDE', 'IMPORT') :
+                    is_imp = result.group('name') == 'IMPORT'
+                    par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
+                    if not par:
+                        warnings.warn_explicit("%s requires parameter" % (result.group('name'),), UserWarning, fn, lineno)
+                        continue
+                    fname = par[0]
+                    # Resolve relative to cwd, then to the including file's
+                    # directory, then along self.include_path.
+                    #print "Try include: %s" % (fname)
+                    if (not os.path.exists(fname)):
+                        fname = os.path.join(os.path.split(fn)[0], par[0])
+                        #print "Try include: %s" % (fname)
+                    i = 0
+                    while not os.path.exists(fname) and (i < len(self.include_path)):
+                        fname = os.path.join(self.include_path[i], par[0])
+                        #print "Try include: %s" % (fname)
+                        i += 1
+                    if (not os.path.exists(fname)):
+                        if is_imp:
+                            continue # just ignore
+                        else:
+                            fname = par[0] # report error
+                    fnew = open(fname, "r")
+                    stack.append({'fn' : fn, 'f' : f, 'lineno' : lineno, 'is_import' : is_import})
+                    fn, f, lineno, is_import = par[0], fnew, 0, is_imp
+                elif result.group('name') == 'END':
+                    ctx = None
+                else:
+                    warnings.warn_explicit("Unknown directive '%s'" % (result.group('name')), UserWarning, fn, lineno)
+                continue
+            # Non-directive line: interpret according to the current context.
+            if not ctx:
+                if not empty.match(line):
+                    warnings.warn_explicit("Non-empty line in empty context", UserWarning, fn, lineno)
+            elif ctx == 'OPT':
+                if empty.match(line): continue
+                par = get_par(line, 1, -1, fn=fn, lineno=lineno)
+                if not par: continue
+                self.set_opt(par[0], par[1:], fn, lineno)
+            elif ctx in ('EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
+                if empty.match(line): continue
+                if ctx == 'EXPORTS':
+                    par = get_par(line, 1, 6, fn=fn, lineno=lineno)
+                else:
+                    par = get_par(line, 1, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                # per-entry flags start from the context's default_flags
+                flags = default_flags
+                p = 2
+                if (len(par)>=2):
+                    if (par[1] == 'WITH_VALS'): flags |= EF_TYPE|EF_VALS
+                    elif (par[1] == 'WITHOUT_VALS'): flags |= EF_TYPE; flags &= ~EF_VALS
+                    elif (par[1] == 'ONLY_VALS'): flags &= ~EF_TYPE; flags |= EF_VALS
+                    elif (ctx == 'EXPORTS'): p = 1
+                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[1]), UserWarning, fn, lineno)
+                for i in range(p, len(par)):
+                    if (par[i] == 'ONLY_ENUM'): flags &= ~(EF_TYPE|EF_VALS); flags |= EF_ENUM
+                    elif (par[i] == 'WITH_ENUM'): flags |= EF_ENUM
+                    elif (par[i] == 'VALS_WITH_TABLE'): flags |= EF_TABLE
+                    elif (par[i] == 'WS_DLL'): flags |= EF_WS_DLL
+                    elif (par[i] == 'EXTERN'): flags |= EF_EXTERN
+                    elif (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT
+                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
+                self.add_item(ctx, par[0], flag=flags, fn=fn, lineno=lineno)
+            elif ctx in ('MAKE_ENUM', 'MAKE_DEFINES'):
+                if empty.match(line): continue
+                par = get_par(line, 1, 4, fn=fn, lineno=lineno)
+                if not par: continue
+                flags = default_flags
+                for i in range(1, len(par)):
+                    if (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT
+                    elif (par[i] == 'PROT_PREFIX'): flags &= ~ EF_NO_PROT
+                    elif (par[i] == 'NO_TYPE_PREFIX'): flags |= EF_NO_TYPE
+                    elif (par[i] == 'TYPE_PREFIX'): flags &= ~ EF_NO_TYPE
+                    elif (par[i] == 'UPPER_CASE'): flags |= EF_UCASE
+                    elif (par[i] == 'NO_UPPER_CASE'): flags &= ~EF_UCASE
+                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
+                self.add_item('MAKE_ENUM', par[0], flag=flags, fn=fn, lineno=lineno)
+            elif ctx == 'USE_VALS_EXT':
+                if empty.match(line): continue
+                par = get_par(line, 1, 1, fn=fn, lineno=lineno)
+                if not par: continue
+                flags = default_flags
+                self.add_item('USE_VALS_EXT', par[0], flag=flags, fn=fn, lineno=lineno)
+            elif ctx == 'PDU':
+                if empty.match(line): continue
+                par = get_par(line, 1, 5, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_pdu(par[0:2], fn, lineno)
+                if (len(par)>=3):
+                    self.add_register(par[0], par[2:5], fn, lineno)
+            elif ctx == 'SYNTAX':
+                if empty.match(line): continue
+                par = get_par(line, 1, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                if not self.check_item('PDU', par[0]):
+                    self.add_pdu(par[0:1], fn, lineno)
+                self.add_syntax(par, fn, lineno)
+            elif ctx == 'REGISTER':
+                if empty.match(line): continue
+                par = get_par(line, 3, 4, fn=fn, lineno=lineno)
+                if not par: continue
+                if not self.check_item('PDU', par[0]):
+                    self.add_pdu(par[0:1], fn, lineno)
+                self.add_register(par[0], par[1:4], fn, lineno)
+            elif ctx in ('MODULE', 'MODULE_IMPORT'):
+                if empty.match(line): continue
+                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item('MODULE', par[0], proto=par[1], fn=fn, lineno=lineno)
+            elif ctx == 'IMPORT_TAG':
+                if empty.match(line): continue
+                par = get_par(line, 3, 3, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item(ctx, par[0], ttag=(par[1], par[2]), fn=fn, lineno=lineno)
+            elif ctx == 'OMIT_ASSIGNMENT':
+                if empty.match(line): continue
+                par = get_par(line, 1, 1, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item(ctx, par[0], omit=True, fn=fn, lineno=lineno)
+            elif ctx == 'NO_OMIT_ASSGN':
+                if empty.match(line): continue
+                par = get_par(line, 1, 1, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item(ctx, par[0], omit=False, fn=fn, lineno=lineno)
+            elif ctx == 'VIRTUAL_ASSGN':
+                if empty.match(line): continue
+                par = get_par(line, 2, -1, fn=fn, lineno=lineno)
+                if not par: continue
+                if (len(par[1].split('/')) > 1) and not self.check_item('SET_TYPE', par[1]):
+                    self.add_item('SET_TYPE', par[1], type=par[0], fn=fn, lineno=lineno)
+                self.add_item('VIRTUAL_ASSGN', par[1], name=par[0], fn=fn, lineno=lineno)
+                for nm in par[2:]:
+                    self.add_item('SET_TYPE', nm, type=par[0], fn=fn, lineno=lineno)
+                if not par[0][0].isupper():
+                    warnings.warn_explicit("Virtual assignment should have uppercase name (%s)" % (par[0]),
+                                            UserWarning, fn, lineno)
+            elif ctx == 'SET_TYPE':
+                if empty.match(line): continue
+                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                if not self.check_item('VIRTUAL_ASSGN', par[0]):
+                    self.add_item('SET_TYPE', par[0], type=par[1], fn=fn, lineno=lineno)
+                    if not par[1][0].isupper():
+                        warnings.warn_explicit("Set type should have uppercase name (%s)" % (par[1]),
+                                                UserWarning, fn, lineno)
+            elif ctx == 'ASSIGN_VALUE_TO_TYPE':
+                if empty.match(line): continue
+                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item(ctx, par[0], name=par[1], fn=fn, lineno=lineno)
+            elif ctx == 'TYPE_RENAME':
+                if empty.match(line): continue
+                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item('TYPE_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
+                if not par[1][0].isupper():
+                    warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
+                                            UserWarning, fn, lineno)
+            elif ctx == 'FIELD_RENAME':
+                if empty.match(line): continue
+                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item('FIELD_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
+                if not par[1][0].islower():
+                    warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
+                                            UserWarning, fn, lineno)
+            elif ctx == 'TF_RENAME':
+                if empty.match(line): continue
+                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
+                if not par: continue
+                # TF_RENAME renames both: Upper-case form for the type and
+                # lower-case form for the field.
+                tmpu = par[1][0].upper() + par[1][1:]
+                tmpl = par[1][0].lower() + par[1][1:]
+                self.add_item('TYPE_RENAME', par[0], eth_name=tmpu, fn=fn, lineno=lineno)
+                if not tmpu[0].isupper():
+                    warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
+                                            UserWarning, fn, lineno)
+                self.add_item('FIELD_RENAME', par[0], eth_name=tmpl, fn=fn, lineno=lineno)
+                if not tmpl[0].islower():
+                    warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
+                                            UserWarning, fn, lineno)
+            elif ctx in ('TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR'):
+                if empty.match(line): continue
+                par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
+                if not par: continue
+                self.add_item(ctx, par[0], attr=par[1], fn=fn, lineno=lineno)
+            elif ctx == 'FN_PARS':
+                if empty.match(line): continue
+                # With an active name the whole line is ATTR=value pairs for
+                # that function; otherwise the first word names the function.
+                if name:
+                    par = get_par_nm(line, 0, 0, fn=fn, lineno=lineno)
+                else:
+                    par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
+                if not par: continue
+                if name:
+                    self.update_item(ctx, name, pars=par[0], fn=fn, lineno=lineno)
+                else:
+                    self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
+            elif ctx in ('FN_HDR', 'FN_FTR', 'FN_BODY'):
+                result = cdirective.search(line)
+                if result: # directive
+                    # '##' escapes a literal '#' at the start of a code line
+                    line = '#' + line[result.end():]
+                self.add_fn_line(name, ctx, line, fn=fn, lineno=lineno)
+            elif ctx == 'CLASS':
+                if empty.match(line): continue
+                par = get_par(line, 1, 3, fn=fn, lineno=lineno)
+                if not par: continue
+                if not set_type_to_class(name, par[0], par[1:]):
+                    warnings.warn_explicit("Could not set type of class member %s.&%s to %s" % (name, par[0], par[1]),
+                                            UserWarning, fn, lineno)
+            elif ctx == 'TABLE':
+                # accumulate raw report text for the current table fragment
+                self.report[name][-1]['text'] += line
+
+ def set_opt(self, opt, par, fn, lineno):
+ #print("set_opt: %s, %s" % (opt, par))
+ if opt in ("-I",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.include_path.append(relpath(par[0]))
+ elif opt in ("-b", "BER", "CER", "DER"):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.encoding = 'ber'
+ elif opt in ("PER",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.encoding = 'per'
+ elif opt in ("OER",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.encoding = 'oer'
+ elif opt in ("-p", "PROTO"):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.proto_opt = par[0]
+ self.ectx.merge_modules = True
+ elif opt in ("ALIGNED",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.aligned = True
+ elif opt in ("-u", "UNALIGNED"):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.aligned = False
+ elif opt in ("PROTO_ROOT_NAME"):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.proto_root_name = par[0]
+ elif opt in ("-d",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.dbgopt = par[0]
+ elif opt in ("-e",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.expcnf = True
+ elif opt in ("-S",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.merge_modules = True
+ elif opt in ("GROUP_BY_PROT",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.group_by_prot = True
+ elif opt in ("-o",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.outnm_opt = par[0]
+ elif opt in ("-O",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.output.outdir = relpath(par[0])
+ elif opt in ("-s",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.output.single_file = relpath(par[0])
+ elif opt in ("-k",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.ectx.output.keep = True
+ elif opt in ("-L",):
+ par = self.check_par(par, 0, 0, fn, lineno)
+ self.suppress_line = True
+ elif opt in ("EMBEDDED_PDV_CB",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.default_embedded_pdv_cb = par[0]
+ elif opt in ("EXTERNAL_TYPE_CB",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.default_external_type_cb = par[0]
+ elif opt in ("-r",):
+ par = self.check_par(par, 1, 1, fn, lineno)
+ if not par: return
+ self.ectx.remove_prefix = par[0]
+ else:
+ warnings.warn_explicit("Unknown option %s" % (opt),
+ UserWarning, fn, lineno)
+
+    def dbg_print(self):
+        """Dump every conformance table entry (file, line, table, key, value)
+        to stdout in a fixed-width format, sorted by table then key."""
+        print("\n# Conformance values")
+        print("%-15s %-4s %-15s %-20s %s" % ("File", "Line", "Table", "Key", "Value"))
+        print("-" * 100)
+        tbls = sorted(self.table.keys())
+        for t in tbls:
+            keys = sorted(self.table[t].keys())
+            for k in keys:
+                # tblcfg[t]['val_nm'] names the payload field for table t
+                print("%-15s %4s %-15s %-20s %s" % (
+                    self.table[t][k]['fn'], self.table[t][k]['lineno'], t, k, str(self.table[t][k][self.tblcfg[t]['val_nm']])))
+
+    def unused_report(self):
+        """Warn (at each entry's original file/line) about conformance table
+        entries and FN_HDR/FN_FTR/FN_BODY fragments that were never used.
+
+        Tables whose tblcfg 'chk_use' flag is False are exempt.
+        """
+        tbls = sorted(self.table.keys())
+        for t in tbls:
+            if not self.tblcfg[t]['chk_use']: continue
+            keys = sorted(self.table[t].keys())
+            for k in keys:
+                if not self.table[t][k]['used']:
+                    warnings.warn_explicit("Unused %s for %s" % (t, k),
+                                            UserWarning, self.table[t][k]['fn'], self.table[t][k]['lineno'])
+        fnms = list(self.fn.keys())
+        fnms.sort()
+        for f in fnms:
+            keys = sorted(self.fn[f].keys())
+            for k in keys:
+                # k is one of FN_HDR/FN_FTR/FN_BODY; entry may be absent (None)
+                if not self.fn[f][k]: continue
+                if not self.fn[f][k]['used']:
+                    warnings.warn_explicit("Unused %s for %s" % (k, f),
+                                            UserWarning, self.fn[f][k]['fn'], self.fn[f][k]['lineno'])
+
+#--- EthOut -------------------------------------------------------------------
+class EthOut:
+    """Output-file manager for the generated dissector sources.
+
+    Tracks every file the compiler creates, writes the standard generated
+    header, and can merge all per-module pieces into a single packet-*.c/.h
+    pair via make_single_file()/do_include().
+    """
+    def __init__(self):
+        self.ectx = None            # back-reference to the generator context
+        self.outnm = None           # protocol output name (packet-<outnm>-...)
+        self.outdir = '.'           # destination directory
+        self.single_file = None     # basename for single-file output mode
+        self.created_files = {}     # abs path -> keep_anyway flag
+        self.created_files_ord = [] # creation order of the same paths
+        self.keep = False           # keep intermediate files after merging
+
+    def outcomment(self, ln, comment=None):
+        # Wrap *ln* as a comment line: '<marker> ln' if a marker is given
+        # (e.g. '#' for .cnf), otherwise a C block comment.
+        if comment:
+            return '%s %s\n' % (comment, ln)
+        else:
+            return '/* %-74s */\n' % (ln)
+
+    def created_file_add(self, name, keep_anyway):
+        # Record *name* as created; the keep_anyway flag is sticky (or-ed).
+        name = os.path.normcase(os.path.abspath(name))
+        if name not in self.created_files:
+            self.created_files_ord.append(name)
+            self.created_files[name] = keep_anyway
+        else:
+            self.created_files[name] = self.created_files[name] or keep_anyway
+
+    def created_file_exists(self, name):
+        # True if *name* was already created during this run.
+        name = os.path.normcase(os.path.abspath(name))
+        return name in self.created_files
+
+    #--- output_fname -------------------------------------------------------
+    def output_fname(self, ftype, ext='c'):
+        # Build 'packet-<outnm>[-<ftype>].<ext>' ('.cnf' files get no
+        # 'packet-' prefix).
+        fn = ''
+        if not ext in ('cnf',):
+            fn += 'packet-'
+        fn += self.outnm
+        if (ftype):
+            fn += '-' + ftype
+        fn += '.' + ext
+        return fn
+    #--- file_open -------------------------------------------------------
+    def file_open(self, ftype, ext='c'):
+        # Open (append if already created this run) the output file for
+        # *ftype*, writing the generated-file header and, when modules are
+        # not merged, a '--- Module ... ---' banner.
+        fn = self.output_fname(ftype, ext=ext)
+        if self.created_file_exists(fn):
+            fx = open(fn, 'a')
+        else:
+            fx = open(fn, 'w')
+        comment = None
+        if ext in ('cnf',):
+            comment = '#'
+            fx.write(self.fhdr(fn, comment = comment))
+        else:
+            if (not self.single_file and not self.created_file_exists(fn)):
+                fx.write(self.fhdr(fn))
+        if not self.ectx.merge_modules:
+            fx.write('\n')
+            mstr = "--- "
+            if self.ectx.groups():
+                mstr += "Module"
+                if (len(self.ectx.modules) > 1):
+                    mstr += "s"
+                for (m, p) in self.ectx.modules:
+                    mstr += " %s" % (m)
+            else:
+                mstr += "Module %s" % (self.ectx.Module())
+            mstr += " --- --- ---"
+            fx.write(self.outcomment(mstr, comment))
+            fx.write('\n')
+        return fx
+    #--- file_close -------------------------------------------------------
+    def file_close(self, fx, discard=False, keep_anyway=False):
+        # Close *fx*; either delete it (discard, unless already registered)
+        # or register it as a created file.
+        fx.close()
+        if discard and not self.created_file_exists(fx.name):
+            os.unlink(fx.name)
+        else:
+            self.created_file_add(fx.name, keep_anyway)
+    #--- fhdr -------------------------------------------------------
+    def fhdr(self, fn, comment=None):
+        # Standard "generated file" header, including the full asn2wrs
+        # command line, with paths normalised so regeneration on another
+        # machine produces an identical header.
+        out = ''
+        out += self.outcomment('Do not modify this file. Changes will be overwritten.', comment)
+        out += self.outcomment('Generated automatically by the ASN.1 to Wireshark dissector compiler', comment)
+        out += self.outcomment(os.path.basename(fn), comment)
+        out += self.outcomment(' '.join(['asn2wrs.py'] + sys.argv[1:]), comment)
+        out += '\n'
+        # Make Windows path separator look like Unix path separator
+        out = out.replace('\\', '/')
+        # Change absolute paths and relative paths generated outside
+        # source directory to paths relative to asn1/<proto> subdir.
+        out = re.sub(r'(\s)[./A-Z]\S*/dissectors\b', r'\1../..', out)
+        out = re.sub(r'(\s)[./A-Z]\S*/asn1/\S*?([\s/])', r'\1.\2', out)
+        return out
+
+    #--- dbg_print -------------------------------------------------------
+    def dbg_print(self):
+        # List every file created during this run.
+        print("\n# Output files")
+        print("\n".join(self.created_files_ord))
+        print("\n")
+
+    #--- make_single_file -------------------------------------------------------
+    def make_single_file(self, suppress_line):
+        # Merge the per-part outputs into packet-<outnm>.c (and .h if the
+        # template has one), then remove the intermediates unless kept.
+        if (not self.single_file): return
+        in_nm = self.single_file + '.c'
+        out_nm = os.path.join(self.outdir, self.output_fname(''))
+        self.do_include(out_nm, in_nm, suppress_line)
+        in_nm = self.single_file + '.h'
+        if (os.path.exists(in_nm)):
+            out_nm = os.path.join(self.outdir, self.output_fname('', ext='h'))
+            self.do_include(out_nm, in_nm, suppress_line)
+        if (not self.keep):
+            for fn in self.created_files_ord:
+                if not self.created_files[fn]:
+                    os.unlink(fn)
+
+    #--- do_include -------------------------------------------------------
+    def do_include(self, out_nm, in_nm, suppress_line):
+        # Copy *in_nm* to *out_nm*, inlining any '#include' of a file this
+        # run generated, and maintaining '#line' markers (unless suppressed)
+        # so compiler diagnostics point at the original inputs.
+        def check_file(fn, fnlist):
+            # Return a normalised path only if *fn* is one of our generated
+            # files and actually exists.
+            fnfull = os.path.normcase(os.path.abspath(fn))
+            if (fnfull in fnlist and os.path.exists(fnfull)):
+                return os.path.normpath(fn)
+            return None
+        fin = open(in_nm, "r")
+        fout = open(out_nm, "w")
+        fout.write(self.fhdr(out_nm))
+        if (not suppress_line):
+            fout.write('/* Input file: ' + os.path.basename(in_nm) +' */\n')
+            fout.write('\n')
+            fout.write('#line %u "%s"\n' % (1, rel_dissector_path(in_nm)))
+
+        include = re.compile(r'^\s*#\s*include\s+[<"](?P<fname>[^>"]+)[>"]', re.IGNORECASE)
+
+        cont_linenum = 0;
+
+        while (True):
+            cont_linenum = cont_linenum + 1;
+            line = fin.readline()
+            if (line == ''): break
+            ifile = None
+            result = include.search(line)
+            #if (result): print os.path.normcase(os.path.abspath(result.group('fname')))
+            if (result):
+                # Look for the included file next to the input, in outdir,
+                # then as given.
+                ifile = check_file(os.path.join(os.path.split(in_nm)[0], result.group('fname')), self.created_files)
+                if (not ifile):
+                    ifile = check_file(os.path.join(self.outdir, result.group('fname')), self.created_files)
+                if (not ifile):
+                    ifile = check_file(result.group('fname'), self.created_files)
+            if (ifile):
+                if (not suppress_line):
+                    fout.write('\n')
+                    fout.write('/*--- Included file: ' + ifile + ' ---*/\n')
+                    fout.write('#line %u "%s"\n' % (1, rel_dissector_path(ifile)))
+                finc = open(ifile, "r")
+                fout.write(finc.read())
+                if (not suppress_line):
+                    fout.write('\n')
+                    fout.write('/*--- End of included file: ' + ifile + ' ---*/\n')
+                    fout.write('#line %u "%s"\n' % (cont_linenum+1, rel_dissector_path(in_nm)) )
+                finc.close()
+            else:
+                fout.write(line)
+
+        fout.close()
+        fin.close()
+
+
+#--- Node ---------------------------------------------------------------------
+class Node:
+    """Generic parse-tree node.
+
+    Carries a 'type' string (the class name unless overridden by a single
+    positional argument) plus arbitrary keyword attributes copied into
+    __dict__.  Provides a recursive pretty-printer used by __repr__.
+    """
+    def __init__(self,*args, **kw):
+        if len (args) == 0:
+            self.type = self.__class__.__name__
+        else:
+            assert (len(args) == 1)
+            self.type = args[0]
+        self.__dict__.update (kw)
+    def str_child (self, key, child, depth):
+        # Render one attribute, recursing into Node values and lists.
+        indent = " " * (2 * depth)
+        keystr = indent + key + ": "
+        if key == 'type': # already processed in str_depth
+            return ""
+        if isinstance (child, Node): # ugh
+            return keystr + "\n" + child.str_depth (depth+1)
+        if isinstance(child, type ([])):
+            l = []
+            for x in child:
+                if isinstance (x, Node):
+                    l.append (x.str_depth (depth+1))
+                else:
+                    l.append (indent + " " + str(x) + "\n")
+            return keystr + "[\n" + ''.join(l) + indent + "]\n"
+        else:
+            return keystr + str (child) + "\n"
+    def str_depth (self, depth): # ugh
+        # Render this node and all attributes at the given indent depth.
+        indent = " " * (2 * depth)
+        l = ["%s%s" % (indent, self.type)]
+        l.append ("".join ([self.str_child (k_v[0], k_v[1], depth + 1) for k_v in list(self.__dict__.items ())]))
+        return "\n".join (l)
+    def __repr__(self):
+        return "\n" + self.str_depth (0)
+    def to_python (self, ctx):
+        return self.str_depth (ctx.indent_lev)
+
+    def eth_reg(self, ident, ectx):
+        # Default registration hook: subclasses override to register
+        # themselves with the generator context.
+        pass
+
+    def fld_obj_repr(self, ectx):
+        # Placeholder representation for object-field values.
+        return "/* TO DO %s */" % (str(self))
+
+
+#--- ValueAssignment -------------------------------------------------------------
+class ValueAssignment (Node):
+    """ASN.1 value assignment (name VALUE-TYPE ::= value)."""
+    def __init__(self,*args, **kw) :
+        Node.__init__ (self,*args, **kw)
+
+    def eth_reg(self, ident, ectx):
+        # Register this value with the generator context unless the
+        # conformance file omits it for the current module.
+        if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit
+        ectx.eth_reg_vassign(self)
+        ectx.eth_reg_value(self.ident, self.typ, self.val)
+
+#--- ObjectAssignment -------------------------------------------------------------
+class ObjectAssignment (Node):
+    """ASN.1 information object assignment (name CLASS ::= { ... })."""
+    def __init__(self,*args, **kw) :
+        Node.__init__ (self,*args, **kw)
+
+    def __eq__(self, other):
+        # Equal when class and all field values match; Node-valued fields
+        # compare via fld_obj_eq(), everything else by string form.
+        if self.cls != other.cls:
+            return False
+        if len(self.val) != len(other.val):
+            return False
+        for f in (list(self.val.keys())):
+            if f not in other.val:
+                return False
+            if isinstance(self.val[f], Node) and isinstance(other.val[f], Node):
+                if not self.val[f].fld_obj_eq(other.val[f]):
+                    return False
+            else:
+                if str(self.val[f]) != str(other.val[f]):
+                    return False
+        return True
+
+    def eth_reg(self, ident, ectx):
+        def make_virtual_type(cls, field, prefix):
+            # For well-known classes, wrap an inline type stored in *field*
+            # in a synthetic Type_Ref named '<prefix>-<ident>' and register
+            # it; also register it as a PDU if the conformance file asks.
+            if isinstance(self.val, str): return
+            if field in self.val and not isinstance(self.val[field], Type_Ref):
+                vnm = prefix + '-' + self.ident
+                virtual_tr = Type_Ref(val = vnm)
+                t = self.val[field]
+                self.val[field] = virtual_tr
+                ectx.eth_reg_assign(vnm, t, virt=True)
+                ectx.eth_reg_type(vnm, t)
+                t.eth_reg_sub(vnm, ectx)
+            if field in self.val and ectx.conform.check_item('PDU', cls + '.' + field):
+                ectx.eth_reg_field(self.val[field].val, self.val[field].val, impl=self.val[field].HasImplicitTag(ectx), pdu=ectx.conform.use_item('PDU', cls + '.' + field))
+            return
+        # end of make_virtual_type()
+        if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit
+        self.module = ectx.Module()
+        ectx.eth_reg_oassign(self)
+        # Only the type-carrying fields of these standard classes need
+        # virtual types for dissection.
+        if (self.cls == 'TYPE-IDENTIFIER') or (self.cls == 'ABSTRACT-SYNTAX'):
+            make_virtual_type(self.cls, '&Type', 'TYPE')
+        if (self.cls == 'OPERATION'):
+            make_virtual_type(self.cls, '&ArgumentType', 'ARG')
+            make_virtual_type(self.cls, '&ResultType', 'RES')
+        if (self.cls == 'ERROR'):
+            make_virtual_type(self.cls, '&ParameterType', 'PAR')
+
+
+#--- Type ---------------------------------------------------------------------
+class Type (Node):
+ def __init__(self,*args, **kw) :
+ self.name = None
+ self.constr = None
+ self.tags = []
+ self.named_list = None
+ Node.__init__ (self,*args, **kw)
+
+ def IsNamed(self):
+ if self.name is None :
+ return False
+ else:
+ return True
+
+ def HasConstraint(self):
+ if self.constr is None :
+ return False
+ else :
+ return True
+
+ def HasSizeConstraint(self):
+ return self.HasConstraint() and self.constr.IsSize()
+
+ def HasValueConstraint(self):
+ return self.HasConstraint() and self.constr.IsValue()
+
+ def HasPermAlph(self):
+ return self.HasConstraint() and self.constr.IsPermAlph()
+
+ def HasContentsConstraint(self):
+ return self.HasConstraint() and self.constr.IsContents()
+
+ def HasOwnTag(self):
+ return len(self.tags) > 0
+
+ def HasImplicitTag(self, ectx):
+ return (self.HasOwnTag() and self.tags[0].IsImplicit(ectx))
+
+ def IndetermTag(self, ectx):
+ return False
+
+ def AddTag(self, tag):
+ self.tags[0:0] = [tag]
+
+ def GetTag(self, ectx):
+ #print "GetTag(%s)\n" % self.name;
+ if (self.HasOwnTag()):
+ return self.tags[0].GetTag(ectx)
+ else:
+ return self.GetTTag(ectx)
+
+ def GetTTag(self, ectx):
+ print("#Unhandled GetTTag() in %s" % (self.type))
+ print(self.str_depth(1))
+ return ('BER_CLASS_unknown', 'TAG_unknown')
+
+ def SetName(self, name):
+ self.name = name
+
+ def AddConstraint(self, constr):
+ if not self.HasConstraint():
+ self.constr = constr
+ else:
+ self.constr = Constraint(type = 'Intersection', subtype = [self.constr, constr])
+
+ def eth_tname(self):
+ return '#' + self.type + '_' + str(id(self))
+
+ def eth_ftype(self, ectx):
+ return ('FT_NONE', 'BASE_NONE')
+
+ def eth_strings(self):
+ return 'NULL'
+
+ def eth_omit_field(self):
+ return False
+
+ def eth_need_tree(self):
+ return False
+
+ def eth_has_vals(self):
+ return False
+
+ def eth_has_enum(self, tname, ectx):
+ return self.eth_has_vals() and (ectx.eth_type[tname]['enum'] & EF_ENUM)
+
+ def eth_need_pdu(self, ectx):
+ return None
+
+ def eth_named_bits(self):
+ return None
+
+ def eth_reg_sub(self, ident, ectx):
+ pass
+
+ def get_components(self, ectx):
+ print("#Unhandled get_components() in %s" % (self.type))
+ print(self.str_depth(1))
+ return []
+
+ def sel_req(self, sel, ectx):
+ print("#Selection '%s' required for non-CHOICE type %s" % (sel, self.type))
+ print(self.str_depth(1))
+
+ def fld_obj_eq(self, other):
+ return isinstance(other, Type) and (self.eth_tname() == other.eth_tname())
+
+ # Register this type (and, recursively, its components) with the output
+ # context.  ident='' means a top-level assignment; otherwise ident is the
+ # slash-separated path of the enclosing field.  tstrip counts already-
+ # consumed tags, tagflag marks a synthetic tagged wrapper level, selflag
+ # marks registration on behalf of a SelectionType, idx is the index
+ # suffix for SET OF / SEQUENCE OF items, parent is the parent ident.
+ def eth_reg(self, ident, ectx, tstrip=0, tagflag=False, selflag=False, idx='', parent=None):
+ #print "eth_reg(): %s, ident=%s, tstrip=%d, tagflag=%s, selflag=%s, parent=%s" %(self.type, ident, tstrip, str(tagflag), str(selflag), str(parent))
+ #print " ", self
+ # If the encoding needs explicit tag handling and there are unconsumed
+ # tags, wrap self in TaggedType shells (innermost tag first) and
+ # restart registration on the wrapper instead.
+ if (ectx.NeedTags() and (len(self.tags) > tstrip)):
+ tagged_type = self
+ for i in range(len(self.tags)-1, tstrip-1, -1):
+ tagged_type = TaggedType(val=tagged_type, tstrip=i)
+ tagged_type.AddTag(self.tags[i])
+ if not tagflag: # 1st tagged level
+ if self.IsNamed() and not selflag:
+ tagged_type.SetName(self.name)
+ tagged_type.eth_reg(ident, ectx, tstrip=1, tagflag=tagflag, idx=idx, parent=parent)
+ return
+ # Build the registration name: path for fields, bare name for
+ # assignments.
+ nm = ''
+ if ident and self.IsNamed() and not tagflag and not selflag:
+ nm = ident + '/' + self.name
+ elif ident:
+ nm = ident
+ elif self.IsNamed():
+ nm = self.name
+ if not ident and ectx.conform.omit_assignment('T', nm, ectx.Module()): return # Assignment to omit
+ if not ident: # Assignment
+ ectx.eth_reg_assign(nm, self)
+ if self.type == 'Type_Ref' and not self.tr_need_own_fn(ectx):
+ ectx.eth_reg_type(nm, self)
+ # SET_TYPE conformance directive can force this name onto another type.
+ virtual_tr = Type_Ref(val=ectx.conform.use_item('SET_TYPE', nm))
+ if (self.type == 'Type_Ref') or ectx.conform.check_item('SET_TYPE', nm):
+ if ident and (ectx.conform.check_item('TYPE_RENAME', nm) or ectx.conform.get_fn_presence(nm) or selflag):
+ if ectx.conform.check_item('SET_TYPE', nm):
+ ectx.eth_reg_type(nm, virtual_tr) # dummy Type Reference
+ else:
+ ectx.eth_reg_type(nm, self) # new type
+ trnm = nm
+ elif ectx.conform.check_item('SET_TYPE', nm):
+ trnm = ectx.conform.use_item('SET_TYPE', nm)
+ elif (self.type == 'Type_Ref') and self.tr_need_own_fn(ectx):
+ ectx.eth_reg_type(nm, self) # need own function, e.g. for constraints
+ trnm = nm
+ else:
+ trnm = self.val
+ else:
+ ectx.eth_reg_type(nm, self, mod = ectx.Module())
+ trnm = nm
+ # VIRTUAL_ASSGN registers the type under an extra, virtual name.
+ if ectx.conform.check_item('VIRTUAL_ASSGN', nm):
+ vnm = ectx.conform.use_item('VIRTUAL_ASSGN', nm)
+ ectx.eth_reg_assign(vnm, self, virt=True)
+ ectx.eth_reg_type(vnm, self)
+ self.eth_reg_sub(vnm, ectx)
+ # Tell a TaggedType parent which registered name its payload got.
+ if parent and (ectx.type[parent]['val'].type == 'TaggedType'):
+ ectx.type[parent]['val'].eth_set_val_name(parent, trnm, ectx)
+ if ident and not tagflag and not self.eth_omit_field():
+ ectx.eth_reg_field(nm, trnm, idx=idx, parent=parent, impl=self.HasImplicitTag(ectx))
+ # Finally recurse into components (of the SET_TYPE target if redirected).
+ if ectx.conform.check_item('SET_TYPE', nm):
+ virtual_tr.eth_reg_sub(nm, ectx)
+ else:
+ self.eth_reg_sub(nm, ectx)
+
+ # Return this type's SIZE constraint as ready-to-emit C strings:
+ # (min, max, extensible) with 'NO_BOUND' for open ends and 'TRUE'/'FALSE'
+ # for the extensibility flag.
+ def eth_get_size_constr(self, ectx):
+ (minv, maxv, ext) = ('MIN', 'MAX', False)
+ if self.HasSizeConstraint():
+ if self.constr.IsSize():
+ (minv, maxv, ext) = self.constr.GetSize(ectx)
+ # In an intersection at most one arm carries the SIZE part.
+ if (self.constr.type == 'Intersection'):
+ if self.constr.subtype[0].IsSize():
+ (minv, maxv, ext) = self.constr.subtype[0].GetSize(ectx)
+ elif self.constr.subtype[1].IsSize():
+ (minv, maxv, ext) = self.constr.subtype[1].GetSize(ectx)
+ if minv == 'MIN': minv = 'NO_BOUND'
+ if maxv == 'MAX': maxv = 'NO_BOUND'
+ if (ext): ext = 'TRUE'
+ else: ext = 'FALSE'
+ return (minv, maxv, ext)
+
+ # Return this type's value-range constraint as C literal strings:
+ # (min, max, extensible).  Numeric bounds get the right C suffix or
+ # 64-bit constant macro so the generated dissector compiles cleanly.
+ def eth_get_value_constr(self, ectx):
+ (minv, maxv, ext) = ('MIN', 'MAX', False)
+ if self.HasValueConstraint():
+ (minv, maxv, ext) = self.constr.GetValue(ectx)
+ if minv == 'MIN': minv = 'NO_BOUND'
+ if maxv == 'MAX': maxv = 'NO_BOUND'
+ # Unsigned decimal literal -> add 'U' suffix.
+ if str(minv).isdigit():
+ minv += 'U'
+ elif (str(minv)[0] == "-") and str(minv)[1:].isdigit():
+ # INT32_MIN cannot be written as a plain literal in C.
+ if (int(minv) == -(2**31)):
+ minv = "G_MININT32"
+ elif (int(minv) < -(2**31)):
+ minv = "G_GINT64_CONSTANT(%s)" % (str(minv))
+ if str(maxv).isdigit():
+ if (int(maxv) >= 2**32):
+ maxv = "G_GUINT64_CONSTANT(%s)" % (str(maxv))
+ else:
+ maxv += 'U'
+ if (ext): ext = 'TRUE'
+ else: ext = 'FALSE'
+ return (minv, maxv, ext)
+
+ # Return the permitted-alphabet constraint as (alphabet, length) C
+ # expressions.  For a simple quoted literal the length is computed here;
+ # for anything else strlen() is emitted and evaluated at dissection time.
+ def eth_get_alphabet_constr(self, ectx):
+ (alph, alphlen) = ('NULL', '0')
+ if self.HasPermAlph():
+ alph = self.constr.GetPermAlph(ectx)
+ if not alph:
+ alph = 'NULL'
+ if (alph != 'NULL'):
+ # A single quoted string with no interior quotes: len minus the
+ # two surrounding quote characters.
+ if (((alph[0] + alph[-1]) == '""') and (not alph.count('"', 1, -1))):
+ alphlen = str(len(alph) - 2)
+ else:
+ alphlen = 'strlen(%s)' % (alph)
+ return (alph, alphlen)
+
+ # Default value-string emitter: subclasses with named values override
+ # this; reaching it for a type that claims to have values is a bug, so
+ # a diagnostic is printed into the generated output.
+ def eth_type_vals(self, tname, ectx):
+ if self.eth_has_vals():
+ print("#Unhandled eth_type_vals('%s') in %s" % (tname, self.type))
+ print(self.str_depth(1))
+ return ''
+
+ # Default enum emitter: same fallback pattern as eth_type_vals() above.
+ def eth_type_enum(self, tname, ectx):
+ if self.eth_has_enum(tname, ectx):
+ print("#Unhandled eth_type_enum('%s') in %s" % (tname, self.type))
+ print(self.str_depth(1))
+ return ''
+
+ # Per-type static C table (e.g. sequence_t); none by default.
+ def eth_type_default_table(self, ectx, tname):
+ return ''
+
+ # Body of the generated dissect_...() function; subclasses must
+ # override, hence the diagnostic fallback.
+ def eth_type_default_body(self, ectx, tname):
+ print("#Unhandled eth_type_default_body('%s') in %s" % (tname, self.type))
+ print(self.str_depth(1))
+ return ''
+
+ # Build the %(NAME)s substitution dictionary used to expand the C code
+ # templates for this type's dissector function.
+ def eth_type_default_pars(self, ectx, tname):
+ pars = {
+ 'TNAME' : tname,
+ 'ER' : ectx.encp(),
+ 'FN_VARIANT' : '',
+ 'TREE' : 'tree',
+ 'TVB' : 'tvb',
+ 'OFFSET' : 'offset',
+ 'ACTX' : 'actx',
+ 'HF_INDEX' : 'hf_index',
+ 'VAL_PTR' : 'NULL',
+ 'IMPLICIT_TAG' : 'implicit_tag',
+ }
+ if (ectx.eth_type[tname]['tree']):
+ pars['ETT_INDEX'] = ectx.eth_type[tname]['tree']
+ # When modules are merged, generated names drop the protocol prefix.
+ if (ectx.merge_modules):
+ pars['PROTOP'] = ''
+ else:
+ pars['PROTOP'] = ectx.eth_type[tname]['proto'] + '_'
+ return pars
+
+ # Emit the complete C dissector function for this type: optional static
+ # table, header, body and footer, with all %(NAME)s template
+ # placeholders expanded.
+ def eth_type_fn(self, proto, tname, ectx):
+ body = self.eth_type_default_body(ectx, tname)
+ pars = self.eth_type_default_pars(ectx, tname)
+ # FN_PARS conformance directives override individual parameters,
+ # either for this name or for the referenced source name.
+ if ectx.conform.check_item('FN_PARS', tname):
+ pars.update(ectx.conform.use_item('FN_PARS', tname))
+ elif ectx.conform.check_item('FN_PARS', ectx.eth_type[tname]['ref'][0]):
+ pars.update(ectx.conform.use_item('FN_PARS', ectx.eth_type[tname]['ref'][0]))
+ pars['DEFAULT_BODY'] = body
+ # Parameters may reference each other; four passes is enough to
+ # resolve the nesting used by the templates.
+ for i in range(4):
+ for k in list(pars.keys()):
+ try:
+ pars[k] = pars[k] % pars
+ except (ValueError,TypeError):
+ # Re-raise the same exception type with the parameter dump
+ # attached, to make template errors diagnosable.
+ raise sys.exc_info()[0]("%s\n%s" % (str(pars), sys.exc_info()[1]))
+ out = '\n'
+ out += self.eth_type_default_table(ectx, tname) % pars
+ out += ectx.eth_type_fn_hdr(tname)
+ out += ectx.eth_type_fn_body(tname, body, pars=pars)
+ out += ectx.eth_type_fn_ftr(tname)
+ return out
+
+#--- Value --------------------------------------------------------------------
class Value (Node):
    """Base node for ASN.1 values; subclasses specialise the rendering."""

    def __init__(self, *args, **kw):
        # The name is assigned later via SetName() during registration.
        self.name = None
        Node.__init__(self, *args, **kw)

    def SetName(self, name):
        self.name = name

    def to_str(self, ectx):
        # Default rendering is simply the raw value.
        return str(self.val)

    def get_dep(self):
        # A plain value depends on no other assignment.
        return None

    def fld_obj_repr(self, ectx):
        return self.to_str(ectx)
+
+#--- Value_Ref -----------------------------------------------------------------
class Value_Ref (Value):
    """Reference to a named ASN.1 value; rendered as its C identifier."""

    def to_str(self, ectx):
        referenced_name = self.val
        return asn2c(referenced_name)
+
+#--- ObjectClass ---------------------------------------------------------------------
class ObjectClass (Node):
    """Base node for ASN.1 information object classes."""

    def __init__(self, *args, **kw):
        # The name is assigned later via SetName() during registration.
        self.name = None
        Node.__init__(self, *args, **kw)

    def SetName(self, name):
        self.name = name
        # Make the identifier known to the parser's class registry.
        add_class_ident(self.name)

    def eth_reg(self, ident, ectx):
        # Honour '#.OMIT_ASSIGNMENT'-style directives for object classes.
        if ectx.conform.omit_assignment('C', self.name, ectx.Module()):
            return
        ectx.eth_reg_objectclass(self.name, self)
+
+#--- Class_Ref -----------------------------------------------------------------
+# Reference to an object class defined elsewhere; behaves exactly like its
+# ObjectClass base.
+class Class_Ref (ObjectClass):
+ pass
+
+#--- ObjectClassDefn ---------------------------------------------------------------------
class ObjectClassDefn (ObjectClass):
    """A full object class definition (CLASS { ... })."""

    def reg_types(self):
        # Record each field's type information in the global class table;
        # the first element of the representation is the field name.
        for class_field in self.fields:
            rep = class_field.fld_repr()
            set_type_to_class(self.name, rep[0], rep[1:])
+
+
+#--- Tag ---------------------------------------------------------------
class Tag (Node):
    """An ASN.1 tag (class keyword + number) attached to a type."""

    # Tag class keyword -> BER class constant used in generated C code.
    _BER_CLASS = {
        'UNIVERSAL':   'BER_CLASS_UNI',
        'APPLICATION': 'BER_CLASS_APP',
        'CONTEXT':     'BER_CLASS_CON',
        'PRIVATE':     'BER_CLASS_PRI',
    }
    # Tag class keyword -> one-letter prefix used in generated type names.
    _TNAME_LETTER = {
        'UNIVERSAL':   'U',
        'APPLICATION': 'A',
        'CONTEXT':     'C',
        'PRIVATE':     'P',
    }

    def to_python(self, ctx):
        tag_expr = mk_tag_str(ctx, self.tag.cls,
                              self.tag_typ,
                              self.tag.num)
        return 'asn1.TYPE(%s,%s)' % (tag_expr, self.typ.to_python(ctx))

    def IsImplicit(self, ectx):
        if self.mode == 'IMPLICIT':
            return True
        # 'default' mode inherits the module's tagging environment.
        return (self.mode == 'default') and (ectx.tag_def != 'EXPLICIT')

    def GetTag(self, ectx):
        # Unknown class keywords map to an empty string, as before.
        return (self._BER_CLASS.get(self.cls, ''), self.num)

    def eth_tname(self):
        return self._TNAME_LETTER.get(self.cls, '') + str(self.num)
+
+#--- Constraint ---------------------------------------------------------------
+# Running counter used to generate unique names for anonymous constraints.
+constr_cnt = 0
+# A parsed ASN.1 constraint.  self.type discriminates the kind ('Size',
+# 'SingleValue', 'ValueRange', 'From', 'Contents', 'Intersection', 'Union',
+# ...); for the binary kinds self.subtype is a two-element list of nested
+# Constraint nodes, otherwise it holds the constrained value(s).
+class Constraint (Node):
+ def to_python (self, ctx):
+ print("Ignoring constraint:", self.type)
+ return self.subtype.typ.to_python (ctx)
+ def __str__ (self):
+ return "Constraint: type=%s, subtype=%s" % (self.type, self.subtype)
+
+ # Unique placeholder type name for anonymous constrained types.
+ def eth_tname(self):
+ return '#' + self.type + '_' + str(id(self))
+
+ # True if a SIZE bound can be extracted from this constraint.
+ def IsSize(self):
+ return (self.type == 'Size' and self.subtype.IsValue()) \
+ or (self.type == 'Intersection' and (self.subtype[0].IsSize() or self.subtype[1].IsSize())) \
+
+ # Extract the SIZE bound as (min, max, extensible).
+ def GetSize(self, ectx):
+ (minv, maxv, ext) = ('MIN', 'MAX', False)
+ if self.IsSize():
+ if self.type == 'Size':
+ (minv, maxv, ext) = self.subtype.GetValue(ectx)
+ ext = ext or (hasattr(self, 'ext') and self.ext)
+ elif self.type == 'Intersection':
+ # Only one arm of the intersection may be the SIZE part.
+ if self.subtype[0].IsSize() and not self.subtype[1].IsSize():
+ (minv, maxv, ext) = self.subtype[0].GetSize(ectx)
+ elif not self.subtype[0].IsSize() and self.subtype[1].IsSize():
+ (minv, maxv, ext) = self.subtype[1].GetSize(ectx)
+ return (minv, maxv, ext)
+
+ # True if a value range can be extracted from this constraint.
+ def IsValue(self):
+ return self.type == 'SingleValue' \
+ or self.type == 'ValueRange' \
+ or (self.type == 'Intersection' and (self.subtype[0].IsValue() or self.subtype[1].IsValue())) \
+ or (self.type == 'Union' and (self.subtype[0].IsValue() and self.subtype[1].IsValue()))
+
+ # Extract the value range as (min, max, extensible).  Intersections
+ # narrow the range (max of mins, min of maxes); unions widen it.
+ def GetValue(self, ectx):
+ (minv, maxv, ext) = ('MIN', 'MAX', False)
+ if self.IsValue():
+ if self.type == 'SingleValue':
+ minv = ectx.value_get_eth(self.subtype)
+ maxv = ectx.value_get_eth(self.subtype)
+ ext = hasattr(self, 'ext') and self.ext
+ elif self.type == 'ValueRange':
+ minv = ectx.value_get_eth(self.subtype[0])
+ maxv = ectx.value_get_eth(self.subtype[1])
+ ext = hasattr(self, 'ext') and self.ext
+ elif self.type == 'Intersection':
+ if self.subtype[0].IsValue() and not self.subtype[1].IsValue():
+ (minv, maxv, ext) = self.subtype[0].GetValue(ectx)
+ elif not self.subtype[0].IsValue() and self.subtype[1].IsValue():
+ (minv, maxv, ext) = self.subtype[1].GetValue(ectx)
+ elif self.subtype[0].IsValue() and self.subtype[1].IsValue():
+ v0 = self.subtype[0].GetValue(ectx)
+ v1 = self.subtype[1].GetValue(ectx)
+ (minv, maxv, ext) = (ectx.value_max(v0[0],v1[0]), ectx.value_min(v0[1],v1[1]), v0[2] and v1[2])
+ elif self.type == 'Union':
+ if self.subtype[0].IsValue() and self.subtype[1].IsValue():
+ v0 = self.subtype[0].GetValue(ectx)
+ v1 = self.subtype[1].GetValue(ectx)
+ (minv, maxv, ext) = (ectx.value_min(v0[0],v1[0]), ectx.value_max(v0[1],v1[1]), hasattr(self, 'ext') and self.ext)
+ return (minv, maxv, ext)
+
+ # True if an alphabet (set of permitted characters) can be derived.
+ def IsAlphabet(self):
+ return self.type == 'SingleValue' \
+ or self.type == 'ValueRange' \
+ or (self.type == 'Intersection' and (self.subtype[0].IsAlphabet() or self.subtype[1].IsAlphabet())) \
+ or (self.type == 'Union' and (self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet()))
+
+ # Build the alphabet as a quoted C string literal where possible; a
+ # union of two simple literals is concatenated into one literal.
+ def GetAlphabet(self, ectx):
+ alph = None
+ if self.IsAlphabet():
+ if self.type == 'SingleValue':
+ alph = ectx.value_get_eth(self.subtype)
+ elif self.type == 'ValueRange':
+ # Both ends must be single-character quoted literals, e.g.
+ # "a".."z"; expand the range character by character.
+ if ((len(self.subtype[0]) == 3) and ((self.subtype[0][0] + self.subtype[0][-1]) == '""') \
+ and (len(self.subtype[1]) == 3) and ((self.subtype[1][0] + self.subtype[1][-1]) == '""')):
+ alph = '"'
+ for c in range(ord(self.subtype[0][1]), ord(self.subtype[1][1]) + 1):
+ alph += chr(c)
+ alph += '"'
+ elif self.type == 'Union':
+ if self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet():
+ a0 = self.subtype[0].GetAlphabet(ectx)
+ a1 = self.subtype[1].GetAlphabet(ectx)
+ if (((a0[0] + a0[-1]) == '""') and not a0.count('"', 1, -1) \
+ and ((a1[0] + a1[-1]) == '""') and not a1.count('"', 1, -1)):
+ alph = '"' + a0[1:-1] + a1[1:-1] + '"'
+ else:
+ alph = a0 + ' ' + a1
+ return alph
+
+ # True if this is a permitted-alphabet (FROM ...) constraint.
+ def IsPermAlph(self):
+ return self.type == 'From' and self.subtype.IsAlphabet() \
+ or (self.type == 'Intersection' and (self.subtype[0].IsPermAlph() or self.subtype[1].IsPermAlph())) \
+
+ def GetPermAlph(self, ectx):
+ alph = None
+ if self.IsPermAlph():
+ if self.type == 'From':
+ alph = self.subtype.GetAlphabet(ectx)
+ elif self.type == 'Intersection':
+ if self.subtype[0].IsPermAlph() and not self.subtype[1].IsPermAlph():
+ alph = self.subtype[0].GetPermAlph(ectx)
+ elif not self.subtype[0].IsPermAlph() and self.subtype[1].IsPermAlph():
+ alph = self.subtype[1].GetPermAlph(ectx)
+ return alph
+
+ # True if this is a contents (CONTAINING ...) constraint.
+ def IsContents(self):
+ return self.type == 'Contents' \
+ or (self.type == 'Intersection' and (self.subtype[0].IsContents() or self.subtype[1].IsContents())) \
+
+ # Name of the contained type, when the contents constraint names one.
+ def GetContents(self, ectx):
+ contents = None
+ if self.IsContents():
+ if self.type == 'Contents':
+ if self.subtype.type == 'Type_Ref':
+ contents = self.subtype.val
+ elif self.type == 'Intersection':
+ if self.subtype[0].IsContents() and not self.subtype[1].IsContents():
+ contents = self.subtype[0].GetContents(ectx)
+ elif not self.subtype[0].IsContents() and self.subtype[1].IsContents():
+ contents = self.subtype[1].GetContents(ectx)
+ return contents
+
+ # True if the constraint admits negative values (used to pick a signed
+ # C integer type).  'MIN' as the lower bound counts as negative.
+ def IsNegativ(self):
+ def is_neg(sval):
+ return isinstance(sval, str) and (sval[0] == '-')
+ if self.type == 'SingleValue':
+ return is_neg(self.subtype)
+ elif self.type == 'ValueRange':
+ if self.subtype[0] == 'MIN': return True
+ return is_neg(self.subtype[0])
+ return False
+
+ # Build the suffix appended to generated type names for this constraint
+ # (e.g. SIZE_1_32, 0_255_); anonymous kinds get a numbered CONSTRnnn.
+ def eth_constrname(self):
+ def int2str(val):
+ if isinstance(val, Value_Ref):
+ return asn2c(val.val)
+ try:
+ # Negative bounds are encoded with an 'M' prefix (minus).
+ if (int(val) < 0):
+ return 'M' + str(-int(val))
+ else:
+ return str(int(val))
+ except (ValueError, TypeError):
+ return asn2c(str(val))
+
+ ext = ''
+ if hasattr(self, 'ext') and self.ext:
+ ext = '_'
+ if self.type == 'SingleValue':
+ return int2str(self.subtype) + ext
+ elif self.type == 'ValueRange':
+ return int2str(self.subtype[0]) + '_' + int2str(self.subtype[1]) + ext
+ elif self.type == 'Size':
+ return 'SIZE_' + self.subtype.eth_constrname() + ext
+ else:
+ if (not hasattr(self, 'constr_num')):
+ global constr_cnt
+ constr_cnt += 1
+ self.constr_num = constr_cnt
+ return 'CONSTR%03d%s' % (self.constr_num, ext)
+
+ # True if the constrained range cannot be represented in 32 bits, so
+ # the generated code must use 64-bit integer handling.
+ def Needs64b(self, ectx):
+ (minv, maxv, ext) = self.GetValue(ectx)
+ if ((str(minv).isdigit() or ((str(minv)[0] == "-") and str(minv)[1:].isdigit())) \
+ and (str(maxv).isdigit() or ((str(maxv)[0] == "-") and str(maxv)[1:].isdigit())) \
+ and ((abs(int(maxv) - int(minv)) >= 2**32) or (int(minv) < -2**31) or (int(maxv) >= 2**32))) \
+ or (maxv == 'MAX') or (minv == 'MIN'):
+ return True
+ return False
+
+# A single parsed ASN.1 module: identifier, default tagging environment
+# and body (imports/exports/assignments).
+class Module (Node):
+ def to_python (self, ctx):
+ ctx.tag_def = self.tag_def.dfl_tag
+ return """#%s
+ %s""" % (self.ident, self.body.to_python (ctx))
+
+ def get_name(self):
+ return self.ident.val
+
+ # Protocol name for generated symbols: the -p command-line override
+ # wins, otherwise the conformance-file MODULE mapping (defaulting to
+ # the module's own name).
+ def get_proto(self, ectx):
+ if (ectx.proto):
+ prot = ectx.proto
+ else:
+ prot = ectx.conform.use_item('MODULE', self.get_name(), val_dflt=self.get_name())
+ return prot
+
+ # Register this module with the output context and translate its body.
+ def to_eth(self, ectx):
+ ectx.tags_def = 'EXPLICIT' # default = explicit
+ ectx.proto = self.get_proto(ectx)
+ ectx.tag_def = self.tag_def.dfl_tag
+ ectx.eth_reg_module(self)
+ self.body.to_eth(ectx)
+
+# The body of a module: exports, imports and the assignment list.
+class Module_Body (Node):
+ def to_python (self, ctx):
+ # XXX handle exports, imports.
+ l = [x.to_python (ctx) for x in self.assign_list]
+ l = [a for a in l if a != '']
+ return "\n".join (l)
+
+ def to_eth(self, ectx):
+ # Exports
+ ectx.eth_exports(self.exports)
+ # Imports: register every imported type/value/class symbol under the
+ # protocol its source module maps to, and record the dependency.
+ for i in self.imports:
+ mod = i.module.val
+ proto = ectx.conform.use_item('MODULE', mod, val_dflt=mod)
+ ectx.eth_module_dep_add(ectx.Module(), mod)
+ for s in i.symbol_list:
+ if isinstance(s, Type_Ref):
+ ectx.eth_import_type(s.val, mod, proto)
+ elif isinstance(s, Value_Ref):
+ ectx.eth_import_value(s.val, mod, proto)
+ elif isinstance(s, Class_Ref):
+ ectx.eth_import_class(s.val, mod, proto)
+ else:
+ msg = 'Unknown kind of imported symbol %s from %s' % (str(s), mod)
+ warnings.warn_explicit(msg, UserWarning, '', 0)
+ # AssignmentList: top-level registration (ident='' marks assignments).
+ for a in self.assign_list:
+ a.eth_reg('', ectx)
+
class Default_Tags (Node):
    """Carrier node for a module's default tagging mode.

    Its payload is read directly (as ``.dfl_tag``) by ``Module.to_python`` /
    ``Module.to_eth``; the node itself is never rendered.
    """

    def to_python(self, ctx):  # not to be used directly
        # Was 'assert (0)': assertions are stripped under 'python -O', which
        # would make this silently return None.  Raise explicitly instead so
        # accidental calls always fail loudly.
        raise NotImplementedError('Default_Tags.to_python() must not be called directly')
+
+# XXX should just calculate dependencies as we go along.
+# Walk the node tree rooted at 'node' and mark every referenced type name
+# in 'dict' (used as a set: name -> 1).  'trace' enables debug prints.
+# NOTE(review): the parameter named 'dict' shadows the builtin; left as-is
+# because parameter names are part of the call interface.
+def calc_dependencies (node, dict, trace = 0):
+ # Leaves without attributes (strings, numbers) contribute nothing.
+ if not hasattr (node, '__dict__'):
+ if trace: print("#returning, node=", node)
+ return
+ if isinstance (node, Type_Ref):
+ dict [node.val] = 1
+ if trace: print("#Setting", node.val)
+ return
+ # Recurse into every public attribute, descending into Node values and
+ # lists of values.
+ for (a, val) in list(node.__dict__.items ()):
+ if trace: print("# Testing node ", node, "attr", a, " val", val)
+ if a[0] == '_':
+ continue
+ elif isinstance (val, Node):
+ calc_dependencies (val, dict, trace)
+ elif isinstance (val, type ([])):
+ for v in val:
+ calc_dependencies (v, dict, trace)
+
+
+# A 'Name ::= Type' assignment for the Python backend.
+class Type_Assign (Node):
+ def __init__ (self, *args, **kw):
+ Node.__init__ (self, *args, **kw)
+ # Propagate the assignment name onto a SEQUENCE value so the
+ # generated Python class can be named after it.
+ if isinstance (self.val, Tag): # XXX replace with generalized get_typ_ignoring_tag (no-op for Node, override in Tag)
+ to_test = self.val.typ
+ else:
+ to_test = self.val
+ if isinstance (to_test, SequenceType):
+ to_test.sequence_name = self.name.name
+
+ def to_python (self, ctx):
+ # Collect referenced type names so the context can order assignments.
+ dep_dict = {}
+ calc_dependencies (self.val, dep_dict, 0)
+ depend_list = list(dep_dict.keys ())
+ return ctx.register_assignment (self.name.name,
+ self.val.to_python (ctx),
+ depend_list)
+
+# Verbatim Python text (from PYTHON-QUOTE pragmas) passed straight through
+# to the generated output.
+class PyQuote (Node):
+ def to_python (self, ctx):
+ return ctx.register_pyquote (self.val)
+
+#--- Type_Ref -----------------------------------------------------------------
+# A reference to a type defined elsewhere (possibly in another module).
+# self.val holds the referenced type's name.
+class Type_Ref (Type):
+ def to_python (self, ctx):
+ return self.val
+
+ def eth_reg_sub(self, ident, ectx):
+ # The only sub-registration needed is the dependency edge.
+ ectx.eth_dep_add(ident, self.val)
+
+ def eth_tname(self):
+ # A size-constrained reference gets its own distinct type name.
+ if self.HasSizeConstraint():
+ return asn2c(self.val) + '_' + self.constr.eth_constrname()
+ else:
+ return asn2c(self.val)
+
+ # PER/OER size-constrained references need a wrapper function of their
+ # own rather than a direct call to the referenced dissector.
+ def tr_need_own_fn(self, ectx):
+ return (ectx.Per() or ectx.Oer()) and self.HasSizeConstraint()
+
+ def fld_obj_repr(self, ectx):
+ return self.val
+
+ # Resolve COMPONENTS OF: only possible for locally defined types.
+ def get_components(self, ectx):
+ if self.val not in ectx.type or ectx.type[self.val]['import']:
+ msg = "Can not get COMPONENTS OF %s which is imported type" % (self.val)
+ warnings.warn_explicit(msg, UserWarning, '', 0)
+ return []
+ else:
+ return ectx.type[self.val]['val'].get_components(ectx)
+
+ # Tag of the referenced type.  For imports the tag must come from the
+ # global registry or an IMPORT_TAG conformance directive; a sentinel
+ # '-1/*imported*/' pair is used (with a warning) when neither exists.
+ def GetTTag(self, ectx):
+ #print "GetTTag(%s)\n" % self.val;
+ if (ectx.type[self.val]['import']):
+ if 'ttag' not in ectx.type[self.val]:
+ ttag = ectx.get_ttag_from_all(self.val, ectx.type[self.val]['import'])
+ if not ttag and not ectx.conform.check_item('IMPORT_TAG', self.val):
+ msg = 'Missing tag information for imported type %s from %s (%s)' % (self.val, ectx.type[self.val]['import'], ectx.type[self.val]['proto'])
+ warnings.warn_explicit(msg, UserWarning, '', 0)
+ ttag = ('-1/*imported*/', '-1/*imported*/')
+ ectx.type[self.val]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.val, val_dflt=ttag)
+ return ectx.type[self.val]['ttag']
+ else:
+ return ectx.type[self.val]['val'].GetTag(ectx)
+
+ def IndetermTag(self, ectx):
+ if (ectx.type[self.val]['import']):
+ return False
+ else:
+ return ectx.type[self.val]['val'].IndetermTag(ectx)
+
+ def eth_type_default_pars(self, ectx, tname):
+ if tname:
+ pars = Type.eth_type_default_pars(self, ectx, tname)
+ else:
+ pars = {}
+ # Point the template at the referenced type's dissector function.
+ t = ectx.type[self.val]['ethname']
+ pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
+ pars['TYPE_REF_TNAME'] = t
+ pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
+ if self.HasSizeConstraint():
+ (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
+ return pars
+
+ # Generated body: BER forwards directly; PER/OER wrap in a
+ # size-constrained helper when a SIZE constraint is present.
+ def eth_type_default_body(self, ectx, tname):
+ if (ectx.Ber()):
+ body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
+ par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
+ elif (ectx.Per() or ectx.Oer()):
+ if self.HasSizeConstraint():
+ body = ectx.eth_fn_call('dissect_%(ER)s_size_constrained_type', ret='offset',
+ par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),
+ ('"%(TYPE_REF_TNAME)s"', '%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
+ else:
+ body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
+ par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
+ else:
+ body = '#error Can not decode %s' % (tname)
+ return body
+
+#--- SelectionType ------------------------------------------------------------
+# An ASN.1 selection type: 'field < ReferencedType', i.e. the type of one
+# alternative of a CHOICE.  self.typ is the base type, self.sel the
+# selected alternative's name.
+class SelectionType (Type):
+ def to_python (self, ctx):
+ return self.val
+
+ def sel_of_typeref(self):
+ # Selection is only resolvable when the base is a type reference.
+ return self.typ.type == 'Type_Ref'
+
+ def eth_reg_sub(self, ident, ectx):
+ if not self.sel_of_typeref():
+ self.seltype = ''
+ return
+ # Ask the context for the selected alternative's registered name and
+ # record the dependency on it.
+ self.seltype = ectx.eth_sel_req(self.typ.val, self.sel)
+ ectx.eth_dep_add(ident, self.seltype)
+
+ def eth_ftype(self, ectx):
+ (ftype, display) = ('FT_NONE', 'BASE_NONE')
+ if self.sel_of_typeref() and not ectx.type[self.seltype]['import']:
+ (ftype, display) = ectx.type[self.typ.val]['val'].eth_ftype_sel(self.sel, ectx)
+ return (ftype, display)
+
+ # Tag of the selected alternative; imported selections fall back to
+ # IMPORT_TAG directives or a '-1 /*imported*/' sentinel (with warning).
+ def GetTTag(self, ectx):
+ #print "GetTTag(%s)\n" % self.seltype;
+ if (ectx.type[self.seltype]['import']):
+ if 'ttag' not in ectx.type[self.seltype]:
+ if not ectx.conform.check_item('IMPORT_TAG', self.seltype):
+ msg = 'Missing tag information for imported type %s from %s (%s)' % (self.seltype, ectx.type[self.seltype]['import'], ectx.type[self.seltype]['proto'])
+ warnings.warn_explicit(msg, UserWarning, '', 0)
+ ectx.type[self.seltype]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.seltype, val_dflt=('-1 /*imported*/', '-1 /*imported*/'))
+ return ectx.type[self.seltype]['ttag']
+ else:
+ return ectx.type[self.typ.val]['val'].GetTTagSel(self.sel, ectx)
+
+ def eth_type_default_pars(self, ectx, tname):
+ pars = Type.eth_type_default_pars(self, ectx, tname)
+ if self.sel_of_typeref():
+ t = ectx.type[self.seltype]['ethname']
+ pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
+ pars['TYPE_REF_TNAME'] = t
+ pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
+ return pars
+
+ # Generated body simply forwards to the selected type's dissector.
+ def eth_type_default_body(self, ectx, tname):
+ if not self.sel_of_typeref():
+ body = '#error Can not decode %s' % (tname)
+ elif (ectx.Ber()):
+ body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
+ par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
+ elif (ectx.Per() or ectx.Oer()):
+ body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
+ par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
+ else:
+ body = '#error Can not decode %s' % (tname)
+ return body
+
+#--- TaggedType -----------------------------------------------------------------
+# Synthetic wrapper created by Type.eth_reg() for each explicit tag level.
+# self.val is the wrapped type, self.tstrip the number of tags already
+# consumed on it.
+class TaggedType (Type):
+ def eth_tname(self):
+ # Prefix the payload's name with one token per remaining tag.
+ tn = ''
+ for i in range(self.tstrip, len(self.val.tags)):
+ tn += self.val.tags[i].eth_tname()
+ tn += '_'
+ tn += self.val.eth_tname()
+ return tn
+
+ # Called back from Type.eth_reg() once the wrapped payload has been
+ # registered, so this wrapper knows which function to dispatch to.
+ def eth_set_val_name(self, ident, val_name, ectx):
+ #print "TaggedType::eth_set_val_name(): ident=%s, val_name=%s" % (ident, val_name)
+ self.val_name = val_name
+ ectx.eth_dep_add(ident, self.val_name)
+
+ def eth_reg_sub(self, ident, ectx):
+ # Register the payload under the reserved untagged-member name.
+ self.val_name = ident + '/' + UNTAG_TYPE_NAME
+ self.val.eth_reg(self.val_name, ectx, tstrip=self.tstrip+1, tagflag=True, parent=ident)
+
+ def GetTTag(self, ectx):
+ #print "GetTTag(%s)\n" % self.seltype;
+ return self.GetTag(ectx)
+
+ def eth_ftype(self, ectx):
+ return self.val.eth_ftype(ectx)
+
+ def eth_type_default_pars(self, ectx, tname):
+ pars = Type.eth_type_default_pars(self, ectx, tname)
+ t = ectx.type[self.val_name]['ethname']
+ pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
+ pars['TYPE_REF_TNAME'] = t
+ pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
+ (pars['TAG_CLS'], pars['TAG_TAG']) = self.GetTag(ectx)
+ if self.HasImplicitTag(ectx):
+ pars['TAG_IMPL'] = 'TRUE'
+ else:
+ pars['TAG_IMPL'] = 'FALSE'
+ return pars
+
+ # Only BER materialises explicit tags; PER/OER never reach here.
+ def eth_type_default_body(self, ectx, tname):
+ if (ectx.Ber()):
+ body = ectx.eth_fn_call('dissect_%(ER)s_tagged_type', ret='offset',
+ par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
+ ('%(HF_INDEX)s', '%(TAG_CLS)s', '%(TAG_TAG)s', '%(TAG_IMPL)s', '%(TYPE_REF_FN)s',),))
+ else:
+ body = '#error Can not decode tagged_type %s' % (tname)
+ return body
+
+#--- SqType -----------------------------------------------------------
+# Common base for SEQUENCE/SET (OF) types: renders one row of the static
+# per-component C table consumed by the BER/PER/OER sequence dissectors.
+class SqType (Type):
+ # f: full field path; val: the component's type node; optional/ext:
+ # optionality and extension-root markers for the table entry.
+ def out_item(self, f, val, optional, ext, ectx):
+ if (val.eth_omit_field()):
+ # Omitted fields still need a table slot; use the dummy hf entry.
+ t = ectx.type[val.ident]['ethname']
+ fullname = ectx.dummy_eag_field
+ else:
+ ef = ectx.field[f]['ethname']
+ t = ectx.eth_hf[ef]['ethtype']
+ fullname = ectx.eth_hf[ef]['fullname']
+ if (ectx.Ber()):
+ #print "optional=%s, e.val.HasOwnTag()=%s, e.val.IndetermTag()=%s" % (str(e.optional), str(e.val.HasOwnTag()), str(e.val.IndetermTag(ectx)))
+ #print val.str_depth(1)
+ # Accumulate BER_FLAGS_* for this component.
+ opt = ''
+ if (optional):
+ opt = 'BER_FLAGS_OPTIONAL'
+ if (not val.HasOwnTag()):
+ if (opt): opt += '|'
+ opt += 'BER_FLAGS_NOOWNTAG'
+ elif (val.HasImplicitTag(ectx)):
+ if (opt): opt += '|'
+ opt += 'BER_FLAGS_IMPLTAG'
+ if (val.IndetermTag(ectx)):
+ if (opt): opt += '|'
+ opt += 'BER_FLAGS_NOTCHKTAG'
+ if (not opt): opt = '0'
+ else:
+ if optional:
+ opt = 'ASN1_OPTIONAL'
+ else:
+ opt = 'ASN1_NOT_OPTIONAL'
+ # Emit the table row in the encoding-specific column layout.
+ if (ectx.Ber()):
+ (tc, tn) = val.GetTag(ectx)
+ out = ' { %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
+ % ('&'+fullname, tc, tn, opt, ectx.eth_type[t]['proto'], t)
+ elif (ectx.Per() or ectx.Oer()):
+ out = ' { %-24s, %-23s, %-17s, dissect_%s_%s },\n' \
+ % ('&'+fullname, ext, opt, ectx.eth_type[t]['proto'], t)
+ else:
+ out = ''
+ return out
+
+#--- SeqType -----------------------------------------------------------
+# Common base for SEQUENCE and SET: component lists are elt_list (root),
+# optional ext_list (extension additions) and optional elt_list2 (root
+# components after the extension marker).
+class SeqType (SqType):
+
+ def all_components(self):
+ lst = self.elt_list[:]
+ if hasattr(self, 'ext_list'):
+ lst.extend(self.ext_list)
+ if hasattr(self, 'elt_list2'):
+ lst.extend(self.elt_list2)
+ return lst
+
+ # True while any component is a still-unexpanded COMPONENTS OF.
+ def need_components(self):
+ lst = self.all_components()
+ for e in (lst):
+ if e.type == 'components_of':
+ return True
+ return False
+
+ # Replace each COMPONENTS OF entry with the referenced type's
+ # components, one entry per pass until none remain.
+ def expand_components(self, ectx):
+ while self.need_components():
+ for i in range(len(self.elt_list)):
+ if self.elt_list[i].type == 'components_of':
+ comp = self.elt_list[i].typ.get_components(ectx)
+ self.elt_list[i:i+1] = comp
+ break
+ if hasattr(self, 'ext_list'):
+ for i in range(len(self.ext_list)):
+ if self.ext_list[i].type == 'components_of':
+ comp = self.ext_list[i].typ.get_components(ectx)
+ self.ext_list[i:i+1] = comp
+ break
+ if hasattr(self, 'elt_list2'):
+ for i in range(len(self.elt_list2)):
+ if self.elt_list2[i].type == 'components_of':
+ comp = self.elt_list2[i].typ.get_components(ectx)
+ self.elt_list2[i:i+1] = comp
+ break
+
+ # COMPONENTS OF takes only root components, not extension additions.
+ def get_components(self, ectx):
+ lst = self.elt_list[:]
+ if hasattr(self, 'elt_list2'):
+ lst.extend(self.elt_list2)
+ return lst
+
+ def eth_reg_sub(self, ident, ectx, components_available=False):
+ # check if autotag is required
+ autotag = False
+ if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
+ autotag = True
+ # NOTE(review): 'lst' below is computed but unused; only elt_list is
+ # scanned for pre-existing tags when deciding on autotagging.
+ lst = self.all_components()
+ for e in (self.elt_list):
+ if e.val.HasOwnTag(): autotag = False; break;
+ # expand COMPONENTS OF
+ if self.need_components():
+ if components_available:
+ self.expand_components(ectx)
+ else:
+ # Referenced components not registered yet; ask the context to
+ # call back later and retry.
+ ectx.eth_comp_req(ident)
+ return
+ # extension addition groups
+ if hasattr(self, 'ext_list'):
+ if (ectx.Per() or ectx.Oer()): # add names
+ eag_num = 1
+ for e in (self.ext_list):
+ if isinstance(e.val, ExtensionAdditionGroup):
+ e.val.parent_ident = ident
+ e.val.parent_tname = ectx.type[ident]['tname']
+ if (e.val.ver):
+ e.val.SetName("eag_v%s" % (e.val.ver))
+ else:
+ e.val.SetName("eag_%d" % (eag_num))
+ eag_num += 1;
+ else: # expand
+ new_ext_list = []
+ for e in (self.ext_list):
+ if isinstance(e.val, ExtensionAdditionGroup):
+ new_ext_list.extend(e.val.elt_list)
+ else:
+ new_ext_list.append(e)
+ self.ext_list = new_ext_list
+ # do autotag: sequential CONTEXT IMPLICIT tags across all lists
+ if autotag:
+ atag = 0
+ for e in (self.elt_list):
+ e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
+ atag += 1
+ if autotag and hasattr(self, 'elt_list2'):
+ for e in (self.elt_list2):
+ e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
+ atag += 1
+ if autotag and hasattr(self, 'ext_list'):
+ for e in (self.ext_list):
+ e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
+ atag += 1
+ # register components
+ for e in (self.elt_list):
+ e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
+ if hasattr(self, 'ext_list'):
+ for e in (self.ext_list):
+ e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
+ if hasattr(self, 'elt_list2'):
+ for e in (self.elt_list2):
+ e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
+
+ # Emit the static sequence_t component table for this SEQUENCE/SET.
+ def eth_type_default_table(self, ectx, tname):
+ #print ("eth_type_default_table(tname='%s')" % (tname))
+ fname = ectx.eth_type[tname]['ref'][0]
+ table = "static const %(ER)s_sequence_t %(TABLE)s[] = {\n"
+ if hasattr(self, 'ext_list'):
+ ext = 'ASN1_EXTENSION_ROOT'
+ else:
+ ext = 'ASN1_NO_EXTENSIONS'
+ # With no components at all, the extension flag must go on the
+ # terminating entry instead.
+ empty_ext_flag = '0'
+ if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0) and (not hasattr(self, 'elt_list2') or (len(self.elt_list2)==0)):
+ empty_ext_flag = ext
+ for e in (self.elt_list):
+ f = fname + '/' + e.val.name
+ table += self.out_item(f, e.val, e.optional, ext, ectx)
+ if hasattr(self, 'ext_list'):
+ for e in (self.ext_list):
+ f = fname + '/' + e.val.name
+ table += self.out_item(f, e.val, e.optional, 'ASN1_NOT_EXTENSION_ROOT', ectx)
+ if hasattr(self, 'elt_list2'):
+ for e in (self.elt_list2):
+ f = fname + '/' + e.val.name
+ table += self.out_item(f, e.val, e.optional, ext, ectx)
+ if (ectx.Ber()):
+ table += " { NULL, 0, 0, 0, NULL }\n};\n"
+ else:
+ table += " { NULL, %s, 0, NULL }\n};\n" % (empty_ext_flag)
+ return table
+
+#--- SeqOfType -----------------------------------------------------------
+# Common base for SEQUENCE OF and SET OF: single-item component table.
+class SeqOfType (SqType):
+ def eth_type_default_table(self, ectx, tname):
+ #print "eth_type_default_table(tname='%s')" % (tname)
+ fname = ectx.eth_type[tname]['ref'][0]
+ # Unnamed items are registered under the reserved item field name.
+ if self.val.IsNamed ():
+ f = fname + '/' + self.val.name
+ else:
+ f = fname + '/' + ITEM_FIELD_NAME
+ table = "static const %(ER)s_sequence_t %(TABLE)s[1] = {\n"
+ table += self.out_item(f, self.val, False, 'ASN1_NO_EXTENSIONS', ectx)
+ table += "};\n"
+ return table
+
+#--- SequenceOfType -----------------------------------------------------------
+# SEQUENCE OF: the hf for the container counts items (FT_UINT32) and each
+# item is registered with an '[##]' index suffix.
+class SequenceOfType (SeqOfType):
+ def to_python (self, ctx):
+ # name, tag (None for no tag, EXPLICIT() for explicit), typ)
+ # or '' + (1,) for optional
+ sizestr = ''
+ if self.size_constr is not None:
+ print("#Ignoring size constraint:", self.size_constr.subtype)
+ return "%sasn1.SEQUENCE_OF (%s%s)" % (ctx.spaces (),
+ self.val.to_python (ctx),
+ sizestr)
+
+ def eth_reg_sub(self, ident, ectx):
+ itmnm = ident
+ if not self.val.IsNamed ():
+ itmnm += '/' + ITEM_FIELD_NAME
+ self.val.eth_reg(itmnm, ectx, tstrip=1, idx='[##]', parent=ident)
+
+ # Distinct generated names for plain vs size-constrained instances;
+ # anonymous item types fall back to an id()-based placeholder.
+ def eth_tname(self):
+ if self.val.type != 'Type_Ref':
+ return '#' + self.type + '_' + str(id(self))
+ if not self.HasConstraint():
+ return "SEQUENCE_OF_" + self.val.eth_tname()
+ elif self.constr.IsSize():
+ return 'SEQUENCE_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
+ else:
+ return '#' + self.type + '_' + str(id(self))
+
+ def eth_ftype(self, ectx):
+ return ('FT_UINT32', 'BASE_DEC')
+
+ def eth_need_tree(self):
+ return True
+
+ def GetTTag(self, ectx):
+ return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')
+
+ def eth_type_default_pars(self, ectx, tname):
+ pars = Type.eth_type_default_pars(self, ectx, tname)
+ (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
+ pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence_of'
+ return pars
+
+ # Pick the plain or constrained sequence-of helper per encoding.
+ def eth_type_default_body(self, ectx, tname):
+ if (ectx.Ber()):
+ if (ectx.constraints_check and self.HasSizeConstraint()):
+ body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
+ par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
+ ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
+ else:
+ body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
+ par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
+ ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
+ elif ((ectx.Per() or ectx.Oer()) and not self.HasConstraint()):
+ body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
+ par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
+ ('%(ETT_INDEX)s', '%(TABLE)s',),))
+ elif ((ectx.Per() or ectx.Oer()) and self.constr.type == 'Size'):
+ body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
+ par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
+ ('%(ETT_INDEX)s', '%(TABLE)s',),
+ ('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s'),))
+ else:
+ body = '#error Can not decode SequenceOfType %s' % (tname)
+ return body
+
+
+#--- SetOfType ----------------------------------------------------------------
+# SET OF: mirrors SequenceOfType but with '(##)' index suffixes and the
+# SET universal tag.
+# NOTE(review): the PER branches here test only ectx.Per(), while the
+# parallel SequenceOfType code tests Per() or Oer() — OER SET OF falls
+# through to the #error branch; confirm against upstream before changing.
+class SetOfType (SeqOfType):
+ def eth_reg_sub(self, ident, ectx):
+ itmnm = ident
+ if not self.val.IsNamed ():
+ itmnm += '/' + ITEM_FIELD_NAME
+ self.val.eth_reg(itmnm, ectx, tstrip=1, idx='(##)', parent=ident)
+
+ # Distinct generated names for plain vs size-constrained instances.
+ def eth_tname(self):
+ if self.val.type != 'Type_Ref':
+ return '#' + self.type + '_' + str(id(self))
+ if not self.HasConstraint():
+ return "SET_OF_" + self.val.eth_tname()
+ elif self.constr.IsSize():
+ return 'SET_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
+ else:
+ return '#' + self.type + '_' + str(id(self))
+
+ def eth_ftype(self, ectx):
+ return ('FT_UINT32', 'BASE_DEC')
+
+ def eth_need_tree(self):
+ return True
+
+ def GetTTag(self, ectx):
+ return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')
+
+ def eth_type_default_pars(self, ectx, tname):
+ pars = Type.eth_type_default_pars(self, ectx, tname)
+ (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
+ pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set_of'
+ return pars
+
+ # Pick the plain or constrained set-of helper per encoding.
+ def eth_type_default_body(self, ectx, tname):
+ if (ectx.Ber()):
+ if (ectx.constraints_check and self.HasSizeConstraint()):
+ body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
+ par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
+ ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
+ else:
+ body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
+ par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
+ ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
+ elif (ectx.Per() and not self.HasConstraint()):
+ body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
+ par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
+ ('%(ETT_INDEX)s', '%(TABLE)s',),))
+ elif (ectx.Per() and self.constr.type == 'Size'):
+ body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
+ par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
+ ('%(ETT_INDEX)s', '%(TABLE)s',),
+ ('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s',),))
+ else:
+ body = '#error Can not decode SetOfType %s' % (tname)
+ return body
+
def mk_tag_str (ctx, cls, typ, num):
    """Render an ASN.1 tag as a Python 'asn1' module expression.

    ctx -- parse context; only its tags_def attribute is read, and only
           when typ is 'DEFAULT'
    cls -- tag class name (e.g. 'CONTEXT', 'APPLICATION', 'UNIVERSAL')
    typ -- tagging mode ('IMPLICIT', 'EXPLICIT' or 'DEFAULT'), any case
    num -- tag number, as a string or int
    """
    # XXX should do conversion to int earlier!
    tag_number = int(num)
    mode = typ.upper()
    # 'DEFAULT' means: use whatever the module's TAGS clause selected.
    if mode == 'DEFAULT':
        mode = ctx.tags_def
    return 'asn1.%s(%d,cls=asn1.%s_FLAG)' % (mode, tag_number, cls) # XXX still needs work
+
+#--- SequenceType -------------------------------------------------------------
class SequenceType (SeqType):
    """ASN.1 SEQUENCE type."""

    def to_python (self, ctx):
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        # XXX should also collect names for SEQUENCE inside SEQUENCE or
        # CHOICE or SEQUENCE_OF (where should the SEQUENCE_OF name come
        # from? for others, element or arm name would be fine)
        seq_name = getattr (self, 'sequence_name', None)
        if seq_name is None:
            seq_name = 'None'
        else:
            seq_name = "'" + seq_name + "'"
        if 'ext_list' in self.__dict__:
            return "%sasn1.SEQUENCE ([%s], ext=[%s], seq_name = %s)" % (ctx.spaces (),
                                     self.elts_to_py (self.elt_list, ctx),
                                     self.elts_to_py (self.ext_list, ctx), seq_name)
        else:
            return "%sasn1.SEQUENCE ([%s]), seq_name = %s" % (ctx.spaces (),
                                     self.elts_to_py (self.elt_list, ctx), seq_name)

    def elts_to_py (self, list, ctx):
        # we have elt_type, val= named_type, maybe default=, optional=
        # named_type node: either ident = or typ =
        # need to dismember these in order to generate Python output syntax.
        # (Note: the parameter shadows the builtin 'list'; kept for
        # compatibility with the rest of this legacy module.)
        ctx.indent ()
        def elt_to_py (e):
            # Render one member as a ('name', tag, type, optional) 4-tuple.
            assert (e.type == 'elt_type')
            nt = e.val
            optflag = e.optional
            #assert (not hasattr (e, 'default')) # XXX add support for DEFAULT!
            assert (nt.type == 'named_type')
            tagstr = 'None'
            identstr = nt.ident
            if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
                tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
                                     nt.typ.tag.tag_typ,nt.typ.tag.num)

                # Strip the tag wrapper and descend to the tagged type.
                nt = nt.typ
            return "('%s',%s,%s,%d)" % (identstr, tagstr,
                                        nt.typ.to_python (ctx), optflag)
        indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in list])
        ctx.outdent ()
        return rv

    def eth_need_tree(self):
        # Members are rendered in their own subtree.
        return True

    def GetTTag(self, ectx):
        # Universal class tag for SEQUENCE.
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')

    def eth_type_default_pars(self, ectx, tname):
        # Add the member-table symbol used by the generated dissector.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
        return pars

    def eth_type_default_body(self, ectx, tname):
        # BER and PER/OER helpers take their arguments in different orders.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode SequenceType %s' % (tname)
        return body
+
+#--- ExtensionAdditionGroup ---------------------------------------------------
class ExtensionAdditionGroup (SeqType):
    """A [[...]] extension addition group inside a SEQUENCE (PER only)."""

    def __init__(self,*args, **kw) :
        # parent_ident/parent_tname are filled in later by the code that
        # registers the enclosing SEQUENCE.
        self.parent_ident = None
        self.parent_tname = None
        SeqType.__init__ (self,*args, **kw)

    def eth_omit_field(self):
        # The group itself gets no hf field of its own in the output.
        return True

    def eth_tname(self):
        # Name the group after its parent type when both names are known.
        if (self.parent_tname and self.IsNamed()):
            return self.parent_tname + "_" + self.name
        else:
            return SeqType.eth_tname(self)

    def eth_reg_sub(self, ident, ectx):
        # Extension addition groups need the generator's dummy eag field,
        # and the group depends on its parent sequence.
        ectx.eth_dummy_eag_field_required()
        ectx.eth_dep_add(self.parent_ident, ident)
        SeqType.eth_reg_sub(self, ident, ectx)

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
        return pars

    def eth_type_default_body(self, ectx, tname):
        # Only PER has a sequence_eag helper.
        if (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence_eag', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode ExtensionAdditionGroup %s' % (tname)
        return body
+
+
+#--- SetType ------------------------------------------------------------------
class SetType (SeqType):
    """ASN.1 SET type (unordered SEQUENCE); member handling comes from SeqType."""

    def eth_need_tree(self):
        # Members are rendered in their own subtree.
        return True

    def GetTTag(self, ectx):
        # Universal class tag for SET.
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')

    def eth_type_default_pars(self, ectx, tname):
        # Extend the common parameter map with the member-table symbol.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set'
        return pars

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call for this SET (BER or PER only)."""
        if ectx.Ber():
            return ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        if ectx.Per():
            return ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        return '#error Can not decode SetType %s' % (tname)
+
+#--- ChoiceType ---------------------------------------------------------------
class ChoiceType (Type):
    """ASN.1 CHOICE type: exactly one of the listed alternatives is present."""

    def to_python (self, ctx):
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        if 'ext_list' in self.__dict__:
            return "%sasn1.CHOICE ([%s], ext=[%s])" % (ctx.spaces (),
                                   self.elts_to_py (self.elt_list, ctx),
                                   self.elts_to_py (self.ext_list, ctx))
        else:
            return "%sasn1.CHOICE ([%s])" % (ctx.spaces (), self.elts_to_py (self.elt_list, ctx))

    def elts_to_py (self, list, ctx):
        # Render each alternative as a ('name', tag, type) triple.
        # (Note: the parameter shadows the builtin 'list'; kept for
        # compatibility with the rest of this legacy module.)
        ctx.indent ()
        def elt_to_py (nt):
            assert (nt.type == 'named_type')
            tagstr = 'None'
            if hasattr (nt, 'ident'):
                identstr = nt.ident
            else:
                # Unnamed alternative: derive a name from the type itself,
                # or invent a fresh one.
                if hasattr (nt.typ, 'val'):
                    identstr = nt.typ.val # XXX, making up name
                elif hasattr (nt.typ, 'name'):
                    identstr = nt.typ.name
                else:
                    identstr = ctx.make_new_name ()

            if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
                tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
                                     nt.typ.tag.tag_typ,nt.typ.tag.num)

                # Strip the tag wrapper and descend to the tagged type.
                nt = nt.typ
            return "('%s',%s,%s)" % (identstr, tagstr,
                                     nt.typ.to_python (ctx))
        indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in list])
        ctx.outdent ()
        return rv

    def eth_reg_sub(self, ident, ectx):
        #print "eth_reg_sub(ident='%s')" % (ident)
        # check if autotag is required
        autotag = False
        if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
            autotag = True
            for e in (self.elt_list):
                if e.HasOwnTag(): autotag = False; break;
            if autotag and hasattr(self, 'ext_list'):
                for e in (self.ext_list):
                    if e.HasOwnTag(): autotag = False; break;
        # do autotag: number the alternatives 0, 1, ... with implicit
        # context-class tags (AUTOMATIC TAGS and no explicit tags present)
        if autotag:
            atag = 0
            for e in (self.elt_list):
                e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
            if autotag and hasattr(self, 'ext_list'):
                for e in (self.ext_list):
                    e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                    atag += 1
        # Register every alternative; alternatives listed in the conformance
        # file's EXPORTS get a selection request as well.
        for e in (self.elt_list):
            e.eth_reg(ident, ectx, tstrip=1, parent=ident)
            if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
                ectx.eth_sel_req(ident, e.name)
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.eth_reg(ident, ectx, tstrip=1, parent=ident)
                if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
                    ectx.eth_sel_req(ident, e.name)

    def sel_item(self, ident, sel, ectx):
        # Find the alternative named 'sel'; prints a note and returns None
        # if it is missing.
        lst = self.elt_list[:]
        if hasattr(self, 'ext_list'):
            lst.extend(self.ext_list)
        ee = None
        # NOTE(review): 'lst' is built to include ext_list, but the search
        # below walks only elt_list, so extension alternatives are never
        # found -- confirm whether this is intentional.
        for e in (self.elt_list):
            if e.IsNamed() and (e.name == sel):
                ee = e
                break
        if not ee:
            print("#CHOICE %s does not contain item %s" % (ident, sel))
        return ee

    def sel_req(self, ident, sel, ectx):
        #print "sel_req(ident='%s', sel=%s)\n%s" % (ident, sel, str(self))
        # Register just the selected alternative (CHOICE selection type).
        ee = self.sel_item(ident, sel, ectx)
        if ee:
            ee.eth_reg(ident, ectx, tstrip=0, selflag=True)

    def eth_ftype(self, ectx):
        # The CHOICE field shows the index/tag of the chosen alternative.
        return ('FT_UINT32', 'BASE_DEC')

    def eth_ftype_sel(self, sel, ectx):
        # Field type of a selected alternative (FT_NONE if unknown).
        ee = self.sel_item('', sel, ectx)
        if ee:
            return ee.eth_ftype(ectx)
        else:
            return ('FT_NONE', 'BASE_NONE')

    def eth_strings(self):
        # '$$' asks the generator to emit a value_string table for this type.
        return '$$'

    def eth_need_tree(self):
        return True

    def eth_has_vals(self):
        return True

    def GetTTag(self, ectx):
        lst = self.elt_list
        cls = 'BER_CLASS_ANY/*choice*/'
        #if hasattr(self, 'ext_list'):
        #  lst.extend(self.ext_list)
        #if (len(lst) > 0):
        #  cls = lst[0].GetTag(ectx)[0]
        #for e in (lst):
        #  if (e.GetTag(ectx)[0] != cls):
        #    cls = '-1/*choice*/'
        return (cls, '-1/*choice*/')

    def GetTTagSel(self, sel, ectx):
        # Tag of a selected alternative (unknown-selection markers otherwise).
        ee = self.sel_item('', sel, ectx)
        if ee:
            return ee.GetTag(ectx)
        else:
            return ('BER_CLASS_ANY/*unknown selection*/', '-1/*unknown selection*/')

    def IndetermTag(self, ectx):
        #print "Choice IndetermTag()=%s" % (str(not self.HasOwnTag()))
        return not self.HasOwnTag()

    def detect_tagval(self, ectx):
        # Decide whether the generated value list should use the alternatives'
        # tag numbers (True) or their positional indexes (False).  Tag values
        # are only usable when every alternative carries a tag of the same
        # non-universal class.
        tagval = False
        lst = self.elt_list[:]
        if hasattr(self, 'ext_list'):
            lst.extend(self.ext_list)
        if (len(lst) > 0) and (not (ectx.Per() or ectx.Oer()) or lst[0].HasOwnTag()):
            t = lst[0].GetTag(ectx)[0]
            tagval = True
        else:
            t = ''
            tagval = False
        if (t == 'BER_CLASS_UNI'):
            tagval = False
        for e in (lst):
            if not (ectx.Per() or ectx.Oer()) or e.HasOwnTag():
                tt = e.GetTag(ectx)[0]
            else:
                tt = ''
                tagval = False
            if (tt != t):
                tagval = False
        return tagval

    def get_vals(self, ectx):
        # Build the (value, name) list for all alternatives, root first,
        # then extensions; values are tag numbers or indexes per
        # detect_tagval().
        tagval = self.detect_tagval(ectx)
        vals = []
        cnt = 0
        for e in (self.elt_list):
            if (tagval): val = e.GetTag(ectx)[1]
            else: val = str(cnt)
            vals.append((val, e.name))
            cnt += 1
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                if (tagval): val = e.GetTag(ectx)[1]
                else: val = str(cnt)
                vals.append((val, e.name))
                cnt += 1
        return vals

    def eth_type_vals(self, tname, ectx):
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_vals(tname, vals)
        return out

    def reg_enum_vals(self, tname, ectx):
        vals = self.get_vals(ectx)
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))

    def eth_type_enum(self, tname, ectx):
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_enum(tname, vals)
        return out

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_choice'
        return pars

    def eth_type_default_table(self, ectx, tname):
        # Emit the ber_choice_t / per_choice_t table with one row per
        # alternative.
        def out_item(val, e, ext, ectx):
            # One table row for alternative 'e' with value 'val' and
            # extension-root flag 'ext' (PER/OER only).
            has_enum = ectx.eth_type[tname]['enum'] & EF_ENUM
            if (has_enum):
                vval = ectx.eth_enum_item(tname, e.name)
            else:
                vval = val
            f = fname + '/' + e.name
            ef = ectx.field[f]['ethname']
            t = ectx.eth_hf[ef]['ethtype']
            if (ectx.Ber()):
                # Work out the BER flags for this alternative.
                opt = ''
                if (not e.HasOwnTag()):
                    opt = 'BER_FLAGS_NOOWNTAG'
                elif (e.HasImplicitTag(ectx)):
                    if (opt): opt += '|'
                    opt += 'BER_FLAGS_IMPLTAG'
                if (not opt): opt = '0'
            if (ectx.Ber()):
                (tc, tn) = e.GetTag(ectx)
                out = ' { %3s, %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
                      % (vval, '&'+ectx.eth_hf[ef]['fullname'], tc, tn, opt, ectx.eth_type[t]['proto'], t)
            elif (ectx.Per() or ectx.Oer()):
                out = ' { %3s, %-24s, %-23s, dissect_%s_%s },\n' \
                      % (vval, '&'+ectx.eth_hf[ef]['fullname'], ext, ectx.eth_type[t]['proto'], t)
            else:
                out = ''
            return out
        # end out_item()
        #print "eth_type_default_table(tname='%s')" % (tname)
        fname = ectx.eth_type[tname]['ref'][0]
        tagval = self.detect_tagval(ectx)
        table = "static const %(ER)s_choice_t %(TABLE)s[] = {\n"
        cnt = 0
        if hasattr(self, 'ext_list'):
            ext = 'ASN1_EXTENSION_ROOT'
        else:
            ext = 'ASN1_NO_EXTENSIONS'
        # A completely empty extensible CHOICE still needs the flag on the
        # terminating row.
        empty_ext_flag = '0'
        if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0):
            empty_ext_flag = ext
        for e in (self.elt_list):
            if (tagval): val = e.GetTag(ectx)[1]
            else: val = str(cnt)
            table += out_item(val, e, ext, ectx)
            cnt += 1
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                if (tagval): val = e.GetTag(ectx)[1]
                else: val = str(cnt)
                table += out_item(val, e, 'ASN1_NOT_EXTENSION_ROOT', ectx)
                cnt += 1
        if (ectx.Ber()):
            table += " { 0, NULL, 0, 0, 0, NULL }\n};\n"
        else:
            table += " { 0, NULL, %s, NULL }\n};\n" % (empty_ext_flag)
        return table

    def eth_type_default_body(self, ectx, tname):
        # BER and PER/OER choice helpers take their arguments in different
        # orders.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
                                    par=(('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
                                         ('%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode ChoiceType %s' % (tname)
        return body
+
+#--- ChoiceValue ----------------------------------------------------
class ChoiceValue (Value):
    """Value of a CHOICE type: a chosen alternative plus its value."""

    def to_str(self, ectx):
        return self.val.to_str(ectx)

    def fld_obj_eq(self, other):
        # Two CHOICE values match when the same alternative was chosen and
        # the inner values compare equal as strings.
        return isinstance(other, ChoiceValue) and (self.choice == other.choice) and (str(self.val.val) == str(other.val.val))
+
+#--- EnumeratedType -----------------------------------------------------------
class EnumeratedType (Type):
    """ASN.1 ENUMERATED type."""

    def to_python (self, ctx):
        def strify_one (named_num):
            return "%s=%s" % (named_num.ident, named_num.val)
        return "asn1.ENUM(%s)" % ",".join (map (strify_one, self.val))

    def eth_ftype(self, ectx):
        return ('FT_UINT32', 'BASE_DEC')

    def eth_strings(self):
        # '$$' asks the generator to emit a value_string table for this type.
        return '$$'

    def eth_has_vals(self):
        return True

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_ENUMERATED')

    def get_vals_etc(self, ectx):
        # Collect (value, name) pairs for root and extension enumerators.
        # Returns (vals, root_num, ext_num, map_table), where map_table maps
        # enumeration index -> value, or None when the values are already
        # exactly 0..n-1 in order (so PER/OER need no mapping table).
        vals = []
        lastv = 0       # next candidate for an auto-assigned value
        used = {}       # values already claimed by explicit NamedNumbers
        maxv = 0
        root_num = 0    # count of root enumerators
        ext_num = 0     # count of extension enumerators
        map_table = []
        # First reserve all explicitly numbered values ...
        for e in (self.val):
            if e.type == 'NamedNumber':
                used[int(e.val)] = True
        # ... then hand out the lowest unused values to the rest, in order.
        for e in (self.val):
            if e.type == 'NamedNumber':
                val = int(e.val)
            else:
                while lastv in used:
                    lastv += 1
                val = lastv
                used[val] = True
            vals.append((val, e.ident))
            map_table.append(val)
            root_num += 1
            if val > maxv:
                maxv = val
        # Same two passes over the extension enumerators, if any.
        if self.ext is not None:
            for e in (self.ext):
                if e.type == 'NamedNumber':
                    used[int(e.val)] = True
            for e in (self.ext):
                if e.type == 'NamedNumber':
                    val = int(e.val)
                else:
                    while lastv in used:
                        lastv += 1
                    val = lastv
                    used[val] = True
                vals.append((val, e.ident))
                map_table.append(val)
                ext_num += 1
                if val > maxv:
                    maxv = val
        # The map table is only needed when some index differs from its value.
        need_map = False
        for i in range(len(map_table)):
            need_map = need_map or (map_table[i] != i)
        if (not need_map):
            map_table = None
        return (vals, root_num, ext_num, map_table)

    def eth_type_vals(self, tname, ectx):
        out = '\n'
        vals = self.get_vals_etc(ectx)[0]
        out += ectx.eth_vals(tname, vals)
        return out

    def reg_enum_vals(self, tname, ectx):
        vals = self.get_vals_etc(ectx)[0]
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))

    def eth_type_enum(self, tname, ectx):
        out = '\n'
        vals = self.get_vals_etc(ectx)[0]
        out += ectx.eth_enum(tname, vals)
        return out

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (root_num, ext_num, map_table) = self.get_vals_etc(ectx)[1:]
        if self.ext is not None:
            ext = 'TRUE'
        else:
            ext = 'FALSE'
        pars['ROOT_NUM'] = str(root_num)
        pars['EXT'] = ext
        pars['EXT_NUM'] = str(ext_num)
        if (map_table):
            pars['TABLE'] = '%(PROTOP)s%(TNAME)s_value_map'
        else:
            pars['TABLE'] = 'NULL'
        return pars

    def eth_type_default_table(self, ectx, tname):
        # Only PER/OER use the index->value map table, and only when the
        # mapping is non-trivial.
        if (not ectx.Per() and not ectx.Oer()): return ''
        map_table = self.get_vals_etc(ectx)[3]
        if map_table is None: return ''
        table = "static uint32_t %(TABLE)s[%(ROOT_NUM)s+%(EXT_NUM)s] = {"
        table += ", ".join([str(v) for v in map_table])
        table += "};\n"
        return table

    def eth_type_default_body(self, ectx, tname):
        # For BER an ENUMERATED value is decoded with the integer helpers;
        # PER/OER use the dedicated enumerated helper with the map table.
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_enumerated', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ROOT_NUM)s', '%(VAL_PTR)s', '%(EXT)s', '%(EXT_NUM)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode EnumeratedType %s' % (tname)
        return body
+
+#--- EmbeddedPDVType -----------------------------------------------------------
class EmbeddedPDVType (Type):
    """ASN.1 EMBEDDED PDV type."""

    def eth_tname(self):
        return 'EMBEDDED_PDV'

    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EMBEDDED_PDV')

    def eth_type_default_pars(self, ectx, tname):
        # Use the conformance-configured callback for the embedded type,
        # falling back to NULL when none was given.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TYPE_REF_FN'] = ectx.default_embedded_pdv_cb or 'NULL'
        return pars

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call for EMBEDDED PDV (BER or PER only)."""
        if ectx.Ber():
            return ectx.eth_fn_call('dissect_%(ER)s_EmbeddedPDV_Type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        if ectx.Per():
            return ectx.eth_fn_call('dissect_%(ER)s_embedded_pdv', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        return '#error Can not decode EmbeddedPDVType %s' % (tname)
+
+#--- ExternalType -----------------------------------------------------------
class ExternalType (Type):
    """ASN.1 EXTERNAL type."""

    def eth_tname(self):
        return 'EXTERNAL'

    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')

    def eth_type_default_pars(self, ectx, tname):
        # Use the conformance-configured callback for the carried type,
        # falling back to NULL when none was given.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TYPE_REF_FN'] = ectx.default_external_type_cb or 'NULL'
        return pars

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call for EXTERNAL (BER or PER only)."""
        if ectx.Ber():
            return ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        if ectx.Per():
            return ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        return '#error Can not decode ExternalType %s' % (tname)
+
+#--- OpenType -----------------------------------------------------------
class OpenType (Type):
    """ASN.1 open type (constrained only by an information object set)."""

    def to_python (self, ctx):
        return "asn1.ANY"

    def single_type(self):
        # When the contents constraint pins this open type to exactly one
        # referenced type, return that type's name; otherwise None.
        if not self.HasConstraint():
            return None
        if self.constr.type != 'Type':
            return None
        if self.constr.subtype.type != 'Type_Ref':
            return None
        return self.constr.subtype.val

    def eth_reg_sub(self, ident, ectx):
        # Record a dependency on the single contained type, if any.
        contained = self.single_type()
        if contained:
            ectx.eth_dep_add(ident, contained)

    def eth_tname(self):
        contained = self.single_type()
        if contained:
            return 'OpenType_' + contained
        return Type.eth_tname(self)

    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')

    def GetTTag(self, ectx):
        return ('BER_CLASS_ANY', '0')

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['FN_VARIANT'] = ectx.default_opentype_variant
        contained = self.single_type()
        if contained:
            # Dispatch straight to the contained type's dissector function.
            ethname = ectx.type[contained]['ethname']
            pars['TYPE_REF_PROTO'] = ectx.eth_type[ethname]['proto']
            pars['TYPE_REF_TNAME'] = ethname
            pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call; open types are PER/OER only."""
        if ectx.Per() or ectx.Oer():
            return ectx.eth_fn_call('dissect_%(ER)s_open_type%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        return '#error Can not decode OpenType %s' % (tname)
+
+#--- InstanceOfType -----------------------------------------------------------
class InstanceOfType (Type):
    """ASN.1 INSTANCE OF type; dissected like EXTERNAL (BER only)."""

    def eth_tname(self):
        return 'INSTANCE_OF'

    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')

    def GetTTag(self, ectx):
        # INSTANCE OF is encoded with the EXTERNAL universal tag.
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')

    def eth_type_default_pars(self, ectx, tname):
        # Use the conformance-configured callback for the carried type,
        # falling back to NULL when none was given.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TYPE_REF_FN'] = ectx.default_external_type_cb or 'NULL'
        return pars

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call for INSTANCE OF (BER only)."""
        if ectx.Ber():
            return ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        return '#error Can not decode %s' % (tname)
+
+#--- AnyType -----------------------------------------------------------
class AnyType (Type):
    """Obsolete ASN.1 ANY type; no encoding-specific dissection is generated."""

    def to_python (self, ctx):
        return "asn1.ANY"

    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')

    def GetTTag(self, ectx):
        return ('BER_CLASS_ANY', '0')

    def eth_type_default_body(self, ectx, tname):
        # ANY cannot be decoded generically; force a compile error in the
        # generated C so the author notices.
        body = '#error Can not decode %s' % (tname)
        return body
+
class Literal (Node):
    """A literal token passed through unchanged to the Python output."""
    def to_python (self, ctx):
        return self.val
+
+#--- NullType -----------------------------------------------------------------
class NullType (Type):
    """ASN.1 NULL type (carries no value)."""

    def to_python (self, ctx):
        return 'asn1.NULL'

    def eth_tname(self):
        return 'NULL'

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_NULL')

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call for NULL (BER, PER or OER)."""
        if ectx.Ber():
            return ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
        if ectx.Per() or ectx.Oer():
            return ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        return '#error Can not decode %s' % (tname)
+
+#--- NullValue ----------------------------------------------------
class NullValue (Value):
    """The NULL value."""
    def to_str(self, ectx):
        return 'NULL'
+
+#--- RealType -----------------------------------------------------------------
class RealType (Type):
    """ASN.1 REAL type."""

    def to_python (self, ctx):
        return 'asn1.REAL'

    def eth_tname(self):
        return 'REAL'

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_REAL')

    def eth_ftype(self, ectx):
        # Decoded into a double-precision field.
        return ('FT_DOUBLE', 'BASE_NONE')

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call for REAL (BER or PER only)."""
        if ectx.Ber():
            return ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                         ('%(VAL_PTR)s',),))
        if ectx.Per():
            return ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        return '#error Can not decode %s' % (tname)
+
+#--- BooleanType --------------------------------------------------------------
class BooleanType (Type):
    """ASN.1 BOOLEAN type."""

    def to_python (self, ctx):
        return 'asn1.BOOLEAN'

    def eth_tname(self):
        return 'BOOLEAN'

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_BOOLEAN')

    def eth_ftype(self, ectx):
        return ('FT_BOOLEAN', 'BASE_NONE')

    def eth_type_default_body(self, ectx, tname):
        """Return the dissector call for BOOLEAN.

        The PER and OER branches were byte-identical duplicates, so they
        are folded into one Per()/Oer() branch, matching how the sibling
        types (SequenceType, NullType, ...) test the two encodings.
        """
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
+
+#--- OctetStringType ----------------------------------------------------------
class OctetStringType (Type):
    """ASN.1 OCTET STRING type."""

    def to_python (self, ctx):
        return 'asn1.OCTSTRING'

    def eth_tname(self):
        # Fold a plain SIZE constraint into the type name; anything else
        # gets a unique '#'-placeholder name.
        if not self.HasConstraint():
            return 'OCTET_STRING'
        elif self.constr.type == 'Size':
            return 'OCTET_STRING' + '_' + self.constr.eth_constrname()
        else:
            return '#' + self.type + '_' + str(id(self))

    def eth_ftype(self, ectx):
        return ('FT_BYTES', 'BASE_NONE')

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_OCTETSTRING')

    def eth_need_pdu(self, ectx):
        # A CONTAINING constraint dissected via the '_pdu' variants needs
        # the contained type registered as a PDU.
        pdu = None
        if self.HasContentsConstraint():
            t = self.constr.GetContents(ectx)
            if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
                pdu = { 'type' : t,
                        'new' : ectx.default_containing_variant == '_pdu_new' }
        return pdu

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        if self.HasContentsConstraint():
            pars['FN_VARIANT'] = ectx.default_containing_variant
            t = self.constr.GetContents(ectx)
            if t:
                if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
                    # Contents dissected through the registered PDU dissector.
                    t = ectx.field[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ''
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
                else:
                    # Contents dissected by calling the contained type's
                    # dissector function directly.
                    t = ectx.type[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
            else:
                pars['TYPE_REF_FN'] = 'NULL'
        return pars

    def eth_type_default_body(self, ectx, tname):
        # BER: plain or size-constrained octet string.  PER/OER: either a
        # CONTAINING constraint (nested dissection) or a plain string.
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_octet_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            if self.HasContentsConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string_containing%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
+
+#--- CharacterStringType ------------------------------------------------------
class CharacterStringType (Type):
    """Common base for all ASN.1 character-string types."""

    def eth_tname(self):
        # Unconstrained strings use the bare type name; a SIZE constraint
        # is folded into the name; anything else gets a unique placeholder.
        base = self.eth_tsname()
        if not self.HasConstraint():
            return base
        if self.constr.type == 'Size':
            return base + '_' + self.constr.eth_constrname()
        return '#' + self.type + '_' + str(id(self))

    def eth_ftype(self, ectx):
        # All character strings are displayed as plain strings.
        return ('FT_STRING', 'BASE_NONE')
+
class RestrictedCharacterStringType (CharacterStringType):
    """Base for restricted character string types (IA5String, UTF8String, ...).

    Subclasses only provide eth_tsname(); the tag and dissector helper
    names are derived from it.
    """

    def to_python (self, ctx):
        return 'asn1.' + self.eth_tsname()

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_' + self.eth_tsname())

    def eth_type_default_pars(self, ectx, tname):
        # Size and permitted-alphabet constraints plus the string type/tag
        # names used in the generated call.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        (pars['STRING_TYPE'], pars['STRING_TAG']) = (self.eth_tsname(), self.GetTTag(ectx)[1])
        (pars['ALPHABET'], pars['ALPHABET_LEN']) = self.eth_get_alphabet_constr(ectx)
        return pars

    def eth_type_default_body(self, ectx, tname):
        # Dispatch order matters: BER first, then the PER special cases
        # (permitted alphabet, GeneralString, time types, known-multiplier
        # strings), then generic PER, then OER.
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_restricted_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
                                             ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_restricted_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
                                             ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() and self.HasPermAlph() and self.eth_tsname() in KnownMultiplierStringTypes):
            # XXX: If there is a permitted alphabet but it is extensible,
            # then the permitted-alphabet is not PER-visible and should be
            # ignored. (X.691 9.3.10, 9.3.18) We don't handle extensible
            # permitted-alphabets.
            body = ectx.eth_fn_call('dissect_%(ER)s_restricted_character_string', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(ALPHABET)s', '%(ALPHABET_LEN)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            if (self.eth_tsname() == 'GeneralString'):
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
            elif (self.eth_tsname() == 'GeneralizedTime' or self.eth_tsname() == 'UTCTime'):
                # Time types are carried as VisibleString in PER.
                body = ectx.eth_fn_call('dissect_%(ER)s_VisibleString', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s'),
                                             ('%(VAL_PTR)s',),))
            elif (self.eth_tsname() in KnownMultiplierStringTypes):
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s'),
                                             ('%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
        elif (ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
+
+class BMPStringType (RestrictedCharacterStringType):
+ def eth_tsname(self):
+ return 'BMPString'
+
+class GeneralStringType (RestrictedCharacterStringType):
+ def eth_tsname(self):
+ return 'GeneralString'
+
class GraphicStringType (RestrictedCharacterStringType):
    # ASN.1 GraphicString restricted character string type.
    def eth_tsname(self):
        return 'GraphicString'
+
class IA5StringType (RestrictedCharacterStringType):
    # ASN.1 IA5String (a known-multiplier string type in PER).
    def eth_tsname(self):
        return 'IA5String'
+
class NumericStringType (RestrictedCharacterStringType):
    # ASN.1 NumericString (a known-multiplier string type in PER).
    def eth_tsname(self):
        return 'NumericString'
+
class PrintableStringType (RestrictedCharacterStringType):
    # ASN.1 PrintableString (a known-multiplier string type in PER).
    def eth_tsname(self):
        return 'PrintableString'
+
class TeletexStringType (RestrictedCharacterStringType):
    # ASN.1 TeletexString restricted character string type.
    def eth_tsname(self):
        return 'TeletexString'
+
class T61StringType (RestrictedCharacterStringType):
    # T61String is the legacy name for TeletexString; it keeps its own
    # type-string name but reuses the TeletexString BER universal tag.
    def eth_tsname(self):
        return 'T61String'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_TeletexString')
+
class UniversalStringType (RestrictedCharacterStringType):
    # ASN.1 UniversalString (UCS-4) restricted character string type.
    def eth_tsname(self):
        return 'UniversalString'
+
class UTF8StringType (RestrictedCharacterStringType):
    # ASN.1 UTF8String restricted character string type.
    def eth_tsname(self):
        return 'UTF8String'
+
class VideotexStringType (RestrictedCharacterStringType):
    # ASN.1 VideotexString restricted character string type.
    def eth_tsname(self):
        return 'VideotexString'
+
class VisibleStringType (RestrictedCharacterStringType):
    # ASN.1 VisibleString (a known-multiplier string type in PER).
    def eth_tsname(self):
        return 'VisibleString'
+
class ISO646StringType (RestrictedCharacterStringType):
    # ISO646String is the legacy name for VisibleString; it keeps its own
    # type-string name but reuses the VisibleString BER universal tag.
    def eth_tsname(self):
        return 'ISO646String'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_VisibleString')
+
class UnrestrictedCharacterStringType (CharacterStringType):
    # ASN.1 unrestricted CHARACTER STRING type (X.680 clause 44).
    def to_python (self, ctx):
        return 'asn1.UnrestrictedCharacterString'
    def eth_tsname(self):
        return 'CHARACTER_STRING'
+
+#--- UsefulType ---------------------------------------------------------------
class GeneralizedTime (RestrictedCharacterStringType):
    # X.680 "useful type": GeneralizedTime.
    def eth_tsname(self):
        return 'GeneralizedTime'

    def eth_ftype(self, ectx):
        # BER has a dedicated time dissector, so expose a real time field;
        # other encodings fall back to a plain string field.
        if (ectx.Ber()):
            return ('FT_ABSOLUTE_TIME', 'ABSOLUTE_TIME_LOCAL')
        else:
            return ('FT_STRING', 'BASE_NONE')

    def eth_type_default_body(self, ectx, tname):
        # BER gets a direct call to its %(STRING_TYPE)s dissector; any other
        # encoding uses the generic restricted-character-string handling.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
            return body
        else:
            return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
+
class UTCTime (RestrictedCharacterStringType):
    # X.680 "useful type": UTCTime.
    def eth_tsname(self):
        return 'UTCTime'

    def eth_type_default_body(self, ectx, tname):
        # BER variant takes two extra trailing arguments (passed as NULL here);
        # other encodings use the generic restricted-character-string handling.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', 'NULL', 'NULL'),))
            return body
        else:
            return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
+
class ObjectDescriptor (RestrictedCharacterStringType):
    # X.680 "useful type": ObjectDescriptor.
    def eth_tsname(self):
        return 'ObjectDescriptor'

    def eth_type_default_body(self, ectx, tname):
        # NOTE(review): no OER branch here (unlike ObjectIdentifierType), so
        # OER falls through to the #error stub — confirm whether intentional.
        if (ectx.Ber()):
            body = RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_object_descriptor', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
+
+#--- ObjectIdentifierType -----------------------------------------------------
class ObjectIdentifierType (Type):
    """ASN.1 OBJECT IDENTIFIER type."""

    def to_python (self, ctx):
        return 'asn1.OBJECT_IDENTIFIER'

    def eth_tname(self):
        return 'OBJECT_IDENTIFIER'

    def eth_ftype(self, ectx):
        return ('FT_OID', 'BASE_NONE')

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_OID')

    def eth_type_default_pars(self, ectx, tname):
        # Pick the configured OID dissector variant (e.g. plain vs. _str).
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['FN_VARIANT'] = ectx.default_oid_variant
        return pars

    def eth_type_default_body(self, ectx, tname):
        # PER and OER share an identical parameter layout; BER differs only
        # in the leading implicit-tag argument ordering.
        if ectx.Ber():
            body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        elif ectx.Per() or ectx.Oer():
            body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
+
+#--- ObjectIdentifierValue ----------------------------------------------------
class ObjectIdentifierValue (Value):
    """An OBJECT IDENTIFIER value, rendered as a dotted-string C literal."""

    def get_num(self, path, val):
        # Resolve a well-known arc name (e.g. 'iso') via the global oid_names
        # table keyed by full path; unknown names pass through unchanged.
        return str(oid_names.get(path + '/' + val, val))

    def to_str(self, ectx):
        out = ''
        path = ''
        first = True
        sep = ''
        for v in self.comp_list:
            if isinstance(v, Node) and (v.type == 'name_and_number'):
                vstr = v.number
            elif v.isdigit():
                vstr = v
            else:
                vstr = self.get_num(path, v)
            if not first and not vstr.isdigit():
                # Non-numeric later components must be value references.
                vstr = ectx.value_get_val(vstr)
            if first:
                # A leading value reference is emitted as its C symbol; the
                # remaining arcs follow in a quoted string, relying on C
                # string-literal concatenation in the generated code.
                if vstr.isdigit():
                    out += '"' + vstr
                else:
                    out += ectx.value_get_eth(vstr) + '"'
            else:
                out += sep + vstr
            path += sep + vstr
            first = False
            sep = '.'
        out += '"'
        return out

    def get_dep(self):
        # Return the value reference this OID depends on (its first
        # component), or None when it starts with a number or known arc name.
        v = self.comp_list[0]
        if isinstance(v, Node) and (v.type == 'name_and_number'):
            return None
        elif v.isdigit():
            return None
        else:
            vstr = self.get_num('', v)
            if vstr.isdigit():
                return None
            else:
                return vstr
+
class NamedNumber(Node):
    # A single 'name(value)' entry of an INTEGER/BIT STRING/ENUMERATED list.
    def to_python (self, ctx):
        return "('%s',%s)" % (self.ident, self.val)
    def __lt__(self, other):
        # Order by numeric value; used when named-bit lists are sorted.
        return int(self.val) < int(other.val)
+
class NamedNumListBase(Node):
    # Base for types carrying a named-number list (python-output path only).
    def to_python (self, ctx):
        return "asn1.%s_class ([%s])" % (self.asn1_typ,",".join (
            [x.to_python (ctx) for x in self.named_list]))
+
+#--- RelativeOIDType ----------------------------------------------------------
class RelativeOIDType (Type):
    # ASN.1 RELATIVE-OID type.

    def eth_tname(self):
        return 'RELATIVE_OID'

    def eth_ftype(self, ectx):
        return ('FT_REL_OID', 'BASE_NONE')

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_RELATIVE_OID')

    def eth_type_default_pars(self, ectx, tname):
        # Reuse the configured OID dissector variant for relative OIDs.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['FN_VARIANT'] = ectx.default_oid_variant
        return pars

    def eth_type_default_body(self, ectx, tname):
        # NOTE(review): no OER branch (ObjectIdentifierType has one), so OER
        # falls through to the #error stub — confirm whether intentional.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode relative_oid %s' % (tname)
        return body
+
+
+#--- IntegerType --------------------------------------------------------------
class IntegerType (Type):
    """ASN.1 INTEGER type, optionally with named values and value constraints."""

    def to_python (self, ctx):
        return "asn1.INTEGER_class ([%s])" % (",".join (
            [x.to_python (ctx) for x in self.named_list]))

    def add_named_value(self, ident, val):
        # Lazily create the list so a plain INTEGER keeps named_list falsy.
        e = NamedNumber(ident = ident, val = val)
        if not self.named_list:
            self.named_list = []
        self.named_list.append(e)

    def eth_tname(self):
        # Named integers get a per-type name; anonymous constrained integers
        # encode the constraint into the name so equal types share a symbol.
        if self.named_list:
            return Type.eth_tname(self)
        if not self.HasConstraint():
            return 'INTEGER'
        elif self.constr.type == 'SingleValue' or self.constr.type == 'ValueRange':
            return 'INTEGER' + '_' + self.constr.eth_constrname()
        else:
            return 'INTEGER' + '_' + self.constr.eth_tname()

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_INTEGER')


    def eth_ftype(self, ectx):
        # Derive field signedness/width from the constraint; the default for
        # unconstrained (or negative-capable) integers is signed 32-bit.
        if self.HasConstraint():
            if not self.constr.IsNegativ():
                if self.constr.Needs64b(ectx):
                    return ('FT_UINT64', 'BASE_DEC')
                else:
                    return ('FT_UINT32', 'BASE_DEC')
            if self.constr.Needs64b(ectx):
                return ('FT_INT64', 'BASE_DEC')
        return ('FT_INT32', 'BASE_DEC')

    def eth_strings(self):
        # '$$' tells the template to reference the generated value_string table.
        if (self.named_list):
            return '$$'
        else:
            return 'NULL'

    def eth_has_vals(self):
        if (self.named_list):
            return True
        else:
            return False

    def get_vals(self, ectx):
        # (number, identifier) pairs for the value_string table.
        vals = []
        for e in (self.named_list):
            vals.append((int(e.val), e.ident))
        return vals

    def eth_type_vals(self, tname, ectx):
        if not self.eth_has_vals(): return ''
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_vals(tname, vals)
        return out

    def reg_enum_vals(self, tname, ectx):
        # Register each named value so it can be referenced by name elsewhere.
        vals = self.get_vals(ectx)
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))

    def eth_type_enum(self, tname, ectx):
        if not self.eth_has_enum(tname, ectx): return ''
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_enum(tname, vals)
        return out

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if self.HasValueConstraint():
            (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_value_constr(ectx)
            if (pars['FN_VARIANT'] == '') and self.constr.Needs64b(ectx):
                # 64-bit values need a different dissector helper; OER has a
                # dedicated variant when there is no upper bound.
                if ectx.Ber(): pars['FN_VARIANT'] = '64'
                else:
                    if (ectx.Oer() and pars['MAX_VAL'] == 'NO_BOUND'):
                        pars['FN_VARIANT'] = '_64b_no_ub'
                    else:
                        pars['FN_VARIANT'] = '_64b'
        return pars

    def eth_type_default_body(self, ectx, tname):
        # Emit the per-encoding integer dissector call; the range-checking
        # variant is used for BER only when constraint checking is enabled,
        # and always for PER/OER (where the constraint drives the encoding).
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            if (self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(VAL_PTR)s', '%(EXT)s'),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
+
+#--- BitStringType ------------------------------------------------------------
class BitStringType (Type):
    """ASN.1 BIT STRING type, optionally with named bits, a SIZE constraint
    and/or a CONTAINING (contents) constraint."""

    def to_python (self, ctx):
        return "asn1.BITSTRING_class ([%s])" % (",".join (
            [x.to_python (ctx) for x in self.named_list]))

    def eth_tname(self):
        if self.named_list:
            return Type.eth_tname(self)
        elif not self.HasConstraint():
            return 'BIT_STRING'
        elif self.constr.IsSize():
            return 'BIT_STRING' + '_' + self.constr.eth_constrname()
        else:
            # Unsupported constraint kind: emit a unique, non-mergeable name.
            return '#' + self.type + '_' + str(id(self))

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_BITSTRING')

    def eth_ftype(self, ectx):
        return ('FT_BYTES', 'BASE_NONE')

    def eth_need_tree(self):
        # A subtree is only needed to display the individual named bits.
        return self.named_list

    def eth_need_pdu(self, ectx):
        # A CONTAINING constraint dissected via a '_pdu' variant needs the
        # contained type registered as a PDU.
        pdu = None
        if self.HasContentsConstraint():
            t = self.constr.GetContents(ectx)
            if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
                pdu = { 'type' : t,
                        'new' : ectx.default_containing_variant == '_pdu_new' }
        return pdu

    def sortNamedBits(self):
        # NOTE(review): named_list is a list, which has no .val attribute, so
        # this would raise AttributeError if called; it appears to be unused
        # (eth_named_bits sorts the list in place itself).
        return self.named_list.val

    def eth_named_bits(self):
        # Build (bit_number, name) pairs in ascending bit order, inserting
        # "spare_bitN" placeholders for gaps between named bits.
        bits = []
        if (self.named_list):
            sorted_list = self.named_list
            sorted_list.sort()
            expected_bit_no = 0;
            for e in (sorted_list):
                # Fill the table with "spare_bit" for "un named bits"
                if (int(e.val) != 0) and (expected_bit_no != int(e.val)):
                    while ( expected_bit_no < int(e.val)):
                        bits.append((expected_bit_no, ("spare_bit%u" % (expected_bit_no))))
                        expected_bit_no = expected_bit_no + 1
                #print ("Adding named bits to list %s bit no %d" % (e.ident, int (e.val)))
                bits.append((int(e.val), e.ident))
                expected_bit_no = int(e.val) + 1
        return bits

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['LEN_PTR'] = 'NULL'
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        if 'ETT_INDEX' not in pars:
            # No subtree registered for this type.
            pars['ETT_INDEX'] = '-1'
        pars['TABLE'] = 'NULL'
        if self.eth_named_bits():
            pars['TABLE'] = '%(PROTOP)s%(TNAME)s_bits'
        if self.HasContentsConstraint():
            # Resolve the contained type to the dissector function that the
            # *_bit_string_containing* helper will hand the payload to.
            pars['FN_VARIANT'] = ectx.default_containing_variant
            t = self.constr.GetContents(ectx)
            if t:
                if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
                    t = ectx.field[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ''
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
                else:
                    t = ectx.type[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
            else:
                pars['TYPE_REF_FN'] = 'NULL'
        return pars

    def eth_type_default_table(self, ectx, tname):
        #print ("eth_type_default_table(tname='%s')" % (tname))
        # Emit the named-bits table referenced by the bitstring dissector call.
        table = ''
        bits = self.eth_named_bits()
        if (bits):
            table = ectx.eth_bits(tname, bits)
        return table

    def eth_type_default_body(self, ectx, tname):
        bits = self.eth_named_bits()
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_bitstring', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%s' % len(bits),'%(HF_INDEX)s', '%(ETT_INDEX)s',),
                                             ('%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_bitstring', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(TABLE)s', '%s' % len(bits), '%(HF_INDEX)s', '%(ETT_INDEX)s',),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            if self.HasContentsConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_bit_string_containing%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s'),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_bit_string', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s','%(TABLE)s', '%s' % len(bits), '%(VAL_PTR)s', '%(LEN_PTR)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
+
+#--- BStringValue ------------------------------------------------------------
# Map of 4-bit binary nibble strings ('0000'..'1111') to the corresponding
# upper-case hex digit; used to convert ASN.1 bstring literals to C constants.
bstring_tab = {format(i, '04b'): '%X' % i for i in range(16)}
class BStringValue (Value):
    # An ASN.1 bstring literal (e.g. '0101'B) rendered as a C hex constant.
    def to_str(self, ectx):
        # Strip the surrounding '...'B quoting.
        v = self.val[1:-2]
        # Zero-pad to a whole number of octets, then emit one hex digit
        # per 4-bit group.
        if len(v) % 8:
            v += '0' * (8 - len(v) % 8)
        vv = '0x'
        for i in (list(range(0, len(v), 4))):
            vv += bstring_tab[v[i:i+4]]
        return vv
+
+#--- HStringValue ------------------------------------------------------------
class HStringValue (Value):
    """An ASN.1 hstring literal (e.g. 'DEADBEEF'H) rendered as a C hex constant."""
    def to_str(self, ectx):
        # Strip the surrounding '...'H quoting and prefix with 0x.
        return '0x' + self.val[1:-2]
    def __int__(self):
        return int(self.val[1:-2], 16)
+
+#--- FieldSpec ----------------------------------------------------------------
class FieldSpec (Node):
    """Base class for information-object-class field specifications."""
    def __init__(self, *args, **kw):
        # The field name is attached after construction via SetName().
        self.name = None
        Node.__init__(self, *args, **kw)

    def SetName(self, name):
        self.name = name

    def get_repr(self):
        # Subclasses override; the default marks the field kind unsupported.
        return ['#UNSUPPORTED_' + self.type]

    def fld_repr(self):
        # Full representation: field name followed by the kind-specific parts.
        return [self.name, *self.get_repr()]
+
class TypeFieldSpec (FieldSpec):
    # Type fields contribute no extra representation data.
    def get_repr(self):
        return []
+
class FixedTypeValueFieldSpec (FieldSpec):
    def get_repr(self):
        # Represent by the referenced type's name, or the inline type's kind.
        if isinstance(self.typ, Type_Ref):
            repr = ['TypeReference', self.typ.val]
        else:
            repr = [self.typ.type]
        return repr
+
class VariableTypeValueFieldSpec (FieldSpec):
    # Represented only by its field-spec kind.
    def get_repr(self):
        return ['_' + self.type]
+
class FixedTypeValueSetFieldSpec (FieldSpec):
    # Represented only by its field-spec kind.
    def get_repr(self):
        return ['_' + self.type]
+
class ObjectFieldSpec (FieldSpec):
    # Represented by the referenced information object class.
    def get_repr(self):
        return ['ClassReference', self.cls.val]
+
class ObjectSetFieldSpec (FieldSpec):
    # Same representation as ObjectFieldSpec: the referenced class.
    def get_repr(self):
        return ['ClassReference', self.cls.val]
+
+#==============================================================================
+
def p_module_list_1 (t):
    'module_list : module_list ModuleDefinition'
    # Left-recursive accumulation: extend the list with the new module.
    t[0] = [*t[1], t[2]]
+
def p_module_list_2 (t):
    'module_list : ModuleDefinition'
    # Base case: the first module starts the list.
    t[0] = [t[1]]
+
+
+#--- ITU-T Recommendation X.680 -----------------------------------------------
+
+
+# 11 ASN.1 lexical items --------------------------------------------------------
+
+# 11.2 Type references
def p_type_ref (t):
    'type_ref : UCASE_IDENT'
    # X.680 11.2: type references begin with an upper-case letter.
    t[0] = Type_Ref(val=t[1])
+
+# 11.3 Identifiers
def p_identifier (t):
    'identifier : LCASE_IDENT'
    # X.680 11.3: identifiers begin with a lower-case letter.
    t[0] = t[1]
+
+# 11.4 Value references
+# cause reduce/reduce conflict
+#def p_valuereference (t):
+# 'valuereference : LCASE_IDENT'
+# t[0] = Value_Ref(val=t[1])
+
+# 11.5 Module references
def p_modulereference (t):
    'modulereference : UCASE_IDENT'
    # X.680 11.5: module references look like type references.
    t[0] = t[1]
+
+
+# 12 Module definition --------------------------------------------------------
+
+# 12.1
def p_ModuleDefinition (t):
    'ModuleDefinition : ModuleIdentifier DEFINITIONS TagDefault ASSIGNMENT ModuleBegin BEGIN ModuleBody END'
    # ModuleBegin (t[5]) is an empty embedded-action rule; only the
    # identifier, tag default and body feed the Module node.
    t[0] = Module (ident = t[1], tag_def = t[3], body = t[7])
+
def p_ModuleBegin (t):
    'ModuleBegin : '
    # Embedded action: t[-4] is the ModuleIdentifier already on the stack.
    # The X.880 ROS module gets built-in handling.
    if t[-4].val == 'Remote-Operations-Information-Objects':
        x880_module_begin()
+
def p_TagDefault_1 (t):
    '''TagDefault : EXPLICIT TAGS
                  | IMPLICIT TAGS
                  | AUTOMATIC TAGS '''
    # The first token (EXPLICIT/IMPLICIT/AUTOMATIC) is the tagging mode.
    t[0] = Default_Tags (dfl_tag = t[1])
+
def p_TagDefault_2 (t):
    'TagDefault : '
    # 12.2 The "TagDefault" is taken as EXPLICIT TAGS if it is "empty".
    t[0] = Default_Tags (dfl_tag = 'EXPLICIT')
+
def p_ModuleIdentifier_1 (t):
    'ModuleIdentifier : modulereference DefinitiveIdentifier' # name, oid
    # Module name plus its definitive OID.
    t [0] = Node('module_ident', val = t[1], ident = t[2])
+
def p_ModuleIdentifier_2 (t):
    'ModuleIdentifier : modulereference' # name, oid
    # Module name without a definitive OID.
    t [0] = Node('module_ident', val = t[1], ident = None)
+
def p_DefinitiveIdentifier (t):
    'DefinitiveIdentifier : ObjectIdentifierValue'
    # The module's OID is parsed with the generic OID-value rule.
    t[0] = t[1]
+
+#def p_module_ref (t):
+# 'module_ref : UCASE_IDENT'
+# t[0] = t[1]
+
def p_ModuleBody_1 (t):
    'ModuleBody : Exports Imports AssignmentList'
    t[0] = Module_Body (exports = t[1], imports = t[2], assign_list = t[3])
+
def p_ModuleBody_2 (t):
    'ModuleBody : '
    # Empty module body: no exports, imports or assignments.
    t[0] = Node ('module_body', exports = [], imports = [], assign_list = [])
+
def p_Exports_1 (t):
    'Exports : EXPORTS syms_exported SEMICOLON'
    t[0] = t[2]
+
def p_Exports_2 (t):
    'Exports : EXPORTS ALL SEMICOLON'
    t[0] = [ 'ALL' ]
+
def p_Exports_3 (t):
    'Exports : '
    # A missing EXPORTS clause means everything is exported.
    t[0] = [ 'ALL' ]
+
def p_syms_exported_1 (t):
    'syms_exported : exp_sym_list'
    t[0] = t[1]
+
def p_syms_exported_2 (t):
    'syms_exported : '
    # 'EXPORTS ;' with an empty symbol list exports nothing.
    t[0] = []
+
def p_exp_sym_list_1 (t):
    'exp_sym_list : Symbol'
    t[0] = [t[1]]
+
def p_exp_sym_list_2 (t):
    'exp_sym_list : exp_sym_list COMMA Symbol'
    # Append the symbol after the comma; build a fresh list so the
    # previously reduced value is left untouched.
    out = list(t[1])
    out.append(t[3])
    t[0] = out
+
+
def p_Imports_1 (t):
    'Imports : importsbegin IMPORTS SymbolsImported SEMICOLON'
    t[0] = t[3]
    # Reset the assigned-identifier map once the IMPORTS clause is done;
    # it is only meaningful while parsing the import list itself.
    global lcase_ident_assigned
    lcase_ident_assigned = {}
+
def p_importsbegin (t):
    'importsbegin : '
    # Embedded action before IMPORTS: seed the assigned-identifier map
    # from the conformance file's ASSIGNED_ID OBJECT_IDENTIFIER entries.
    global lcase_ident_assigned
    global g_conform
    lcase_ident_assigned = {}
    lcase_ident_assigned.update(g_conform.use_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER'))
+
def p_Imports_2 (t):
    'Imports : '
    # No IMPORTS clause at all.
    t[0] = []
+
def p_SymbolsImported_1(t):
    'SymbolsImported : '
    # 'IMPORTS ;' with nothing listed.
    t[0] = []
+
def p_SymbolsImported_2 (t):
    'SymbolsImported : SymbolsFromModuleList'
    t[0] = t[1]
+
def p_SymbolsFromModuleList_1 (t):
    'SymbolsFromModuleList : SymbolsFromModuleList SymbolsFromModule'
    t[0] = t[1] + [t[2]]
+
def p_SymbolsFromModuleList_2 (t):
    'SymbolsFromModuleList : SymbolsFromModule'
    t[0] = [t[1]]
+
def p_SymbolsFromModule (t):
    '''SymbolsFromModule : SymbolList FROM GlobalModuleReference
                         | SymbolList FROM GlobalModuleReference WITH SUCCESSORS'''
    t[0] = Node ('SymbolList', symbol_list = t[1], module = t[3])
    # Remember which module each imported value reference came from, then
    # let the importer rewrite class references and pull in class defs.
    for s in (t[0].symbol_list):
        if (isinstance(s, Value_Ref)): lcase_ident_assigned[s.val] = t[3]
    import_symbols_from_module(t[0].module, t[0].symbol_list)
+
def import_symbols_from_module(module, symbol_list):
    # Fix up imported symbols in place: references to information object
    # classes parse as Type_Ref and must be rewritten to Class_Ref, with the
    # class definition imported as needed.  The X.880 ROS module is handled
    # through the built-in x880 machinery.
    if module.val == 'Remote-Operations-Information-Objects':
        for i in range(len(symbol_list)):
            s = symbol_list[i]
            if isinstance(s, Type_Ref) or isinstance(s, Class_Ref):
                x880_import(s.val)
                if isinstance(s, Type_Ref) and is_class_ident(s.val):
                    symbol_list[i] = Class_Ref (val = s.val)
        return
    for i in range(len(symbol_list)):
        s = symbol_list[i]
        if isinstance(s, Type_Ref) and is_class_ident("$%s$%s" % (module.val, s.val)):
            import_class_from_module(module.val, s.val)
        if isinstance(s, Type_Ref) and is_class_ident(s.val):
            symbol_list[i] = Class_Ref (val = s.val)
+
def p_GlobalModuleReference (t):
    'GlobalModuleReference : modulereference AssignedIdentifier'
    # Module name plus optional assigned identifier (OID or value ref).
    t [0] = Node('module_ident', val = t[1], ident = t[2])
+
def p_AssignedIdentifier_1 (t):
    'AssignedIdentifier : ObjectIdentifierValue'
    t[0] = t[1]
+
def p_AssignedIdentifier_2 (t):
    'AssignedIdentifier : LCASE_IDENT_ASSIGNED'
    t[0] = t[1]
+
def p_AssignedIdentifier_3 (t):
    'AssignedIdentifier : '
    # No assigned identifier: t[0] is left as None.
    pass
+
def p_SymbolList_1 (t):
    'SymbolList : Symbol'
    t[0] = [t[1]]
+
def p_SymbolList_2 (t):
    'SymbolList : SymbolList COMMA Symbol'
    t[0] = t[1] + [t[3]]
+
def p_Symbol (t):
    '''Symbol : Reference
              | ParameterizedReference'''
    t[0] = t[1]
+
def p_Reference_1 (t):
    '''Reference : type_ref
                 | objectclassreference '''
    t[0] = t[1]
+
def p_Reference_2 (t):
    '''Reference : LCASE_IDENT_ASSIGNED
                 | identifier '''  # instead of valuereference which causes reduce/reduce conflict
    t[0] = Value_Ref(val=t[1])
+
def p_AssignmentList_1 (t):
    'AssignmentList : AssignmentList Assignment'
    t[0] = t[1] + [t[2]]
+
def p_AssignmentList_2 (t):
    'AssignmentList : Assignment SEMICOLON'
    # A trailing semicolon after an assignment is tolerated.
    t[0] = [t[1]]
+
def p_AssignmentList_3 (t):
    'AssignmentList : Assignment'
    t[0] = [t[1]]
+
def p_Assignment (t):
    '''Assignment : TypeAssignment
                  | ValueAssignment
                  | ValueSetTypeAssignment
                  | ObjectClassAssignment
                  | ObjectAssignment
                  | ObjectSetAssignment
                  | ParameterizedAssignment
                  | pyquote '''
    t[0] = t[1]
+
+
+# 13 Referencing type and value definitions -----------------------------------
+
+# 13.1
def p_DefinedType (t):
    '''DefinedType : ExternalTypeReference
                   | type_ref
                   | ParameterizedType'''
    t[0] = t[1]
+
def p_DefinedValue_1(t):
    '''DefinedValue : ExternalValueReference'''
    t[0] = t[1]
+
def p_DefinedValue_2(t):
    '''DefinedValue : identifier '''  # instead of valuereference which causes reduce/reduce conflict
    t[0] = Value_Ref(val=t[1])
+
+# 13.6
def p_ExternalTypeReference (t):
    'ExternalTypeReference : modulereference DOT type_ref'
    # X.680 13.6: Module.Type reference into another module.
    t[0] = Node ('ExternalTypeReference', module = t[1], typ = t[3])
+
def p_ExternalValueReference (t):
    'ExternalValueReference : modulereference DOT identifier'
    # X.680 13.6: Module.value reference into another module.
    t[0] = Node ('ExternalValueReference', module = t[1], ident = t[3])
+
+
+# 15 Assigning types and values -----------------------------------------------
+
+# 15.1
def p_TypeAssignment (t):
    'TypeAssignment : UCASE_IDENT ASSIGNMENT Type'
    # The assignment result is the Type node itself, named after the lhs.
    t[0] = t[3]
    t[0].SetName(t[1])
+
+# 15.2
def p_ValueAssignment (t):
    'ValueAssignment : LCASE_IDENT ValueType ASSIGNMENT Value'
    t[0] = ValueAssignment(ident = t[1], typ = t[2], val = t[4])
+
# only "simple" types are supported to simplify the grammar
def p_ValueType (t):
    '''ValueType : type_ref
                 | BooleanType
                 | IntegerType
                 | ObjectIdentifierType
                 | OctetStringType
                 | RealType '''
    # Restricted to "simple" governor types to keep the grammar manageable.
    t[0] = t[1]
+
+# 15.6
def p_ValueSetTypeAssignment (t):
    'ValueSetTypeAssignment : UCASE_IDENT ValueType ASSIGNMENT ValueSet'
    t[0] = Node('ValueSetTypeAssignment', name=t[1], typ=t[2], val=t[4])
+
+# 15.7
def p_ValueSet (t):
    'ValueSet : lbraceignore rbraceignore'
    # Value-set contents are skipped (braces consumed in ignore mode).
    t[0] = None
+
+
+# 16 Definition of types and values -------------------------------------------
+
+# 16.1
def p_Type (t):
    '''Type : BuiltinType
            | ReferencedType
            | ConstrainedType'''
    t[0] = t[1]
+
+# 16.2
def p_BuiltinType (t):
    '''BuiltinType : AnyType
                   | BitStringType
                   | BooleanType
                   | CharacterStringType
                   | ChoiceType
                   | EmbeddedPDVType
                   | EnumeratedType
                   | ExternalType
                   | InstanceOfType
                   | IntegerType
                   | NullType
                   | ObjectClassFieldType
                   | ObjectIdentifierType
                   | OctetStringType
                   | RealType
                   | RelativeOIDType
                   | SequenceType
                   | SequenceOfType
                   | SetType
                   | SetOfType
                   | TaggedType'''
    # X.680 16.2: all built-in type notations funnel through here.
    t[0] = t[1]
+
+# 16.3
def p_ReferencedType (t):
    '''ReferencedType : DefinedType
                      | UsefulType
                      | SelectionType'''
    t[0] = t[1]
+
+# 16.5
def p_NamedType (t):
    'NamedType : identifier Type'
    # The component name is attached to the Type node itself.
    t[0] = t[2]
    t[0].SetName (t[1])
+
+# 16.7
def p_Value (t):
    '''Value : BuiltinValue
             | ReferencedValue
             | ObjectClassFieldValue'''
    t[0] = t[1]
+
+# 16.9
def p_BuiltinValue (t):
    '''BuiltinValue : BooleanValue
                    | ChoiceValue
                    | IntegerValue
                    | ObjectIdentifierValue
                    | RealValue
                    | SequenceValue
                    | hex_string
                    | binary_string
                    | char_string''' # XXX we don't support {data} here
    t[0] = t[1]
+
+# 16.11
def p_ReferencedValue (t):
    '''ReferencedValue : DefinedValue
                       | ValueFromObject'''
    t[0] = t[1]
+
+# 16.13
+#def p_NamedValue (t):
+# 'NamedValue : identifier Value'
+# t[0] = Node ('NamedValue', ident = t[1], value = t[2])
+
+
+# 17 Notation for the boolean type --------------------------------------------
+
+# 17.1
def p_BooleanType (t):
    'BooleanType : BOOLEAN'
    t[0] = BooleanType ()
+
+# 17.2
def p_BooleanValue (t):
    '''BooleanValue : TRUE
                    | FALSE'''
    # Kept as the literal token text ('TRUE'/'FALSE').
    t[0] = t[1]
+
+
+# 18 Notation for the integer type --------------------------------------------
+
+# 18.1
def p_IntegerType_1 (t):
    'IntegerType : INTEGER'
    # Plain INTEGER without named values.
    t[0] = IntegerType (named_list = None)
+
def p_IntegerType_2 (t):
    'IntegerType : INTEGER LBRACE NamedNumberList RBRACE'
    # INTEGER { name(value), ... } with named values.
    t[0] = IntegerType(named_list = t[3])
+
def p_NamedNumberList_1 (t):
    'NamedNumberList : NamedNumber'
    t[0] = [t[1]]
+
def p_NamedNumberList_2 (t):
    'NamedNumberList : NamedNumberList COMMA NamedNumber'
    # Extend the accumulated list with the entry after the comma.
    t[0] = [*t[1], t[3]]
+
def p_NamedNumber (t):
    '''NamedNumber : identifier LPAREN SignedNumber RPAREN
                   | identifier LPAREN DefinedValue RPAREN'''
    # The value may be a literal number or a reference to a defined value.
    t[0] = NamedNumber(ident = t[1], val = t[3])
+
def p_SignedNumber_1 (t):
    'SignedNumber : NUMBER'
    t[0] = t [1]
+
def p_SignedNumber_2 (t):
    'SignedNumber : MINUS NUMBER'
    # Numbers are carried as strings; just prepend the sign.
    t[0] = '-%s' % t[2]
+
+# 18.9
def p_IntegerValue (t):
    'IntegerValue : SignedNumber'
    t[0] = t [1]
+
+# 19 Notation for the enumerated type -----------------------------------------
+
+# 19.1
def p_EnumeratedType (t):
    'EnumeratedType : ENUMERATED LBRACE Enumerations RBRACE'
    # Enumerations yields a dict with root values ('val') and the
    # extension additions ('ext', None when not extensible).
    t[0] = EnumeratedType (val = t[3]['val'], ext = t[3]['ext'])
+
def p_Enumerations_1 (t):
    'Enumerations : Enumeration'
    # Not extensible: ext stays None.
    t[0] = dict(val=t[1], ext=None)
+
def p_Enumerations_2 (t):
    'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec'
    # Extensible, but with no extension additions yet.
    t[0] = { 'val' : t[1], 'ext' : [] }
+
def p_Enumerations_3 (t):
    'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec COMMA Enumeration'
    # Extensible with extension additions after the ellipsis.
    t[0] = { 'val' : t[1], 'ext' : t[6] }
+
def p_Enumeration_1 (t):
    'Enumeration : EnumerationItem'
    t[0] = [t[1]]
+
def p_Enumeration_2 (t):
    'Enumeration : Enumeration COMMA EnumerationItem'
    t[0] = t[1] + [t[3]]
+
def p_EnumerationItem (t):
    '''EnumerationItem : Identifier
                       | NamedNumber'''
    # An item is either a bare identifier or name(number).
    t[0] = t[1]
+
def p_Identifier (t):
    'Identifier : identifier'
    t[0] = Node ('Identifier', ident = t[1])
+
+
+# 20 Notation for the real type -----------------------------------------------
+
+# 20.1
def p_RealType (t):
    'RealType : REAL'
    t[0] = RealType ()
+
+# 20.6
def p_RealValue (t):
    '''RealValue : REAL_NUMBER
                 | SpecialRealValue'''
    t[0] = t [1]
+
def p_SpecialRealValue (t):
    '''SpecialRealValue : PLUS_INFINITY
                        | MINUS_INFINITY'''
    t[0] = t[1]
+
+
+# 21 Notation for the bitstring type ------------------------------------------
+
+# 21.1
def p_BitStringType_1 (t):
    'BitStringType : BIT STRING'
    # BIT STRING without named bits.
    t[0] = BitStringType (named_list = None)
+
def p_BitStringType_2 (t):
    'BitStringType : BIT STRING LBRACE NamedBitList RBRACE'
    # BIT STRING { name(bit), ... } with named bits.
    t[0] = BitStringType (named_list = t[4])
+
def p_NamedBitList_1 (t):
    'NamedBitList : NamedBit'
    t[0] = [t[1]]
+
def p_NamedBitList_2 (t):
    'NamedBitList : NamedBitList COMMA NamedBit'
    t[0] = t[1] + [t[3]]
+
def p_NamedBit (t):
    '''NamedBit : identifier LPAREN NUMBER RPAREN
                | identifier LPAREN DefinedValue RPAREN'''
    # Named bits reuse the NamedNumber node (name + bit position).
    t[0] = NamedNumber (ident = t[1], val = t[3])
+
+
+# 22 Notation for the octetstring type ----------------------------------------
+
+# 22.1
def p_OctetStringType (t):
    'OctetStringType : OCTET STRING'
    t[0] = OctetStringType ()
+
+
+# 23 Notation for the null type -----------------------------------------------
+
+# 23.1
def p_NullType (t):
    'NullType : NULL'
    t[0] = NullType ()
+
+# 23.3
def p_NullValue (t):
    'NullValue : NULL'
    t[0] = NullValue ()
+
+
+# 24 Notation for sequence types ----------------------------------------------
+
+# 24.1
def p_SequenceType_1 (t):
    'SequenceType : SEQUENCE LBRACE RBRACE'
    # Empty SEQUENCE.
    t[0] = SequenceType (elt_list = [])
+
def p_SequenceType_2 (t):
    'SequenceType : SEQUENCE LBRACE ComponentTypeLists RBRACE'
    # ComponentTypeLists yields a dict: 'elt_list' is the root component
    # list, 'ext_list' the extension additions, and 'elt_list2' the
    # components after the extension end marker (all optional).
    t[0] = SequenceType (elt_list = t[3]['elt_list'])
    if 'ext_list' in t[3]:
        t[0].ext_list = t[3]['ext_list']
    if 'elt_list2' in t[3]:
        t[0].elt_list2 = t[3]['elt_list2']
+
def p_ExtensionAndException_1 (t):
    'ExtensionAndException : ELLIPSIS'
    t[0] = []
+
def p_OptionalExtensionMarker_1 (t):
    'OptionalExtensionMarker : COMMA ELLIPSIS'
    t[0] = True
+
def p_OptionalExtensionMarker_2 (t):
    'OptionalExtensionMarker : '
    t[0] = False
+
def p_ComponentTypeLists_1 (t):
    'ComponentTypeLists : ComponentTypeList'
    # Root components only; no extension marker, so no 'ext_list' key.
    t[0] = dict(elt_list=t[1])
+
def p_ComponentTypeLists_2 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException OptionalExtensionMarker'
    # Extensible with no extension additions.
    t[0] = {'elt_list' : t[1], 'ext_list' : []}
+
def p_ComponentTypeLists_3 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
    # Extensible with extension additions.
    t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
+
def p_ComponentTypeLists_4 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionEndMarker COMMA ComponentTypeList'
    # Extension range closed by '...'; t[6] is the trailing root part.
    t[0] = {'elt_list' : t[1], 'ext_list' : [], 'elt_list2' : t[6]}
+
def p_ComponentTypeLists_5 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList ExtensionEndMarker COMMA ComponentTypeList'
    # Extension additions plus a trailing root part after the end marker.
    t[0] = {'elt_list' : t[1], 'ext_list' : t[4], 'elt_list2' : t[7]}
+
def p_ComponentTypeLists_6 (t):
    'ComponentTypeLists : ExtensionAndException OptionalExtensionMarker'
    # No root components at all, only the extension marker.
    t[0] = {'elt_list' : [], 'ext_list' : []}
+
def p_ComponentTypeLists_7 (t):
    'ComponentTypeLists : ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
    # No root components; extension additions only.
    t[0] = {'elt_list' : [], 'ext_list' : t[2]}
+
def p_ExtensionEndMarker (t):
    'ExtensionEndMarker : COMMA ELLIPSIS'
    # Marker only; no semantic value.
    pass
+
def p_ExtensionAdditionList_1 (t):
    'ExtensionAdditionList : COMMA ExtensionAddition'
    t[0] = [t[2]]
+
def p_ExtensionAdditionList_2 (t):
    'ExtensionAdditionList : ExtensionAdditionList COMMA ExtensionAddition'
    t[0] = t[1] + [t[3]]
+
def p_ExtensionAddition_1 (t):
    'ExtensionAddition : ExtensionAdditionGroup'
    # A version group ([[ ... ]]) is wrapped like a mandatory component.
    t[0] = Node ('elt_type', val = t[1], optional = 0)
+
def p_ExtensionAddition_2 (t):
    'ExtensionAddition : ComponentType'
    t[0] = t[1]
+
def p_ExtensionAdditionGroup (t):
    'ExtensionAdditionGroup : LVERBRACK VersionNumber ComponentTypeList RVERBRACK'
    # [[ version: components ]] extension addition group.
    t[0] = ExtensionAdditionGroup (ver = t[2], elt_list = t[3])
+
def p_VersionNumber_1 (t):
    'VersionNumber : '
    # No version number: t[0] is left as None.
+
def p_VersionNumber_2 (t):
    'VersionNumber : NUMBER COLON'
    t[0] = t[1]
+
def p_ComponentTypeList_1 (t):
    'ComponentTypeList : ComponentType'
    t[0] = [t[1]]
+
+def p_ComponentTypeList_2 (t):
+ 'ComponentTypeList : ComponentTypeList COMMA ComponentType'
+ t[0] = t[1] + [t[3]]
+
+def p_ComponentType_1 (t):
+ 'ComponentType : NamedType'
+ t[0] = Node ('elt_type', val = t[1], optional = 0)
+
+def p_ComponentType_2 (t):
+ 'ComponentType : NamedType OPTIONAL'
+ t[0] = Node ('elt_type', val = t[1], optional = 1)
+
+def p_ComponentType_3 (t):
+ 'ComponentType : NamedType DEFAULT DefaultValue'
+ t[0] = Node ('elt_type', val = t[1], optional = 1, default = t[3])
+
+def p_ComponentType_4 (t):
+ 'ComponentType : COMPONENTS OF Type'
+ t[0] = Node ('components_of', typ = t[3])
+
+def p_DefaultValue_1 (t):
+ '''DefaultValue : ReferencedValue
+ | BooleanValue
+ | ChoiceValue
+ | IntegerValue
+ | RealValue
+ | hex_string
+ | binary_string
+ | char_string
+ | ObjectClassFieldValue'''
+ t[0] = t[1]
+
+def p_DefaultValue_2 (t):
+ 'DefaultValue : lbraceignore rbraceignore'
+ t[0] = ''
+
+# 24.17
+def p_SequenceValue_1 (t):
+ 'SequenceValue : LBRACE RBRACE'
+ t[0] = []
+
+
+#def p_SequenceValue_2 (t):
+# 'SequenceValue : LBRACE ComponentValueList RBRACE'
+# t[0] = t[2]
+
+#def p_ComponentValueList_1 (t):
+# 'ComponentValueList : NamedValue'
+# t[0] = [t[1]]
+
+#def p_ComponentValueList_2 (t):
+# 'ComponentValueList : ComponentValueList COMMA NamedValue'
+# t[0] = t[1] + [t[3]]
+
+
+# 25 Notation for sequence-of types -------------------------------------------
+
+# 25.1
+def p_SequenceOfType (t):
+ '''SequenceOfType : SEQUENCE OF Type
+ | SEQUENCE OF NamedType'''
+ t[0] = SequenceOfType (val = t[3], size_constr = None)
+
+
+# 26 Notation for set types ---------------------------------------------------
+
+# 26.1
+def p_SetType_1 (t):
+ 'SetType : SET LBRACE RBRACE'
+ t[0] = SetType (elt_list = [])
+
+def p_SetType_2 (t):
+ 'SetType : SET LBRACE ComponentTypeLists RBRACE'
+ t[0] = SetType (elt_list = t[3]['elt_list'])
+ if 'ext_list' in t[3]:
+ t[0].ext_list = t[3]['ext_list']
+ if 'elt_list2' in t[3]:
+ t[0].elt_list2 = t[3]['elt_list2']
+
+
+# 27 Notation for set-of types ------------------------------------------------
+
+# 27.1
+def p_SetOfType (t):
+ '''SetOfType : SET OF Type
+ | SET OF NamedType'''
+ t[0] = SetOfType (val = t[3])
+
+# 28 Notation for choice types ------------------------------------------------
+
+# 28.1
+def p_ChoiceType (t):
+ 'ChoiceType : CHOICE LBRACE AlternativeTypeLists RBRACE'
+ if 'ext_list' in t[3]:
+ t[0] = ChoiceType (elt_list = t[3]['elt_list'], ext_list = t[3]['ext_list'])
+ else:
+ t[0] = ChoiceType (elt_list = t[3]['elt_list'])
+
+def p_AlternativeTypeLists_1 (t):
+ 'AlternativeTypeLists : AlternativeTypeList'
+ t[0] = {'elt_list' : t[1]}
+
+def p_AlternativeTypeLists_2 (t):
+ 'AlternativeTypeLists : AlternativeTypeList COMMA ExtensionAndException ExtensionAdditionAlternatives OptionalExtensionMarker'
+ t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
+
+def p_ExtensionAdditionAlternatives_1 (t):
+ 'ExtensionAdditionAlternatives : ExtensionAdditionAlternativesList'
+ t[0] = t[1]
+
+def p_ExtensionAdditionAlternatives_2 (t):
+ 'ExtensionAdditionAlternatives : '
+ t[0] = []
+
+def p_ExtensionAdditionAlternativesList_1 (t):
+ 'ExtensionAdditionAlternativesList : COMMA ExtensionAdditionAlternative'
+ t[0] = t[2]
+
+def p_ExtensionAdditionAlternativesList_2 (t):
+ 'ExtensionAdditionAlternativesList : ExtensionAdditionAlternativesList COMMA ExtensionAdditionAlternative'
+ t[0] = t[1] + t[3]
+
+def p_ExtensionAdditionAlternative_1 (t):
+ 'ExtensionAdditionAlternative : NamedType'
+ t[0] = [t[1]]
+
+def p_ExtensionAdditionAlternative_2 (t):
+ 'ExtensionAdditionAlternative : ExtensionAdditionAlternativesGroup'
+ t[0] = t[1]
+
+def p_ExtensionAdditionAlternativesGroup (t):
+ 'ExtensionAdditionAlternativesGroup : LVERBRACK VersionNumber AlternativeTypeList RVERBRACK'
+ t[0] = t[3]
+
+def p_AlternativeTypeList_1 (t):
+ 'AlternativeTypeList : NamedType'
+ t[0] = [t[1]]
+
+def p_AlternativeTypeList_2 (t):
+ 'AlternativeTypeList : AlternativeTypeList COMMA NamedType'
+ t[0] = t[1] + [t[3]]
+
+# 28.10
+def p_ChoiceValue_1 (t):
+ '''ChoiceValue : identifier COLON Value
+ | identifier COLON NullValue '''
+ val = t[3]
+ if not isinstance(val, Value):
+ val = Value(val=val)
+ t[0] = ChoiceValue (choice = t[1], val = val)
+
+# 29 Notation for selection types
+
+# 29.1
+def p_SelectionType (t): #
+ 'SelectionType : identifier LT Type'
+ t[0] = SelectionType (typ = t[3], sel = t[1])
+
+# 30 Notation for tagged types ------------------------------------------------
+
+# 30.1
+def p_TaggedType_1 (t):
+ 'TaggedType : Tag Type'
+ t[1].mode = 'default'
+ t[0] = t[2]
+ t[0].AddTag(t[1])
+
+def p_TaggedType_2 (t):
+ '''TaggedType : Tag IMPLICIT Type
+ | Tag EXPLICIT Type'''
+ t[1].mode = t[2]
+ t[0] = t[3]
+ t[0].AddTag(t[1])
+
+def p_Tag (t):
+ 'Tag : LBRACK Class ClassNumber RBRACK'
+ t[0] = Tag(cls = t[2], num = t[3])
+
+def p_ClassNumber_1 (t):
+ 'ClassNumber : number'
+ t[0] = t[1]
+
+def p_ClassNumber_2 (t):
+ 'ClassNumber : DefinedValue'
+ t[0] = t[1]
+
+def p_Class_1 (t):
+ '''Class : UNIVERSAL
+ | APPLICATION
+ | PRIVATE'''
+ t[0] = t[1]
+
+def p_Class_2 (t):
+ 'Class :'
+ t[0] = 'CONTEXT'
+
+
+# 31 Notation for the object identifier type ----------------------------------
+
+# 31.1
+def p_ObjectIdentifierType (t):
+ 'ObjectIdentifierType : OBJECT IDENTIFIER'
+ t[0] = ObjectIdentifierType()
+
+# 31.3
+def p_ObjectIdentifierValue (t):
+ 'ObjectIdentifierValue : LBRACE oid_comp_list RBRACE'
+ t[0] = ObjectIdentifierValue (comp_list=t[2])
+
+def p_oid_comp_list_1 (t):
+ 'oid_comp_list : oid_comp_list ObjIdComponents'
+ t[0] = t[1] + [t[2]]
+
+def p_oid_comp_list_2 (t):
+ 'oid_comp_list : ObjIdComponents'
+ t[0] = [t[1]]
+
+def p_ObjIdComponents (t):
+ '''ObjIdComponents : NameForm
+ | NumberForm
+ | NameAndNumberForm'''
+ t[0] = t[1]
+
+def p_NameForm (t):
+ '''NameForm : LCASE_IDENT
+ | LCASE_IDENT_ASSIGNED'''
+ t [0] = t[1]
+
+def p_NumberForm (t):
+ '''NumberForm : NUMBER'''
+# | DefinedValue'''
+ t [0] = t[1]
+
+def p_NameAndNumberForm (t):
+ '''NameAndNumberForm : LCASE_IDENT_ASSIGNED LPAREN NumberForm RPAREN
+ | LCASE_IDENT LPAREN NumberForm RPAREN'''
+ t[0] = Node('name_and_number', ident = t[1], number = t[3])
+
+# 32 Notation for the relative object identifier type -------------------------
+
+# 32.1
+def p_RelativeOIDType (t):
+ 'RelativeOIDType : RELATIVE_OID'
+ t[0] = RelativeOIDType()
+
+# 33 Notation for the embedded-pdv type ---------------------------------------
+
+# 33.1
+def p_EmbeddedPDVType (t):
+ 'EmbeddedPDVType : EMBEDDED PDV'
+ t[0] = EmbeddedPDVType()
+
+# 34 Notation for the external type -------------------------------------------
+
+# 34.1
+def p_ExternalType (t):
+ 'ExternalType : EXTERNAL'
+ t[0] = ExternalType()
+
+# 36 Notation for character string types --------------------------------------
+
+# 36.1
+def p_CharacterStringType (t):
+ '''CharacterStringType : RestrictedCharacterStringType
+ | UnrestrictedCharacterStringType'''
+ t[0] = t[1]
+
+
+# 37 Definition of restricted character string types --------------------------
+
+def p_RestrictedCharacterStringType_1 (t):
+ 'RestrictedCharacterStringType : BMPString'
+ t[0] = BMPStringType ()
+def p_RestrictedCharacterStringType_2 (t):
+ 'RestrictedCharacterStringType : GeneralString'
+ t[0] = GeneralStringType ()
+def p_RestrictedCharacterStringType_3 (t):
+ 'RestrictedCharacterStringType : GraphicString'
+ t[0] = GraphicStringType ()
+def p_RestrictedCharacterStringType_4 (t):
+ 'RestrictedCharacterStringType : IA5String'
+ t[0] = IA5StringType ()
+def p_RestrictedCharacterStringType_5 (t):
+ 'RestrictedCharacterStringType : ISO646String'
+ t[0] = ISO646StringType ()
+def p_RestrictedCharacterStringType_6 (t):
+ 'RestrictedCharacterStringType : NumericString'
+ t[0] = NumericStringType ()
+def p_RestrictedCharacterStringType_7 (t):
+ 'RestrictedCharacterStringType : PrintableString'
+ t[0] = PrintableStringType ()
+def p_RestrictedCharacterStringType_8 (t):
+ 'RestrictedCharacterStringType : TeletexString'
+ t[0] = TeletexStringType ()
+def p_RestrictedCharacterStringType_9 (t):
+ 'RestrictedCharacterStringType : T61String'
+ t[0] = T61StringType ()
+def p_RestrictedCharacterStringType_10 (t):
+ 'RestrictedCharacterStringType : UniversalString'
+ t[0] = UniversalStringType ()
+def p_RestrictedCharacterStringType_11 (t):
+ 'RestrictedCharacterStringType : UTF8String'
+ t[0] = UTF8StringType ()
+def p_RestrictedCharacterStringType_12 (t):
+ 'RestrictedCharacterStringType : VideotexString'
+ t[0] = VideotexStringType ()
+def p_RestrictedCharacterStringType_13 (t):
+ 'RestrictedCharacterStringType : VisibleString'
+ t[0] = VisibleStringType ()
+
+
+# 40 Definition of unrestricted character string types ------------------------
+
+# 40.1
+def p_UnrestrictedCharacterStringType (t):
+ 'UnrestrictedCharacterStringType : CHARACTER STRING'
+ t[0] = UnrestrictedCharacterStringType ()
+
+
+# 41 Notation for types defined in clauses 42 to 44 ---------------------------
+
+# 42 Generalized time ---------------------------------------------------------
+
+def p_UsefulType_1 (t):
+ 'UsefulType : GeneralizedTime'
+ t[0] = GeneralizedTime()
+
+# 43 Universal time -----------------------------------------------------------
+
+def p_UsefulType_2 (t):
+ 'UsefulType : UTCTime'
+ t[0] = UTCTime()
+
+# 44 The object descriptor type -----------------------------------------------
+
+def p_UsefulType_3 (t):
+ 'UsefulType : ObjectDescriptor'
+ t[0] = ObjectDescriptor()
+
+
+# 45 Constrained types --------------------------------------------------------
+
+# 45.1
+def p_ConstrainedType_1 (t):
+ 'ConstrainedType : Type Constraint'
+ t[0] = t[1]
+ t[0].AddConstraint(t[2])
+
+def p_ConstrainedType_2 (t):
+ 'ConstrainedType : TypeWithConstraint'
+ t[0] = t[1]
+
+# 45.5
+def p_TypeWithConstraint_1 (t):
+ '''TypeWithConstraint : SET Constraint OF Type
+ | SET SizeConstraint OF Type'''
+ t[0] = SetOfType (val = t[4], constr = t[2])
+
+def p_TypeWithConstraint_2 (t):
+ '''TypeWithConstraint : SEQUENCE Constraint OF Type
+ | SEQUENCE SizeConstraint OF Type'''
+ t[0] = SequenceOfType (val = t[4], constr = t[2])
+
+def p_TypeWithConstraint_3 (t):
+ '''TypeWithConstraint : SET Constraint OF NamedType
+ | SET SizeConstraint OF NamedType'''
+ t[0] = SetOfType (val = t[4], constr = t[2])
+
+def p_TypeWithConstraint_4 (t):
+ '''TypeWithConstraint : SEQUENCE Constraint OF NamedType
+ | SEQUENCE SizeConstraint OF NamedType'''
+ t[0] = SequenceOfType (val = t[4], constr = t[2])
+
+# 45.6
+# 45.7
+def p_Constraint (t):
+ 'Constraint : LPAREN ConstraintSpec ExceptionSpec RPAREN'
+ t[0] = t[2]
+
+def p_ConstraintSpec (t):
+ '''ConstraintSpec : ElementSetSpecs
+ | GeneralConstraint'''
+ t[0] = t[1]
+
+# 46 Element set specification ------------------------------------------------
+
+# 46.1
+def p_ElementSetSpecs_1 (t):
+ 'ElementSetSpecs : RootElementSetSpec'
+ t[0] = t[1]
+
+def p_ElementSetSpecs_2 (t):
+ 'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS'
+ t[0] = t[1]
+ t[0].ext = True
+
+def p_ElementSetSpecs_3 (t):
+ 'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS COMMA AdditionalElementSetSpec'
+ t[0] = t[1]
+ t[0].ext = True
+
+def p_RootElementSetSpec (t):
+ 'RootElementSetSpec : ElementSetSpec'
+ t[0] = t[1]
+
+def p_AdditionalElementSetSpec (t):
+ 'AdditionalElementSetSpec : ElementSetSpec'
+ t[0] = t[1]
+
+def p_ElementSetSpec (t):
+ 'ElementSetSpec : Unions'
+ t[0] = t[1]
+
+def p_Unions_1 (t):
+ 'Unions : Intersections'
+ t[0] = t[1]
+
+def p_Unions_2 (t):
+ 'Unions : UElems UnionMark Intersections'
+ t[0] = Constraint(type = 'Union', subtype = [t[1], t[3]])
+
+def p_UElems (t):
+ 'UElems : Unions'
+ t[0] = t[1]
+
+def p_Intersections_1 (t):
+ 'Intersections : IntersectionElements'
+ t[0] = t[1]
+
+def p_Intersections_2 (t):
+ 'Intersections : IElems IntersectionMark IntersectionElements'
+ t[0] = Constraint(type = 'Intersection', subtype = [t[1], t[3]])
+
+def p_IElems (t):
+ 'IElems : Intersections'
+ t[0] = t[1]
+
+def p_IntersectionElements (t):
+ 'IntersectionElements : Elements'
+ t[0] = t[1]
+
+def p_UnionMark (t):
+ '''UnionMark : BAR
+ | UNION'''
+
+def p_IntersectionMark (t):
+ '''IntersectionMark : CIRCUMFLEX
+ | INTERSECTION'''
+
+# 46.5
+def p_Elements_1 (t):
+ 'Elements : SubtypeElements'
+ t[0] = t[1]
+
+def p_Elements_2 (t):
+ 'Elements : LPAREN ElementSetSpec RPAREN'
+ t[0] = t[2]
+
+# 47 Subtype elements ---------------------------------------------------------
+
+# 47.1 General
+def p_SubtypeElements (t):
+ '''SubtypeElements : SingleValue
+ | ContainedSubtype
+ | ValueRange
+ | PermittedAlphabet
+ | SizeConstraint
+ | TypeConstraint
+ | InnerTypeConstraints
+ | PatternConstraint'''
+ t[0] = t[1]
+
+# 47.2 Single value
+# 47.2.1
+def p_SingleValue (t):
+ 'SingleValue : Value'
+ t[0] = Constraint(type = 'SingleValue', subtype = t[1])
+
+# 47.3 Contained subtype
+# 47.3.1
+def p_ContainedSubtype (t):
+ 'ContainedSubtype : Includes Type'
+ t[0] = Constraint(type = 'ContainedSubtype', subtype = t[2])
+
+def p_Includes (t):
+ '''Includes : INCLUDES
+ | '''
+
+# 47.4 Value range
+# 47.4.1
+def p_ValueRange (t):
+ 'ValueRange : LowerEndpoint RANGE UpperEndpoint'
+ t[0] = Constraint(type = 'ValueRange', subtype = [t[1], t[3]])
+
+# 47.4.3
+def p_LowerEndpoint_1 (t):
+ 'LowerEndpoint : LowerEndValue'
+ t[0] = t[1]
+
+def p_LowerEndpoint_2 (t):
+ 'LowerEndpoint : LowerEndValue LT'
+ t[0] = t[1] # but not inclusive range
+
+def p_UpperEndpoint_1 (t):
+ 'UpperEndpoint : UpperEndValue'
+ t[0] = t[1]
+
+def p_UpperEndpoint_2 (t):
+ 'UpperEndpoint : LT UpperEndValue'
+ t[0] = t[1] # but not inclusive range
+
+# 47.4.4
+def p_LowerEndValue (t):
+ '''LowerEndValue : Value
+ | MIN'''
+ t[0] = t[1] # XXX
+
+def p_UpperEndValue (t):
+ '''UpperEndValue : Value
+ | MAX'''
+ t[0] = t[1]
+
+# 47.5 Size constraint
+# 47.5.1
+def p_SizeConstraint (t):
+ 'SizeConstraint : SIZE Constraint'
+ t[0] = Constraint (type = 'Size', subtype = t[2])
+
+# 47.6 Type constraint
+# 47.6.1
+def p_TypeConstraint (t):
+ 'TypeConstraint : Type'
+ t[0] = Constraint (type = 'Type', subtype = t[1])
+
+# 47.7 Permitted alphabet
+# 47.7.1
+def p_PermittedAlphabet (t):
+ 'PermittedAlphabet : FROM Constraint'
+ t[0] = Constraint (type = 'From', subtype = t[2])
+
+# 47.8 Inner subtyping
+# 47.8.1
+def p_InnerTypeConstraints (t):
+ '''InnerTypeConstraints : WITH COMPONENT SingleTypeConstraint
+ | WITH COMPONENTS MultipleTypeConstraints'''
+ pass # ignore PER invisible constraint
+
+# 47.8.3
+def p_SingleTypeConstraint (t):
+ 'SingleTypeConstraint : Constraint'
+ t[0] = t[1]
+
+# 47.8.4
+def p_MultipleTypeConstraints (t):
+ '''MultipleTypeConstraints : FullSpecification
+ | PartialSpecification'''
+ t[0] = t[1]
+
+def p_FullSpecification (t):
+ 'FullSpecification : LBRACE TypeConstraints RBRACE'
+ t[0] = t[2]
+
+def p_PartialSpecification (t):
+ 'PartialSpecification : LBRACE ELLIPSIS COMMA TypeConstraints RBRACE'
+ t[0] = t[4]
+
+def p_TypeConstraints_1 (t):
+ 'TypeConstraints : named_constraint'
+ t [0] = [t[1]]
+
+def p_TypeConstraints_2 (t):
+ 'TypeConstraints : TypeConstraints COMMA named_constraint'
+ t[0] = t[1] + [t[3]]
+
def p_named_constraint_1 (t):
    'named_constraint : identifier constraint'
    # Bug fix: the original used ``return Node(...)``.  PLY ignores the
    # return value of a p_* rule action -- the production's semantic value
    # must be stored in t[0].  With ``return`` this rule yielded None, so
    # TypeConstraints lists were populated with None entries.
    t[0] = Node ('named_constraint', ident = t[1], constr = t[2])
+
def p_named_constraint_2 (t):
    'named_constraint : constraint'
    # Bug fix: as in p_named_constraint_1, PLY discards the return value of
    # a rule action; the value must be assigned to t[0] or the production
    # evaluates to None.
    t[0] = Node ('named_constraint', constr = t[1])
+
+def p_constraint (t):
+ 'constraint : value_constraint presence_constraint'
+ t[0] = Node ('constraint', value = t[1], presence = t[2])
+
+def p_value_constraint_1 (t):
+ 'value_constraint : Constraint'
+ t[0] = t[1]
+
+def p_value_constraint_2 (t):
+ 'value_constraint : '
+ pass
+
+def p_presence_constraint_1 (t):
+ '''presence_constraint : PRESENT
+ | ABSENT
+ | OPTIONAL'''
+ t[0] = t[1]
+
+def p_presence_constraint_2 (t):
+ '''presence_constraint : '''
+ pass
+
+# 47.9 Pattern constraint
+# 47.9.1
+def p_PatternConstraint (t):
+ 'PatternConstraint : PATTERN Value'
+ t[0] = Constraint (type = 'Pattern', subtype = t[2])
+
+# 49 The exception identifier
+
+# 49.4
+def p_ExceptionSpec_1 (t):
+ 'ExceptionSpec : EXCLAMATION ExceptionIdentification'
+ pass
+
+def p_ExceptionSpec_2 (t):
+ 'ExceptionSpec : '
+ pass
+
+def p_ExceptionIdentification (t):
+ '''ExceptionIdentification : SignedNumber
+ | DefinedValue
+ | Type COLON Value '''
+ pass
+
+# /*-----------------------------------------------------------------------*/
+# /* Value Notation Productions */
+# /*-----------------------------------------------------------------------*/
+
+
+
+def p_binary_string (t):
+ 'binary_string : BSTRING'
+ t[0] = BStringValue(val = t[1])
+
+def p_hex_string (t):
+ 'hex_string : HSTRING'
+ t[0] = HStringValue(val = t[1])
+
+def p_char_string (t):
+ 'char_string : QSTRING'
+ t[0] = t[1]
+
+def p_number (t):
+ 'number : NUMBER'
+ t[0] = t[1]
+
+
+#--- ITU-T Recommendation X.208 -----------------------------------------------
+
+# 27 Notation for the any type ------------------------------------------------
+
+# 27.1
+def p_AnyType (t):
+ '''AnyType : ANY
+ | ANY DEFINED BY identifier'''
+ t[0] = AnyType()
+
+#--- ITU-T Recommendation X.681 -----------------------------------------------
+
+# 7 ASN.1 lexical items -------------------------------------------------------
+
+# 7.1 Information object class references
+
+def p_objectclassreference (t):
+ 'objectclassreference : CLASS_IDENT'
+ t[0] = Class_Ref(val=t[1])
+
+# 7.2 Information object references
+
+def p_objectreference (t):
+ 'objectreference : LCASE_IDENT'
+ t[0] = t[1]
+
+# 7.3 Information object set references
+
+#def p_objectsetreference (t):
+# 'objectsetreference : UCASE_IDENT'
+# t[0] = t[1]
+
+# 7.4 Type field references
+# ucasefieldreference
+# 7.5 Value field references
+# lcasefieldreference
+# 7.6 Value set field references
+# ucasefieldreference
+# 7.7 Object field references
+# lcasefieldreference
+# 7.8 Object set field references
+# ucasefieldreference
+
def p_ucasefieldreference (t):
    'ucasefieldreference : AMPERSAND UCASE_IDENT'
    # An upper-case field reference is spelled '&Name' (X.681 clause 7.4).
    t[0] = '&%s' % t[2]
+
def p_lcasefieldreference (t):
    'lcasefieldreference : AMPERSAND LCASE_IDENT'
    # A lower-case field reference is spelled '&name' (X.681 clause 7.5).
    t[0] = '&%s' % t[2]
+
+# 8 Referencing definitions
+
+# 8.1
# 8.1
def p_DefinedObjectClass (t):
    '''DefinedObjectClass : objectclassreference
                          | UsefulObjectClassReference'''
    t[0] = t[1]
    # Remember the referenced class name so that the upcoming object body
    # (see p_braceobjectbegin) can select the matching WITH SYNTAX table.
    global obj_class
    obj_class = t[0].val
+
+def p_DefinedObject (t):
+ '''DefinedObject : objectreference'''
+ t[0] = t[1]
+
+# 8.4
+def p_UsefulObjectClassReference (t):
+ '''UsefulObjectClassReference : TYPE_IDENTIFIER
+ | ABSTRACT_SYNTAX'''
+ t[0] = Class_Ref(val=t[1])
+
+# 9 Information object class definition and assignment
+
+# 9.1
+def p_ObjectClassAssignment (t):
+ '''ObjectClassAssignment : CLASS_IDENT ASSIGNMENT ObjectClass
+ | UCASE_IDENT ASSIGNMENT ObjectClass'''
+ t[0] = t[3]
+ t[0].SetName(t[1])
+ if isinstance(t[0], ObjectClassDefn):
+ t[0].reg_types()
+
+# 9.2
+def p_ObjectClass (t):
+ '''ObjectClass : DefinedObjectClass
+ | ObjectClassDefn
+ | ParameterizedObjectClass '''
+ t[0] = t[1]
+
+# 9.3
+def p_ObjectClassDefn (t):
+ '''ObjectClassDefn : CLASS LBRACE FieldSpecs RBRACE
+ | CLASS LBRACE FieldSpecs RBRACE WithSyntaxSpec'''
+ t[0] = ObjectClassDefn(fields = t[3])
+
+def p_FieldSpecs_1 (t):
+ 'FieldSpecs : FieldSpec'
+ t[0] = [t[1]]
+
+def p_FieldSpecs_2 (t):
+ 'FieldSpecs : FieldSpecs COMMA FieldSpec'
+ t[0] = t[1] + [t[3]]
+
+def p_WithSyntaxSpec (t):
+ 'WithSyntaxSpec : WITH SYNTAX lbraceignore rbraceignore'
+ t[0] = None
+
+# 9.4
+def p_FieldSpec (t):
+ '''FieldSpec : TypeFieldSpec
+ | FixedTypeValueFieldSpec
+ | VariableTypeValueFieldSpec
+ | FixedTypeValueSetFieldSpec
+ | ObjectFieldSpec
+ | ObjectSetFieldSpec '''
+ t[0] = t[1]
+
+# 9.5
+def p_TypeFieldSpec (t):
+ '''TypeFieldSpec : ucasefieldreference
+ | ucasefieldreference TypeOptionalitySpec '''
+ t[0] = TypeFieldSpec()
+ t[0].SetName(t[1])
+
+def p_TypeOptionalitySpec_1 (t):
+ 'TypeOptionalitySpec ::= OPTIONAL'
+ pass
+
+def p_TypeOptionalitySpec_2 (t):
+ 'TypeOptionalitySpec ::= DEFAULT Type'
+ pass
+
+# 9.6
+def p_FixedTypeValueFieldSpec (t):
+ '''FixedTypeValueFieldSpec : lcasefieldreference Type
+ | lcasefieldreference Type UNIQUE
+ | lcasefieldreference Type ValueOptionalitySpec
+ | lcasefieldreference Type UNIQUE ValueOptionalitySpec '''
+ t[0] = FixedTypeValueFieldSpec(typ = t[2])
+ t[0].SetName(t[1])
+
+def p_ValueOptionalitySpec_1 (t):
+ 'ValueOptionalitySpec ::= OPTIONAL'
+ pass
+
+def p_ValueOptionalitySpec_2 (t):
+ 'ValueOptionalitySpec ::= DEFAULT Value'
+ pass
+
+# 9.8
+
+def p_VariableTypeValueFieldSpec (t):
+ '''VariableTypeValueFieldSpec : lcasefieldreference FieldName
+ | lcasefieldreference FieldName ValueOptionalitySpec '''
+ t[0] = VariableTypeValueFieldSpec()
+ t[0].SetName(t[1])
+
+# 9.9
+def p_FixedTypeValueSetFieldSpec (t):
+ '''FixedTypeValueSetFieldSpec : ucasefieldreference Type
+ | ucasefieldreference Type ValueSetOptionalitySpec '''
+ t[0] = FixedTypeValueSetFieldSpec()
+ t[0].SetName(t[1])
+
+def p_ValueSetOptionalitySpec_1 (t):
+ 'ValueSetOptionalitySpec ::= OPTIONAL'
+ pass
+
+def p_ValueSetOptionalitySpec_2 (t):
+ 'ValueSetOptionalitySpec ::= DEFAULT ValueSet'
+ pass
+
+# 9.11
+def p_ObjectFieldSpec (t):
+ '''ObjectFieldSpec : lcasefieldreference DefinedObjectClass
+ | lcasefieldreference DefinedObjectClass ObjectOptionalitySpec '''
+ t[0] = ObjectFieldSpec(cls=t[2])
+ t[0].SetName(t[1])
+ global obj_class
+ obj_class = None
+
+def p_ObjectOptionalitySpec_1 (t):
+ 'ObjectOptionalitySpec ::= OPTIONAL'
+ pass
+
+def p_ObjectOptionalitySpec_2 (t):
+ 'ObjectOptionalitySpec ::= DEFAULT Object'
+ pass
+
+# 9.12
+def p_ObjectSetFieldSpec (t):
+ '''ObjectSetFieldSpec : ucasefieldreference DefinedObjectClass
+ | ucasefieldreference DefinedObjectClass ObjectSetOptionalitySpec '''
+ t[0] = ObjectSetFieldSpec(cls=t[2])
+ t[0].SetName(t[1])
+
+def p_ObjectSetOptionalitySpec_1 (t):
+ 'ObjectSetOptionalitySpec ::= OPTIONAL'
+ pass
+
+def p_ObjectSetOptionalitySpec_2 (t):
+ 'ObjectSetOptionalitySpec ::= DEFAULT ObjectSet'
+ pass
+
+# 9.13
+def p_PrimitiveFieldName (t):
+ '''PrimitiveFieldName : ucasefieldreference
+ | lcasefieldreference '''
+ t[0] = t[1]
+
+# 9.13
+def p_FieldName_1 (t):
+ 'FieldName : PrimitiveFieldName'
+ t[0] = t[1]
+
def p_FieldName_2 (t):
    'FieldName : FieldName DOT PrimitiveFieldName'
    # Accumulate dotted field-name components into a single string.
    t[0] = '.'.join((t[1], t[3]))
+
+# 11 Information object definition and assignment
+
+# 11.1
+def p_ObjectAssignment (t):
+ 'ObjectAssignment : objectreference DefinedObjectClass ASSIGNMENT Object'
+ t[0] = ObjectAssignment (ident = t[1], cls=t[2].val, val=t[4])
+ global obj_class
+ obj_class = None
+
+# 11.3
+def p_Object (t):
+ '''Object : DefinedObject
+ | ObjectDefn
+ | ParameterizedObject'''
+ t[0] = t[1]
+
+# 11.4
+def p_ObjectDefn (t):
+ 'ObjectDefn : lbraceobject bodyobject rbraceobject'
+ t[0] = t[2]
+
+# {...} block of object definition
+def p_lbraceobject(t):
+ 'lbraceobject : braceobjectbegin LBRACE'
+ t[0] = t[1]
+
def p_braceobjectbegin(t):
    'braceobjectbegin : '
    # Empty production used as a mid-rule action: just before the LBRACE of
    # an object body, either keep the lexer in the normal state (when the
    # object's class has a known, enabled syntax we can parse) or switch it
    # to 'braceignore' so the body is skipped brace-balanced.
    global lexer
    global obj_class
    if set_class_syntax(obj_class):
        state = 'INITIAL'
    else:
        # lexer.level tracks brace nesting while skipping the body.
        lexer.level = 1
        state = 'braceignore'
    lexer.push_state(state)
+
+def p_rbraceobject(t):
+ 'rbraceobject : braceobjectend RBRACE'
+ t[0] = t[2]
+
def p_braceobjectend(t):
    'braceobjectend : '
    # Mid-rule action closing an object body: restore the lexer state
    # pushed by p_braceobjectbegin and clear the active class syntax.
    global lexer
    lexer.pop_state()
    set_class_syntax(None)
+
+def p_bodyobject_1 (t):
+ 'bodyobject : '
+ t[0] = { }
+
+def p_bodyobject_2 (t):
+ 'bodyobject : cls_syntax_list'
+ t[0] = t[1]
+
+def p_cls_syntax_list_1 (t):
+ 'cls_syntax_list : cls_syntax_list cls_syntax'
+ t[0] = t[1]
+ t[0].update(t[2])
+
+def p_cls_syntax_list_2 (t):
+ 'cls_syntax_list : cls_syntax'
+ t[0] = t[1]
+
+# X.681
+def p_cls_syntax_1 (t):
+ 'cls_syntax : Type IDENTIFIED BY Value'
+ t[0] = { get_class_fieled(' ') : t[1], get_class_fieled(' '.join((t[2], t[3]))) : t[4] }
+
+def p_cls_syntax_2 (t):
+ 'cls_syntax : HAS PROPERTY Value'
+ t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] }
+
+# X.880
+def p_cls_syntax_3 (t):
+ '''cls_syntax : ERRORS ObjectSet
+ | LINKED ObjectSet
+ | RETURN RESULT BooleanValue
+ | SYNCHRONOUS BooleanValue
+ | INVOKE PRIORITY Value
+ | RESULT_PRIORITY Value
+ | PRIORITY Value
+ | ALWAYS RESPONDS BooleanValue
+ | IDEMPOTENT BooleanValue '''
+ t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] }
+
+def p_cls_syntax_4 (t):
+ '''cls_syntax : ARGUMENT Type
+ | RESULT Type
+ | PARAMETER Type '''
+ t[0] = { get_class_fieled(t[1]) : t[2] }
+
def p_cls_syntax_5 (t):
    'cls_syntax : CODE Value'
    # Register the value under the CODE field.  A ChoiceValue is
    # additionally registered under '<field>.<choice>' so it can be looked
    # up by the selected alternative as well.
    field = get_class_fieled(t[1])
    result = {field: t[2]}
    if isinstance(t[2], ChoiceValue):
        result['%s.%s' % (field, t[2].choice)] = t[2]
    t[0] = result
+
+def p_cls_syntax_6 (t):
+ '''cls_syntax : ARGUMENT Type OPTIONAL BooleanValue
+ | RESULT Type OPTIONAL BooleanValue
+ | PARAMETER Type OPTIONAL BooleanValue '''
+ t[0] = { get_class_fieled(t[1]) : t[2], get_class_fieled(' '.join((t[1], t[3]))) : t[4] }
+
+# 12 Information object set definition and assignment
+
+# 12.1
+def p_ObjectSetAssignment (t):
+ 'ObjectSetAssignment : UCASE_IDENT CLASS_IDENT ASSIGNMENT ObjectSet'
+ t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[2], val=t[4])
+
+# 12.3
+def p_ObjectSet (t):
+ 'ObjectSet : lbraceignore rbraceignore'
+ t[0] = None
+
+# 14 Notation for the object class field type ---------------------------------
+
+# 14.1
+def p_ObjectClassFieldType (t):
+ 'ObjectClassFieldType : DefinedObjectClass DOT FieldName'
+ t[0] = get_type_from_class(t[1], t[3])
+
+# 14.6
+def p_ObjectClassFieldValue (t):
+ '''ObjectClassFieldValue : OpenTypeFieldVal'''
+ t[0] = t[1]
+
+def p_OpenTypeFieldVal (t):
+ '''OpenTypeFieldVal : Type COLON Value
+ | NullType COLON NullValue'''
+ t[0] = t[3]
+
+
+# 15 Information from objects -------------------------------------------------
+
+# 15.1
+
def p_ValueFromObject (t):
    'ValueFromObject : LCASE_IDENT DOT FieldName'
    # An information-from-objects reference: 'object.&field' (X.681 cl. 15).
    t[0] = '.'.join((t[1], t[3]))
+
+
+# Annex C - The instance-of type ----------------------------------------------
+
+# C.2
+def p_InstanceOfType (t):
+ 'InstanceOfType : INSTANCE OF DefinedObjectClass'
+ t[0] = InstanceOfType()
+
+
+# --- tables ---
+
# Field-type tables.  Keys are always 'ClassName.&fieldname'; values are
# zero-argument factories producing a fresh type node per lookup.
useful_object_class_types = {
    # Annex A
    'TYPE-IDENTIFIER.&id' : lambda : ObjectIdentifierType(),
    'TYPE-IDENTIFIER.&Type' : lambda : OpenType(),
    # Annex B
    'ABSTRACT-SYNTAX.&id' : lambda : ObjectIdentifierType(),
    'ABSTRACT-SYNTAX.&Type' : lambda : OpenType(),
    'ABSTRACT-SYNTAX.&property' : lambda : BitStringType(),
}

# User-defined class fields mapped to type factories.
object_class_types = { }

# Class fields that resolve to a named type reference (string).
object_class_typerefs = { }

# Class fields that resolve to another object class (followed recursively).
object_class_classrefs = { }
+
+# dummy types
+class _VariableTypeValueFieldSpec (AnyType):
+ pass
+
+class _FixedTypeValueSetFieldSpec (AnyType):
+ pass
+
+class_types_creator = {
+ 'BooleanType' : lambda : BooleanType(),
+ 'IntegerType' : lambda : IntegerType(),
+ 'ObjectIdentifierType' : lambda : ObjectIdentifierType(),
+ 'OpenType' : lambda : OpenType(),
+ # dummy types
+ '_VariableTypeValueFieldSpec' : lambda : _VariableTypeValueFieldSpec(),
+ '_FixedTypeValueSetFieldSpec' : lambda : _FixedTypeValueSetFieldSpec(),
+}
+
+class_names = { }
+
+x681_syntaxes = {
+ 'TYPE-IDENTIFIER' : {
+ ' ' : '&Type',
+ 'IDENTIFIED' : 'IDENTIFIED',
+ #'BY' : 'BY',
+ 'IDENTIFIED BY' : '&id',
+ },
+ 'ABSTRACT-SYNTAX' : {
+ ' ' : '&Type',
+ 'IDENTIFIED' : 'IDENTIFIED',
+ #'BY' : 'BY',
+ 'IDENTIFIED BY' : '&id',
+ 'HAS' : 'HAS',
+ 'PROPERTY' : 'PROPERTY',
+ 'HAS PROPERTY' : '&property',
+ },
+}
+
+class_syntaxes_enabled = {
+ 'TYPE-IDENTIFIER' : True,
+ 'ABSTRACT-SYNTAX' : True,
+}
+
+class_syntaxes = {
+ 'TYPE-IDENTIFIER' : x681_syntaxes['TYPE-IDENTIFIER'],
+ 'ABSTRACT-SYNTAX' : x681_syntaxes['ABSTRACT-SYNTAX'],
+}
+
+class_current_syntax = None
+
def get_syntax_tokens(syntaxes):
    """Collect PLY token names for every single-word keyword of the given
    WITH SYNTAX tables.  Hyphens become underscores because token names
    must be valid identifiers; a keyword shared by several syntaxes is
    emitted only once."""
    seen = {}
    for table in syntaxes.values():
        for keyword in table:
            if ' ' not in keyword:
                seen[keyword] = keyword.replace('-', '_')
    return list(seen.values())
+
+tokens = tokens + get_syntax_tokens(x681_syntaxes)
+
def set_class_syntax(syntax):
    """Select *syntax* as the active object-class syntax.

    Returns True when the syntax is known and enabled; otherwise the
    current syntax is cleared and False is returned."""
    global class_current_syntax
    enabled = class_syntaxes_enabled.get(syntax, False)
    class_current_syntax = syntax if enabled else None
    return enabled
+
def is_class_syntax(name):
    """Return True when *name* is a keyword of the currently active class
    syntax; False when no syntax is active."""
    if not class_current_syntax:
        return False
    table = class_syntaxes[class_current_syntax]
    return name in table
+
def get_class_fieled(name):
    """Map a WITH SYNTAX keyword to its '&field' name in the active class
    syntax, or None when no syntax is active.  (The historical misspelling
    of the name is kept -- it is referenced throughout the file.)"""
    if class_current_syntax:
        return class_syntaxes[class_current_syntax][name]
    return None
+
# True when *name* has been registered as an object-class name.
def is_class_ident(name):
    return name in class_names

# Register *name* as a known object-class name (idempotent).
def add_class_ident(name):
    #print "add_class_ident", name
    class_names[name] = name
+
def get_type_from_class(cls, fld):
    """Resolve the type of field *fld* (possibly dotted, e.g. '&obj.&Type')
    of object class *cls*, following class references recursively.
    Unknown fields fall back to a fresh AnyType()."""
    head, _, rest = fld.partition('.')
    clsname = cls.val if isinstance(cls, Class_Ref) else cls
    key = clsname + '.' + head

    # A class-valued field: descend into the referenced class.
    if key in object_class_classrefs:
        return get_type_from_class(object_class_classrefs[key], rest)

    # A field fixed to a named type: return a reference to it.
    if key in object_class_typerefs:
        return Type_Ref(val=object_class_typerefs[key])

    # User-defined fields take precedence over the built-in (Annex A/B)
    # tables; anything unknown becomes an open/ANY type.
    make = (object_class_types.get(key)
            or useful_object_class_types.get(key)
            or (lambda: AnyType()))
    return make()
+
def set_type_to_class(cls, fld, pars):
    # Record the kind/type of CLASS field 'cls.fld'.
    # pars[0] (optional) is the type-kind name, defaulting to 'OpenType';
    # pars[1] (optional) is a type or class reference target.
    # Returns True on success, False for an incomplete specification;
    # raises CompError on a conflicting redefinition.
    #print "set_type_to_class", cls, fld, pars
    key = cls + '.' + fld
    typename = 'OpenType'
    if (len(pars) > 0):
        typename = pars[0]
    else:
        # NOTE: pars is mutated so the conflict message below can compare
        # against the effective (defaulted) specification.
        pars.append(typename)
    typeref = None
    if (len(pars) > 1):
        if (isinstance(pars[1], Class_Ref)):
            pars[1] = pars[1].val
        typeref = pars[1]

    # Build a description of any existing registration for this key so a
    # conflicting redefinition can be reported.
    msg = None
    if key in object_class_types:
        msg = object_class_types[key]().type
    if key in object_class_typerefs:
        msg = "TypeReference " + object_class_typerefs[key]
    if key in object_class_classrefs:
        msg = "ClassReference " + object_class_classrefs[key]

    # An identical re-registration is not a conflict.
    if msg == ' '.join(pars):
        msg = None

    if msg:
        msg0 = "Can not define CLASS field %s as '%s'\n" % (key, ' '.join(pars))
        msg1 = "Already defined as '%s'" % (msg)
        raise CompError(msg0 + msg1)

    if (typename == 'ClassReference'):
        if not typeref: return False
        object_class_classrefs[key] = typeref
        return True

    if (typename == 'TypeReference'):
        if not typeref: return False
        object_class_typerefs[key] = typeref
        return True

    # Otherwise the field is a concrete built-in kind (BooleanType, ...).
    creator = class_types_creator.get(typename)
    if creator:
        object_class_types[key] = creator
        return True
    else:
        return False
+
def import_class_from_module(mod, cls):
    """Make object class *cls*, defined in module *mod*, available under
    its plain name by copying its per-field entries from the
    module-scoped key '$mod$cls.<field>' to 'cls.<field>'."""
    add_class_ident(cls)
    mcls = "$%s$%s" % (mod, cls)
    # Bug fix: the re-registered key must use the *field* part (kk[1]) of
    # the original key, not the class part (kk[0]); the original code built
    # keys like 'Cls.$mod$Cls' which could never match a lookup of
    # 'Cls.&field' in get_type_from_class().
    for table in (object_class_classrefs, object_class_typerefs, object_class_types):
        for k in list(table.keys()):
            kk = k.split('.', 1)
            if kk[0] == mcls:
                table[cls + '.' + kk[1]] = table[k]
+
#--- ITU-T Recommendation X.682 -----------------------------------------------
# Grammar productions for the X.682 constraint notation.  The docstring of
# each p_* function below is a PLY grammar rule read by yacc; its wording is
# semantically significant and must not be changed.

# 8 General constraint specification ------------------------------------------

# 8.1
def p_GeneralConstraint (t):
    '''GeneralConstraint : UserDefinedConstraint
                         | TableConstraint
                         | ContentsConstraint'''
    t[0] = t[1]

# 9 User-defined constraints --------------------------------------------------

# 9.1
def p_UserDefinedConstraint (t):
    'UserDefinedConstraint : CONSTRAINED BY LBRACE UserDefinedConstraintParameterList RBRACE'
    t[0] = Constraint(type = 'UserDefined', subtype = t[4])

# The parameter list may be empty, a single parameter, or comma-separated.
def p_UserDefinedConstraintParameterList_1 (t):
    'UserDefinedConstraintParameterList : '
    t[0] = []

def p_UserDefinedConstraintParameterList_2 (t):
    'UserDefinedConstraintParameterList : UserDefinedConstraintParameter'
    t[0] = [t[1]]

def p_UserDefinedConstraintParameterList_3 (t):
    'UserDefinedConstraintParameterList : UserDefinedConstraintParameterList COMMA UserDefinedConstraintParameter'
    t[0] = t[1] + [t[3]]

# 9.3
def p_UserDefinedConstraintParameter (t):
    'UserDefinedConstraintParameter : Type'
    t[0] = t[1]

# 10 Table constraints, including component relation constraints --------------

# 10.3
def p_TableConstraint (t):
    '''TableConstraint : SimpleTableConstraint
                       | ComponentRelationConstraint'''
    t[0] = Constraint(type = 'Table', subtype = t[1])

def p_SimpleTableConstraint (t):
    'SimpleTableConstraint : LBRACE UCASE_IDENT RBRACE'
    # Keep just the name of the constraining object set.
    t[0] = t[2]

# 10.7
def p_ComponentRelationConstraint (t):
    'ComponentRelationConstraint : LBRACE UCASE_IDENT RBRACE LBRACE AtNotations RBRACE'
    # Object-set name followed by the stringified list of "@..." notations.
    t[0] = t[2] + str(t[5])

def p_AtNotations_1 (t):
    'AtNotations : AtNotation'
    t[0] = [t[1]]

def p_AtNotations_2 (t):
    'AtNotations : AtNotations COMMA AtNotation'
    t[0] = t[1] + [t[3]]

def p_AtNotation_1 (t):
    'AtNotation : AT ComponentIdList'
    t[0] = '@' + t[2]

def p_AtNotation_2 (t):
    'AtNotation : AT DOT Level ComponentIdList'
    t[0] = '@.' + t[3] + t[4]

def p_Level_1 (t):
    'Level : DOT Level'
    t[0] = '.' + t[2]

def p_Level_2 (t):
    'Level : '
    # Empty level: no extra dots.
    t[0] = ''

def p_ComponentIdList_1 (t):
    'ComponentIdList : LCASE_IDENT'
    t[0] = t[1]

def p_ComponentIdList_2 (t):
    'ComponentIdList : ComponentIdList DOT LCASE_IDENT'
    t[0] = t[1] + '.' + t[3]

# 11 Contents constraints -----------------------------------------------------

# 11.1
def p_ContentsConstraint (t):
    'ContentsConstraint : CONTAINING type_ref'
    t[0] = Constraint(type = 'Contents', subtype = t[2])
+
+
#--- ITU-T Recommendation X.683 -----------------------------------------------
# Parameterized definitions.  asn2wrs does not expand formal parameters:
# the brace-enclosed parameter list is swallowed wholesale by the lexer
# (see lbraceignore/rbraceignore) and the assignment is then handled like
# its unparameterized counterpart.

# 8 Parameterized assignments -------------------------------------------------

# 8.1
def p_ParameterizedAssignment (t):
    '''ParameterizedAssignment : ParameterizedTypeAssignment
                               | ParameterizedObjectClassAssignment
                               | ParameterizedObjectAssignment
                               | ParameterizedObjectSetAssignment'''
    t[0] = t[1]

# 8.2
def p_ParameterizedTypeAssignment (t):
    'ParameterizedTypeAssignment : UCASE_IDENT ParameterList ASSIGNMENT Type'
    t[0] = t[4]
    t[0].SetName(t[1]) # t[0].SetName(t[1] + 'xxx')

def p_ParameterizedObjectClassAssignment (t):
    '''ParameterizedObjectClassAssignment : CLASS_IDENT ParameterList ASSIGNMENT ObjectClass
                                          | UCASE_IDENT ParameterList ASSIGNMENT ObjectClass'''
    t[0] = t[4]
    t[0].SetName(t[1])
    # A full class definition additionally registers its field types.
    if isinstance(t[0], ObjectClassDefn):
        t[0].reg_types()

def p_ParameterizedObjectAssignment (t):
    'ParameterizedObjectAssignment : objectreference ParameterList DefinedObjectClass ASSIGNMENT Object'
    t[0] = ObjectAssignment (ident = t[1], cls=t[3].val, val=t[5])
    # Reset the object-class context tracked while parsing the Object body.
    global obj_class
    obj_class = None

def p_ParameterizedObjectSetAssignment (t):
    'ParameterizedObjectSetAssignment : UCASE_IDENT ParameterList DefinedObjectClass ASSIGNMENT ObjectSet'
    t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[3].val, val=t[5])

# 8.3
def p_ParameterList (t):
    'ParameterList : lbraceignore rbraceignore'
    # Formal parameters are not interpreted; the lexer skips the whole
    # brace-enclosed list.
+
+#def p_ParameterList (t):
+# 'ParameterList : LBRACE Parameters RBRACE'
+# t[0] = t[2]
+
+#def p_Parameters_1 (t):
+# 'Parameters : Parameter'
+# t[0] = [t[1]]
+
+#def p_Parameters_2 (t):
+# 'Parameters : Parameters COMMA Parameter'
+# t[0] = t[1] + [t[3]]
+
+#def p_Parameter_1 (t):
+# 'Parameter : Type COLON Reference'
+# t[0] = [t[1], t[3]]
+
+#def p_Parameter_2 (t):
+# 'Parameter : Reference'
+# t[0] = t[1]
+
+
# 9 Referencing parameterized definitions -------------------------------------
# Actual parameter lists are ignored; the referenced entity passes through
# unchanged.

# 9.1
def p_ParameterizedReference (t):
    'ParameterizedReference : Reference LBRACE RBRACE'
    # The (empty) actual parameter braces are dropped.
    t[0] = t[1]
    #t[0].val += 'xxx'

# 9.2
def p_ParameterizedType (t):
    'ParameterizedType : type_ref ActualParameterList'
    t[0] = t[1]
    #t[0].val += 'xxx'


def p_ParameterizedObjectClass (t):
    'ParameterizedObjectClass : DefinedObjectClass ActualParameterList'
    t[0] = t[1]
    #t[0].val += 'xxx'

def p_ParameterizedObject (t):
    'ParameterizedObject : DefinedObject ActualParameterList'
    t[0] = t[1]
    #t[0].val += 'xxx'

# 9.5
def p_ActualParameterList (t):
    'ActualParameterList : lbraceignore rbraceignore'
    # Actual parameters are skipped wholesale by the lexer.
+
+#def p_ActualParameterList (t):
+# 'ActualParameterList : LBRACE ActualParameters RBRACE'
+# t[0] = t[2]
+
+#def p_ActualParameters_1 (t):
+# 'ActualParameters : ActualParameter'
+# t[0] = [t[1]]
+
+#def p_ActualParameters_2 (t):
+# 'ActualParameters : ActualParameters COMMA ActualParameter'
+# t[0] = t[1] + [t[3]]
+
+#def p_ActualParameter (t):
+# '''ActualParameter : Type
+# | Value'''
+# t[0] = t[1]
+
+
+#--- ITU-T Recommendation X.880 -----------------------------------------------
+
# Built-in information object classes from ITU-T X.880 (Remote Operations).
# Maps class name -> field name -> field-type spec; the spec list is
# interpreted by set_type_to_class():
#   []                       - no predefined type for the field
#   ['ClassReference', CLS]  - field refers to objects of class CLS
#   ['TypeReference', NAME]  - field typed by the named type (e.g. 'Code')
#   [other-name]             - resolved through class_types_creator
x880_classes = {
    'OPERATION' : {
        '&ArgumentType' : [],
        '&argumentTypeOptional' : [ 'BooleanType' ],
        '&returnResult' : [ 'BooleanType' ],
        '&ResultType' : [],
        '&resultTypeOptional' : [ 'BooleanType' ],
        '&Errors' : [ 'ClassReference', 'ERROR' ],
        '&Linked' : [ 'ClassReference', 'OPERATION' ],
        '&synchronous' : [ 'BooleanType' ],
        '&idempotent' : [ 'BooleanType' ],
        '&alwaysReturns' : [ 'BooleanType' ],
        '&InvokePriority' : [ '_FixedTypeValueSetFieldSpec' ],
        '&ResultPriority' : [ '_FixedTypeValueSetFieldSpec' ],
        '&operationCode' : [ 'TypeReference', 'Code' ],
    },
    'ERROR' : {
        '&ParameterType' : [],
        '&parameterTypeOptional' : [ 'BooleanType' ],
        '&ErrorPriority' : [ '_FixedTypeValueSetFieldSpec' ],
        '&errorCode' : [ 'TypeReference', 'Code' ],
    },
    'OPERATION-PACKAGE' : {
        '&Both' : [ 'ClassReference', 'OPERATION' ],
        '&Consumer' : [ 'ClassReference', 'OPERATION' ],
        '&Supplier' : [ 'ClassReference', 'OPERATION' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
    'CONNECTION-PACKAGE' : {
        '&bind' : [ 'ClassReference', 'OPERATION' ],
        '&unbind' : [ 'ClassReference', 'OPERATION' ],
        '&responderCanUnbind' : [ 'BooleanType' ],
        '&unbindCanFail' : [ 'BooleanType' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
    'CONTRACT' : {
        '&connection' : [ 'ClassReference', 'CONNECTION-PACKAGE' ],
        '&OperationsOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
        '&InitiatorConsumerOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
        '&InitiatorSupplierOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
    'ROS-OBJECT-CLASS' : {
        '&Is' : [ 'ClassReference', 'ROS-OBJECT-CLASS' ],
        '&Initiates' : [ 'ClassReference', 'CONTRACT' ],
        '&Responds' : [ 'ClassReference', 'CONTRACT' ],
        '&InitiatesAndResponds' : [ 'ClassReference', 'CONTRACT' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
}
+
# Defined syntax (WITH SYNTAX) keywords of the X.880 classes: maps class
# name -> syntax word(s) -> class field set by that word.  Entries that map
# a word to itself (e.g. 'RETURN' : 'RETURN') have no field of their own -
# presumably intermediate tokens of the multi-word forms that follow them;
# verify against get_syntax_tokens().
x880_syntaxes = {
    'OPERATION' : {
        'ARGUMENT' : '&ArgumentType',
        'ARGUMENT OPTIONAL' : '&argumentTypeOptional',
        'RESULT' : '&ResultType',
        'RESULT OPTIONAL' : '&resultTypeOptional',
        'RETURN' : 'RETURN',
        'RETURN RESULT' : '&returnResult',
        'ERRORS' : '&Errors',
        'LINKED' : '&Linked',
        'SYNCHRONOUS' : '&synchronous',
        'IDEMPOTENT' : '&idempotent',
        'ALWAYS' : 'ALWAYS',
        'RESPONDS' : 'RESPONDS',
        'ALWAYS RESPONDS' : '&alwaysReturns',
        'INVOKE' : 'INVOKE',
        'PRIORITY' : 'PRIORITY',
        'INVOKE PRIORITY' : '&InvokePriority',
        'RESULT-PRIORITY': '&ResultPriority',
        'CODE' : '&operationCode',
    },
    'ERROR' : {
        'PARAMETER' : '&ParameterType',
        'PARAMETER OPTIONAL' : '&parameterTypeOptional',
        'PRIORITY' : '&ErrorPriority',
        'CODE' : '&errorCode',
    },
#  'OPERATION-PACKAGE' : {
#  },
#  'CONNECTION-PACKAGE' : {
#  },
#  'CONTRACT' : {
#  },
#  'ROS-OBJECT-CLASS' : {
#  },
}
+
def x880_module_begin():
    """Register every built-in X.880 class name at the start of a module."""
    for class_name in list(x880_classes):
        add_class_ident(class_name)
+
def x880_import(name):
    """Activate the built-in X.880 class `name` when it is IMPORTed."""
    syntax = x880_syntaxes.get(name)
    if syntax is not None:
        # Enable the class's WITH SYNTAX keywords.
        class_syntaxes_enabled[name] = True
        class_syntaxes[name] = syntax
    fields = x880_classes.get(name)
    if fields is not None:
        add_class_ident(name)
        # Register the predefined type of every class field.
        for field_name, spec in list(fields.items()):
            set_type_to_class(name, field_name, spec)
+
# Extend the lexer token list with the keywords introduced by the built-in
# X.880 class syntaxes above.
tokens = tokens + get_syntax_tokens(x880_syntaxes)
+
+# {...} OID value
+#def p_lbrace_oid(t):
+# 'lbrace_oid : brace_oid_begin LBRACE'
+# t[0] = t[1]
+
+#def p_brace_oid_begin(t):
+# 'brace_oid_begin : '
+# global in_oid
+# in_oid = True
+
+#def p_rbrace_oid(t):
+# 'rbrace_oid : brace_oid_end RBRACE'
+# t[0] = t[2]
+
+#def p_brace_oid_end(t):
+# 'brace_oid_end : '
+# global in_oid
+# in_oid = False
+
# {...} block to be ignored
def p_lbraceignore(t):
    'lbraceignore : braceignorebegin LBRACE'
    t[0] = t[1]

def p_braceignorebegin(t):
    'braceignorebegin : '
    # Empty production used only for its side effect: switch the lexer into
    # the 'braceignore' state so the whole {...} block is consumed without
    # being tokenized.
    global lexer
    lexer.level = 1
    lexer.push_state('braceignore')

def p_rbraceignore(t):
    'rbraceignore : braceignoreend RBRACE'
    t[0] = t[2]

def p_braceignoreend(t):
    'braceignoreend : '
    # Companion empty production: restore the previous lexer state.
    global lexer
    lexer.pop_state()

def p_error(t):
    # PLY syntax-error hook: abort with the offending token and the name of
    # the input file currently being compiled.
    global input_file
    raise ParseError(t, input_file)

def p_pyquote (t):
    '''pyquote : PYQUOTE'''
    t[0] = PyQuote (val = t[1])
+
+
def testlex (s):
    """Debug helper: tokenize `s` with the global lexer and print each token."""
    lexer.input(s)
    tok = lexer.token()
    while tok:
        print(tok)
        tok = lexer.token()
+
+
def do_module (ast, defined_dict):
    """Print the PyZ3950-style Python rendering of one parsed module."""
    assert ast.type == 'Module'
    context = Ctx(defined_dict)
    # Emit the module body first, then the collected assignments and
    # embedded Python quotes, in that order.
    print(ast.to_python(context))
    print(context.output_assignments())
    print(context.output_pyquotes())
+
def eth_do_module (ast, ectx):
    # Generate Wireshark dissector output for one parsed module; with the
    # 's' debug flag, dump the internal ASN.1 structure first.
    assert (ast.type == 'Module')
    if ectx.dbg('s'): print(ast.str_depth(0))
    ast.to_eth(ectx)
+
def testyacc(s, fn, defined_dict):
    """Debug helper: parse ASN.1 source `s` (read from file `fn`) and print
    it back as auto-generated PyZ3950 Python code."""
    ast = yacc.parse(s, debug=0)
    time_str = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
    print("""#!/usr/bin/env python
# Auto-generated from %s at %s
from PyZ3950 import asn1""" % (fn, time_str))
    # NOTE(review): eth_do_module() expects an EthCtx as its second argument
    # but receives `defined_dict` here - this legacy path looks broken or
    # unused; confirm before relying on it.
    for module in ast:
        eth_do_module (module, defined_dict)
+
+
# Wireshark compiler
def eth_usage():
    """Print the asn2wrs command-line usage/help text to stdout."""
    print("""
  asn2wrs [-h|?] [-d dbg] [-b] [-p proto] [-c cnf_file] [-e] input_file(s) ...
  -h|?          : Usage
  -b            : BER (default is PER)
  -u            : Unaligned (default is aligned)
  -p proto      : Protocol name (implies -S). Default is module-name
                  from input_file (renamed by #.MODULE if present)
  -o name       : Output files name core (default is <proto>)
  -O dir        : Output directory for dissector
  -c cnf_file   : Conformance file
  -I path       : Path for conformance file includes
  -e            : Create conformance file for exported types
  -E            : Just create conformance file for exported types
  -S            : Single output for multiple modules
  -s template   : Single file output (template is input file
                  without .c/.h extension)
  -k            : Keep intermediate files though single file output is used
  -L            : Suppress #line directive from .cnf file
  -D dir        : Directory for input_file(s) (default: '.')
  -C            : Add check for SIZE constraints
  -r prefix     : Remove the prefix from type names

  input_file(s) : Input ASN.1 file(s)

  -d dbg        : Debug output, dbg = [l][y][p][s][a][t][c][m][o]
                  l - lex
                  y - yacc
                  p - parsing
                  s - internal ASN.1 structure
                  a - list of assignments
                  t - tables
                  c - conformance values
                  m - list of compiled modules with dependency
                  o - list of output files
  """)
+
+
+## Used to preparse C style comments
+## https://github.com/eerimoq/asn1tools/blob/master/asn1tools/parser.py#L231
+##
def ignore_comments(string):
    """Blank out (possibly nested) C-style comments by replacing them with
    spaces.  Pre-stripping comments before handing the text to the lexer
    reduces overall parsing time by roughly a factor of two.  Newlines seen
    inside a comment are emitted as-is, so the line count is preserved.
    """
    # Every token relevant to comment tracking, in document order.
    markers = [(mo.start(), mo.group(0))
               for mo in re.finditer(r'(/\*|\*/|\n)', string)]
    markers.sort()

    depth = 0           # current /* */ nesting depth
    comment_start = 0   # offset where the outermost open comment began
    copied_up_to = 0    # end of the last chunk already emitted
    pieces = []

    for offset, marker in markers:
        if depth > 0:
            # Inside a comment: only nesting changes and newlines matter.
            if marker == '/*':
                depth += 1
            elif marker == '*/':
                depth -= 1
                if depth == 0:
                    # Comment closed: replace its span with spaces.
                    offset += 2
                    pieces.append(' ' * (offset - comment_start))
                    copied_up_to = offset
            elif marker == '\n':
                # Keep line breaks that occur inside a comment.
                pieces.append('\n')
                copied_up_to = offset
        elif marker == '/*':
            # Comment opens: flush the preceding non-comment text.
            depth = 1
            comment_start = offset
            pieces.append(string[copied_up_to:comment_start])

    pieces.append(string[copied_up_to:])

    return ''.join(pieces)
+
def eth_main():
    """Command-line entry point of the asn2wrs compiler.

    Parses the options, optionally reads a conformance file, lexes/parses
    every input ASN.1 file, and emits the dissector source grouped per
    module, per protocol, or merged into a single output, as configured.
    """
    global input_file
    global g_conform
    global lexer
    print("ASN.1 to Wireshark dissector compiler")
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h?d:D:buXp:FTo:O:c:I:eESs:kLCr:")
    except getopt.GetoptError:
        eth_usage(); sys.exit(2)
    if len(args) < 1:
        eth_usage(); sys.exit(2)

    conform = EthCnf()
    conf_to_read = None
    output = EthOut()
    ectx = EthCtx(conform, output)
    # Compiler defaults; most can be overridden by the options below or by
    # directives in the conformance file.
    ectx.encoding = 'per'
    ectx.proto_opt = None
    ectx.fld_opt = {}
    ectx.tag_opt = False
    ectx.outnm_opt = None
    ectx.aligned = True
    ectx.dbgopt = ''
    ectx.new = True
    ectx.expcnf = False
    ectx.justexpcnf = False
    ectx.merge_modules = False
    ectx.group_by_prot = False
    ectx.conform.last_group = 0
    ectx.conform.suppress_line = False
    ectx.output.outnm = None
    ectx.output.single_file = None
    ectx.constraints_check = False
    # First option pass: everything that must be known before the
    # conformance file is read.
    for o, a in opts:
        if o in ("-h", "-?"):
            eth_usage(); sys.exit(2)
        if o in ("-c",):
            conf_to_read = relpath(a)
        if o in ("-I",):
            ectx.conform.include_path.append(relpath(a))
        if o in ("-E",):
            ectx.expcnf = True
            ectx.justexpcnf = True
        if o in ("-D",):
            ectx.srcdir = relpath(a)
        if o in ("-C",):
            ectx.constraints_check = True
        if o in ("-L",):
            # NOTE(review): -L sets ectx.suppress_line while the default
            # above initializes ectx.conform.suppress_line - verify both
            # attributes exist on their respective objects.
            ectx.suppress_line = True
        if o in ("-X",):
            warnings.warn("Command line option -X is obsolete and can be removed")
        if o in ("-T",):
            warnings.warn("Command line option -T is obsolete and can be removed")

    if conf_to_read:
        ectx.conform.read(conf_to_read)

    # Second option pass: hand the remaining options to the conformance
    # machinery.
    for o, a in opts:
        if o in ("-h", "-?", "-c", "-I", "-E", "-D", "-C", "-X", "-T"):
            pass # already processed
        else:
            par = []
            if a: par.append(a)
            ectx.conform.set_opt(o, par, "commandline", 0)

    # Map the -d debug letters onto lex/yacc debug levels.
    (ld, yd, pd) = (0, 0, 0)
    if ectx.dbg('l'): ld = 1
    if ectx.dbg('y'): yd = 1
    if ectx.dbg('p'): pd = 2
    lexer = lex.lex(debug=ld)
    parser = yacc.yacc(method='LALR', debug=yd, outputdir='.')
    parser.defaulted_states = {}
    g_conform = ectx.conform
    ast = []
    for fn in args:
        input_file = fn
        lexer.lineno = 1
        if (ectx.srcdir): fn = ectx.srcdir + '/' + fn
        # Read ASN.1 definition, trying one of the common encodings.
        # BUGFIX: use a context manager so the file handle is closed
        # promptly; the original open(fn, "rb").read() leaked it until
        # garbage collection.
        with open(fn, "rb") as f:
            data = f.read()
        for encoding in ('utf-8', 'windows-1252'):
            try:
                data = data.decode(encoding)
                break
            except Exception:
                warnings.warn_explicit("Decoding %s as %s failed, trying next." % (fn, encoding), UserWarning, '', 0)
        # Py2 compat, name.translate in eth_output_hf_arr fails with unicode
        # NOTE(review): on Python 3 this branch is reached only when every
        # decode above failed, and bytes has no .encode() - it looks like a
        # dead Python 2 leftover; confirm before removing.
        if not isinstance(data, str):
            data = data.encode('utf-8')
        data = ignore_comments(data)
        ast.extend(yacc.parse(data, lexer=lexer, debug=pd))
    ectx.eth_clean()
    if (ectx.merge_modules): # common output for all module
        ectx.eth_clean()
        for module in ast:
            eth_do_module(module, ectx)
        ectx.eth_prepare()
        ectx.eth_do_output()
    elif (ectx.groups()): # group by protocols/group
        groups = []
        pr2gr = {}
        if (ectx.group_by_prot): # group by protocols
            for module in ast:
                prot = module.get_proto(ectx)
                if prot not in pr2gr:
                    pr2gr[prot] = len(groups)
                    groups.append([])
                groups[pr2gr[prot]].append(module)
        else: # group by groups
            pass
        for gm in (groups):
            ectx.eth_clean()
            for module in gm:
                eth_do_module(module, ectx)
            ectx.eth_prepare()
            ectx.eth_do_output()
    else: # output for each module
        for module in ast:
            ectx.eth_clean()
            eth_do_module(module, ectx)
            ectx.eth_prepare()
            ectx.eth_do_output()

    if ectx.dbg('m'):
        ectx.dbg_modules()

    if ectx.dbg('c'):
        ectx.conform.dbg_print()
    if not ectx.justexpcnf:
        ectx.conform.unused_report()

    if ectx.dbg('o'):
        ectx.output.dbg_print()
    ectx.output.make_single_file(ectx.suppress_line)
+
+
# Python compiler
def main():
    """Legacy/interactive entry point for the PyZ3950 Python output mode."""
    if sys.version_info[0] < 3:
        print("This requires Python 3")
        sys.exit(2)

    testfn = testyacc
    if len (sys.argv) == 1:
        while True:
            # SECURITY NOTE(review): eval() on interactive input is a 2to3
            # artifact of Python 2's input(); it evaluates whatever the user
            # types (and raises SyntaxError on empty input, so the exit test
            # below is only reachable by typing an explicit '' / "").  Never
            # expose this path to untrusted input.
            s = eval(input ('Query: '))
            if len (s) == 0:
                break
            testfn (s, 'console', {})
    else:
        # Batch mode: compile every file named on the command line.
        defined_dict = {}
        for fn in sys.argv [1:]:
            f = open (fn, "r")
            testfn (f.read (), fn, defined_dict)
            f.close ()
            lexer.lineno = 1
+
+
+#--- BODY ---------------------------------------------------------------------
+
if __name__ == '__main__':
    # Invoked as 'asn2wrs' (or the historical 'asn2eth'): run the Wireshark
    # dissector compiler; any other script name runs the legacy Python
    # (PyZ3950) compiler.
    if (os.path.splitext(os.path.basename(sys.argv[0]))[0].lower() in ('asn2wrs', 'asn2eth')):
        eth_main()
    else:
        main()
+
+#------------------------------------------------------------------------------
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# c-basic-offset: 4; tab-width: 8; indent-tabs-mode: nil
+# vi: set shiftwidth=4 tabstop=8 expandtab:
+# :indentSize=4:tabSize=8:noTabs=true:
diff --git a/tools/asterix/README.md b/tools/asterix/README.md
new file mode 100644
index 0000000..d7b2101
--- /dev/null
+++ b/tools/asterix/README.md
@@ -0,0 +1,51 @@
+# Asterix parser generator
+
+*Asterix* is a set of standards, where each standard is defined
+as so called *asterix category*.
In addition, each *asterix category* is potentially released
in a number of editions. There is no guarantee of backward
compatibility between the editions.
+
+The structured version of asterix specifications is maintained
+in a separate project:
+<https://zoranbosnjak.github.io/asterix-specs/specs.html>
+
+The purpose of this directory is to convert from structured
+specifications (json format) to the `epan/dissectors/packet-asterix.c` file,
+which is the actual asterix parser for this project.
+
+It is important **NOT** to edit `epan/dissectors/packet-asterix.c` file
+manually, since this file is automatically generated.
+
+## Manual update procedure
+
+To sync with the upstream asterix specifications, run:
+
+```bash
+# show current upstream git revision (for reference)
+export ASTERIX_SPECS_REV=$(./tools/asterix/update-specs.py --reference)
+echo $ASTERIX_SPECS_REV
+
+# update asterix decoder
+./tools/asterix/update-specs.py > epan/dissectors/packet-asterix.c
+git add epan/dissectors/packet-asterix.c
+
+# inspect change, rebuild project, test...
+
+# commit change, with reference to upstream version
+git commit -m "asterix: Sync with asterix-specs #$ASTERIX_SPECS_REV"
+```
+
+## Automatic update procedure
+
+To integrate asterix updates to a periodic (GitLab CI) job, use `--update` option.
+For example:
+
+```
+...
+# Asterix categories.
+- ./tools/asterix/update-specs.py --update || echo "asterix failed." >> commit-message.txt
+- COMMIT_FILES+=("epan/dissectors/packet-asterix.c")
+...
+```
+
diff --git a/tools/asterix/packet-asterix-template.c b/tools/asterix/packet-asterix-template.c
new file mode 100644
index 0000000..e655cfd
--- /dev/null
+++ b/tools/asterix/packet-asterix-template.c
@@ -0,0 +1,867 @@
+/*
+
+Notice:
+
+
+This file is auto generated, do not edit!
+See tools/asterix/README.md for details.
+
+
+Data source:
+---{gitrev}---
+
+
+*/
+
+/* packet-asterix.c
+ * Routines for ASTERIX decoding
+ * By Marko Hrastovec <marko.hrastovec@sloveniacontrol.si>
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+/*
 * ASTERIX (All-purpose structured EUROCONTROL surveillance
 * information exchange) is a protocol related to air traffic control.
+ *
+ * The specifications can be downloaded from
+ * http://www.eurocontrol.int/services/asterix
+ */
+
+#include <config.h>
+
+#include <wsutil/bits_ctz.h>
+
+#include <epan/packet.h>
+#include <epan/prefs.h>
+#include <epan/proto_data.h>
+
+void proto_register_asterix(void);
+void proto_reg_handoff_asterix(void);
+
+#define PROTO_TAG_ASTERIX "ASTERIX"
+#define ASTERIX_PORT 8600
+
+#define MAX_DISSECT_STR 1024
+#define MAX_BUFFER 256
+
+static int proto_asterix = -1;
+
+static int hf_asterix_category = -1;
+static int hf_asterix_length = -1;
+static int hf_asterix_message = -1;
+static int hf_asterix_fspec = -1;
+static int hf_re_field_len = -1;
+static int hf_spare = -1;
+static int hf_counter = -1;
+static int hf_XXX_FX = -1;
+
+static int ett_asterix = -1;
+static int ett_asterix_category = -1;
+static int ett_asterix_length = -1;
+static int ett_asterix_message = -1;
+static int ett_asterix_subtree = -1;
+
+static dissector_handle_t asterix_handle;
+/* The following defines tell us how to decode the length of
+ * fields and how to construct their display structure */
+#define FIXED 1
+#define REPETITIVE 2
+#define FX 3
+/*#define FX_1 4*/
+/*#define RE 5*/
+#define COMPOUND 6
+/*#define SP 7*/
+/*#define FX_UAP 8*/
+#define EXP 9 /* Explicit (RE or SP) */
+
+/* The following defines tell us how to
+ * decode and display individual fields. */
+#define FIELD_PART_INT 0
+#define FIELD_PART_UINT 1
+#define FIELD_PART_FLOAT 2
+#define FIELD_PART_UFLOAT 3
+#define FIELD_PART_SQUAWK 4
+#define FIELD_PART_CALLSIGN 5
+#define FIELD_PART_ASCII 6
+#define FIELD_PART_FX 7
+#define FIELD_PART_HEX 8
+#define FIELD_PART_IAS_IM 9
+#define FIELD_PART_IAS_ASPD 10
+
/* Description of one contiguous bit-field within an ASTERIX data item. */
typedef struct FieldPart_s FieldPart;
struct FieldPart_s {
    uint16_t bit_length;        /* length of field in bits */
    double scaling_factor;      /* scaling factor of the field (for instance: 1/128) */
    uint8_t type;               /* Pre-defined type (FIELD_PART_*) for proper presentation */
    int *hf;                    /* Pointer to hf representing this kind of data */
    const char *format_string;  /* format string for showing float values */
};

/* The flexible array member 'field' below triggers -Wpedantic, hence the
 * DIAG_OFF/DIAG_ON guards. */
DIAG_OFF_PEDANTIC
/* Description of one ASTERIX data item (FIXED/REPETITIVE/FX/COMPOUND/EXP). */
typedef struct AsterixField_s AsterixField;
struct AsterixField_s {
    uint8_t type;                    /* type of field */
    unsigned length;                 /* fixed length */
    unsigned repetition_counter_size; /* size of repetition counter, length of one item is in length */
    unsigned header_length;          /* the size is in first header_length bytes of the field */
    int *hf;                         /* pointer to Wireshark hf_register_info */
    const FieldPart **part;          /* Look declaration and description of FieldPart above. */
    const AsterixField *field[];     /* subfields */
};
DIAG_ON_PEDANTIC
+
+static void dissect_asterix_packet (tvbuff_t *, packet_info *pinfo, proto_tree *);
+static void dissect_asterix_data_block (tvbuff_t *tvb, packet_info *pinfo, unsigned, proto_tree *, uint8_t, int);
+static int dissect_asterix_fields (tvbuff_t *, packet_info *pinfo, unsigned, proto_tree *, uint8_t, const AsterixField *[]);
+
+static void asterix_build_subtree (tvbuff_t *, packet_info *pinfo, unsigned, proto_tree *, const AsterixField *);
+static void twos_complement (int64_t *, int);
+static uint8_t asterix_bit (uint8_t, uint8_t);
+static unsigned asterix_fspec_len (tvbuff_t *, unsigned);
+static uint8_t asterix_field_exists (tvbuff_t *, unsigned, int);
+static uint8_t asterix_get_active_uap (tvbuff_t *, unsigned, uint8_t);
+static int asterix_field_length (tvbuff_t *, unsigned, const AsterixField *);
+static int asterix_field_offset (tvbuff_t *, unsigned, const AsterixField *[], int);
+static int asterix_message_length (tvbuff_t *, unsigned, uint8_t, uint8_t);
+
/* 6-bit-code -> character translation table used for callsign-style fields
 * (appears to follow the ICAO 6-bit alphabet - TODO confirm against the
 * EUROCONTROL specification). */
static const char AISCode[] = { ' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
                                'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' ', ' ', ' ', ' ', ' ',
                                ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
                                '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ', ' ', ' ', ' ', ' ', ' ' };

/* Display strings for the FX (field extension) bit of extensible items. */
static const value_string valstr_XXX_FX[] = {
    { 0, "End of data item" },
    { 1, "Extension into next extent" },
    { 0, NULL }
};
/* Shared 1-bit FX part used by every extensible (FX) field. */
static const FieldPart IXXX_FX = { 1, 1.0, FIELD_PART_FX, &hf_XXX_FX, NULL };
/* Anonymous spare (unused) bit runs of 1..7 bits. */
static const FieldPart IXXX_1bit_spare = { 1, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_2bit_spare = { 2, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_3bit_spare = { 3, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_4bit_spare = { 4, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_5bit_spare = { 5, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_6bit_spare = { 6, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_7bit_spare = { 7, 1.0, FIELD_PART_UINT, NULL, NULL };

/* Spare Item */
DIAG_OFF_PEDANTIC
static const AsterixField IX_SPARE = { FIXED, 0, 0, 0, &hf_spare, NULL, { NULL } };
+
+/* insert1 */
+---{insert1}---
+/* insert1 */
+
/* settings which category version to use for each ASTERIX category */
/* Index is the category number (0-255); the value selects the edition
 * table to decode with (presumably set from the user preferences - the
 * generated code around this template updates it).  An object with static
 * storage duration and an explicit array size is zero-initialized by the
 * language (C99 6.7.8p10), which replaces the previous 256 literal zero
 * initializers with identical values, type and size. */
static int global_categories_version[256];
+
+static int dissect_asterix (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_)
+{
+ col_set_str (pinfo->cinfo, COL_PROTOCOL, "ASTERIX");
+ col_clear (pinfo->cinfo, COL_INFO);
+
+ if (tree) { /* we are being asked for details */
+ dissect_asterix_packet (tvb, pinfo, tree);
+ }
+
+ return tvb_captured_length(tvb);
+}
+
/* Walk every ASTERIX data block in the frame.  Each block carries a 1-byte
 * category and a 2-byte big-endian total length (header included) followed
 * by one or more records, which are handled by dissect_asterix_data_block(). */
static void dissect_asterix_packet (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
    unsigned i;
    uint8_t category;
    uint16_t length;
    proto_item *asterix_packet_item;
    proto_tree *asterix_packet_tree;

    /* 'length' excludes the 3-byte header (see below), so each iteration
     * advances by length + 3 bytes. */
    for (i = 0; i < tvb_reported_length (tvb); i += length + 3) {
        /* all ASTERIX messages have the same structure:
         *
         * header:
         *
         * 1 byte   category  even though a category is referenced as I019,
         *                    this is just stored as decimal 19 (i.e. 0x13)
         * 2 bytes  length    the total length of this ASTERIX message, the
         *                    length includes the size of the header.
         *
         *                    Note that the there was a structural change at
         *                    one point that changes whether multiple
         *                    records can occur after the header or not
         *                    (each category specifies this explicitly. All
         *                    of the currently supported categories can have
         *                    multiple records so this implementation just
         *                    assumes that is always the case)
         *
         * record (multiple records can exists):
         *
         * n bytes  FSPEC     the field specifier is a bit mask where the
         *                    lowest bit of each byte is called the FX bit.
         *                    When the FX bit is set this indicates that
         *                    the FSPEC extends into the next byte.
         *                    Any other bit indicates the presence of the
         *                    field that owns that bit (as per the User
         *                    Application Profile (UAP)).
         * X bytes  Field Y   X is as per the specification for field Y.
         * etc.
         *
         * The User Application Profile (UAP) is simply a mapping from the
         * FSPEC to fields. Each category has its own UAP.
         */
        category = tvb_get_guint8 (tvb, i);
        length = (tvb_get_guint8 (tvb, i + 1) << 8) + tvb_get_guint8 (tvb, i + 2) - 3; /* -3 for category and length */

        asterix_packet_item = proto_tree_add_item (tree, proto_asterix, tvb, i, length + 3, ENC_NA);
        proto_item_append_text (asterix_packet_item, ", Category %03d", category);
        asterix_packet_tree = proto_item_add_subtree (asterix_packet_item, ett_asterix);
        proto_tree_add_item (asterix_packet_tree, hf_asterix_category, tvb, i, 1, ENC_BIG_ENDIAN);
        proto_tree_add_item (asterix_packet_tree, hf_asterix_length, tvb, i + 1, 2, ENC_BIG_ENDIAN);

        /* Records start right after the 3-byte block header. */
        dissect_asterix_data_block (tvb, pinfo, i + 3, asterix_packet_tree, category, length);
    }
}
+
/* Dissect all records of one data block.  'offset' points at the first
 * record (past the block header) and 'length' is the record payload size.
 * The UAP in effect and the record length are computed per record using the
 * edition selected in global_categories_version[]. */
static void dissect_asterix_data_block (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *tree, uint8_t category, int length)
{
    uint8_t active_uap;
    int fspec_len, inner_offset, size, counter;
    proto_item *asterix_message_item = NULL;
    proto_tree *asterix_message_tree = NULL;

    for (counter = 1, inner_offset = 0; inner_offset < length; counter++) {

        /* This loop handles parsing of each ASTERIX record */

        active_uap = asterix_get_active_uap (tvb, offset + inner_offset, category);
        size = asterix_message_length (tvb, offset + inner_offset, category, active_uap);
        if (size > 0) {
            asterix_message_item = proto_tree_add_item (tree, hf_asterix_message, tvb, offset + inner_offset, size, ENC_NA);
            proto_item_append_text (asterix_message_item, ", #%02d, length: %d", counter, size);
            asterix_message_tree = proto_item_add_subtree (asterix_message_item, ett_asterix_message);
            fspec_len = asterix_fspec_len (tvb, offset + inner_offset);
            /*show_fspec (tvb, asterix_message_tree, offset + inner_offset, fspec_len);*/
            proto_tree_add_item (asterix_message_tree, hf_asterix_fspec, tvb, offset + inner_offset, fspec_len, ENC_NA);

            /* Look up the UAP field table for this category/edition/UAP and
             * dissect the fields following the FSPEC. */
            size = dissect_asterix_fields (tvb, pinfo, offset + inner_offset, asterix_message_tree, category, categories[category][global_categories_version[category]][active_uap]);

            inner_offset += size + fspec_len;
        }
        else {
            /* Unknown/undecodable record: abandon the rest of the block to
             * avoid looping forever on a zero-sized record. */
            inner_offset = length;
        }
    }
}
+
+/* Dissect all data items of one record (or of one COMPOUND subfield,
+ * when called recursively) according to the active UAP.
+ * 'current_uap' is a NULL-terminated array of field descriptors.
+ * Returns the number of data bytes consumed, NOT counting the leading
+ * FSPEC (the caller accounts for the FSPEC separately). */
+static int dissect_asterix_fields (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *tree, uint8_t category, const AsterixField *current_uap[])
+{
+    unsigned i, j, size, start, len, inner_offset, fspec_len;
+    uint64_t counter;
+    proto_item *asterix_field_item = NULL;
+    proto_tree *asterix_field_tree = NULL;
+    proto_item *asterix_field_item2 = NULL;
+    proto_tree *asterix_field_tree2 = NULL;
+
+    if (current_uap == NULL)
+        return 0;
+
+    for (i = 0, size = 0; current_uap[i] != NULL; i++) {
+        /* start == 0 means the FSPEC bit for this item is not set. */
+        start = asterix_field_offset (tvb, offset, current_uap, i);
+        if (start > 0) {
+            len = asterix_field_length (tvb, offset + start, current_uap[i]);
+            size += len;
+            switch(current_uap[i]->type) {
+                case COMPOUND:
+                    /* A compound item carries its own FSPEC and sub-items;
+                     * recurse with the item's own field table. */
+                    asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
+                    asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
+                    fspec_len = asterix_fspec_len (tvb, offset + start);
+                    proto_tree_add_item (asterix_field_tree, hf_asterix_fspec, tvb, offset + start, fspec_len, ENC_NA);
+                    dissect_asterix_fields (tvb, pinfo, offset + start, asterix_field_tree, category, (const AsterixField **)current_uap[i]->field);
+                    break;
+                case REPETITIVE:
+                    /* Big-endian repetition counter followed by 'counter'
+                     * fixed-size repetitions. */
+                    asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
+                    asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
+                    for (j = 0, counter = 0; j < current_uap[i]->repetition_counter_size; j++) {
+                        counter = (counter << 8) + tvb_get_guint8 (tvb, offset + start + j);
+                    }
+                    proto_tree_add_item (asterix_field_tree, hf_counter, tvb, offset + start, current_uap[i]->repetition_counter_size, ENC_BIG_ENDIAN);
+                    for (j = 0, inner_offset = 0; j < counter; j++, inner_offset += current_uap[i]->length) {
+                        asterix_field_item2 = proto_tree_add_item (asterix_field_tree, *current_uap[i]->hf, tvb, offset + start + current_uap[i]->repetition_counter_size + inner_offset, current_uap[i]->length, ENC_NA);
+                        asterix_field_tree2 = proto_item_add_subtree (asterix_field_item2, ett_asterix_subtree);
+                        asterix_build_subtree (tvb, pinfo, offset + start + current_uap[i]->repetition_counter_size + inner_offset, asterix_field_tree2, current_uap[i]);
+                    }
+                    break;
+                /* currently not generated from asterix-spec*/
+                /*case EXP:
+                    asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
+                    asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
+                    proto_tree_add_item (asterix_field_tree, hf_re_field_len, tvb, offset + start, 1, ENC_BIG_ENDIAN);
+                    start++;
+                    fspec_len = asterix_fspec_len (tvb, offset + start);
+                    proto_tree_add_item (asterix_field_tree, hf_asterix_fspec, tvb, offset + start, fspec_len, ENC_NA);
+                    dissect_asterix_fields (tvb, pinfo, offset + start, asterix_field_tree, category, (const AsterixField **)current_uap[i]->field);
+                    break;*/
+                default: /* FIXED, FX, FX_1, FX_UAP */
+                    asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
+                    asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
+                    asterix_build_subtree (tvb, pinfo, offset + start, asterix_field_tree, current_uap[i]);
+                    break;
+            }
+        }
+    }
+    return size;
+}
+
+/* Render the bit-level parts ('field->part[]', NULL-terminated) of one
+ * data item into 'parent'.  Each part is decoded according to its
+ * FIELD_PART_* type; 'inner_offset' tracks the bit position within the
+ * item, and an unset FX bit stops further extents (go_on = 0). */
+static void asterix_build_subtree (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *parent, const AsterixField *field)
+{
+    header_field_info* hfi;
+    int bytes_in_type, byte_offset_of_mask;
+    int i, inner_offset, offset_in_tvb, length_in_tvb;
+    uint8_t go_on;
+    int64_t value;
+    char *str_buffer = NULL;
+    double scaling_factor = 1.0;
+    uint8_t *air_speed_im_bit;
+    if (field->part != NULL) {
+        for (i = 0, inner_offset = 0, go_on = 1; go_on && field->part[i] != NULL; i++) {
+            value = tvb_get_bits64 (tvb, offset * 8 + inner_offset, field->part[i]->bit_length, ENC_BIG_ENDIAN);
+            /* Parts without an hf (e.g. spare bits) still advance the
+             * bit offset below but add nothing to the tree. */
+            if (field->part[i]->hf != NULL) {
+                offset_in_tvb = offset + inner_offset / 8;
+                length_in_tvb = (inner_offset % 8 + field->part[i]->bit_length + 7) / 8;
+                switch (field->part[i]->type) {
+                    case FIELD_PART_FX:
+                        if (!value) go_on = 0;
+                        /* Fall through */
+                    case FIELD_PART_INT:
+                    case FIELD_PART_UINT:
+                    case FIELD_PART_HEX:
+                    case FIELD_PART_ASCII:
+                    case FIELD_PART_SQUAWK:
+                        hfi = proto_registrar_get_nth (*field->part[i]->hf);
+                        if (hfi->bitmask)
+                        {
+                            // for a small bit field to decode correctly with
+                            // a mask that belongs to a large(r) one we need to
+                            // re-adjust offset_in_tvb and length_in_tvb to
+                            // correctly align with the given hf mask.
+                            //
+                            // E.g. the following would not decode correctly:
+                            //   { &hf_020_050_V, ... FT_UINT16, ... 0x8000, ...
+                            // instead one would have to use
+                            //   { &hf_020_050_V, ... FT_UINT8, ... 0x80, ...
+                            //
+                            bytes_in_type = ftype_wire_size(hfi->type);
+                            if (bytes_in_type > 1)
+                            {
+                                byte_offset_of_mask = bytes_in_type - (ws_ilog2 (hfi->bitmask) + 8)/8;
+                                if (byte_offset_of_mask >= 0)
+                                {
+                                    offset_in_tvb -= byte_offset_of_mask;
+                                    length_in_tvb = bytes_in_type;
+                                }
+                            }
+                        }
+                        proto_tree_add_item (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, ENC_BIG_ENDIAN);
+                        break;
+                    case FIELD_PART_FLOAT:
+                        /* Signed quantity: sign-extend before scaling. */
+                        twos_complement (&value, field->part[i]->bit_length);
+                        /* Fall through */
+                    case FIELD_PART_UFLOAT:
+                        scaling_factor = field->part[i]->scaling_factor;
+                        if (field->part[i]->format_string != NULL)
+                            proto_tree_add_double_format_value (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor, field->part[i]->format_string, value * scaling_factor);
+                        else
+                            proto_tree_add_double (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor);
+                        break;
+                    case FIELD_PART_CALLSIGN:
+                        /* 8 characters of 6 bits each, ICAO alphabet. */
+                        str_buffer = wmem_strdup_printf(
+                                pinfo->pool,
+                                "%c%c%c%c%c%c%c%c",
+                                AISCode[(value >> 42) & 63],
+                                AISCode[(value >> 36) & 63],
+                                AISCode[(value >> 30) & 63],
+                                AISCode[(value >> 24) & 63],
+                                AISCode[(value >> 18) & 63],
+                                AISCode[(value >> 12) & 63],
+                                AISCode[(value >> 6) & 63],
+                                AISCode[value & 63]);
+                        proto_tree_add_string (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, str_buffer);
+                        break;
+                    case FIELD_PART_IAS_IM:
+                        /* special processing for I021/150 and I062/380#4 because Air Speed depends on IM subfield */
+                        air_speed_im_bit = wmem_new (pinfo->pool, uint8_t);
+                        *air_speed_im_bit = (tvb_get_guint8 (tvb, offset_in_tvb) & 0x80) >> 7;
+                        /* Save IM info for the packet. key = 21150. */
+                        p_add_proto_data (pinfo->pool, pinfo, proto_asterix, 21150, air_speed_im_bit);
+                        proto_tree_add_item (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, ENC_BIG_ENDIAN);
+                        break;
+                    case FIELD_PART_IAS_ASPD:
+                        /* special processing for I021/150 and I062/380#4 because Air Speed depends on IM subfield */
+                        air_speed_im_bit = (uint8_t *)p_get_proto_data (pinfo->pool, pinfo, proto_asterix, 21150);
+                        /* IM == 0 (or missing): IAS in fractions of NM/s;
+                         * IM == 1: Mach in units of 0.001. */
+                        if (!air_speed_im_bit || *air_speed_im_bit == 0)
+                            scaling_factor = 1.0/16384.0;
+                        else
+                            scaling_factor = 0.001;
+                        proto_tree_add_double (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor);
+                        break;
+                }
+            }
+            inner_offset += field->part[i]->bit_length;
+        }
+    } /* if not null */
+}
+
+/* Return 1 when bit 'bitNo' (0 = MSB) of byte 'b' is set, 0 otherwise
+ * (also 0 for out-of-range bitNo >= 8). */
+static uint8_t asterix_bit (uint8_t b, uint8_t bitNo)
+{
+    return bitNo < 8 && (b & (0x80 >> bitNo)) > 0;
+}
+
+/* Function makes int64_t two's complement.
+ * Only the bit_len bit are set in int64_t. All more significant
+ * bits need to be set to have proper two's complement.
+ * If the number is negative, all other bits must be set to 1.
+ * If the number is positive, all other bits must remain 0.
+ * In other words: sign-extend a bit_len-bit value in place.
+ * NOTE(review): callers must guarantee 1 <= bit_len <= 63; bit_len of 0
+ * or 64 would shift by an out-of-range amount -- confirm with generated
+ * field tables. */
+static void twos_complement (int64_t *v, int bit_len)
+{
+    if (*v & (G_GUINT64_CONSTANT(1) << (bit_len - 1))) {
+        *v |= (G_GUINT64_CONSTANT(0xffffffffffffffff) << bit_len);
+    }
+}
+
+/* Return the FSPEC length in bytes: FSPEC extends while each byte's
+ * LSB (the FX bit) is set.
+ * NOTE(review): the byte is fetched BEFORE 'i < max_length' is tested,
+ * and 'max_length' underflows (unsigned) when offset exceeds the
+ * reported length; tvb accessors throw on overrun, but the intended
+ * guard order looks like 'i < max_length && ...' -- confirm. */
+static unsigned asterix_fspec_len (tvbuff_t *tvb, unsigned offset)
+{
+    unsigned i;
+    unsigned max_length = tvb_reported_length (tvb) - offset;
+    for (i = 0; (tvb_get_guint8 (tvb, offset + i) & 1) && i < max_length; i++);
+    return i + 1;
+}
+
+/* Return 1 when the FSPEC bit for UAP position 'bitIndex' is set.
+ * Each FSPEC byte carries 7 item bits plus an FX bit, hence the
+ * 'bitIndex + bitIndex / 7' mapping; all preceding FSPEC bytes must
+ * have their FX (LSB) set for the bit to be reachable. */
+static uint8_t asterix_field_exists (tvbuff_t *tvb, unsigned offset, int bitIndex)
+{
+    uint8_t bitNo, i;
+    bitNo = bitIndex + bitIndex / 7;
+    for (i = 0; i < bitNo / 8; i++) {
+        if (!(tvb_get_guint8 (tvb, offset + i) & 1)) return 0;
+    }
+    return asterix_bit (tvb_get_guint8 (tvb, offset + i), bitNo % 8);
+}
+
+/* Return the total length in bytes of one data item located at
+ * 'offset', according to the descriptor's encoding type. */
+static int asterix_field_length (tvbuff_t *tvb, unsigned offset, const AsterixField *field)
+{
+    unsigned size;
+    uint64_t count;
+    uint8_t i;
+
+    size = 0;
+    switch(field->type) {
+        case FIXED:
+            size = field->length;
+            break;
+        case REPETITIVE:
+            /* Big-endian repetition counter followed by that many
+             * fixed-size repetitions. */
+            for (i = 0, count = 0; i < field->repetition_counter_size && i < sizeof (count); i++)
+                count = (count << 8) + tvb_get_guint8 (tvb, offset + i);
+            size = (unsigned)(field->repetition_counter_size + count * field->length);
+            break;
+        case FX:
+            /* Extents continue while the last octet's FX (LSB) is set. */
+            for (size = field->length + field->header_length; tvb_get_guint8 (tvb, offset + size - 1) & 1; size += field->length);
+            break;
+        case EXP:
+            /* Explicit item: length is carried in the leading
+             * header_length bytes (big-endian), including itself. */
+            for (i = 0, size = 0; i < field->header_length; i++) {
+                size = (size << 8) + tvb_get_guint8 (tvb, offset + i);
+            }
+            break;
+        case COMPOUND:
+            /* FSPEC */
+            for (size = 0; tvb_get_guint8 (tvb, offset + size) & 1; size++);
+            size++;
+
+            /* Sum the lengths of all present sub-items. */
+            for (i = 0; field->field[i] != NULL; i++) {
+                if (asterix_field_exists (tvb, offset, i))
+                    size += asterix_field_length (tvb, offset + size, field->field[i]);
+            }
+            break;
+    }
+    return size;
+}
+
+/* This works for category 001. For other it may require changes.
+ *
+ * Return the index of the UAP to use for the record at 'offset'.
+ * Only CAT001 defines multiple UAPs; its selector is the top bit of
+ * item I001/020 (always UAP position 1).  Everything else uses UAP 0. */
+static uint8_t asterix_get_active_uap (tvbuff_t *tvb, unsigned offset, uint8_t category)
+{
+    int i, inner_offset;
+    AsterixField **current_uap;
+
+    if ((category == 1) && (categories[category] != NULL)) { /* if category is supported */
+        if (categories[category][global_categories_version[category]][1] != NULL) { /* if exists another uap */
+            current_uap = (AsterixField **)categories[category][global_categories_version[category]][0];
+            if (current_uap != NULL) {
+                inner_offset = asterix_fspec_len (tvb, offset);
+                /* Walk present items until the selector is reached. */
+                for (i = 0; current_uap[i] != NULL; i++) {
+                    if (asterix_field_exists (tvb, offset, i)) {
+                        if (i == 1) { /* uap selector (I001/020) is always at index '1' */
+                            return tvb_get_guint8 (tvb, offset + inner_offset) >> 7;
+                        }
+                        inner_offset += asterix_field_length (tvb, offset + inner_offset, current_uap[i]);
+                    }
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+/* Return the byte offset (from the start of the record, i.e. including
+ * the FSPEC) of UAP item 'field_index', or 0 when the item is absent.
+ * The offset is the FSPEC length plus the lengths of all preceding
+ * present items. */
+static int asterix_field_offset (tvbuff_t *tvb, unsigned offset, const AsterixField *current_uap[], int field_index)
+{
+    int i, inner_offset;
+    inner_offset = 0;
+    if (asterix_field_exists (tvb, offset, field_index)) {
+        inner_offset = asterix_fspec_len (tvb, offset);
+        for (i = 0; i < field_index; i++) {
+            if (asterix_field_exists (tvb, offset, i))
+                inner_offset += asterix_field_length (tvb, offset + inner_offset, current_uap[i]);
+        }
+    }
+    return inner_offset;
+}
+
+/* Return the total length in bytes of the record at 'offset'
+ * (FSPEC plus all present items), or 0 when the category/UAP is not
+ * supported. */
+static int asterix_message_length (tvbuff_t *tvb, unsigned offset, uint8_t category, uint8_t active_uap)
+{
+    int i, size;
+    AsterixField **current_uap;
+
+    if (categories[category] != NULL) { /* if category is supported */
+        current_uap = (AsterixField **)categories[category][global_categories_version[category]][active_uap];
+        if (current_uap != NULL) {
+            size = asterix_fspec_len (tvb, offset);
+            for (i = 0; current_uap[i] != NULL; i++) {
+                if (asterix_field_exists (tvb, offset, i)) {
+                    size += asterix_field_length (tvb, offset + size, current_uap[i]);
+                }
+            }
+            return size;
+        }
+    }
+    return 0;
+}
+
+/* Standard Wireshark registration routine.  This file is a TEMPLATE:
+ * the '---{insert2}---' / '---{insert3}---' markers below are replaced
+ * by tools/asterix/update-specs.py with the generated hf entries and
+ * the per-category version preferences, respectively. */
+void proto_register_asterix (void)
+{
+    static hf_register_info hf[] = {
+        { &hf_asterix_category, { "Category", "asterix.category", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
+        { &hf_asterix_length, { "Length", "asterix.length", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } },
+        { &hf_asterix_message, { "Asterix message", "asterix.message", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
+        { &hf_asterix_fspec, { "FSPEC", "asterix.fspec", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
+        { &hf_re_field_len, { "RE LEN", "asterix.re_field_len", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
+        { &hf_spare, { "Spare", "asterix.spare", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
+        { &hf_counter, { "Counter", "asterix.counter", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
+        { &hf_XXX_FX, { "FX", "asterix.FX", FT_UINT8, BASE_DEC, VALS (valstr_XXX_FX), 0x01, "Extension into next extent", HFILL } },
+/* insert2 */
+---{insert2}---
+/* insert2 */
+    };
+
+    /* Setup protocol subtree array */
+    static int *ett[] = {
+        &ett_asterix,
+        &ett_asterix_category,
+        &ett_asterix_length,
+        &ett_asterix_message,
+        &ett_asterix_subtree
+    };
+
+    module_t *asterix_prefs_module;
+
+    proto_asterix = proto_register_protocol (
+        "ASTERIX packet", /* name */
+        "ASTERIX", /* short name */
+        "asterix" /* abbrev */
+    );
+
+    proto_register_field_array (proto_asterix, hf, array_length (hf));
+    proto_register_subtree_array (ett, array_length (ett));
+
+    asterix_handle = register_dissector ("asterix", dissect_asterix, proto_asterix);
+
+    asterix_prefs_module = prefs_register_protocol (proto_asterix, NULL);
+
+/* insert3 */
+---{insert3}---
+/* insert3 */
+}
+
+/* Handoff: attach the dissector to its UDP port (user-configurable
+ * via the "udp.port" preference mechanism). */
+void proto_reg_handoff_asterix (void)
+{
+    dissector_add_uint_with_preference("udp.port", ASTERIX_PORT, asterix_handle);
+}
+
+/*
+ * Editor modelines - https://www.wireshark.org/tools/modelines.html
+ *
+ * Local variables:
+ * c-basic-offset: 4
+ * tab-width: 8
+ * indent-tabs-mode: nil
+ * End:
+ *
+ * vi: set shiftwidth=4 tabstop=8 expandtab:
+ * :indentSize=4:tabSize=8:noTabs=true:
+ */
diff --git a/tools/asterix/update-specs.py b/tools/asterix/update-specs.py
new file mode 100755
index 0000000..7af735d
--- /dev/null
+++ b/tools/asterix/update-specs.py
@@ -0,0 +1,829 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# By Zoran Bošnjak <zoran.bosnjak@sloveniacontrol.si>
+#
+# Use asterix specifications in JSON format,
+# to generate C/C++ structures, suitable for wireshark.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+import argparse
+
+import urllib.request
+import json
+from copy import copy, deepcopy
+from itertools import chain, repeat, takewhile
+from functools import reduce
+import os
+import sys
+import re
+
+# Path to default upstream repository
+upstream_repo = 'https://zoranbosnjak.github.io/asterix-specs'
+# Generated dissector target, relative to the wireshark source tree root.
+dissector_file = 'epan/dissectors/packet-asterix.c'
+
+class Offset(object):
+    """Keep track of number of added bits.
+    It's like integer, except when offsets are added together,
+    a 'modulo 8' is applied, such that offset is always between [0,7].
+
+    NOTE: __add__ mutates self and returns it (unusual for '+'), which
+    is what makes 'ctx.offset += n' update the shared object in place.
+    """
+
+    def __init__(self):
+        self.current = 0
+
+    def __add__(self, other):
+        # In-place update; 'other' is a plain bit count (int).
+        self.current = (self.current + other) % 8
+        return self
+
+    @property
+    def get(self):
+        return self.current
+
+class Context(object):
+    """Support class to be used as a context manager.
+    The 'tell' method is used to output (print) some data.
+    All output is first collected to a buffer, then rendered
+    using a template file.
+    """
+    def __init__(self):
+        # buffer: channel name (e.g. 'insert1') -> list of output lines
+        self.buffer = {}
+        # bit offset within the item currently being generated
+        self.offset = Offset()
+        # set while generating parts nested under a Repetitive item
+        self.inside_repetitive = False
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        pass
+
+    def tell(self, channel, s):
+        """Append string 's' to an output channel."""
+        lines = self.buffer.get(channel, [])
+        lines.append(s)
+        self.buffer[channel] = lines
+
+    def reset_offset(self):
+        self.offset = Offset()
+
+def get_number(value):
+    """Get Natural/Real/Rational number as an object.
+
+    Wraps the JSON number description in a small class that renders
+    exactly (via str) and converts to float for arithmetic.
+    Raises Exception on an unknown 'type' tag.
+    """
+    class Integer(object):
+        def __init__(self, val):
+            self.val = val
+        def __str__(self):
+            return '{}'.format(self.val)
+        def __float__(self):
+            return float(self.val)
+
+    class Ratio(object):
+        def __init__(self, a, b):
+            self.a = a
+            self.b = b
+        def __str__(self):
+            return '{}/{}'.format(self.a, self.b)
+        def __float__(self):
+            return float(self.a) / float(self.b)
+
+    class Real(object):
+        def __init__(self, val):
+            self.val = val
+        def __str__(self):
+            # trim trailing zeros of the fixed-point rendering
+            return '{0:f}'.format(self.val).rstrip('0')
+        def __float__(self):
+            return float(self.val)
+
+    t = value['type']
+    val = value['value']
+
+    if t == 'Integer':
+        return Integer(int(val))
+    if t == 'Ratio':
+        x, y = val['numerator'], val['denominator']
+        return Ratio(x, y)
+    if t == 'Real':
+        return Real(float(val))
+    raise Exception('unexpected value type {}'.format(t))
+
+def replace_string(s, mapping):
+    """Helper function to replace each entry from the mapping.
+
+    Replacements are applied sequentially in dict insertion order, so
+    earlier replacements are visible to later keys.
+    """
+    for (key,val) in mapping.items():
+        s = s.replace(key, val)
+    return s
+
def safe_string(s):
    """Sanitize 's' for embedding in a C string literal.

    Escapes backslashes, quotes and question marks and removes control
    characters that would break a single-line literal (tab becomes a
    single space).  Replacements are applied in order, with backslash
    first so later escapes are not re-escaped.
    """
    return replace_string(s, {
        # from C reference manual
        chr(92): r"\\", # Backslash character.
        '?': r"\?", # Question mark character.
        "'": r"\'", # Single quotation mark.
        '"': r'\"', # Double quotation mark.
        "\a": "", # Audible alert.
        "\b": "", # Backspace character.
        # Was "\e": not a Python escape, so the key was the two
        # characters backslash + 'e', which never matches ESC and could
        # corrupt backslashes already doubled by the first entry.
        "\x1b": "", # <ESC> character. (This is a GNU extension.)
        "\f": "", # Form feed.
        "\n": "", # Newline character.
        "\r": "", # Carriage return.
        "\t": " ", # Horizontal tab.
        "\v": "", # Vertical tab.
    })
+
+def get_scaling(content):
+    """Get scaling factor from the content.
+
+    Returns the factor as a decimal string (suitable for pasting into
+    C source), or None when the content defines no scaling.  The raw
+    factor is divided by 2**fractionalBits for fixed-point quantities.
+    """
+    k = content.get('scaling')
+    if k is None:
+        return None
+    k = get_number(k)
+
+    fract = content['fractionalBits']
+
+    if fract > 0:
+        # 29 fractional digits keeps full double precision; trailing
+        # zeros are trimmed for readability.
+        scale = format(float(k) / (pow(2, fract)), '.29f')
+        scale = scale.rstrip('0')
+    else:
+        scale = format(float(k))
+    return scale
+
+def get_fieldpart(content):
+    """Get FIELD_PART* from the content.
+
+    Maps a spec 'content' description to the FIELD_PART_* enumerator
+    name used by the C dissector template.  Raises Exception on an
+    unknown content type or string variation.
+    """
+    t = content['type']
+    if t == 'Raw': return 'FIELD_PART_HEX'
+    elif t == 'Table': return 'FIELD_PART_UINT'
+    elif t == 'String':
+        var = content['variation']
+        if var == 'StringAscii': return 'FIELD_PART_ASCII'
+        elif var == 'StringICAO': return 'FIELD_PART_CALLSIGN'
+        elif var == 'StringOctal': return 'FIELD_PART_SQUAWK'
+        else:
+            raise Exception('unexpected string variation: {}'.format(var))
+    elif t == 'Integer':
+        if content['signed']:
+            return 'FIELD_PART_INT'
+        else:
+            return 'FIELD_PART_UINT'
+    elif t == 'Quantity':
+        if content['signed']:
+            return 'FIELD_PART_FLOAT'
+        else:
+            return 'FIELD_PART_UFLOAT'
+    elif t == 'Bds':
+        return 'FIELD_PART_HEX'
+    else:
+        raise Exception('unexpected content type: {}'.format(t))
+
+def download_url(path):
+    """Download url and return content as a string.
+
+    'path' is appended to the module-level 'upstream_repo' base URL;
+    the raw bytes are returned (callers decode).
+    """
+    with urllib.request.urlopen(upstream_repo + path) as url:
+        return url.read()
+
+def read_file(path):
+    """Read file content, return string.
+
+    NOTE(review): opens with the platform default encoding -- the spec
+    files are presumably UTF-8; confirm whether encoding='utf-8' should
+    be forced.
+    """
+    with open(path) as f:
+        return f.read()
+
def load_jsons(paths):
    """Load json files from either URL or from local disk.

    With an empty 'paths' list, download the upstream manifest and all
    category/ref definition files it references.  Otherwise read the
    given local files, descending into directories to collect any
    'definition.json' files.  Returns a list of JSON strings.
    Raises Exception for a path that is neither file nor directory.
    """

    # load from url
    if paths == []:
        manifest = download_url('/manifest.json').decode()
        listing = []
        for spec in json.loads(manifest):
            cat = spec['category']
            for edition in spec['cats']:
                listing.append('/specs/cat{}/cats/cat{}/definition.json'.format(cat, edition))
            for edition in spec['refs']:
                listing.append('/specs/cat{}/refs/ref{}/definition.json'.format(cat, edition))
        return [download_url(i).decode() for i in listing]

    # load from disk
    else:
        listing = []
        for path in paths:
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    for i in files:
                        # only pick up files named exactly 'definition.json'
                        if os.path.splitext(i) != ('definition', '.json'):
                            continue
                        listing.append(os.path.join(root, i))
            elif os.path.isfile(path):
                listing.append(path)
            else:
                # was '...{}'.path, which raised AttributeError instead
                # of producing the intended error message
                raise Exception('unexpected path type: {}'.format(path))
        return [read_file(f) for f in listing]
+
+def load_gitrev(paths):
+    """Read git revision reference.
+
+    Returns a list of provenance strings for the generated file header:
+    the upstream URL and its git revision when specs are downloaded, or
+    a '(local disk)' marker when local paths were given.
+    """
+
+    # load from url
+    if paths == []:
+        gitrev = download_url('/gitrev.txt').decode().strip()
+        return [upstream_repo, 'git revision: {}'.format(gitrev)]
+
+    # load from disk
+    else:
+        return ['(local disk)']
+
+def get_ft(ref, n, content, offset):
+    """Get FT... from the content.
+
+    Build the 'FT_..., BASE_..., VALS, mask' fragment of an
+    hf_register_info entry for an n-bit element starting at bit
+    position 'offset' (an Offset object, 0-7).  The field type width
+    is the enclosing whole number of bytes; the mask selects the n
+    bits within it ('0x00' means no mask needed).
+    """
+    a = offset.get
+
+    # bruto bit size (next multiple of 8)
+    (m, b) = divmod(a+n, 8)
+    m = m if b == 0 else m + 1
+    m *= 8
+
+    mask = '0x00'
+    if a != 0 or b != 0:
+        # bit pattern: a leading zeros, n ones, zero-padding to m bits
+        bits = chain(repeat(0, a), repeat(1, n), repeat(0, m-n-a))
+        mask = 0
+        for (a,b) in zip(bits, reversed(range(m))):
+            mask += a*pow(2,b)
+        mask = hex(mask)
+        # prefix mask with zeros '0x000...', to adjust mask size
+        assert mask[0:2] == '0x'
+        mask = mask[2:]
+        required_mask_size = (m//8)*2
+        add_some = required_mask_size - len(mask)
+        mask = '0x' + '0'*add_some + mask
+
+    t = content['type']
+
+    if t == 'Raw':
+        if n > 64: # very long items
+            assert (n % 8) == 0, "very long items require byte alignment"
+            return 'FT_NONE, BASE_NONE, NULL, 0x00'
+
+        if (n % 8): # not byte aligned
+            base = 'DEC'
+        else: # byte aligned
+            if n >= 32: # long items
+                base = 'HEX'
+            else: # short items
+                base = 'HEX_DEC'
+        return 'FT_UINT{}, BASE_{}, NULL, {}'.format(m, base, mask)
+    elif t == 'Table':
+        return 'FT_UINT{}, BASE_DEC, VALS (valstr_{}), {}'.format(m, ref, mask)
+    elif t == 'String':
+        var = content['variation']
+        if var == 'StringAscii':
+            return 'FT_STRING, BASE_NONE, NULL, {}'.format(mask)
+        elif var == 'StringICAO':
+            return 'FT_STRING, BASE_NONE, NULL, {}'.format(mask)
+        elif var == 'StringOctal':
+            return 'FT_UINT{}, BASE_OCT, NULL, {}'.format(m, mask)
+        else:
+            raise Exception('unexpected string variation: {}'.format(var))
+    elif t == 'Integer':
+        signed = content['signed']
+        if signed:
+            return 'FT_INT{}, BASE_DEC, NULL, {}'.format(m, mask)
+        else:
+            return 'FT_UINT{}, BASE_DEC, NULL, {}'.format(m, mask)
+    elif t == 'Quantity':
+        return 'FT_DOUBLE, BASE_NONE, NULL, 0x00'
+    elif t == 'Bds':
+        return 'FT_UINT{}, BASE_DEC, NULL, {}'.format(m, mask)
+    else:
+        raise Exception('unexpected content type: {}'.format(t))
+
+def reference(cat, edition, path):
+    """Create reference string.
+
+    Builds the C identifier suffix for an item: category (3 digits),
+    optional edition ('V<major>_<minor>'), and the item path joined
+    with underscores.
+    """
+    name = '_'.join(path)
+    if edition is None:
+        return('{:03d}_{}'.format(cat, name))
+    return('{:03d}_V{}_{}_{}'.format(cat, edition['major'], edition['minor'], name))
+
+def get_content(rule):
+    """Return the content dict of a rule.
+
+    Most cases are 'ContextFree'; 'Dependent' contents (whose decoding
+    depends on other items) are degraded to plain 'Raw'.
+    """
+    t = rule['type']
+    # Most cases are 'ContextFree', use as specified.
+    if t == 'ContextFree':
+        return rule['content']
+    # Handle 'Dependent' contents as 'Raw'.
+    elif t == 'Dependent':
+        return {'type': "Raw"}
+    else:
+        raise Exception('unexpected type: {}'.format(t))
+
+def get_bit_size(item):
+    """Return bit size of a (spare) item.
+
+    Spare items carry their width in 'length'; regular items in their
+    Element variation's 'size'.
+    """
+    if item['spare']:
+        return item['length']
+    else:
+        return item['variation']['size']
+
def get_description(item, content=None):
    """Return item description.

    Joins the item name (omitted for auto-generated items), its title
    and the content unit (when present) with commas; returns '' when
    none of them is available.
    """
    name = item['name'] if not is_generated(item) else None
    title = item.get('title')
    if content is not None and content.get('unit'):
        unit = '[{}]'.format(safe_string(content['unit']))
    else:
        unit = None

    # Materialize to a list: the original kept a filter object here,
    # which is always truthy, so the empty case fell through and
    # reduce() raised TypeError on an empty sequence.
    parts = [x for x in (name, title, unit) if x]
    if not parts:
        return ''
    return ', '.join(parts)
+
+def generate_group(item, variation=None):
+    """Generate group-item from element-item.
+
+    Wraps a bare Element in a single-member Group (member named
+    'VALUE', flagged is_generated) so that downstream code can treat
+    all items uniformly.  With 'variation' given, that variation is
+    used for the inner member instead of the item's own.
+    """
+    level2 = copy(item)
+    level2['name'] = 'VALUE'
+    level2['is_generated'] = True
+    if variation is None:
+        level1 = copy(item)
+        level1['variation'] = {
+            'type': 'Group',
+            'items': [level2],
+        }
+    else:
+        level2['variation'] = variation['variation']
+        level1 = {
+            'type': "Group",
+            'items': [level2],
+        }
+    return level1
+
+def is_generated(item):
+    """True when 'item' was synthesized by generate_group (checks key
+    presence only, not its value)."""
+    return item.get('is_generated') is not None
+
+def ungroup(item):
+    """Convert group of items of known size to element.
+
+    The resulting Element has Raw content and a size equal to the sum
+    of the group members' bit sizes; the original item is not mutated.
+    """
+    n = sum([get_bit_size(i) for i in item['variation']['items']])
+    result = copy(item)
+    result['variation'] = {
+        'rule': {
+            'content': {'type': 'Raw'},
+            'type': 'ContextFree',
+        },
+        'size': n,
+        'type': 'Element',
+    }
+    return result
+
+def part1(ctx, get_ref, catalogue):
+    """Generate components in order
+    - static int hf_...
+    - FieldPart
+    - FieldPart[]
+    - AsterixField
+
+    Emits C declarations to channel 'insert1' and hf_register_info
+    entries to channel 'insert2'.  'get_ref' maps an item path to the
+    C identifier suffix; 'catalogue' is the spec's item list.
+    handle_item/handle_variation recurse through the item structure,
+    so emission order follows the spec nesting.
+    """
+
+    tell = lambda s: ctx.tell('insert1', s)
+    tell_pr = lambda s: ctx.tell('insert2', s)
+
+    ctx.reset_offset()
+
+    def handle_item(path, item):
+        """Handle 'spare' or regular 'item'.
+        This function is used recursively, depending on the item structure.
+        """
+
+        def handle_variation(path, variation):
+            """Handle 'Element, Group...' variations.
+            This function is used recursively, depending on the item structure."""
+
+            t = variation['type']
+
+            ref = get_ref(path)
+
+            def part_of(item):
+                # FieldPart reference: generated spare part or named part
+                if item['spare']:
+                    return '&IXXX_{}bit_spare'.format(item['length'])
+                return '&I{}_{}'.format(ref, item['name'])
+
+            if t == 'Element':
+                tell('static int hf_{} = -1;'.format(ref))
+                n = variation['size']
+                content = get_content(variation['rule'])
+                scaling = get_scaling(content)
+                scaling = scaling if scaling is not None else 1.0
+                fp = get_fieldpart(content)
+
+                if content['type'] == 'Table':
+                    tell('static const value_string valstr_{}[] = {}'.format(ref, '{'))
+                    for (a,b) in content['values']:
+                        tell('    {} {}, "{}" {},'.format('{', a, safe_string(b), '}'))
+                    tell('    {} 0, NULL {}'.format('{', '}'))
+                    tell('};')
+
+                tell('static const FieldPart I{} = {} {}, {}, {}, &hf_{}, NULL {};'.format(ref, '{', n, scaling, fp, ref, '}'))
+                description = get_description(item, content)
+
+                ft = get_ft(ref, n, content, ctx.offset)
+                tell_pr('        {} &hf_{}, {} "{}", "asterix.{}", {}, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, ft, '}', '}'))
+
+                # advance the running bit offset (mod 8, see Offset)
+                ctx.offset += n
+
+            elif t == 'Group':
+                ctx.reset_offset()
+
+                description = get_description(item)
+                tell_pr('        {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
+
+                tell('static int hf_{} = -1;'.format(ref))
+                for i in variation['items']:
+                    handle_item(path, i)
+
+                # FieldPart[]
+                tell('static const FieldPart *I{}_PARTS[] = {}'.format(ref,'{'))
+                for i in variation['items']:
+                    tell('    {},'.format(part_of(i)))
+                tell('    NULL')
+                tell('};')
+
+                # AsterixField
+                bit_size = sum([get_bit_size(i) for i in variation['items']])
+                byte_size = bit_size // 8
+                parts = 'I{}_PARTS'.format(ref)
+                comp = '{ NULL }'
+                # inside a Repetitive the enclosing handler emits the
+                # AsterixField itself, so skip it here
+                if not ctx.inside_repetitive:
+                    tell('static const AsterixField I{} = {} FIXED, {}, 0, 0, &hf_{}, {}, {} {};'.format
+                        (ref, '{', byte_size, ref, parts, comp, '}'))
+
+            elif t == 'Extended':
+                ctx.reset_offset()
+
+                description = get_description(item)
+                tell_pr('        {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
+                tell('static int hf_{} = -1;'.format(ref))
+
+                # None entries mark FX (extension) bit positions
+                items = []
+                for i in variation['items']:
+                    if i is None:
+                        items.append(i)
+                        continue
+                    if i.get('variation') is not None:
+                        if i['variation']['type'] == 'Group':
+                            i = ungroup(i)
+                    items.append(i)
+
+                for i in items:
+                    if i is None:
+                        ctx.offset += 1
+                    else:
+                        handle_item(path, i)
+
+                tell('static const FieldPart *I{}_PARTS[] = {}'.format(ref,'{'))
+                for i in items:
+                    if i is None:
+                        tell('    &IXXX_FX,')
+                    else:
+                        tell('    {},'.format(part_of(i)))
+
+                tell('    NULL')
+                tell('};')
+
+                # AsterixField
+                # first extent: everything up to the first FX bit
+                first_part = list(takewhile(lambda x: x is not None, items))
+                n = (sum([get_bit_size(i) for i in first_part]) + 1) // 8
+                parts = 'I{}_PARTS'.format(ref)
+                comp = '{ NULL }'
+                tell('static const AsterixField I{} = {} FX, {}, 0, {}, &hf_{}, {}, {} {};'.format
+                    (ref, '{', n, 0, ref, parts, comp, '}'))
+
+            elif t == 'Repetitive':
+                ctx.reset_offset()
+                ctx.inside_repetitive = True
+
+                # Group is required below this item.
+                if variation['variation']['type'] == 'Element':
+                    subvar = generate_group(item, variation)
+                else:
+                    subvar = variation['variation']
+                handle_variation(path, subvar)
+
+                # AsterixField
+                bit_size = sum([get_bit_size(i) for i in subvar['items']])
+                byte_size = bit_size // 8
+                rep = variation['rep']['size'] // 8
+                parts = 'I{}_PARTS'.format(ref)
+                comp = '{ NULL }'
+                tell('static const AsterixField I{} = {} REPETITIVE, {}, {}, 0, &hf_{}, {}, {} {};'.format
+                    (ref, '{', byte_size, rep, ref, parts, comp, '}'))
+                ctx.inside_repetitive = False
+
+            elif t == 'Explicit':
+                ctx.reset_offset()
+                tell('static int hf_{} = -1;'.format(ref))
+                description = get_description(item)
+                tell_pr('        {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
+                tell('static const AsterixField I{} = {} EXP, 0, 0, 1, &hf_{}, NULL, {} NULL {} {};'.format(ref, '{', ref, '{', '}', '}'))
+
+            elif t == 'Compound':
+                ctx.reset_offset()
+                tell('static int hf_{} = -1;'.format(ref))
+                description = get_description(item)
+                tell_pr('        {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
+                comp = '{'
+                for i in variation['items']:
+                    if i is None:
+                        comp += ' &IX_SPARE,'
+                        continue
+                    # Group is required below this item.
+                    if i['variation']['type'] == 'Element':
+                        subitem = generate_group(i)
+                    else:
+                        subitem = i
+                    comp += ' &I{}_{},'.format(ref, subitem['name'])
+                    handle_item(path, subitem)
+                comp += ' NULL }'
+
+                # AsterixField
+                tell('static const AsterixField I{} = {} COMPOUND, 0, 0, 0, &hf_{}, NULL, {} {};'.format
+                    (ref, '{', ref, comp, '}'))
+
+            else:
+                raise Exception('unexpected variation type: {}'.format(t))
+
+        if item['spare']:
+            ctx.offset += item['length']
+            return
+
+        # Group is required on the first level.
+        if path == [] and item['variation']['type'] == 'Element':
+            variation = generate_group(item)['variation']
+        else:
+            variation = item['variation']
+        handle_variation(path + [item['name']], variation)
+
+    for item in catalogue:
+        # adjust 'repetitive fx' item
+        if item['variation']['type'] == 'Repetitive' and item['variation']['rep']['type'] == 'Fx':
+            var = item['variation']['variation'].copy()
+            if var['type'] != 'Element':
+                raise Exception("Expecting 'Element'")
+            item = item.copy()
+            item['variation'] = {
+                'type': 'Extended',
+                'items': [{
+                    'definition': None,
+                    'description': None,
+                    'name': 'Subitem',
+                    'remark': None,
+                    'spare': False,
+                    'title': 'Subitem',
+                    'variation': var,
+                }, None]
+            }
+        handle_item([], item)
+    tell('')
+
+def part2(ctx, ref, uap):
+    """Generate UAPs
+
+    Emits one NULL-terminated AsterixField* array per UAP variation,
+    plus the array-of-arrays indexing them.  Output is wrapped in
+    DIAG_OFF/ON_PEDANTIC because the initializers mix const levels.
+    """
+
+    tell = lambda s: ctx.tell('insert1', s)
+    tell('DIAG_OFF_PEDANTIC')
+
+    ut = uap['type']
+    if ut == 'uap':
+        # single UAP: normalize to the multi-UAP shape
+        variations = [{'name': 'uap', 'items': uap['items']}]
+    elif ut == 'uaps':
+        variations = uap['variations']
+    else:
+        raise Exception('unexpected uap type {}'.format(ut))
+
+    for var in variations:
+        tell('static const AsterixField *I{}_{}[] = {}'.format(ref, var['name'], '{'))
+        for i in var['items']:
+            if i is None:
+                tell('    &IX_SPARE,')
+            else:
+                tell('    &I{}_{},'.format(ref, i))
+        tell('    NULL')
+        tell('};')
+
+    tell('static const AsterixField **I{}[] = {}'.format(ref, '{'))
+    for var in variations:
+        tell('    I{}_{},'.format(ref, var['name']))
+    tell('    NULL')
+    tell('};')
+    tell('DIAG_ON_PEDANTIC')
+    tell('')
+
+def part3(ctx, specs):
+    """Generate
+    - static const AsterixField ***...
+    - static const enum_val_t ..._versions[]...
+
+    For every category present in 'specs', emits the per-edition lookup
+    array (newest first, preceded by the 'latest' alias I<cat>) and the
+    enum_val_t table backing the version preference.
+    """
+    tell = lambda s: ctx.tell('insert1', s)
+    def fmt_edition(cat, edition):
+        return 'I{:03d}_V{}_{}'.format(cat, edition['major'], edition['minor'])
+
+    cats = set([spec['number'] for spec in specs])
+    for cat in sorted(cats):
+        lst = [spec for spec in specs if spec['number'] == cat]
+        editions = sorted([val['edition'] for val in lst], key = lambda x: (x['major'], x['minor']), reverse=True)
+        editions_fmt = [fmt_edition(cat, edition) for edition in editions]
+        editions_str = ', '.join(['I{:03d}'.format(cat)] + editions_fmt)
+        tell('DIAG_OFF_PEDANTIC')
+        tell('static const AsterixField ***I{:03d}all[] = {} {} {};'.format(cat, '{', editions_str, '}'))
+        tell('DIAG_ON_PEDANTIC')
+        tell('')
+
+        tell('static const enum_val_t I{:03d}_versions[] = {}'.format(cat, '{'))
+        # entry 0 is the newest edition, labelled "(latest)"
+        edition = editions[0]
+        a = edition['major']
+        b = edition['minor']
+        tell('    {} "I{:03d}", "Version {}.{} (latest)", 0 {},'.format('{', cat, a, b, '}'))
+        for ix, edition in enumerate(editions, start=1):
+            a = edition['major']
+            b = edition['minor']
+            tell('    {} "I{:03d}_v{}_{}", "Version {}.{}", {} {},'.format('{', cat, a, b, a, b, ix, '}'))
+        tell('    { NULL, NULL, 0 }')
+        tell('};')
+        tell('')
+
+def part4(ctx, cats):
+    """Generate
+    - static const AsterixField ****categories[]...
+    - prefs_register_enum_preference ...
+
+    The categories[] table has one slot per possible category number
+    (0-255), NULL for unsupported ones; preference registration lines
+    go to the 'insert3' channel (spliced into proto_register_asterix).
+    """
+    tell = lambda s: ctx.tell('insert1', s)
+    tell_pr = lambda s: ctx.tell('insert3', s)
+
+    tell('static const AsterixField ****categories[] = {')
+    for i in range(0, 256):
+        val = 'I{:03d}all'.format(i) if i in cats else 'NULL'
+        tell('    {}, /* {:03d} */'.format(val, i))
+    tell('    NULL')
+    tell('};')
+
+    for cat in sorted(cats):
+        tell_pr('    prefs_register_enum_preference (asterix_prefs_module, "i{:03d}_version", "I{:03d} version", "Select the CAT{:03d} version", &global_categories_version[{}], I{:03d}_versions, false);'.format(cat, cat, cat, cat, cat))
+
+class Output(object):
+ """Output context manager. Write either to stdout or to a dissector
+ file directly, depending on 'update' argument"""
+ def __init__(self, update):
+ self.update = update
+ self.f = None
+
+ def __enter__(self):
+ if self.update:
+ self.f = open(dissector_file, 'w')
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ if self.f is not None:
+ self.f.close()
+
+ def dump(self, line):
+ if self.f is None:
+ print(line)
+ else:
+ self.f.write(line+'\n')
+
+def remove_rfs(spec):
+ """Remove RFS item. It's present in specs, but not used."""
+ catalogue = [] # create new catalogue without RFS
+ rfs_items = []
+ for i in spec['catalogue']:
+ if i['variation']['type'] == 'Rfs':
+ rfs_items.append(i['name'])
+ else:
+ catalogue.append(i)
+ if not rfs_items:
+ return spec
+ spec2 = copy(spec)
+ spec2['catalogue'] = catalogue
+ # remove RFS from UAP(s)
+ uap = deepcopy(spec['uap'])
+ ut = uap['type']
+ if ut == 'uap':
+ items = [None if i in rfs_items else i for i in uap['items']]
+ if items[-1] is None: items = items[:-1]
+ uap['items'] = items
+ elif ut == 'uaps':
+ variations = []
+ for var in uap['variations']:
+ items = [None if i in rfs_items else i for i in var['items']]
+ if items[-1] is None: items = items[:-1]
+ var['items'] = items
+ variations.append(var)
+ uap['variations'] = variations
+ else:
+ raise Exception('unexpected uap type {}'.format(ut))
+ spec2['uap'] = uap
+ return spec2
+
+def is_valid(spec):
+ """Check spec"""
+ def check_item(item):
+ if item['spare']:
+ return True
+ return check_variation(item['variation'])
+ def check_variation(variation):
+ t = variation['type']
+ if t == 'Element':
+ return True
+ elif t == 'Group':
+ return all([check_item(i) for i in variation['items']])
+ elif t == 'Extended':
+ trailing_fx = variation['items'][-1] == None
+ if not trailing_fx:
+ return False
+ return all([check_item(i) for i in variation['items'] if i is not None])
+ elif t == 'Repetitive':
+ return check_variation(variation['variation'])
+ elif t == 'Explicit':
+ return True
+ elif t == 'Compound':
+ items = [i for i in variation['items'] if i is not None]
+ return all([check_item(i) for i in items])
+ else:
+ raise Exception('unexpected variation type {}'.format(t))
+ return all([check_item(i) for i in spec['catalogue']])
+
+def main():
+ parser = argparse.ArgumentParser(description='Process asterix specs files.')
+ parser.add_argument('paths', metavar='PATH', nargs='*',
+ help='json spec file(s), use upstream repository in no input is given')
+ parser.add_argument('--reference', action='store_true',
+ help='print upstream reference and exit')
+ parser.add_argument("--update", action="store_true",
+ help="Update %s as needed instead of writing to stdout" % dissector_file)
+ args = parser.parse_args()
+
+ if args.reference:
+ gitrev_short = download_url('/gitrev.txt').decode().strip()[0:10]
+ print(gitrev_short)
+ sys.exit(0)
+
+ # read and json-decode input files
+ jsons = load_jsons(args.paths)
+ jsons = [json.loads(i) for i in jsons]
+ jsons = sorted(jsons, key = lambda x: (x['number'], x['edition']['major'], x['edition']['minor']))
+ jsons = [spec for spec in jsons if spec['type'] == 'Basic']
+ jsons = [remove_rfs(spec) for spec in jsons]
+ jsons = [spec for spec in jsons if is_valid(spec)]
+
+ cats = list(set([x['number'] for x in jsons]))
+ latest_editions = {cat: sorted(
+ filter(lambda x: x['number'] == cat, jsons),
+ key = lambda x: (x['edition']['major'], x['edition']['minor']), reverse=True)[0]['edition']
+ for cat in cats}
+
+ # regular expression for template rendering
+ ins = re.compile(r'---\{([A-Za-z0-9_]*)\}---')
+
+ gitrev = load_gitrev(args.paths)
+ with Context() as ctx:
+ for i in gitrev:
+ ctx.tell('gitrev', i)
+
+ # generate parts into the context buffer
+ for spec in jsons:
+ is_latest = spec['edition'] == latest_editions[spec['number']]
+
+ ctx.tell('insert1', '/* Category {:03d}, edition {}.{} */'.format(spec['number'], spec['edition']['major'], spec['edition']['minor']))
+
+ # handle part1
+ get_ref = lambda path: reference(spec['number'], spec['edition'], path)
+ part1(ctx, get_ref, spec['catalogue'])
+ if is_latest:
+ ctx.tell('insert1', '/* Category {:03d}, edition {}.{} (latest) */'.format(spec['number'], spec['edition']['major'], spec['edition']['minor']))
+ get_ref = lambda path: reference(spec['number'], None, path)
+ part1(ctx, get_ref, spec['catalogue'])
+
+ # handle part2
+ cat = spec['number']
+ edition = spec['edition']
+ ref = '{:03d}_V{}_{}'.format(cat, edition['major'], edition['minor'])
+ part2(ctx, ref, spec['uap'])
+ if is_latest:
+ ref = '{:03d}'.format(cat)
+ part2(ctx, ref, spec['uap'])
+
+ part3(ctx, jsons)
+ part4(ctx, set([spec['number'] for spec in jsons]))
+
+ # use context buffer to render template
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(script_path, 'packet-asterix-template.c')) as f:
+ template_lines = f.readlines()
+
+ # All input is collected and rendered.
+ # It's safe to update the disector.
+
+ # copy each line of the template to required output,
+ # if the 'insertion' is found in the template,
+ # replace it with the buffer content
+ with Output(args.update) as out:
+ for line in template_lines:
+ line = line.rstrip()
+
+ insertion = ins.match(line)
+ if insertion is None:
+ out.dump(line)
+ else:
+ segment = insertion.group(1)
+ [out.dump(i) for i in ctx.buffer[segment]]
+
+if __name__ == '__main__':
+ main()
+
diff --git a/tools/bsd-setup.sh b/tools/bsd-setup.sh
new file mode 100755
index 0000000..6b018c6
--- /dev/null
+++ b/tools/bsd-setup.sh
@@ -0,0 +1,202 @@
+#!/usr/bin/env sh
+# Setup development environment on BSD-like platforms.
+#
+# Tested on: FreeBSD, OpenBSD, NetBSD.
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+# We do not use Bash as the shell for this script, and use the POSIX
+# syntax for function definition rather than the
+# "function <name>() { ... }" syntax, as FreeBSD 13, at least, does
+# not have Bash, and its /bin/sh doesn't support the other syntax.
+#
+
+print_usage() {
+ printf "\\nUtility to setup a bsd-based system for Wireshark Development.\\n"
+ printf "The basic usage installs the needed software\\n\\n"
+ printf "Usage: $0 [--install-optional] [...other options...]\\n"
+ printf "\\t--install-optional: install optional software as well\\n"
+ printf "\\t[other]: other options are passed as-is to pkg manager.\\n"
+}
+
+ADDITIONAL=0
+OPTIONS=
+for arg; do
+ case $arg in
+ --help)
+ print_usage
+ exit 0
+ ;;
+ --install-optional)
+ ADDITIONAL=1
+ ;;
+ *)
+ OPTIONS="$OPTIONS $arg"
+ ;;
+ esac
+done
+
+# Check if the user is root
+if [ $(id -u) -ne 0 ]
+then
+ echo "You must be root."
+ exit 1
+fi
+
+BASIC_LIST="\
+ cmake \
+ qt6 \
+ git \
+ pcre2 \
+ speexdsp"
+
+ADDITIONAL_LIST="\
+ gettext-tools \
+ snappy \
+ bcg729 \
+ libssh \
+ libmaxminddb \
+ libsmi \
+ brotli \
+ zstd \
+ lua52 \
+ "
+
+# Uncomment to add PNG compression utilities used by compress-pngs:
+# ADDITIONAL_LIST="$ADDITIONAL_LIST \
+# advancecomp \
+# optipng \
+# pngcrush"
+
+# Guess which package manager we will use
+PM=`which pkgin 2> /dev/null || which pkg 2> /dev/null || which pkg_add 2> /dev/null`
+
+case $PM in
+ */pkgin)
+ PM_OPTIONS="install"
+ PM_SEARCH="pkgin search"
+ PM_MUST_GLOB=no
+ ;;
+ */pkg)
+ PM_OPTIONS="install"
+ PM_SEARCH="pkg search"
+ PM_MUST_GLOB=yes
+ ;;
+ */pkg_add)
+ PM_OPTIONS=""
+ PM_SEARCH="pkg_info"
+ PM_MUST_GLOB=no
+ ;;
+esac
+
+
+echo "Using $PM ($PM_SEARCH)"
+
+# Adds package $2 to list variable $1 if the package is found
+add_package() {
+ local list="$1" pkgname="$2"
+
+ # fail if the package is not known
+ if [ "$PM_MUST_GLOB" = yes ]
+ then
+ #
+ # We need to do a glob search, with a "*" at the
+ # end, so we only find packages that *begin* with
+ # the name; otherwise, searching for pkg-config
+ # could find packages that *don't* begin with
+ # pkg-config, but have it later in the name
+ # (FreeBSD 11 has one such package), so when
+ # we then try to install it, that fails. Doing
+ # an *exact* search fails, as that requires that
+ # the package name include the version number.
+ #
+ $PM_SEARCH -g "$pkgname*" > /dev/null 2>&1 || return 1
+ else
+ $PM_SEARCH "$pkgname" > /dev/null 2>&1 || return 1
+ fi
+
+ # package is found, append it to list
+ eval "${list}=\"\${${list}} \${pkgname}\""
+}
+
+# pkg-config: NetBSD
+# pkgconf: FreeBSD
+add_package BASIC_LIST pkg-config ||
+add_package BASIC_LIST pkgconf ||
+echo "pkg-config is unavailable"
+
+# c-ares: FreeBSD
+# libcares: OpenBSD
+add_package BASIC_LIST c-ares ||
+add_package BASIC_LIST libcares ||
+echo "c-ares is unavailable"
+
+# rubygem-asciidoctor: FreeBSD
+add_package ADDITIONAL_LIST rubygem-asciidoctor ||
+echo "asciidoctor is unavailable"
+
+# liblz4: FreeBSD
+# lz4: NetBSD
+add_package ADDITIONAL_LIST liblz4 ||
+add_package ADDITIONAL_LIST lz4 ||
+echo "lz4 is unavailable"
+
+# libnghttp2: FreeBSD
+# nghttp2: NetBSD
+add_package ADDITIONAL_LIST libnghttp2 ||
+add_package ADDITIONAL_LIST nghttp2 ||
+echo "nghttp2 is unavailable"
+
+# libnghttp3: FreeBSD
+# nghttp3: NetBSD
+add_package ADDITIONAL_LIST libnghttp3 ||
+add_package ADDITIONAL_LIST nghttp3 ||
+echo "nghttp3 is unavailable"
+
+# spandsp: NetBSD
+add_package ADDITIONAL_LIST spandsp ||
+echo "spandsp is unavailable"
+
+# ninja: FreeBSD, OpenBSD
+# ninja-build: NetBSD
+add_package ADDITIONAL_LIST ninja-build ||
+add_package ADDITIONAL_LIST ninja ||
+echo "ninja is unavailable"
+
+# libilbc: FreeBSD
+add_package ADDITIONAL_LIST libilbc ||
+echo "libilbc is unavailable"
+
+# Add OS-specific required/optional packages
+# Those not listed don't require additions.
+case `uname` in
+ FreeBSD | NetBSD)
+ add_package ADDITIONAL_LIST libgcrypt || echo "libgcrypt is unavailable"
+ ;;
+esac
+
+ACTUAL_LIST=$BASIC_LIST
+
+# Now arrange for optional support libraries
+if [ $ADDITIONAL -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
+fi
+
+$PM $PM_OPTIONS $ACTUAL_LIST $OPTIONS
+if [ ! $? ]
+then
+ exit 2
+fi
+
+if [ $ADDITIONAL -eq 0 ]
+then
+ echo -e "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
+fi
diff --git a/tools/checkAPIs.pl b/tools/checkAPIs.pl
new file mode 100755
index 0000000..c9570b5
--- /dev/null
+++ b/tools/checkAPIs.pl
@@ -0,0 +1,1303 @@
+#!/usr/bin/env perl
+
+#
+# Copyright 2006, Jeff Morriss <jeff.morriss.ws[AT]gmail.com>
+#
+# A simple tool to check source code for function calls that should not
+# be called by Wireshark code and to perform certain other checks.
+#
+# Usage:
+# checkAPIs.pl [-M] [-g group1] [-g group2] ...
+# [-s summary-group1] [-s summary-group2] ...
+# [--nocheck-hf]
+# [--nocheck-value-string-array]
+# [--nocheck-shadow]
+# [--debug]
+# file1 file2 ...
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+use strict;
+use Encode;
+use English;
+use Getopt::Long;
+use Text::Balanced qw(extract_bracketed);
+
+my %APIs = (
+ # API groups.
+ # Group name, e.g. 'prohibited'
+ # '<name>' => {
+ # 'count_errors' => 1, # 1 if these are errors, 0 if warnings
+ # 'functions' => [ 'f1', 'f2', ...], # Function array
+ # 'function-counts' => {'f1',0, 'f2',0, ...}, # Function Counts hash (initialized in the code)
+ # }
+ #
+ # APIs that MUST NOT be used in Wireshark
+ 'prohibited' => { 'count_errors' => 1, 'functions' => [
+ # Memory-unsafe APIs
+ # Use something that won't overwrite the end of your buffer instead
+ # of these.
+ #
+ # Microsoft provides lists of unsafe functions and their
+ # recommended replacements in "Security Development Lifecycle
+ # (SDL) Banned Function Calls"
+ # https://docs.microsoft.com/en-us/previous-versions/bb288454(v=msdn.10)
+ # and "Deprecated CRT Functions"
+ # https://docs.microsoft.com/en-us/previous-versions/ms235384(v=vs.100)
+ #
+ 'atoi', # use wsutil/strtoi.h functions
+ 'gets',
+ 'sprintf',
+ 'g_sprintf',
+ 'vsprintf',
+ 'g_vsprintf',
+ 'strcpy',
+ 'strncpy',
+ 'strcat',
+ 'strncat',
+ 'cftime',
+ 'ascftime',
+ ### non-portable APIs
+ # use glib (g_*) versions instead of these:
+ 'ntohl',
+ 'ntohs',
+ 'htonl',
+ 'htons',
+ 'strdup',
+ 'strndup',
+ # Windows doesn't have this; use g_ascii_strtoull() instead
+ 'strtoull',
+ ### non-portable: fails on Windows Wireshark built with VC newer than VC6
+ # See https://gitlab.com/wireshark/wireshark/-/issues/6695#note_400659130
+ 'g_fprintf',
+ 'g_vfprintf',
+ # use native snprintf() and vsnprintf() instead of these:
+ 'g_snprintf',
+ 'g_vsnprintf',
+ ### non-ANSI C
+ # use memset, memcpy, memcmp instead of these:
+ 'bzero',
+ 'bcopy',
+ 'bcmp',
+ # The MSDN page for ZeroMemory recommends SecureZeroMemory
+ # instead.
+ 'ZeroMemory',
+ # use wmem_*, ep_*, or g_* functions instead of these:
+ # (One thing to be aware of is that space allocated with malloc()
+ # may not be freeable--at least on Windows--with g_free() and
+ # vice-versa.)
+ 'malloc',
+ 'calloc',
+ 'realloc',
+ 'valloc',
+ 'free',
+ 'cfree',
+ # Locale-unsafe APIs
+ # These may have unexpected behaviors in some locales (e.g.,
+ # "I" isn't always the upper-case form of "i", and "i" isn't
+ # always the lower-case form of "I"). Use the g_ascii_* version
+ # instead.
+ 'isalnum',
+ 'isascii',
+ 'isalpha',
+ 'iscntrl',
+ 'isdigit',
+ 'islower',
+ 'isgraph',
+ 'isprint',
+ 'ispunct',
+ 'isspace',
+ 'isupper',
+ 'isxdigit',
+ 'tolower',
+ 'atof',
+ 'strtod',
+ 'strcasecmp',
+ 'strncasecmp',
+ # Deprecated in glib 2.68 in favor of g_memdup2
+ # We have our local implementation for older versions
+ 'g_memdup',
+ 'g_strcasecmp',
+ 'g_strncasecmp',
+ 'g_strup',
+ 'g_strdown',
+ 'g_string_up',
+ 'g_string_down',
+ 'strerror', # use g_strerror
+ # Use the ws_* version of these:
+ # (Necessary because on Windows we use UTF8 for throughout the code
+ # so we must tweak that to UTF16 before operating on the file. Code
+ # using these functions will work unless the file/path name contains
+ # non-ASCII chars.)
+ 'open',
+ 'rename',
+ 'mkdir',
+ 'stat',
+ 'unlink',
+ 'remove',
+ 'fopen',
+ 'freopen',
+ 'fstat',
+ 'lseek',
+ # Misc
+ 'tmpnam', # use mkstemp
+ '_snwprintf' # use StringCchPrintf
+ ] },
+
+ ### Soft-Deprecated functions that should not be used in new code but
+ # have not been entirely removed from old code. These will become errors
+ # once they've been removed from all existing code.
+ 'soft-deprecated' => { 'count_errors' => 0, 'functions' => [
+ 'tvb_length_remaining', # replaced with tvb_captured_length_remaining
+
+ # Locale-unsafe APIs
+ # These may have unexpected behaviors in some locales (e.g.,
+ # "I" isn't always the upper-case form of "i", and "i" isn't
+ # always the lower-case form of "I"). Use the g_ascii_* version
+ # instead.
+ 'toupper'
+ ] },
+
+ # APIs that SHOULD NOT be used in Wireshark (any more)
+ 'deprecated' => { 'count_errors' => 1, 'functions' => [
+ 'perror', # Use g_strerror() and report messages in whatever
+ # fashion is appropriate for the code in question.
+ 'ctime', # Use abs_time_secs_to_str()
+ 'next_tvb_add_port', # Use next_tvb_add_uint() (and a matching change
+ # of NTVB_PORT -> NTVB_UINT)
+
+ ### Deprecated GLib/GObject functions/macros
+ # (The list is based upon the GLib 2.30.2 & GObject 2.30.2 documentation;
+ # An entry may be commented out if it is currently
+ # being used in Wireshark and if the replacement functionality
+ # is not available in all the GLib versions that Wireshark
+ # currently supports.
+ # Note: Wireshark currently (Jan 2012) requires GLib 2.14 or newer.
+ # The Wireshark build currently (Jan 2012) defines G_DISABLE_DEPRECATED
+ # so use of any of the following should cause the Wireshark build to fail and
+ # therefore the tests for obsolete GLib function usage in checkAPIs should not be needed.
+ 'G_ALLOC_AND_FREE',
+ 'G_ALLOC_ONLY',
+ 'g_allocator_free', # "use slice allocator" (avail since 2.10,2.14)
+ 'g_allocator_new', # "use slice allocator" (avail since 2.10,2.14)
+ 'g_async_queue_ref_unlocked', # g_async_queue_ref() (OK since 2.8)
+ 'g_async_queue_unref_and_unlock', # g_async_queue_unref() (OK since 2.8)
+ 'g_atomic_int_exchange_and_add', # since 2.30
+ 'g_basename',
+ 'g_blow_chunks', # "use slice allocator" (avail since 2.10,2.14)
+ 'g_cache_value_foreach', # g_cache_key_foreach()
+ 'g_chunk_free', # g_slice_free (avail since 2.10)
+ 'g_chunk_new', # g_slice_new (avail since 2.10)
+ 'g_chunk_new0', # g_slice_new0 (avail since 2.10)
+ 'g_completion_add_items', # since 2.26
+ 'g_completion_clear_items', # since 2.26
+ 'g_completion_complete', # since 2.26
+ 'g_completion_complete_utf8', # since 2.26
+ 'g_completion_free', # since 2.26
+ 'g_completion_new', # since 2.26
+ 'g_completion_remove_items', # since 2.26
+ 'g_completion_set_compare', # since 2.26
+ 'G_CONST_RETURN', # since 2.26
+ 'g_date_set_time', # g_date_set_time_t (avail since 2.10)
+ 'g_dirname',
+ 'g_format_size_for_display', # since 2.30: use g_format_size()
+ 'G_GNUC_FUNCTION',
+ 'G_GNUC_PRETTY_FUNCTION',
+ 'g_hash_table_freeze',
+ 'g_hash_table_thaw',
+ 'G_HAVE_GINT64',
+ 'g_io_channel_close',
+ 'g_io_channel_read',
+ 'g_io_channel_seek',
+ 'g_io_channel_write',
+ 'g_list_pop_allocator', # "does nothing since 2.10"
+ 'g_list_push_allocator', # "does nothing since 2.10"
+ 'g_main_destroy',
+ 'g_main_is_running',
+ 'g_main_iteration',
+ 'g_main_new',
+ 'g_main_pending',
+ 'g_main_quit',
+ 'g_main_run',
+ 'g_main_set_poll_func',
+ 'g_mapped_file_free', # [as of 2.22: use g_map_file_unref]
+ 'g_mem_chunk_alloc', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_alloc0', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_clean', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_create', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_destroy', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_free', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_info', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_new', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_print', # "use slice allocator" (avail since 2.10)
+ 'g_mem_chunk_reset', # "use slice allocator" (avail since 2.10)
+ 'g_node_pop_allocator', # "does nothing since 2.10"
+ 'g_node_push_allocator', # "does nothing since 2.10"
+ 'g_relation_count', # since 2.26
+ 'g_relation_delete', # since 2.26
+ 'g_relation_destroy', # since 2.26
+ 'g_relation_exists', # since 2.26
+ 'g_relation_index', # since 2.26
+ 'g_relation_insert', # since 2.26
+ 'g_relation_new', # since 2.26
+ 'g_relation_print', # since 2.26
+ 'g_relation_select', # since 2.26
+ 'g_scanner_add_symbol',
+ 'g_scanner_remove_symbol',
+ 'g_scanner_foreach_symbol',
+ 'g_scanner_freeze_symbol_table',
+ 'g_scanner_thaw_symbol_table',
+ 'g_slist_pop_allocator', # "does nothing since 2.10"
+ 'g_slist_push_allocator', # "does nothing since 2.10"
+ 'g_source_get_current_time', # since 2.28: use g_source_get_time()
+ 'g_strcasecmp', #
+ 'g_strdown', #
+ 'g_string_down', #
+ 'g_string_sprintf', # use g_string_printf() instead
+ 'g_string_sprintfa', # use g_string_append_printf instead
+ 'g_string_up', #
+ 'g_strncasecmp', #
+ 'g_strup', #
+ 'g_tree_traverse',
+ 'g_tuples_destroy', # since 2.26
+ 'g_tuples_index', # since 2.26
+ 'g_unicode_canonical_decomposition', # since 2.30: use g_unichar_fully_decompose()
+ 'G_UNICODE_COMBINING_MARK', # since 2.30:use G_UNICODE_SPACING_MARK
+ 'g_value_set_boxed_take_ownership', # GObject
+ 'g_value_set_object_take_ownership', # GObject
+ 'g_value_set_param_take_ownership', # GObject
+ 'g_value_set_string_take_ownership', # Gobject
+ 'G_WIN32_DLLMAIN_FOR_DLL_NAME',
+ 'g_win32_get_package_installation_directory',
+ 'g_win32_get_package_installation_subdirectory',
+ 'qVariantFromValue'
+ ] },
+
+ 'dissectors-prohibited' => { 'count_errors' => 1, 'functions' => [
+ # APIs that make the program exit. Dissectors shouldn't call these.
+ 'abort',
+ 'assert',
+ 'assert_perror',
+ 'exit',
+ 'g_assert',
+ 'g_error',
+ ] },
+
+ 'dissectors-restricted' => { 'count_errors' => 0, 'functions' => [
+ # APIs that print to the terminal. Dissectors shouldn't call these.
+ # FIXME: Explain what to use instead.
+ 'printf',
+ 'g_warning',
+ ] },
+
+);
+
+my @apiGroups = qw(prohibited deprecated soft-deprecated);
+
+# Defines array of pairs function/variable which are excluded
+# from prefs_register_*_preference checks
+my @excludePrefsCheck = (
+ [ qw(prefs_register_password_preference), '(const char **)arg->pref_valptr' ],
+ [ qw(prefs_register_string_preference), '(const char **)arg->pref_valptr' ],
+);
+
+
+# Given a ref to a hash containing "functions" and "functions_count" entries:
+# Determine if any item of the list of APIs contained in the array referenced by "functions"
+# exists in the file.
+# For each API which appears in the file:
+# Push the API onto the provided list;
+# Add the number of times the API appears in the file to the total count
+# for the API (stored as the value of the API key in the hash referenced by "function_counts").
+
+sub findAPIinFile($$$)
+{
+ my ($groupHashRef, $fileContentsRef, $foundAPIsRef) = @_;
+
+ for my $api ( @{$groupHashRef->{functions}} )
+ {
+ my $cnt = 0;
+ # Match function calls, but ignore false positives from:
+ # C++ method definition: int MyClass::open(...)
+ # Method invocation: myClass->open(...);
+ # Function declaration: int open(...);
+ # Method invocation: QString().sprintf(...)
+ while (${$fileContentsRef} =~ m/ \W (?<!::|->|\w\ ) (?<!\.) $api \W* \( /gx)
+ {
+ $cnt += 1;
+ }
+ if ($cnt > 0) {
+ push @{$foundAPIsRef}, $api;
+ $groupHashRef->{function_counts}->{$api} += 1;
+ }
+ }
+}
+
+# APIs which (generally) should not be called with an argument of tvb_get_ptr()
+my @TvbPtrAPIs = (
+ # Use NULL for the value_ptr instead of tvb_get_ptr() (only if the
+ # given offset and length are equal) with these:
+ 'proto_tree_add_bytes_format',
+ 'proto_tree_add_bytes_format_value',
+ 'proto_tree_add_ether',
+ # Use the tvb_* version of these:
+ # Use tvb_bytes_to_str[_punct] instead of:
+ 'bytes_to_str',
+ 'bytes_to_str_punct',
+ 'SET_ADDRESS',
+ 'SET_ADDRESS_HF',
+);
+
+sub checkAPIsCalledWithTvbGetPtr($$$)
+{
+ my ($APIs, $fileContentsRef, $foundAPIsRef) = @_;
+
+ for my $api (@{$APIs}) {
+ my @items;
+ my $cnt = 0;
+
+ @items = (${$fileContentsRef} =~ m/ ($api [^;]* ; ) /xsg);
+ while (@items) {
+ my ($item) = @items;
+ shift @items;
+ if ($item =~ / tvb_get_ptr /xos) {
+ $cnt += 1;
+ }
+ }
+
+ if ($cnt > 0) {
+ push @{$foundAPIsRef}, $api;
+ }
+ }
+}
+
+# List of possible shadow variable (Majority coming from macOS..)
+my @ShadowVariable = (
+ 'index',
+ 'time',
+ 'strlen',
+ 'system'
+);
+
+sub check_shadow_variable($$$)
+{
+ my ($groupHashRef, $fileContentsRef, $foundAPIsRef) = @_;
+
+ for my $api ( @{$groupHashRef} )
+ {
+ my $cnt = 0;
+ while (${$fileContentsRef} =~ m/ \s $api \s*+ [^\(\w] /gx)
+ {
+ $cnt += 1;
+ }
+ if ($cnt > 0) {
+ push @{$foundAPIsRef}, $api;
+ }
+ }
+}
+
+sub check_snprintf_plus_strlen($$)
+{
+ my ($fileContentsRef, $filename) = @_;
+ my @items;
+
+ # This catches both snprintf() and g_snprint.
+ # If we need to do more APIs, we can make this function look more like
+ # checkAPIsCalledWithTvbGetPtr().
+ @items = (${$fileContentsRef} =~ m/ (snprintf [^;]* ; ) /xsg);
+ while (@items) {
+ my ($item) = @items;
+ shift @items;
+ if ($item =~ / strlen\s*\( /xos) {
+ print STDERR "Warning: ".$filename." uses snprintf + strlen to assemble strings.\n";
+ last;
+ }
+ }
+}
+
+#### Regex for use when searching for value-string definitions
+my $StaticRegex = qr/ static \s+ /xs;
+my $ConstRegex = qr/ const \s+ /xs;
+my $Static_andor_ConstRegex = qr/ (?: $StaticRegex $ConstRegex | $StaticRegex | $ConstRegex) /xs;
+my $ValueStringVarnameRegex = qr/ (?:value|val64|string|range|bytes)_string /xs;
+my $ValueStringRegex = qr/ $Static_andor_ConstRegex ($ValueStringVarnameRegex) \ + [^;*#]+ = [^;]+ [{] .+? [}] \s*? ; /xs;
+my $EnumValRegex = qr/ $Static_andor_ConstRegex enum_val_t \ + [^;*]+ = [^;]+ [{] .+? [}] \s*? ; /xs;
+my $NewlineStringRegex = qr/ ["] [^"]* \\n [^"]* ["] /xs;
+
+sub check_value_string_arrays($$$)
+{
+ my ($fileContentsRef, $filename, $debug_flag) = @_;
+ my $cnt = 0;
+ # Brute force check for value_string (and string_string or range_string) arrays
+ # which are missing {0, NULL} as the final (terminating) array entry
+
+ # Assumption: definition is of form (pseudo-Regex):
+ # " (static const|static|const) (value|string|range)_string .+ = { .+ ;"
+ # (possibly over multiple lines)
+ while (${$fileContentsRef} =~ / ( $ValueStringRegex ) /xsog) {
+ # XXX_string array definition found; check if NULL terminated
+ my $vs = my $vsx = $1;
+ my $type = $2;
+ if ($debug_flag) {
+ $vsx =~ / ( .+ $ValueStringVarnameRegex [^=]+ ) = /xo;
+ printf STDERR "==> %-35.35s: %s\n", $filename, $1;
+ printf STDERR "%s\n", $vs;
+ }
+ $vs =~ s{ \s } {}xg;
+
+ # Check for expected trailer
+ my $expectedTrailer;
+ my $trailerHint;
+ if ($type eq "string_string") {
+ # XXX shouldn't we reject 0 since it is gchar*?
+ $expectedTrailer = "(NULL|0), NULL";
+ $trailerHint = "NULL, NULL";
+ } elsif ($type eq "range_string") {
+ $expectedTrailer = "0(x0+)?, 0(x0+)?, NULL";
+ $trailerHint = "0, 0, NULL";
+ } elsif ($type eq "bytes_string") {
+ # XXX shouldn't we reject 0 since it is guint8*?
+ $expectedTrailer = "(NULL|0), 0, NULL";
+ $trailerHint = "NULL, NULL";
+ } else {
+ $expectedTrailer = "0(x?0+)?, NULL";
+ $trailerHint = "0, NULL";
+ }
+ if ($vs !~ / [{] $expectedTrailer [}] ,? [}] ; $/x) {
+ $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo;
+ printf STDERR "Error: %-35.35s: {%s} is required as the last %s array entry: %s\n", $filename, $trailerHint, $type, $1;
+ $cnt++;
+ }
+
+ if ($vs !~ / (static)? const $ValueStringVarnameRegex /xo) {
+ $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo;
+ printf STDERR "Error: %-35.35s: Missing 'const': %s\n", $filename, $1;
+ $cnt++;
+ }
+ if ($vs =~ / $NewlineStringRegex /xo && $type ne "bytes_string") {
+ $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo;
+ printf STDERR "Error: %-35.35s: XXX_string contains a newline: %s\n", $filename, $1;
+ $cnt++;
+ }
+ }
+
+ # Brute force check for enum_val_t arrays which are missing {NULL, NULL, ...}
+ # as the final (terminating) array entry
+ # For now use the same option to turn this and value_string checking on and off.
+ # (Is the option even necessary?)
+
+ # Assumption: definition is of form (pseudo-Regex):
+ # " (static const|static|const) enum_val_t .+ = { .+ ;"
+ # (possibly over multiple lines)
+ while (${$fileContentsRef} =~ / ( $EnumValRegex ) /xsog) {
+ # enum_val_t array definition found; check if NULL terminated
+ my $vs = my $vsx = $1;
+ if ($debug_flag) {
+ $vsx =~ / ( .+ enum_val_t [^=]+ ) = /xo;
+ printf STDERR "==> %-35.35s: %s\n", $filename, $1;
+ printf STDERR "%s\n", $vs;
+ }
+ $vs =~ s{ \s } {}xg;
+ # README.developer says
+ # "Don't put a comma after the last tuple of an initializer of an array"
+ # However: since this usage is present in some number of cases, we'll allow for now
+ if ($vs !~ / NULL, NULL, -?[0-9] [}] ,? [}] ; $/xo) {
+ $vsx =~ /( enum_val_t [^=]+ ) = /xo;
+ printf STDERR "Error: %-35.35s: {NULL, NULL, ...} is required as the last enum_val_t array entry: %s\n", $filename, $1;
+ $cnt++;
+ }
+ if ($vs !~ / (static)? const enum_val_t /xo) {
+ $vsx =~ /( enum_val_t [^=]+ ) = /xo;
+ printf STDERR "Error: %-35.35s: Missing 'const': %s\n", $filename, $1;
+ $cnt++;
+ }
+ if ($vs =~ / $NewlineStringRegex /xo) {
+ $vsx =~ /( (?:value|string|range)_string [^=]+ ) = /xo;
+ printf STDERR "Error: %-35.35s: enum_val_t contains a newline: %s\n", $filename, $1;
+ $cnt++;
+ }
+ }
+
+ return $cnt;
+}
+
+
+sub check_included_files($$)
+{
+ my ($fileContentsRef, $filename) = @_;
+ my @incFiles;
+
+ @incFiles = (${$fileContentsRef} =~ m/\#include \s* ([<"].+[>"])/gox);
+
+ # files in the ui/qt directory should include the ui class includes
+ # by using #include <>
+ # this ensures that Visual Studio picks up these files from the
+ # build directory if we're compiling with cmake
+ if ($filename =~ m#ui/qt/# ) {
+ foreach (@incFiles) {
+ if ( m#"ui_.*\.h"$# ) {
+ # strip the quotes to get the base name
+ # for the error message
+ s/\"//g;
+
+ print STDERR "$filename: ".
+ "Please use #include <$_> ".
+ "instead of #include \"$_\".\n";
+ }
+ }
+ }
+}
+
+
+sub check_proto_tree_add_XXX($$)
+{
+ my ($fileContentsRef, $filename) = @_;
+ my @items;
+ my $errorCount = 0;
+
+ @items = (${$fileContentsRef} =~ m/ (proto_tree_add_[_a-z0-9]+) \( ([^;]*) \) \s* ; /xsg);
+
+ while (@items) {
+ my ($func) = @items;
+ shift @items;
+ my ($args) = @items;
+ shift @items;
+
+ #Check to make sure tvb_get* isn't used to pass into a proto_tree_add_<datatype>, when
+ #proto_tree_add_item could just be used instead
+ if ($args =~ /,\s*tvb_get_/xos) {
+ if (($func =~ m/^proto_tree_add_(time|bytes|ipxnet|ipv4|ipv6|ether|guid|oid|string|boolean|float|double|uint|uint64|int|int64|eui64|bitmask_list_value)$/)
+ ) {
+ print STDERR "Error: ".$filename." uses $func with tvb_get_*. Use proto_tree_add_item instead\n";
+ $errorCount++;
+
+ # Print out the function args to make it easier
+ # to find the offending code. But first make
+ # it readable by eliminating extra white space.
+ $args =~ s/\s+/ /g;
+ print STDERR "\tArgs: " . $args . "\n";
+ }
+ }
+
+ # Remove anything inside parenthesis in the arguments so we
+ # don't get false positives when someone calls
+ # proto_tree_add_XXX(..., tvb_YYY(..., ENC_ZZZ))
+ # and allow there to be newlines inside
+ $args =~ s/\(.*\)//sg;
+
+ #Check for accidental usage of ENC_ parameter
+ if ($args =~ /,\s*ENC_/xos) {
+ if (!($func =~ /proto_tree_add_(time|item|bitmask|[a-z0-9]+_bits_format_value|bits_item|bits_ret_val|item_ret_int|item_ret_uint|bytes_item|checksum)/xos)
+ ) {
+ print STDERR "Error: ".$filename." uses $func with ENC_*.\n";
+ $errorCount++;
+
+ # Print out the function args to make it easier
+ # to find the offending code. But first make
+ # it readable by eliminating extra white space.
+ $args =~ s/\s+/ /g;
+ print STDERR "\tArgs: " . $args . "\n";
+ }
+ }
+ }
+
+ return $errorCount;
+}
+
+
+# Verify that all declared ett_ variables are registered.
+# Don't bother trying to check usage (for now)...
sub check_ett_registration($$)
{
        my ($fileContentsRef, $filename) = @_;
        my $errorCount = 0;

        # A pattern to match ett variable names. Obviously this assumes that
        # they start with `ett_`; an optional fixed-size array subscript is
        # also accepted.
        my $EttVarName = qr{ (?: ett_[a-z0-9_]+ (?:\[[0-9]+\])? ) }xi;

        # Find all the ett_ variables declared in the file
        my @ett_declarations = (${$fileContentsRef} =~ m{
                ^                       # assume declarations are on their own line
                (?:static\s+)?          # some declarations aren't static
                g?int                   # could be int or gint
                \s+
                ($EttVarName)           # variable name
                \s*=\s*
                -1\s*;
        }xgiom);

        if (!@ett_declarations) {
                # Only complain if the file looks like a dissector
                #print STDERR "Found no etts in ".$filename."\n" if
                #       (${$fileContentsRef} =~ m{proto_register_field_array}os);
                # Return an explicit 0 (a bare "return" yields undef, and the
                # caller adds our result to its running error count, which
                # would trigger an "uninitialized value" warning).
                return 0;
        }
        #print "Found these etts in ".$filename.": ".join(' ', @ett_declarations)."\n\n";

        # Find all the uses of the *addresses* of ett variables in the file.
        # (We assume if someone is using the address they're using it to
        # register the ett.)
        my @ett_address_uses = (${$fileContentsRef} =~ m{
                &\s*($EttVarName)
        }xgiom);

        if (!@ett_address_uses) {
                print STDERR "Found no ett address uses in ".$filename."\n";
                # Don't treat this as an error.
                # It's more likely a problem with checkAPIs.
                return 0;
        }
        #print "Found these etts addresses used in ".$filename.": ".join(' ', @ett_address_uses)."\n\n";

        # Convert to a hash for fast lookup
        my %ett_uses;
        $ett_uses{$_}++ for (@ett_address_uses);

        # Find which declared etts are never registered.
        my @unUsedEtts = grep { not exists $ett_uses{$_} } @ett_declarations;

        if (@unUsedEtts) {
                print STDERR "Error: found these unused ett variables in ".$filename.": ".join(' ', @unUsedEtts)."\n";
                $errorCount++;
        }

        return $errorCount;
}
+
+# Given the file contents and a file name, check all of the hf entries for
+# various problems (such as those checked for in proto.c).
sub check_hf_entries($$)
{
        my ($fileContentsRef, $filename) = @_;
        my $errorCount = 0;

        my @items;
        # Matches the "&hf_..." member that opens an hf_register_info entry.
        my $hfRegex = qr{
                \{
                \s*
                &\s*([A-Z0-9_\[\]-]+)           # &hf
                \s*,\s*
        }xis;
        # Capture the eight interesting members of each hf_register_info
        # entry: hf variable, name, abbrev, field type, display, convert
        # ('strings' pointer), bitmask and blurb.  Entries that don't fit
        # these patterns are silently skipped.
        @items = (${$fileContentsRef} =~ m{
                $hfRegex                        # &hf
                \{\s*
                ("[A-Z0-9 '\./\(\)_:-]+")       # name
                \s*,\s*
                (NULL|"[A-Z0-9_\.-]*")          # abbrev
                \s*,\s*
                (FT_[A-Z0-9_]+)                 # field type
                \s*,\s*
                ([A-Z0-9x\|_\s]+)               # display
                \s*,\s*
                ([^,]+?)                        # convert
                \s*,\s*
                ([A-Z0-9_]+)                    # bitmask
                \s*,\s*
                (NULL|"[A-Z0-9 '\./\(\)\?_:-]+")        # blurb (NULL or a string)
                \s*,\s*
                HFILL                           # HFILL
        }xgios);

        #print "Found @items items\n";
        # @items is a flat list: consume eight captures per hf entry.
        while (@items) {
                ##my $errorCount_save = $errorCount;
                my ($hf, $name, $abbrev, $ft, $display, $convert, $bitmask, $blurb) = @items;
                shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; shift @items;

                # Normalize whitespace so the checks below can use simple patterns.
                $display =~ s/\s+//g;
                $convert =~ s/\s+//g;
                # GET_VALS_EXTP is a macro in packet-mq.h for packet-mq.c and packet-mq-pcf.c
                $convert =~ s/\bGET_VALS_EXTP\(/VALS_EXT_PTR\(/;

                #print "name=$name, abbrev=$abbrev, ft=$ft, display=$display, convert=>$convert<, bitmask=$bitmask, blurb=$blurb\n";

                # The checks below mirror the run-time checks done in epan/proto.c.
                if ($abbrev eq '""' || $abbrev eq "NULL") {
                        print STDERR "Error: $hf does not have an abbreviation in $filename\n";
                        $errorCount++;
                }
                if ($abbrev =~ m/\.\.+/) {
                        print STDERR "Error: the abbreviation for $hf ($abbrev) contains two or more sequential periods in $filename\n";
                        $errorCount++;
                }
                if ($name eq $abbrev) {
                        print STDERR "Error: the abbreviation for $hf ($abbrev) matches the field name ($name) in $filename\n";
                        $errorCount++;
                }
                if (lc($name) eq lc($blurb)) {
                        print STDERR "Error: the blurb for $hf ($blurb) matches the field name ($name) in $filename\n";
                        $errorCount++;
                }
                if ($name =~ m/"\s+/) {
                        print STDERR "Error: the name for $hf ($name) has leading space in $filename\n";
                        $errorCount++;
                }
                if ($name =~ m/\s+"/) {
                        print STDERR "Error: the name for $hf ($name) has trailing space in $filename\n";
                        $errorCount++;
                }
                if ($blurb =~ m/"\s+/) {
                        print STDERR "Error: the blurb for $hf ($blurb) has leading space in $filename\n";
                        $errorCount++;
                }
                if ($blurb =~ m/\s+"/) {
                        print STDERR "Error: the blurb for $hf ($blurb) has trailing space in $filename\n";
                        $errorCount++;
                }
                if ($abbrev =~ m/\s+/) {
                        print STDERR "Error: the abbreviation for $hf ($abbrev) has white space in $filename\n";
                        $errorCount++;
                }
                if ("\"".$hf ."\"" eq $name) {
                        print STDERR "Error: name is the hf_variable_name in field $name ($abbrev) in $filename\n";
                        $errorCount++;
                }
                if ("\"".$hf ."\"" eq $abbrev) {
                        print STDERR "Error: abbreviation is the hf_variable_name in field $name ($abbrev) in $filename\n";
                        $errorCount++;
                }
                if ($ft ne "FT_BOOLEAN" && $convert =~ m/^TFS\(.*\)/) {
                        print STDERR "Error: $hf uses a true/false string but is an $ft instead of FT_BOOLEAN in $filename\n";
                        $errorCount++;
                }
                if ($ft eq "FT_BOOLEAN" && $convert =~ m/^VALS\(.*\)/) {
                        print STDERR "Error: $hf uses a value_string but is an FT_BOOLEAN in $filename\n";
                        $errorCount++;
                }
                if (($ft eq "FT_BOOLEAN") && ($bitmask !~ /^(0x)?0+$/) && ($display =~ /^BASE_/)) {
                        print STDERR "Error: $hf: FT_BOOLEAN with a bitmask must specify a 'parent field width' for 'display' in $filename\n";
                        $errorCount++;
                }
                if (($ft eq "FT_BOOLEAN") && ($convert !~ m/^((0[xX]0?)?0$|NULL$|TFS)/)) {
                        print STDERR "Error: $hf: FT_BOOLEAN with non-null 'convert' field missing TFS in $filename\n";
                        $errorCount++;
                }
                if ($convert =~ m/RVALS/ && $display !~ m/BASE_RANGE_STRING/) {
                        print STDERR "Error: $hf uses RVALS but 'display' does not include BASE_RANGE_STRING in $filename\n";
                        $errorCount++;
                }
                if ($convert =~ m/VALS64/ && $display !~ m/BASE_VAL64_STRING/) {
                        print STDERR "Error: $hf uses VALS64 but 'display' does not include BASE_VAL64_STRING in $filename\n";
                        $errorCount++;
                }
                if ($display =~ /BASE_EXT_STRING/ && $convert !~ /^(VALS_EXT_PTR\(|&)/) {
                        print STDERR "Error: $hf: BASE_EXT_STRING should use VALS_EXT_PTR for 'strings' instead of '$convert' in $filename\n";
                        $errorCount++;
                }
                if ($ft =~ m/^FT_U?INT(8|16|24|32)$/ && $convert =~ m/^VALS64\(/) {
                        print STDERR "Error: $hf: 32-bit field must use VALS instead of VALS64 in $filename\n";
                        $errorCount++;
                }
                if ($ft =~ m/^FT_U?INT(40|48|56|64)$/ && $convert =~ m/^VALS\(/) {
                        print STDERR "Error: $hf: 64-bit field must use VALS64 instead of VALS in $filename\n";
                        $errorCount++;
                }
                if ($convert =~ m/^(VALS|VALS64|RVALS)\(&.*\)/) {
                        print STDERR "Error: $hf is passing the address of a pointer to $1 in $filename\n";
                        $errorCount++;
                }
                if ($convert !~ m/^((0[xX]0?)?0$|NULL$|VALS|VALS64|VALS_EXT_PTR|RVALS|TFS|CF_FUNC|FRAMENUM_TYPE|&|STRINGS_ENTERPRISES)/ && $display !~ /BASE_CUSTOM/) {
                        print STDERR "Error: non-null $hf 'convert' field missing 'VALS|VALS64|RVALS|TFS|CF_FUNC|FRAMENUM_TYPE|&|STRINGS_ENTERPRISES' in $filename ?\n";
                        $errorCount++;
                }
## Benign...
##              if (($ft eq "FT_BOOLEAN") && ($bitmask =~ /^(0x)?0+$/) && ($display ne "BASE_NONE")) {
##                      print STDERR "Error: $abbrev: FT_BOOLEAN with no bitmask must use BASE_NONE for 'display' in $filename\n";
##                      $errorCount++;
##              }
                ##if ($errorCount != $errorCount_save) {
                ##      print STDERR "name=$name, abbrev=$abbrev, ft=$ft, display=$display, convert=>$convert<, bitmask=$bitmask, blurb=$blurb\n";
                ##}

        }

        return $errorCount;
}
+
sub check_pref_var_dupes($$)
{
        # Detect preference variables that are registered more than once via
        # prefs_register_*_preference(); returns the number of problems found.
        my ($filecontentsref, $filename) = @_;
        my $errorcount = 0;

        # Avoid flagging the actual prototypes
        return 0 if $filename =~ /prefs\.[ch]$/;

        # remove macro lines
        my $filecontents = ${$filecontentsref};
        $filecontents =~ s { ^\s*\#.*$} []xogm;

        # At what position is the variable in the prefs_register_*_preference() call?
        my %prefs_register_var_pos = (
                static_text => undef, obsolete => undef, # ignore
                decode_as_range => -2, range => -2, filename => -2, # second to last
                enum => -3, # third to last
                # everything else is the last argument
        );

        my @dupes;
        my %count;
        while ($filecontents =~ /prefs_register_(\w+?)_preference/gs) {
                my ($func) = "prefs_register_$1_preference";
                # Text::Balanced's extract_bracketed() pulls out the whole
                # parenthesised argument list that follows the call name.
                my ($args) = extract_bracketed(substr($filecontents, $+[0]), '()');
                $args = substr($args, 1, -1); # strip parens

                my $pos = $prefs_register_var_pos{$1};
                # static_text/obsolete prefs have no variable at all: skip them.
                next if exists $prefs_register_var_pos{$1} and not defined $pos;
                $pos //= -1;
                my $var = (split /\s*,\s*(?![^(]*\))/, $args)[$pos]; # only commas outside parens

                # Honour the global exclusion list (@excludePrefsCheck).
                my $ignore = 0;
                for my $row (@excludePrefsCheck) {
                        my ($rfunc, $rvar) = @$row;
                        if (($rfunc eq $func) && ($rvar eq $var)) {
                                $ignore = 1
                        }
                }
                if (!$ignore) {
                        # Record a dupe only the first time a variable is seen twice.
                        push @dupes, $var if $count{$var}++ == 1;
                }
        }

        if (@dupes) {
                print STDERR "$filename: error: found these preference variables used in more than one prefs_register_*_preference:\n\t".join(', ', @dupes)."\n";
                $errorcount++;
        }

        return $errorcount;
}
+
+# Check for forbidden control flow changes, see epan/exceptions.h
sub check_try_catch($$)
{
        # Reject control-flow constructs that must not appear inside the
        # TRY/ENDTRY exception macros (see epan/exceptions.h).  Returns the
        # number of violations found.
        my ($fileContentsRef, $filename) = @_;
        my $errorCount = 0;

        # Match TRY { ... } ENDTRY (with an optional '\' in case of a macro).
        my @tryBlocks = (${$fileContentsRef} =~ m/ \bTRY\s*\{ (.+?) \}\s* \\? \s*ENDTRY\b /xsg);
        foreach my $body (@tryBlocks) {
                # A plain return would skip the exception bookkeeping entirely.
                if ($body =~ m/ \breturn\b /x) {
                        print STDERR "Error: return is forbidden in TRY/CATCH in $filename\n";
                        $errorCount++;
                }

                # Complain once per distinct goto label whose definition does
                # not appear inside the same TRY block.
                my %checked;
                foreach my $label ($body =~ m/ \bgoto\s+ (\w+) /xsg) {
                        next if $checked{$label}++;

                        if ($body !~ /^ \s* $label \s* :/xsgm) {
                                print STDERR "Error: goto to label '$label' outside TRY/CATCH is forbidden in $filename\n";
                                $errorCount++;
                        }
                }
        }

        return $errorCount;
}
+
sub print_usage
{
        # Print the command-line help for checkAPIs.pl to stdout.
        print "Usage: checkAPIs.pl [-M] [-h] [-g group1[:count]] [-g group2] ... \n";
        print "                    [-summary-group group1] [-summary-group group2] ... \n";
        print "                    [--sourcedir=srcdir] \n";
        print "                    [--nocheck-hf]\n";
        print "                    [--nocheck-value-string-array] \n";
        print "                    [--nocheck-shadow]\n";
        print "                    [--debug]\n";
        print "                    [--file=/path/to/file_list]\n";
        print "                    file1 file2 ...\n";
        print "\n";
        print "       -M: Generate output for -g in 'machine-readable' format\n";
        print "       -p: used by the git pre-commit hook\n";
        print "       -h: help, print usage message\n";
        print "       -g <group>: Check input files for use of APIs in <group>\n";
        print "           (in addition to the default groups)\n";
        print "           Maximum uses can be specified with <group>:<count>\n";
        print "       -summary-group <group>: Output summary (count) for each API in <group>\n";
        print "           (-g <group> also req'd)\n";
        print "       --nocheck-hf: Skip header field definition checks\n";
        print "       --nocheck-value-string-array: Skip value string array checks\n";
        print "       --nocheck-shadow: Skip shadow variable checks\n";
        print "       --debug: UNDOCUMENTED\n";
        print "\n";
        # These two lines are dynamic: @apiGroups / %APIs are module globals.
        print "   Default Groups[-g]: ", join (", ", sort @apiGroups), "\n";
        print "   Available Groups:   ", join (", ", sort keys %APIs), "\n";
}
+
+# -------------
+# action: remove '#if 0'd code from the input string
+# args codeRef, fileName
+# returns: codeRef
+#
+# Essentially: split the input into blocks of code or lines of #if/#if 0/etc.
+# Remove blocks that follow '#if 0' until '#else/#endif' is found.
+
{ # block begin
# Debug level shared by remove_if0_code(); file-private thanks to this bare block.
my $debug = 0;

    # action:  remove '#if 0'-disabled code from ${$codeRef}, in place.
    # args:    codeRef, fileName
    # returns: codeRef
    sub remove_if0_code {
        my ($codeRef, $fileName) = @_;

        # Preprocess output (ensure trailing LF and no leading WS before '#')
        $$codeRef =~ s/^\s*#/#/m;
        if ($$codeRef !~ /\n$/) { $$codeRef .= "\n"; }

        # Split into blocks of normal code or lines with conditionals.
        my $ifRegExp = qr/if 0|if|else|endif/;
        my @blocks = split(/^(#\s*(?:$ifRegExp).*\n)/m, $$codeRef);

        # $if_lvl:  current #if nesting depth
        # $if0_lvl: depth at which the innermost '#if 0' was seen (0 = none)
        # $if0:     1 while we are inside a '#if 0' region
        my ($if_lvl, $if0_lvl, $if0) = (0,0,0);
        my $lines = '';
        for my $block (@blocks) {
            my $if;
            if ($block =~ /^#\s*($ifRegExp)/) {
                # #if/#if 0/#else/#endif processing
                $if = $1;
                if ($debug == 99) {
                    print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl [$if] - $block");
                }
                if ($if eq 'if') {
                    $if_lvl += 1;
                } elsif ($if eq 'if 0') {
                    $if_lvl += 1;
                    if ($if0_lvl == 0) {
                        $if0_lvl = $if_lvl;
                        $if0 = 1; # inside #if 0
                    }
                } elsif ($if eq 'else') {
                    # '#else' of the '#if 0' re-enables code emission.
                    if ($if0_lvl == $if_lvl) {
                        $if0 = 0;
                    }
                } elsif ($if eq 'endif') {
                    if ($if0_lvl == $if_lvl) {
                        $if0 = 0;
                        $if0_lvl = 0;
                    }
                    $if_lvl -= 1;
                    if ($if_lvl < 0) {
                        die "patsub: #if/#endif mismatch in $fileName"
                    }
                }
            }

            if ($debug == 99) {
                print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl\n");
            }
            # Keep preprocessor lines and blocks that are not enclosed in #if 0
            if ($if or $if0 != 1) {
                $lines .= $block;
            }
        }
        $$codeRef = $lines;

        ($debug == 2) && print "==> After Remove if0: code: [$fileName]\n$$codeRef\n===<\n";
        return $codeRef;
    }
} # block end
+
# The below Regexp are based on those from:
# https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811
# They are in the public domain.
# They are used by the main loop below to strip string literals from the
# file contents before scanning for API usage.

# 2. A regex which matches double-quoted strings.
#    ?s added so that strings containing a 'line continuation'
#    ( \ followed by a new-line) will match.
my $DoubleQuotedStr = qr{ (?: ["] (?s: \\. | [^\"\\])* ["]) }x;

# 3. A regex which matches single-quoted strings.
my $SingleQuotedStr = qr{ (?: \' (?: \\. | [^\'\\])* [']) }x;
+
#
# MAIN
#
my $errorCount = 0;

# The default list, which can be expanded.
my @apiSummaryGroups = ();
my $machine_readable_output = 0;        # default: disabled
my $check_hf = 1;                       # default: enabled
my $check_value_string_array= 1;        # default: enabled
my $check_shadow = 1;                   # default: enabled
my $debug_flag = 0;                     # default: disabled
my $source_dir = "";
my $filenamelist = "";
my $help_flag = 0;
my $pre_commit = 0;

# Parse command-line options; see print_usage() for their meaning.
my $result = GetOptions(
        'group=s' => \@apiGroups,
        'summary-group=s' => \@apiSummaryGroups,
        'Machine-readable' => \$machine_readable_output,
        'check-hf!' => \$check_hf,
        'check-value-string-array!' => \$check_value_string_array,
        'check-shadow!' => \$check_shadow,
        'sourcedir=s' => \$source_dir,
        'debug' => \$debug_flag,
        'pre-commit' => \$pre_commit,
        'file=s' => \$filenamelist,
        'help' => \$help_flag
        );
if (!$result || $help_flag) {
        print_usage();
        exit(1);
}

# the pre-commit hook only calls checkAPIs one file at a time, so this
# is safe to do globally (and easier)
if ($pre_commit) {
        my $filename = $ARGV[0];
        # if the filename is packet-*.c or packet-*.h, then we set the abort and termoutput groups.
        if ($filename =~ /\bpacket-[^\/\\]+\.[ch]$/) {
                push @apiGroups, "abort";
                push @apiGroups, "termoutput";
        }
}

# Add a 'function_count' anonymous hash to each of the 'apiGroup' entries in the %APIs hash.
for my $apiGroup (keys %APIs) {
        my @functions = @{$APIs{$apiGroup}{functions}};

        $APIs{$apiGroup}->{function_counts} = {};
        @{$APIs{$apiGroup}->{function_counts}}{@functions} = (); # Add fcn names as keys to the anonymous hash
        # max_function_count of -1 means "no limit"; groups whose use is an
        # outright error get a maximum of 0.
        $APIs{$apiGroup}->{max_function_count} = -1;
        if ($APIs{$apiGroup}->{count_errors}) {
                $APIs{$apiGroup}->{max_function_count} = 0;
        }
        $APIs{$apiGroup}->{cur_function_count} = 0;
}

# Build the work list: files named on the command line plus an optional
# ';'-separated list read from --file.
my @filelist;
push @filelist, @ARGV;
if ("$filenamelist" ne "") {
        # We have a file containing a list of files to check (possibly in
        # addition to those on the command line).
        open(FC, $filenamelist) || die("Couldn't open $filenamelist");

        while (<FC>) {
                # file names can be separated by ;
                push @filelist, split(';');
        }
        close(FC);
}

die "no files to process" unless (scalar @filelist);
+
# Read through the files; do various checks
while ($_ = pop @filelist)
{
        my $filename = $_;
        my $fileContents = '';
        my @foundAPIs = ();
        my $line;

        if ($source_dir and ! -e $filename) {
                $filename = $source_dir . '/' . $filename;
        }
        if (! -e $filename) {
                warn "No such file: \"$filename\"";
                next;
        }

        # delete leading './'
        $filename =~ s{ ^ \. / } {}xo;
        unless (-f $filename) {
                print STDERR "Warning: $filename is not of type file - skipping.\n";
                next;
        }

        # Read in the file (ouch, but it's easier that way)
        # Each line is also checked for being valid UTF-8 as we go.
        open(FC, $filename) || die("Couldn't open $filename");
        $line = 1;
        while (<FC>) {
                $fileContents .= $_;
                eval { decode( 'UTF-8', $_, Encode::FB_CROAK ) };
                if ($EVAL_ERROR) {
                        print STDERR "Error: Found an invalid UTF-8 sequence on line " .$line. " of " .$filename."\n";
                        $errorCount++;
                }
                $line++;
        }
        close(FC);

        if (($fileContents =~ m{ \$Id .* \$ }xo))
        {
                print STDERR "Warning: ".$filename." has an SVN Id tag. Please remove it!\n";
        }

        if (($fileContents =~ m{ tab-width:\s*[0-7|9]+ | tabstop=[0-7|9]+ | tabSize=[0-7|9]+ }xo))
        {
                # To quote Icf0831717de10fc615971fa1cf75af2f1ea2d03d :
                # HT tab stops are set every 8 spaces on UN*X; UN*X tools that treat an HT character
                # as tabbing to 4-space tab stops, or that even are configurable but *default* to
                # 4-space tab stops (I'm looking at *you*, Xcode!) are broken. tab-width: 4,
                # tabstop=4, and tabSize=4 are errors if you ever expect anybody to look at your file
                # with a UN*X tool, and every text file will probably be looked at by a UN*X tool at
                # some point, so Don't Do That.
                #
                # Can I get an "amen!"?
                print STDERR "Error: Found modelines with tabstops set to something other than 8 in " .$filename."\n";
                $errorCount++;
        }

        # Remove C/C++ comments
        # The below pattern is modified (to keep newlines at the end of C++-style comments) from that at:
        # https://perldoc.perl.org/perlfaq6.html#How-do-I-use-a-regular-expression-to-strip-C-style-comments-from-a-file?
        $fileContents =~ s#/\*[^*]*\*+([^/*][^*]*\*+)*/|//([^\\]|[^\n][\n]?)*?\n|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^/"'\\]*)#defined $3 ? $3 : "\n"#gse;

        # optionally check the hf entries (including those under #if 0)
        if ($check_hf) {
                $errorCount += check_hf_entries(\$fileContents, $filename);
        }

        if ($fileContents =~ m{ %\d*?ll }dxo)
        {
                # use PRI[dux...]N instead of ll
                print STDERR "Error: Found %ll in " .$filename."\n";
                $errorCount++;
        }

        if ($fileContents =~ m{ %hh }xo)
        {
                # %hh is C99 and Windows doesn't like it:
                # http://connect.microsoft.com/VisualStudio/feedback/details/416843/sscanf-cannot-not-handle-hhd-format
                # Need to use temporary variables instead.
                print STDERR "Error: Found %hh in " .$filename."\n";
                $errorCount++;
        }

        # check for files that we should not include directly
        # this must be done before quoted strings (#include "file.h") are removed
        check_included_files(\$fileContents, $filename);

        # Check for value_string and enum_val_t errors: NULL termination,
        # const-nes, and newlines within strings
        if ($check_value_string_array) {
                $errorCount += check_value_string_arrays(\$fileContents, $filename, $debug_flag);
        }

        # Remove all the quoted strings
        $fileContents =~ s{ $DoubleQuotedStr | $SingleQuotedStr } []xog;

        $errorCount += check_pref_var_dupes(\$fileContents, $filename);

        # Remove all blank lines
        $fileContents =~ s{ ^ \s* $ } []xog;

        # Remove all '#if 0'd' code
        remove_if0_code(\$fileContents, $filename);

        $errorCount += check_ett_registration(\$fileContents, $filename);

        #checkAPIsCalledWithTvbGetPtr(\@TvbPtrAPIs, \$fileContents, \@foundAPIs);
        #if (@foundAPIs) {
        #       print STDERR "Found APIs with embedded tvb_get_ptr() calls in ".$filename." : ".join(',', @foundAPIs)."\n"
        #}

        if ($check_shadow) {
                check_shadow_variable(\@ShadowVariable, \$fileContents, \@foundAPIs);
                if (@foundAPIs) {
                        print STDERR "Warning: Found shadow variable(s) in ".$filename." : ".join(',', @foundAPIs)."\n"
                }
        }


        check_snprintf_plus_strlen(\$fileContents, $filename);

        $errorCount += check_proto_tree_add_XXX(\$fileContents, $filename);

        $errorCount += check_try_catch(\$fileContents, $filename);


        # Check and count APIs
        for my $groupArg (@apiGroups) {
                my $pfx = "Warning";
                @foundAPIs = ();
                # A group may carry a per-run maximum as "<group>:<count>".
                my @groupParts = split(/:/, $groupArg);
                my $apiGroup = $groupParts[0];
                my $curFuncCount = 0;

                if (scalar @groupParts > 1) {
                        $APIs{$apiGroup}->{max_function_count} = $groupParts[1];
                }

                findAPIinFile($APIs{$apiGroup}, \$fileContents, \@foundAPIs);

                for my $api (keys %{$APIs{$apiGroup}->{function_counts}} ) {
                        $curFuncCount += $APIs{$apiGroup}{function_counts}{$api};
                }

                # If we have a max function count and we've exceeded it, treat it
                # as an error.
                if (!$APIs{$apiGroup}->{count_errors} && $APIs{$apiGroup}->{max_function_count} >= 0) {
                        if ($curFuncCount > $APIs{$apiGroup}->{max_function_count}) {
                                print STDERR $pfx . ": " . $apiGroup . " exceeds maximum function count: " . $APIs{$apiGroup}->{max_function_count} . "\n";
                                $APIs{$apiGroup}->{count_errors} = 1;
                        }
                }

                if ($curFuncCount <= $APIs{$apiGroup}->{max_function_count}) {
                        next;
                }

                if ($APIs{$apiGroup}->{count_errors}) {
                        # the use of "prohibited" APIs is an error, increment the error count
                        $errorCount += @foundAPIs;
                        $pfx = "Error";
                }

                if (@foundAPIs && ! $machine_readable_output) {
                        print STDERR $pfx . ": Found " . $apiGroup . " APIs in ".$filename.": ".join(',', @foundAPIs)."\n";
                }
                if (@foundAPIs && $machine_readable_output) {
                        for my $api (@foundAPIs) {
                                printf STDERR "%-8.8s %-20.20s %-30.30s %-45.45s\n", $pfx, $apiGroup, $filename, $api;
                        }
                }
        }
}

# Summary: Print Use Counts of each API in each requested summary group

if (scalar @apiSummaryGroups > 0) {
        my $fileline = join(", ", @ARGV);
        printf "\nSummary for " . substr($fileline, 0, 65) . "…\n";

        for my $apiGroup (@apiSummaryGroups) {
                printf "\nUse counts for %s (maximum allowed total is %d)\n", $apiGroup, $APIs{$apiGroup}->{max_function_count};
                for my $api (sort {"\L$a" cmp "\L$b"} (keys %{$APIs{$apiGroup}->{function_counts}} )) {
                        if ($APIs{$apiGroup}{function_counts}{$api} < 1) { next; }
                        printf "%5d %-40.40s\n", $APIs{$apiGroup}{function_counts}{$api}, $api;
                }
        }
}

# Cap the exit status: shells only see the low 8 bits of the exit code.
exit($errorCount > 120 ? 120 : $errorCount);
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 8
+# tab-width: 8
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=8 tabstop=8 expandtab:
+# :indentSize=8:tabSize=8:noTabs=true:
+#
diff --git a/tools/check_dissector.py b/tools/check_dissector.py
new file mode 100755
index 0000000..af1dc64
--- /dev/null
+++ b/tools/check_dissector.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import sys
+import os
+import signal
+import argparse
+
+# Run battery of tests on one or more dissectors.
+
+# For text colouring/highlighting.
class bcolors:
    """ANSI terminal escape codes used to colour/highlight console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    ADDED = '\033[45m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    # Resets all attributes back to the terminal default.
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
+
+
# Try to exit soon after Ctrl-C is pressed.
should_exit = False

def signal_handler(sig, frame):
    """SIGINT handler: set the flag checked between tool runs below."""
    global should_exit
    should_exit = True
    print('You pressed Ctrl+C - exiting')

# Install the handler so long runs can be interrupted cleanly.
signal.signal(signal.SIGINT, signal_handler)
+
# Command-line args
parser = argparse.ArgumentParser(description="Run gamut of tests on dissector(s)")
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--file-list', action='store',
                    help='file with list of dissectors')
parser.add_argument('--build-folder', action='store',
                    help='build folder')

args = parser.parse_args()

# At least one source of files to check must be given.
if not args.file and not args.file_list:
    print('Need to specify --file or --file-list')
    exit(1)

# TODO: verify build-folder if set.

# Get list of files to check.
dissectors = []

# Individually-selected files
if args.file:
    for f in args.file:
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        else:
            dissectors.append(f)

# List of dissectors stored in a file
if args.file_list:
    if not os.path.isfile(args.file_list):
        print('Dissector-list file', args.file_list, 'does not exist.')
        exit(1)
    else:
        with open(args.file_list, 'r') as f:
            contents = f.read().splitlines()
            # Any missing entry aborts the whole run.
            for f in contents:
                if not os.path.isfile(f):
                    print('Chosen file', f, 'does not exist.')
                    exit(1)
                else:
                    dissectors.append(f)

# Tools that should be run on selected files.
# Boolean arg is for whether build-dir is needed in order to run it.
# 3rd is Windows support.
tools = [
    ('tools/delete_includes.py --folder .', True, True),
    ('tools/check_spelling.py', False, True),
    ('tools/check_tfs.py --check-value-strings', False, True),
    ('tools/check_typed_item_calls.py --all-checks', False, True),
    ('tools/check_static.py', True, False),
    ('tools/check_dissector_urls.py', False, True),
    ('tools/check_val_to_str.py', False, True),
    ('tools/cppcheck/cppcheck.sh', False, True),
    ('tools/checkhf.pl', False, True),
    ('tools/checkAPIs.pl', False, True),
    ('tools/fix-encoding-args.pl', False, True),
    ('tools/checkfiltername.pl', False, True)
]
+
+
def run_check(tool, dissectors, python):
    """Run one checking tool over all of the chosen dissector files.

    Args:
        tool:       a (command, needs_build_folder, windows_ok) tuple
                    from the 'tools' table.
        dissectors: list of dissector source files to pass to the tool.
        python:     True for Python tools, False for Perl tools (affects
                    the interpreter prefix on Windows and the arg style).

    Returns:
        The exit status reported by os.system() (0 on success), so the
        caller can detect failing checks; previously it was discarded.
    """
    # Create command-line with all dissectors included
    command = ''

    # Don't trust shebang on windows.
    if sys.platform.startswith('win'):
        command += 'python.exe ' if python else 'perl.exe '

    command += tool[0]
    # Guard against a missing --build-folder (the caller filters these out,
    # but concatenating None would raise a TypeError here).
    if tool[1] and args.build_folder:
        command += ' --build-folder ' + args.build_folder

    for d in dissectors:
        # Python tools take repeated '--file <d>' args; Perl tools take bare paths.
        command += (' --file' if python else '') + ' ' + d

    # Show, then run it.
    print(bcolors.BOLD + command + bcolors.ENDC)
    return os.system(command)
+
+
# Run all checks on all of my dissectors.
for tool in tools:
    if should_exit:
        # Ctrl-C was pressed: stop launching further tools.
        exit(1)
    if ((not sys.platform.startswith('win') or tool[2]) and # Supported on this platform?
        (not tool[1] or (tool[1] and args.build_folder))): # Have --build-folder if needed?

        # Run it. A '.py' in the command name marks it as a Python tool.
        run_check(tool, dissectors, tool[0].find('.py') != -1)
diff --git a/tools/check_dissector_urls.py b/tools/check_dissector_urls.py
new file mode 100755
index 0000000..373d88b
--- /dev/null
+++ b/tools/check_dissector_urls.py
@@ -0,0 +1,291 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import argparse
+import aiohttp
+import asyncio
+import os
+import re
+import shutil
+import signal
+import subprocess
+
+# This utility scans the dissector code for URLs, then attempts to
+# fetch the links. The results are shown in stdout, but also, at
+# the end of the run, written to files:
+# - URLs that couldn't be loaded are written to failures.txt
+# - working URLs are written to successes.txt
+# - any previous failures.txt is also copied to failures_last_run.txt
+#
+# N.B. preferred form of RFC link is e.g., https://tools.ietf.org/html/rfc4349
+
+
+# TODO:
+# - option to write back to dissector file when there is a failure?
+# - optionally parse previous/recent successes.txt and avoid fetching them again?
+# - make sure URLs are really within comments in code?
+# - use urllib.parse or similar to better check URLs?
+# - improve regex to allow '+' in URL (like confluence uses)
+
# Try to exit soon after Ctrl-C is pressed.
should_exit = False


def signal_handler(sig, frame):
    """SIGINT handler: flag the main loop to stop and cancel in-flight fetches."""
    global should_exit
    should_exit = True
    print('You pressed Ctrl+C - exiting')
    try:
        tasks = asyncio.all_tasks()
    except (RuntimeError):
        # we haven't yet started the async link checking, we can exit directly
        exit(1)
    # ignore further SIGINTs while we're cancelling the running tasks
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    for t in tasks:
        t.cancel()

signal.signal(signal.SIGINT, signal_handler)
+
+
class FailedLookup:
    """Stand-in for an HTTP response when a URL could not be fetched.

    Exposes the same 'status' and 'headers' attributes that the link
    checker reads from a real response object.
    """

    def __init__(self):
        # Fake values that will be queried (for a requests.get() return value)
        self.status = 0
        self.headers = {'content-type': '<NONE>'}

    def __str__(self):
        return ('FailedLookup: status=' + str(self.status) +
                ' content-type=' + self.headers['content-type'])
+
+
# Dictionary from url -> result (response object, or FailedLookup on error);
# shared cache so each distinct URL is fetched only once.
cached_lookups = {}
+
+
class Link(object):
    """A URL found in a source file, plus the result of trying to fetch it."""

    def __init__(self, file, line_number, url):
        self.file = file
        self.line_number = line_number
        self.url = url
        self.tested = False
        self.r = None           # response (or FailedLookup) once validated
        self.success = False

    def __str__(self):
        # Show the path from 'epan' onwards, to keep the output short.
        epan_idx = self.file.find('epan')
        if epan_idx == -1:
            filename = self.file
        else:
            filename = self.file[epan_idx:]
        s = ('SUCCESS ' if self.success else 'FAILED ') + \
            filename + ':' + str(self.line_number) + ' ' + self.url
        # Guard on self.r: an un-validated link has no response object, and
        # the previous 'if True:  # self.r:' disabled this guard, crashing
        # on self.r.status and making the else branch unreachable.
        if self.r:
            if self.r.status:
                s += " status-code=" + str(self.r.status)
            if 'content-type' in self.r.headers:
                s += (' content-type="' +
                      self.r.headers['content-type'] + '"')
        else:
            s += ' <No response Received>'
        return s

    def validate(self):
        """Mark this link tested and judge success from the cached response."""
        global cached_lookups
        global should_exit
        if should_exit:
            return
        self.tested = True
        if self.url in cached_lookups:
            self.r = cached_lookups[self.url]
        else:
            # URL was never fetched (e.g. run was interrupted).
            self.r = FailedLookup()

        # Any 2xx status counts as success.
        if self.r.status < 200 or self.r.status >= 300:
            self.success = False
        else:
            self.success = True

        if (args.verbose or not self.success) and not should_exit:
            print(self)
+
# Module-wide accumulators filled in by find_links_in_file() below.
links = []              # every Link found, in discovery order
files = []              # scanned files, when a subset was selected
all_urls = set()        # unique URLs; drives the async fetch pass
+
def find_links_in_file(filename):
    """Scan one source file and record every http(s) URL it contains."""
    global links, all_urls
    # TODO: not matching
    # https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
    url_pattern = re.compile(
        r'https?://(?:[a-zA-Z0-9./_?&=-]+|%[0-9a-fA-F]{2})+')
    with open(filename, 'r', encoding="utf8") as source:
        for line_no, text in enumerate(source, start=1):
            for found in url_pattern.findall(text):
                # Lop off any trailing chars that are not part of it
                candidate = found.rstrip(").',")

                # A url must have a period somewhere
                if '.' not in candidate:
                    continue
                links.append(Link(filename, line_no, candidate))
                all_urls.add(candidate)
+
+
# Scan the given folder for links to test.
def find_links_in_folder(folder):
    """Scan every .c file directly inside *folder* for URLs."""
    # Look at files in sorted order, to give some idea of how far through it
    # is.
    dissector_files = [name for name in sorted(os.listdir(folder))
                       if name.endswith('.c')]
    for name in dissector_files:
        find_links_in_file(os.path.join(folder, name))
+
+
async def populate_cache(sem, session, url):
    """Fetch *url* once and record the response (or a FailedLookup) in
    the shared cached_lookups dict, bounded by the semaphore *sem*."""
    global cached_lookups
    if should_exit:
        return
    async with sem:
        try:
            async with session.get(url) as r:
                cached_lookups[url] = r
                if args.verbose:
                    print('checking ', url, ': success', sep='')

        # NOTE(review): listing Exception makes ValueError/ConnectionError
        # redundant; CancelledError still needs naming since it is a
        # BaseException in Python 3.8+. Any failure becomes a FailedLookup.
        except (asyncio.CancelledError, ValueError, ConnectionError, Exception):
            cached_lookups[url] = FailedLookup()
            if args.verbose:
                print('checking ', url, ': failed', sep='')
+
+
async def check_all_links(links):
    """Concurrently fetch every unique URL, then validate all Link objects."""
    # Cap the number of in-flight fetches; the connector additionally limits
    # open connections.
    sem = asyncio.Semaphore(50)
    timeout = aiohttp.ClientTimeout(total=25)
    connector = aiohttp.TCPConnector(limit=30)
    # Browser-like headers: some servers reject obvious script user agents.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
    async with aiohttp.ClientSession(connector=connector, headers=headers, timeout=timeout) as session:
        tasks = [populate_cache(sem, session, u) for u in all_urls]
        try:
            await asyncio.gather(*tasks)
        except (asyncio.CancelledError):
            # Ctrl-C: the signal handler cancelled the tasks.
            await session.close()

    for l in links:
        l.validate()
+
+
#################################################################
# Main logic.

# command-line args. Controls which dissector files should be scanned.
# If no args given, will just scan epan/dissectors folder.
parser = argparse.ArgumentParser(description='Check URL links in dissectors')
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')
parser.add_argument('--verbose', action='store_true',
                    help='when enabled, show more output')

args = parser.parse_args()
+
+
def is_dissector_file(filename):
    """Return a truthy match when *filename* is a dissector source file."""
    return re.match(r'epan/dissectors/packet-.*\.c', filename)
+
+
# Get files from wherever command-line args indicate.
if args.file:
    # Add specified file(s)
    for f in args.file:
        # A bare filename is taken to be relative to epan/dissectors.
        if not f.startswith('epan'):
            f = os.path.join('epan', 'dissectors', f)
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        else:
            files.append(f)
            find_links_in_file(f)
elif args.commits:
    # Get files affected by specified number of commits.
    command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Fetch links from files (dissectors files only)
    files = list(filter(is_dissector_file, files))
    for f in files:
        find_links_in_file(f)
elif args.open:
    # Unstaged changes.
    command = ['git', 'diff', '--name-only']
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    files = list(filter(is_dissector_file, files))
    # Staged changes.
    command = ['git', 'diff', '--staged', '--name-only']
    files_staged = [f.decode('utf-8')
                    for f in subprocess.check_output(command).splitlines()]
    files_staged = list(filter(is_dissector_file, files_staged))
    for f in files:
        find_links_in_file(f)
    # Don't scan a file twice when it is both staged and unstaged.
    for f in files_staged:
        if f not in files:
            find_links_in_file(f)
            files.append(f)
else:
    # Find links from dissector folder.
    find_links_in_folder(os.path.join(os.path.dirname(
        __file__), '..', 'epan', 'dissectors'))
+
+
# If scanning a subset of files, list them here.
print('Examining:')
if args.file or args.commits or args.open:
    if files:
        print(' '.join(files), '\n')
    else:
        print('No files to check.\n')
else:
    print('All dissector modules\n')

# Fetch every unique URL and mark each Link with the outcome.
asyncio.run(check_all_links(links))

# Write failures to a file. Back up any previous first though.
if os.path.exists('failures.txt'):
    shutil.copyfile('failures.txt', 'failures_last_run.txt')
with open('failures.txt', 'w') as f_f:
    for l in links:
        if l.tested and not l.success:
            f_f.write(str(l) + '\n')
# And successes
with open('successes.txt', 'w') as f_s:
    for l in links:
        if l.tested and l.success:
            f_s.write(str(l) + '\n')


# Count and show overall stats.
passed, failed = 0, 0
for l in links:
    if l.tested:
        if l.success:
            passed += 1
        else:
            failed += 1

print('--------------------------------------------------------------------------------------------------')
print(len(links), 'links checked: ', passed, 'passed,', failed, 'failed')
diff --git a/tools/check_help_urls.py b/tools/check_help_urls.py
new file mode 100755
index 0000000..ddf3673
--- /dev/null
+++ b/tools/check_help_urls.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+'''
+Go through all user guide help URLs listed in the program
+and confirm these are present in the User's Guide source files.
+'''
+
+from re import search
+from glob import glob
+from sys import exit
+
# Map each chapter referenced from ui/help_url.c to "seen in the guide yet?".
found = {}

with open("ui/help_url.c") as f:
    for line in f:
        if url := search(r"user_guide_url\(\"(.*).html\"\);", line):
            found[url.group(1)] = False

# Scan every User's Guide source file for matching block anchors.
for adoc_file in glob("docbook/wsug_src/*.adoc"):
    with open(adoc_file) as f:
        for line in f:
            # Modern asciidoc block anchor syntax, e.g. "[#ChapterName]".
            if tag := search(r"^\[\#(.*)]", line):
                chapter = tag.group(1)
                if chapter in found:
                    found[chapter] = True

# Report chapters referenced from the code but absent from the guide sources.
missing = False
for chapter, present in found.items():
    if not present:
        if not missing:
            print("The following chapters are missing in the User's Guide:")
            missing = True
        print(chapter)

if missing:
    exit(-1)
diff --git a/tools/check_spelling.py b/tools/check_spelling.py
new file mode 100755
index 0000000..7e31908
--- /dev/null
+++ b/tools/check_spelling.py
@@ -0,0 +1,493 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import sys
+import re
+import subprocess
+import argparse
+import signal
+from collections import Counter
+
+# Looks for spelling errors among strings found in source or documentation files.
+# N.B. To run this script, you should install pyspellchecker (not spellchecker) using pip.
+
+# TODO: check structured doxygen comments?
+
+# For text colouring/highlighting.
class bcolors:
    """ANSI escape sequences used to colour/highlight terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    ADDED = '\033[45m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to default attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
+
+
# Try to exit soon after Ctrl-C is pressed.
should_exit = False

def signal_handler(sig, frame):
    """SIGINT handler: set the flag that the long-running loops poll."""
    global should_exit
    should_exit = True
    print('You pressed Ctrl+C - exiting')

signal.signal(signal.SIGINT, signal_handler)
+
+
+
+# Create spellchecker, and augment with some Wireshark words.
+from spellchecker import SpellChecker
+# Set up our dict with words from text file.
+spell = SpellChecker()
+spell.word_frequency.load_text_file('./tools/wireshark_words.txt')
+
+
+# Track words that were not found.
+missing_words = []
+
+
+# Split camelCase string into separate words.
def camelCaseSplit(identifier):
    """Split a camelCase identifier into its component words."""
    word_re = r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    return [match.group(0) for match in re.finditer(word_re, identifier)]
+
+
+# A File object contains all of the strings to be checked for a given file.
class File:
    """Holds all of the strings found in one file, and spell-checks them."""

    def __init__(self, file):
        self.file = file
        self.values = []

        _, extension = os.path.splitext(file)
        # Only .c/.cpp files have comments stripped and string literals extracted.
        self.code_file = extension in {'.c', '.cpp'}

        with open(file, 'r', encoding="utf8") as f:
            contents = f.read()

        if self.code_file:
            # Remove comments so as not to trip up RE.
            contents = removeComments(contents)

            # Find protocol name and add to dict.
            # N.B. doesn't work when a variable is used instead of a literal for the protocol name...
            matches = re.finditer(r'proto_register_protocol\s*\([\n\r\s]*\"(.*)\",[\n\r\s]*\"(.*)\",[\n\r\s]*\"(.*)\"', contents)
            for m in matches:
                protocol = m.group(3)
                # Add to dict so the filter name is not reported as a misspelling.
                spell.word_frequency.load_words([protocol])
                spell.known([protocol])
                print('Protocol is: ' + bcolors.BOLD + protocol + bcolors.ENDC)

    def add(self, value):
        """Add a string found in this file (bytes on Windows consoles)."""
        self.values.append(value.encode('utf-8') if sys.platform.startswith('win') else value)

    def checkMultiWords(self, word):
        """Whole word not recognised - is it 2+ known words concatenated (without camelCase)?"""
        if len(word) < 6:
            return False

        # Don't consider if mixed cases.
        if not (word.islower() or word.isupper()):
            # But make an exception if only the first letter is uppercase..
            if not word == (word[0].upper() + word[1:]):
                return False

        # Try splitting into 2 words recognised at various points.
        # Allow 3-letter words.
        length = len(word)
        for idx in range(3, length - 3):
            word1 = word[0:idx]
            word2 = word[idx:]

            if not spell.unknown([word1, word2]):
                return True

        return self.checkMultiWordsRecursive(word)

    def wordBeforeId(self, word):
        """If the word before a trailing 'id' is recognised, accept the word."""
        if word.lower().endswith('id'):
            if not spell.unknown([word[0:len(word) - 2]]):
                return True
            else:
                return False
        # Not an 'id' word: implicitly returns None (falsy), as before.

    def checkMultiWordsRecursive(self, word):
        """True if 'word' can be completely split into known words of length >= 4."""
        length = len(word)
        if length < 4:
            return False

        for idx in range(4, length + 1):
            w = word[0:idx]
            if not spell.unknown([w]):
                if idx == len(word):
                    return True
                else:
                    if self.checkMultiWordsRecursive(word[idx:]):
                        return True

        return False

    def numberPlusUnits(self, word):
        """True if 'word' is a number followed by a recognised unit suffix, e.g. '100ms'."""
        m = re.search(r'^([0-9]+)([a-zA-Z]+)$', word)
        if m:
            # N.B. the duplicate "ms" entry from the original set has been removed.
            if m.group(2).lower() in { "bit", "bits", "gb", "kbps", "gig", "mb", "th", "mhz", "v", "hz", "k",
                                       "mbps", "m", "g", "ms", "nd", "nds", "rd", "kb", "kbit", "ghz",
                                       "khz", "km", "usec", "sec", "gbe", "ns", "ksps", "qam", "mm" }:
                return True
        return False

    def spellCheck(self):
        """Check the spelling of all the words we have found, reporting
        (and recording in missing_words) any that are not recognised."""
        global missing_words

        # Translation table: punctuation becomes a space, '®' is dropped.
        # ('%u'/'%d'/'%s' need no special handling - '%' itself becomes a space,
        # exactly as in the original replace chain, where those replaces were dead.)
        punct_table = {ord(ch): ' ' for ch in '.,`:;"\\+|()[]{}<>_-/!?=*%#&@$\''}
        punct_table[ord('®')] = None

        num_values = len(self.values)
        for value_index, v in enumerate(self.values):
            if should_exit:
                exit(1)

            v = str(v)

            # Ignore includes.
            if v.endswith('.h'):
                continue

            # Store original (as want to include for context in error report).
            original = str(v)

            # Replace most punctuation with spaces, split into words, and
            # further split up any camelCase words.
            v = v.translate(punct_table)
            words = []
            for w in v.split():
                words += camelCaseSplit(w)

            # Check each word within this string in turn.
            for word in words:
                # Strip trailing digits from word.
                word = word.rstrip('1234567890')

                # Quote marks found in some of the docs...
                word = word.replace('“', '').replace('”', '')

                # Singular and collective possession.
                if word.endswith("’s"):
                    word = word[:-2]
                if word.endswith("s’"):
                    word = word[:-2]

                # Numbers with unit suffixes (e.g. "100ms") are fine.
                if self.numberPlusUnits(word):
                    continue

                if len(word) > 4 and spell.unknown([word]) and not self.checkMultiWords(word) and not self.wordBeforeId(word):
                    print(self.file, value_index, '/', num_values, '"' + original + '"', bcolors.FAIL + word + bcolors.ENDC,
                          ' -> ', '?')
                    # TODO: showing spell.correction(word) is interesting, but takes too long!
                    missing_words.append(word)
+
def removeWhitespaceControl(code_string):
    """Replace escaped whitespace-control sequences (literal \\n, \\r, \\t) with spaces."""
    for escape in ('\\n', '\\r', '\\t'):
        code_string = code_string.replace(escape, ' ')
    return code_string
+
+# Remove any contractions from the given string.
def removeContractions(code_string):
    """Delete known contractions (and their capitalised / ASCII-apostrophe
    variants) so the apostrophes don't confuse later word splitting."""
    contractions = [ "wireshark’s", "don’t", "let’s", "isn’t", "won’t", "user’s", "hasn’t", "you’re", "o’clock", "you’ll",
                     "you’d", "developer’s", "doesn’t", "what’s", "let’s", "haven’t", "can’t", "you’ve",
                     "shouldn’t", "didn’t", "wouldn’t", "aren’t", "there’s", "packet’s", "couldn’t", "world’s",
                     "needn’t", "graph’s", "table’s", "parent’s", "entity’s", "server’s", "node’s",
                     "querier’s", "sender’s", "receiver’s", "computer’s", "frame’s", "vendor’s", "system’s",
                     "we’ll", "asciidoctor’s", "protocol’s", "microsoft’s", "wasn’t" ]
    for c in contractions:
        for variant in (c, c.capitalize(), c.replace('’', "'"), c.capitalize().replace('’', "'")):
            code_string = code_string.replace(variant, "")
    return code_string
+
def removeComments(code_string):
    """Strip C-style and C++-style comments from the given source text."""
    block_comment = re.compile(r"/\*.*?\*/", re.DOTALL)
    code_string = re.sub(block_comment, "", code_string)
    # Avoid matching // where it is allowed, e.g., https://www... or file:///...
    line_comment = re.compile(r"(?<!:)(?<!/)(?<!\")(?<!\"\s\s)(?<!file:/)//.*?\n")
    code_string = re.sub(line_comment, "", code_string)
    return code_string
+
def removeSingleQuotes(code_string):
    """Remove escaped-quote constructs and ellipses that would break word parsing."""
    replacements = [
        ('\\\\', ' '),      # separate at double backslash
        ('\"\\\\\"', ''),
        ("\\\"", ' '),      # escaped double-quote
        ("'\"'", ''),       # character literal '"'
        ('…', ' '),
    ]
    for old, new in replacements:
        code_string = code_string.replace(old, new)
    return code_string
+
def removeHexSpecifiers(code_string):
    """Remove all hex constants (0x...) from the given string.

    The original implementation repeatedly found the first hex constant and
    replaced ALL occurrences of that exact text; that could truncate longer
    constants sharing the same prefix (removing '0x1' turns '0x12' into '2').
    A single re.sub removes each whole constant exactly once.
    """
    return re.sub(r'0x[0-9a-fA-F]*', '', code_string)
+
+
+# Create a File object that knows about all of the strings in the given file.
def findStrings(filename):
    """Build and return a File object holding every checkable string in 'filename'."""
    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()

    # Strip constructs that would otherwise confuse the string-extracting REs.
    for cleanup in (removeContractions, removeWhitespaceControl,
                    removeSingleQuotes, removeHexSpecifiers):
        contents = cleanup(contents)

    # Create file object.
    file = File(filename)

    # What we check depends upon file type.
    if file.code_file:
        contents = removeComments(contents)
        # Code, so only checking string literals.
        for m in re.finditer(r'\"([^\"]*)\"', contents):
            file.add(m.group(1))
    else:
        # A documentation file, so examine all words.
        for w in contents.split():
            file.add(w)

    return file
+
+
+# Test for whether the given file was automatically generated.
def isGeneratedFile(filename):
    """Heuristic: does a comment near the top of this .c file say it is generated?"""
    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        return False

    if not filename.endswith('.c'):
        return False

    # These files are generated, but the notice is further in than we check below.
    if filename.endswith('pci-ids.c') or filename.endswith('services-data.c') or filename.endswith('manuf-data.c'):
        return True

    generation_markers = ('Generated automatically',
                          'Autogenerated from',
                          'is autogenerated',
                          'automatically generated by Pidl',
                          'Created by: The Qt Meta Object Compiler',
                          'This file was generated',
                          'This filter was automatically generated',
                          'This file is auto generated, do not edit!',
                          'this file is automatically generated')

    # The marker comment is near the top, so give up after a few lines.
    with open(os.path.join(filename), 'r', encoding="utf8") as f_read:
        for line_no, line in enumerate(f_read):
            if line_no > 10:
                return False
            if any(marker in line for marker in generation_markers):
                return True

    # OK, looks like a hand-written file!
    return False
+
+
def isAppropriateFile(filename):
    """Only spell-check source/doc file types we care about (and skip CMake files)."""
    if 'CMake' in filename:
        return False
    base, extension = os.path.splitext(filename)
    return extension in { '.adoc', '.c', '.cpp', '.pod', '.nsi', '.txt'} or base.endswith('README')
+
+
def findFilesInFolder(folder, recursive=True):
    """Return candidate files under 'folder' (recursing unless told not to)."""
    files_to_check = []

    if recursive:
        for root, _subfolders, filenames in os.walk(folder):
            for name in filenames:
                # Bail out promptly if Ctrl-C was pressed.
                if should_exit:
                    return
                path = os.path.join(root, name)
                if isAppropriateFile(path) and not isGeneratedFile(path):
                    files_to_check.append(path)
    else:
        # Sorted, so progress through the folder is predictable.
        for name in sorted(os.listdir(folder)):
            path = os.path.join(folder, name)
            if isAppropriateFile(path) and not isGeneratedFile(path):
                files_to_check.append(path)

    return files_to_check
+
+
+# Check the given file.
def checkFile(filename):
    """Find and spell-check every string in 'filename' (if it still exists)."""
    # May have been deleted in a recent commit.
    if not os.path.exists(filename):
        print(filename, 'does not exist!')
        return
    findStrings(filename).spellCheck()
+
+
+
#################################################################
# Main logic.

# Command-line args control which files should be checked.
# If no args given, will just scan the epan/dissectors folder.
parser = argparse.ArgumentParser(description='Check spellings in specified files')
parser.add_argument('--file', action='append',
                    help='specify individual file to test')
parser.add_argument('--folder', action='store', default='',
                    help='specify folder to test')
parser.add_argument('--no-recurse', action='store_true', default='',
                    help='do not recurse inside chosen folder')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')

args = parser.parse_args()


# Get files from wherever command-line args indicate.
files = []
if args.file:
    # Add specified file(s).
    for f in args.file:
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        files.append(f)
elif args.commits:
    # Files affected by the specified number of commits.
    command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Keep only existing, appropriate, hand-written files.
    files = [f for f in files
             if os.path.exists(f) and isAppropriateFile(f) and not isGeneratedFile(f)]
elif args.open:
    # Unstaged changes...
    command = ['git', 'diff', '--name-only']
    changed = [f.decode('utf-8')
               for f in subprocess.check_output(command).splitlines()]
    files = [f for f in changed if isAppropriateFile(f) and not isGeneratedFile(f)]
    # ...plus staged changes, merged in without duplicates.
    command = ['git', 'diff', '--staged', '--name-only']
    staged = [f.decode('utf-8')
              for f in subprocess.check_output(command).splitlines()]
    for f in staged:
        if isAppropriateFile(f) and not isGeneratedFile(f) and f not in files:
            files.append(f)
else:
    # By default, scan the dissectors directory...
    folder = os.path.join('epan', 'dissectors')
    # ...but overwrite with any folder entry (which must exist).
    if args.folder:
        folder = args.folder
        if not os.path.isdir(folder):
            print('Folder', folder, 'not found!')
            exit(1)

    # Find files from folder.
    print('Looking for files in', folder)
    files = findFilesInFolder(folder, not args.no_recurse)


# If scanning a subset of files, list them here.
print('Examining:')
if args.file or args.folder or args.commits or args.open:
    if files:
        print(' '.join(files), '\n')
    else:
        print('No files to check.\n')
else:
    print('All dissector modules\n')


# Now check the chosen files, getting out promptly if Ctrl-C was pressed.
for f in files:
    checkFile(f)
    if should_exit:
        exit(1)


# Show the most commonly not-recognised words.
print('')
for word, count in Counter(missing_words).most_common(100):
    print(word, ':', count)

# Show error count.
print('\n' + bcolors.BOLD + str(len(missing_words)) + ' issues found' + bcolors.ENDC + '\n')
diff --git a/tools/check_static.py b/tools/check_static.py
new file mode 100755
index 0000000..fbd1d11
--- /dev/null
+++ b/tools/check_static.py
@@ -0,0 +1,326 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import re
+import subprocess
+import argparse
+import signal
+
+# Look for dissector symbols that could/should be static.
+# This will not run on Windows, unless/until we check the platform
+# and use (I think) dumpbin.exe
+
# Try to exit soon after Ctrl-C is pressed.
should_exit = False

def signal_handler(sig, frame):
    """SIGINT handler: set the flag that the long-running loops poll."""
    global should_exit
    should_exit = True
    print('You pressed Ctrl+C - exiting')

signal.signal(signal.SIGINT, signal_handler)

# Allow this as a default build folder name...
# (may be overridden later by the --build-folder argument)
build_folder = os.getcwd() + '-build'
+
+# Record which symbols are referred to (by a set of files).
class CalledSymbols:
    """Accumulates the set of symbol names that the examined object files
    refer to but do not define - i.e. everything something else calls/uses."""
    def __init__(self):
        self.referred = set()

    def addCalls(self, file):
        """Run 'nm' on the object file built from 'file' and record every
        undefined ('U') symbol it references."""
        # Make sure that file is built.
        last_dir = os.path.split(os.path.dirname(file))[-1]
        if file.find('ui/cli') != -1:
            # A tshark target-only file
            object_file = os.path.join(build_folder, 'CMakeFiles', ('tshark' + '.dir'), file + '.o')
        elif file.find('ui/qt') != -1:
            object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', ('qtui' + '.dir'), os.path.basename(file) + '.o')
        else:
            if file.endswith('dissectors.c'):
                object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', 'dissector-registration' + '.dir', os.path.basename(file) + '.o')
            else:
                object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', last_dir + '.dir', os.path.basename(file) + '.o')
        if not os.path.exists(object_file):
            # Not built (e.g. target disabled), so nothing to record.
            #print('Warning -', object_file, 'does not exist')
            return
        command = ['nm', object_file]
        for f in subprocess.check_output(command).splitlines():
            # str() of the bytes gives "b'...'"; [2:-1] strips that wrapper.
            l = str(f)[2:-1]
            # Lines might or might not have an address before letter and symbol.
            p1 = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)')
            p2 = re.compile(r'[ ]* ([a-zA-Z]) (.*)')

            m = p1.match(l)
            if not m:
                m = p2.match(l)
            if m:
                letter = m.group(1)
                function_name = m.group(2)

                # Only interested in undefined references to symbols.
                if letter == 'U':
                    self.referred.add(function_name)
+
+
+
+# Record which symbols are defined in a single file.
class DefinedSymbols:
    """Records the globally-defined symbols in a single dissector file, and
    can report any that nothing else refers to (so could be made static)."""

    def __init__(self, file):
        self.filename = file
        # Maps each global symbol name to the raw 'nm' line that defined it.
        self.global_dict = {}
        self.header_file_contents = None

        # Make sure that file is built.
        object_file = os.path.join(build_folder, 'epan', 'dissectors', 'CMakeFiles', 'dissectors.dir', os.path.basename(file) + '.o')

        if not os.path.exists(object_file):
            #print('Warning -', object_file, 'does not exist')
            return

        # Contents of the corresponding header, if there is one.
        # (The original leaked this file handle; 'with' closes it.)
        header_file = file.replace('.c', '.h')
        try:
            with open(header_file, 'r') as f:
                self.header_file_contents = f.read()
        except IOError:
            pass

        command = ['nm', object_file]
        for f in subprocess.check_output(command).splitlines():
            # Line consists of whitespace, [address], letter, symbolName
            l = str(f)[2:-1]
            p = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)')
            m = p.match(l)
            if m:
                letter = m.group(1)
                function_name = m.group(2)
                # Globally-defined symbols. Would be 't' or 'd' if already static.
                if letter in 'TD':
                    self.add(function_name, l)

    def add(self, function_name, nm_line):
        """Record one globally-visible symbol, keeping the whole nm line for reporting.

        N.B. the parameters were previously named (letter, function_name), which
        did not match the call site self.add(function_name, l); names now do.
        """
        self.global_dict[function_name] = nm_line

    def mentionedInHeaders(self, symbol):
        """Is 'symbol' mentioned in this dissector's own header, or in one of
        the common headers whose name doesn't match its dissector file?"""
        if self.header_file_contents:
            if self.header_file_contents.find(symbol) != -1:
                return True
        # TODO: could cache the contents of these files, but it's not that slow.
        common_mismatched_headers = [ os.path.join('epan', 'dissectors', 'packet-ncp-int.h'),
                                      os.path.join('epan', 'dissectors', 'packet-mq.h'),
                                      os.path.join('epan', 'dissectors', 'packet-ip.h'),
                                      os.path.join('epan', 'dissectors', 'packet-gsm_a_common.h'),
                                      os.path.join('epan', 'dissectors', 'packet-epl.h'),
                                      os.path.join('epan', 'dissectors', 'packet-bluetooth.h'),
                                      os.path.join('epan', 'dissectors', 'packet-dcerpc.h'),
                                      os.path.join('epan', 'ip_opts.h'),
                                      os.path.join('epan', 'eap.h')]
        for hf in common_mismatched_headers:
            try:
                # 'with' ensures the handle is closed (the original leaked it).
                with open(hf) as f:
                    if f.read().find(symbol) != -1:
                        return True
            except EnvironmentError:
                pass

        return False

    def check(self, called_symbols):
        """Report (and count) every global symbol not present in 'called_symbols'."""
        global issues_found
        for sym in self.global_dict:
            if sym not in called_symbols:
                mentioned_in_header = self.mentionedInHeaders(sym)
                nm_line = self.global_dict[sym]
                print(self.filename, '(' + nm_line + ')', 'is not referred to so could be static?', '(in header)' if mentioned_in_header else '')
                issues_found += 1
+
+
+
+# Helper functions.
+
def isDissectorFile(filename):
    """Does this basename look like a dissector source file (packet-*.c / file-*.c)?"""
    dissector_re = re.compile(r'(packet|file)-.*\.c')
    return dissector_re.match(filename)
+
+# Test for whether the given dissector file was automatically generated.
def isGeneratedFile(filename):
    """Heuristic: does a comment near the top of this .c file say it is generated?"""
    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        return False

    if not filename.endswith('.c'):
        return False

    generation_markers = ('Generated automatically',
                          'Autogenerated from',
                          'is autogenerated',
                          'automatically generated by Pidl',
                          'Created by: The Qt Meta Object Compiler',
                          'This file was generated',
                          'This filter was automatically generated')

    # The marker comment is near the top, so give up after a few lines.
    with open(os.path.join(filename), 'r') as f_read:
        for line_no, line in enumerate(f_read):
            if line_no > 10:
                return False
            if any(marker in line for marker in generation_markers):
                return True

    # OK, looks like a hand-written file!
    return False
+
+
def findDissectorFilesInFolder(folder, include_generated):
    """Return dissector files in 'folder', sorted (optionally skipping generated ones)."""
    # Sorted order gives some idea of how far through the scan is.
    found = []
    for f in sorted(os.listdir(folder)):
        if should_exit:
            return
        if isDissectorFile(f):
            if include_generated or not isGeneratedFile(os.path.join('epan', 'dissectors', f)):
                found.append(os.path.join(folder, f))
    return found
+
def findFilesInFolder(folder):
    """Return the .c/.cpp files directly inside 'folder', sorted."""
    # Sorted order gives some idea of how far through the scan is.
    found = []
    for f in sorted(os.listdir(folder)):
        if should_exit:
            return
        if f.endswith('.c') or f.endswith('.cpp'):
            found.append(os.path.join(folder, f))
    return found
+
+
def is_dissector_file(filename):
    """Truthy if the path refers to a dissector source file."""
    return re.match(r'.*packet-.*\.c', filename)
+
+
# Running total of symbols reported as could-be-static (updated in DefinedSymbols.check).
issues_found = 0
+
+
+
#################################################################
# Main logic.

# command-line args. Controls which dissector files should be checked.
# If no args given, will just scan epan/dissectors folder.
parser = argparse.ArgumentParser(description='Check calls in dissectors')
parser.add_argument('--build-folder', action='store', default='',
                    help='build folder', required=False)
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')

args = parser.parse_args()


# Get files from wherever command-line args indicate.
files = []

if args.build_folder:
    build_folder = args.build_folder

if args.file:
    # Add specified file(s), assuming the dissector folder for bare names.
    for f in args.file:
        if not f.startswith('epan'):
            f = os.path.join('epan', 'dissectors', f)
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        files.append(f)
elif args.commits:
    # Get files affected by specified number of commits.
    command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Will examine dissector files only.
    files = list(filter(is_dissector_file, files))
elif args.open:
    # Unstaged changes.
    command = ['git', 'diff', '--name-only']
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files = list(filter(is_dissector_file, files))
    # Staged changes.
    command = ['git', 'diff', '--staged', '--name-only']
    files_staged = [f.decode('utf-8')
                    for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files_staged = list(filter(is_dissector_file, files_staged))
    # BUG FIX: the original did 'for f in files: files.append(f)' here, which
    # appends to the list being iterated over and so never terminates whenever
    # any dissector file has unstaged changes. Just merge in the staged files.
    for f in files_staged:
        if f not in files:
            files.append(f)
else:
    # Find all dissector files from folder.
    files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'),
                                       include_generated=False)


# If scanning a subset of files, list them here.
print('Examining:')
if args.file or args.commits or args.open:
    if files:
        print(' '.join(files), '\n')
    else:
        print('No files to check.\n')
else:
    print('All dissector modules\n')


if not os.path.isdir(build_folder):
    print('Build directory not valid', build_folder, '- please set with --build-folder')
    exit(1)


# Get the set of called functions and referred-to data.
called = CalledSymbols()
for d in findDissectorFilesInFolder(os.path.join('epan', 'dissectors'), include_generated=True):
    called.addCalls(d)
called.addCalls(os.path.join('epan', 'dissectors', 'dissectors.c'))
# Also check calls from GUI code.
for d in findFilesInFolder('ui'):
    called.addCalls(d)
for d in findFilesInFolder(os.path.join('ui', 'qt')):
    called.addCalls(d)
# These are from tshark..
for d in findFilesInFolder(os.path.join('ui', 'cli')):
    called.addCalls(d)


# Now check identified files.
for f in files:
    if should_exit:
        exit(1)
    DefinedSymbols(f).check(called.referred)

# Show summary.
print(issues_found, 'issues found')
diff --git a/tools/check_tfs.py b/tools/check_tfs.py
new file mode 100755
index 0000000..cecf8d9
--- /dev/null
+++ b/tools/check_tfs.py
@@ -0,0 +1,595 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import re
+import subprocess
+import argparse
+import signal
+
+# This utility scans for tfs items, and works out if standard ones
+# could have been used intead (from epan/tfs.c)
+# Can also check for value_string where common tfs could be used instead.
+
+# TODO:
+# - check how many of the definitions in epan/tfs.c are used in other dissectors
+# - although even if unused, might be in external dissectors?
+# - consider merging Item class with check_typed_item_calls.py ?
+
+
# Try to exit soon after Ctrl-C is pressed.
# Flag polled by the long-running scanning loops below.
should_exit = False

def signal_handler(sig, frame):
    """SIGINT handler: request an orderly exit from the scanning loops."""
    global should_exit
    print('You pressed Ctrl+C - exiting')
    should_exit = True

signal.signal(signal.SIGINT, signal_handler)
+
+
# Test for whether the given file was automatically generated.
def isGeneratedFile(filename):
    """Return True if filename appears to be a machine-generated source file.

    Only the first few lines are examined, since generator banners appear
    near the top of a file.  A missing file (e.g. deleted in a recent
    commit) is treated as not generated.
    """
    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        return False

    # Phrases that code generators commonly leave near the top of a file.
    markers = ('Generated automatically',
               'Generated Automatically',
               'Autogenerated from',
               'is autogenerated',
               'automatically generated by Pidl',
               'Created by: The Qt Meta Object Compiler',
               'This file was generated',
               'This filter was automatically generated',
               'This file is auto generated, do not edit!',
               'This file is auto generated')

    # 'with' guarantees the handle is closed on every path (the original
    # needed three separate close() calls).
    with open(filename, 'r') as f_read:
        for lines_tested, line in enumerate(f_read):
            # The comment to say that it's generated is near the top, so give
            # up once we get a few lines down.
            if lines_tested > 10:
                return False
            if any(marker in line for marker in markers):
                return True

    # OK, looks like a hand-written file!
    return False
+
+
# Keep track of custom entries that might appear in multiple dissectors,
# so we can consider adding them to tfs.c
# Maps (true_string, false_string) -> list of files defining that pair.
custom_tfs_entries = {}

def AddCustomEntry(val1, val2, file):
    """Record that 'file' defines a custom TFS with strings (val1, val2)."""
    global custom_tfs_entries
    # setdefault() creates the list on first sight of this string pair.
    custom_tfs_entries.setdefault((val1, val2), []).append(file)
+
+
+
class TFS:
    """A true_false_string found in a dissector file.

    Per the true_false_string convention (see e.g. tfs_yes_no = {"Yes", "No"}),
    val1 is the string shown when the field is true and val2 when it is false.
    Sanity checks run on construction; serious problems bump the global
    warnings_found counter, minor ones are just printed as notes.
    """
    def __init__(self, file, name, val1, val2):
        self.file = file
        self.name = name
        self.val1 = val1
        self.val2 = val2

        global warnings_found

        # Should not be empty
        if not len(val1) or not len(val2):
            print('Warning:', file, name, 'has an empty field', self)
            warnings_found += 1
        #else:
            # Strange if one begins with capital but other doesn't?
            #if val1[0].isalpha() and val2[0].isalpha():
            #    if val1[0].isupper() != val2[0].isupper():
            #        print(file, name, 'one starts lowercase and the other upper', self)

        # Leading or trailing space should not be needed.
        # N.B. val1 is the *true* string and val2 the *false* string; the
        # original messages had these two labels swapped.
        if val1.startswith(' ') or val1.endswith(' '):
            print('Note: ' + self.file + ' ' + self.name + ' - true val begins or ends with space \"' + self.val1 + '\"')
        if val2.startswith(' ') or val2.endswith(' '):
            print('Note: ' + self.file + ' ' + self.name + ' - false val begins or ends with space \"' + self.val2 + '\"')

        # Should really not be identical...
        if val1.lower() == val2.lower():
            print('Warning:', file, name, 'true and false strings are the same', self)
            warnings_found += 1

        # Shouldn't both be negation (with exception..)
        if (file != os.path.join('epan', 'dissectors', 'packet-smb.c') and (val1.lower().find('not ') != -1) and (val2.lower().find('not ') != -1)):
            print('Warning:', file, name, self, 'both strings contain not')
            warnings_found += 1

        # Not expecting full-stops inside strings..
        if val1.find('.') != -1 or val2.find('.') != -1:
            print('Warning:', file, name, 'Period found in string', self)
            warnings_found += 1

    def __str__(self):
        return '{' + '"' + self.val1 + '", "' + self.val2 + '"}'
+
+
class ValueString:
    """A value_string definition, checked for whether it mimics a TFS
    (i.e. exactly two entries, with values 1 and 0, plus a terminator)."""

    def __init__(self, file, name, vals):
        self.file = file
        self.name = name
        self.raw_vals = vals
        self.parsed_vals = {}
        self.looks_like_tfs = True

        # A TFS-like value_string has exactly 3 '{': two entries + terminator.
        if self.raw_vals.count('{') != 3:
            self.looks_like_tfs = False
            return

        # Parse each '{ value, "string" }' entry in turn.
        for entry_match in re.finditer(r'\{([\"a-zA-Z\s\d\,]*)\}', self.raw_vals):
            # Does this entry look like half of a TFS entry?
            tfs_half = re.match(r'\s*([01])\,\s*\"([a-zA-Z\d\s]*\s*)\"', entry_match[1])
            if tfs_half is None:
                # Not TFS-shaped - give up on this value_string.
                self.looks_like_tfs = False
                break
            # Key by truth value: '1' -> True string, '0' -> False string.
            self.parsed_vals[tfs_half[1] == '1'] = tfs_half[2]
            if len(self.parsed_vals) == 2:
                # Both the true and false strings have now been seen.
                break

    def __str__(self):
        return '{' + '"' + self.raw_vals + '"}'
+
+
# Width in bits implied by each fixed-size field type, used when counting
# mask bits for an item.
field_widths = {
    'FT_BOOLEAN' : 64,   # TODO: Width depends upon 'display' field
    'FT_CHAR'    : 8,
    'FT_UINT8'   : 8,
    'FT_INT8'    : 8,
    'FT_UINT16'  : 16,
    'FT_INT16'   : 16,
    'FT_UINT24'  : 24,
    'FT_INT24'   : 24,
    'FT_UINT32'  : 32,
    'FT_INT32'   : 32,
    'FT_UINT40'  : 40,
    'FT_INT40'   : 40,
    'FT_UINT48'  : 48,
    'FT_INT48'   : 48,
    'FT_UINT56'  : 56,
    'FT_INT56'   : 56,
    'FT_UINT64'  : 64,
    'FT_INT64'   : 64
}
+
+
+
+
# Simplified version of class that is in check_typed_item_calls.py
class Item:
    """An hf item as registered in a dissector file.

    Parses the mask (resolving simple #define macros via the supplied dict)
    and counts how many bits it sets, so callers can distinguish 1-bit
    fields from wider ones.
    """

    previousItem = None

    def __init__(self, filename, hf, filter, label, item_type, type_modifier, strings, macros, mask=None,
                 check_mask=False):
        self.filename = filename
        self.hf = hf
        self.filter = filter
        self.label = label
        self.strings = strings
        self.mask = mask

        # N.B. Not setting mask by looking up macros.

        self.item_type = item_type
        self.type_modifier = type_modifier

        self.set_mask_value(macros)

        # Count how many bits the mask sets within the field's width.
        self.bits_set = 0
        for n in range(0, self.get_field_width_in_bits()):
            if self.check_bit(self.mask_value, n):
                self.bits_set += 1

    def __str__(self):
        return 'Item ({0} "{1}" {2} type={3}:{4} strings={5} mask={6})'.format(self.filename, self.label, self.filter,
                                                                              self.item_type, self.type_modifier, self.strings, self.mask)

    def set_mask_value(self, macros):
        """Parse self.mask into self.mask_value; on failure set 0 and mask_read=False."""
        try:
            self.mask_read = True

            # Substitute mask if found as a macro..
            if self.mask in macros:
                self.mask = macros[self.mask]
            elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask):
                # Not a plain numeric literal (e.g. an expression) - give up.
                self.mask_read = False
                self.mask_value = 0
                return

            # Read according to the appropriate base.
            if self.mask.startswith('0x'):
                self.mask_value = int(self.mask, 16)
            elif self.mask.startswith('0'):
                self.mask_value = int(self.mask, 8)
            else:
                self.mask_value = int(self.mask, 10)
        except Exception:
            self.mask_read = False
            self.mask_value = 0

    # Return true if bit position n is set in value.
    # (The original defined this method twice, identically; once is enough.)
    def check_bit(self, value, n):
        return (value & (0x1 << n)) != 0

    def get_field_width_in_bits(self):
        """Return the width in bits implied by item_type/type_modifier (0 if unknown)."""
        if self.item_type == 'FT_BOOLEAN':
            if self.type_modifier == 'NULL':
                return 8    # i.e. 1 byte
            elif self.type_modifier == 'BASE_NONE':
                return 8
            elif self.type_modifier == 'SEP_DOT':   # from proto.h, only meant for FT_BYTES
                return 64
            else:
                try:
                    # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble.
                    return int((int(self.type_modifier) + 3)/4)*4
                except Exception:
                    return 0
        else:
            # Lookup fixed width for this type (0 if not known).
            return field_widths.get(self.item_type, 0)
+
+
+
+
+
def removeComments(code_string):
    """Strip C comments, C++ comments and '#if 0 ... #endif' regions from code."""
    # Each (pattern, flags) pair is removed in turn.
    for pattern, flags in ((r"/\*.*?\*/", re.DOTALL),   # C-style comment
                           (r"//.*?\n", 0),             # C++-style comment
                           (r"#if 0.*?#endif", re.DOTALL)):  # Ignored region
        code_string = re.sub(re.compile(pattern, flags), "", code_string)
    return code_string
+
+
# Look for true_false_string items in a dissector file.
def findTFS(filename):
    """Return a dict of name -> TFS for each true_false_string defined in filename.

    Example: const true_false_string tfs_yes_no = { "Yes", "No" };
    """
    with open(filename, 'r', encoding="utf8") as f:
        # Remove comments so as not to trip up RE.
        contents = removeComments(f.read())

    tfs_found = {}
    pattern = r'\sconst\s*true_false_string\s*([a-zA-Z0-9_]*)\s*=\s*{\s*\"([a-zA-Z_0-9/:! ]*)\"\s*,\s*\"([a-zA-Z_0-9/:! ]*)\"'
    for m in re.finditer(pattern, contents):
        # Store this entry under its variable name.
        tfs_found[m.group(1)] = TFS(filename, m.group(1), m.group(2), m.group(3))
    return tfs_found
+
# Look for value_string entries in a dissector file.
def findValueStrings(filename):
    """Return a dict of name -> ValueString for each value_string in filename.

    Example of the construct being matched:
        static const value_string radio_type_vals[] =
        {
            { 0, "FDD"},
            { 1, "TDD"},
            { 0, NULL }
        };
    """
    with open(filename, 'r', encoding="utf8") as f:
        # Remove comments so as not to trip up RE.
        contents = removeComments(f.read())

    vals_found = {}
    pattern = r'.*const value_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9\s\"]*)\};'
    for m in re.finditer(pattern, contents):
        vals_found[m.group(1)] = ValueString(filename, m.group(1), m.group(2))
    return vals_found
+
# Look for hf items (i.e. full item to be registered) in a dissector file.
def find_items(filename, macros, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False):
    """Return a dict of hf variable name -> Item for all hf items registered in filename."""
    is_generated = isGeneratedFile(filename)

    with open(filename, 'r', encoding="utf8") as f:
        # Remove comments so as not to trip up RE.
        contents = removeComments(f.read())

    # N.B. re extends all the way to HFILL to avoid greedy matching
    item_re = re.compile(r'.*\{\s*\&(hf_[a-z_A-Z0-9]*)\s*,\s*{\s*\"(.*?)\"\s*,\s*\"(.*?)\"\s*,\s*(.*?)\s*,\s*([0-9A-Z_\|\s]*?)\s*,\s*(.*?)\s*,\s*(.*?)\s*,\s*([a-zA-Z0-9\W\s_\u00f6\u00e4]*?)\s*,\s*HFILL')

    items = {}
    for m in item_re.finditer(contents):
        # Store this item keyed by its hf variable name.
        items[m.group(1)] = Item(filename, m.group(1), filter=m.group(3), label=m.group(2),
                                 item_type=m.group(4), type_modifier=m.group(5),
                                 strings=m.group(6), macros=macros, mask=m.group(7))
    return items
+
def find_macros(filename):
    """Return a dict of '#define NAME value' macros (numeric values) in filename."""
    with open(filename, 'r', encoding="utf8") as f:
        # Remove comments so as not to trip up RE.
        contents = removeComments(f.read())

    macros = {}
    for m in re.finditer(r'#define\s*([A-Z0-9_]*)\s*([0-9xa-fA-F]*)\n', contents):
        # Store this name -> value mapping.
        macros[m.group(1)] = m.group(2)
    return macros
+
+
+
def is_dissector_file(filename):
    """Return a truthy value if filename looks like a dissector source file."""
    # Anchored at '.c$' so that e.g. 'packet-foo.cnf' (asn2wrs config) is
    # not mistaken for a dissector source file.
    p = re.compile(r'.*packet-.*\.c$')
    return p.match(filename)
+
def findDissectorFilesInFolder(folder):
    """Return a list of dissector source files (with folder prefix) in folder.

    Files are visited in sorted order, to give some idea of how far through
    the scan is.  If Ctrl-C has been pressed, the files collected so far are
    returned (the original returned None here, which crashed callers that
    iterate the result).
    """
    files = []
    for f in sorted(os.listdir(folder)):
        if should_exit:
            break
        if is_dissector_file(f):
            files.append(os.path.join(folder, f))
    return files
+
+
+
# Running totals of problems found, updated by the checks below.
warnings_found = 0
errors_found = 0


# NOTE(review): appears unused in the visible code - verify before removing.
tfs_found = 0
+
# Check the given dissector file.
def checkFile(filename, common_tfs, look_for_common=False, check_value_strings=False):
    """Check one dissector file for TFS entries that duplicate shared ones.

    common_tfs:          dict of shared entries parsed from epan/tfs.c.
    look_for_common:     record custom entries so repeats can be reported later.
    check_value_strings: also look for value_strings that mimic a TFS.
    Problems are printed; the global warnings_found/errors_found counters
    are updated.
    """
    global warnings_found
    global errors_found

    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        print(filename, 'does not exist!')
        return

    # Find items.
    file_tfs = findTFS(filename)

    # See if any of these items already existed in tfs.c
    for f in file_tfs:
        for c in common_tfs:
            found = False

            #
            # Do not do this check for plugins; plugins cannot import
            # data values from libwireshark (functions, yes; data
            # values, no).
            #
            # Test whether there's a common prefix for the file name
            # and "plugin/epan/"; if so, this is a plugin, and there
            # is no common path and os.path.commonprefix returns an
            # empty string, otherwise it returns the common path, so
            # we check whether the common path is an empty string.
            #
            if os.path.commonprefix([filename, 'plugin/epan/']) == '':
                exact_case = False
                if file_tfs[f].val1 == common_tfs[c].val1 and file_tfs[f].val2 == common_tfs[c].val2:
                    found = True
                    exact_case = True
                elif file_tfs[f].val1.upper() == common_tfs[c].val1.upper() and file_tfs[f].val2.upper() == common_tfs[c].val2.upper():
                    found = True

                if found:
                    # Exact duplicates are errors; case-only differences warnings.
                    print("Error:" if exact_case else "Warn: ", filename, f, "- could have used", c, 'from tfs.c instead: ', common_tfs[c],
                          '' if exact_case else ' (capitalisation differs)')
                    if exact_case:
                        errors_found += 1
                    else:
                        warnings_found += 1
                    break
        # NOTE(review): 'found' is left over from the last inner iteration;
        # if common_tfs were empty this would raise NameError - verify.
        if not found:
            if look_for_common:
                AddCustomEntry(file_tfs[f].val1, file_tfs[f].val2, filename)

    if check_value_strings:
        # Get macros
        macros = find_macros(filename)

        # Get value_string entries.
        vs = findValueStrings(filename)

        # Also get hf items
        items = find_items(filename, macros, check_mask=True)

        for v in vs:
            if vs[v].looks_like_tfs:
                found = False
                exact_case = False

                #print('Candidate', v, vs[v])
                for c in common_tfs:
                    found = False

                    #
                    # Do not do this check for plugins; plugins cannot import
                    # data values from libwireshark (functions, yes; data
                    # values, no).
                    #
                    # Test whether there's a common prefix for the file name
                    # and "plugin/epan/"; if so, this is a plugin, and there
                    # is no common path and os.path.commonprefix returns an
                    # empty string, otherwise it returns the common path, so
                    # we check whether the common path is an empty string.
                    #
                    if os.path.commonprefix([filename, 'plugin/epan/']) == '':
                        exact_case = False
                        # NOTE(review): parsed_vals may have fewer than 2 keys when
                        # looks_like_tfs was left True with only one entry parsed;
                        # that would raise KeyError here - verify.
                        if common_tfs[c].val1 == vs[v].parsed_vals[True] and common_tfs[c].val2 == vs[v].parsed_vals[False]:
                            found = True
                            exact_case = True
                        elif common_tfs[c].val1.upper() == vs[v].parsed_vals[True].upper() and common_tfs[c].val2.upper() == vs[v].parsed_vals[False].upper():
                            found = True

                        # Do values match?
                        if found:
                            # OK, now look for items that:
                            # - have VALS(v) AND
                            # - have a mask width of 1 bit (no good if field can have values > 1...)
                            for i in items:
                                if re.match(r'VALS\(\s*'+v+r'\s*\)', items[i].strings):
                                    if items[i].bits_set == 1:
                                        print("Warn:" if exact_case else "Note:", filename, 'value_string', "'"+v+"'",
                                              "- could have used", c, 'from tfs.c instead: ', common_tfs[c], 'for', i,
                                              '' if exact_case else ' (capitalisation differs)')
                                        if exact_case:
                                            warnings_found += 1
+
+
#################################################################
# Main logic.

# command-line args. Controls which dissector files should be checked.
# If no args given, will just scan epan/dissectors folder.
parser = argparse.ArgumentParser(description='Check calls in dissectors')
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')
parser.add_argument('--check-value-strings', action='store_true',
                    help='check whether value_strings could have been tfs?')

parser.add_argument('--common', action='store_true',
                    help='check for potential new entries for tfs.c')


args = parser.parse_args()


# Get files from wherever command-line args indicate.
files = []
if args.file:
    # Add specified file(s)
    for f in args.file:
        # Allow bare filenames as a shorthand for epan/dissectors/<file>.
        if not f.startswith('epan'):
            f = os.path.join('epan', 'dissectors', f)
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        else:
            files.append(f)
elif args.commits:
    # Get files affected by specified number of commits.
    command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Will examine dissector files only
    files = list(filter(lambda f : is_dissector_file(f), files))
elif args.open:
    # Unstaged changes.
    command = ['git', 'diff', '--name-only']
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files = list(filter(lambda f : is_dissector_file(f), files))
    # Staged changes.
    command = ['git', 'diff', '--staged', '--name-only']
    files_staged = [f.decode('utf-8')
                    for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files_staged = list(filter(lambda f : is_dissector_file(f), files_staged))
    # Merge staged into unstaged, avoiding duplicates.
    for f in files_staged:
        if not f in files:
            files.append(f)
else:
    # Find all dissector files from folder.
    files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'))


# If scanning a subset of files, list them here.
print('Examining:')
if args.file or args.commits or args.open:
    if files:
        print(' '.join(files), '\n')
    else:
        print('No files to check.\n')
else:
    print('All dissector modules\n')


# Get standard/ shared ones.
tfs_entries = findTFS(os.path.join('epan', 'tfs.c'))

# Now check the files to see if they could have used shared ones instead.
for f in files:
    if should_exit:
        # Ctrl-C was pressed - stop with a failure status.
        exit(1)
    if not isGeneratedFile(f):
        checkFile(f, tfs_entries, look_for_common=args.common, check_value_strings=args.check_value_strings)

# Report on commonly-defined values.
if args.common:
    # Looking for items that could potentially be moved to tfs.c
    for c in custom_tfs_entries:
        # Only want to see items that have 3 or more occurrences.
        # Even then, probably only want to consider ones that sound generic.
        if len(custom_tfs_entries[c]) > 2:
            print(c, 'appears', len(custom_tfs_entries[c]), 'times, in: ', custom_tfs_entries[c])


# Show summary.
print(warnings_found, 'warnings found')
if errors_found:
    print(errors_found, 'errors found')
    exit(1)
diff --git a/tools/check_typed_item_calls.py b/tools/check_typed_item_calls.py
new file mode 100755
index 0000000..4800203
--- /dev/null
+++ b/tools/check_typed_item_calls.py
@@ -0,0 +1,1775 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import re
+import argparse
+import signal
+import subprocess
+
+# This utility scans the dissector code for various issues.
+# TODO:
+# - Create maps from type -> display types for hf items (see display (FIELDDISPLAY)) in docs/README.dissector
+
+
# Try to exit soon after Ctrl-C is pressed.
# Flag polled by the long-running scanning loops in this script.
should_exit = False

def signal_handler(sig, frame):
    """SIGINT handler: request an orderly exit from the scanning loops."""
    global should_exit
    print('You pressed Ctrl+C - exiting')
    should_exit = True

signal.signal(signal.SIGINT, signal_handler)
+
+
# Running totals of problems found, updated by the checks below.
warnings_found = 0
errors_found = 0

def name_has_one_of(name, substring_list):
    """Return True if name contains any of the given (lowercase) substrings,
    compared case-insensitively."""
    # Lower-case once, outside the scan (the original lowered per word).
    lower_name = name.lower()
    return any(word in lower_name for word in substring_list)
+
# An individual call to an API we are interested in.
# Used by APICheck below.
class Call:
    """One call site: the hf item used plus optional length/fields arguments."""

    def __init__(self, hf_name, macros, line_number=None, length=None, fields=None):
        self.hf_name = hf_name
        self.line_number = line_number
        self.fields = fields
        # Resolve length to an int where possible; otherwise leave it as None.
        self.length = None
        if length:
            try:
                self.length = int(length)
            except:
                # Not a plain number - perhaps an all-caps macro we know.
                if length.isupper() and length in macros:
                    try:
                        self.length = int(macros[length])
                    except:
                        pass
+
+
# These are variable names that have been seen to be used in calls..
# (i.e. an hf item passed through a local variable rather than by name,
# so a missing-item warning for them would be a false positive).
common_hf_var_names = { 'hf_index', 'hf_item', 'hf_idx', 'hf_x', 'hf_id', 'hf_cookie', 'hf_flag',
                        'hf_dos_time', 'hf_dos_date', 'hf_value', 'hf_num',
                        'hf_cause_value', 'hf_uuid',
                        'hf_endian', 'hf_ip', 'hf_port', 'hf_suff', 'hf_string', 'hf_uint',
                        'hf_tag', 'hf_type', 'hf_hdr', 'hf_field', 'hf_opcode', 'hf_size',
                        'hf_entry', 'field' }
+
# Expected length in bytes on the wire for each fixed-width field type.
item_lengths = {
    'FT_CHAR'   : 1,
    'FT_UINT8'  : 1,
    'FT_INT8'   : 1,
    'FT_UINT16' : 2,
    'FT_INT16'  : 2,
    'FT_UINT24' : 3,
    'FT_INT24'  : 3,
    'FT_UINT32' : 4,
    'FT_INT32'  : 4,
    'FT_UINT40' : 5,
    'FT_INT40'  : 5,
    'FT_UINT48' : 6,
    'FT_INT48'  : 6,
    'FT_UINT56' : 7,
    'FT_INT56'  : 7,
    'FT_UINT64' : 8,
    'FT_INT64'  : 8,
    'FT_ETHER'  : 6
}
# TODO: other types...
+
+
# A check for a particular API function.
class APICheck:
    """Finds calls to one proto_tree API function in dissector files and
    validates the hf items used against their registered types and masks."""

    def __init__(self, fun_name, allowed_types, positive_length=False):
        # Name of the API function to search for.
        self.fun_name = fun_name
        # Set of FT_ types this API may legitimately be called with.
        self.allowed_types = allowed_types
        # Whether the length argument must be > 0 (or -1).
        self.positive_length = positive_length
        self.calls = []

        # N.B. patterns are raw strings; the original used plain strings
        # whose '\s' etc. are invalid escape sequences in recent Python.
        if fun_name.startswith('ptvcursor'):
            # RE captures function name + 1st 2 args (always ptvc + hfindex)
            self.p = re.compile(r'[^\n]*' + self.fun_name + r'\s*\(([a-zA-Z0-9_]+),\s*([a-zA-Z0-9_]+)')
        elif fun_name.find('add_bitmask') == -1:
            # Normal case.
            # RE captures function name + 1st 2 args (always tree + hfindex + length)
            self.p = re.compile(r'[^\n]*' + self.fun_name + r'\s*\(([a-zA-Z0-9_]+),\s*([a-zA-Z0-9_]+),\s*[a-zA-Z0-9_]+,\s*[a-zA-Z0-9_]+,\s*([a-zA-Z0-9_]+)')
        else:
            # _add_bitmask functions.
            # RE captures function name + 1st + 4th args (always tree + hfindex)
            # 6th arg is 'fields'
            self.p = re.compile(r'[^\n]*' + self.fun_name + r'\s*\(([a-zA-Z0-9_]+),\s*[a-zA-Z0-9_]+,\s*[a-zA-Z0-9_]+,\s*([a-zA-Z0-9_]+)\s*,\s*[a-zA-Z0-9_]+\s*,\s*([a-zA-Z0-9_]+)\s*,')

        self.file = None
        # proto_tree_add_bits_*() calls must use items registered with a zero mask.
        self.mask_allowed = True
        if fun_name.find('proto_tree_add_bits_') != -1:
            self.mask_allowed = False

    def find_calls(self, file, macros):
        """Populate self.calls with every call to self.fun_name found in file."""
        self.file = file
        self.calls = []

        with open(file, 'r', encoding="utf8") as f:
            contents = f.read()
            lines = contents.splitlines()
            total_lines = len(lines)
            # Enumerate from 1 so line_number is 1-based and lines[line_number-1]
            # is the current line.  (The original enumerated from 0, so its
            # first iteration examined the *last* line of the file via
            # lines[-1], with continuation lines taken from the file start.)
            for line_number, line in enumerate(lines, start=1):
                # Want to check this, and next few lines
                to_check = lines[line_number-1] + '\n'
                # Nothing to check if function name isn't in it
                if to_check.find(self.fun_name) != -1:
                    # Ok, add the next file lines before trying RE
                    for i in range(1, 4):
                        if to_check.find(';') != -1:
                            break
                        elif line_number+i < total_lines:
                            to_check += (lines[line_number-1+i] + '\n')
                    m = self.p.search(to_check)
                    if m:
                        fields = None
                        length = None

                        if self.fun_name.find('add_bitmask') != -1:
                            fields = m.group(3)
                        else:
                            # We have a length only if the RE had 3 groups.
                            if self.p.groups == 3:
                                length = m.group(3)

                        # Add call.
                        self.calls.append(Call(m.group(2),
                                               macros,
                                               line_number=line_number,
                                               length=length,
                                               fields=fields))

    # Return true if bit position n is set in value.
    def check_bit(self, value, n):
        return (value & (0x1 << n)) != 0

    def does_mask_cover_value(self, mask, value):
        """Return False only if a low-order set bit of value is absent from mask.

        N.B. only the contiguous run of set bits starting at bit 0 is
        examined; set bits beyond the first clear bit are not checked.
        """
        n = 0
        # Walk through the low set bits of value and check they are in mask.
        while self.check_bit(value, n) and n <= 63:
            if not self.check_bit(mask, n):
                return False
            n += 1

        return True

    def check_against_items(self, items_defined, items_declared, items_declared_extern, check_missing_items=False,
                            field_arrays=None):
        """Validate the recorded calls against hf item definitions, printing
        problems and bumping the global warning/error counters."""
        global errors_found
        global warnings_found

        for call in self.calls:

            # Check lengths, but for now only for APIs that have length in bytes.
            if self.fun_name.find('add_bits') == -1 and call.hf_name in items_defined:
                if call.length and items_defined[call.hf_name].item_type in item_lengths:
                    if item_lengths[items_defined[call.hf_name].item_type] < call.length:
                        print('Warning:', self.file + ':' + str(call.line_number),
                              self.fun_name + ' called for', call.hf_name, ' - ',
                              'item type is', items_defined[call.hf_name].item_type, 'but call has len', call.length)
                        warnings_found += 1

            # Needs a +ve length
            if self.positive_length and call.length != None:
                if call.length != -1 and call.length <= 0:
                    print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' +
                          self.file + ':' + str(call.line_number) +
                          ' with length ' + str(call.length) + ' - must be > 0 or -1')
                    errors_found += 1

            if call.hf_name in items_defined:
                # Is type allowed?
                if not items_defined[call.hf_name].item_type in self.allowed_types:
                    print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' +
                          self.file + ':' + str(call.line_number) +
                          ' with type ' + items_defined[call.hf_name].item_type)
                    print(' (allowed types are', self.allowed_types, ')\n')
                    errors_found += 1
                # No mask allowed
                if not self.mask_allowed and items_defined[call.hf_name].mask_value != 0:
                    print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' +
                          self.file + ':' + str(call.line_number) +
                          ' with mask ' + items_defined[call.hf_name].mask + ' (must be zero!)\n')
                    errors_found += 1

            if self.fun_name.find('add_bitmask') != -1 and call.hf_name in items_defined and field_arrays:
                if call.fields in field_arrays:
                    if (items_defined[call.hf_name].mask_value and
                            field_arrays[call.fields][1] != 0 and items_defined[call.hf_name].mask_value != field_arrays[call.fields][1]):
                        # TODO: only really a problem if bit is set in array but not in top-level item?
                        if not self.does_mask_cover_value(items_defined[call.hf_name].mask_value,
                                                          field_arrays[call.fields][1]):
                            print('Warning:', self.file, call.hf_name, call.fields, "masks don't match. root=",
                                  items_defined[call.hf_name].mask,
                                  "array has", hex(field_arrays[call.fields][1]))
                            warnings_found += 1

            if check_missing_items:
                if call.hf_name in items_declared and not call.hf_name in items_declared_extern:
                    #not in common_hf_var_names:
                    print('Warning:', self.file + ':' + str(call.line_number),
                          self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found')
                    warnings_found += 1
+
+
# Specialization of APICheck for add_item() calls
class ProtoTreeAddItemCheck(APICheck):
    """Checks proto_tree_add_item() (or, with ptv set, ptvcursor_add()) calls:
    flags implausible encoding arguments and call lengths longer than the
    registered item type allows."""

    def __init__(self, ptv=None):

        # RE will capture whole call.

        if not ptv:
            # proto_item *
            # proto_tree_add_item(proto_tree *tree, int hfindex, tvbuff_t *tvb,
            #                     const gint start, gint length, const guint encoding)
            self.fun_name = 'proto_tree_add_item'
            self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(\s*[a-zA-Z0-9_]+?,\s*([a-zA-Z0-9_]+?),\s*[a-zA-Z0-9_\+\s]+?,\s*[^,.]+?,\s*(.+),\s*([^,.]+?)\);')
        else:
            # proto_item *
            # ptvcursor_add(ptvcursor_t *ptvc, int hfindex, gint length,
            #               const guint encoding)
            self.fun_name = 'ptvcursor_add'
            self.p = re.compile('[^\n]*' + self.fun_name + '\s*\([^,.]+?,\s*([^,.]+?),\s*([^,.]+?),\s*([a-zA-Z0-9_\-\>]+)')

    def find_calls(self, file, macros):
        """Populate self.calls with every call to self.fun_name found in file,
        warning about suspicious-looking encoding arguments as it goes."""
        self.file = file
        self.calls = []
        with open(file, 'r', encoding="utf8") as f:

            contents = f.read()
            lines = contents.splitlines()
            total_lines = len(lines)
            # NOTE(review): enumerate() is 0-based here, so the first iteration
            # examines lines[-1] (the last line of the file) with continuation
            # lines taken from the file start; consider enumerate(lines, start=1)
            # - verify intent.
            for line_number,line in enumerate(lines):
                # Want to check this, and next few lines
                to_check = lines[line_number-1] + '\n'
                # Nothing to check if function name isn't in it
                fun_idx = to_check.find(self.fun_name)
                if fun_idx != -1:
                    # Ok, add the next file lines before trying RE
                    for i in range(1, 5):
                        if to_check.find(';') != -1:
                            break
                        elif line_number+i < total_lines:
                            to_check += (lines[line_number-1+i] + '\n')
                    # Lose anything before function call itself.
                    to_check = to_check[fun_idx:]
                    m = self.p.search(to_check)
                    if m:
                        # Throw out if parens not matched
                        if m.group(0).count('(') != m.group(0).count(')'):
                            continue

                        enc = m.group(3)
                        hf_name = m.group(1)
                        # Allow-list of variables/expressions known to carry a
                        # valid encoding, even though they don't start 'ENC_'.
                        if not enc.startswith('ENC_'):
                            if not enc in { 'encoding', 'enc', 'client_is_le', 'cigi_byte_order', 'endian', 'endianess', 'machine_encoding', 'byte_order', 'bLittleEndian',
                                            'p_mq_parm->mq_str_enc', 'p_mq_parm->mq_int_enc',
                                            'iEnc', 'strid_enc', 'iCod', 'nl_data->encoding',
                                            'argp->info->encoding', 'gquic_info->encoding', 'writer_encoding',
                                            'tds_get_int2_encoding(tds_info)',
                                            'tds_get_int4_encoding(tds_info)',
                                            'tds_get_char_encoding(tds_info)',
                                            'info->encoding',
                                            'item->encoding',
                                            'DREP_ENC_INTEGER(drep)', 'string_encoding', 'item', 'type',
                                            'dvb_enc_to_item_enc(encoding)',
                                            'packet->enc',
                                            'IS_EBCDIC(uCCS) ? ENC_EBCDIC : ENC_ASCII',
                                            'DREP_ENC_INTEGER(hdr->drep)',
                                            'dhcp_uuid_endian',
                                            'payload_le',
                                            'local_encoding',
                                            'big_endian',
                                            'hf_data_encoding',
                                            'IS_EBCDIC(eStr) ? ENC_EBCDIC : ENC_ASCII',
                                            'big_endian ? ENC_BIG_ENDIAN : ENC_LITTLE_ENDIAN',
                                            '(skip == 1) ? ENC_BIG_ENDIAN : ENC_LITTLE_ENDIAN',
                                            'pdu_info->sbc', 'pdu_info->mbc',
                                            'seq_info->txt_enc | ENC_NA',
                                            'BASE_SHOW_UTF_8_PRINTABLE',
                                            'dhcp_secs_endian',
                                            'is_mdns ? ENC_UTF_8|ENC_NA : ENC_ASCII|ENC_NA'
                                          }:
                                global warnings_found

                                print('Warning:', self.file + ':' + str(line_number),
                                      self.fun_name + ' called for "' + hf_name + '"', 'check last/enc param:', enc, '?')
                                warnings_found += 1
                        self.calls.append(Call(hf_name, macros, line_number=line_number, length=m.group(2)))

    def check_against_items(self, items_defined, items_declared, items_declared_extern,
                            check_missing_items=False, field_arrays=None):
        # For now, only complaining if length if call is longer than the item type implies.
        #
        # Could also be bugs where the length is always less than the type allows.
        # Would involve keeping track (in the item) of whether any call had used the full length.

        global warnings_found

        for call in self.calls:
            if call.hf_name in items_defined:
                # Flag calls whose length exceeds what the item type can hold.
                if call.length and items_defined[call.hf_name].item_type in item_lengths:
                    if item_lengths[items_defined[call.hf_name].item_type] < call.length:
                        print('Warning:', self.file + ':' + str(call.line_number),
                              self.fun_name + ' called for', call.hf_name, ' - ',
                              'item type is', items_defined[call.hf_name].item_type, 'but call has len', call.length)
                        warnings_found += 1
            elif check_missing_items:
                if call.hf_name in items_declared and not call.hf_name in items_declared_extern:
                    #not in common_hf_var_names:
                    print('Warning:', self.file + ':' + str(call.line_number),
                          self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found')
                    warnings_found += 1
+
+
+
##################################################################################################
# This is a set of items (by filter name) where we know that the bitmask is non-contiguous,
# but is still believed to be correct.
# Items listed here are exempted from non-contiguous-mask warnings.
known_non_contiguous_fields = { 'wlan.fixed.capabilities.cfpoll.sta',
                                'wlan.wfa.ie.wme.qos_info.sta.reserved',
                                'btrfcomm.frame_type',   # https://os.itec.kit.edu/downloads/sa_2006_roehricht-martin_flow-control-in-bluez.pdf
                                'capwap.control.message_element.ac_descriptor.dtls_policy.r',   # RFC 5415
                                'couchbase.extras.subdoc.flags.reserved',
                                'wlan.fixed.capabilities.cfpoll.ap',   # These are 3 separate bits...
                                'wlan.wfa.ie.wme.tspec.ts_info.reserved',   # matches other fields in same sequence
                                'zbee_zcl_se.pp.attr.payment_control_configuration.reserved',   # matches other fields in same sequence
                                'zbee_zcl_se.pp.snapshot_payload_cause.reserved',   # matches other fields in same sequence
                                'ebhscr.eth.rsv',   # matches other fields in same sequence
                                'v120.lli',   # non-contiguous field (http://www.acacia-net.com/wwwcla/protocol/v120_l2.htm)
                                'stun.type.class',
                                'bssgp.csg_id', 'tiff.t6.unused', 'artnet.ip_prog_reply.unused',
                                'telnet.auth.mod.enc', 'osc.message.midi.bender', 'btle.data_header.rfu',
                                'stun.type.method',   # figure 3 in rfc 5389
                                'tds.done.status',   # covers all bits in bitset
                                'hf_iax2_video_csub',   # RFC 5456, table 8.7
                                'iax2.video.subclass',
                                'dnp3.al.ana.int',
                                'pwcesopsn.cw.lm',
                                'gsm_a.rr.format_id',   # EN 301 503
                                'siii.mst.phase',   # comment in code seems convinced
                                'xmcp.type.class',
                                'xmcp.type.method',
                                'hf_hiqnet_flags',
                                'hf_hiqnet_flagmask',
                                'hf_h223_mux_mpl',
                                'rdp.flags.pkt'
                              }
##################################################################################################
+
+
# Width in bits implied by each fixed-size field type, used when counting
# mask bits for an item.
field_widths = {
    'FT_BOOLEAN' : 64,   # TODO: Width depends upon 'display' field
    'FT_CHAR'    : 8,
    'FT_UINT8'   : 8,
    'FT_INT8'    : 8,
    'FT_UINT16'  : 16,
    'FT_INT16'   : 16,
    'FT_UINT24'  : 24,
    'FT_INT24'   : 24,
    'FT_UINT32'  : 32,
    'FT_INT32'   : 32,
    'FT_UINT40'  : 40,
    'FT_INT40'   : 40,
    'FT_UINT48'  : 48,
    'FT_INT48'   : 48,
    'FT_UINT56'  : 56,
    'FT_INT56'   : 56,
    'FT_UINT64'  : 64,
    'FT_INT64'   : 64
}
+
# TODO: most of these might as well be strings...
# Patterns for filters that are known/expected to appear on consecutive hf items
# with different labels. Compiled once at module load - the original rebuilt this
# ~120-entry list on every call, which was pure per-call overhead.
_ignored_consecutive_filter_patterns = [
    re.compile(r'^elf.sh_type'),
    re.compile(r'^elf.p_type'),
    re.compile(r'^btavrcp.pdu_id'),
    re.compile(r'^nstrace.trcdbg.val(\d+)'),
    re.compile(r'^netlogon.dummy_string'),
    re.compile(r'^opa.reserved'),
    re.compile(r'^mpls_pm.timestamp\d\..*'),
    re.compile(r'^wassp.data.mu_mac'),
    re.compile(r'^thrift.type'),
    re.compile(r'^quake2.game.client.command.move.angles'),
    re.compile(r'^ipp.enum_value'),
    re.compile(r'^idrp.error.subcode'),
    re.compile(r'^ftdi-ft.lValue'),
    re.compile(r'^6lowpan.src'),
    re.compile(r'^couchbase.flex_frame.frame.id'),
    re.compile(r'^rtps.param.id'),
    re.compile(r'^rtps.locator.port'),
    re.compile(r'^sigcomp.udvm.value'),
    re.compile(r'^opa.mad.attributemodifier.n'),
    re.compile(r'^smb.cmd'),
    re.compile(r'^sctp.checksum'),
    re.compile(r'^dhcp.option.end'),
    re.compile(r'^nfapi.num.bf.vector.bf.value'),
    re.compile(r'^dnp3.al.range.abs'),
    re.compile(r'^dnp3.al.range.quantity'),
    re.compile(r'^dnp3.al.index'),
    re.compile(r'^dnp3.al.size'),
    re.compile(r'^ftdi-ft.hValue'),
    re.compile(r'^homeplug_av.op_attr_cnf.data.sw_sub'),
    re.compile(r'^radiotap.he_mu.preamble_puncturing'),
    re.compile(r'^ndmp.file'),
    re.compile(r'^ocfs2.dlm.lvb'),
    re.compile(r'^oran_fh_cus.reserved'),
    re.compile(r'^qnet6.kif.msgsend.msg.read.xtypes0-7'),
    re.compile(r'^qnet6.kif.msgsend.msg.write.xtypes0-7'),
    re.compile(r'^mih.sig_strength'),
    re.compile(r'^couchbase.flex_frame.frame.len'),
    re.compile(r'^nvme-rdma.read_to_host_req'),
    re.compile(r'^rpcap.dummy'),
    re.compile(r'^sflow.flow_sample.output_interface'),
    re.compile(r'^socks.results'),
    re.compile(r'^opa.mad.attributemodifier.p'),
    re.compile(r'^v5ua.efa'),
    re.compile(r'^zbncp.data.tx_power'),
    re.compile(r'^zbncp.data.nwk_addr'),
    re.compile(r'^zbee_zcl_hvac.pump_config_control.attr.ctrl_mode'),
    re.compile(r'^nat-pmp.external_port'),
    re.compile(r'^zbee_zcl.attr.float'),
    re.compile(r'^wpan-tap.phr.fsk_ms.mode'),
    re.compile(r'^mysql.exec_flags'),
    re.compile(r'^pim.metric_pref'),
    re.compile(r'^modbus.regval_float'),
    re.compile(r'^alcap.cau.value'),
    re.compile(r'^bpv7.crc_field'),
    re.compile(r'^at.chld.mode'),
    re.compile(r'^btl2cap.psm'),
    re.compile(r'^srvloc.srvtypereq.nameauthlistlen'),
    re.compile(r'^a11.ext.code'),
    re.compile(r'^adwin_config.port'),
    re.compile(r'^afp.unknown'),
    re.compile(r'^ansi_a_bsmap.mid.digit_1'),
    re.compile(r'^ber.unknown.OCTETSTRING'),
    re.compile(r'^btatt.handle'),
    re.compile(r'^btl2cap.option_flushto'),
    re.compile(r'^cip.network_segment.prod_inhibit'),
    re.compile(r'^cql.result.rows.table_name'),
    re.compile(r'^dcom.sa.vartype'),
    re.compile(r'^f5ethtrailer.slot'),
    re.compile(r'^ipdr.cm_ipv6_addr'),
    re.compile(r'^mojito.kuid'),
    re.compile(r'^mtp3.priority'),
    re.compile(r'^pw.cw.length'),
    re.compile(r'^rlc.ciphered_data'),
    re.compile(r'^vp8.pld.pictureid'),
    re.compile(r'^gryphon.sched.channel'),
    re.compile(r'^pn_io.ioxs'),
    re.compile(r'^pn_dcp.block_qualifier_reset'),
    re.compile(r'^pn_dcp.suboption_device_instance'),
    re.compile(r'^nfs.attr'),
    re.compile(r'^nfs.create_session_flags'),
    re.compile(r'^rmt-lct.toi64'),
    re.compile(r'^gryphon.data.header_length'),
    re.compile(r'^quake2.game.client.command.move.movement'),
    re.compile(r'^isup.parameter_type'),
    re.compile(r'^cip.port'),
    re.compile(r'^adwin.fifo_no'),
    re.compile(r'^bthci_evt.hci_vers_nr'),
    re.compile(r'^gryphon.usdt.stmin_active'),
    re.compile(r'^dnp3.al.anaout.int'),
    re.compile(r'^dnp3.al.ana.int'),
    re.compile(r'^dnp3.al.cnt'),
    re.compile(r'^bthfp.chld.mode'),
    re.compile(r'^nat-pmp.pml'),
    re.compile(r'^isystemactivator.actproperties.ts.hdr'),
    re.compile(r'^rtpdump.txt_addr'),
    re.compile(r'^unistim.vocoder.id'),
    re.compile(r'^mac.ueid'),
    # N.B. the following patterns are unanchored; re.match() still only
    # matches at the start of the filter, so behavior is the same as '^'.
    re.compile(r'cip.symbol.size'),
    re.compile(r'dnp3.al.range.start'),
    re.compile(r'dnp3.al.range.stop'),
    re.compile(r'gtpv2.mp'),
    re.compile(r'gvcp.cmd.resend.firstpacketid'),
    re.compile(r'gvcp.cmd.resend.lastpacketid'),
    re.compile(r'wlan.bf.reserved'),
    re.compile(r'opa.sa.reserved'),
    re.compile(r'rmt-lct.ext_tol_transfer_len'),
    re.compile(r'pn_io.error_code2'),
    re.compile(r'gryphon.ldf.schedsize'),
    re.compile(r'wimaxmacphy.burst_opt_mimo_matrix_indicator'),
    re.compile(r'alcap.*bwt.*.[b|f]w'),
    re.compile(r'ccsds.packet_type'),
    re.compile(r'iso15765.flow_control.stmin'),
    re.compile(r'msdo.PieceSize'),
    re.compile(r'opa.clasportinfo.redirect.reserved'),
    re.compile(r'p_mul.unused'),
    re.compile(r'btle.control.phys.le_[1|2]m_phy'),
    re.compile(r'opa.pm.dataportcounters.reserved'),
    re.compile(r'opa.switchinfo.switchcapabilitymask.reserved'),
    re.compile(r'nvme-rdma.read_from_host_resp'),
    re.compile(r'nvme-rdma.write_to_host_req'),
    re.compile(r'netlink-route.ifla_linkstats.rx_errors.fifo_errs'),
    re.compile(r'mtp3mg.japan_spare'),
    re.compile(r'ixveriwave.errors.ip_checksum_error'),
    re.compile(r'ansi_a_bsmap.cm2.scm.bc_entry.opmode[0|1]')
]


def is_ignored_consecutive_filter(filter):
    """Return True if this filter name is known to legitimately appear on
    consecutive hf items with different labels, so no warning is needed."""
    return any(patt.match(filter) for patt in _ignored_consecutive_filter_patterns)
+
+
class ValueString:
    """One value_string table parsed from dissector source.

    Parses the raw text of the table body into parsed_vals (value -> label,
    labels kept *with* their surrounding double-quotes), tracking min/max
    value and reporting duplicate values/labels as it goes.
    Relies on the module globals 'warnings_found' and the imported 're'.
    """

    def __init__(self, file, name, vals, macros, do_extra_checks=False):
        self.file = file
        self.name = name
        self.raw_vals = vals          # raw table-body text between { and };
        self.parsed_vals = {}         # value (int) -> label (str, includes quotes)
        self.seen_labels = set()
        self.valid = True             # set False if an entry cannot be parsed
        self.min_value = 99999        # sentinels; updated as entries parse
        self.max_value = -99999

        # Now parse out each entry in the value_string
        matches = re.finditer(r'\{\s*([0-9_A-Za-z]*)\s*,\s*(".*?")\s*}\s*,', self.raw_vals)
        for m in matches:
            value,label = m.group(1), m.group(2)
            # Substitute a macro value if known; otherwise give up on anything
            # that isn't a plain numeric literal.
            if value in macros:
                value = macros[value]
            elif any(not c in '0123456789abcdefABCDEFxX' for c in value):
                self.valid = False
                return

            try:
                # Read according to the appropriate base.
                if value.lower().startswith('0x'):
                    value = int(value, 16)
                elif value.startswith('0b'):
                    value = int(value[2:], 2)
                elif value.startswith('0'):
                    value = int(value, 8)
                else:
                    value = int(value, 10)
            except:
                # Unparseable number - stop parsing this table quietly.
                return

            global warnings_found

            # Check for value conflict before inserting
            if value in self.parsed_vals and label != self.parsed_vals[value]:
                print('Warning:', self.file, ': value_string', self.name, '- value ', value, 'repeated with different values - was',
                      self.parsed_vals[value], 'now', label)
                warnings_found += 1
            else:
                # Add into table, while checking for repeated label
                self.parsed_vals[value] = label
                if do_extra_checks and label in self.seen_labels:
                    # These are commonly repeated..
                    exceptions = [ 'reserved', 'invalid', 'unused', 'not used', 'unknown', 'undefined', 'spare',
                                   'unallocated', 'not assigned', 'implementation specific', 'unspecified',
                                   'other', 'for further study', 'future', 'vendor specific', 'obsolete', 'none',
                                   'shall not be used', 'national use', 'unassigned', 'oem', 'user defined',
                                   'manufacturer specific', 'not specified', 'proprietary', 'operator-defined',
                                   'dynamically allocated', 'user specified', 'xxx', 'default', 'planned', 'not req' ]
                    excepted = False
                    for ex in exceptions:
                        if label.lower().find(ex) != -1:
                            excepted = True
                            break

                    if not excepted:
                        print('Warning:', self.file, ': value_string', self.name, '- label ', label, 'repeated')
                        warnings_found += 1
                else:
                    self.seen_labels.add(label)

            if value > self.max_value:
                self.max_value = value
            if value < self.min_value:
                self.min_value = value

    def extraChecks(self):
        """Optional heuristic checks over the whole parsed table."""
        global warnings_found

        # Look for one value missing in range (quite common...)
        num_items = len(self.parsed_vals)
        span = self.max_value - self.min_value + 1
        if num_items > 4 and span > num_items and (span-num_items <=1):
            for val in range(self.min_value, self.max_value):
                if not val in self.parsed_vals:
                    print('Warning:', self.file, ': value_string', self.name, '- value', val, 'missing?', '(', num_items, 'entries)')
                    global warnings_found
                    warnings_found += 1

        # Do most of the labels match the number?
        matching_label_entries = set()
        for val in self.parsed_vals:
            if self.parsed_vals[val].find(str(val)) != -1:
                # TODO: pick out multiple values rather than concat into wrong number
                parsed_value = int(''.join(d for d in self.parsed_vals[val] if d.isdecimal()))
                if val == parsed_value:
                    matching_label_entries.add(val)

        if len(matching_label_entries) >= 4 and len(matching_label_entries) > 0 and len(matching_label_entries) < num_items and len(matching_label_entries) >= num_items-1:
            # Be forgiving about first or last entry
            first_val = list(self.parsed_vals)[0]
            last_val = list(self.parsed_vals)[-1]
            if not first_val in matching_label_entries or not last_val in matching_label_entries:
                return
            print('Warning:', self.file, ': value_string', self.name, 'Labels match value except for 1!', matching_label_entries, num_items, self)

        # Do all labels start with lower-or-upper char?
        # N.B. index [1] because labels are stored with their quotes, so [0] is '"'.
        startLower,startUpper = 0,0
        for val in self.parsed_vals:
            first_letter = self.parsed_vals[val][1]
            if first_letter.isalpha():
                if first_letter.isupper():
                    startUpper += 1
                else:
                    startLower += 1
        if startLower > 0 and startUpper > 0:
            if startLower+startUpper > 10 and (startLower <=3 or startUpper <=3):
                standouts = []
                if startLower < startUpper:
                    standouts += [self.parsed_vals[val] for val in self.parsed_vals if self.parsed_vals[val][1].islower()]
                if startLower > startUpper:
                    standouts += [self.parsed_vals[val] for val in self.parsed_vals if self.parsed_vals[val][1].isupper()]

                print('Note:', self.file, ': value_string', self.name, 'mix of upper', startUpper, 'and lower', startLower, standouts)


    def __str__(self):
        return self.name + '= { ' + self.raw_vals + ' }'
+
+
class RangeStringEntry:
    """A single (min, max, label) row of a range_string table."""

    def __init__(self, min, max, label):
        # Parameter names intentionally mirror the C table fields
        # (they shadow builtins, but callers pass positionally).
        self.min = min
        self.max = max
        self.label = label

    def hides(self, min, max):
        """True if [min, max] lies entirely inside this entry's range,
        i.e. a later entry with that range could never be matched."""
        return self.min <= min and max <= self.max

    def __str__(self):
        return f'({self.min}, {self.max}) -> {self.label}'
+
+
class RangeString:
    """One range_string table parsed from dissector source.

    Parses the raw table-body text into parsed_vals (a list of
    RangeStringEntry), warning about hidden entries, min > max, and labels
    with leading/trailing spaces. Relies on module global 'warnings_found'.
    """

    def __init__(self, file, name, vals, macros, do_extra_checks=False):
        self.file = file
        self.name = name
        self.raw_vals = vals          # raw table-body text between { and };
        self.parsed_vals = []         # list of RangeStringEntry
        self.seen_labels = set()
        self.valid = True             # set False if an entry cannot be parsed
        self.min_value = 99999        # sentinels; updated as entries parse
        self.max_value = -99999

        # Now parse out each entry in the range_string
        matches = re.finditer(r'\{\s*([0-9_A-Za-z]*)\s*,\s*([0-9_A-Za-z]*)\s*,\s*(".*?")\s*}\s*,', self.raw_vals)
        for m in matches:
            min,max,label = m.group(1), m.group(2), m.group(3)
            # Substitute macros; give up on anything that isn't a numeric literal.
            if min in macros:
                min = macros[min]
            elif any(not c in '0123456789abcdefABCDEFxX' for c in min):
                self.valid = False
                return
            if max in macros:
                max = macros[max]
            elif any(not c in '0123456789abcdefABCDEFxX' for c in max):
                self.valid = False
                return


            try:
                # Read according to the appropriate base.
                if min.lower().startswith('0x'):
                    min = int(min, 16)
                elif min.startswith('0b'):
                    min = int(min[2:], 2)
                elif min.startswith('0'):
                    min = int(min, 8)
                else:
                    min = int(min, 10)

                if max.lower().startswith('0x'):
                    max = int(max, 16)
                elif max.startswith('0b'):
                    max = int(max[2:], 2)
                elif max.startswith('0'):
                    max = int(max, 8)
                else:
                    max = int(max, 10)
            except:
                # Unparseable number - stop parsing this table quietly.
                return

            # Now check what we've found.
            global warnings_found

            if min < self.min_value:
                self.min_value = min
            # For overall max value, still use min of each entry.
            # It is common for entries to extend to e.g. 0xff, but at least we can check for items
            # that can never match if we only check the min.
            if min > self.max_value:
                self.max_value = min

            # This value should not be entirely hidden by earlier entries
            for prev in self.parsed_vals:
                if prev.hides(min, max):
                    print('Warning:', self.file, ': range_string label', label, 'hidden by', prev)
                    warnings_found += 1

            # Max should not be > min
            if min > max:
                print('Warning:', self.file, ': range_string', self.name, 'entry', label, 'min', min, '>', max)
                warnings_found += 1

            # Check label.
            # N.B. label still includes its quotes, so [1:-1] is the text itself.
            if label[1:-1].startswith(' ') or label[1:-1].endswith(' '):
                print('Warning:', self.file, ': range_string', self.name, 'entry', label, 'starts or ends with space')
                warnings_found += 1

            # OK, add this entry
            self.parsed_vals.append(RangeStringEntry(min, max, label))

    def extraChecks(self):
        """Placeholder for whole-table heuristics (none implemented yet)."""
        pass
        # TODO: some checks over all entries. e.g.,
        # - can multiple values be coalesced into 1?
        # - if in all cases min==max, suggest value_string instead?
+
+
+
+
# Look for value_string entries in a dissector file. Return a dict name -> ValueString
def findValueStrings(filename, macros, do_extra_checks=False):
    """Scan a dissector source file for value_string tables.

    Example of what is matched:
        static const value_string radio_type_vals[] =
        {
            { 0, "FDD"},
            { 1, "TDD"},
            { 0, NULL }
        };

    Returns a dict mapping table name -> ValueString.
    """
    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()

    # Strip comments first, so they cannot confuse the table regex.
    contents = removeComments(contents)

    table_re = re.compile(r'.*const value_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9_\-\*\#\.:\/\(\)\'\s\"]*)\};')
    return { m.group(1): ValueString(filename, m.group(1), m.group(2), macros, do_extra_checks)
             for m in table_re.finditer(contents) }
+
# Look for range_string entries in a dissector file. Return a dict name -> RangeString
def findRangeStrings(filename, macros, do_extra_checks=False):
    """Scan a dissector source file for range_string tables.

    Example of what is matched:
        static const range_string symbol_table_shndx_rvals[] = {
            { 0x0000, 0x0000, "Undefined" },
            { 0x0001, 0xfeff, "Normal Section" },
            { 0, 0, NULL }
        };

    Returns a dict mapping table name -> RangeString.
    """
    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()

    # Strip comments first, so they cannot confuse the table regex.
    contents = removeComments(contents)

    table_re = re.compile(r'.*const range_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9_\-\*\#\.:\/\(\)\'\s\"]*)\};')
    return { m.group(1): RangeString(filename, m.group(1), m.group(2), macros, do_extra_checks)
             for m in table_re.finditer(contents) }
+
+
+
# The relevant parts of an hf item. Used as value in dict where hf variable name is key.
class Item:
    """One hf (header-field) registration entry.

    Most checks run at construction time: label sanity, mask contiguity and
    width, and whether any associated value_string/range_string fits within
    the masked field. Relies on module globals 'warnings_found',
    'errors_found', 'field_widths', 'known_non_contiguous_fields' and the
    helpers 'is_ignored_consecutive_filter' / 'name_has_one_of'.
    """

    # Keep the previous few items (class-level, shared across all files scanned)
    previousItems = []

    def __init__(self, filename, hf, filter, label, item_type, display, strings, macros,
                 value_strings, range_strings,
                 mask=None, check_mask=False, mask_exact_width=False, check_label=False,
                 check_consecutive=False, blurb=''):
        self.filename = filename
        self.hf = hf
        self.filter = filter
        self.label = label
        self.mask = mask
        self.strings = strings
        self.mask_exact_width = mask_exact_width

        global warnings_found

        self.set_mask_value(macros)

        if check_consecutive:
            # Warn when the same filter appeared in the last few items with a different label.
            for previous_index,previous_item in enumerate(Item.previousItems):
                if previous_item.filter == filter:
                    if label != previous_item.label:
                        if not is_ignored_consecutive_filter(self.filter):
                            print('Warning:', filename, hf, ': - filter "' + filter +
                                  '" appears ' + str(previous_index+1) + ' items before - labels are "' + previous_item.label + '" and "' + label + '"')
                            warnings_found += 1

            # Add this one to front of (short) previous list
            Item.previousItems = [self] + Item.previousItems
            if len(Item.previousItems) > 5:
                # Get rid of oldest one now
                #Item.previousItems = Item.previousItems[:-1]
                Item.previousItems.pop()

        self.item_type = item_type
        self.display = display

        # Optionally check label (short and long).
        if check_label:
            self.check_label(label, 'label')
            #self.check_label(blurb, 'blurb')

        # Optionally check that mask bits are contiguous
        if check_mask:
            if self.mask_read and not mask in { 'NULL', '0x0', '0', '0x00' }:
                self.check_contiguous_bits(mask)
                self.check_num_digits(self.mask)
                # N.B., if last entry in set is removed, see around 18,000 warnings
                self.check_digits_all_zeros(self.mask)

        # N.B. these checks are already done by checkApis.pl
        if strings.find('RVALS') != -1 and display.find('BASE_RANGE_STRING') == -1:
            print('Warning: ' + filename, hf, 'filter "' + filter + ' strings has RVALS but display lacks BASE_RANGE_STRING')
            warnings_found += 1

        # For RVALS, is BASE_RANGE_STRING also set (checked by checkApis.pl)?
        if strings.find('VALS_EXT_PTR') != -1 and display.find('BASE_EXT_STRING') == -1:
            print('Warning: ' + filename, hf, 'filter "' + filter + ' strings has VALS_EXT_PTR but display lacks BASE_EXT_STRING')
            warnings_found += 1

        # For VALS, lookup the corresponding ValueString and try to check range.
        vs_re = re.compile(r'VALS\(([a-zA-Z0-9_]*)\)')
        m = vs_re.search(strings)
        if m:
            self.vs_name = m.group(1)
            if self.vs_name in value_strings:
                vs = value_strings[self.vs_name]
                self.check_value_string_range(vs.min_value, vs.max_value)

        # For RVALS, lookup the corresponding RangeString and try to check range.
        rs_re = re.compile(r'RVALS\(([a-zA-Z0-9_]*)\)')
        m = rs_re.search(strings)
        if m:
            self.rs_name = m.group(1)
            if self.rs_name in range_strings:
                rs = range_strings[self.rs_name]
                self.check_range_string_range(rs.min_value, rs.max_value)


    def __str__(self):
        return 'Item ({0} "{1}" {2} type={3}:{4} {5} mask={6})'.format(self.filename, self.label, self.filter, self.item_type, self.display, self.strings, self.mask)

    def check_label(self, label, label_name):
        """Warn about labels with stray spaces, unbalanced brackets, or a trailing colon."""
        global warnings_found

        # TODO: this is masking a bug where the re for the item can't cope with macro for containing ',' for mask arg..
        if label.count('"') == 1:
            return

        if label.startswith(' ') or label.endswith(' '):
            print('Warning: ' + self.filename, self.hf, 'filter "' + self.filter, label_name, '"' + label + '" begins or ends with a space')
            warnings_found += 1

        if (label.count('(') != label.count(')') or
            label.count('[') != label.count(']') or
            label.count('{') != label.count('}')):
            # Ignore if includes quotes, as may be unbalanced.
            if label.find("'") == -1:
                print('Warning: ' + self.filename, self.hf, 'filter "' + self.filter + '"', label_name, '"' + label + '"', 'has unbalanced parens/braces/brackets')
                warnings_found += 1
        if self.item_type != 'FT_NONE' and label.endswith(':'):
            print('Warning: ' + self.filename, self.hf, 'filter "' + self.filter + '"', label_name, '"' + label + '"', 'ends with an unnecessary colon')
            warnings_found += 1


    def set_mask_value(self, macros):
        """Parse self.mask into self.mask_value (int); sets mask_read False on failure."""
        try:
            self.mask_read = True

            # Substitute mask if found as a macro..
            if self.mask in macros:
                self.mask = macros[self.mask]
            elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask):
                self.mask_read = False
                self.mask_value = 0
                return


            # Read according to the appropriate base.
            if self.mask.startswith('0x'):
                self.mask_value = int(self.mask, 16)
            elif self.mask.startswith('0'):
                self.mask_value = int(self.mask, 8)
            else:
                self.mask_value = int(self.mask, 10)
        except:
            self.mask_read = False
            self.mask_value = 0

    def check_value_string_range(self, vs_min, vs_max):
        """Warn if the value_string's max value cannot fit in the masked field width."""
        item_width = self.get_field_width_in_bits()

        if item_width is None:
            # Type field defined by macro?
            return

        if self.mask_value > 0:
            # Distance between first and last '1'
            bitBools = bin(self.mask_value)[2:]
            mask_width = bitBools.rfind('1') - bitBools.find('1') + 1
        else:
            # No mask is effectively a full mask..
            mask_width = item_width

        item_max = (2 ** mask_width)
        if vs_max > item_max:
            global warnings_found
            print('Warning:', self.filename, self.hf, 'filter=', self.filter,
                  self.strings, "has max value", vs_max, '(' + hex(vs_max) + ')', "which doesn't fit into", mask_width, 'bits',
                  '( mask is', hex(self.mask_value), ')')
            warnings_found += 1

    def check_range_string_range(self, rs_min, rs_max):
        """Warn if the range_string's max value cannot fit in the masked field width."""
        item_width = self.get_field_width_in_bits()

        if item_width is None:
            # Type field defined by macro?
            return

        if self.mask_value > 0:
            # Distance between first and last '1'
            bitBools = bin(self.mask_value)[2:]
            mask_width = bitBools.rfind('1') - bitBools.find('1') + 1
        else:
            # No mask is effectively a full mask..
            mask_width = item_width

        item_max = (2 ** mask_width)
        if rs_max > item_max:
            global warnings_found
            print('Warning:', self.filename, self.hf, 'filter=', self.filter,
                  self.strings, "has values", rs_min, rs_max, '(' + hex(rs_max) + ')', "which doesn't fit into", mask_width, 'bits',
                  '( mask is', hex(self.mask_value), ')')
            warnings_found += 1




    # Return true if bit position n is set in value.
    def check_bit(self, value, n):
        return (value & (0x1 << n)) != 0

    # Output a warning if non-contigous bits are found in the mask (guint64).
    # Note that this legimately happens in several dissectors where multiple reserved/unassigned
    # bits are conflated into one field.
    # - there is probably a cool/efficient way to check this (+1 => 1-bit set?)
    def check_contiguous_bits(self, mask):
        if not self.mask_value:
            return

        # Do see legitimate non-contiguous bits often for these..
        if name_has_one_of(self.hf, ['reserved', 'unknown', 'unused', 'spare']):
            return
        if name_has_one_of(self.label, ['reserved', 'unknown', 'unused', 'spare']):
            return


        # Walk past any l.s. 0 bits
        n = 0
        while not self.check_bit(self.mask_value, n) and n <= 63:
            n += 1
        if n==63:
            return

        mask_start = n
        # Walk through any bits that are set
        while self.check_bit(self.mask_value, n) and n <= 63:
            n += 1
        n += 1

        if n >= 63:
            return

        # Look up the field width
        field_width = 0
        if not self.item_type in field_widths:
            print('unexpected item_type is ', self.item_type)
            field_width = 64
        else:
            field_width = self.get_field_width_in_bits()


        # Its a problem is the mask_width is > field_width - some of the bits won't get looked at!?
        mask_width = n-1-mask_start
        if field_width is not None and (mask_width > field_width):
            # N.B. No call, so no line number.
            print(self.filename + ':', self.hf, 'filter=', self.filter, self.item_type, 'so field_width=', field_width,
                  'but mask is', mask, 'which is', mask_width, 'bits wide!')
            global warnings_found
            warnings_found += 1
        # Now, any more zero set bits are an error!
        if self.filter in known_non_contiguous_fields or self.filter.startswith('rtpmidi'):
            # Don't report if we know this one is Ok.
            # TODO: also exclude items that are used as root in add_bitmask() calls?
            return
        while n <= 63:
            if self.check_bit(self.mask_value, n):
                print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - mask with non-contiguous bits',
                      mask, '(', hex(self.mask_value), ')')
                warnings_found += 1
                return
            n += 1

    def get_field_width_in_bits(self):
        """Return field width in bits, or None if it cannot be determined
        (e.g. type or display set via a macro)."""
        if self.item_type == 'FT_BOOLEAN':
            if self.display == 'NULL':
                return 8   # i.e. 1 byte
            elif self.display == 'BASE_NONE':
                return 8
            elif self.display == 'SEP_DOT':   # from proto.h, only meant for FT_BYTES
                return 64
            else:
                try:
                    # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble.
                    return int((int(self.display) + 3)/4)*4
                except:
                    return None
        else:
            if self.item_type in field_widths:
                # Lookup fixed width for this type
                return field_widths[self.item_type]
            else:
                return None

    def check_num_digits(self, mask):
        """Warn/error if the hex mask has an odd or excessive number of digits for the type."""
        if mask.startswith('0x') and len(mask) > 3:
            global warnings_found
            global errors_found

            width_in_bits = self.get_field_width_in_bits()
            # Warn if odd number of digits. TODO: only if >= 5?
            # NOTE(review): width_in_bits may be None here if the type is unknown;
            # the int() below would then raise - confirm whether that can happen in practice.
            if len(mask) % 2 and self.item_type != 'FT_BOOLEAN':
                print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - mask has odd number of digits', mask,
                      'expected max for', self.item_type, 'is', int(width_in_bits/4))
                warnings_found += 1

            if self.item_type in field_widths:
                # Longer than it should be?
                if width_in_bits is None:
                    return
                if len(mask)-2 > width_in_bits/4:
                    extra_digits = mask[2:2+(len(mask)-2 - int(width_in_bits/4))]
                    # Its definitely an error if any of these are non-zero, as they won't have any effect!
                    if extra_digits != '0'*len(extra_digits):
                        print('Error:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask, "with len is", len(mask)-2,
                              "but type", self.item_type, " indicates max of", int(width_in_bits/4),
                              "and extra digits are non-zero (" + extra_digits + ")")
                        errors_found += 1
                    else:
                        # Has extra leading zeros, still confusing, so warn.
                        print('Warning:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask, "with len", len(mask)-2,
                              "but type", self.item_type, " indicates max of", int(width_in_bits/4))
                        warnings_found += 1

                # Strict/fussy check - expecting mask length to match field width exactly!
                # Currently only doing for FT_BOOLEAN, and don't expect to be in full for 64-bit fields!
                if self.mask_exact_width:
                    ideal_mask_width = int(width_in_bits/4)
                    if self.item_type == 'FT_BOOLEAN' and ideal_mask_width < 16 and len(mask)-2 != ideal_mask_width:
                        print('Warning:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask, "with len", len(mask)-2,
                              "but type", self.item_type, "|", self.display, " indicates should be", int(width_in_bits/4))
                        warnings_found += 1

            else:
                # This type shouldn't have a mask set at all.
                print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - item has type', self.item_type, 'but mask set:', mask)
                warnings_found += 1

    def check_digits_all_zeros(self, mask):
        """Warn if the hex mask is all zeros (confusing - should just be 0)."""
        if mask.startswith('0x') and len(mask) > 3:
            if mask[2:] == '0'*(len(mask)-2):
                print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - item mask has all zeros - this is confusing! :', '"' + mask + '"')
                global warnings_found
                warnings_found += 1

    # A mask where all bits are set should instead be 0.
    # Exceptions might be where:
    # - in add_bitmask()
    # - represents flags, but dissector is not yet decoding them
    def check_full_mask(self, mask, field_arrays):
        if self.item_type == "FT_BOOLEAN":
            return
        if self.label.lower().find('mask') != -1 or self.label.lower().find('flag') != -1 or self.label.lower().find('bitmap') != -1:
            return
        if mask.startswith('0x') and len(mask) > 3:
            width_in_bits = self.get_field_width_in_bits()
            if not width_in_bits:
                return
            num_digits = int(width_in_bits / 4)
            if num_digits is None:
                return
            if mask[2:] == 'f'*num_digits or mask[2:] == 'F'*num_digits:
                # Don't report if appears in a 'fields' array
                # (field_arrays maps array name -> tuple whose [0] is the list of hf names)
                for arr in field_arrays:
                    list = field_arrays[arr][0]
                    if self.hf in list:
                        # These need to have a mask - don't judge for being 0
                        return

                print('Note:', self.filename, self.hf, 'filter=', self.filter, " - mask is all set - if only want value (rather than bits), set 0 instead? :", '"' + mask + '"')

    # An item that appears in a bitmask set, needs to have a non-zero mask.
    def check_mask_if_in_field_array(self, mask, field_arrays):
        # Work out if this item appears in a field array
        found = False
        array_name = None
        for arr in field_arrays:
            list = field_arrays[arr][0]
            if self.hf in list:
                # These need to have a mask - don't judge for being 0
                found = True
                array_name = arr
                break

        if found:
            # It needs to have a non-zero mask.
            # N.B. 'arr' still holds the matching array name after the break above.
            if self.mask_read and self.mask_value == 0:
                print('Error:', self.filename, self.hf, 'is in fields array', arr, 'but has a zero mask - this is not allowed')
                global errors_found
                errors_found += 1



    # Return True if appears to be a match
    def check_label_vs_filter(self, reportError=True, reportNumericalMismatch=True):
        """Heuristically compare the last filter component against the label."""
        global warnings_found

        last_filter = self.filter.split('.')[-1]
        last_filter_orig = last_filter
        last_filter = last_filter.replace('-', '')
        last_filter = last_filter.replace('_', '')
        last_filter = last_filter.replace(' ', '')
        label = self.label
        label_orig = label
        label = label.replace(' ', '')
        label = label.replace('-', '')
        label = label.replace('_', '')
        label = label.replace('(', '')
        label = label.replace(')', '')
        label = label.replace('/', '')
        label = label.replace("'", '')


        # OK if filter is abbrev of label.
        label_words = self.label.split(' ')
        label_words = [w for w in label_words if len(w)]
        if len(label_words) == len(last_filter):
            #print(label_words)
            abbrev_letters = [w[0] for w in label_words]
            abbrev = ''.join(abbrev_letters)
            if abbrev.lower() == last_filter.lower():
                return True

        # If both have numbers, they should probably match!
        label_numbers = [int(n) for n in re.findall(r'\d+', label_orig)]
        filter_numbers = [int(n) for n in re.findall(r'\d+', last_filter_orig)]
        if len(label_numbers) == len(filter_numbers) and label_numbers != filter_numbers:
            if reportNumericalMismatch:
                print('Note:', self.filename, self.hf, 'label="' + self.label + '" has different **numbers** from filter="' + self.filter + '"')
                print(label_numbers, filter_numbers)
            return False

        # If they match after trimming number from filter, they should match.
        if label.lower() == last_filter.lower().rstrip("0123456789"):
            return True

        # Are they just different?
        if label.lower().find(last_filter.lower()) == -1:
            if reportError:
                print('Warning:', self.filename, self.hf, 'label="' + self.label + '" does not seem to match filter="' + self.filter + '"')
                warnings_found += 1
            return False

        return True
+
+
class CombinedCallsCheck:
    """Merges the calls recorded by several APICheck objects for one file so
    that checks can be run over all calls in line-number order."""

    def __init__(self, file, apiChecks):
        self.file = file
        self.apiChecks = apiChecks
        self.get_all_calls()

    def get_all_calls(self):
        """Gather every recorded call from all checks into self.all_calls, sorted by line number."""
        self.all_calls = []
        # Combine calls into one list.
        for check in self.apiChecks:
            self.all_calls += check.calls

        # Sort by line number.
        self.all_calls.sort(key=lambda x:x.line_number)

    def check_consecutive_item_calls(self):
        """Warn when the same hf item is added twice within a few lines of the
        same scope - usually a copy/paste error in a dissector."""
        # Use 'with' so the file handle is closed (was leaked before).
        with open(self.file, 'r', encoding="utf8") as f:
            lines = f.read().splitlines()

        prev = None
        for call in self.all_calls:

            # These names commonly do appear together..
            # N.B. this was previously 'return', which silently abandoned the
            # check for the whole file at the first ignorable name.
            if name_has_one_of(call.hf_name, [ 'unused', 'unknown', 'spare', 'reserved', 'default']):
                continue

            if prev and call.hf_name == prev.hf_name:
                # More compelling if close together..
                if call.line_number>prev.line_number and call.line_number-prev.line_number <= 4:
                    scope_different = False
                    for l in range(prev.line_number, call.line_number-1):
                        if lines[l].find('{') != -1 or lines[l].find('}') != -1 or lines[l].find('else') != -1 or lines[l].find('break;') != -1 or lines[l].find('if ') != -1:
                            scope_different = True
                            break
                    # Also more compelling if check for and scope changes { } in lines in-between?
                    if not scope_different:
                        # N.B. previously printed the undefined name 'f' (only worked
                        # via an unrelated global); report this object's file instead.
                        print('Warning:', self.file + ':' + str(call.line_number),
                              call.hf_name + ' called consecutively at line', call.line_number, '- previous at', prev.line_number)
                        global warnings_found
                        warnings_found += 1
            prev = call
+
+
+
+
+# These are APIs in proto.c that check a set of types at runtime and can print '.. is not of type ..' to the console
+# if the type is not suitable.
+# Each entry pairs an API name with the set of FT_ types it accepts, and optionally
+# whether its length argument must be positive (positive_length=True) - see the APICheck class.
+apiChecks = []
+apiChecks.append(APICheck('proto_tree_add_item_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}, positive_length=True))
+apiChecks.append(APICheck('proto_tree_add_item_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}))
+apiChecks.append(APICheck('ptvcursor_add_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}, positive_length=True))
+apiChecks.append(APICheck('ptvcursor_add_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}, positive_length=True))
+apiChecks.append(APICheck('ptvcursor_add_ret_string', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
+apiChecks.append(APICheck('ptvcursor_add_ret_boolean', { 'FT_BOOLEAN'}, positive_length=True))
+apiChecks.append(APICheck('proto_tree_add_item_ret_uint64', { 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64'}, positive_length=True))
+apiChecks.append(APICheck('proto_tree_add_item_ret_int64', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}, positive_length=True))
+apiChecks.append(APICheck('proto_tree_add_item_ret_boolean', { 'FT_BOOLEAN'}, positive_length=True))
+apiChecks.append(APICheck('proto_tree_add_item_ret_string_and_length', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
+apiChecks.append(APICheck('proto_tree_add_item_ret_display_string_and_length', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING',
+                                                                                 'FT_STRINGZPAD', 'FT_STRINGZTRUNC', 'FT_BYTES', 'FT_UINT_BYTES'}))
+apiChecks.append(APICheck('proto_tree_add_item_ret_time_string', { 'FT_ABSOLUTE_TIME', 'FT_RELATIVE_TIME'}))
+apiChecks.append(APICheck('proto_tree_add_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'}))
+apiChecks.append(APICheck('proto_tree_add_uint_format_value', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'}))
+apiChecks.append(APICheck('proto_tree_add_uint_format', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'}))
+apiChecks.append(APICheck('proto_tree_add_uint64', { 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64', 'FT_FRAMENUM'}))
+apiChecks.append(APICheck('proto_tree_add_int64', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}))
+apiChecks.append(APICheck('proto_tree_add_int64_format_value', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}))
+apiChecks.append(APICheck('proto_tree_add_int64_format', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}))
+apiChecks.append(APICheck('proto_tree_add_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}))
+apiChecks.append(APICheck('proto_tree_add_int_format_value', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}))
+apiChecks.append(APICheck('proto_tree_add_int_format', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}))
+apiChecks.append(APICheck('proto_tree_add_boolean', { 'FT_BOOLEAN'}))
+apiChecks.append(APICheck('proto_tree_add_boolean64', { 'FT_BOOLEAN'}))
+apiChecks.append(APICheck('proto_tree_add_float', { 'FT_FLOAT'}))
+apiChecks.append(APICheck('proto_tree_add_float_format', { 'FT_FLOAT'}))
+apiChecks.append(APICheck('proto_tree_add_float_format_value', { 'FT_FLOAT'}))
+apiChecks.append(APICheck('proto_tree_add_double', { 'FT_DOUBLE'}))
+apiChecks.append(APICheck('proto_tree_add_double_format', { 'FT_DOUBLE'}))
+apiChecks.append(APICheck('proto_tree_add_double_format_value', { 'FT_DOUBLE'}))
+apiChecks.append(APICheck('proto_tree_add_string', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
+apiChecks.append(APICheck('proto_tree_add_string_format', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
+apiChecks.append(APICheck('proto_tree_add_string_format_value', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
+apiChecks.append(APICheck('proto_tree_add_guid', { 'FT_GUID'}))
+apiChecks.append(APICheck('proto_tree_add_oid', { 'FT_OID'}))
+apiChecks.append(APICheck('proto_tree_add_none_format', { 'FT_NONE'}))
+apiChecks.append(APICheck('proto_tree_add_item_ret_varint', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32', 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
+                                                              'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM',
+                                                              'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',}))
+apiChecks.append(APICheck('proto_tree_add_boolean_bits_format_value', { 'FT_BOOLEAN'}))
+apiChecks.append(APICheck('proto_tree_add_boolean_bits_format_value64', { 'FT_BOOLEAN'}))
+apiChecks.append(APICheck('proto_tree_add_ascii_7bits_item', { 'FT_STRING'}))
+# TODO: positions are different, and takes 2 hf_fields..
+#apiChecks.append(APICheck('proto_tree_add_checksum', { 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}))
+apiChecks.append(APICheck('proto_tree_add_int64_bits_format_value', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}))
+
+# TODO: add proto_tree_add_bytes_item, proto_tree_add_time_item ?
+
+# Types accepted by the root hf item of the ..add_bitmask_.. family of calls.
+bitmask_types = { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32',
+                  'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32',
+                  'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',
+                  'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
+                  'FT_BOOLEAN'}
+apiChecks.append(APICheck('proto_tree_add_bitmask', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_tree', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_ret_uint64', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_with_flags', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_with_flags_ret_uint64', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_value', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_value_with_flags', bitmask_types))
+apiChecks.append(APICheck('proto_tree_add_bitmask_len', bitmask_types))
+# N.B., proto_tree_add_bitmask_list does not have a root item, just a subtree...
+
+# Types accepted by the proto_tree_add_bits_* APIs.
+add_bits_types = { 'FT_CHAR', 'FT_BOOLEAN',
+                   'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',
+                   'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32', 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
+                   'FT_BYTES'}
+apiChecks.append(APICheck('proto_tree_add_bits_item', add_bits_types))
+apiChecks.append(APICheck('proto_tree_add_bits_ret_val', add_bits_types))
+
+# TODO: doesn't even have an hf_item !
+#apiChecks.append(APICheck('proto_tree_add_bitmask_text', bitmask_types))
+
+# Check some ptvcursor calls too.
+# NOTE(review): these three duplicate the ptvcursor_add_ret_* entries added above
+# (which also pass positive_length=True) - confirm whether both sets are intended.
+apiChecks.append(APICheck('ptvcursor_add_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}))
+apiChecks.append(APICheck('ptvcursor_add_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}))
+apiChecks.append(APICheck('ptvcursor_add_ret_boolean', { 'FT_BOOLEAN'}))
+
+
+# Also try to check proto_tree_add_item() calls (for length)
+apiChecks.append(ProtoTreeAddItemCheck())
+apiChecks.append(ProtoTreeAddItemCheck(True)) # for ptvcursor_add()
+
+
+
+def removeComments(code_string):
+    """Return code_string with C /*...*/ and C++ //... comments and '#if 0 ... #endif' regions removed."""
+    code_string = re.sub(re.compile(r"/\*.*?\*/",re.DOTALL ) ,"" , code_string) # C-style comment
+    code_string = re.sub(re.compile(r"//.*?\n" ) ,"" , code_string)             # C++-style comment
+    code_string = re.sub(re.compile(r"#if 0.*?#endif",re.DOTALL ) ,"" , code_string)  # Ignored region
+
+    return code_string
+
+# Test for whether the given file was automatically generated.
+def isGeneratedFile(filename):
+    """Return True if filename appears to be machine-generated.
+
+    Only the first ~11 lines are examined for a set of known 'generated' markers.
+    Returns False for files that no longer exist.
+    """
+    # Check file exists - e.g. may have been deleted in a recent commit.
+    if not os.path.exists(filename):
+        return False
+
+    # Open file
+    # NOTE(review): a 'with open(...)' block would avoid the explicit close() on each return path.
+    f_read = open(os.path.join(filename), 'r', encoding="utf8")
+    lines_tested = 0
+    for line in f_read:
+        # The comment to say that it's generated is near the top, so give up once
+        # get a few lines down.
+        if lines_tested > 10:
+            f_read.close()
+            return False
+        if (line.find('Generated automatically') != -1 or
+            line.find('Generated Automatically') != -1 or
+            line.find('Autogenerated from') != -1 or
+            line.find('is autogenerated') != -1 or
+            line.find('automatically generated by Pidl') != -1 or
+            line.find('Created by: The Qt Meta Object Compiler') != -1 or
+            line.find('This file was generated') != -1 or
+            line.find('This filter was automatically generated') != -1 or
+            line.find('This file is auto generated, do not edit!') != -1):
+
+            f_read.close()
+            return True
+        lines_tested = lines_tested + 1
+
+    # OK, looks like a hand-written file!
+    f_read.close()
+    return False
+
+
+def find_macros(filename):
+    """Scan filename for simple '#define NAME <number>' macros.
+
+    Returns a dict mapping macro name -> value (as a string).  Only numeric/hex
+    values match the RE; macros with other bodies are ignored.
+    """
+    macros = {}
+    with open(filename, 'r', encoding="utf8") as f:
+        contents = f.read()
+        # Remove comments so as not to trip up RE.
+        contents = removeComments(contents)
+
+        matches = re.finditer( r'#define\s*([A-Z0-9_]*)\s*([0-9xa-fA-F]*)\n', contents)
+        for m in matches:
+            # Store this mapping.
+            macros[m.group(1)] = m.group(2)
+    return macros
+
+
+# Look for hf items (i.e. full item to be registered) in a dissector file.
+def find_items(filename, macros, value_strings, range_strings,
+               check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False):
+    """Parse filename for hf_ item registrations; return dict of hf name -> Item.
+
+    macros/value_strings/range_strings are lookup tables previously extracted from
+    the same file, and the check_* flags are forwarded into each Item for later
+    validation.  check_consecutive is suppressed for generated files, where
+    repeated entries are common and not interesting.
+    """
+    is_generated = isGeneratedFile(filename)
+    items = {}
+    with open(filename, 'r', encoding="utf8") as f:
+        contents = f.read()
+        # Remove comments so as not to trip up RE.
+        contents = removeComments(contents)
+
+        # N.B. re extends all the way to HFILL to avoid greedy matching
+        # Groups: 1=hf name, 2=label, 3=filter, 4=type, 5=display, 6=strings, 7=mask, 8=blurb.
+        # TODO: fix a problem where re can't cope with mask that involve a macro with commas in it...
+        matches = re.finditer( r'.*\{\s*\&(hf_[a-z_A-Z0-9]*)\s*,\s*{\s*\"(.*?)\"\s*,\s*\"(.*?)\"\s*,\s*(.*?)\s*,\s*([0-9A-Z_\|\s]*?)\s*,\s*(.*?)\s*,\s*(.*?)\s*,\s*([a-zA-Z0-9\W\s_\u00f6\u00e4]*?)\s*,\s*HFILL', contents)
+        for m in matches:
+            # Store this item.
+            hf = m.group(1)
+
+            # Strip surrounding quotes from the blurb (it may also be e.g. NULL).
+            blurb = m.group(8)
+            if blurb.startswith('"'):
+                blurb = blurb[1:-1]
+
+            items[hf] = Item(filename, hf, filter=m.group(3), label=m.group(2), item_type=m.group(4),
+                             display=m.group(5),
+                             strings=m.group(6),
+                             macros=macros,
+                             value_strings=value_strings,
+                             range_strings=range_strings,
+                             mask=m.group(7),
+                             blurb=blurb,
+                             check_mask=check_mask,
+                             mask_exact_width=mask_exact_width,
+                             check_label=check_label,
+                             check_consecutive=(not is_generated and check_consecutive))
+    return items
+
+
+# Looking for args to ..add_bitmask_..() calls that are not NULL-terminated or have repeated items.
+# TODO: some dissectors have similar-looking hf arrays for other reasons, so need to cross-reference with
+# the 6th arg of ..add_bitmask_..() calls...
+# TODO: return items (rather than local checks) from here so can be checked against list of calls for given filename
+def find_field_arrays(filename, all_fields, all_hf):
+    """Find 'static int *const name[] = { &hf_..., NULL }' arrays used by _add_bitmask_ calls.
+
+    all_fields: set of array names actually referenced by _add_bitmask_ calls (others are skipped).
+    all_hf: dict of hf name -> Item, used to check masks/widths of the member fields.
+    Returns dict of array name -> (list of member hf names, combined mask), and
+    emits warnings for non-NULL-terminated, duplicated or mask-overlapping entries.
+    """
+    field_entries = {}
+    global warnings_found
+    with open(filename, 'r', encoding="utf8") as f:
+        contents = f.read()
+        # Remove comments so as not to trip up RE.
+        contents = removeComments(contents)
+
+        # Find definition of hf array
+        matches = re.finditer(r'static\s*g?int\s*\*\s*const\s+([a-zA-Z0-9_]*)\s*\[\]\s*\=\s*\{([a-zA-Z0-9,_\&\s]*)\}', contents)
+        for m in matches:
+            name = m.group(1)
+            # Ignore if not used in a call to an _add_bitmask_ API
+            if not name in all_fields:
+                continue
+
+            fields_text = m.group(2)
+            fields_text = fields_text.replace('&', '')
+            fields_text = fields_text.replace(',', '')
+
+            # Get list of each hf field in the array
+            fields = fields_text.split()
+
+            # Subtree (ett_) arrays have the same shape - skip them.
+            if fields[0].startswith('ett_'):
+                continue
+            if fields[-1].find('NULL') == -1 and fields[-1] != '0':
+                print('Warning:', filename, name, 'is not NULL-terminated - {', ', '.join(fields), '}')
+                warnings_found += 1
+                continue
+
+            # Do any hf items reappear?
+            # NOTE(review): this print lacks the 'Warning:' prefix used everywhere else - confirm intended.
+            seen_fields = set()
+            for f in fields:
+                if f in seen_fields:
+                    print(filename, name, f, 'already added!')
+                    warnings_found += 1
+                seen_fields.add(f)
+
+            # Check for duplicated flags among entries..
+            combined_mask = 0x0
+            for f in fields[0:-1]:
+                if f in all_hf:
+                    new_mask = all_hf[f].mask_value
+                    if new_mask & combined_mask:
+                        print('Warning:', filename, name, 'has overlapping mask - {', ', '.join(fields), '} combined currently', hex(combined_mask), f, 'adds', hex(new_mask))
+                        warnings_found += 1
+                    combined_mask |= new_mask
+
+            # Make sure all entries have the same width
+            set_field_width = None
+            for f in fields[0:-1]:
+                if f in all_hf:
+                    new_field_width = all_hf[f].get_field_width_in_bits()
+                    if set_field_width is not None and new_field_width != set_field_width:
+                        # Its not uncommon for fields to be used in multiple sets, some of which can be different widths..
+                        print('Note:', filename, name, 'set items not all same width - {', ', '.join(fields), '} seen', set_field_width, 'now', new_field_width)
+                    set_field_width = new_field_width
+
+            # Add entry to table
+            field_entries[name] = (fields[0:-1], combined_mask)
+
+    return field_entries
+
+def find_item_declarations(filename):
+    """Return the set of hf_ names declared in filename as 'static int hf_x = -1;'."""
+    items = set()
+
+    with open(filename, 'r', encoding="utf8") as f:
+        lines = f.read().splitlines()
+        p = re.compile(r'^static int (hf_[a-zA-Z0-9_]*)\s*\=\s*-1;')
+        for line in lines:
+            m = p.search(line)
+            if m:
+                items.add(m.group(1))
+    return items
+
+def find_item_extern_declarations(filename):
+    """Return the set of hf_ names obtained via proto_registrar_get_id_byname() (i.e. registered elsewhere)."""
+    items = set()
+    with open(filename, 'r', encoding="utf8") as f:
+        lines = f.read().splitlines()
+        p = re.compile(r'^\s*(hf_[a-zA-Z0-9_]*)\s*\=\s*proto_registrar_get_id_byname\s*\(')
+        for line in lines:
+            m = p.search(line)
+            if m:
+                items.add(m.group(1))
+    return items
+
+
+def is_dissector_file(filename):
+    """Return a truthy value if filename looks like a dissector source (packet-*.c or file-*.c)."""
+    p = re.compile(r'.*(packet|file)-.*\.c$')
+    return p.match(filename)
+
+
+def findDissectorFilesInFolder(folder, recursive=False):
+    """Return the dissector source files found in folder (optionally walking subfolders).
+
+    NOTE(review): on Ctrl-C (should_exit set) this returns None rather than a list,
+    which would make callers such as 'files += findDissectorFilesInFolder(...)'
+    raise - confirm that is acceptable during shutdown.
+    """
+    dissector_files = []
+
+    if recursive:
+        for root, subfolders, files in os.walk(folder):
+            for f in files:
+                if should_exit:
+                    return
+                f = os.path.join(root, f)
+                dissector_files.append(f)
+    else:
+        for f in sorted(os.listdir(folder)):
+            if should_exit:
+                return
+            filename = os.path.join(folder, f)
+            dissector_files.append(filename)
+
+    return [x for x in filter(is_dissector_file, dissector_files)]
+
+
+
+# Run checks on the given dissector file.
+def checkFile(filename, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False,
+              check_missing_items=False, check_bitmask_fields=False, label_vs_filter=False, extra_value_string_checks=False):
+    """Run all selected checks against one dissector file.
+
+    Gathers macros, value_strings, range_strings and hf item definitions from the
+    file, then drives every APICheck in the module-level apiChecks list over it.
+    The check_* flags correspond directly to the command-line options.  Results
+    are reported by printing; counters are updated inside the check objects.
+    """
+    # Check file exists - e.g. may have been deleted in a recent commit.
+    if not os.path.exists(filename):
+        print(filename, 'does not exist!')
+        return
+
+    # Find simple macros so can substitute into items and calls.
+    macros = find_macros(filename)
+
+    # Find (and sanity-check) value_strings
+    value_strings = findValueStrings(filename, macros, do_extra_checks=extra_value_string_checks)
+    if extra_value_string_checks:
+        for name in value_strings:
+            value_strings[name].extraChecks()
+
+    # Find (and sanity-check) range_strings
+    range_strings = findRangeStrings(filename, macros, do_extra_checks=extra_value_string_checks)
+    if extra_value_string_checks:
+        for name in range_strings:
+            range_strings[name].extraChecks()
+
+
+
+    # Find important parts of items.
+    items_defined = find_items(filename, macros, value_strings, range_strings,
+                               check_mask, mask_exact_width, check_label, check_consecutive)
+    items_extern_declared = {}
+
+    items_declared = {}
+    if check_missing_items:
+        items_declared = find_item_declarations(filename)
+        items_extern_declared = find_item_extern_declarations(filename)
+
+    fields = set()
+
+    # Get 'fields' out of calls
+    for c in apiChecks:
+        c.find_calls(filename, macros)
+        for call in c.calls:
+            # From _add_bitmask() calls
+            if call.fields:
+                fields.add(call.fields)
+
+    # Checking for lists of fields for add_bitmask calls
+    field_arrays = {}
+    if check_bitmask_fields:
+        field_arrays = find_field_arrays(filename, fields, items_defined)
+
+    if check_mask and check_bitmask_fields:
+        for i in items_defined:
+            item = items_defined[i]
+            item.check_full_mask(item.mask, field_arrays)
+            item.check_mask_if_in_field_array(item.mask, field_arrays)
+
+    # Now actually check the calls
+    for c in apiChecks:
+        c.check_against_items(items_defined, items_declared, items_extern_declared, check_missing_items, field_arrays)
+
+
+    if label_vs_filter:
+        matches = 0
+        for hf in items_defined:
+            if items_defined[hf].check_label_vs_filter(reportError=False, reportNumericalMismatch=True):
+                matches += 1
+
+        # Only checking if almost every field does match.
+        checking = len(items_defined) and matches<len(items_defined) and ((matches / len(items_defined)) > 0.93)
+        if checking:
+            print(filename, ':', matches, 'label-vs-filter matches of out of', len(items_defined), 'so reporting mismatches')
+            for hf in items_defined:
+                items_defined[hf].check_label_vs_filter(reportError=True, reportNumericalMismatch=False)
+
+
+
+#################################################################
+# Main logic.
+
+# command-line args. Controls which dissector files should be checked.
+# If no args given, will just scan epan/dissectors folder.
+parser = argparse.ArgumentParser(description='Check calls in dissectors')
+parser.add_argument('--file', action='append',
+ help='specify individual dissector file to test')
+parser.add_argument('--folder', action='store', default='',
+ help='specify folder to test')
+parser.add_argument('--commits', action='store',
+ help='last N commits to check')
+parser.add_argument('--open', action='store_true',
+ help='check open files')
+parser.add_argument('--mask', action='store_true',
+ help='when set, check mask field too')
+parser.add_argument('--mask-exact-width', action='store_true',
+ help='when set, check width of mask against field width')
+parser.add_argument('--label', action='store_true',
+ help='when set, check label field too')
+parser.add_argument('--consecutive', action='store_true',
+ help='when set, copy copy/paste errors between consecutive items')
+parser.add_argument('--missing-items', action='store_true',
+ help='when set, look for used items that were never registered')
+parser.add_argument('--check-bitmask-fields', action='store_true',
+ help='when set, attempt to check arrays of hf items passed to add_bitmask() calls')
+parser.add_argument('--label-vs-filter', action='store_true',
+ help='when set, check whether label matches last part of filter')
+parser.add_argument('--extra-value-string-checks', action='store_true',
+ help='when set, do extra checks on parsed value_strings')
+parser.add_argument('--all-checks', action='store_true',
+ help='when set, apply all checks to selected files')
+
+
+args = parser.parse_args()
+
+# Turn all checks on.
+if args.all_checks:
+ args.mask = True
+ args.mask_exact_width = True
+ args.consecutive = True
+ args.check_bitmask_fields = True
+ #args.label = True
+ args.label_vs_filter = True
+ args.extra_value_string_checks
+
+if args.check_bitmask_fields:
+ args.mask = True
+
+
+# Get files from wherever command-line args indicate.
+files = []
+if args.file:
+    # Add specified file(s)
+    for f in args.file:
+        if not os.path.isfile(f):
+            print('Chosen file', f, 'does not exist.')
+            exit(1)
+        else:
+            files.append(f)
+elif args.folder:
+    # Add all files from a given folder.
+    folder = args.folder
+    if not os.path.isdir(folder):
+        print('Folder', folder, 'not found!')
+        exit(1)
+    # Find files from folder.
+    print('Looking for files in', folder)
+    files = findDissectorFilesInFolder(folder, recursive=True)
+elif args.commits:
+    # Get files affected by specified number of commits.
+    # --diff-filter=d excludes files deleted by those commits.
+    command = ['git', 'diff', '--name-only', '--diff-filter=d', 'HEAD~' + args.commits]
+    files = [f.decode('utf-8')
+             for f in subprocess.check_output(command).splitlines()]
+    # Will examine dissector files only
+    files = list(filter(lambda f : is_dissector_file(f), files))
+elif args.open:
+    # Unstaged changes.
+    command = ['git', 'diff', '--name-only', '--diff-filter=d']
+    files = [f.decode('utf-8')
+             for f in subprocess.check_output(command).splitlines()]
+    # Only interested in dissector files.
+    files = list(filter(lambda f : is_dissector_file(f), files))
+    # Staged changes.
+    command = ['git', 'diff', '--staged', '--name-only', '--diff-filter=d']
+    files_staged = [f.decode('utf-8')
+                    for f in subprocess.check_output(command).splitlines()]
+    # Only interested in dissector files.
+    files_staged = list(filter(lambda f : is_dissector_file(f), files_staged))
+    # Merge staged into unstaged, avoiding duplicates.
+    for f in files_staged:
+        if not f in files:
+            files.append(f)
+else:
+    # Find all dissector files.
+    files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'))
+    files += findDissectorFilesInFolder(os.path.join('plugins', 'epan'), recursive=True)
+
+
+# If scanning a subset of files, list them here.
+print('Examining:')
+if args.file or args.commits or args.open:
+    if files:
+        print(' '.join(files), '\n')
+    else:
+        print('No files to check.\n')
+else:
+    print('All dissector modules\n')
+
+
+# Now check the files.
+for f in files:
+    if should_exit:
+        exit(1)
+    checkFile(f, check_mask=args.mask, mask_exact_width=args.mask_exact_width, check_label=args.label,
+              check_consecutive=args.consecutive, check_missing_items=args.missing_items,
+              check_bitmask_fields=args.check_bitmask_fields, label_vs_filter=args.label_vs_filter,
+              extra_value_string_checks=args.extra_value_string_checks)
+
+    # Do checks against all calls.
+    if args.consecutive:
+        combined_calls = CombinedCallsCheck(f, apiChecks)
+        # This hasn't really found any issues, but shows lots of false positives (and are difficult to investigate)
+        #combined_calls.check_consecutive_item_calls()
+
+
+# Show summary.
+# N.B. warnings_found/errors_found are module-level counters updated by the checks above.
+print(warnings_found, 'warnings')
+if errors_found:
+    print(errors_found, 'errors')
+    exit(1)
diff --git a/tools/check_val_to_str.py b/tools/check_val_to_str.py
new file mode 100755
index 0000000..417655c
--- /dev/null
+++ b/tools/check_val_to_str.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Scan dissectors for calls to val_to_str() and friends,
+# checking for appropriate format specifier strings in
+# 'unknown' arg.
+# TODO:
+# - more detailed format specifier checking (check letter, that there is only 1)
+# - scan conformance (.cnf) files for ASN1 dissectors?
+
+import os
+import re
+import subprocess
+import argparse
+import signal
+
+
+# Try to exit soon after Ctrl-C is pressed.
+should_exit = False
+
+def signal_handler(sig, frame):
+    """SIGINT handler: set the module-wide flag that the scanning loops poll."""
+    global should_exit
+    should_exit = True
+    print('You pressed Ctrl+C - exiting')
+
+signal.signal(signal.SIGINT, signal_handler)
+
+
+# Test for whether the given file was automatically generated.
+# (Duplicated across several tools/check_*.py scripts.)
+def isGeneratedFile(filename):
+    """Return True if filename appears to be machine-generated.
+
+    Only the first ~11 lines are examined for a set of known 'generated' markers.
+    Returns False for files that no longer exist.
+    """
+    # Check file exists - e.g. may have been deleted in a recent commit.
+    if not os.path.exists(filename):
+        return False
+
+    # Open file
+    f_read = open(os.path.join(filename), 'r', encoding="utf8")
+    lines_tested = 0
+    for line in f_read:
+        # The comment to say that it's generated is near the top, so give up once
+        # get a few lines down.
+        if lines_tested > 10:
+            f_read.close()
+            return False
+        if (line.find('Generated automatically') != -1 or
+            line.find('Generated Automatically') != -1 or
+            line.find('Autogenerated from') != -1 or
+            line.find('is autogenerated') != -1 or
+            line.find('automatically generated by Pidl') != -1 or
+            line.find('Created by: The Qt Meta Object Compiler') != -1 or
+            line.find('This file was generated') != -1 or
+            line.find('This filter was automatically generated') != -1 or
+            line.find('This file is auto generated, do not edit!') != -1 or
+            line.find('This file is auto generated') != -1):
+
+            f_read.close()
+            return True
+        lines_tested = lines_tested + 1
+
+    # OK, looks like a hand-written file!
+    f_read.close()
+    return False
+
+
+
+def removeComments(code_string):
+    """Return code_string with C /*...*/ and C++ //... comments removed.
+
+    NOTE(review): unlike the version in check_typed_item_calls.py, this does not
+    strip '#if 0 ... #endif' regions - confirm whether that is deliberate.
+    """
+    code_string = re.sub(re.compile(r"/\*.*?\*/",re.DOTALL ) ,"" ,code_string) # C-style comment
+    code_string = re.sub(re.compile(r"//.*?\n" ) ,"" ,code_string)             # C++-style comment
+    return code_string
+
+
+def is_dissector_file(filename):
+ p = re.compile(r'.*packet-.*\.c')
+ return p.match(filename)
+
+def findDissectorFilesInFolder(folder, recursive=False):
+    """Return the dissector source files found in folder (optionally walking subfolders).
+
+    NOTE(review): on Ctrl-C (should_exit set) this returns None rather than a list,
+    which would make callers such as 'files += findDissectorFilesInFolder(...)'
+    raise - confirm that is acceptable during shutdown.
+    """
+    dissector_files = []
+
+    if recursive:
+        for root, subfolders, files in os.walk(folder):
+            for f in files:
+                if should_exit:
+                    return
+                f = os.path.join(root, f)
+                dissector_files.append(f)
+    else:
+        for f in sorted(os.listdir(folder)):
+            if should_exit:
+                return
+            filename = os.path.join(folder, f)
+            dissector_files.append(filename)
+
+    return [x for x in filter(is_dissector_file, dissector_files)]
+
+
+
+# Module-level counters, updated by checkFile() and reported at the end.
+warnings_found = 0
+errors_found = 0
+
+# Check the given dissector file.
+def checkFile(filename):
+    """Scan one dissector file for [r]val_to_str[_ext][_const]() calls and check
+    the 'unknown' string argument: _const variants must not contain a format
+    specifier, other variants must contain one (and '%s' is always an error).
+    Updates the module-level warnings_found/errors_found counters.
+    """
+    global warnings_found
+    global errors_found
+
+    # Check file exists - e.g. may have been deleted in a recent commit.
+    if not os.path.exists(filename):
+        print(filename, 'does not exist!')
+        return
+
+    with open(filename, 'r', encoding="utf8") as f:
+        contents = f.read()
+
+        # Remove comments so as not to trip up RE.
+        contents = removeComments(contents)
+
+        # Negative look-behinds exclude the try_/char_/bytes variants; group 1 is
+        # the function name, group 2 the quoted 'unknown' format string.
+        matches = re.finditer(r'(?<!try_)(?<!char_)(?<!bytes)(r?val_to_str(?:_ext|)(?:_const|))\(.*?,.*?,\s*(".*?\")\s*\)', contents)
+        for m in matches:
+            function = m.group(1)
+            format_string = m.group(2)
+
+            # Ignore what appears to be a macro.
+            if format_string.find('#') != -1:
+                continue
+
+            if function.endswith('_const'):
+                # These ones shouldn't have a specifier - its an error if they do.
+                # TODO: I suppose it could be escaped, but haven't seen this...
+                if format_string.find('%') != -1:
+                    # This is an error as format specifier would show in app
+                    print('Error:', filename, " ", m.group(0), ' - should not have specifiers in unknown string')
+                    errors_found += 1
+            else:
+                # These ones need to have a specifier, and it should be suitable for an int
+                specifier_id = format_string.find('%')
+                if specifier_id == -1:
+                    print('Warning:', filename, " ", m.group(0), ' - should have suitable format specifier in unknown string (or use _const()?)')
+                    warnings_found += 1
+                # TODO: check allowed specifiers (d, u, x, ?) and modifiers (0-9*) in re ?
+                if format_string.find('%s') != -1:
+                    # This is an error as this likely causes a crash
+                    print('Error:', filename, " ", m.group(0), ' - inappropriate format specifier in unknown string')
+                    errors_found += 1
+
+
+
+#################################################################
+# Main logic.
+
+# command-line args. Controls which dissector files should be checked.
+# If no args given, will scan all dissectors.
+parser = argparse.ArgumentParser(description='Check calls in dissectors')
+parser.add_argument('--file', action='append',
+                    help='specify individual dissector file to test')
+parser.add_argument('--commits', action='store',
+                    help='last N commits to check')
+parser.add_argument('--open', action='store_true',
+                    help='check open files')
+
+args = parser.parse_args()
+
+
+# Get files from wherever command-line args indicate.
+files = []
+if args.file:
+    # Add specified file(s)
+    for f in args.file:
+        # A path not under epan is taken to be relative to epan/dissectors.
+        if not f.startswith('epan'):
+            f = os.path.join('epan', 'dissectors', f)
+        if not os.path.isfile(f):
+            print('Chosen file', f, 'does not exist.')
+            exit(1)
+        else:
+            files.append(f)
+elif args.commits:
+    # Get files affected by specified number of commits.
+    # NOTE(review): unlike check_typed_item_calls.py this does not pass
+    # --diff-filter=d, so deleted files may be listed (checkFile() tolerates that).
+    command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
+    files = [f.decode('utf-8')
+             for f in subprocess.check_output(command).splitlines()]
+    # Will examine dissector files only
+    files = list(filter(lambda f : is_dissector_file(f), files))
+elif args.open:
+    # Unstaged changes.
+    command = ['git', 'diff', '--name-only']
+    files = [f.decode('utf-8')
+             for f in subprocess.check_output(command).splitlines()]
+    # Only interested in dissector files.
+    files = list(filter(lambda f : is_dissector_file(f), files))
+    # Staged changes.
+    command = ['git', 'diff', '--staged', '--name-only']
+    files_staged = [f.decode('utf-8')
+                    for f in subprocess.check_output(command).splitlines()]
+    # Only interested in dissector files.
+    files_staged = list(filter(lambda f : is_dissector_file(f), files_staged))
+    # Merge staged into unstaged, avoiding duplicates.
+    for f in files_staged:
+        if not f in files:
+            files.append(f)
+else:
+    # Find all dissector files from folder.
+    files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'))
+    files += findDissectorFilesInFolder(os.path.join('plugins', 'epan'), recursive=True)
+    files += findDissectorFilesInFolder(os.path.join('epan', 'dissectors', 'asn1'), recursive=True)
+
+
+# If scanning a subset of files, list them here.
+print('Examining:')
+if args.file or args.commits or args.open:
+    if files:
+        print(' '.join(files), '\n')
+    else:
+        print('No files to check.\n')
+else:
+    print('All dissectors\n')
+
+
+# Now check the chosen files
+for f in files:
+    if should_exit:
+        exit(1)
+    # Skip generated files - their strings are not hand-maintained.
+    if not isGeneratedFile(f):
+        checkFile(f)
+
+
+# Show summary.
+print(warnings_found, 'warnings found')
+if errors_found:
+    print(errors_found, 'errors found')
+    exit(1)
diff --git a/tools/checkfiltername.pl b/tools/checkfiltername.pl
new file mode 100755
index 0000000..ea286b2
--- /dev/null
+++ b/tools/checkfiltername.pl
@@ -0,0 +1,790 @@
+#!/usr/bin/perl
+
+my $debug = 0;
+# 0: off
+# 1: specific debug
+# 2: full debug
+
+#
+# verify that display filter names correspond with the PROTOABBREV
+# of the dissector. Enforces the dissector to have a source
+# filename of format packet-PROTABBREV.c
+#
+# Usage: checkfiltername.pl <file or files>
+
+#
+# Copyright 2011 Michael Mann (see AUTHORS file)
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+#
+# Example:
+# ~/work/wireshark/trunk/epan/dissectors> ../../tools/checkfiltername.pl packet-3com-xns.c
+# packet-3com-xns.c (2 (of 2) fields)
+# 102 3comxns.type doesn't match PROTOABBREV of 3com-xns
+# 106 3comxns.type doesn't match PROTOABBREV of 3com-xns
+#
+# or checkfiltername.pl packet-*.c, which will check all the dissector files.
+#
+#
+
+use warnings;
+use strict;
+use Getopt::Long;
+
+my @elements;
+my @elements_dup;
+my @protocols;
+my %filters;
+my %expert_filters;
+my @acceptedprefixes = ("dcerpc-");
+my @asn1automatedfilelist;
+my @dcerpcautomatedfilelist;
+my @idl2wrsautomatedfilelist;
+my @filemanipulationfilelist;
+my @prefixfilelist;
+my @nofieldfilelist;
+my %unique;
+my @uniquefilelist;
+my @noregprotocolfilelist;
+my @periodinfilternamefilelist;
+
+my $showlinenoFlag = '';
+my $showautomatedFlag = '';
+
+my $state = "";
+# "s_unknown",
+# "s_start",
+# "s_in_hf_register_info",
+# "s_hf_register_info_entry",
+# "s_header_field_info_entry",
+# "s_header_field_info_entry_start",
+# "s_header_field_info_entry_name",
+# "s_header_field_info_entry_abbrev",
+# "s_header_field_info_entry_abbrev_end",
+# "s_start_expert",
+# "s_in_ei_register_info",
+# "s_ei_register_info_entry",
+# "s_ei_register_info_entry_start",
+# "s_ei_register_info_entry_abbrev_end",
+# "s_nofields"
+
+my $restofline;
+my $filecount = 0;
+my $currfile = "";
+my $protabbrev = "";
+my $protabbrev_index;
+my $PFNAME_value = "";
+my $linenumber = 1;
+my $totalerrorcount = 0;
+my $errorfilecount = 0;
+my $onefield = 0;
+my $nofields = 0;
+my $noperiod = 0;
+my $noregprotocol = 1;
+my $automated = 0;
+my $more_tokens;
+my $showall = 0;
+
+my $comment = 0;
+
+sub checkprotoabbrev {
+ my $abbrev = "";
+ my $abbrevpos;
+ my $proto_abbrevpos1;
+ my $proto_abbrevpos2;
+ my $afterabbrev = "";
+ my $check_dup_abbrev = "";
+ my $modprotabbrev = "";
+ my $errorline = 0;
+ my $prefix;
+
+ if (($automated == 0) || ($showall == 1)) {
+ $abbrevpos = index($_[0], ".");
+ if ($abbrevpos == -1) {
+ $abbrev = $_[0];
+ }
+ else {
+ $abbrev = substr($_[0], 0, $abbrevpos);
+ $afterabbrev = substr($_[0], $abbrevpos+1, length($_[0])-$abbrevpos);
+ $check_dup_abbrev = $afterabbrev;
+ $afterabbrev = substr($afterabbrev, 0, length($abbrev));
+ }
+
+ if ($abbrev ne $protabbrev) {
+ $errorline = 1;
+
+ #check if there is a supported protocol that matches the abbrev.
+ #This may be a case of filename != PROTOABBREV
+ foreach (@protocols) {
+ if ($abbrev eq $_) {
+ $errorline = 0;
+ } elsif (index($_, ".") != -1) {
+
+ #compare from start of string for each period found
+ $proto_abbrevpos1 = 0;
+ while ((($proto_abbrevpos2 = index($_, ".", $proto_abbrevpos1)) != -1) &&
+ ($errorline == 1)) {
+ if ($abbrev eq substr($_, 0, $proto_abbrevpos2)) {
+ $errorline = 0;
+ }
+
+ $proto_abbrevpos1 = $proto_abbrevpos2+1;
+ }
+ }
+ }
+ }
+
+ # find any underscores that preface or follow a period
+ if (((index($_[0], "._") >= 0) || (index($_[0], "_.") >= 0)) &&
+ #ASN.1 dissectors can intentionally generating this field name, so don't fault the dissector
+ (index($_[0], "_untag_item_element") < 0)) {
+ if ($showlinenoFlag) {
+ push(@elements, "$_[1] $_[0] contains an unnecessary \'_\'\n");
+ } else {
+ push(@elements, "$_[0] contains an unnecessary \'_\'\n");
+ }
+ }
+
+ if (($errorline == 1) && ($showall == 0)) {
+ #try some "accepted" variations of PROTOABBREV
+
+ #replace '-' with '_'
+ $modprotabbrev = $protabbrev;
+ $modprotabbrev =~ s/-/_/g;
+ if ($abbrev eq $modprotabbrev) {
+ $errorline = 0;
+ }
+
+ #remove '-'
+ if ($errorline == 1) {
+ $modprotabbrev = $protabbrev;
+ $modprotabbrev =~ s/-//g;
+ if ($abbrev eq $modprotabbrev) {
+ $errorline = 0;
+ }
+ }
+
+ #remove '_'
+ if ($errorline == 1) {
+ $modprotabbrev = $protabbrev;
+ $modprotabbrev =~ s/_//g;
+ if ($abbrev eq $modprotabbrev) {
+ $errorline = 0;
+ }
+ }
+
+ if ($errorline == 1) {
+ #remove any "accepted" prefix to see if there is still a problem
+ foreach (@acceptedprefixes) {
+ if ($protabbrev =~ /^$_/) {
+ $modprotabbrev = substr($protabbrev, length($_));
+ if ($abbrev eq $modprotabbrev) {
+ push(@prefixfilelist, "$currfile\n");
+ $errorline = 0;
+ }
+ }
+ }
+ }
+ else {
+ push(@filemanipulationfilelist, "$currfile\n");
+ }
+
+ #now check the acceptable "fields from a different protocol"
+ if ($errorline == 1) {
+ if (is_from_other_protocol_allowed($_[0], $currfile) == 1) {
+ $errorline = 0;
+ }
+ }
+
+ #now check the acceptable "fields that include a version number"
+ if ($errorline == 1) {
+ if (is_protocol_version_allowed($_[0], $currfile) == 1) {
+ $errorline = 0;
+ }
+ }
+ }
+
+ if ($errorline == 1) {
+ $debug>1 && print "$_[1] $_[0] doesn't match PROTOABBREV of $protabbrev\n";
+ if ($showlinenoFlag) {
+ push(@elements, "$_[1] $_[0] doesn't match PROTOABBREV of $protabbrev\n");
+ } else {
+ push(@elements, "$_[0] doesn't match PROTOABBREV of $protabbrev\n");
+ }
+ }
+
+ if (($abbrev ne "") && (lc($abbrev) eq lc($afterabbrev))) {
+ # Allow ASN.1 generated files to duplicate part of proto name
+ if ((!(grep {$currfile eq $_ } @asn1automatedfilelist)) &&
+ # Check allowed list
+ (is_proto_dup_allowed($abbrev, $check_dup_abbrev) == 0)) {
+ if ($showlinenoFlag) {
+ push(@elements_dup, "$_[1] $_[0] duplicates PROTOABBREV of $abbrev\n");
+ } else {
+ push(@elements_dup, "$_[0] duplicates PROTOABBREV of $abbrev\n");
+ }
+ }
+ }
+ }
+}
+
+sub printprevfile {
+ my $totalfields = keys(%filters);
+ my $count_ele;
+ my $count_dup;
+ my $total_count;
+
+ foreach (sort keys %filters) {
+ checkprotoabbrev ($filters{$_}, $_);
+ }
+
+ foreach (sort keys %expert_filters) {
+ checkprotoabbrev ($expert_filters{$_}, $_);
+ }
+
+ $count_ele = @elements;
+ $count_dup = @elements_dup;
+ $total_count = $count_ele+$count_dup;
+
+ if ($noregprotocol == 1) {
+ #if no protocol is registered, only worry about duplicates
+ if ($currfile ne "") {
+ push(@noregprotocolfilelist, "$currfile\n");
+ }
+
+ if ($count_dup > 0) {
+ $errorfilecount++;
+ $totalerrorcount += $count_dup;
+ }
+
+ if (($showall == 1) || ($count_dup > 0)) {
+ print "\n\n$currfile - NO PROTOCOL REGISTERED\n";
+ if ($showall == 1) {
+ #everything is included, so count all errors
+ $totalerrorcount += $count_ele;
+ if (($count_ele > 0) && ($count_dup == 0)) {
+ $errorfilecount++;
+ }
+
+ foreach (@elements) {
+ print $_;
+ }
+ }
+ foreach (@elements_dup) {
+ print $_;
+ }
+ }
+ } else {
+ if ($total_count > 0) {
+ $errorfilecount++;
+ $totalerrorcount += $total_count;
+ }
+
+ if (($automated == 0) || ($showall == 1)) {
+ if ($total_count > 0) {
+ if ($automated == 1) {
+ if ($showall == 1) {
+ print "\n\n$currfile - AUTOMATED ($total_count (of $totalfields) fields)\n";
+ }
+ } else {
+ print "\n\n$currfile ($total_count (of $totalfields) fields)\n";
+ }
+
+ foreach (@elements) {
+ print $_;
+ }
+ foreach (@elements_dup) {
+ print $_;
+ }
+ }
+
+ if ((($nofields) || ($totalfields == 0)) && ($currfile ne "")) {
+ if ($showall == 1) {
+ print "\n\n$currfile - NO FIELDS\n";
+ }
+ push(@nofieldfilelist, "$currfile\n");
+ }
+ }
+ }
+}
+
+#--------------------------------------------------------------------
+# This is a list of dissectors that intentionally have filter names
+# where the second segment duplicates (at least partially) the name
+# of the first. The most common case is in ASN.1 dissectors, but
+# those can be dealt with by looking at the first few lines of the
+# dissector. This list has been vetted and justification will need
+# to be provided to add to it. Acknowledge these dissectors aren't
+# a problem for the pre-commit script
+#--------------------------------------------------------------------
+sub is_proto_dup_allowed {
+ if (($_[0] eq "amf") && (index($_[1], "amf0") >= 0)) {return 1;}
+ if (($_[0] eq "amf") && (index($_[1], "amf3") >= 0)) {return 1;}
+ if (($_[0] eq "amqp") && (index($_[1], "amqp") >= 0)) {return 1;}
+ if (($_[0] eq "bat") && (index($_[1], "batman") >= 0)) {return 1;}
+ if (($_[0] eq "browser") && (index($_[1], "browser_") >= 0)) {return 1;}
+ if (($_[0] eq "data") && (index($_[1], "data") >= 0)) {return 1;}
+ if (($_[0] eq "dlsw") && (index($_[1], "dlsw_version") >= 0)) {return 1;}
+ if (($_[0] eq "dns") && (index($_[1], "dnskey") >= 0)) {return 1;}
+ if (($_[0] eq "ecmp") && (index($_[1], "ecmp_") >= 0)) {return 1;}
+ if (($_[0] eq "exported_pdu") && (index($_[1], "exported_pdu") >= 0)) {return 1;}
+ if (($_[0] eq "fc") && (index($_[1], "fctl") >= 0)) {return 1;}
+ if (($_[0] eq "fcs") && (index($_[1], "fcsmask") >= 0)) {return 1;}
+ if (($_[0] eq "fmp") && (index($_[1], "fmp") >= 0)) {return 1;}
+ if (($_[0] eq "fr") && (index($_[1], "frame_relay") >= 0)) {return 1;}
+ if (($_[0] eq "lustre") && (index($_[1], "lustre_") >= 0)) {return 1;}
+ if (($_[0] eq "mac") && (index($_[1], "macd") >= 0)) {return 1;}
+ if (($_[0] eq "mac") && (index($_[1], "macis") >= 0)) {return 1;}
+ if (($_[0] eq "mih") && (index($_[1], "mihf") >= 0)) {return 1;}
+ if (($_[0] eq "mih") && (index($_[1], "mihcap") >= 0)) {return 1;}
+ if (($_[0] eq "ncp") && (index($_[1], "ncp") >= 0)) {return 1;}
+ if (($_[0] eq "nfs") && (index($_[1], "nfs") >= 0)) {return 1;}
+ if (($_[0] eq "oxid") && (index($_[1], "oxid") >= 0)) {return 1;}
+ if (($_[0] eq "rquota") && (index($_[1], "rquota") >= 0)) {return 1;}
+ if (($_[0] eq "pfcp") && (index($_[1], "pfcp") >= 0)) {return 1;}
+ if (($_[0] eq "sm") && (index($_[1], "sm_") >= 0)) {return 1;}
+ if (($_[0] eq "smpp") && (index($_[1], "smppplus") >= 0)) {return 1;}
+ if (($_[0] eq "spray") && (index($_[1], "sprayarr") >= 0)) {return 1;}
+ if (($_[0] eq "stat") && (index($_[1], "stat_") >= 0)) {return 1;}
+ if (($_[0] eq "stat") && (index($_[1], "state") >= 0)) {return 1;}
+ if (($_[0] eq "tds") && (index($_[1], "tds_") >= 0)) {return 1;}
+ if (($_[0] eq "time") && (index($_[1], "time") >= 0)) {return 1;}
+ if (($_[0] eq "tn3270") && (index($_[1], "tn3270e") >= 0)) {return 1;}
+ if (($_[0] eq "usb") && (index($_[1], "usb") >= 0)) {return 1;}
+ if (($_[0] eq "xml") && (index($_[1], "xml") >= 0)) {return 1;}
+
+ return 0;
+}
+
+#--------------------------------------------------------------------
+# This is a list of dissectors that intentionally have filter names
+# shared with other dissectors. This list has been vetted and
+# justification will need to be provided to add to it.
+# Acknowledge these dissectors aren't a problem for the pre-commit script
+#--------------------------------------------------------------------
+sub is_from_other_protocol_allowed {
+ my $proto_filename;
+ my $dir_index = rindex($_[1], "\\");
+
+ #handle directory names on all platforms
+ if ($dir_index < 0) {
+ $dir_index = rindex($_[1], "/");
+ }
+
+ if ($dir_index < 0) {
+ $proto_filename = $_[1];
+ }
+ else {
+ $proto_filename = substr($_[1], $dir_index+1);
+ }
+
+ # XXX - may be faster to hash this (note 1-many relationship)?
+ if (($proto_filename eq "packet-atalk.c") && (index($_[0], "llc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-awdl.c") && (index($_[0], "llc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-bpdu.c") && (index($_[0], "mstp") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-bssap.c") && (index($_[0], "bsap") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-caneth.c") && (index($_[0], "can") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-cimetrics.c") && (index($_[0], "llc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-cipsafety.c") && (index($_[0], "cip") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-cipsafety.c") && (index($_[0], "enip") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-dcerpc-netlogon.c") && (index($_[0], "ntlmssp") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-dcom-oxid.c") && (index($_[0], "dcom") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-dvb-data-mpe.c") && (index($_[0], "mpeg_sect") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-dvb-ipdc.c") && (index($_[0], "ipdc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-enip.c") && (index($_[0], "cip") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-extreme.c") && (index($_[0], "llc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-fmp_notify.c") && (index($_[0], "fmp") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-foundry.c") && (index($_[0], "llc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-glusterfs.c") && (index($_[0], "gluster") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-h248_annex_e.c") && (index($_[0], "h248") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-h248_q1950.c") && (index($_[0], "h248") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-ieee1722.c") && (index($_[0], "can") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-ieee80211.c") && (index($_[0], "eapol") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-ieee80211-radio.c") && (index($_[0], "wlan") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-ieee80211-wlancap.c") && (index($_[0], "wlan") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-ieee802154.c") && (index($_[0], "wpan") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-isup.c") && (index($_[0], "ansi_isup") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-isup.c") && (index($_[0], "bat_ase") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-isup.c") && (index($_[0], "nsap") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-isup.c") && (index($_[0], "x213") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-iwarp-ddp-rdmap.c") && (index($_[0], "iwarp_ddp") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-iwarp-ddp-rdmap.c") && (index($_[0], "iwarp_rdma") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-k12.c") && (index($_[0], "aal2") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-k12.c") && (index($_[0], "atm") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-m3ua.c") && (index($_[0], "mtp3") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-mle.c") && (index($_[0], "wpan") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-mpeg-dsmcc.c") && (index($_[0], "mpeg_sect") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-mpeg-dsmcc.c") && (index($_[0], "etv.dsmcc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-mpeg1.c") && (index($_[0], "rtp.payload_mpeg_") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-mysql.c") && (index($_[0], "mariadb") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-ndps.c") && (index($_[0], "spx.ndps_") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-pw-atm.c") && (index($_[0], "atm") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-pw-atm.c") && (index($_[0], "pw") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-scsi.c") && (index($_[0], "scsi_sbc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-sndcp-xid.c") && (index($_[0], "llcgprs") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-wlccp.c") && (index($_[0], "llc") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-wps.c") && (index($_[0], "eap") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-wsp.c") && (index($_[0], "wap") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-xot.c") && (index($_[0], "x25") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-zbee-zcl-misc.c") && (index($_[0], "zbee_zcl_hvac") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-zbee-zcl-misc.c") && (index($_[0], "zbee_zcl_ias") >= 0)) {return 1;}
+
+ #Understand why, but I think it could be prefixed with "dissector"
+ #prefix (which isn't necessarily "protocol")
+ if (($proto_filename eq "packet-rtcp.c") && (index($_[0], "srtcp") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-rtp.c") && (index($_[0], "srtp") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-dcom-cba-acco.c") && (index($_[0], "cba") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-dcom-cba.c") && (index($_[0], "cba") >= 0)) {return 1;}
+
+ #XXX - HACK to get around nested "s in field name
+ if (($proto_filename eq "packet-gsm_sim.c") && (index($_[0], "e\\") >= 0)) {return 1;}
+
+ return 0;
+}
+
+#--------------------------------------------------------------------
+# This is a list of dissectors that use their (protocol) version number
+# as part of the first display filter segment, which checkfiltername
+# usually complains about. Manually allow them so that they can pass
+# pre-commit script
+#--------------------------------------------------------------------
+sub is_protocol_version_allowed {
+ my $proto_filename;
+ my $dir_index = rindex($_[1], "\\");
+
+ #handle directory names on all platforms
+ if ($dir_index < 0) {
+ $dir_index = rindex($_[1], "/");
+ }
+
+ if ($dir_index < 0) {
+ $proto_filename = $_[1];
+ }
+ else {
+ $proto_filename = substr($_[1], $dir_index+1);
+ }
+
+ # XXX - may be faster to hash this?
+ if (($proto_filename eq "packet-ehs.c") && (index($_[0], "ehs2") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-hsrp.c") && (index($_[0], "hsrp2") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-ipv6.c") && (index($_[0], "ip") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-openflow_v1.c") && (index($_[0], "openflow") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-rtnet.c") && (index($_[0], "tdma-v1") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-scsi-osd.c") && (index($_[0], "scsi_osd2") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-sflow.c") && (index($_[0], "sflow_5") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-sflow.c") && (index($_[0], "sflow_245") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-tipc.c") && (index($_[0], "tipcv2") >= 0)) {return 1;}
+ if (($proto_filename eq "packet-bluetooth.c") && (index($_[0], "llc.bluetooth_pid") >= 0)) {return 1;}
+
+ return 0;
+}
+
+# ---------------------------------------------------------------------
+#
+# MAIN
+#
+GetOptions(
+ 'showlineno' => \$showlinenoFlag,
+ 'showautomated' => \$showautomatedFlag,
+ );
+
+while (<>) {
+ if ($currfile !~ /$ARGV/) {
+ &printprevfile();
+
+ # New file - reset array and state
+ $filecount++;
+ $currfile = $ARGV;
+
+ #determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c or (dirs)/file-PROTABBREV.c
+ $protabbrev_index = rindex($currfile, "packet-");
+ if ($protabbrev_index == -1) {
+ $protabbrev_index = rindex($currfile, "file-");
+ if ($protabbrev_index == -1) {
+ #ignore "non-dissector" files
+ next;
+ }
+
+ $protabbrev = substr($currfile, $protabbrev_index+length("file-"));
+ $protabbrev_index = rindex($protabbrev, ".");
+ if ($protabbrev_index == -1) {
+ print "$currfile doesn't fit format of file-PROTABBREV.c\n";
+ next;
+ }
+ } else {
+ $protabbrev = substr($currfile, $protabbrev_index+length("packet-"));
+ $protabbrev_index = rindex($protabbrev, ".");
+ if ($protabbrev_index == -1) {
+ print "$currfile doesn't fit format of packet-PROTABBREV.c\n";
+ next;
+ }
+ }
+ $protabbrev = substr($protabbrev, 0, $protabbrev_index);
+
+ $PFNAME_value = "";
+ $noregprotocol = 1;
+ $automated = 0;
+ $nofields = 0;
+ $onefield = 0;
+ $noperiod = 0;
+ $linenumber = 1;
+ %filters = ( );
+ %expert_filters = ( );
+ @protocols = ( );
+ @elements = ( );
+ @elements_dup = ( );
+ $state = "s_unknown";
+ }
+
+ if (($automated == 0) && ($showautomatedFlag eq "")) {
+ #DCERPC automated files
+ if ($_ =~ "DO NOT EDIT") {
+ push(@dcerpcautomatedfilelist, "$currfile\n");
+ $automated = 1;
+ next;
+ }
+ #ASN.1 automated files
+ elsif ($_ =~ "Generated automatically by the ASN.1 to Wireshark dissector compiler") {
+ push(@asn1automatedfilelist, "$currfile\n");
+ $automated = 1;
+ next;
+ }
+ #idl2wrs automated files
+ elsif ($_ =~ "Autogenerated from idl2wrs") {
+ push(@idl2wrsautomatedfilelist, "$currfile\n");
+ $automated = 1;
+ next;
+ }
+ }
+
+ # opening then closing comment
+ if (/(.*?)\/\*.*\*\/(.*)/) {
+ $comment = 0;
+ $_ = "$1$2";
+ # closing then opening comment
+ } elsif (/.*?\*\/(.*?)\/\*/) {
+ $comment = 1;
+ $_ = "$1";
+ # opening comment
+ } elsif (/(.*?)\/\*/) {
+ $comment = 1;
+ $_ = "$1";
+ # closing comment
+ } elsif (/\*\/(.*?)/) {
+ $comment = 0;
+ $_ = "$1";
+ } elsif ($comment == 1) {
+ $linenumber++;
+ next;
+ }
+ # unhandled: more than one complete comment per line
+
+ chomp;
+
+ #proto_register_protocol state machine
+ $restofline = $_;
+ $more_tokens = 1;
+
+ #PFNAME is a popular #define for the proto filter name, so use it for testing
+ if ($restofline =~ /#define\s*PFNAME\s*\"([^\"]*)\"/) {
+ $PFNAME_value = $1;
+ $debug>1 && print "PFNAME: '$1'\n";
+ }
+
+ until ($more_tokens == 0) {
+ if (($restofline =~ /proto_register_protocol\s*\((.*)/) ||
+ ($restofline =~ /proto_register_protocol_in_name_only\s*\((.*)/)) {
+ $noregprotocol = 0;
+ $restofline = $1;
+ $state = "s_proto_start";
+ } elsif (($state eq "s_proto_start") && ($restofline =~ /^(\s*\"([^\"]*)\"\s*,)\s*(.*)/)) {
+ $restofline = $3;
+ $state = "s_proto_long_name";
+ $debug>1 && print "proto long name: '$2'\n";
+ } elsif (($state eq "s_proto_start") && ($restofline =~ /^(\s*(([\w\d])+)\s*,)\s*(.*)/)) {
+ $restofline = $4;
+ $state = "s_proto_long_name";
+ $debug>1 && print "proto long name: '$2'\n";
+ } elsif (($state eq "s_proto_long_name") && ($restofline =~ /^(\s*\"([^\"]*)\"\s*,)\s*(.*)/)) {
+ $restofline = $3;
+ $state = "s_proto_short_name";
+ $debug>1 && print "proto short name: '$2'\n";
+ } elsif (($state eq "s_proto_long_name") && ($restofline =~ /^(\s*(([\w\d])+)\s*,)\s*(.*)/)) {
+ $restofline = $4;
+ $state = "s_proto_short_name";
+ $debug>1 && print "proto short name: '$2'\n";
+ } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*PFNAME\s*(.*)/)) {
+ $more_tokens = 0;
+ $state = "s_proto_filter_name";
+ if ((index($PFNAME_value, ".") != -1) && ($noperiod == 0)) {
+ push(@periodinfilternamefilelist, "$currfile\n");
+ $noperiod = 1;
+ }
+ push(@protocols, $PFNAME_value);
+ $debug>1 && print "proto filter name: '$PFNAME_value'\n";
+ } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*\"([^\"]*)\"\s*(.*)/)) {
+ $more_tokens = 0;
+ $state = "s_proto_filter_name";
+ if ((index($1, ".") != -1) && ($noperiod == 0)) {
+ push(@periodinfilternamefilelist, "$currfile\n");
+ $noperiod = 1;
+ }
+ push(@protocols, $1);
+ $debug>1 && print "proto filter name: '$1'\n";
+ } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*(([\w\d])+)\s*(.*)/)) {
+ $more_tokens = 0;
+ $state = "s_proto_filter_name";
+ $debug>1 && print "proto filter name: '$1'\n";
+ } else {
+ $more_tokens = 0;
+ }
+ }
+
+ #retrieving display filters state machine
+ $restofline = $_;
+ $more_tokens = 1;
+ until ($more_tokens == 0) {
+ if ($restofline =~ /\s*static\s*hf_register_info\s*(\w+)\[\](.*)/) {
+ $restofline = $2;
+ $state = "s_start";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif ($restofline =~ /\s*static\s*ei_register_info\s*(\w+)\[\](.*)/) {
+ $restofline = $2;
+ $state = "s_start_expert";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_start") && ($restofline =~ /\W+{(.*)/)) {
+ $restofline = $1;
+ $state = "s_in_hf_register_info";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_in_hf_register_info") && ($restofline =~ /\W+{(.*)/)) {
+ $restofline = $1;
+ $state = "s_hf_register_info_entry";
+ $debug>1 && print "$linenumber $state\n";
+ $onefield = 1;
+ } elsif (($state eq "s_in_hf_register_info") && ($restofline =~ /\s*};(.*)/)) {
+ $restofline = $1;
+ if ($onefield == 0) {
+ $debug && print "$linenumber NO FIELDS!!!\n";
+ $nofields = 1;
+ $state = "s_nofields";
+ $more_tokens = 0;
+ } else {
+ $state = "s_unknown";
+ }
+ } elsif (($state eq "s_hf_register_info_entry") && ($restofline =~ /\s*&\s*(hf_\w*(\[w*\])?)\s*,?(.*)/)) {
+ $restofline = $3;
+ $debug>1 && print "$linenumber hf_register_info_entry: $1\n";
+ $state = "s_header_field_info_entry";
+ } elsif (($state eq "s_header_field_info_entry") && ($restofline =~ /\s*{(.*)/)) {
+ $restofline = $1;
+ $state = "s_header_field_info_entry_start";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_header_field_info_entry_start") && ($restofline =~ /((\"([^\"]*)\")|(\w+))\s*,(.*)/)) {
+ $restofline = $5;
+ $debug>1 && print "$linenumber header_field_info_entry_name: $1\n";
+ $state = "s_header_field_info_entry_name";
+ } elsif (($state eq "s_header_field_info_entry_name") && ($restofline =~ /\"([^\"]*)\"\s*,?(.*)/)) {
+ $restofline = $2;
+ $debug>1 && print "$linenumber header_field_info_entry_abbrev: $1\n";
+ $state = "s_header_field_info_entry_abbrev";
+ $filters{$linenumber} = $1;
+ } elsif (($state eq "s_header_field_info_entry_abbrev") && ($restofline =~ /[^}]*}(.*)/)) {
+ $restofline = $1;
+ $state = "s_header_field_info_entry_abbrev_end";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_header_field_info_entry_abbrev_end") && ($restofline =~ /[^}]*}(.*)/)) {
+ $restofline = $1;
+ $state = "s_in_hf_register_info";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_start_expert") && ($restofline =~ /\W+{(.*)/)) {
+ $restofline = $1;
+ $state = "s_in_ei_register_info";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_in_ei_register_info") && ($restofline =~ /\W+{(.*)/)) {
+ $restofline = $1;
+ $state = "s_ei_register_info_entry";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_in_ei_register_info") && ($restofline =~ /\s*};(.*)/)) {
+ $restofline = $1;
+ $state = "s_unknown";
+ } elsif (($state eq "s_ei_register_info_entry") && ($restofline =~ /\s*{(.*)/)) {
+ $restofline = $1;
+ $state = "s_ei_register_info_entry_start";
+ $debug>1 && print "$linenumber $state\n";
+ } elsif (($state eq "s_ei_register_info_entry_start") && ($restofline =~ /\"([^\"]*)\"\s*,(.*)/)) {
+ $restofline = $2;
+ $debug>1 && print "$linenumber ei_register_info_entry_abbrev: $1\n";
+ $expert_filters{$linenumber} = $1;
+ $state = "s_ei_register_info_entry_abbrev_end";
+ } elsif (($state eq "s_ei_register_info_entry_abbrev_end") && ($restofline =~ /[^}]*}(.*)/)) {
+ $restofline = $1;
+ $state = "s_in_ei_register_info";
+ $debug>1 && print "$linenumber $state\n";
+ } else {
+ $more_tokens = 0;
+ }
+ }
+
+ $linenumber++;
+}
+
+&printprevfile();
+
+if ($totalerrorcount > 0) {
+ print "\n\nTOTAL ERRORS: $totalerrorcount";
+
+ if ($filecount > 1) {
+ print " ($errorfilecount files)\n";
+
+ print "NO FIELDS: " . scalar(@nofieldfilelist) . "\n";
+ print "AUTOMATED: " . (scalar(@asn1automatedfilelist) + scalar(@dcerpcautomatedfilelist) + scalar(@idl2wrsautomatedfilelist)) . "\n";
+ print "NO PROTOCOL: " . scalar(@noregprotocolfilelist) . "\n";
+
+ print "\nASN.1 AUTOMATED FILE LIST\n";
+ foreach (@asn1automatedfilelist) {
+ print $_;
+ }
+ print "\nDCE/RPC AUTOMATED FILE LIST\n";
+ foreach (@dcerpcautomatedfilelist) {
+ print $_;
+ }
+ print "\nIDL2WRS AUTOMATED FILE LIST\n";
+ foreach (@idl2wrsautomatedfilelist) {
+ print $_;
+ }
+ print "\n\"FILE MANIPULATION\" FILE LIST\n";
+ @uniquefilelist = grep{ not $unique{$_}++} @filemanipulationfilelist;
+ foreach (@uniquefilelist) {
+ print $_;
+ }
+ print "\nREMOVE PREFIX FILE LIST\n";
+ @uniquefilelist = grep{ not $unique{$_}++} @prefixfilelist;
+ foreach (@uniquefilelist) {
+ print $_;
+ }
+ print "\nNO PROTOCOL REGISTERED FILE LIST\n";
+ foreach (@noregprotocolfilelist) {
+ print $_;
+ }
+ print "\nNO FIELDS FILE LIST\n";
+ foreach (@nofieldfilelist) {
+ print $_;
+ }
+
+ print "\nPERIOD IN PROTO FILTER NAME FILE LIST\n";
+ foreach (@periodinfilternamefilelist) {
+ print $_;
+ }
+ } else {
+ print "\n";
+ }
+
+ exit(1); # exit 1 if ERROR
+}
+
+__END__
diff --git a/tools/checkhf.pl b/tools/checkhf.pl
new file mode 100755
index 0000000..7e01c7e
--- /dev/null
+++ b/tools/checkhf.pl
@@ -0,0 +1,700 @@
+#!/usr/bin/env perl
+#
+# Copyright 2013, William Meier (See AUTHORS file)
+#
+# Validate hf_... and ei_... usage for a dissector file;
+#
+# Usage: checkhf.pl [--debug=?] <file or files>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+## Note: This program is a re-implementation of the
+## original checkhf.pl written and (C) by Joerg Mayer.
+## The overall objective of the new implementation was to reduce
+## the number of false positives which occurred with the
+## original checkhf.pl
+##
+## This program can be used to scan original .c source files or source
+## files which have been passed through a C pre-processor.
+## Operating on pre-processed source files is optimal; There should be
+## minimal false positives.
+## If the .c input is an original source file there may very well be
+## false positives/negatives due to the fact that the hf_... variables & etc
+## may be created via macros.
+##
+## ----- (The following is extracted from the original checkhf.pl with thanks to Joerg) -------
+## Example:
+## ~/work/wireshark/trunk/epan/dissectors> ../../tools/checkhf.pl packet-afs.c
+## Unused entry: packet-afs.c, hf_afs_ubik_voteend
+## Unused entry: packet-afs.c, hf_afs_ubik_errcode
+## Unused entry: packet-afs.c, hf_afs_ubik_votetype
+## ERROR: NO ARRAY: packet-afs.c, hf_afs_fs_ipaddr
+##
+## or checkhf.pl packet-*.c, which will check all the dissector files.
+##
+## NOTE: This tool currently generates false positives!
+##
+## The "NO ARRAY" messages - if accurate - points to an error that will
+## cause (t|wire)shark to report a DISSECTOR_BUG when a packet containing
+## this particular element is being dissected.
+##
+## The "Unused entry" message indicates the opposite: We define an entry but
+## never use it (e.g., in a proto_...add... function).
+## ------------------------------------------------------------------------------------
+
+# ------------------------------------------------------------------------------------
+# Main
+#
+# Logic:
+# 1. Clean the input: remove blank lines, comments, quoted strings and code under '#if 0'.
+# 2. hf_defs:
+# Find (and remove from input) list of hf_... variable
+# definitions ('static? g?int hf_... ;')
+# 2. hf_array_entries:
+# Find (and remove from input) list of hf_... variables
+# referenced in the hf[] entries;
+# 3. hf_usage:
+# From the remaining input, extract list of all strings of form hf_...
+# (which may include strings which are not actually valid
+# hf_... variable references).
+# 4. Checks:
+# If entries in hf_defs not in hf_usage then "unused" (for static hf_defs only)
+# If entries in hf_defs not in hf_array_entries then "ERROR: NO ARRAY";
+
use strict;
use warnings;

use Getopt::Long;

# Command-line state:
#   --debug=n enables one of the numbered debug dumps (see the
#   "($debug == n)" checks in the subs below);
#   --help / -? prints usage and exits.
my $help_flag = '';
my $debug = 0; # default: off; 1=cmt; 2=#if0; 3=hf_defs; 4=hf_array_entries; 5=hfusage (See code)

my $sts = GetOptions(
          'debug=i' => \$debug,
          'help|?'  => \$help_flag
         );
# Bail out on option errors, explicit --help, or no input files.
if (!$sts || $help_flag || !$ARGV[0]) {
    usage();
}
+
my $error = 0;

# Process each file named on the command line independently.
while (my $filename = $ARGV[0]) {
    shift;

    my ($file_contents);
    my (%hf_defs, %hf_static_defs, %hf_array_entries, %hf_usage);
    my ($unused_href, $no_array_href);
    my (%ei_defs, %ei_static_defs, %ei_array_entries, %ei_usage);
    my ($unused_ei, $no_array_ei);

    read_file(\$filename, \$file_contents);

    # Normalize the source: strip comments, blank lines, string
    # literals and '#if 0' blocks so the regex scans below only see
    # live code.
    remove_comments      (\$file_contents, $filename);
    remove_blank_lines   (\$file_contents, $filename);
    # NOTE(review): no /g, so only the first leading-whitespace run is
    # removed -- confirm intended.
    $file_contents =~ s/^\s+//m; # Remove leading spaces
    remove_quoted_strings(\$file_contents, $filename);
    remove_if0_code      (\$file_contents, $filename);

    find_remove_hf_defs                    (\$file_contents, $filename, \%hf_defs);
    find_remove_hf_array_entries           (\$file_contents, $filename, \%hf_array_entries);
    find_remove_proto_get_id_hf_assignments(\$file_contents, $filename, \%hf_array_entries);
    find_hf_usage                          (\$file_contents, $filename, \%hf_usage);

    find_remove_ei_defs                    (\$file_contents, $filename, \%ei_defs);
    find_remove_ei_array_entries           (\$file_contents, $filename, \%ei_array_entries);
    find_ei_usage                          (\$file_contents, $filename, \%ei_usage);

# Tests (See above)
# 1. Are all the static hf_defs and ei_defs entries in hf_usage and ei_usage?
#    if not: "Unused entry:"
#

    # create a hash containing entries just for the static definitions
    @hf_static_defs{grep {$hf_defs{$_} == 0} keys %hf_defs} = (); # All values in the new hash will be undef
    @ei_static_defs{grep {$ei_defs{$_} == 0} keys %ei_defs} = (); # All values in the new hash will be undef

    $unused_href = diff_hash(\%hf_static_defs, \%hf_usage);
    remove_hf_pid_from_unused_if_add_oui_call(\$file_contents, $filename, $unused_href);

    $unused_ei = diff_hash(\%ei_static_defs, \%ei_usage);

    print_list("Unused href entry: $filename: ", $unused_href);
    print_list("Unused ei entry: $filename: ", $unused_ei);

# 2. Are all the hf_defs and ei_ entries (static and global) in [hf|ei]_array_entries ?
#    (Note: if a static hf_def or ei is "unused", don't check for same in [hf|ei]_array_entries)
#    if not: "ERROR: NO ARRAY"

## Checking for missing global defs currently gives false positives
## So: only check static defs for now.
##    $no_array_href = diff_hash(\%hf_defs, \%hf_array_entries);
    $no_array_href = diff_hash(\%hf_static_defs, \%hf_array_entries);
    $no_array_href = diff_hash($no_array_href, $unused_href); # Remove "unused" hf_... from no_array list
    $no_array_ei = diff_hash(\%ei_static_defs, \%ei_array_entries);
    $no_array_ei = diff_hash($no_array_ei, $unused_ei); # Remove "unused" ei_... from no_array list

    print_list("ERROR: NO ARRAY: $filename: ", $no_array_href);
    print_list("ERROR: NO ARRAY: $filename: ", $no_array_ei);

    # Only the "NO ARRAY" condition is fatal; "unused" is advisory.
    if ((keys %{$no_array_href}) != 0) {
        $error += 1;
    }
    if ((keys %{$no_array_ei}) != 0) {
        $error += 1;
    }
}

exit (($error == 0) ? 0 : 1); # exit 1 if ERROR
+
+
+# ---------------------------------------------------------------------
+#
# Print a brief usage message and terminate with status 1.
sub usage {
    my $msg = "Usage: $0 [--debug=n] Filename [...]\n";
    print $msg;
    exit(1);
}
+
+# ---------------------------------------------------------------------
+# action: read contents of a file to specified string
+# arg: filename_ref, file_contents_ref
+
sub read_file {
    my ($filename_ref, $file_contents_ref) = @_;

    die "No such file: \"${$filename_ref}\"\n" if (! -e ${$filename_ref});

    # delete leading './' (note: modifies the caller's filename in place)
    ${$filename_ref} =~ s{ ^ [.] / } {}xmso;

    # Read in the file (ouch, but it's easier that way)
    # The ":crlf" layer normalizes DOS line endings to "\n" so the
    # later regexes only have to deal with LF.
    open(my $fci, "<:crlf", ${$filename_ref}) || die("Couldn't open ${$filename_ref}");

    # Slurp: locally undef $/ so <> returns the whole file at once.
    ${$file_contents_ref} = do { local( $/ ) ; <$fci> } ;

    close($fci);

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Create a hash containing entries in 'a' that are not in 'b'
+# arg: a_href, b_href
+# returns: pointer to hash
+
# Build a hash whose keys are those of 'a' that are absent from 'b'.
# All values in the result are undef; a reference to it is returned.
sub diff_hash {
    my ($a_href, $b_href) = @_;

    my %only_in_a;
    for my $key (keys %{$a_href}) {
        $only_in_a{$key} = undef unless exists $b_href->{$key};
    }

    return \%only_in_a;
}
+
+# ---------------------------------------------------------------------
+# action: print a list
+# arg: hdr, list_href
+
# Print one line per key of the referenced hash, sorted, each line
# prefixed with the caller-supplied header string.
sub print_list {
    my ($hdr, $list_href) = @_;

    foreach my $entry (sort keys %{$list_href}) {
        print "$hdr$entry\n";
    }

    return;
}
+
+# ------------
+# action: remove blank lines from input string
+# arg: code_ref, filename
+
# Strip blank (whitespace-only) lines from the referenced string,
# modifying it in place. Same pattern as before, written with
# conventional slash delimiters; the /s and /o flags were no-ops here.
sub remove_blank_lines {
    my ($code_ref, $filename) = @_;

    ${$code_ref} =~ s/^\s*\n?//mg;

    return;
}
+
# Return a compiled regex matching one quoted literal (double-quoted
# string or single-quoted character constant), including backslash
# escapes inside it.
sub get_quoted_str_regex {
    # A regex which matches double-quoted strings.
    #    's' modifier added so that strings containing a 'line continuation'
    #    ( \ followed by a new-line) will match.
    my $double_quoted_str = qr{ (?: ["] (?: \\. | [^\"\\\n])* ["]) }xmso;

    # A regex which matches single-quoted strings.
    my $single_quoted_str = qr{ (?: ['] (?: \\. | [^\'\\\n])* [']) }xmso;

    return qr{ $double_quoted_str | $single_quoted_str }xmso;
}
+
+# ------------
+# action: remove comments from input string
+# arg: code_ref, filename
+
# Remove C and C++ style comments from the referenced string, in place.
# Quoted strings are matched first in the C++ pass so that "//" inside
# a string literal (e.g. "coap://") is not treated as a comment.
sub remove_comments {
    my ($code_ref, $filename) = @_;

    # The below Regexp is based on one from:
    # https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811
    # It is in the public domain.
    # A complicated regex which matches C-style comments.
    my $c_comment_regex = qr{ / [*] [^*]* [*]+ (?: [^/*] [^*]* [*]+ )* / }xmso;

    ${$code_ref} =~ s{ $c_comment_regex } {}xmsog;

    # Remove single-line C++-style comments. Be careful not to break up strings
    # like "coap://", so match double quoted strings, single quoted characters,
    # division operator and other characters before the actual "//" comment.
    my $quoted_str = get_quoted_str_regex();
    my $cpp_comment_regex = qr{ ^((?: $quoted_str | /(?!/) | [^'"/\n] )*) // .*$ }xm;
    # NOTE(review): /x does not apply to the replacement, so " $1 "
    # re-inserts the kept prefix with a space on each side -- harmless
    # for this tool but worth knowing.
    ${$code_ref} =~ s{ $cpp_comment_regex } { $1 }xmg;

    ($debug == 1) && print "==> After Remove Comments: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}
+
+# ------------
+# action: remove quoted strings from input string
+# arg: code_ref, filename
+
# Remove all quoted string/character literals from the referenced
# string, in place, so later hf_/ei_ scans cannot match inside them.
sub remove_quoted_strings {
    my ($code_ref, $filename) = @_;

    my $quoted_str = get_quoted_str_regex();
    ${$code_ref} =~ s{ $quoted_str } {}xmsog;

    ($debug == 1) && print "==> After Remove quoted strings: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}
+
+# -------------
+# action: remove '#if 0'd code from the input string
+# args codeRef, fileName
+# returns: codeRef
+#
+# Essentially: split the input into blocks of code or lines of #if/#if 0/etc.
+# Remove blocks that follow '#if 0' until '#else/#endif' is found.
+
{ # block begin

    # Remove code disabled by '#if 0' ... '#else/#endif' from the input,
    # in place. The input is split line-wise on preprocessor
    # conditionals; a depth counter tracks nested #if blocks so only the
    # region guarded by '#if 0' is dropped.
    sub remove_if0_code {
        my ($codeRef, $fileName) = @_;

        # Preprocess output (ensure trailing LF and no leading WS before '#')
        # NOTE(review): no /g here, so only the first '#' line has its
        # leading whitespace stripped -- confirm intended.
        $$codeRef =~ s/^\s*#/#/m;
        if ($$codeRef !~ /\n$/) { $$codeRef .= "\n"; }

        # Split into blocks of normal code or lines with conditionals.
        # 'if 0' must precede 'if' in the alternation so it wins.
        my $ifRegExp = qr/if 0|if|else|endif/;
        my @blocks = split(/^(#\s*(?:$ifRegExp).*\n)/m, $$codeRef);

        my ($if_lvl, $if0_lvl, $if0) = (0,0,0);
        my $lines = '';
        for my $block (@blocks) {
            my $if;
            if ($block =~ /^#\s*($ifRegExp)/) {
                # #if/#if 0/#else/#endif processing
                $if = $1;
                if ($debug == 99) {
                    print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl [$if] - $block");
                }
                if ($if eq 'if') {
                    $if_lvl += 1;
                } elsif ($if eq 'if 0') {
                    $if_lvl += 1;
                    if ($if0_lvl == 0) {
                        $if0_lvl = $if_lvl;
                        $if0     = 1; # inside #if 0
                    }
                } elsif ($if eq 'else') {
                    if ($if0_lvl == $if_lvl) {
                        $if0 = 0;
                    }
                } elsif ($if eq 'endif') {
                    if ($if0_lvl == $if_lvl) {
                        $if0     = 0;
                        $if0_lvl = 0;
                    }
                    $if_lvl -= 1;
                    if ($if_lvl < 0) {
                        die "patsub: #if/#endif mismatch in $fileName"
                    }
                }
            }

            if ($debug == 99) {
                print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl\n");
            }
            # Keep preprocessor lines and blocks that are not enclosed in #if 0
            if ($if or $if0 != 1) {
                $lines .= $block;
            }
        }
        $$codeRef = $lines;

        ($debug == 2) && print "==> After Remove if0: code: [$fileName]\n$$codeRef\n===<\n";
        return $codeRef;
    }
} # block end
+
+# ---------------------------------------------------------------------
+# action: Add to hash an entry for each
+# 'static? g?int hf_...' definition (including array names)
+# in the input string.
+# The entry value will be 0 for 'static' definitions and 1 for 'global' definitions;
+# Remove each definition found from the input string.
+# args: code_ref, filename, hf_defs_href
+# returns: ref to the hash
+
sub find_remove_hf_defs {
    my ($code_ref, $filename, $hf_defs_href) = @_;

    # Build pattern to match any of the following
    #  static? g?int hf_foo = -1;
    #  static? g?int hf_foo[xxx];
    #  static? g?int hf_foo[xxx] = {

    # p1: 'static? g?int hf_foo'
    my $p1_regex = qr{
                         ^
                         \s*
                         (static \s+)?
                         g?int
                         \s+
                         (hf_[a-zA-Z0-9_]+)          # hf_..
                 }xmso;

    # p2a: ' = -1;'
    my $p2a_regex = qr{
                          \s* = \s*
                          (?:
                              - \s* 1
                          )
                          \s* ;
                  }xmso;

    # p2b: '[xxx];' or '[xxx] = {'
    my $p2b_regex = qr/
                          \s* \[ [^\]]+ \] \s*
                          (?:
                              = \s* [{] | ;
                          )
                  /xmso;

    my $hf_def_regex = qr{ $p1_regex (?: $p2a_regex | $p2b_regex ) }xmso;

    # Record each definition: value 0 == 'static', 1 == global.
    while (${$code_ref} =~ m{ $hf_def_regex }xmsog) {
        #print ">%s< >$2<\n", (defined $1) ? $1 ; "";
        $hf_defs_href->{$2} = (defined $1) ? 0 : 1; # 'static' if $1 is defined.
    }
    ($debug == 3) && debug_print_hash("VD: $filename", $hf_defs_href); # VariableDefinition

    # remove all
    ${$code_ref} =~ s{ $hf_def_regex } {}xmsog;
    ($debug == 3) && print "==> After remove hf_defs: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Add to hash an entry (hf_...) for each hf[] entry.
+# Remove each hf[] entries found from the input string.
+# args: code_ref, filename, hf_array_entries_href
+
sub find_remove_hf_array_entries {
    my ($code_ref, $filename, $hf_array_entries_href) = @_;

    # hf[] entry regex (to extract an hf_index_name and associated field type).
    # Note: string literals were removed earlier, so the "[^}]+" spans
    # here match the remaining header_field_info initializer fields.
    my $hf_array_entry_regex = qr /
                                      [{]
                                      \s*
                                      & \s* ( [a-zA-Z0-9_]+ )   # &hf
                                      (?:
                                          \s* [[] [^]]+ []]     # optional array ref
                                      ) ?
                                      \s* , \s*
                                      [{]
                                      [^}]+
                                      , \s*
                                      (FT_[a-zA-Z0-9_]+)        # field type
                                      \s* ,
                                      [^}]+
                                      , \s*
                                      (?:
                                          HFILL | HF_REF_TYPE_NONE
                                      )
                                      [^}]*
                                      }
                                      [\s,]*
                                      [}]
                              /xmso;

    # find all the hf[] entries (searching ${$code_ref}).
    while (${$code_ref} =~ m{ $hf_array_entry_regex }xmsog) {
        ($debug == 98) && print "+++ $1 $2\n";
        $hf_array_entries_href->{$1} = undef;
    }

    ($debug == 4) && debug_print_hash("AE: $filename", $hf_array_entries_href); # ArrayEntry

    # now remove all
    ${$code_ref} =~ s{ $hf_array_entry_regex } {}xmsog;
    ($debug == 4) && print "==> After remove hf_array_entries: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Add to hash an entry (hf_...) for each hf_... var
+# found in statements of the form:
+# 'hf_... = proto_registrar_get_id_byname ...'
+# 'hf_... = proto_get_id_by_filtername ...'
+# Remove each such statement found from the input string.
+# args: code_ref, filename, hf_array_entries_href
+
# Treat 'hf_... = proto_registrar_get_id_byname(...)' and
# 'hf_... = proto_get_id_by_filter_name(...)' assignments as array
# entries: the variable is initialized externally rather than via hf[].
# The matched statements are removed from the input so they are not
# later counted as "usage".
sub find_remove_proto_get_id_hf_assignments {
    my ($code_ref, $filename, $hf_array_entries_href) = @_;

    my $_regex = qr{ ( hf_ [a-zA-Z0-9_]+ )
                     \s* = \s*
                     (?: proto_registrar_get_id_byname | proto_get_id_by_filter_name )
               }xmso;

    my @hfvars = ${$code_ref} =~ m{ $_regex }xmsog;

    if (@hfvars == 0) {
        return;
    }

    # found:
    # Sanity check: hf_vars shouldn't already be in hf_array_entries.
    # (Was: 'defined @$hf_array_entries_href{@hfvars}', which in scalar
    # context only tested the *last* slice element, and defined() on an
    # aggregate is deprecated; test each var explicitly instead.)
    if (grep { exists $hf_array_entries_href->{$_} } @hfvars) {
        printf "? one or more of [@hfvars] initialized via proto_registrar_get_by_name() also in hf[] ??\n";
    }

    # Now: add to hf_array_entries
    @$hf_array_entries_href{@hfvars} = ();

    ($debug == 4) && debug_print_hash("PR: $filename", $hf_array_entries_href);

    # remove from input (so not considered as 'usage')
    ${$code_ref} =~ s{ $_regex } {}xmsog;

    ($debug == 4) && print "==> After remove proto_registrar_by_name: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Add to hash all hf_... strings remaining in input string.
+# args:   code_ref, filename, hf_usage_href
+# return: ref to hf_usage hash
+#
+# The hash will include *all* strings of form hf_...
+# which are in the input string (even strings which
+# aren't actually vars).
+# We don't care since we'll be checking only
+# known valid vars against these strings.
+
# Count every token in the remaining input that merely *looks* like an
# hf_ variable. Over-matching is harmless: the caller only intersects
# this hash with the list of known definitions.
sub find_hf_usage {
    my ($code_ref, $filename, $hf_usage_href) = @_;

    my $hf_token_regex = qr{
        \b ( hf_[a-zA-Z0-9_]+ )             # hf_...
    }xmso;

    # m{...}g in list context yields every capture at once.
    foreach my $token (${$code_ref} =~ m{ $hf_token_regex }xmsog) {
        $hf_usage_href->{$token} += 1;
    }

    ($debug == 5) && debug_print_hash("VU: $filename", $hf_usage_href); # VariableUsage

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Remove from 'unused' hash an instance of a variable named hf_..._pid
+# if the source has a call to llc_add_oui() or ieee802a_add_oui().
+# (This is rather a bit of a hack).
+# args:   code_ref, filename, unused_href
+
sub remove_hf_pid_from_unused_if_add_oui_call {
    my ($code_ref, $filename, $unused_href) = @_;

    # Nothing flagged unused: nothing to do.
    if ((keys %{$unused_href}) == 0) {
        return;
    }

    my @hfvars = grep { m/ ^ hf_ [a-zA-Z0-9_]+ _pid $ /xmso} keys %{$unused_href};

    # Only applies when there is exactly one unused hf_..._pid variable.
    if ((@hfvars == 0) || (@hfvars > 1)) {
        return; # if multiple unused hf_..._pid
    }

    if (${$code_ref} !~ m{ llc_add_oui | ieee802a_add_oui }xmso) {
        return;
    }

    # hf_...pid unused var && a call to ..._add_oui(); delete entry from unused
    # XXX: maybe hf_..._pid should really be added to hfUsed ?
    delete @$unused_href{@hfvars};

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Add to hash an entry for each
+# 'static? expert_field ei_...' definition (including array names)
+# in the input string.
+# The entry value will be 0 for 'static' definitions and 1 for 'global' definitions;
+# Remove each definition found from the input string.
+# args: code_ref, filename, hf_defs_href
+# returns: ref to the hash
+
sub find_remove_ei_defs {
    my ($code_ref, $filename, $ei_defs_eiref) = @_;

    # Build pattern to match any of the following
    #  static? expert_field ei_foo = -1;
    #  static? expert_field ei_foo[xxx];
    #  static? expert_field ei_foo[xxx] = {

    # p1: 'static? expert_field ei_foo'
    my $p1_regex = qr{
                         ^
                         (static \s+)?
                         expert_field
                         \s+
                         (ei_[a-zA-Z0-9_]+)          # ei_..
                 }xmso;

    # p2a: ' = EI_INIT;'
    my $p2a_regex = qr{
                          \s* = \s*
                          (?:
                              EI_INIT
                          )
                          \s* ;
                  }xmso;

    # p2b: '[xxx];' or '[xxx] = {'
    my $p2b_regex = qr/
                          \s* \[ [^\]]+ \] \s*
                          (?:
                              = \s* [{] | ;
                          )
                  /xmso;

    my $ei_def_regex = qr{ $p1_regex (?: $p2a_regex | $p2b_regex ) }xmso;

    # Record each definition: value 0 == 'static', 1 == global.
    while (${$code_ref} =~ m{ $ei_def_regex }xmsog) {
        #print ">%s< >$2<\n", (defined $1) ? $1 ; "";
        $ei_defs_eiref->{$2} = (defined $1) ? 0 : 1; # 'static' if $1 is defined.
    }
    ($debug == 3) && debug_print_hash("VD: $filename", $ei_defs_eiref); # VariableDefinition

    # remove all
    ${$code_ref} =~ s{ $ei_def_regex } {}xmsog;
    ($debug == 3) && print "==> After remove ei_defs: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Add to hash an entry (ei_...) for each ei[] entry.
+# Remove each ei[] entries found from the input string.
+# args: code_ref, filename, ei_array_entries_href
+
# Record (and remove) each ei[] array entry found in the input.
# Fix: under /x, the original optional-array-ref subpattern
# '[ [^]]+ ]' parsed as the character class {space,'[','^'} followed by
# a literal ']', so entries like '&ei_foo[2]' never matched and were
# falsely reported as "NO ARRAY". Use the same bracket-matching form as
# the hf[] regex above ('[[] [^]]+ []]').
sub find_remove_ei_array_entries {
    my ($code_ref, $filename, $ei_array_entries_eiref) = @_;

    # ei[] entry regex (to extract an ei_index_name and associated field type)
    my $ei_array_entry_regex = qr /
        {
        \s*
        & \s* ( [a-zA-Z0-9_]+ )         # &ei
        (?:
            \s* [[] [^]]+ []]           # optional array ref
        ) ?
        \s* , \s*
        {
            # \s* "[^"]+"               # (filter string has been removed already)
            \s* , \s*
            PI_[A-Z0-9_]+               # event group
            \s* , \s*
            PI_[A-Z0-9_]+               # event severity
            \s* ,
            [^,]*                       # description string (already removed) or NULL
            , \s*
            EXPFILL
            \s*
        }
        \s*
        }
        /xs;

    # find all the ei[] entries (searching ${$code_ref}).
    while (${$code_ref} =~ m{ $ei_array_entry_regex }xsg) {
        ($debug == 98) && print "+++ $1\n";
        $ei_array_entries_eiref->{$1} = undef;
    }

    ($debug == 4) && debug_print_hash("AE: $filename", $ei_array_entries_eiref); # ArrayEntry

    # now remove all
    ${$code_ref} =~ s{ $ei_array_entry_regex } {}xmsog;

    ($debug == 4) && print "==> After remove ei_array_entries: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}
+
+# ---------------------------------------------------------------------
+# action: Add to hash all ei_... strings remaining in input string.
+# args:   code_ref, filename, ei_usage_eiref
+# return: ref to ei_usage hash
+#
+# The hash will include *all* strings of form ei_...
+# which are in the input string (even strings which
+# aren't actually vars).
+# We don't care since we'll be checking only
+# known valid vars against these strings.
+
# Count every token in the remaining input that merely *looks* like an
# ei_ variable. Over-matching is harmless: the caller only intersects
# this hash with the list of known definitions.
sub find_ei_usage {
    my ($code_ref, $filename, $ei_usage_eiref) = @_;

    my $ei_token_regex = qr{
        \b ( ei_[a-zA-Z0-9_]+ )             # ei_...
    }xmso;

    # m{...}g in list context yields every capture at once.
    foreach my $token (${$code_ref} =~ m{ $ei_token_regex }xmsog) {
        $ei_usage_eiref->{$token} += 1;
    }

    ($debug == 5) && debug_print_hash("VU: $filename", $ei_usage_eiref); # VariableUsage

    return;
}
+
+# ---------------------------------------------------------------------
+sub debug_print_hash {
+ my ($title, $href) = @_;
+
+ ##print "==> $title\n";
+ for my $k (sort keys %{$href}) {
+ my $h = defined($href->{$k}) ? $href->{$k} : "undef";
+ printf "%-40.40s %5.5s %s\n", $title, $h, $k;
+ }
+}
diff --git a/tools/checklicenses.py b/tools/checklicenses.py
new file mode 100755
index 0000000..192fecb
--- /dev/null
+++ b/tools/checklicenses.py
@@ -0,0 +1,262 @@
+#!/usr/bin/env python3
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+"""Makes sure that all files contain proper licensing information."""
+
+
+import optparse
+import os.path
+import subprocess
+import sys
+
+
def PrintUsage():
  """Print command-line usage information for this script to stdout."""
  print("""Usage: python checklicenses.py [--root <root>] [tocheck]
  --root Specifies the repository root. This defaults to ".." relative
         to the script file. This will be correct given the normal location
         of the script in "<root>/tools".

  --ignore-suppressions Ignores path-specific allowed license. Useful when
         trying to remove a suppression/allowed entry.

  --list-allowed Print a list of allowed licenses and exit.

  tocheck Specifies the directory, relative to root, to check. This defaults
          to "." so it checks everything.

Examples:
  python checklicenses.py
  python checklicenses.py --root ~/chromium/src third_party""")
+
+
# Licenses (as reported by licensecheck.pl) that are acceptable anywhere
# in the tree. Entries naming two licenses correspond to files that
# offer a choice of license or mix compatibly-licensed code.
ALLOWED_LICENSES = [
    'BSD (1 clause)',
    'BSD (2 clause)',
    'BSD (2 clause) GPL (v2 or later)',
    'BSD (3 clause)',
    'GPL (v2 or later)',
    'GPL (v3 or later) (with Bison parser exception)',
    'ISC',
    'ISC GPL (v2 or later)',
    'LGPL (v2 or later)',
    'LGPL (v2.1 or later)',
    'MIT/X11 (BSD like)',
    'Public domain',
    'Public domain GPL (v2 or later)',
    'Public domain MIT/X11 (BSD like)',
    'zlib/libpng',
    'zlib/libpng GPL (v2 or later)',
]
+
+
# Per-path suppressions: maps a path *prefix* (relative to the repository
# root) to the licensecheck.pl output strings additionally allowed under
# that prefix. 'UNKNOWN' means licensecheck could not classify the file.
PATH_SPECIFIC_ALLOWED_LICENSES = {
    'caputils/airpcap.h': [
        'BSD-3-Clause',
    ],
    'wsutil/strnatcmp.c': [
        'Zlib',
    ],
    'wsutil/strnatcmp.h': [
        'Zlib',
    ],
    'resources/protocols/dtds': [
        'UNKNOWN',
    ],
    'resources/protocols/diameter/dictionary.dtd': [
        'UNKNOWN',
    ],
    'resources/protocols/wimaxasncp/dictionary.dtd': [
        'UNKNOWN',
    ],
    'doc/': [
        'UNKNOWN',
    ],
    'docbook/custom_layer_chm.xsl': [
        'UNKNOWN',
    ],
    'docbook/custom_layer_single_html.xsl': [
        'UNKNOWN',
    ],
    'docbook/ws.css' : [
        'UNKNOWN'
    ],
    'fix': [
        'UNKNOWN',
    ],
    'wsutil/g711.c': [
        'UNKNOWN',
    ],
    'packaging/macosx': [
        'UNKNOWN',
    ],
    'epan/except.c': [
        'UNKNOWN',
    ],
    'epan/except.h': [
        'UNKNOWN',
    ],
    # Generated header files by lex/lemon/whatever
    'epan/dtd_grammar.h': [
        'UNKNOWN',
    ],
    'epan/dfilter/grammar.h': [
        'UNKNOWN',
    ],
    'epan/dfilter/grammar.c': [
        'UNKNOWN',
    ],
    'epan/dissectors/packet-ieee80211-radiotap-iter.': [ # Using ISC license only
        'ISC GPL (v2)'
    ],
    # Mentions BSD-3-clause twice due to embedding of code:
    'epan/dissectors/packet-communityid.c': [
        'BSD (3 clause) BSD (3 clause)',
    ],
    'plugins/mate/mate_grammar.h': [
        'UNKNOWN',
    ],
    'vcs_version.h': [
        'UNKNOWN',
    ],
    # Special IDL license that appears to be compatible as far as I (not a
    # lawyer) can tell. See
    # https://www.wireshark.org/lists/wireshark-dev/201310/msg00234.html
    'epan/dissectors/pidl/idl_types.h': [
        'UNKNOWN',
    ],
    # The following tools are under incompatible licenses (mostly GPLv3 or
    # GPLv3+), but this is OK since they are not actually linked into Wireshark
    'tools/pidl': [
        'UNKNOWN',
    ],
    'tools/lemon': [
        'UNKNOWN',
    ],
    'tools/licensecheck.pl': [
        'GPL (v2)'
    ],
    '.gitlab/': [
        'UNKNOWN',
    ],
    'wsutil/safe-math.h': [ # Public domain (CC0)
        'UNKNOWN',
    ],
}
+
def check_licenses(options, args):
  """Scan the tree with licensecheck.pl and report disallowed licenses.

  Args:
    options: parsed optparse options (base_directory, verbose,
      list_allowed, ignore_suppressions).
    args: positional arguments; at most one directory (relative to the
      base directory) to scan.

  Returns:
    0 on success, 1 if a disallowed license was found or the scan failed.
  """
  if options.list_allowed:
    print('\n'.join(ALLOWED_LICENSES))
    sys.exit(0)

  # Figure out which directory we have to check.
  if len(args) == 0:
    # No directory to check specified, use the repository root.
    start_dir = options.base_directory
  elif len(args) == 1:
    # Directory specified. Start here. It's supposed to be relative to the
    # base directory.
    start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
  else:
    # More than one argument, we don't handle this.
    PrintUsage()
    return 1

  print("Using base directory: %s" % options.base_directory)
  print("Checking: %s" % start_dir)
  print("")

  licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
                                                   'tools',
                                                   'licensecheck.pl'))

  # '-l 150' scans up to 150 header lines per file; '-r' recurses.
  licensecheck = subprocess.Popen([licensecheck_path,
                                   '-l', '150',
                                   '-r', start_dir],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
  stdout, stderr = licensecheck.communicate()
  stdout = stdout.decode('utf-8')
  stderr = stderr.decode('utf-8')
  if options.verbose:
    print('----------- licensecheck stdout -----------')
    print(stdout)
    print('--------- end licensecheck stdout ---------')
  # Any stderr output (or a non-zero exit) is treated as a failed scan.
  if licensecheck.returncode != 0 or stderr:
    print('----------- licensecheck stderr -----------')
    print(stderr)
    print('--------- end licensecheck stderr ---------')
    print("\nFAILED\n")
    return 1

  success = True
  exit_status = 0
  for line in stdout.splitlines():
    # licensecheck output format: "<path>: <license text>"
    filename, license = line.split(':', 1)
    filename = os.path.relpath(filename.strip(), options.base_directory)

    # All files in the build output directory are generated one way or another.
    # There's no need to check them.
    if os.path.dirname(filename).startswith('build'):
      continue

    # For now we're just interested in the license.
    license = license.replace('*No copyright*', '').strip()

    # Skip generated files.
    if 'GENERATED FILE' in license:
      continue

    # Support files which provide a choice between licenses.
    if any(item in ALLOWED_LICENSES for item in license.split(';')):
      continue

    if not options.ignore_suppressions:
      found_path_specific = False
      for prefix in PATH_SPECIFIC_ALLOWED_LICENSES:
        if (filename.startswith(prefix) and
            license in PATH_SPECIFIC_ALLOWED_LICENSES[prefix]):
          found_path_specific = True
          break
      if found_path_specific:
        continue

    reason = "License '%s' for '%s' is not allowed." % (license, filename)
    success = False
    print(reason)
    exit_status = 1

  if success:
    print("\nSUCCESS\n")
    return 0
  else:
    print("\nFAILED\n")
    return exit_status
+
+
def main():
  """Parse command-line options and run the license check.

  Returns the exit status from check_licenses() (0 on success).
  """
  # The script lives in <root>/tools, so the repository root is one
  # directory up from here.
  default_root = os.path.abspath(
      os.path.join(os.path.dirname(__file__), '..'))
  option_parser = optparse.OptionParser()
  # Fix: the help text previously claimed the default was '"../.."',
  # which contradicted both default_root above and PrintUsage().
  option_parser.add_option('--root', default=default_root,
                           dest='base_directory',
                           help='Specifies the repository root. This defaults '
                                'to ".." relative to the script file, which '
                                'will normally be the repository root.')
  option_parser.add_option('-v', '--verbose', action='store_true',
                           default=False, help='Print debug logging')
  option_parser.add_option('--list-allowed',
                           action='store_true',
                           default=False,
                           help='Print a list of allowed licenses and exit.')
  option_parser.add_option('--ignore-suppressions',
                           action='store_true',
                           default=False,
                           help='Ignore path-specific allowed license.')
  options, args = option_parser.parse_args()
  return check_licenses(options, args)


if '__main__' == __name__:
  sys.exit(main())
diff --git a/tools/colorfilters2js.py b/tools/colorfilters2js.py
new file mode 100644
index 0000000..49b8a42
--- /dev/null
+++ b/tools/colorfilters2js.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+#
+# Copyright 2022 by Moshe Kaplan
+# Based on colorfilter2js.pl by Dirk Jagdmann <doj@cubic.org>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+# Python script to convert a Wireshark color scheme to javascript
+# code. The javascript function should then be inserted into the
+# pdml2html.xsl file.
+#
+# run this as: python tools/colorfilters2js.py colorfilters
+
+
+import argparse
+import io
+import re
+import sys
+
# JavaScript emitted before the per-color branches: resolves the node
# argument and declares the fg/bg holder variables.
js_prologue = """\
function set_node_color(node, colorname)
{
    if (dojo.isString(node))
        node = dojo.byId(node);
    if (!node) return;
    var fg;
    var bg;
"""

# Template for one color branch: {0} is the filter name, {1}-{3} the
# background RGB, {4}-{6} the foreground RGB (8-bit, hex-formatted),
# {7} the "else " prefix used to chain branches after the first.
js_color_entry = """\
    {7}if (colorname == '{0}') {{
        bg='#{1:02x}{2:02x}{3:02x}';
        fg='#{4:02x}{5:02x}{6:02x}';
    }}\
"""

# JavaScript emitted after the branches: applies the selected colors.
js_epilogue = """
    if (fg.length > 0)
        node.style.color = fg;
    if (bg.length > 0)
        node.style.background = bg;
}
"""
+
+
def generate_javascript(colorlines):
    """Render the set_node_color() JavaScript for the given color rows.

    Each row is a 7-tuple: filter name followed by six 16-bit RGB
    components (background then foreground) as decimal strings; the
    components are scaled down to 8 bits for the CSS hex colors.
    """
    pieces = [js_prologue]
    for index, row in enumerate(colorlines):
        rgb8 = [int(component) // 256 for component in row[1:7]]
        # Every branch after the first is chained with "else ".
        chain_prefix = "else " if index else ""
        pieces.append(js_color_entry.format(row[0], *rgb8, chain_prefix))
    pieces.append(js_epilogue)
    return "\n".join(pieces)
+
+
def main():
    """Read colorfilter files named on the command line and write the
    corresponding set_node_color() JavaScript to stdout as UTF-8."""
    parser = argparse.ArgumentParser(description="Convert a Wireshark color scheme to javascript code.")
    parser.add_argument("files", metavar='files', nargs='+', help="paths to colorfiles")
    parsed_args = parser.parse_args()

    # Seven capture groups: filter name, then background RGB, then
    # foreground RGB (16-bit components as decimal strings).
    COLORLINE_PATTERN = r"\@(.+?)\@.+\[(\d+),(\d+),(\d+)\]\[(\d+),(\d+),(\d+)\]"
    colorlines = []

    # Sample line:
    # @Errors@ct.error@[4626,10023,11822][63479,34695,34695]

    # Read the lines from all files:
    for filename in parsed_args.files:
        with open(filename, encoding='utf-8') as fh:
            file_content = fh.read()
        colorlines += re.findall(COLORLINE_PATTERN, file_content)
    javascript_code = generate_javascript(colorlines)

    # Wrap stdout so the output is UTF-8 regardless of the locale.
    stdoutu8 = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    stdoutu8.write(javascript_code)


if __name__ == "__main__":
    main()
diff --git a/tools/commit-msg b/tools/commit-msg
new file mode 100755
index 0000000..6b3052b
--- /dev/null
+++ b/tools/commit-msg
@@ -0,0 +1,7 @@
+#!/bin/sh
+#
+# Validate the commit message.
+
+./tools/validate-commit.py --commitmsg $1
+
+
diff --git a/tools/compress-pngs.py b/tools/compress-pngs.py
new file mode 100755
index 0000000..ed3e32a
--- /dev/null
+++ b/tools/compress-pngs.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+#
+# compress-pngs.py - Compress PNGs
+#
+# By Gerald Combs <gerald@wireshark.org
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''Run various compression and optimization utilities on one or more PNGs'''
+
+import argparse
+import concurrent.futures
+import shutil
+import subprocess
+import sys
+
PNG_FILE_ARG = '%PNG_FILE_ARG%'

def get_compressors():
    """Build the table of known *lossless* PNG compressors.

    Returns a dict keyed by tool name. Each value holds the tool's 'args'
    (with PNG_FILE_ARG as a placeholder for the target file); a 'path'
    entry is added for every tool actually found on PATH.
    """
    compressors = {
        # https://github.com/shssoichiro/oxipng
        'oxipng': { 'args': ['--opt', 'max', '--strip', 'safe', PNG_FILE_ARG] },
        # http://optipng.sourceforge.net/
        'optipng': { 'args': ['-o3', '-quiet', PNG_FILE_ARG] },
        # https://github.com/amadvance/advancecomp
        'advpng': { 'args': ['--recompress', '--shrink-insane', PNG_FILE_ARG] },
        # https://github.com/amadvance/advancecomp
        'advdef': { 'args': ['--recompress', '--shrink-insane', PNG_FILE_ARG] },
        # https://pmt.sourceforge.io/pngcrush/
        'pngcrush': { 'args': ['-q', '-ow', '-brute', '-reduce', '-noforce', PNG_FILE_ARG, 'pngcrush.$$$$.png'] },
        # https://github.com/fhanau/Efficient-Compression-Tool
        'ect': { 'args': ['-5', '--mt-deflate', '--mt-file', '-strip', PNG_FILE_ARG]}
    }
    # Record the absolute path of every tool that is installed.
    for name, info in compressors.items():
        located = shutil.which(name)
        if located:
            info['path'] = located
    return compressors
+
+
def compress_png(png_file, compressors):
    """Run every available compressor from *compressors* on *png_file*.

    Tools without a resolved 'path' (not installed) are skipped. Failures
    are reported on stdout but never raised, so one broken tool does not
    stop the remaining compressors (this runs inside a process pool).
    """
    for compressor in compressors:
        if not compressors[compressor].get('path', False):
            continue

        args = compressors[compressor]['args']
        args = [arg.replace(PNG_FILE_ARG, png_file) for arg in args]

        try:
            compress_proc = subprocess.run([compressor] + args)
        except Exception as exc:
            # Bug fix: the original printed compress_proc.returncode here,
            # but compress_proc is unbound when subprocess.run() itself
            # raises, turning the report into a NameError. Report the
            # exception instead.
            print('{} failed: {}'.format(compressor, exc))
        else:
            # run() without check= does not raise on a nonzero exit code,
            # so report it explicitly.
            if compress_proc.returncode != 0:
                print('{} returned {}:'.format(compressor, compress_proc.returncode))
+
+
def main():
    """Locate compressors and run them over the given PNGs in parallel."""
    parser = argparse.ArgumentParser(description='Compress PNGs')
    parser.add_argument('--list', action='store_true',
                        help='List available compressors')
    parser.add_argument('png_files', nargs='*', metavar='png file', help='Files to compress')
    args = parser.parse_args()

    compressors = get_compressors()

    # Bail out early when none of the known tools is installed.
    available = sum(1 for info in compressors.values() if 'path' in info)
    if available < 1:
        sys.stderr.write('No compressors found\n')
        sys.exit(1)

    if args.list:
        for name, info in compressors.items():
            print('{}: {}'.format(name, info.get('path', 'Not found')))
        sys.exit(0)

    # One worker process per file; each worker runs all compressors in turn.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        pending = []
        for png_file in args.png_files:
            print('Compressing {}'.format(png_file))
            pending.append(executor.submit(compress_png, png_file, compressors))
        concurrent.futures.wait(pending)


if __name__ == '__main__':
    main()
diff --git a/tools/convert-glib-types.py b/tools/convert-glib-types.py
new file mode 100755
index 0000000..aa714d7
--- /dev/null
+++ b/tools/convert-glib-types.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+'''\
+convert-glib-types.py - Convert glib types to their C and C99 eqivalents.
+'''
+
+# Imports
+
+import argparse
+import glob
+import platform
+import re
+import sys
+
# Filled in by main(): copies of the type_map entries padded with trailing
# spaces to a common width, so column-aligned declarations stay aligned
# after conversion.
padded_type_map = {}

# GLib scalar/pointer types and their C or C99 replacements.
type_map = {
    'gboolean': 'bool',
    'gchar': 'char',
    'guchar': 'unsigned char',
    'gint': 'int',
    'guint': 'unsigned', # Matches README.developer
    'glong': 'long',
    'gulong': 'unsigned long',
    'gint8': 'int8_t',
    'gint16': 'int16_t',
    'gint32': 'int32_t',
    'gint64': 'int64_t',
    'guint8': 'uint8_t',
    'guint16': 'uint16_t',
    'guint32': 'uint32_t',
    'guint64': 'uint64_t',
    'gfloat': 'float',
    'gdouble': 'double',
    'gpointer ': 'void *', # 'void *foo' instead of 'void * foo'
    'gpointer': 'void *',
    # Is gsize the same as size_t on the platforms we support?
    # https://gitlab.gnome.org/GNOME/glib/-/issues/2493
    'gsize': 'size_t',
    'gssize': 'ssize_t',
}

# GLib boolean values and limit macros mapped to C99 <stdbool.h>,
# <stdint.h> and <limits.h> equivalents.
definition_map = {
    'TRUE': 'true',
    'FALSE': 'false',
    'G_MAXINT8': 'INT8_MAX',
    'G_MAXINT16': 'INT16_MAX',
    'G_MAXINT32': 'INT32_MAX',
    'G_MAXINT64': 'INT64_MAX',
    'G_MAXINT': 'INT_MAX',
    'G_MAXUINT8': 'UINT8_MAX',
    'G_MAXUINT16': 'UINT16_MAX',
    'G_MAXUINT32': 'UINT32_MAX',
    'G_MAXUINT64': 'UINT64_MAX',
    'G_MAXUINT': 'UINT_MAX',
    'G_MININT8': 'INT8_MIN',
    'G_MININT16': 'INT16_MIN',
    'G_MININT32': 'INT32_MIN',
    'G_MININT64': 'INT64_MIN',
    'G_MININT': 'INT_MIN',
}

# GLib printf format-specifier macros mapped to <inttypes.h> equivalents.
format_spec_map = {
    'G_GINT64_FORMAT': 'PRId64',
    'G_GUINT64_FORMAT': 'PRIu64',
}
+
def convert_file(file):
    """Rewrite *file* in place, replacing GLib types, macros and format specs.

    Uses the module-level padded_type_map, type_map, definition_map and
    format_spec_map tables. Unreadable files are reported on stderr and
    skipped; nothing is written in that case.
    """
    lines = ''
    try:
        # Open explicitly as UTF-8 so the UnicodeDecodeError message below
        # is accurate regardless of the locale's default encoding.
        with open(file, 'r', encoding='utf-8') as f:
            lines = f.read()
        # Padded (alignment-preserving) replacements first, then plain ones.
        for glib_type, c99_type in padded_type_map.items():
            lines = lines.replace(glib_type, c99_type)
        # The [^"] guards avoid rewriting type names inside string literals.
        for glib_type, c99_type in type_map.items():
            lines = re.sub(rf'([^"])\b{glib_type}\b([^"])', rf'\1{c99_type}\2', lines, flags=re.MULTILINE)
        for glib_define, c99_define in definition_map.items():
            lines = re.sub(rf'\b{glib_define}\b', rf'{c99_define}', lines, flags=re.MULTILINE)
        for glib_fmt_spec, c99_fmt_spec in format_spec_map.items():
            lines = re.sub(rf'\b{glib_fmt_spec}\b', rf'{c99_fmt_spec}', lines, flags=re.MULTILINE)
    except IsADirectoryError:
        sys.stderr.write(f'{file} is a directory.\n')
        return
    except UnicodeDecodeError:
        sys.stderr.write(f"{file} isn't valid UTF-8.\n")
        return
    except Exception:
        # Was a bare 'except:', which would also swallow KeyboardInterrupt
        # and SystemExit.
        sys.stderr.write(f'Unable to open {file}.\n')
        return

    with open(file, 'w', encoding='utf-8') as f:
        f.write(lines)
    print(f'Converted {file}')
+
def main():
    """Expand wildcards (Windows only) and convert each named file in place."""
    parser = argparse.ArgumentParser(description='Convert glib types to their C and C99 eqivalents.')
    parser.add_argument('files', metavar='FILE', nargs='*')
    args = parser.parse_args()

    # Build a padded version of type_map which attempts to preserve the
    # column alignment of declarations after conversion.
    for glib_type, c99_type in type_map.items():
        padded_glib = glib_type + ' '
        padded_c99 = c99_type + ' '
        width = max(len(padded_glib), len(padded_c99))
        padded_type_map[f'{padded_glib:{width}s}'] = f'{padded_c99:{width}s}'

    # The Windows shell does not expand wildcards for us, so do it here.
    if platform.system() == 'Windows':
        files = [match for arg in args.files for match in glob.glob(arg)]
    else:
        files = args.files

    for file in files:
        convert_file(file)

# On with the show

if __name__ == "__main__":
    sys.exit(main())
diff --git a/tools/convert_expert_add_info_format.pl b/tools/convert_expert_add_info_format.pl
new file mode 100755
index 0000000..5728936
--- /dev/null
+++ b/tools/convert_expert_add_info_format.pl
@@ -0,0 +1,417 @@
+#!/usr/bin/env perl
+#
+# Copyright 2013 Michael Mann (see AUTHORS file)
+#
+# A program to help convert the "old" expert_add_info_format API calls into filterable "items" that
+# use the other expert API calls. The program requires 2 passes. "Pass 1" (generate) collects
+# the eligible expert_add_info_format calls and outputs the necessary data into a delimited
+# file. "Pass 2" (fix-all) takes the data from the delimited file and replaces the
+# expert_add_info_format calls with filterable "expert info" calls as well as
+# generating a separate files for the ei variable declarations and array data.
+# The ei "file" can be copy/pasted into the dissector where appropriate
+#
+# Note that the output from "Pass 1" won't always be a perfect conversion for "Pass 2", so
+# "human interaction" is needed as an intermediary to verify and update the delimited file
+# before "Pass 2" is done.
+#
+# Delimited file field format:
+# <convert expert_add_info_format_call[1-4]><add ei variable[0|1]><ei var><[GROUP]><[SEVERITY]><[FIELDNAME]><[EXPERTABBREV]>
+# <pinfo var><proto_item var><tvb var><offset><length><params>
+#
+# convert proto_tree_add_text_call enumerations:
+# 1 - expert_add_info
+# 2 - expert_add_info_format
+# 3 - proto_tree_add_expert
+# 4 - proto_tree_add_expert_format
+#
+# Usage: convert_expert_add_info_format.pl action=<generate|fix-all> <file or files>
+#
+# Based off of convert_proto_tree_add_text.pl
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+use strict;
+use warnings;
+
+use Getopt::Long;
+
# Recognized expert-info severities; used by verify_line() to validate
# the SEVERITY field of each input record.
my %EXPERT_SEVERITY = ('PI_COMMENT' => "PI_COMMENT",
                       'PI_CHAT' => "PI_CHAT",
                       'PI_NOTE' => "PI_NOTE",
                       'PI_WARN' => "PI_WARN",
                       'PI_ERROR' => "PI_ERROR");

# Recognized expert-info groups; used by verify_line() to validate the
# GROUP field of each input record.
my %EXPERT_GROUPS = ('PI_CHECKSUM' => "PI_CHECKSUM",
                     'PI_SEQUENCE' => "PI_SEQUENCE",
                     'PI_RESPONSE_CODE' => "PI_RESPONSE_CODE",
                     'PI_REQUEST_CODE' => "PI_REQUEST_CODE",
                     'PI_UNDECODED' => "PI_UNDECODED",
                     'PI_REASSEMBLE' => "PI_REASSEMBLE",
                     'PI_MALFORMED' => "PI_MALFORMED",
                     'PI_DEBUG' => "PI_DEBUG",
                     'PI_PROTOCOL' => "PI_PROTOCOL",
                     'PI_SECURITY' => "PI_SECURITY",
                     'PI_COMMENTS_GROUP' => "PI_COMMENTS_GROUP",
                     'PI_DECRYPTION' => "PI_DECRYPTION",
                     'PI_ASSUMPTION' => "PI_ASSUMPTION",
                     'PI_DEPRECATED' => "PI_DEPRECATED");

# One array ref per delimited input record (field layout documented in
# the header comment); shared between the generate and fix-all passes.
my @expert_list;

# Protocol abbreviation derived from the packet-PROTABBREV.c file name.
my $protabbrev = "";
+
# Perl trim function to remove whitespace from the start and end of the string
sub trim($)
{
    my $text = shift;
    # One combined substitution strips both ends.
    $text =~ s/^\s+|\s+$//g;
    return $text;
}
+
# ---------------------------------------------------------------------
#
# MAIN
#
my $helpFlag = '';
my $action = 'generate';   # 'generate' (pass 1) or 'fix-all' (pass 2)
my $register = '';         # set by --register: also emit registration code

my $result = GetOptions(
    'action=s' => \$action,
    'register' => \$register,
    'help|?' => \$helpFlag
    );

if (!$result || $helpFlag || !$ARGV[0]) {
    usage();
}

# Print command-line help and exit with status 1.
sub usage {
    print "\nUsage: $0 [--action=generate|fix-all|find-all] FILENAME [...]\n\n";
    print " --action = generate (default)\n";
    print " generate - create a delimited file (FILENAME.expert_add_info_input) with\n";
    print " expert_add_info_format fields in FILENAME(s)\n";
    print " fix-all - Use delimited file (FILENAME.expert_add_info_input) to convert\n";
    print " expert_add_info_format to \"filterable\" expert API\n";
    print " Also generates FILENAME.ei to be copy/pasted into\n";
    print " the dissector where appropriate\n\n";
    print " --register = generate ei_register_info and expert register function calls\n\n";

    exit(1);
}
+
#
# XXX Outline general algorithm here
#
my $found_total = 0;
my $protabbrev_index;
my $line_number = 0;   # current record of the .expert_add_info_input file, for diagnostics

# Process every dissector file named on the command line.
while (my $fileName = $ARGV[0]) {
    shift;
    my $fileContents = '';

    die "No such file: \"$fileName\"\n" if (! -e $fileName);

    # delete leading './'
    $fileName =~ s{ ^ \. / } {}xo;

    #determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c
    $protabbrev_index = rindex($fileName, "packet-");
    if ($protabbrev_index == -1) {
        print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
        next;
    }

    $protabbrev = substr($fileName, $protabbrev_index+length("packet-"));
    $protabbrev_index = rindex($protabbrev, ".");
    if ($protabbrev_index == -1) {
        print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
        next;
    }
    $protabbrev = lc(substr($protabbrev, 0, $protabbrev_index));

    # Read in the file (ouch, but it's easier that way)
    open(FCI, "<", $fileName) || die("Couldn't open $fileName");
    while (<FCI>) {
        $fileContents .= $_;
    }
    close(FCI);

    # Pass 1: scan for calls and write the delimited input file.
    if ($action eq "generate") {
        generate_eis(\$fileContents, $fileName);
    }

    # Pass 2: apply the (possibly hand-edited) delimited file.
    if ($action eq "fix-all") {
        # Read in the ei "input" file
        $line_number = 0;
        my $errors = 0;
        open(FCI, "<", $fileName . ".expert_add_info_input") || die("Couldn't open $fileName.expert_add_info_input");
        while(my $line=<FCI>){
            my @expert_item = split(/;|\n/, $line);

            $line_number++;
            $errors += verify_line(@expert_item);

            push(@expert_list, \@expert_item);
        }
        close(FCI);

        # Refuse to rewrite anything if the input file is malformed.
        if ($errors > 0) {
            print "Aborting conversion.\n";
            exit(-1);
        }

        fix_expert_add_info_format(\$fileContents, $fileName);

        # Write out the ei data
        output_ei_data($fileName);

        # Write out the changed version to a file
        open(FCO, ">", $fileName . ".expert_add_info_format");
        print FCO "$fileContents";
        close(FCO);
    }

} # while

exit $found_total;
+
# ---------------------------------------------------------------------
# Sanity check one record of the .expert_add_info_input file.
# Returns the number of errors found (0 when the record is well formed).
sub verify_line {
    my( @expert_item) = @_;
    my $errors = 0;

    #do some basic error checking of the file
    if (($expert_item[0] eq "1") ||
        ($expert_item[0] eq "2") ||
        ($expert_item[0] eq "3") ||
        ($expert_item[0] eq "4")) {
        #expert info conversions
        if (!($expert_item[2] =~ /^ei_/)) {
            print "$line_number: Poorly formed ei_ variable ($expert_item[2])!\n";
            $errors++;
        }
    } else {
        print "$line_number: Bad conversion value!\n";
        $errors++;
    }

    if ($expert_item[1] eq "1") {
        if (!($expert_item[2] =~ /^ei_/)) {
            print "$line_number: Poorly formed ei_ variable ($expert_item[2])!\n";
            $errors++;
        }
        # Bug fix: report the field actually tested -- [4] is the severity
        # (the original printed [5], the field name).
        if (!exists($EXPERT_SEVERITY{$expert_item[4]})) {
            print "$line_number: Expert severity value '$expert_item[4]' unknown!\n";
            $errors++;
        }
        # Bug fix: likewise [3] is the group (the original printed [4]).
        if (!exists($EXPERT_GROUPS{$expert_item[3]})) {
            print "$line_number: Expert group value '$expert_item[3]' unknown!\n";
            $errors++;
        }

    } elsif ($expert_item[1] ne "0") {
        print "$line_number: Bad ei variable generation value!\n";
        $errors++;
    }

    return $errors;
}
+
# Scan the dissector source for expert_add_info_format() calls and write
# one semicolon-delimited record per call to FILENAME.expert_add_info_input.
# @expert_item index layout:
#   [0] conversion type   [1] add-ei flag  [2] ei variable  [3] GROUP
#   [4] SEVERITY          [5] field full name  [6] field abbrev name
#   [7] pinfo var  [8] item var  [9] tvb var  [10] offset  [11] length
#   [12] remaining printf parameters
sub generate_eis {
    my( $fileContentsRef, $fileName) = @_;
    my @args;
    my $num_items = 0;
    my @temp;
    my $str_temp;
    my $pat;

    # Match expert_add_info_format(...) calls with at least 5 arguments.
    $pat = qr /
              (
                  (?:expert_add_info_format)\s* \(
                  (([^[\,;])*\,){4,}
                  [^;]*
              \s* \) \s* ;
              )
              /xs;

    while ($$fileContentsRef =~ / $pat /xgso) {

        # Defaults for every record field (see layout above).
        my @expert_item = (1, 1, "ei_name", "GROUP", "SEVERITY", "fieldfullname", "fieldabbrevname",
                           "pinfo", "item", "tvb", "offset", "length", "params");
        my $arg_loop = 5;
        # Collapse the whole call onto one line before splitting on commas.
        my $str = "${1}\n";
        $str =~ tr/\t\n\r/ /d;
        $str =~ s/ \s+ / /xg;
        #print "$fileName: $str\n";

        @args = split(/,/, $str);
        #printf "ARGS(%d): %s\n", scalar @args, join("# ", @args);
        $args[0] =~ s/expert_add_info_format\s*\(\s*//;

        $expert_item[7] = $args[0]; #pinfo
        $expert_item[8] = trim($args[1]); #item
        $expert_item[3] = trim($args[2]); #GROUP
        $expert_item[4] = trim($args[3]); #SEVERITY
        $expert_item[5] = trim($args[4]); #fieldfullname
        $expert_item[5] =~ s/\"//;

        #XXX - conditional?
        $expert_item[5] =~ s/\"\s*\)\s*;$//;
        $expert_item[5] =~ s/\"$//;

        #params - rejoin everything after the format string.
        $expert_item[12] = "";
        while ($arg_loop < scalar @args) {
            $expert_item[12] .= trim($args[$arg_loop]);
            if ($arg_loop+1 < scalar @args) {
                $expert_item[12] .= ", ";
            }
            $arg_loop += 1;
        }
        $expert_item[12] =~ s/\s*\)\s*;$//;

        #ei variable name - derived from protocol + lowercased field name.
        $expert_item[2] = sprintf("ei_%s_%s", $protabbrev, lc($expert_item[5]));
        $expert_item[2] =~ s/\s+|-|:/_/g;

        #field abbreviated name
        $expert_item[6] = sprintf("%s.%s", $protabbrev, lc($expert_item[5]));
        $expert_item[6] =~ s/\s+|-|:/_/g;

        push(@expert_list, \@expert_item);

        $num_items += 1;
    }

    if ($num_items > 0) {
        open(FCO, ">", $fileName . ".expert_add_info_input");
        for my $item (@expert_list) {
            print FCO join(";", @{$item}), "\n";
        }
        close(FCO);
    }
}
+
# ---------------------------------------------------------------------
# Find all expert_add_info_format calls and replace them with the data
# found in expert_list
sub fix_expert_add_info_format {
    my( $fileContentsRef, $fileName) = @_;
    my $found = 0;
    my $pat;

    # Same pattern as generate_eis(), so matches line up with the records
    # read back from the .expert_add_info_input file.
    $pat = qr /
              (
                  (?:expert_add_info_format)\s* \(
                  (([^[\,;])*\,){4,}
                  [^;]*
              \s* \) \s* ;
              )
              /xs;

    # NOTE: patsub() receives $found by alias through @_ and increments it
    # in place, so each successive match consumes the next @expert_list entry.
    $$fileContentsRef =~ s/ $pat /patsub($found, $1)/xges;
}
+
# ---------------------------------------------------------------------
# Format expert info functions with expert_list data.
# $_[0] is the index into @expert_list and is incremented in place (via
# @_ aliasing) so the caller's counter advances per match; $_[1] is the
# matched call text (unused -- the replacement comes from @expert_list).
# The conversion type in field [0] selects which API call is emitted.
sub patsub {
    my $item_str;

    #print $expert_list[$_[0]][2] . " = ";
    #print $#{$expert_list[$_[0]]}+1;
    #print "\n";

    if ($expert_list[$_[0]][0] eq "1") {
        # 1: plain expert_add_info (no format arguments)
        $item_str = sprintf("expert_add_info(%s, %s, &%s);",
                            $expert_list[$_[0]][7], $expert_list[$_[0]][8], $expert_list[$_[0]][2]);
    } elsif ($expert_list[$_[0]][0] eq "2") {
        # 2: expert_add_info_format with the ei variable and format string
        $item_str = sprintf("expert_add_info_format(%s, %s, &%s, \"%s\"",
                            $expert_list[$_[0]][7], $expert_list[$_[0]][8],
                            $expert_list[$_[0]][2], $expert_list[$_[0]][5]);
        if (($#{$expert_list[$_[0]]}+1 > 12 ) && ($expert_list[$_[0]][12] ne "")) {
            $item_str .= ", $expert_list[$_[0]][12]";
        }
        $item_str .= ");";
    } elsif ($expert_list[$_[0]][0] eq "3") {
        # 3: proto_tree_add_expert (tree/pinfo order is swapped vs. above)
        $item_str = sprintf("proto_tree_add_expert(%s, %s, &%s, %s, %s, %s);",
                            $expert_list[$_[0]][8], $expert_list[$_[0]][7],
                            $expert_list[$_[0]][2], $expert_list[$_[0]][9],
                            $expert_list[$_[0]][10], $expert_list[$_[0]][11]);
    } elsif ($expert_list[$_[0]][0] eq "4") {
        # 4: proto_tree_add_expert_format with tvb/offset/length + format
        $item_str = sprintf("proto_tree_add_expert_format(%s, %s, &%s, %s, %s, %s, \"%s\"",
                            $expert_list[$_[0]][8], $expert_list[$_[0]][7], $expert_list[$_[0]][2],
                            $expert_list[$_[0]][9], $expert_list[$_[0]][10],
                            $expert_list[$_[0]][11], $expert_list[$_[0]][5]);
        if (($#{$expert_list[$_[0]]}+1 > 12) && ($expert_list[$_[0]][12] ne "")) {
            $item_str .= ", $expert_list[$_[0]][12]";
        }
        $item_str .= ");";
    }

    # Advance the caller's index (aliased through @_).
    $_[0] += 1;

    return $item_str;
}
+
# ---------------------------------------------------------------------
# Output the ei variable declarations and expert array. For now, write them to a file.
# XXX - Eventually find the right place to add it to the modified dissector file
sub output_ei_data {
    my( $fileName) = @_;
    my %eis = ();
    my $index;
    my $key;

    #add ei to hash table to prevent against (accidental) duplicates
    for ($index=0;$index<@expert_list;$index++) {
        if ($expert_list[$index][1] eq "1") {
            $eis{$expert_list[$index][2]} = $expert_list[$index][2];
        }
    }

    open(FCO, ">", $fileName . ".ei");

    print FCO "/* Generated from convert_expert_add_info_format.pl */\n";

    # One declaration per unique ei variable.
    foreach $key (keys %eis) {
        print FCO "static expert_field $key = EI_INIT;\n";
    }
    print FCO "\n\n";

    if ($register ne "") {
        print FCO " static ei_register_info ei[] = {\n";
    }

    # Second pass: emit the array entries, aborting on duplicates so two
    # records never register the same ei variable twice.
    %eis = ();
    for ($index=0;$index<@expert_list;$index++) {
        if ($expert_list[$index][1] eq "1") {
            if (exists($eis{$expert_list[$index][2]})) {
                print "duplicate ei entry '$expert_list[$index][2]' found! Aborting conversion.\n";
                exit(-1);
            }
            $eis{$expert_list[$index][2]} = $expert_list[$index][2];

            print FCO " { &$expert_list[$index][2], { \"$expert_list[$index][6]\", $expert_list[$index][3], ";
            # Bug fix: this line originally ended with "\r\n" while every
            # other print uses "\n", producing mixed line endings in the
            # generated .ei file.
            print FCO "$expert_list[$index][4], \"$expert_list[$index][5]\", EXPFILL }},\n";
        }
    }

    if ($register ne "") {
        print FCO " };\n\n\n";
        print FCO " expert_module_t* expert_$protabbrev;\n\n";

        print FCO " expert_$protabbrev = expert_register_protocol(proto_$protabbrev);\n";
        print FCO " expert_register_field_array(expert_$protabbrev, ei, array_length(ei));\n\n";
    }


    close(FCO);
}
diff --git a/tools/convert_proto_tree_add_text.pl b/tools/convert_proto_tree_add_text.pl
new file mode 100755
index 0000000..3576455
--- /dev/null
+++ b/tools/convert_proto_tree_add_text.pl
@@ -0,0 +1,759 @@
+#!/usr/bin/env perl
+#
+# Copyright 2013 Michael Mann (see AUTHORS file)
+#
+# A program to help convert proto_tree_add_text calls into filterable "items" that
+# use proto_tree_add_item. The program requires 2 passes. "Pass 1" (generate) collects
+# the eligible proto_tree_add_text calls and outputs the necessary data into a delimited
+# file. "Pass 2" (fix-all) takes the data from the delimited file and replaces the
+# proto_tree_add_text calls with proto_tree_add_item or "expert info" calls as well as
+# generating separate files for the hf and/or ei variable declarations and hf and/or ei array data.
+# The hf "files" can be copy/pasted into the dissector where appropriate (until such time as
+# its done automatically)
+#
+# Note that the output from "Pass 1" won't always be a perfect conversion for "Pass 2", so
+# "human interaction" is needed as an intermediary to verify and update the delimited file
+# before "Pass 2" is done.
+# It is also recommended to run checkhf.pl and checkAPIs.pl after "Pass 2" is completed.
+#
+# Delimited file field format:
+# <convert proto_tree_add_text_call[0|1|10-13]><add hf or ei variable[0|1|2]><proto_tree var><hf var><tvb var><offset><length><encoding|[EXPERT_GROUPS]>
+# <[FIELDNAME]><[FIELDTYPE]|[EXPERT_SEVERITY]><[FIELDABBREV]><[FIELDDISPLAY]><[FIELDCONVERT]><[BITMASK]>
+#
+# convert proto_tree_add_text_call enumerations:
+# 0 - no conversions
+# 1 - proto_tree_add_item
+# 10 - expert_add_info
+# 11 - expert_add_info_format
+# 12 - proto_tree_add_expert
+# 13 - proto_tree_add_expert_format
+#
+# Usage: convert_proto_tree_add_text.pl action=<generate|fix-all> <file or files>
+#
+# Lots of code shamelessly borrowed from fix-encoding-args.pl (Thanks Bill!)
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+use strict;
+use warnings;
+
+use Getopt::Long;
+
# Valid hf display bases; used by verify_line() to validate field [11].
my %DISPLAY_BASE = ('BASE_NONE' => "BASE_NONE",
                    'BASE_DEC' => "BASE_DEC",
                    'BASE_HEX' => "BASE_HEX",
                    'BASE_OCT' => "BASE_OCT",
                    'BASE_DEC_HEX' => "BASE_DEC_HEX",
                    'BASE_HEX_DEC' => "BASE_HEX_DEC",
                    'BASE_EXT_STRING' => "BASE_EXT_STRING",
                    'BASE_RANGE_STRING' => "BASE_RANGE_STRING",
                    'ABSOLUTE_TIME_LOCAL' => "ABSOLUTE_TIME_LOCAL",
                    'ABSOLUTE_TIME_UTC' => "ABSOLUTE_TIME_UTC",
                    'ABSOLUTE_TIME_DOY_UTC' => "ABSOLUTE_TIME_DOY_UTC",
                    'BASE_CUSTOM' => "BASE_CUSTOM");

# Valid tvb encodings; used by verify_line() to validate field [7].
my %ENCODINGS = ('ENC_BIG_ENDIAN' => "ENC_BIG_ENDIAN",
                 'ENC_LITTLE_ENDIAN' => "ENC_LITTLE_ENDIAN",
                 'ENC_TIME_SECS_NSECS' => "ENC_TIME_SECS_NSECS",
                 'ENC_TIME_NTP' => "ENC_TIME_NTP",
                 'ENC_ASCII' => "ENC_ASCII",
                 'ENC_UTF_8' => "ENC_UTF_8",
                 'ENC_UTF_16' => "ENC_UTF_16",
                 'ENC_UCS_2' => "ENC_UCS_2",
                 'ENC_EBCDIC' => "ENC_EBCDIC",
                 'ENC_NA' => "ENC_NA");

# Valid hf field types; used by verify_line() to validate field [9].
my %FIELD_TYPE = ('FT_NONE' => "FT_NONE", 'FT_PROTOCOL' => "FT_PROTOCOL", 'FT_BOOLEAN' => "FT_BOOLEAN",
                  'FT_UINT8' => "FT_UINT8", 'FT_UINT16' => "FT_UINT16", 'FT_UINT24' => "FT_UINT24", 'FT_UINT32' => "FT_UINT32", 'FT_UINT64' => "FT_UINT64",
                  'FT_INT8' => "FT_INT8", 'FT_INT16' => "FT_INT16", 'FT_INT24' => "FT_INT24", 'FT_INT32' => "FT_INT32", 'FT_INT64' => "FT_INT64",
                  'FT_FLOAT' => "FT_FLOAT", 'FT_DOUBLE' => "FT_DOUBLE",
                  'FT_ABSOLUTE_TIME' => "FT_ABSOLUTE_TIME", 'FT_RELATIVE_TIME' => "FT_RELATIVE_TIME",
                  'FT_STRING' => "FT_STRING", 'FT_STRINGZ' => "FT_STRINGZ", 'FT_UINT_STRING' => "FT_UINT_STRING",
                  'FT_ETHER' => "FT_ETHER", 'FT_BYTES' => "FT_BYTES", 'FT_UINT_BYTES' => "FT_UINT_BYTES",
                  'FT_IPv4' => "FT_IPv4", 'FT_IPv6' => "FT_IPv6", 'FT_IPXNET' => "FT_IPXNET", 'FT_AX25' => "FT_AX25", 'FT_VINES' => "FT_VINES",
                  'FT_FRAMENUM' => "FT_FRAMENUM", 'FT_GUID' => "FT_GUID", 'FT_OID' => "FT_OID", 'FT_REL_OID' => "FT_REL_OID", 'FT_EUI64' => "FT_EUI64");

# Valid expert severities; used for expert-info conversion records.
my %EXPERT_SEVERITY = ('PI_COMMENT' => "PI_COMMENT",
                       'PI_CHAT' => "PI_CHAT",
                       'PI_NOTE' => "PI_NOTE",
                       'PI_WARN' => "PI_WARN",
                       'PI_ERROR' => "PI_ERROR");

# Valid expert groups; used for expert-info conversion records.
my %EXPERT_GROUPS = ('PI_CHECKSUM' => "PI_CHECKSUM",
                     'PI_SEQUENCE' => "PI_SEQUENCE",
                     'PI_RESPONSE_CODE' => "PI_RESPONSE_CODE",
                     'PI_REQUEST_CODE' => "PI_REQUEST_CODE",
                     'PI_UNDECODED' => "PI_UNDECODED",
                     'PI_REASSEMBLE' => "PI_REASSEMBLE",
                     'PI_MALFORMED' => "PI_MALFORMED",
                     'PI_DEBUG' => "PI_DEBUG",
                     'PI_PROTOCOL' => "PI_PROTOCOL",
                     'PI_SECURITY' => "PI_SECURITY",
                     'PI_COMMENTS_GROUP' => "PI_COMMENTS_GROUP",
                     'PI_DECRYPTION' => "PI_DECRYPTION",
                     'PI_ASSUMPTION' => "PI_ASSUMPTION",
                     'PI_DEPRECATED' => "PI_DEPRECATED");

# One array ref per delimited input record (layout in the header comment).
my @proto_tree_list;
# Subset of records marked for expert-info generation (field [1] == "2").
my @expert_list;
# Protocol abbreviation derived from the packet-PROTABBREV.c file name.
my $protabbrev = "";
+
# Perl trim function to remove whitespace from the start and end of the string
sub trim($)
{
    my $text = shift;
    # One combined substitution strips both ends.
    $text =~ s/^\s+|\s+$//g;
    return $text;
}
+
# ---------------------------------------------------------------------
#
# MAIN
#
my $helpFlag = '';
my $action = 'generate';   # 'generate', 'fix-all' or 'find-all'
my $encoding = '';         # --encoding: fallback when none can be inferred
my $expert = '';           # --expert: include calls with no printf arguments

my $result = GetOptions(
    'action=s' => \$action,
    'encoding=s' => \$encoding,
    'expert' => \$expert,
    'help|?' => \$helpFlag
    );

if (!$result || $helpFlag || !$ARGV[0]) {
    usage();
}

# Print command-line help and exit with status 1.
sub usage {
    print "\nUsage: $0 [--action=generate|fix-all|find-all] [--encoding=ENC_BIG_ENDIAN|ENC_LITTLE_ENDIAN] FILENAME [...]\n\n";
    print " --action = generate (default)\n";
    print " generate - create a delimited file (FILENAME.proto_tree_input) with\n";
    print " proto_tree_add_text fields in FILENAME(s)\n";
    print " fix-all - Use delimited file (FILENAME.proto_tree_input) to convert\n";
    print " proto_tree_add_text to proto_tree_add_item\n";
    print " Also generates FILENAME.hf and FILENAME.hf_array to be\n";
    print " copy/pasted into the dissector where appropriate\n";
    print " find-all - Output the number of eligible proto_tree_add_text calls\n";
    print " for conversion\n\n";
    print " --expert (Optional) Includes proto_tree_add_text calls with no printf arguments in\n";
    print " the .proto_tree_input file as they could be converted to expert info\n";
    print " (otherwise they are ignored)\n";
    print " Must be called for 'fix-all' if called on 'generate'\n";
    print " --encoding (Optional) Default encoding if one can't be determined\n";
    print " (effective only for generate)\n";
    print " If not specified, an encoding will not be auto-populated\n";
    print " if undetermined\n\n";

    exit(1);
}
+
#
# XXX Outline general algorithm here
#
my $found_total = 0;
my $protabbrev_index;
my $line_number = 0;   # current record of the .proto_tree_input file, for diagnostics

# Process every dissector file named on the command line.
while (my $fileName = $ARGV[0]) {
    shift;
    my $fileContents = '';

    die "No such file: \"$fileName\"\n" if (! -e $fileName);

    # delete leading './'
    $fileName =~ s{ ^ \. / } {}xo;

    #determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c
    $protabbrev_index = rindex($fileName, "packet-");
    if ($protabbrev_index == -1) {
        print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
        next;
    }

    $protabbrev = substr($fileName, $protabbrev_index+length("packet-"));
    $protabbrev_index = rindex($protabbrev, ".");
    if ($protabbrev_index == -1) {
        print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
        next;
    }
    $protabbrev = lc(substr($protabbrev, 0, $protabbrev_index));

    # Read in the file (ouch, but it's easier that way)
    open(FCI, "<", $fileName) || die("Couldn't open $fileName");
    while (<FCI>) {
        $fileContents .= $_;
    }
    close(FCI);

    # Pass 1: scan for calls and write the delimited input file.
    if ($action eq "generate") {
        generate_hfs(\$fileContents, $fileName);
    }

    # Pass 2: apply the (possibly hand-edited) delimited file.
    if ($action eq "fix-all") {
        # Read in the hf "input" file
        $line_number = 0;
        my $errors = 0;
        open(FCI, "<", $fileName . ".proto_tree_input") || die("Couldn't open $fileName.proto_tree_input");
        while(my $line=<FCI>){
            my @proto_tree_item = split(/;|\n/, $line);

            $line_number++;
            $errors += verify_line(@proto_tree_item);

            push(@proto_tree_list, \@proto_tree_item);
            # Records flagged "2" also generate expert-info entries.
            if ($proto_tree_item[1] eq "2") {
                push(@expert_list, \@proto_tree_item);
            }
        }
        close(FCI);

        # Refuse to rewrite anything if the input file is malformed.
        if ($errors > 0) {
            print "Aborting conversion.\n";
            exit(-1);
        }

        fix_proto_tree_add_text(\$fileContents, $fileName);

        # Write out the hf data
        output_hf_array($fileName);
        output_hf($fileName);

        # Write out the changed version to a file
        open(FCO, ">", $fileName . ".proto_tree_add_text");
        print FCO "$fileContents";
        close(FCO);
    }

    if ($action eq "find-all") {
        # Find all proto_tree_add_text() statements eligible for conversion
        $found_total += find_all(\$fileContents, $fileName);
    }

} # while

exit $found_total;
+
# ---------------------------------------------------------------------
# Sanity check the data in the .proto_tree_input file.
# Returns the number of errors found (0 when the record is well formed).
sub verify_line {
    my( @proto_tree_item) = @_;
    my $errors = 0;

    #do some basic error checking of the file
    if ($proto_tree_item[0] eq "1") {
        if (!($proto_tree_item[3] =~ /^hf_/)) {
            print "$line_number: Poorly formed hf_ variable ($proto_tree_item[3])!\n";
            $errors++;
        }

        foreach (split(/\|/, $proto_tree_item[7])) {
            if (!exists($ENCODINGS{$_})) {
                print "$line_number: Encoding value '$_' unknown!\n";
                $errors++;
            }
        }
    } elsif (($proto_tree_item[0] eq "10") ||
             ($proto_tree_item[0] eq "11") ||
             ($proto_tree_item[0] eq "12") ||
             ($proto_tree_item[0] eq "13")) {
        #expert info conversions
        if (!($proto_tree_item[3] =~ /^ei_/)) {
            print "$line_number: Poorly formed ei_ variable ($proto_tree_item[3])!\n";
            $errors++;
        }
    } elsif ($proto_tree_item[0] ne "0") {
        print "Bad conversion value! Aborting conversion.\n";
        $errors++;
    }

    if ($proto_tree_item[1] eq "1") {
        if (!($proto_tree_item[3] =~ /^hf_/)) {
            print "$line_number: Poorly formed hf_ variable ($proto_tree_item[3])!\n";
            $errors++;
        }
        if (!exists($FIELD_TYPE{$proto_tree_item[9]})) {
            print "$line_number: Field type '$proto_tree_item[9]' unknown!\n";
            $errors++;
        }
        foreach (split(/\|/, $proto_tree_item[11])) {
            if ((!exists($DISPLAY_BASE{$_})) &&
                (!($proto_tree_item[11] =~ /\d+/))) {
                print "$line_number: Display base '$proto_tree_item[11]' unknown!\n";
                $errors++;
            }
        }
        if (($proto_tree_item[9] eq "FT_UINT8") ||
            ($proto_tree_item[9] eq "FT_UINT16") ||
            ($proto_tree_item[9] eq "FT_UINT24") ||
            ($proto_tree_item[9] eq "FT_UINT32") ||
            ($proto_tree_item[9] eq "FT_UINT64") ||
            ($proto_tree_item[9] eq "FT_INT8") ||
            ($proto_tree_item[9] eq "FT_INT16") ||
            ($proto_tree_item[9] eq "FT_INT24") ||
            ($proto_tree_item[9] eq "FT_INT32") ||
            ($proto_tree_item[9] eq "FT_INT64")) {
            if ($proto_tree_item[11] eq "BASE_NONE") {
                # Bug fix: "Interger" -> "Integer" in the diagnostic.
                print "$line_number: Integer type should not be BASE_NONE!\n";
                $errors++;
            }
        }

    } elsif ($proto_tree_item[1] eq "2") {
        if (!($proto_tree_item[3] =~ /^ei_/)) {
            print "$line_number: Poorly formed ei_ variable ($proto_tree_item[3])!\n";
            $errors++;
        }
        if (!exists($EXPERT_SEVERITY{$proto_tree_item[9]})) {
            print "$line_number: Expert severity value '$proto_tree_item[9]' unknown!\n";
            $errors++;
        }
        if (!exists($EXPERT_GROUPS{$proto_tree_item[7]})) {
            print "$line_number: Expert group value '$proto_tree_item[7]' unknown!\n";
            $errors++;
        }

    } elsif ($proto_tree_item[1] ne "0") {
        print "$line_number: Bad hf/ei variable generation value!\n";
        $errors++;
    }

    return $errors;
}
+
# ---------------------------------------------------------------------
# Scan the file for proto_tree_add_text() calls that look convertible and
# record a best-guess hf_ entry for each one in @proto_tree_list.  Each
# entry is an array laid out as:
#   [0] action  (1 = convert to proto_tree_add_item, 0 = leave alone)
#   [1] decl    (1 = hf variable, 2 = expert info)
#   [2] tree  [3] hf name  [4] tvb  [5] offset  [6] length  [7] encoding
#   [8] field full name  [9] field type  [10] filter name  [11] display base
#   [12] strings (VALS)  [13] bitmask
# Guesses are derived from the call's format string and any tvb_get_*()
# helper named in the value arguments.  Collected entries are written to
# "<file>.proto_tree_input" for the user to review.
sub generate_hfs {
	my( $fileContentsRef, $fileName) = @_;
	my @args;
	my $num_items = 0;
	my @temp;
	my $str_temp;
	my $pat;

	# With --expert a call with 4+ comma-separated arguments qualifies
	# (no value argument is required); otherwise require 5+ so there is
	# a value argument to convert.
	if ($expert ne "") {
		$pat = qr /
					(
						 (?:proto_tree_add_text)\s* \(
						 (([^[\,;])*\,){4,}
						 [^;]*
						 \s* \) \s* ;
					)
				/xs;
	} else {
		$pat = qr /
					(
						 (?:proto_tree_add_text)\s* \(
						 (([^[\,;])*\,){5,}
						 [^;]*
						 \s* \) \s* ;
					)
				/xs;
	}

	while ($$fileContentsRef =~ / $pat /xgso) {
		# Defaults for a new entry; the placeholder strings ("encoding",
		# "fieldtype", ...) are replaced below whenever a guess is possible.
		my @proto_tree_item = (1, 1, "tree", "hf_name", "tvb", "offset", "length", "encoding",
							   "fieldfullname", "fieldtype", "fieldabbrevname", "BASE_NONE", "NULL", "0x0");
		my $str = "${1}\n";
		# Collapse the whole call onto one line for easier parsing.
		$str =~ tr/\t\n\r/ /d;
		$str =~ s/ \s+ / /xg;
		#print "$fileName: $str\n";

		# NOTE(review): a plain split on ',' also splits commas inside
		# string literals or nested calls - verify on such inputs.
		@args = split(/,/, $str);
		#printf "ARGS(%d): %s\n", scalar @args, join("# ", @args);
		$args[0] =~ s/proto_tree_add_text\s*\(\s*//;
		$proto_tree_item[2] = $args[0]; #tree
		$proto_tree_item[4] = trim($args[1]); #tvb
		$proto_tree_item[5] = trim($args[2]); #offset
		$proto_tree_item[6] = trim($args[3]); #length
		if (scalar @args == 5) {
			#remove the "); at the end
			$args[4] =~ s/\"\s*\)\s*;$//;
		}

		#encoding - guessed from the tvb accessor used in the value argument
		if (scalar @args > 5) {
			if (($proto_tree_item[6] eq "1") ||
				($args[5] =~ /tvb_get_guint8/) ||
				($args[5] =~ /tvb_bytes_to_str/) ||
				($args[5] =~ /tvb_ether_to_str/)) {
				$proto_tree_item[7] = "ENC_NA";
			} elsif ($args[5] =~ /tvb_get_ntoh/) {
				$proto_tree_item[7] = "ENC_BIG_ENDIAN";
			} elsif ($args[5] =~ /tvb_get_letoh/) {
				$proto_tree_item[7] = "ENC_LITTLE_ENDIAN";
			} elsif (($args[5] =~ /tvb_get_ephemeral_string/) ||
					 ($args[5] =~ /tvb_format_text/)){
				$proto_tree_item[7] = "ENC_NA|ENC_ASCII";
			} elsif ($encoding ne "") {
				# Fall back to the user-supplied --encoding default.
				$proto_tree_item[7] = $encoding;
			}
		}

		#field full name - text before the first '=' or ':' of the format string
		if (($expert ne "") || (scalar @args > 5)) {
			my @arg_temp = split(/=|:/, $args[4]);
			$proto_tree_item[8] = $arg_temp[0];
		} else {
			$proto_tree_item[8] = $args[4];
		}
		$proto_tree_item[8] =~ s/\"//;
		$proto_tree_item[8] = trim($proto_tree_item[8]);

		if ($proto_tree_item[8] eq "%s\"") {
			#assume proto_tree_add_text will not be converted
			$proto_tree_item[0] = 0;
			$proto_tree_item[1] = 0;
			$proto_tree_item[3] = sprintf("hf_%s_", $protabbrev);
			$proto_tree_item[10] = sprintf("%s.", $protabbrev);
		} else {
			#hf variable name - hf_<protocol>_<lowercased field name>
			$proto_tree_item[3] = sprintf("hf_%s_%s", $protabbrev, lc($proto_tree_item[8]));
			$proto_tree_item[3] =~ s/\s+|-|:/_/g;

			#field abbreviated name - <protocol>.<lowercased field name>
			$proto_tree_item[10] = sprintf("%s.%s", $protabbrev, lc($proto_tree_item[8]));
			$proto_tree_item[10] =~ s/\s+|-|:/_/g;
		}

		#VALS - capture the value_string table from any val_to_str*() call
		if ($str =~ /val_to_str(_const)?\(\s*tvb_get_[^\(]*\([^\,]*,[^\)]*\)\s*\,\s*([^\,]*)\s*\,\s*([^\)]*)\)/) {
			$proto_tree_item[12] = sprintf("VALS(%s)", trim($2));
		} elsif ($str =~ /val_to_str(_const)?\([^\,]*\,([^\,]*)\,/) {
			$proto_tree_item[12] = sprintf("VALS(%s)", trim($2));
		} elsif ($str =~ /val_to_str_ext(_const)?\(\s*tvb_get_[^\(]*\([^\,]*,[^\)]*\)\s*\,\s*([^\,]*)\s*\,\s*([^\)]*)\)/) {
			$proto_tree_item[12] = trim($2);
		} elsif ($str =~ /val_to_str_ext(_const)?\([^\,]*\,([^\,]*)\,/) {
			$proto_tree_item[12] = trim($2);
		}

		#field type - width from the tvb accessor, signedness from the format
		if (scalar @args > 5) {
			if ($args[5] =~ /tvb_get_guint8/) {
				if ($args[4] =~ /%[0-9]*[i]/) {
					$proto_tree_item[9] = "FT_INT8";
				} else {
					$proto_tree_item[9] = "FT_UINT8";
				}
			# NOTE(review): the (n|"le") alternation matches a literal
			# double-quote, so tvb_get_letohs() etc. can never match these
			# branches - probably meant (n|le).  Same for ("40"|"48"|...).
			} elsif ($args[5] =~ /tvb_get_(n|"le")tohs/) {
				if ($args[4] =~ /%[0-9]*[i]/) {
					$proto_tree_item[9] = "FT_INT16";
				} else {
					$proto_tree_item[9] = "FT_UINT16";
				}
			} elsif ($args[5] =~ /tvb_get_(n|"le")toh24/) {
				if ($args[4] =~ /%[0-9]*[i]/) {
					$proto_tree_item[9] = "FT_INT24";
				} else {
					$proto_tree_item[9] = "FT_UINT24";
				}
			} elsif ($args[5] =~ /tvb_get_(n|"le")tohl/) {
				if ($args[4] =~ /%[0-9]*[i]/) {
					$proto_tree_item[9] = "FT_INT32";
				} else {
					$proto_tree_item[9] = "FT_UINT32";
				}
			} elsif ($args[5] =~ /tvb_get_(n|"le")toh("40"|"48"|"56"|"64")/) {
				if ($args[4] =~ /%[0-9]*[i]/) {
					$proto_tree_item[9] = "FT_INT64";
				} else {
					$proto_tree_item[9] = "FT_UINT64";
				}
			} elsif (($args[5] =~ /tvb_get_(n|"le")tohieee_float/) ||
					 ($args[4] =~ /%[0-9\.]*[fFeEgG]/)) {
				$proto_tree_item[9] = "FT_FLOAT";
			} elsif ($args[5] =~ /tvb_get_(n|"le")tohieee_double/) {
				$proto_tree_item[9] = "FT_DOUBLE";
			} elsif (($args[5] =~ /tvb_get_ipv4/) ||
					 ($args[5] =~ /tvb_ip_to_str/)) {
				$proto_tree_item[9] = "FT_IPv4";
			} elsif (($args[5] =~ /tvb_get_ipv6/) ||
					 ($args[5] =~ /tvb_ip6_to_str/)) {
				$proto_tree_item[9] = "FT_IPv6";
			} elsif ($args[5] =~ /tvb_get_(n|"le")tohguid/) {
				$proto_tree_item[9] = "FT_GUID";
			} elsif ($args[5] =~ /tvb_get_ephemeral_stringz/) {
				$proto_tree_item[9] = "FT_STRINGZ";
			} elsif (($args[5] =~ /tvb_get_ephemeral_string/) ||
					 ($args[5] =~ /tvb_format_text/)){
				$proto_tree_item[9] = "FT_STRING";
			} elsif (($args[5] =~ /tvb_bytes_to_str/)) {
				$proto_tree_item[9] = "FT_BYTES";
			} elsif ($args[5] =~ /tvb_ether_to_str/) {
				$proto_tree_item[9] = "FT_ETHER";
			}

			#if we still can't determine type, assume a constant length
			#value means we have an unsigned value
			if ($proto_tree_item[9] eq "fieldtype") {
				my $len_str = trim($args[3]);
				if ($len_str eq "1") {
					$proto_tree_item[9] = "FT_UINT8";
				} elsif ($len_str eq "2") {
					$proto_tree_item[9] = "FT_UINT16";
				} elsif ($len_str eq "3") {
					$proto_tree_item[9] = "FT_UINT24";
				} elsif ($len_str eq "4") {
					$proto_tree_item[9] = "FT_UINT32";
				} elsif ($len_str eq "8") {
					$proto_tree_item[9] = "FT_UINT64";
				}
			}
		}

		#display base - from the printf conversion used in the format string
		if ($args[4] =~ /%[0-9]*[xX]/) {
			$proto_tree_item[11] = "BASE_HEX";
		} elsif ($args[4] =~ /%[0-9]*[uld]/) {
			$proto_tree_item[11] = "BASE_DEC";
		} elsif ($args[4] =~ /%[0-9]*o/) {
			$proto_tree_item[11] = "BASE_OCT";
		}
		if ($str =~ /val_to_str_ext(_const)?\([^\,]*\,([^\,]*)\,/) {
			$proto_tree_item[11] .= "|BASE_EXT_STRING";
		}

		# Bytes fields never have a meaningful encoding guess; use ENC_NA.
		if (($proto_tree_item[7] eq "encoding") && ($proto_tree_item[9] eq "FT_BYTES")) {
			$proto_tree_item[7] = "ENC_NA";
		}

		push(@proto_tree_list, \@proto_tree_item);

		$num_items += 1;
	}

	# Dump the collected entries, one semicolon-separated line each, for
	# the user to review and hand back to the conversion step.
	if ($num_items > 0) {
		open(FCO, ">", $fileName . ".proto_tree_input");
		for my $item (@proto_tree_list) {
			print FCO join(";", @{$item}), "\n";
		}
		close(FCO);
	}
}
+
# ---------------------------------------------------------------------
# Find all proto_tree_add_text calls and replace them with the data
# found in proto_tree_list
sub fix_proto_tree_add_text {
	my( $fileContentsRef, $fileName) = @_;
	my $found = 0;
	my $pat;

	# This pattern must be identical to the one generate_hfs() used so
	# that the Nth match here lines up with the Nth @proto_tree_list entry.
	if ($expert ne "") {
		$pat = qr /
					(
						 (?:proto_tree_add_text)\s* \(
						 (([^[\,;])*\,){4,}
						 [^;]*
						 \s* \) \s* ;
					)
				/xs;
	} else {
		$pat = qr /
					(
						 (?:proto_tree_add_text)\s* \(
						 (([^[\,;])*\,){5,}
						 [^;]*
						 \s* \) \s* ;
					)
				/xs;
	}

	# patsub() consumes @proto_tree_list sequentially; $found is passed by
	# alias through @_ and incremented inside patsub() after each match.
	$$fileContentsRef =~ s/ $pat /patsub($found, $1)/xges;
}
+
# ---------------------------------------------------------------------
# Format proto_tree_add_item or expert info functions with proto_tree_list data
# Called as the replacement expression of a s///e; $_[0] is an alias of the
# caller's counter and serves as the index into @proto_tree_list.  Entry
# field [0] selects the replacement form:
#   1  -> proto_tree_add_item()
#   10 -> expert_add_info()
#   11 -> expert_add_info_format()
#   12 -> proto_tree_add_expert()
#   13 -> proto_tree_add_expert_format()
#   anything else -> keep the original call text unchanged
sub patsub {
	my $item_str;
	if ($proto_tree_list[$_[0]][0] eq "1") {
		$item_str = sprintf("proto_tree_add_item(%s, %s, %s, %s, %s, %s);",
						 $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
						 $proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5],
						 $proto_tree_list[$_[0]][6], $proto_tree_list[$_[0]][7]);
	} elsif ($proto_tree_list[$_[0]][0] eq "10") {
		$item_str = sprintf("expert_add_info(pinfo, %s, &%s);",
						 $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3]);
	} elsif ($proto_tree_list[$_[0]][0] eq "11") {
		$item_str = sprintf("expert_add_info_format(pinfo, %s, &%s, \"%s\"",
						 $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
						 $proto_tree_list[$_[0]][8]);
		# Field [11] holds the trailing varargs for the _format variants here.
		if ($proto_tree_list[$_[0]][11] ne "") {
			$item_str .= ", $proto_tree_list[$_[0]][11]";
		}
		$item_str .= ");";
	} elsif ($proto_tree_list[$_[0]][0] eq "12") {
		$item_str = sprintf("proto_tree_add_expert(%s, pinfo, &%s, %s, %s, %s);",
						 $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
						 $proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5],
						 $proto_tree_list[$_[0]][6]);
	} elsif ($proto_tree_list[$_[0]][0] eq "13") {
		$item_str = sprintf("proto_tree_add_expert_format(%s, pinfo, &%s, %s, %s, %s, \"%s\"",
						 $proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
						 $proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5],
						 $proto_tree_list[$_[0]][6], $proto_tree_list[$_[0]][8]);
		if ($proto_tree_list[$_[0]][11] ne "") {
			$item_str .= ", $proto_tree_list[$_[0]][11]";
		}
		$item_str .= ");";
	} else {
		# NOTE(review): relies on the dynamically-scoped $1 capture from the
		# enclosing substitution rather than the passed-in $_[1] - confirm
		# this is intended.
		$item_str = $1;
	}

	# Advance the caller's counter ($_[0] is an alias into @_).
	$_[0] += 1;

	return $item_str;
}
+
# ---------------------------------------------------------------------
# Output the hf variable declarations. For now, write them to a file.
# XXX - Eventually find the right place to add it to the modified dissector file
# Writes "static int hf_...;" declarations for hf entries (decl kind 1)
# and "static expert_field ... = EI_INIT;" declarations for expert-info
# entries (decl kind 2) to "<file>.hf".
sub output_hf {
	my( $fileName) = @_;
	my %hfs = ();
	my %eis = ();
	my $index;
	my $key;

	open(FCO, ">", $fileName . ".hf");

	print FCO "/* Generated from convert_proto_tree_add_text.pl */\n";

	#add hfs to hash table to prevent against (accidental) duplicates
	# NOTE(review): the hash is populated but the declaration is still
	# printed for every entry; duplicates are only rejected later in
	# output_hf_array() - confirm intended.
	for ($index=0;$index<@proto_tree_list;$index++) {
		if ($proto_tree_list[$index][1] eq "1") {
			$hfs{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3];
			print FCO "static int $proto_tree_list[$index][3] = -1;\n";
		} elsif ($proto_tree_list[$index][1] eq "2") {
			$eis{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3];
		}
	}

	if (scalar keys %hfs > 0) {
		print FCO "\n\n";
	}

	print FCO "/* Generated from convert_proto_tree_add_text.pl */\n";

	# Expert-info declarations are emitted from the hash, so each name
	# appears only once here.
	foreach $key (keys %eis) {
		print FCO "static expert_field $key = EI_INIT;\n";
	}
	close(FCO);

}
+
# ---------------------------------------------------------------------
# Output the hf array items. For now, write them to a file.
# XXX - Eventually find the right place to add it to the modified dissector file
# (bonus points if formatting of hf array in dissector file is kept)
# Writes hf_register_info entries for @proto_tree_list and ei entries for
# @expert_list to "<file>.hf_array"; aborts the whole conversion if a
# duplicate hf/ei variable name was generated.
sub output_hf_array {
	my( $fileName) = @_;
	my $index;
	my %hfs = ();
	my %eis = ();

	open(FCO, ">", $fileName . ".hf_array");

	print FCO "      /* Generated from convert_proto_tree_add_text.pl */\n";

	for ($index=0;$index<@proto_tree_list;$index++) {
		if ($proto_tree_list[$index][1] eq "1") {
			if (exists($hfs{$proto_tree_list[$index][3]})) {
				print "duplicate hf entry '$proto_tree_list[$index][3]' found!  Aborting conversion.\n";
				exit(-1);
			}
			$hfs{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3];
			# Entry layout: name [8], abbrev [10], type [9], base [11],
			# strings [12], bitmask [13].
			# NOTE(review): lines end in \r\n here, unlike the rest of the
			# generated output - confirm intended.
			print FCO "      { &$proto_tree_list[$index][3], { \"$proto_tree_list[$index][8]\", \"$proto_tree_list[$index][10]\", ";
			print FCO "$proto_tree_list[$index][9], $proto_tree_list[$index][11], $proto_tree_list[$index][12], $proto_tree_list[$index][13], NULL, HFILL }},\r\n";
		}
	}

	if ($index > 0) {
		print FCO "\n\n";
	}

	print FCO "      /* Generated from convert_proto_tree_add_text.pl */\n";
	for ($index=0;$index<@expert_list;$index++) {
		if (exists($eis{$expert_list[$index][3]})) {
			print "duplicate ei entry '$expert_list[$index][3]' found!  Aborting conversion.\n";
			exit(-1);
		}
		$eis{$expert_list[$index][3]} = $expert_list[$index][3];

		print FCO "      { &$expert_list[$index][3], { \"$expert_list[$index][10]\", $expert_list[$index][7], ";
		print FCO "$expert_list[$index][9], \"$expert_list[$index][8]\", EXPFILL }},\r\n";
	}

	close(FCO);
}
+
# ---------------------------------------------------------------------
# Find all proto_tree_add_text calls that have parameters passed in them
# and output number found
# Read-only pass: counts convertible calls and how many of them reference
# a tvb_* accessor (those are easier to convert automatically), then
# prints a per-file summary.  Returns the number of calls found.

sub find_all {
	my( $fileContentsRef, $fileName) = @_;

	my $found = 0;
	my $tvb_found = 0;
	my $pat;
	my $tvb_percent;

	# Same eligibility pattern as generate_hfs()/fix_proto_tree_add_text().
	if ($expert ne "") {
		$pat = qr /
					(
						 (?:proto_tree_add_text)\s* \(
						 (([^[\,;])*\,){4,}
						 [^;]*
						 \s* \) \s* ;
					)
				/xs;
	} else {
		$pat = qr /
					(
						 (?:proto_tree_add_text)\s* \(
						 (([^[\,;])*\,){5,}
						 [^;]*
						 \s* \) \s* ;
					)
				/xs;
	}

	while ($$fileContentsRef =~ / $pat /xgso) {
		my $str = "${1}\n";
		my @args = split(/,/, ${1});

		#cleanup whitespace to show proto_tree_add_text in single line (easier for seeing grep results)
		$str =~ tr/\t\n\r/ /d;
		$str =~ s/ \s+ / /xg;
		#print "$fileName: $str\n";

		#find all instances where proto_tree_add_text has a tvb_get (or similar) call, because
		#convert_proto_tree_add_text.pl has an easier time determining hf_ field values with it
		if (scalar @args > 5) {
			my $tvb = trim($args[5]);
			if ($tvb =~ /^tvb_/) {
				$tvb_found += 1;
			}
		}

		$found += 1;
	}

	if ($found > 0) {
		if ($tvb_found > 0) {
			$tvb_percent = 100*$tvb_found/$found;

			printf "%s: Found %d proto_tree_add_text calls eligible for conversion, %d contain a \"tvb get\" call (%.2f%%).\n",
				$fileName, $found, $tvb_found, $tvb_percent;
		} else {
			print "$fileName: Found $found proto_tree_add_text calls eligible for conversion, 0 \"tvb get\" calls.\n";
		}
	}
	return $found;
}
diff --git a/tools/cppcheck/cppcheck.sh b/tools/cppcheck/cppcheck.sh
new file mode 100755
index 0000000..780fbbc
--- /dev/null
+++ b/tools/cppcheck/cppcheck.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+
+#
+# cppcheck.sh
+# Script to run CppCheck Static Analyzer.
+# http://cppcheck.sourceforge.net/
+#
+# Usage: tools/cppcheck/cppcheck.sh [options] [file]
+# Where options can be:
+# -a disable suppression list (see $CPPCHECK_DIR/suppressions)
+# -c colorize html output
+# -h html output (default is gcc)
+# -x xml output (default is gcc)
+# -j n threads (default: 4)
+# -l n check files from the last [n] commits
+# -o check modified files
+# -v quiet mode
+# If argument file is omitted then checking all files in the current directory.
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 2012 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
# Locate the cppcheck binary and this script's own directory (the
# suppressions/includes files live next to the script).
CPPCHECK=$(type -p cppcheck)
CPPCHECK_DIR=$(dirname "$0")

if [ -z "$CPPCHECK" ] ; then
    echo "cppcheck not found"
    exit 1
fi

# Defaults; overridden by the getopts loop below.
THREADS=4
LAST_COMMITS=0
TARGET=""
QUIET="--quiet"
SUPPRESSIONS="--suppressions-list=$CPPCHECK_DIR/suppressions"
INCLUDES="--includes-file=$CPPCHECK_DIR/includes"
MODE="gcc"
COLORIZE_HTML_MODE="no"
OPEN_FILES="no"
XML_ARG=""
+
colorize_worker()
{
    # Filter stdin to stdout.  When colorizing is enabled, tag warning and
    # error rows of the HTML table with background colours; otherwise pass
    # the stream through unchanged.
    # Uses an explicit if/else instead of 'test && A || B': with that form
    # a failing sed would run the fallback 'sed' a second time
    # (ShellCheck SC2015).
    if [ "$COLORIZE_HTML_MODE" = "yes" ] ; then
        sed -e '/<td>warning<\/td>/s/^<tr>/<tr bgcolor="#ff3">/' \
            -e '/<td>error<\/td>/s/^<tr>/<tr bgcolor="#faa">/'
    else
        sed ''
    fi
}
+
# switcher
colorize()
{
    # With an argument, colorize that string (fed in as a here-string);
    # without one, colorize stdin.  An explicit if/else replaces the
    # original 'test && A || B' so a failure of the first branch cannot
    # accidentally trigger the second (ShellCheck SC2015).
    if [ -z "$1" ] ; then
        colorize_worker
    else
        colorize_worker <<< "$1"
    fi
}
+
exit_cleanup() {
    # Close the HTML document if we opened one, then exit with the given
    # status; with no argument, simply return to the caller.
    case "$MODE" in
        html) echo "</table></body></html>" ;;
    esac
    if [ -n "${1:-}" ] ; then
        exit "$1"
    fi
}
+
# Parse command-line options (see the usage comment at the top of the file).
while getopts "achxj:l:ov" OPTCHAR ; do
    case $OPTCHAR in
        a) SUPPRESSIONS=" " ;;
        c) COLORIZE_HTML_MODE="yes" ;;
        h) MODE="html" ;;
        x) MODE="xml" ;;
        j) THREADS="$OPTARG" ;;
        l) LAST_COMMITS="$OPTARG" ;;
        o) OPEN_FILES="yes" ;;
        v) QUIET=" " ;;
        *) printf "Unknown option %s" "$OPTCHAR"
    esac
done
shift $(( OPTIND - 1 ))

# Pick the cppcheck message template; html mode also opens the document.
if [ "$MODE" = "gcc" ]; then
    TEMPLATE="gcc"
elif [ "$MODE" = "html" ]; then
    echo "<html><body><table border=1>"
    echo "<tr><th>File</th><th>Line</th><th>Severity</th>"
    echo "<th>Message</th><th>ID</th></tr>"
    TEMPLATE="<tr><td>{file}</td><td>{line}</td><td>{severity}</td><td>{message}</td><td>{id}</td></tr>"
fi

# Ensure that the COLORIZE_HTML_MODE option is used only with HTML-mode and not with GCC-mode.
[ "$MODE" = "html" ] && [ "$COLORIZE_HTML_MODE" = "yes" ] || COLORIZE_HTML_MODE="no"

# -l N: check only the C/C++ files touched by the last N commits.
if [ "$LAST_COMMITS" -gt 0 ] ; then
    TARGET=$( git diff --name-only --diff-filter=d HEAD~"$LAST_COMMITS".. | grep -E '\.(c|cpp)$' )
    if [ -z "${TARGET//[[:space:]]/}" ] ; then
        >&2 echo "No C or C++ files found in the last $LAST_COMMITS commit(s)."
        exit_cleanup 0
    fi
fi

# -o: check only modified/staged C and C++ files.
if [ "$OPEN_FILES" = "yes" ] ; then
    TARGET=$(git diff --name-only | grep -E '\.(c|cpp)$' )
    TARGET="$TARGET $(git diff --staged --name-only | grep -E '\.(c|cpp)$' )"
    if [ -z "${TARGET//[[:space:]]/}" ] ; then
        >&2 echo "No C or C++ files are currently opened (modified or added for next commit)."
        exit_cleanup 0
    fi
fi

# Explicit file arguments are appended; with no target at all, scan the
# whole current directory.
if [ $# -gt 0 ]; then
    TARGET="$TARGET $*"
fi

if [ -z "$TARGET" ] ; then
    TARGET=.
fi

if [ "$MODE" = "xml" ]; then
    XML_ARG="--xml"
fi

# Use a little-documented feature of the shell to pass SIGINTs only to the
# child process (cppcheck in this case). That way the final 'echo' still
# runs and we aren't left with broken HTML.
trap : INT

if [ "$QUIET" = " " ]; then
    echo "Examining:"
    echo $TARGET
    echo
fi

# NOTE(review): in xml mode TEMPLATE is never assigned, so --template=
# expands to an empty value below - confirm cppcheck tolerates that.
# shellcheck disable=SC2086
$CPPCHECK --force --enable=style $QUIET \
    $SUPPRESSIONS $INCLUDES \
    -i doc/ \
    -i epan/dissectors/asn1/ \
    --std=c11 --template=$TEMPLATE \
    -j $THREADS $TARGET $XML_ARG 2>&1 | colorize

exit_cleanup

#
# Editor modelines  -  https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
#
diff --git a/tools/cppcheck/includes b/tools/cppcheck/includes
new file mode 100644
index 0000000..896651e
--- /dev/null
+++ b/tools/cppcheck/includes
@@ -0,0 +1,7 @@
+./epan/
+./epan/dissectors/
+./epan/wslua/
+./tools/lemon/
+./ui/
+./wiretap/
+.
diff --git a/tools/cppcheck/suppressions b/tools/cppcheck/suppressions
new file mode 100644
index 0000000..734cd5a
--- /dev/null
+++ b/tools/cppcheck/suppressions
@@ -0,0 +1,7 @@
+variableScope
+duplicateExpression
+invalidscanf
+noConstructor
+internalAstError
+syntaxError
+
diff --git a/tools/debian-nightly-package.sh b/tools/debian-nightly-package.sh
new file mode 100755
index 0000000..c07185a
--- /dev/null
+++ b/tools/debian-nightly-package.sh
@@ -0,0 +1,24 @@
#!/bin/bash
#
# Build a Debian "nightly" source package for the given distribution from
# the current git checkout.
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later

set -e

# A target distribution (e.g. "xenial") is required.  "$1" is quoted so an
# argument containing whitespace cannot break the test (ShellCheck SC2086).
if test -z "$1"; then
    echo "Usage:"
    echo " $0 <distribution>"
    echo " e.g: $0 xenial"
    exit 1
fi

DIST=$1
# Debian-friendly version from git: drop the leading "v", and use "~" so
# nightly versions sort before the corresponding release.
VERSION=$(git describe --tags | sed 's/v//;s/-/~/g;s/rc/~rc/')
ln --symbolic --no-dereference --force packaging/debian ./debian
rm packaging/debian/changelog || true
# Create a fresh changelog entry without opening an interactive editor.
EDITOR=touch dch -p --package wireshark --create --force-distribution -v"${VERSION}~${DIST}1" -D "$DIST"
sed -i "s/\* Initial release.*/* Nightly build for ${DIST^}/" packaging/debian/changelog
dpkg-buildpackage -S -d
diff --git a/tools/debian-setup.sh b/tools/debian-setup.sh
new file mode 100755
index 0000000..9b68879
--- /dev/null
+++ b/tools/debian-setup.sh
@@ -0,0 +1,300 @@
+#!/bin/bash
+# Setup development environment on Debian and derivatives such as Ubuntu
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+
set -e -u -o pipefail

function print_usage() {
    # Describe the accepted command-line options on stdout.
    printf '\nUtility to setup a debian-based system for Wireshark Development.\n'
    printf 'The basic usage installs the needed software\n\n'
    printf 'Usage: %s [--install-optional] [--install-deb-deps] [...other options...]\n' "$0"
    printf '\t%s\n' \
        '--install-optional: install optional software as well' \
        '--install-deb-deps: install packages required to build the .deb file' \
        '--install-test-deps: install packages required to run all tests' \
        '--install-qt5-deps: force installation of packages required to use Qt5' \
        '--install-qt6-deps: force installation of packages required to use Qt6' \
        '--install-all: install everything' \
        '[other]: other options are passed as-is to apt'
}
+
# Option flags; any argument we do not recognize is collected in OPTIONS
# and passed straight through to apt-get at the end.
ADDITIONAL=0
DEBDEPS=0
TESTDEPS=0
ADD_QT5=0
ADD_QT6=0
HAVE_ADD_QT=0
OPTIONS=
for arg; do
    case $arg in
        --help)
            print_usage
            exit 0
            ;;
        --install-optional)
            ADDITIONAL=1
            ;;
        --install-deb-deps)
            DEBDEPS=1
            ;;
        --install-test-deps)
            TESTDEPS=1
            ;;
        --install-qt5-deps)
            ADD_QT5=1
            ;;
        --install-qt6-deps)
            ADD_QT6=1
            ;;
        --install-all)
            ADDITIONAL=1
            DEBDEPS=1
            TESTDEPS=1
            ADD_QT5=1
            ADD_QT6=1
            HAVE_ADD_QT=1
            ;;
        *)
            OPTIONS="$OPTIONS $arg"
            ;;
    esac
done

# Check if the user is root
if [ "$(id -u)" -ne 0 ]
then
    echo "You must be root."
    exit 1
fi
+
# Minimum tool chain and libraries needed to configure and build.
BASIC_LIST="gcc \
    g++\
    libglib2.0-dev \
    libc-ares-dev \
    libpcap-dev \
    libpcre2-dev \
    flex \
    make \
    python3 \
    libgcrypt-dev \
    libspeexdsp-dev"

QT5_LIST="qttools5-dev \
    qttools5-dev-tools \
    libqt5svg5-dev \
    qtmultimedia5-dev \
    qtbase5-dev \
    qtchooser \
    qt5-qmake \
    qtbase5-dev-tools"

QT6_LIST="qt6-base-dev \
    qt6-multimedia-dev \
    qt6-tools-dev \
    qt6-tools-dev-tools \
    qt6-l10n-tools \
    libqt6core5compat6-dev \
    freeglut3-dev \
    libvulkan-dev \
    libxkbcommon-dev"

# Explicit --install-qt5-deps / --install-qt6-deps selections.
if [ $ADD_QT5 -ne 0 ]
then
    BASIC_LIST="$BASIC_LIST $QT5_LIST"
    HAVE_ADD_QT=1
fi

if [ $ADD_QT6 -ne 0 ]
then
    BASIC_LIST="$BASIC_LIST $QT6_LIST"
    HAVE_ADD_QT=1
fi

if [ $HAVE_ADD_QT -eq 0 ]
then
    # Try to select Qt version from distro
    test -e /etc/os-release && os_release='/etc/os-release' || os_release='/usr/lib/os-release'
    # shellcheck disable=SC1090
    . "${os_release}"

    # Ubuntu 22.04 (jammy) or later
    MAJOR=$(echo "$VERSION_ID" | cut -f1 -d.)
    if [ "${ID:-linux}" = "ubuntu" ] && [ "${MAJOR:-0}" -ge "22" ]; then
        echo "Installing Qt6."
        BASIC_LIST="$BASIC_LIST $QT6_LIST"
    else
        echo "Installing Qt5."
        BASIC_LIST="$BASIC_LIST $QT5_LIST"
    fi
fi

# Optional capture/codec/build-support libraries (--install-optional).
ADDITIONAL_LIST="libnl-3-dev \
    libkrb5-dev \
    libsmi2-dev \
    libsbc-dev \
    liblua5.2-dev \
    libnl-cli-3-dev \
    libparse-yapp-perl \
    libcap-dev \
    liblz4-dev \
    libsnappy-dev \
    libzstd-dev \
    libspandsp-dev \
    libxml2-dev \
    libminizip-dev \
    git \
    ninja-build \
    perl \
    xsltproc \
    ccache \
    doxygen"

# Uncomment to add PNG compression utilities used by compress-pngs:
# ADDITIONAL_LIST="$ADDITIONAL_LIST \
#     advancecomp \
#     optipng \
#     pngcrush"

# Packages needed to build the .deb files (--install-deb-deps).
DEBDEPS_LIST="debhelper \
    dh-python \
    asciidoctor \
    docbook-xml \
    docbook-xsl \
    libxml2-utils \
    lintian \
    lsb-release \
    po-debconf \
    python3-ply \
    quilt"

# Packages needed to run the test suite (--install-test-deps).
TESTDEPS_LIST="python3-pytest \
    python3-pytest-xdist"
+
# Adds package $2 to list variable $1 if the package is found.
# If $3 is given, then this version requirement must be satisfied.
add_package() {
    local listvar="$1" pkg="$2" reqver="${3:-}" candver

    # Candidate version according to apt; empty if the package is unknown.
    candver=$(apt-cache show "$pkg" 2>/dev/null |
        awk '/^Version:/{ print $2; exit}')

    # Unknown package: nothing to add.
    [ -n "$candver" ] || return 1

    if [ -n "$reqver" ]; then
        # Require minimum version or fail.  $reqver is deliberately
        # unquoted: it holds an operator and a version (e.g. ">= 2.3.0").
        # shellcheck disable=SC2086
        dpkg --compare-versions $candver $reqver || return 1
    fi

    # Package exists (and is new enough): append it to the named list.
    eval "${listvar}=\"\${${listvar}} \${pkg}\""
}
+
# apt-get update must be called before calling add_package
# otherwise available packages appear as unavailable
apt-get update || exit 2

# The add_package chains below try progressively older package names and
# fall back to a warning when none is available.

# cmake3 3.5.1: Ubuntu 14.04
# cmake >= 3.5: Debian >= jessie-backports, Ubuntu >= 16.04
add_package BASIC_LIST cmake3 ||
BASIC_LIST="$BASIC_LIST cmake"

# Debian >= wheezy-backports, Ubuntu >= 16.04
add_package ADDITIONAL_LIST libnghttp2-dev ||
echo "libnghttp2-dev is unavailable" >&2

# Debian >= bookworm, Ubuntu >= 22.04
add_package ADDITIONAL_LIST libnghttp3-dev ||
echo "libnghttp3-dev is unavailable" >&2

# libssh-gcrypt-dev: Debian >= jessie, Ubuntu >= 16.04
# libssh-dev (>= 0.6): Debian >= jessie, Ubuntu >= 14.04
add_package ADDITIONAL_LIST libssh-gcrypt-dev ||
add_package ADDITIONAL_LIST libssh-dev ||
echo "libssh-gcrypt-dev and libssh-dev are unavailable" >&2

# libgnutls28-dev: Debian >= wheezy-backports, Ubuntu >= 12.04
add_package ADDITIONAL_LIST libgnutls28-dev ||
echo "libgnutls28-dev is unavailable" >&2

# Debian >= jessie-backports, Ubuntu >= 16.04
add_package ADDITIONAL_LIST libmaxminddb-dev ||
echo "libmaxminddb-dev is unavailable" >&2

# Debian >= stretch-backports, Ubuntu >= 16.04
add_package ADDITIONAL_LIST libbrotli-dev ||
echo "libbrotli-dev is unavailable" >&2

# libsystemd-journal-dev: Ubuntu 14.04
# libsystemd-dev: Ubuntu >= 16.04
add_package ADDITIONAL_LIST libsystemd-dev ||
add_package ADDITIONAL_LIST libsystemd-journal-dev ||
echo "libsystemd-dev is unavailable"

# ilbc library from http://www.deb-multimedia.org
add_package ADDITIONAL_LIST libilbc-dev ||
echo "libilbc-dev is unavailable"

# opus library libopus-dev
add_package ADDITIONAL_LIST libopus-dev ||
    echo "libopus-dev is unavailable"

# bcg729 library libbcg729-dev
add_package ADDITIONAL_LIST libbcg729-dev ||
    echo "libbcg729-dev is unavailable"

# softhsm2 2.0.0: Ubuntu 16.04
# softhsm2 2.2.0: Debian >= jessie-backports, Ubuntu 18.04
# softhsm2 >= 2.4.0: Debian >= buster, Ubuntu >= 18.10
if ! add_package TESTDEPS_LIST softhsm2 '>= 2.3.0'; then
    if add_package TESTDEPS_LIST softhsm2; then
        # If SoftHSM 2.3.0 is unavailable, install p11tool.
        TESTDEPS_LIST="$TESTDEPS_LIST gnutls-bin"
    else
        echo "softhsm2 is unavailable" >&2
    fi
fi

# Assemble the final install list from the selected option groups.
ACTUAL_LIST=$BASIC_LIST

# Now arrange for optional support libraries
if [ $ADDITIONAL -ne 0 ]
then
    ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi

if [ $DEBDEPS -ne 0 ]
then
    ACTUAL_LIST="$ACTUAL_LIST $DEBDEPS_LIST"
fi

if [ $TESTDEPS -ne 0 ]
then
    ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST"
fi

# shellcheck disable=SC2086
apt-get install $ACTUAL_LIST $OPTIONS || exit 2

# Remind the user about the option groups that were not selected.
if [ $ADDITIONAL -eq 0 ]
then
    printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi

if [ $DEBDEPS -eq 0 ]
then
    printf "\n*** Debian packages build deps not installed. Rerun with --install-deb-deps to have them.\n"
fi

if [ $TESTDEPS -eq 0 ]
then
    printf "\n*** Test deps not installed. Rerun with --install-test-deps to have them.\n"
fi
diff --git a/tools/debug-alloc.env b/tools/debug-alloc.env
new file mode 100644
index 0000000..d6d454c
--- /dev/null
+++ b/tools/debug-alloc.env
@@ -0,0 +1,33 @@
##############################################################################
### Set up environment variables for testing                              ###
##############################################################################
# Meant to be sourced into the shell (the variables are exported so they
# reach child processes such as tshark/wireshark and the test suite).

# Use the Wmem strict allocator which does canaries and scrubbing etc.
export WIRESHARK_DEBUG_WMEM_OVERRIDE=strict
# Abort if a dissector adds too many items to the tree
# (presence of the variable is what matters; the value is empty).
export WIRESHARK_ABORT_ON_TOO_MANY_ITEMS=

# Turn on GLib memory debugging (GLib 2.13 and later)
export G_SLICE=debug-blocks

# Cause glibc (Linux) to abort() if some memory errors are found
export MALLOC_CHECK_=3

# Cause FreeBSD (and other BSDs) to abort() on allocator warnings and
# initialize allocated memory (to 0xa5) and freed memory (to 0x5a).  see:
# https://www.freebsd.org/cgi/man.cgi?query=malloc&apropos=0&sektion=0&manpath=FreeBSD+8.2-RELEASE&format=html
export MALLOC_OPTIONS=AJ

# macOS options; see https://developer.apple.com/library/archive/documentation/Performance/Conceptual/ManagingMemory/Articles/MallocDebug.html
# Initialize allocated memory to 0xAA and freed memory to 0x55
export MallocPreScribble=1
export MallocScribble=1
# Add guard pages before and after large allocations
export MallocGuardEdges=1
# Call abort() if heap corruption is detected.  Heap is checked every 1000
# allocations (may need to be tuned!)
export MallocCheckHeapStart=1000
export MallocCheckHeapEach=1000
export MallocCheckHeapAbort=1
# Call abort() if an illegal free() call is made
export MallocBadFreeAbort=1
diff --git a/tools/delete_includes.py b/tools/delete_includes.py
new file mode 100755
index 0000000..cc804e0
--- /dev/null
+++ b/tools/delete_includes.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python3
+
+# Martin Mathieson
+# Look for and removes unnecessary includes in .cpp or .c files
+# Run from wireshark source folder as e.g.,
+# ./tools/delete_includes.py --build-folder ~/wireshark-build/ --folder epan/dissectors/
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+import subprocess
+import os
+import sys
+import shutil
+import argparse
+import signal
+import re
+from pathlib import Path
+
+
# Try to exit soon after Ctrl-C is pressed.
# Module-level flag polled by the long-running loops below.
should_exit = False

def signal_handler(sig, frame):
    # SIGINT handler: record that Ctrl-C was pressed so the main loop can
    # stop at the next safe point instead of dying mid-build.
    global should_exit
    should_exit = True
    print('You pressed Ctrl+C - exiting')

# Install the handler for the lifetime of the script.
signal.signal(signal.SIGINT, signal_handler)
+
# For text colouring/highlighting.
class bcolors:
    """ANSI escape sequences used to colour/highlight terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    ADDED = '\033[45m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
+
+
+
# command-line args
#
# Controls which dissector files should be checked. If no args given, will just
# scan whole epan/dissectors folder.
parser = argparse.ArgumentParser(description='Check calls in dissectors')
# required
# NOTE: the help text was previously a copy-paste of --file's help; it now
# describes what the option actually is (the CMake build directory).
parser.add_argument('--build-folder', action='store', required=True,
                    help='specify the build folder to compile in')
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--folder', action='store', default=os.path.join('epan', 'dissectors'),
                    help='specify folder to test, relative to current/wireshark folder')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')
parser.add_argument('--first-file', action='store',
                    help='first file in folder to test')
parser.add_argument('--last-file', action='store',
                    help='last file in folder to test')
args = parser.parse_args()


# Absolute path of the folder whose files will be examined.
test_folder = os.path.join(os.getcwd(), args.folder)


# Usually only building one module, so no -j benefit?
make_command = ['cmake', '--build', args.build_folder]
if sys.platform.startswith('win'):
    make_command += ['--config', 'RelWithDebInfo']
+

# A list of header files that it is not safe to uninclude, as doing so
# has been seen to cause link failures against implemented functions...
# TODO: some of these could probably be removed on more permissive platforms.
# Matched by substring (line.find) against each '#include' line, so these
# are effectively header-name fragments, not exact paths.
includes_to_keep = {
    'config.h',
    'epan/packet.h',
    'stdlib.h',
    'math.h',
    'errno.h',
    'string.h',
    'prefs.h',
    # These are probably mostly redundant in that they are now covered by the check
    # for 'self-includes'...
    'x11-keysym.h',
    'packet-atm.h',
    'packet-atalk.h',
    'packet-ppp.h',
    'packet-scsi-mmc.h',
    'packet-tls.h'
}
+
+
# Build stats.
class BuildStats:
    """Counters accumulated over the whole run, reported at the end."""

    def __init__(self):
        self.files_examined = 0
        self.includes_tested = 0
        self.includes_deleted = 0
        self.files_not_built_list = []
        self.generated_files_ignored = []
        self.includes_to_keep_kept = 0

    def showSummary(self):
        """Print a human-readable summary of all counters to stdout."""
        print('\n\n')
        print('Summary')
        print('=========')
        print(f'files examined:   {self.files_examined}')
        print(f'includes tested:  {self.includes_tested}')
        print(f'includes deleted: {self.includes_deleted}')
        print(f'files not built:  {len(self.files_not_built_list)}')
        for abandoned_file in self.files_not_built_list:
            print(f'    {abandoned_file}')
        print(f'generated files not tested: {len(self.generated_files_ignored)}')
        for generated_file in self.generated_files_ignored:
            print(f'    {generated_file}')
        print(f'includes kept as not safe to remove: {self.includes_to_keep_kept}')

stats = BuildStats()
+
+
# We want to confirm that this file is actually built as part of the build.
# To do this, add some nonsense to the front of the file and confirm that the
# build then fails. If it doesn't, won't want to remove #includes from that file!
def test_file_is_built(filename):
    """Return True if 'filename' appears to be compiled by the build.

    Prepends garbage to the file, reruns the build, and restores the
    original afterwards; a build that still succeeds means the file is
    not part of the build.
    """
    print('test_file_is_built(', filename, ')')
    temp_filename = filename + '.tmp'
    write_filename = filename + '.new'

    # Write a sabotaged copy: nonsense first, then the original contents.
    # Context managers ensure both handles are closed even on error (the
    # previous version left them open until explicit close() calls).
    with open(filename, 'r') as f_read, open(write_filename, 'w') as f_write:
        f_write.write('NO WAY THIS FILE BUILDS!!!!!')
        for line in f_read:
            f_write.write(line)

    # Backup file, and do this build with the one we wrote.
    shutil.copy(filename, temp_filename)
    shutil.copy(write_filename, filename)

    # Try the build.
    result = subprocess.call(make_command)

    # Restore proper file & delete temp files
    shutil.copy(temp_filename, filename)
    os.remove(temp_filename)
    os.remove(write_filename)

    # A failing build (non-zero exit) means the file *is* part of the build.
    return result != 0
+
+
+# Function to test removal of each #include from a file in turn.
+# At the end, only those that appear to be needed will be left.
def test_file(filename):
    """Repeatedly try to delete one #include at a time from *filename*,
    rebuilding after each deletion.

    A deletion that keeps the build green is kept (the rewritten file is
    left in place); a deletion that breaks the build is reverted.  Results
    are accumulated in the global ``stats`` object.  Exits the process if
    the global ``should_exit`` flag has been set (e.g. by a signal handler).
    """
    global stats

    print('\n------------------------------')
    print(bcolors.OKBLUE, bcolors.BOLD, 'Testing', filename, bcolors.ENDC)

    temp_filename = filename + '.tmp'

    # Test if file seems to be part of the build.
    is_built = test_file_is_built(filename)
    if not is_built:
        print(bcolors.WARNING, '***** File not used in build, so ignore!!!!', bcolors.ENDC)
        # TODO: should os.path.join with root before adding?
        stats.files_not_built_list.append(filename)
        return
    else:
        print('This file is part of the build')

    # OK, we are going to test removing includes from this file.
    # Line number of the most recently attempted deletion; only lines past
    # this one are candidates on the next pass, so we never retry a line.
    tested_line_number = 0

    # Don't want to delete 'self-includes', so prepare filename.
    module_name = Path(filename).stem
    extension = Path(filename).suffix   # NOTE(review): currently unused

    module_header = module_name + '.h'

    # Loop around, finding all possible include lines to comment out
    while (True):
        if should_exit:
            # Aborted between builds (e.g. Ctrl-C) - leave file as-is.
            exit(1)

        have_deleted_line = False
        result = 0

        # Open read & write files
        f_read = open(filename, 'r')
        write_filename = filename + '.new'
        f_write = open(write_filename, 'w')

        # Walk the file again looking for another place to comment out an include
        this_line_number = 1
        hash_if_level = 0

        for line in f_read:
            this_line_deleted = False

            # Maintain view of how many #if or #ifdefs we are in.
            # Don't want to remove any includes that may not be active in this build.
            if line.startswith('#if'):
                hash_if_level = hash_if_level + 1

            if line.startswith('#endif'):
                if hash_if_level > 1:
                    hash_if_level = hash_if_level - 1

            # Consider deleting this line if we haven't already reached it
            # on a previous pass.
            if (not have_deleted_line and (tested_line_number < this_line_number)):

                # Test line for starting with #include, and eligible for deletion.
                if line.startswith('#include ') and hash_if_level == 0 and line.find(module_header) == -1:
                    # Check that this isn't a header file that is known to be
                    # unsafe to un-include.
                    allowed_to_delete = True
                    for entry in includes_to_keep:
                        if line.find(entry) != -1:
                            allowed_to_delete = False
                            stats.includes_to_keep_kept += 1
                            continue

                    if allowed_to_delete:
                        # OK, actually doing it.
                        have_deleted_line = True
                        this_line_deleted = True
                        tested_line_number = this_line_number

            # Write line to output file, unless this very one was deleted.
            if not this_line_deleted:
                f_write.write(line)
            this_line_number = this_line_number + 1

        # Close both files.
        f_read.close()
        f_write.close()

        # If we commented out a line, try to build file without it.
        if (have_deleted_line):
            # Test a build. 0 means success, others are failures.
            shutil.copy(filename, temp_filename)
            shutil.copy(write_filename, filename)

            # Try build
            result = subprocess.call(make_command)
            if result == 0:
                print(bcolors.OKGREEN +bcolors.BOLD + 'Good build' + bcolors.ENDC)
                # Line was eliminated so decrement line counter
                tested_line_number = tested_line_number - 1
                # Inc successes counter
                stats.includes_deleted += 1
                # Good - promote this version by leaving it here!

                # Occasionally fails so delete this file each time.
                # TODO: this is very particular to dissector target...
                if sys.argv[1] == 'dissectors':
                    os.remove(os.path.join(args.build_folder, 'vc100.pdb'))
            else:
                print(bcolors.FAIL +bcolors.BOLD + 'Bad build' + bcolors.ENDC)
                # Never mind, go back to previous building version
                shutil.copy(temp_filename, filename)

            # Inc counter of tried
            stats.includes_tested += 1

        else:
            # Reached the end of the file without making changes, so nothing doing.
            # Delete temporary files
            if os.path.isfile(temp_filename):
                os.remove(temp_filename)
            if os.path.isfile(write_filename):
                os.remove(write_filename)
            return
+
def under_version_control(filename):
    """Return True if *filename* is tracked by source control.

    Currently a stub that assumes every file is tracked.
    TODO: ask git, e.g. check the return code of 'git log <filename>'.
    """
    return True
+
# Test for whether the given file was automatically generated.
def generated_file(filename):
    """Heuristically decide whether *filename* was machine-generated.

    Generated files carry a well-known generator banner in a comment near
    the top, so only the first few lines are inspected.

    Returns True for generated files (including the special-cased
    'register.c', matched by exact name as passed in), False otherwise.
    """
    # Special known case.
    if filename == 'register.c':
        return True

    # Banners that the various code generators leave near the top.
    markers = ('Generated automatically',
               'Generated Automatically',
               'Autogenerated from',
               'is autogenerated',
               'automatically generated by Pidl',
               'Created by: The Qt Meta Object Compiler',
               'This file was generated',
               'This filter was automatically generated',
               'This file is auto generated, do not edit!')

    # Use a context manager so the handle is closed even if reading fails
    # (the original leaked the handle on an exception mid-read).
    with open(filename, 'r') as f_read:
        for lines_tested, line in enumerate(f_read):
            # The comment to say that it's generated is near the top, so give
            # up once we get a few lines down (11 lines, as before).
            if lines_tested > 10:
                break
            if any(marker in line for marker in markers):
                return True

    # OK, looks like a hand-written file!
    return False
+
def isBuildableFile(filename):
    """A file takes part in the build if it is C or C++ source."""
    return filename.endswith(('.c', '.cpp'))
+
+
def findFilesInFolder(folder, recursive=False):
    """Return the buildable (.c/.cpp) files found in *folder*.

    With recursive=True the whole tree below *folder* is walked; otherwise
    only the folder's direct entries are considered, in sorted order.
    NOTE: if the global should_exit flag fires mid-scan, None is returned.
    """
    candidates = []
    if recursive:
        for root, _subfolders, names in os.walk(folder):
            for name in names:
                if should_exit:
                    return
                candidates.append(os.path.join(root, name))
    else:
        for name in sorted(os.listdir(folder)):
            if should_exit:
                return
            candidates.append(os.path.join(folder, name))

    return [path for path in candidates if isBuildableFile(path)]
+
+
######################################################################################
# MAIN PROGRAM STARTS HERE
######################################################################################

# Work out which files we want to look at.
files = []
if args.file:
    # Add specified file(s), aborting if any is missing.
    for f in args.file:
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        files.append(f)
elif args.folder:
    # Add all buildable files from a given folder.
    folder = args.folder
    if not os.path.isdir(folder):
        print('Folder', folder, 'not found!')
        exit(1)
    # Find files from folder.
    print('Looking for files in', folder)
    files = findFilesInFolder(folder, recursive=False)


# If first-file/last-file are given, trim the file list accordingly.
# BUG FIX: list.index() raises ValueError when the entry is absent rather
# than returning -1, so the old "idx == -1" checks were dead code and a
# missing entry crashed with a traceback instead of the intended message.
if args.first_file:
    try:
        idx = files.index(args.first_file)
    except ValueError:
        print('first-file entry', args.first_file, 'not in list of files to be checked')
        exit(1)
    files = files[idx:]

if args.last_file:
    try:
        idx = files.index(args.last_file)
    except ValueError:
        print('last-file entry', args.last_file, 'not in list of files to be checked')
        exit(1)
    files = files[:idx+1]


# Confirm that the build is currently passing, if not give up now.
print(bcolors.OKBLUE, bcolors.BOLD,
      'Doing an initial build to check we have a stable base.',
      bcolors.ENDC)
result = subprocess.call(make_command)
if result != 0:
    print(bcolors.FAIL, bcolors.BOLD, 'Initial build failed - give up now!!!!', bcolors.ENDC)
    exit(-1)


# Test each file, filtering out generated files that are not checked in.
for filename in files:
    # Evaluate each predicate once (generated_file() reads the file, and the
    # original called it twice for generated files).
    is_generated = generated_file(filename)
    is_versioned = under_version_control(filename)

    if not is_generated and is_versioned:
        # OK, try this file
        test_file(filename)

        # Inc counter
        stats.files_examined += 1
    else:
        if is_generated:
            reason = 'generated file...'
        if not is_versioned:
            reason = 'not under source control'
        print('Ignoring %s: %s' % (filename, reason))


# Show summary stats of run
stats.showSummary()
diff --git a/tools/detect_bad_alloc_patterns.py b/tools/detect_bad_alloc_patterns.py
new file mode 100644
index 0000000..a89ceb6
--- /dev/null
+++ b/tools/detect_bad_alloc_patterns.py
@@ -0,0 +1,120 @@
+"""
+Detect and replace instances of g_malloc() and wmem_alloc() with
+g_new() wmem_new(), to improve the readability of Wireshark's code.
+
+Also detect and replace instances of
+g_malloc(sizeof(struct myobj) * foo)
+with:
+g_new(struct myobj, foo)
+to better prevent integer overflows
+
+SPDX-License-Identifier: MIT
+"""
+
+import os
+import re
+import sys
+
# When True, a line is printed for every individual replacement performed.
print_replacement_info = True

# (compiled regex, replacement template) pairs, applied to each file in order.
#
# NOTE(review): '[struct]{0,6}' is a *character class* repeated 0-6 times,
# i.e. it matches any short run of the letters s/t/r/u/c — presumably meant
# as "optional 'struct ' prefix"; confirm before tightening it.
patterns = [
# Replace (myobj *)g_malloc(sizeof(myobj)) with g_new(myobj, 1)
# Replace (struct myobj *)g_malloc(sizeof(struct myobj)) with g_new(struct myobj, 1)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'g_new\2(\1, 1)'),

# Replace (myobj *)g_malloc(sizeof(myobj) * foo) with g_new(myobj, foo)
# Replace (struct myobj *)g_malloc(sizeof(struct myobj) * foo) with g_new(struct myobj, foo)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*sizeof\s*\(\s*\1\s*\)\s*\*\s*([^\s]+)\s*\)'), r'g_new\2(\1, \3)'),

# Replace (myobj *)g_malloc(foo * sizeof(myobj)) with g_new(myobj, foo)
# Replace (struct myobj *)g_malloc(foo * sizeof(struct myobj)) with g_new(struct myobj, foo)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*([^\s]+)\s*\*\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'g_new\2(\1, \3)'),

# Replace (myobj *)wmem_alloc(wmem_file_scope(), sizeof(myobj)) with wmem_new(wmem_file_scope(), myobj)
# Replace (struct myobj *)wmem_alloc(wmem_file_scope(), sizeof(struct myobj)) with wmem_new(wmem_file_scope(), struct myobj)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*wmem_alloc(0?)\s*\(\s*([_a-z\(\)->]+),\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'wmem_new\2(\3, \1)'),
]
+
def replace_file(fpath):
    """Apply all replacement patterns to one file.

    The file is rewritten in place only when something changed.  Returns
    the (possibly updated) file contents.
    """
    with open(fpath, 'r') as fh:
        original = fh.read()

    text = original
    for pattern, replacewith in patterns:
        updated = pattern.sub(replacewith, text)
        if print_replacement_info and updated != text:
            # Report each individual occurrence that was rewritten.
            for match in re.finditer(pattern, text):
                replacement = re.sub(pattern, replacewith, match.group(0))
                print("Bad malloc pattern in %s: Replace '%s' with '%s'" % (fpath, match.group(0), replacement))
        text = updated

    if updated != original:
        with open(fpath, 'w') as fh:
            fh.write(updated)
    return updated
+
def run_specific_files(fpaths):
    """Run the replacements over the given paths, C/C++ sources only."""
    for fpath in fpaths:
        if fpath.endswith(('.c', '.cpp')):
            replace_file(fpath)
+
def run_recursive(root_dir):
    """Walk *root_dir* and run the replacements over every file found."""
    for root, _dirs, names in os.walk(root_dir):
        run_specific_files(os.path.join(root, fname) for fname in names)
+
def test_replacements():
    """Self-test: run every pattern over a fixture of known-bad allocation
    calls and assert the rewritten text matches the expected output exactly.

    Called from main() at startup, so a regressed pattern aborts the run
    before any real file is touched.
    """
    test_string = """\
(if_info_t*) g_malloc0(sizeof(if_info_t))
(oui_info_t *)g_malloc(sizeof (oui_info_t))
(guint8 *)g_malloc(16 * sizeof(guint8))
(guint32 *)g_malloc(sizeof(guint32)*2)
(struct imf_field *)g_malloc (sizeof (struct imf_field))
(rtspstat_t *)g_malloc( sizeof(rtspstat_t) )
(proto_data_t *)wmem_alloc(scope, sizeof(proto_data_t))
(giop_sub_handle_t *)wmem_alloc(wmem_epan_scope(), sizeof (giop_sub_handle_t))
(mtp3_addr_pc_t *)wmem_alloc0(pinfo->pool, sizeof(mtp3_addr_pc_t))
(dcerpc_bind_value *)wmem_alloc(wmem_file_scope(), sizeof (dcerpc_bind_value))
(dcerpc_matched_key *)wmem_alloc(wmem_file_scope(), sizeof (dcerpc_matched_key));
(struct smtp_session_state *)wmem_alloc0(wmem_file_scope(), sizeof(struct smtp_session_state))
(struct batman_packet_v5 *)wmem_alloc(pinfo->pool, sizeof(struct batman_packet_v5))
(struct knx_keyring_mca_keys*) wmem_alloc( wmem_epan_scope(), sizeof( struct knx_keyring_mca_keys ) )
"""
    expected_output = """\
g_new0(if_info_t, 1)
g_new(oui_info_t, 1)
g_new(guint8, 16)
g_new(guint32, 2)
g_new(struct imf_field, 1)
g_new(rtspstat_t, 1)
wmem_new(scope, proto_data_t)
wmem_new(wmem_epan_scope(), giop_sub_handle_t)
wmem_new0(pinfo->pool, mtp3_addr_pc_t)
wmem_new(wmem_file_scope(), dcerpc_bind_value)
wmem_new(wmem_file_scope(), dcerpc_matched_key);
wmem_new0(wmem_file_scope(), struct smtp_session_state)
wmem_new(pinfo->pool, struct batman_packet_v5)
wmem_new(wmem_epan_scope(), struct knx_keyring_mca_keys)
"""
    output = test_string
    for pattern, replacewith in patterns:
        output = pattern.sub(replacewith, output)
    assert(output == expected_output)
+
def main():
    """Self-test the patterns, then process targets.

    With exactly one command line argument the whole tree below it is
    processed recursively; otherwise file paths are read from stdin, one
    per line.
    """
    test_replacements()
    if len(sys.argv) == 2:
        run_recursive(sys.argv[1])
    else:
        # Collect non-empty, stripped lines from stdin.
        fpaths = []
        for raw in sys.stdin:
            path = raw.strip()
            if path:
                fpaths.append(path)
        run_specific_files(fpaths)


if __name__ == "__main__":
    main()
diff --git a/tools/eti2wireshark.py b/tools/eti2wireshark.py
new file mode 100755
index 0000000..98fb291
--- /dev/null
+++ b/tools/eti2wireshark.py
@@ -0,0 +1,1166 @@
+#!/usr/bin/env python3
+
+# Generate Wireshark Dissectors for eletronic trading/market data
+# protocols such as ETI/EOBI.
+#
+# Targets Wireshark 3.5 or later.
+#
+# SPDX-FileCopyrightText: © 2021 Georg Sauthoff <mail@gms.tf>
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+import argparse
+import itertools
+import re
+import sys
+import xml.etree.ElementTree as ET
+
+
+# inlined from upstream's etimodel.py
+
+import itertools
+
def get_max_sizes(st, dt):
    """Compute the maximum encoded size of every data type and structure.

    Leaf sizes come from the 'size' attribute in *dt*; a structure's size
    is the sum over its members of member-type size times 'cardinality'.
    Non-message structures are resolved before messages, so nested struct
    lookups find their sizes already computed.
    """
    sizes = {name: int(e.get('size', '0')) for name, e in dt.items()}

    non_messages = (i for i in st.items() if i[1].get('type') != 'Message')
    messages = (i for i in st.items() if i[1].get('type') == 'Message')
    for name, e in itertools.chain(non_messages, messages):
        sizes[name] = sum(sizes.get(m.get('type'), 0) * int(m.get('cardinality'))
                          for m in e)
    return sizes
+
def get_min_sizes(st, dt):
    """Compute the minimum encoded size of every data type and structure.

    Variable-size leaves count as 0 bytes; members use 'minCardinality'
    (defaulting to 1).  Non-message structures are resolved before
    messages so nested lookups succeed.
    """
    sizes = {}
    for name, e in dt.items():
        if e.get('variableSize') is None:
            sizes[name] = int(e.get('size', '0'))
        else:
            sizes[name] = 0

    non_messages = (i for i in st.items() if i[1].get('type') != 'Message')
    messages = (i for i in st.items() if i[1].get('type') == 'Message')
    for name, e in itertools.chain(non_messages, messages):
        sizes[name] = sum(sizes.get(m.get('type'), 0) * int(m.get('minCardinality', '1'))
                          for m in e)
    return sizes
+
+# end # inlined from upstream's etimodel.py
+
+
def get_used_types(st):
    """Return the set of type names referenced by any structure member."""
    used = set()
    for struct in st.values():
        for member in struct:
            used.add(member.get('type'))
    return used
+
def get_data_types(d):
    """Map data-type name -> element, from the document's DataTypes section."""
    section = d.getroot().find('DataTypes')
    return {e.get('name'): e for e in section}
+
def get_structs(d):
    """Map structure name -> element, from the document's Structures section."""
    section = d.getroot().find('Structures')
    return {e.get('name'): e for e in section}
+
def get_templates(st):
    """Return (numericID, name) pairs of all message structures, sorted by ID."""
    return sorted((int(v.get('numericID')), k)
                  for k, v in st.items() if v.get('type') == 'Message')
+
+
def gen_header(proto, desc, o=sys.stdout):
    """Emit the file comment header, includes and prototypes of the dissector.

    ETI/XTI ride on TCP, everything else (EOBI) on UDP, which decides the
    packet-tcp.h vs packet-udp.h helper include.
    """
    if proto.startswith(('eti', 'xti')):
        ph = '#include "packet-tcp.h" // tcp_dissect_pdus()'
    else:
        ph = '#include "packet-udp.h" // udp_dissect_pdus()'
    print(f'''// auto-generated by Georg Sauthoff's eti2wireshark.py

/* packet-eti.c
 * Routines for {proto.upper()} dissection
 * Copyright 2021, Georg Sauthoff <mail@gms.tf>
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

/*
 * The {desc} ({proto.upper()}) is an electronic trading protocol
 * that is used by a few exchanges (Eurex, Xetra, ...).
 *
 * It's a Length-Tag based protocol consisting of mostly fix sized
 * request/response messages.
 *
 * Links:
 * https://en.wikipedia.org/wiki/List_of_electronic_trading_protocols#Europe
 * https://github.com/gsauthof/python-eti#protocol-descriptions
 * https://github.com/gsauthof/python-eti#protocol-introduction
 *
 */

#include <config.h>


#include <epan/packet.h> // Should be first Wireshark include (other than config.h)
{ph}
#include <epan/expert.h> // expert info

#include <inttypes.h>
#include <stdio.h> // snprintf()


/* Prototypes */
/* (Required to prevent [-Wmissing-prototypes] warnings */
void proto_reg_handoff_{proto}(void);
void proto_register_{proto}(void);
''', file=o)
+
+
def name2ident(name):
    """Convert a CamelCase name into a snake_case C identifier.

    An underscore is inserted before an upper-case letter only when the
    preceding character was not upper-case, so acronym runs like the 'ID'
    in 'TemplateID' stay joined: 'TemplateID' -> 'template_id'.
    """
    out = ''
    prev = None
    for ch in name:
        if ch.isupper():
            if prev is not None and not prev.isupper():
                out += '_'
            out += ch.lower()
        else:
            out += ch
        prev = ch
    return out
+
def gen_enums(dt, ts, o=sys.stdout):
    """Emit value_string tables for TemplateID and every enumerated type.

    *dt* maps type name -> data type element, *ts* is the sorted
    (numericID, name) template list.  Returns a mapping from type name to
    the C expression that references its table (either 'VALS(..._vals)' or
    '&..._vals_ext' for the larger, ext-registered tables).
    """
    # TemplateID gets a dense table covering [min_tid, max_tid] with
    # "Unknown" fillers for unallocated IDs.
    print('static const value_string template_id_vals[] = { // TemplateID', file=o)
    min_tid, max_tid = ts[0][0], ts[-1][0]
    xs = [None] * (max_tid - min_tid + 1)
    for tid, name in ts:
        xs[tid-min_tid] = name
    for i, name in enumerate(xs):
        if name is None:
            print(f' {{ {min_tid + i}, "Unknown" }},', file=o)
        else:
            print(f' {{ {min_tid + i}, "{name}" }},', file=o)
    print(''' { 0, NULL }
};
static value_string_ext template_id_vals_ext = VALUE_STRING_EXT_INIT(template_id_vals);''', file=o)
    name2access = { 'TemplateID': '&template_id_vals_ext' }

    # dedup maps a canonical table fingerprint -> first type emitted with it,
    # so identical tables are aliased instead of duplicated.
    dedup = {}
    for name, e in dt.items():
        vs = [ (x.get('value'), x.get('name')) for x in e.findall('ValidValue') ]
        if not vs:
            continue
        # Multi-character string enums cannot be represented as a value_string.
        if e.get('rootType') == 'String' and e.get('size') != '1':
            continue

        ident = name2ident(name)

        # Make sure the noValue sentinel appears in the table as NO_VALUE.
        nv = e.get('noValue')
        ws = [ v[0] for v in vs ]
        if nv not in ws:
            if nv.startswith('0x0') and e.get('rootType') == 'String':
                nv = '\0'
            vs.append( (nv, 'NO_VALUE') )

        # Sort numerically for int types, by character code for char types.
        if e.get('type') == 'int':
            vs.sort(key = lambda x : int(x[0], 0))
        else:
            vs.sort(key = lambda x : ord(x[0]))
        s = '-'.join(f'{v[0]}:{v[1]}' for v in vs)
        x = dedup.get(s)
        if x is None:
            dedup[s] = name
        else:
            # Identical table already emitted — reuse its access expression.
            name2access[name] = name2access[x]
            print(f'// {name} aliased by {x}', file=o)
            continue

        print(f'static const value_string {ident}_vals[] = {{ // {name}', file=o)
        for i, v in enumerate(vs):
            if e.get('rootType') == 'String':
                # Char-keyed entry; the NUL sentinel is spelled 0, not '\0'.
                k = f"'{v[0]}'" if ord(v[0]) != 0 else '0'
                print(f''' {{ {k}, "{v[1]}" }},''', file=o)
            else:
                print(f' {{ {v[0]}, "{v[1]}" }},', file=o)
        print(''' { 0, NULL }
};''', file=o)

        # Larger tables get an ext wrapper (enables binary search in epan).
        if len(vs) > 7:
            print(f'static value_string_ext {ident}_vals_ext = VALUE_STRING_EXT_INIT({ident}_vals);', file=o)
            name2access[name] = f'&{ident}_vals_ext'
        else:
            name2access[name] = f'VALS({ident}_vals)'

    return name2access
+
+
def get_fields(st, dt):
    """Collect all (field name, data type element) pairs used by any structure.

    Padding members are skipped, as are members whose type is neither an
    integer nor a string (the is_* predicates are defined elsewhere in this
    file).  A field name occurring with two different types raises, since
    each name gets exactly one field handle.  Result is sorted by name.
    """
    seen = {}
    for name, e in st.items():
        for m in e:
            t = dt.get(m.get('type'))
            if is_padding(t):
                continue
            if not (is_int(t) or is_fixed_string(t) or is_var_string(t)):
                continue
            # NB: deliberately rebinds the outer loop's 'name' to the member
            # name; the structure name is not needed past this point.
            name = m.get('name')
            if name in seen:
                if seen[name] != t:
                    raise RuntimeError(f'Mismatching type for: {name}')
            else:
                seen[name] = t
    vs = list(seen.items())
    vs.sort()
    return vs
+
def gen_field_handles(st, dt, proto, o=sys.stdout):
    """Emit the expert-info handles, the hf_<proto>[] field handle array and
    the Field_Handle_Index enum that names each slot of that array."""
    print(f'''static expert_field ei_{proto}_counter_overflow = EI_INIT;
static expert_field ei_{proto}_invalid_template = EI_INIT;
static expert_field ei_{proto}_invalid_length = EI_INIT;''', file=o)
    if not proto.startswith('eobi'):
        # Only the non-EOBI dissectors emit an 'unaligned' expert info
        # (cf. the bodylen % 8 check in gen_dissect_fn).
        print(f'static expert_field ei_{proto}_unaligned = EI_INIT;', file=o)
    print(f'''static expert_field ei_{proto}_missing = EI_INIT;
static expert_field ei_{proto}_overused = EI_INIT;
''', file=o)

    # One handle per distinct field, plus fixed handles for the DSCP bits.
    vs = get_fields(st, dt)
    s = ', '.join('-1' for i in range(len(vs)))
    print(f'static int hf_{proto}[] = {{ {s} }};', file=o)
    print(f'''static int hf_{proto}_dscp_exec_summary = -1;
static int hf_{proto}_dscp_improved = -1;
static int hf_{proto}_dscp_widened = -1;''', file=o)
    # The enum assigns each field name its index into hf_<proto>[].
    print('enum Field_Handle_Index {', file=o)
    for i, (name, _) in enumerate(vs):
        c = ' ' if i == 0 else ','
        print(f' {c} {name.upper()}_FH_IDX', file=o)
    print('};', file=o)
+
def type2ft(t):
    """Map a data type element to its Wireshark FT_* field type constant."""
    if is_timestamp_ns(t):
        return 'FT_ABSOLUTE_TIME'
    if is_dscp(t):
        return 'FT_UINT8'
    if is_int(t):
        if t.get('rootType') == 'String':
            return 'FT_CHAR'
        if t.get('size') is None:
            raise RuntimeError(f'None size: {t.get("name")}')
        bits = int(t.get('size')) * 8
        prefix = 'FT_UINT' if is_unsigned(t) else 'FT_INT'
        return f'{prefix}{bits}'
    if is_fixed_string(t) or is_var_string(t):
        # NB: technically, ETI fixed-strings are blank-padded,
        # unless they are marked NO_VALUE, in that case
        # the first byte is zero, followed by unspecified content.
        # Also, some fixed-strings are zero-terminated, where again
        # the bytes following the terminator are unspecified.
        return 'FT_STRINGZTRUNC'
    raise RuntimeError('unexpected type')
+
def type2enc(t):
    """Map a data type element to the display base / encoding constant
    used in its hf registration."""
    if is_timestamp_ns(t):
        return 'ABSOLUTE_TIME_UTC'
    if is_dscp(t):
        return 'BASE_HEX'
    if is_int(t):
        # NB: the char-rooted case is basically only used when the value
        # is an enum member that is unknown.
        return 'BASE_HEX' if t.get('rootType') == 'String' else 'BASE_DEC'
    if is_fixed_string(t) or is_var_string(t):
        # previously 'STR_ASCII', which was removed upstream
        # cf. 19dcb725b61e384f665ad4b955f3b78f63e626d9
        return 'BASE_NONE'
    raise RuntimeError('unexpected type')
+
def gen_field_info(st, dt, n2enum, proto='eti', o=sys.stdout):
    """Emit the hf_register_info array for all fields plus the DSCP flag bits.

    *n2enum* maps type name -> C expression for its value_string table, as
    returned by gen_enums().
    """
    print(' static hf_register_info hf[] ={', file=o)
    vs = get_fields(st, dt)
    for i, (name, t) in enumerate(vs):
        c = ' ' if i == 0 else ','
        ft = type2ft(t)
        enc = type2enc(t)
        if is_enum(t) and not is_dscp(t):
            vals = n2enum[t.get('name')]
            # Ext tables (referenced by address) need BASE_EXT_STRING set.
            if vals.startswith('&'):
                extra_enc = '| BASE_EXT_STRING'
            else:
                extra_enc = ''
        else:
            vals = 'NULL'
            extra_enc = ''
        print(f''' {c} {{ &hf_{proto}[{name.upper()}_FH_IDX],
 {{ "{name}", "{proto}.{name.lower()}",
 {ft}, {enc}{extra_enc}, {vals}, 0x0,
 NULL, HFILL }}
 }}''', file=o)
    # Fixed entries for the three single-bit DSCP flags.
    print(f''' , {{ &hf_{proto}_dscp_exec_summary,
 {{ "DSCP_ExecSummary", "{proto}.dscp_execsummary",
 FT_BOOLEAN, 8, NULL, 0x10,
 NULL, HFILL }}
 }}
 , {{ &hf_{proto}_dscp_improved,
 {{ "DSCP_Improved", "{proto}.dscp_improved",
 FT_BOOLEAN, 8, NULL, 0x20,
 NULL, HFILL }}
 }}
 , {{ &hf_{proto}_dscp_widened,
 {{ "DSCP_Widened", "{proto}.dscp_widened",
 FT_BOOLEAN, 8, NULL, 0x40,
 NULL, HFILL }}
 }}''', file=o)
    print(' };', file=o)
+
+
def gen_subtree_handles(st, proto='eti', o=sys.stdout):
    """Emit the ett_<proto> subtree handle array (slot 0 is the root) and
    return a mapping struct-name -> ett index (starting at 1)."""
    names = sorted(name for name, e in st.items() if e.get('type') != 'Message')
    initializers = ', '.join('-1' for _ in range(len(names) + 1))
    mapping = dict((n, i) for i, n in enumerate(names, 1))
    print(f'static gint ett_{proto}[] = {{ {initializers} }};', file=o)
    print(f'static gint ett_{proto}_dscp = -1;', file=o)
    return mapping
+
+
def gen_subtree_array(st, proto='eti', o=sys.stdout):
    """Emit the local array of ett pointers used for subtree registration
    (one per non-message struct, plus root and the DSCP subtree)."""
    count = sum(1 for name, e in st.items() if e.get('type') != 'Message') + 1
    entries = ', '.join(f'&ett_{proto}[{i}]' for i in range(count))
    print(f' static gint * const ett[] = {{ {entries}, &ett_{proto}_dscp }};', file=o)
+
+
def gen_fields_table(st, dt, sh, o=sys.stdout):
    """Emit struct_names[] and the ETI_Field fields[] table that drives the
    table-driven dissector loop.

    *sh* maps struct name -> ett index (from gen_subtree_handles()).
    Returns a mapping struct/message name -> its first index in fields[].
    """
    # Pack the struct display names (sans 'Comp' suffix) into one
    # NUL-separated char array; name2off records each name's offset.
    name2off = {}
    off = 0
    names = []
    for name, e in st.items():
        if e.get('type') == 'Message':
            continue
        if name.endswith('Comp'):
            s = name[:-4]
            name2off[name] = off
            off += len(s) + 1
            names.append(s)
    s = '\\0'.join(names)
    print(f' static const char struct_names[] = "{s}";', file=o)

    # Non-message structs first, then messages, so nested struct entries
    # already have a fields[] index when a message references them.
    xs = [ x for x in st.items() if x[1].get('type') != 'Message' ]
    xs += [ x for x in st.items() if x[1].get('type') == 'Message' ]
    print(' static const struct ETI_Field fields[] = {', file=o)
    i = 0
    fields2idx = {}
    for name, e in xs:
        fields2idx[name] = i
        print(f' // {name}@{i}', file=o)
        # counters maps counter field name -> slot in the dissector's
        # run-time counter[] array (at most 8 per message).
        counters = {}
        cnt = 0
        for m in e:
            t = dt.get(m.get('type'))
            c = ' ' if i == 0 else ','
            typ = ''
            size = int(t.get('size')) if t is not None else 0
            rep = ''
            fh = f'{m.get("name").upper()}_FH_IDX'
            sub = ''
            if is_padding(t):
                print(f' {c} {{ ETI_PADDING, 0, {size}, 0, 0 }}', file=o)
            elif is_fixed_point(t):
                if size != 8:
                    raise RuntimeError('only supporting 8 byte fixed point')
                fraction = int(t.get('precision'))
                if fraction > 16:
                    # NOTE(review): typo 'precisio' below is a runtime string
                    # and is preserved as-is.
                    raise RuntimeError('unusual high precisio in fixed point')
                print(f' {c} {{ ETI_FIXED_POINT, {fraction}, {size}, {fh}, 0 }}', file=o)
            elif is_timestamp_ns(t):
                if size != 8:
                    raise RuntimeError('only supporting timestamps')
                print(f' {c} {{ ETI_TIMESTAMP_NS, 0, {size}, {fh}, 0 }}', file=o)
            elif is_dscp(t):
                print(f' {c} {{ ETI_DSCP, 0, {size}, {fh}, 0 }}', file=o)
            elif is_int(t):
                u = 'U' if is_unsigned(t) else ''
                if t.get('rootType') == 'String':
                    typ = 'ETI_CHAR'
                else:
                    typ = f'ETI_{u}INT'
                    if is_enum(t):
                        typ += '_ENUM'
                if t.get('type') == 'Counter':
                    # Counters are stored into counter[cnt] at dissect time so
                    # later VAR_STRING/VAR_STRUCT entries can load them.
                    counters[m.get('name')] = cnt
                    suf = f' // <- counter@{cnt}'
                    if cnt > 7:
                        raise RuntimeError(f'too many counters in message: {name}')
                    rep = cnt
                    cnt += 1
                    if typ != 'ETI_UINT':
                        raise RuntimeError('only unsigned counters supported')
                    if size > 2:
                        raise RuntimeError('only smaller counters supported')
                    typ = 'ETI_COUNTER'
                    # For counters, the ett_idx slot carries the max value.
                    ett_idx = t.get('maxValue')
                else:
                    rep = 0
                    suf = ''
                    ett_idx = 0
                print(f' {c} {{ {typ}, {rep}, {size}, {fh}, {ett_idx} }}{suf}', file=o)
            elif is_fixed_string(t):
                print(f' {c} {{ ETI_STRING, 0, {size}, {fh}, 0 }}', file=o)
            elif is_var_string(t):
                # Length comes from a previously stored counter.
                k = m.get('counter')
                x = counters[k]
                print(f' {c} {{ ETI_VAR_STRING, {x}, {size}, {fh}, 0 }}', file=o)
            else:
                # Nested (possibly repeated) structure reference.
                a = m.get('type')
                fields_idx = fields2idx[a]
                k = m.get('counter')
                if k:
                    counter_off = counters[k]
                    typ = 'ETI_VAR_STRUCT'
                else:
                    counter_off = 0
                    typ = 'ETI_STRUCT'
                names_off = name2off[m.get('type')]
                ett_idx = sh[a]
                print(f' {c} {{ {typ}, {counter_off}, {names_off}, {fields_idx}, {ett_idx} }} // {m.get("name")}', file=o)
            i += 1
        # Each struct/message entry list is terminated by ETI_EOF.
        print(' , { ETI_EOF, 0, 0, 0, 0 }', file=o)
        i += 1
    print(' };', file=o)
    return fields2idx
+
def gen_template_table(min_templateid, n, ts, fields2idx, o=sys.stdout):
    """Emit tid2fidx[]: template ID (offset by min_templateid) -> index into
    the fields[] table, -1 for unallocated IDs."""
    entries = ['-1'] * n
    for tid, name in ts:
        entries[tid - min_templateid] = f'{fields2idx[name]} /* {name} */'
    body = '\n , '.join(entries)
    print(f' static const int16_t tid2fidx[] = {{\n {body}\n }};', file=o)
+
def gen_sizes_table(min_templateid, n, st, dt, ts, proto, o=sys.stdout):
    """Emit tid2size[]: for EOBI a single expected body length per template,
    for ETI/XTI a {min, max} length range per template."""
    is_eobi = proto.startswith('eobi')
    entries = ['0' if is_eobi else '{ 0, 0}'] * n
    min_s = get_min_sizes(st, dt)
    max_s = get_max_sizes(st, dt)
    for tid, name in ts:
        if is_eobi:
            entries[tid - min_templateid] = f'{max_s[name]} /* {name} */'
        else:
            entries[tid - min_templateid] = f'{{ {min_s[name]}, {max_s[name]} }} /* {name} */'
    body = '\n , '.join(entries)
    if is_eobi:
        print(f' static const uint32_t tid2size[] = {{\n {body}\n }};', file=o)
    else:
        print(f' static const uint32_t tid2size[{n}][2] = {{\n {body}\n }};', file=o)
+
+
+# yes, usage attribute of single fields depends on the context
+# otherwise, we could just put the information into the fields table
+# Example: EOBI.PacketHeader.MessageHeader.MsgSeqNum is unused whereas
+# it's required in the EOBI ExecutionSummary and other messages
def gen_usage_table(min_templateid, n, ts, ams, o=sys.stdout):
    """Emit usages[] (0=mandatory, 1=optional, 2=unused per field occurrence)
    and tid2uidx[] mapping a template ID to its first usages[] index.

    *ams* are the per-message ApplicationMessage elements; hidden members
    and padding (matched by the module-level pad_re) are skipped.
    """
    def map_usage(m):
        # Encode the usage attribute as a small integer for the C table.
        x = m.get('usage')
        if x == 'mandatory':
            return 0
        elif x == 'optional':
            return 1
        elif x == 'unused':
            return 2
        else:
            raise RuntimeError(f'unknown usage value: {x}')

    # h maps template ID -> starting index of its usage run in usages[].
    h = {}
    i = 0
    print(' static const unsigned char usages[] = {', file=o)
    for am in ams:
        name = am.get("name")
        tid = int(am.get('numericID'))
        print(f' // {name}', file=o)
        h[tid] = i
        for e in am:
            if e.tag == 'Group':
                # Repeating group: emit its members' usages inline.
                print(f' //// {e.get("type")}', file=o)
                for m in e:
                    if m.get('hidden') == 'true' or pad_re.match(m.get('name')):
                        continue
                    k = ' ' if i == 0 else ','
                    print(f' {k} {map_usage(m)} // {m.get("name")}#{i}', file=o)
                    i += 1
                print(' ///', file=o)
            else:
                if e.get('hidden') == 'true' or pad_re.match(e.get('name')):
                    continue
                k = ' ' if i == 0 else ','
                print(f' {k} {map_usage(e)} // {e.get("name")}#{i}', file=o)
                i += 1

    # NB: the last element is a filler to simplify the out-of-bounds check
    # (cf. the uidx DISSECTOR_ASSER_CMPUINIT() before the switch statement)
    # when the ETI_EOF of the message whose usage information comes last
    # is reached
    print(f' , 0 // filler', file=o)
    print(' };', file=o)
    xs = [ '-1' ] * n
    t2n = dict(ts)
    for tid, uidx in h.items():
        name = t2n[tid]
        xs[tid - min_templateid] = f'{uidx} /* {name} */'
    s = '\n , '.join(xs)
    print(f' static const int16_t tid2uidx[] = {{\n {s}\n }};', file=o)
+
+
def gen_dscp_table(proto, o=sys.stdout):
    """Emit the NULL-terminated bit-field pointer array used when
    dissecting the DSCP byte."""
    members = '\n'.join(f' &hf_{proto}_dscp_{suffix},'
                        for suffix in ('exec_summary', 'improved', 'widened'))
    print(f' static int * const dscp_bits[] = {{\n{members}\n NULL\n }};', file=o)
+
+
def mk_int_case(size, signed, proto):
    """Build the C switch-case that dissects an integer field of the given
    byte *size* and signedness for protocol *proto*, reporting NO_VALUE
    sentinels (INTn_MIN / UINTn_MAX) and usage violations via expert info."""
    bits = size * 8
    if signed:
        signed_str, unsigned_str, fmt_str = 'i', '', 'i'
        no_value_str = f'INT{bits}_MIN'
        hex_str = '0x80' + '00' * (size - 1)
    else:
        signed_str, unsigned_str, fmt_str = '', 'u', 'u'
        no_value_str = f'UINT{bits}_MAX'
        hex_str = '0x' + 'ff' * size
    if size == 2:
        size_str = 's'
    elif size == 4:
        size_str = 'l'
    elif size == 8:
        size_str = '64'
    type_str = f'g{unsigned_str}int{bits}'
    # proto_tree_add_*int64_* variants are needed for 8-byte values.
    pt_size = '64' if size == 8 else ''
    if size == 1:
        fn = f'tvb_get_g{unsigned_str}int8'
    else:
        fn = f'tvb_get_letoh{signed_str}{size_str}'
    return f'''case {size}:
 {{
 {type_str} x = {fn}(tvb, off);
 if (x == {no_value_str}) {{
 proto_item *e = proto_tree_add_{unsigned_str}int{pt_size}_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE ({hex_str})");
 if (!usages[uidx])
 expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing");
 }} else {{
 proto_item *e = proto_tree_add_{unsigned_str}int{pt_size}_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRI{fmt_str}{bits}, x);
 if (usages[uidx] == 2)
 expert_add_info_format(pinfo, e, &ei_{proto}_overused, "unused value is set");
 }}
 }}
 break;'''
+
+
def gen_dissect_structs(o=sys.stdout):
    """Emit the ETI_Type enum and the ETI_Field struct definition that the
    generated fields[] tables (cf. gen_fields_table) are built from."""
    print('''
enum ETI_Type {
 ETI_EOF,
 ETI_PADDING,
 ETI_UINT,
 ETI_INT,
 ETI_UINT_ENUM,
 ETI_INT_ENUM,
 ETI_COUNTER,
 ETI_FIXED_POINT,
 ETI_TIMESTAMP_NS,
 ETI_CHAR,
 ETI_STRING,
 ETI_VAR_STRING,
 ETI_STRUCT,
 ETI_VAR_STRUCT,
 ETI_DSCP
};

struct ETI_Field {
 uint8_t type;
 uint8_t counter_off; // offset into counter array
 // if ETI_COUNTER => storage
 // if ETI_VAR_STRING or ETI_VAR_STRUCT => load
 // to get length or repeat count
 // if ETI_FIXED_POINT: #fractional digits
 uint16_t size; // or offset into struct_names if ETI_STRUCT/ETI_VAR_STRUCT
 uint16_t field_handle_idx; // or index into fields array if ETI_STRUCT/ETI_VAR_STRUT
 uint16_t ett_idx; // index into ett array if ETI_STRUCT/ETI_VAR_STRUCT
 // or max value if ETI_COUNTER
};
''', file=o)
+
+def gen_dissect_fn(st, dt, ts, sh, ams, proto, o=sys.stdout):
+ if proto.startswith('eti') or proto.startswith('xti'):
+ bl_fn = 'tvb_get_letohl'
+ template_off = 4
+ else:
+ bl_fn = 'tvb_get_letohs'
+ template_off = 2
+ print(f'''/* This method dissects fully reassembled messages */
+static int
+dissect_{proto}_message(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
+{{
+ col_set_str(pinfo->cinfo, COL_PROTOCOL, "{proto.upper()}");
+ col_clear(pinfo->cinfo, COL_INFO);
+ guint16 templateid = tvb_get_letohs(tvb, {template_off});
+ const char *template_str = val_to_str_ext(templateid, &template_id_vals_ext, "Unknown {proto.upper()} template: 0x%04x");
+ col_add_fstr(pinfo->cinfo, COL_INFO, "%s", template_str);
+
+ /* create display subtree for the protocol */
+ proto_item *ti = proto_tree_add_item(tree, proto_{proto}, tvb, 0, -1, ENC_NA);
+ guint32 bodylen= {bl_fn}(tvb, 0);
+ proto_item_append_text(ti, ", %s (%" PRIu16 "), BodyLen: %u", template_str, templateid, bodylen);
+ proto_tree *root = proto_item_add_subtree(ti, ett_{proto}[0]);
+''', file=o)
+
+ min_templateid = ts[0][0]
+ max_templateid = ts[-1][0]
+ n = max_templateid - min_templateid + 1
+
+ fields2idx = gen_fields_table(st, dt, sh, o)
+ gen_template_table(min_templateid, n, ts, fields2idx, o)
+ gen_sizes_table(min_templateid, n, st, dt, ts, proto, o)
+ gen_usage_table(min_templateid, n, ts, ams, o)
+ gen_dscp_table(proto, o)
+
+ print(f''' if (templateid < {min_templateid} || templateid > {max_templateid}) {{
+ proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_template, tvb, {template_off}, 4,
+ "Template ID out of range: %" PRIu16, templateid);
+ return tvb_captured_length(tvb);
+ }}
+ int fidx = tid2fidx[templateid - {min_templateid}];
+ if (fidx == -1) {{
+ proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_template, tvb, {template_off}, 4,
+ "Unallocated Template ID: %" PRIu16, templateid);
+ return tvb_captured_length(tvb);
+ }}''', file=o)
+
+ if proto.startswith('eobi'):
+ print(f''' if (bodylen != tid2size[templateid - {min_templateid}]) {{
+ proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off},
+ "Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32, bodylen, tid2size[templateid - {min_templateid}]);
+ }}''', file=o)
+ else:
+ print(f''' if (bodylen < tid2size[templateid - {min_templateid}][0] || bodylen > tid2size[templateid - {min_templateid}][1]) {{
+ if (tid2size[templateid - {min_templateid}][0] != tid2size[templateid - {min_templateid}][1])
+ proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off},
+ "Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32 "..%" PRIu32, bodylen, tid2size[templateid - {min_templateid}][0], tid2size[templateid - {min_templateid}][1]);
+ else
+ proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off},
+ "Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32, bodylen, tid2size[templateid - {min_templateid}][0]);
+ }}
+ if (bodylen % 8)
+ proto_tree_add_expert_format(root, pinfo, &ei_{proto}_unaligned, tvb, 0, {template_off},
+ "BodyLen value of %" PRIu32 " is not divisible by 8", bodylen);
+''', file=o)
+
+ print(f''' int uidx = tid2uidx[templateid - {min_templateid}];
+ DISSECTOR_ASSERT_CMPINT(uidx, >=, 0);
+ DISSECTOR_ASSERT_CMPUINT(((size_t)uidx), <, (sizeof usages / sizeof usages[0]));
+''', file=o)
+
+ print(f''' int old_fidx = 0;
+ int old_uidx = 0;
+ unsigned top = 1;
+ unsigned counter[8] = {{0}};
+ unsigned off = 0;
+ unsigned struct_off = 0;
+ unsigned repeats = 0;
+ proto_tree *t = root;
+ while (top) {{
+ DISSECTOR_ASSERT_CMPINT(fidx, >=, 0);
+ DISSECTOR_ASSERT_CMPUINT(((size_t)fidx), <, (sizeof fields / sizeof fields[0]));
+ DISSECTOR_ASSERT_CMPINT(uidx, >=, 0);
+ DISSECTOR_ASSERT_CMPUINT(((size_t)uidx), <, (sizeof usages / sizeof usages[0]));
+
+ switch (fields[fidx].type) {{
+ case ETI_EOF:
+ DISSECTOR_ASSERT_CMPUINT(top, >=, 1);
+ DISSECTOR_ASSERT_CMPUINT(top, <=, 2);
+ if (t != root)
+ proto_item_set_len(t, off - struct_off);
+ if (repeats) {{
+ --repeats;
+ fidx = fields[old_fidx].field_handle_idx;
+ uidx = old_uidx;
+ t = proto_tree_add_subtree(root, tvb, off, -1, ett_{proto}[fields[old_fidx].ett_idx], NULL, &struct_names[fields[old_fidx].size]);
+ struct_off = off;
+ }} else {{
+ fidx = old_fidx + 1;
+ t = root;
+ --top;
+ }}
+ break;
+ case ETI_VAR_STRUCT:
+ case ETI_STRUCT:
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]);
+ repeats = fields[fidx].type == ETI_VAR_STRUCT ? counter[fields[fidx].counter_off] : 1;
+ if (repeats) {{
+ --repeats;
+ t = proto_tree_add_subtree(root, tvb, off, -1, ett_{proto}[fields[fidx].ett_idx], NULL, &struct_names[fields[fidx].size]);
+ struct_off = off;
+ old_fidx = fidx;
+ old_uidx = uidx;
+ fidx = fields[fidx].field_handle_idx;
+ DISSECTOR_ASSERT_CMPUINT(top, ==, 1);
+ ++top;
+ }} else {{
+ ++fidx;
+ }}
+ break;
+ case ETI_PADDING:
+ off += fields[fidx].size;
+ ++fidx;
+ break;
+ case ETI_CHAR:
+ proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_ASCII);
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_STRING:
+ {{
+ guint8 c = tvb_get_guint8(tvb, off);
+ if (c)
+ proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_ASCII);
+ else {{
+ proto_item *e = proto_tree_add_string(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, "NO_VALUE ('0x00...')");
+ if (!usages[uidx])
+ expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing");
+ }}
+ }}
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_VAR_STRING:
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]);
+ proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, counter[fields[fidx].counter_off], ENC_ASCII);
+ off += counter[fields[fidx].counter_off];
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_COUNTER:
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]);
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, <=, 2);
+ {{
+ switch (fields[fidx].size) {{
+ case 1:
+ {{
+ guint8 x = tvb_get_guint8(tvb, off);
+ if (x == UINT8_MAX) {{
+ proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0xff)");
+ counter[fields[fidx].counter_off] = 0;
+ }} else {{
+ proto_item *e = proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRIu8, x);
+ if (x > fields[fidx].ett_idx) {{
+ counter[fields[fidx].counter_off] = fields[fidx].ett_idx;
+ expert_add_info_format(pinfo, e, &ei_{proto}_counter_overflow, "Counter overflow: %" PRIu8 " > %" PRIu16, x, fields[fidx].ett_idx);
+ }} else {{
+ counter[fields[fidx].counter_off] = x;
+ }}
+ }}
+ }}
+ break;
+ case 2:
+ {{
+ guint16 x = tvb_get_letohs(tvb, off);
+ if (x == UINT16_MAX) {{
+ proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0xffff)");
+ counter[fields[fidx].counter_off] = 0;
+ }} else {{
+ proto_item *e = proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRIu16, x);
+ if (x > fields[fidx].ett_idx) {{
+ counter[fields[fidx].counter_off] = fields[fidx].ett_idx;
+ expert_add_info_format(pinfo, e, &ei_{proto}_counter_overflow, "Counter overflow: %" PRIu16 " > %" PRIu16, x, fields[fidx].ett_idx);
+ }} else {{
+ counter[fields[fidx].counter_off] = x;
+ }}
+ }}
+ }}
+ break;
+ }}
+ }}
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_UINT:
+ switch (fields[fidx].size) {{
+ {mk_int_case(1, False, proto)}
+ {mk_int_case(2, False, proto)}
+ {mk_int_case(4, False, proto)}
+ {mk_int_case(8, False, proto)}
+ }}
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_INT:
+ switch (fields[fidx].size) {{
+ {mk_int_case(1, True, proto)}
+ {mk_int_case(2, True, proto)}
+ {mk_int_case(4, True, proto)}
+ {mk_int_case(8, True, proto)}
+ }}
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_UINT_ENUM:
+ case ETI_INT_ENUM:
+ proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_LITTLE_ENDIAN);
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_FIXED_POINT:
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 8);
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, >, 0);
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <=, 16);
+ {{
+ gint64 x = tvb_get_letohi64(tvb, off);
+ if (x == INT64_MIN) {{
+ proto_item *e = proto_tree_add_int64_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0x8000000000000000)");
+ if (!usages[uidx])
+ expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing");
+ }} else {{
+ unsigned slack = fields[fidx].counter_off + 1;
+ if (x < 0)
+ slack += 1;
+ char s[21];
+ int n = snprintf(s, sizeof s, "%0*" PRIi64, slack, x);
+ DISSECTOR_ASSERT_CMPUINT(n, >, 0);
+ unsigned k = n - fields[fidx].counter_off;
+ proto_tree_add_int64_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%.*s.%s", k, s, s + k);
+ }}
+ }}
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_TIMESTAMP_NS:
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 8);
+ proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_LITTLE_ENDIAN | ENC_TIME_NSECS);
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ case ETI_DSCP:
+ DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 1);
+ proto_tree_add_bitmask(t, tvb, off, hf_{proto}[fields[fidx].field_handle_idx], ett_{proto}_dscp, dscp_bits, ENC_LITTLE_ENDIAN);
+ off += fields[fidx].size;
+ ++fidx;
+ ++uidx;
+ break;
+ }}
+ }}
+''', file=o)
+
+ print(''' return tvb_captured_length(tvb);
+}
+''', file=o)
+
+ print(f'''/* determine PDU length of protocol {proto.upper()} */
+static guint
+get_{proto}_message_len(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data _U_)
+{{
+ return (guint){bl_fn}(tvb, offset);
+}}
+''', file=o)
+
+ if proto.startswith('eobi'):
+ print(f'''static int
+dissect_{proto}(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ void *data)
+{{
+ return udp_dissect_pdus(tvb, pinfo, tree, 4, NULL,
+ get_{proto}_message_len, dissect_{proto}_message, data);
+}}
+''', file=o)
+ else:
+ print(f'''static int
+dissect_{proto}(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ void *data)
+{{
+ tcp_dissect_pdus(tvb, pinfo, tree, TRUE, 4 /* bytes to read for bodylen */,
+ get_{proto}_message_len, dissect_{proto}_message, data);
+ return tvb_captured_length(tvb);
+}}
+''', file=o)
+
+def gen_register_fn(st, dt, n2enum, proto, desc, o=sys.stdout):
+ '''Emit the C function proto_register_<proto>() to stream *o*.
+
+ Writes the hf field array, the expert-info records, protocol and
+ subtree registration. EOBI-style protocols additionally get
+ disabled by default (they share ports with other traffic).
+ '''
+ print(f'''void
+proto_register_{proto}(void)
+{{''', file=o)
+ gen_field_info(st, dt, n2enum, proto, o)
+
+ print(f''' static ei_register_info ei[] = {{
+ {{
+ &ei_{proto}_counter_overflow,
+ {{ "{proto}.counter_overflow", PI_PROTOCOL, PI_WARN, "Counter Overflow", EXPFILL }}
+ }},
+ {{
+ &ei_{proto}_invalid_template,
+ {{ "{proto}.invalid_template", PI_PROTOCOL, PI_ERROR, "Invalid Template ID", EXPFILL }}
+ }},
+ {{
+ &ei_{proto}_invalid_length,
+ {{ "{proto}.invalid_length", PI_PROTOCOL, PI_ERROR, "Invalid Body Length", EXPFILL }}
+ }},''', file=o)
+ # the "unaligned" expert info only exists for non-EOBI (stream) protocols,
+ # cf. the matching ei_..._unaligned emission in the dissect function
+ if not proto.startswith('eobi'):
+ print(f''' {{
+ &ei_{proto}_unaligned,
+ {{ "{proto}.unaligned", PI_PROTOCOL, PI_ERROR, "A Body Length not divisible by 8 leads to unaligned followup messages", EXPFILL }}
+ }},''', file=o)
+ print(f''' {{
+ &ei_{proto}_missing,
+ {{ "{proto}.missing", PI_PROTOCOL, PI_WARN, "A required value is missing", EXPFILL }}
+ }},
+ {{
+ &ei_{proto}_overused,
+ {{ "{proto}.overused", PI_PROTOCOL, PI_WARN, "An unused value is set", EXPFILL }}
+ }}
+ }};''', file=o)
+
+ print(f''' proto_{proto} = proto_register_protocol("{desc}",
+ "{proto.upper()}", "{proto}");''', file=o)
+
+ print(f''' expert_module_t *expert_{proto} = expert_register_protocol(proto_{proto});
+ expert_register_field_array(expert_{proto}, ei, array_length(ei));''', file=o)
+
+ print(f' proto_register_field_array(proto_{proto}, hf, array_length(hf));',
+ file=o)
+ gen_subtree_array(st, proto, o)
+ print(' proto_register_subtree_array(ett, array_length(ett));', file=o)
+ # EOBI shares its UDP ports with other feeds; keep it opt-in
+ if proto.startswith('eobi'):
+ print(f' proto_disable_by_default(proto_{proto});', file=o)
+ print('}\n', file=o)
+
+
+def gen_handoff_fn(proto, o=sys.stdout):
+ print(f'''void
+proto_reg_handoff_{proto}(void)
+{{
+ dissector_handle_t {proto}_handle = create_dissector_handle(dissect_{proto},
+ proto_{proto});
+
+ // cf. N7 Network Access Guide, e.g.
+ // https://www.xetra.com/xetra-en/technology/t7/system-documentation/release10-0/Release-10.0-2692700?frag=2692724
+ // https://www.xetra.com/resource/blob/2762078/388b727972b5122945eedf0e63c36920/data/N7-Network-Access-Guide-v2.0.59.pdf
+
+''', file=o)
+ if proto.startswith('eti'):
+ print(f''' // NB: can only be called once for a port/handle pair ...
+ // dissector_add_uint_with_preference("tcp.port", 19006 /* LF PROD */, eti_handle);
+
+ dissector_add_uint("tcp.port", 19006 /* LF PROD */, {proto}_handle);
+ dissector_add_uint("tcp.port", 19043 /* PS PROD */, {proto}_handle);
+ dissector_add_uint("tcp.port", 19506 /* LF SIMU */, {proto}_handle);
+ dissector_add_uint("tcp.port", 19543 /* PS SIMU */, {proto}_handle);''', file=o)
+ elif proto.startswith('xti'):
+ print(f''' // NB: unfortunately, Cash-ETI shares the same ports as Derivatives-ETI ...
+ // We thus can't really add a well-know port for XTI.
+ // Use Wireshark's `Decode As...` or tshark's `-d tcp.port=19043,xti` feature
+ // to switch from ETI to XTI dissection.
+ dissector_add_uint_with_preference("tcp.port", 19042 /* dummy */, {proto}_handle);''', file=o)
+ else:
+ print(f''' static const int ports[] = {{
+ 59000, // Snapshot EUREX US-allowed PROD
+ 59001, // Incremental EUREX US-allowed PROD
+ 59032, // Snapshot EUREX US-restricted PROD
+ 59033, // Incremental EUREX US-restricted PROD
+ 59500, // Snapshot EUREX US-allowed SIMU
+ 59501, // Incremental EUREX US-allowed SIMU
+ 59532, // Snapshot EUREX US-restricted SIMU
+ 59533, // Incremental EUREX US-restricted SIMU
+
+ 57000, // Snapshot FX US-allowed PROD
+ 57001, // Incremental FX US-allowed PROD
+ 57032, // Snapshot FX US-restricted PROD
+ 57033, // Incremental FX US-restricted PROD
+ 57500, // Snapshot FX US-allowed SIMU
+ 57501, // Incremental FX US-allowed SIMU
+ 57532, // Snapshot FX US-restricted SIMU
+ 57533, // Incremental FX US-restricted SIMU
+
+ 59000, // Snapshot Xetra PROD
+ 59001, // Incremental Xetra PROD
+ 59500, // Snapshot Xetra SIMU
+ 59501, // Incremental Xetra SIMU
+
+ 56000, // Snapshot Boerse Frankfurt PROD
+ 56001, // Incremental Boerse Frankfurt PROD
+ 56500, // Snapshot Boerse Frankfurt SIMU
+ 56501 // Incremental Boerse Frankfurt SIMU
+ }};
+ for (unsigned i = 0; i < sizeof ports / sizeof ports[0]; ++i)
+ dissector_add_uint("udp.port", ports[i], {proto}_handle);''', file=o)
+ print('}', file=o)
+
+def is_int(t):
+ if t is not None:
+ r = t.get('rootType')
+ return r in ('int', 'floatDecimal') or (r == 'String' and t.get('size') == '1')
+ return False
+
+def is_enum(t):
+ if t is not None:
+ r = t.get('rootType')
+ if r == 'int' or (r == 'String' and t.get('size') == '1'):
+ return t.find('ValidValue') is not None
+ return False
+
+def is_fixed_point(t):
+ return t is not None and t.get('rootType') == 'floatDecimal'
+
+def is_timestamp_ns(t):
+ return t is not None and t.get('type') == 'UTCTimestamp'
+
+def is_dscp(t):
+ return t is not None and t.get('name') == 'DSCP'
+
+pad_re = re.compile('Pad[1-9]')
+
+def is_padding(t):
+ if t is not None:
+ return t.get('rootType') == 'String' and pad_re.match(t.get('name'))
+ return False
+
+def is_fixed_string(t):
+ if t is not None:
+ return t.get('rootType') in ('String', 'data') and not t.get('variableSize')
+ return False
+
+def is_var_string(t):
+ if t is not None:
+ return t.get('rootType') in ('String', 'data') and t.get('variableSize') is not None
+ return False
+
+def is_unsigned(t):
+ v = t.get('minValue')
+ return v is not None and not v.startswith('-')
+
+def is_counter(t):
+ return t.get('type') == 'Counter'
+
+def type_to_fmt(t):
+ if is_padding(t):
+ return f'{t.get("size")}x'
+ elif is_int(t):
+ n = int(t.get('size'))
+ if n == 1:
+ return 'B'
+ else:
+ if n == 2:
+ c = 'h'
+ elif n == 4:
+ c = 'i'
+ elif n == 8:
+ c = 'q'
+ else:
+ raise ValueError(f'unknown int size {n}')
+ if is_unsigned(t):
+ c = c.upper()
+ return c
+ elif is_fixed_string(t):
+ return f'{t.get("size")}s'
+ else:
+ return '?'
+
+def pp_int_type(t):
+ if not is_int(t):
+ return None
+ s = 'i'
+ if is_unsigned(t):
+ s = 'u'
+ n = int(t.get('size'))
+ s += str(n)
+ return s
+
+def is_elementary(t):
+ return t is not None and t.get('counter') is None
+
+def group_members(e, dt):
+ xs = []
+ ms = []
+ for m in e:
+ t = dt.get(m.get('type'))
+ if is_elementary(t):
+ ms.append(m)
+ else:
+ if ms:
+ xs.append(ms)
+ ms = []
+ xs.append([m])
+ if ms:
+ xs.append(ms)
+ return xs
+
+
+
+def parse_args():
+ p = argparse.ArgumentParser(description='Generate Wireshark Dissector for ETI/EOBI style protocol specifictions')
+ p.add_argument('filename', help='protocol description XML file')
+ p.add_argument('--proto', default='eti',
+ help='short protocol name (default: %(default)s)')
+ p.add_argument('--desc', '-d',
+ default='Enhanced Trading Interface',
+ help='protocol description (default: %(default)s)')
+ p.add_argument('--output', '-o', default='-',
+ help='output filename (default: stdout)')
+ args = p.parse_args()
+ return args
+
+def main():
+ args = parse_args()
+ filename = args.filename
+ d = ET.parse(filename)
+ o = sys.stdout if args.output == '-' else open(args.output, 'w')
+ proto = args.proto
+
+ version = (d.getroot().get('version'), d.getroot().get('subVersion'))
+ desc = f'{args.desc} {version[0]}'
+
+ dt = get_data_types(d)
+ st = get_structs(d)
+ used = get_used_types(st)
+ for k in list(dt.keys()):
+ if k not in used:
+ del dt[k]
+ ts = get_templates(st)
+ ams = d.getroot().find('ApplicationMessages')
+
+ gen_header(proto, desc, o)
+ print(f'static int proto_{proto} = -1;', file=o)
+ gen_field_handles(st, dt, proto, o)
+ n2enum = gen_enums(dt, ts, o)
+ gen_dissect_structs(o)
+ sh = gen_subtree_handles(st, proto, o)
+ gen_dissect_fn(st, dt, ts, sh, ams, proto, o)
+ gen_register_fn(st, dt, n2enum, proto, desc, o)
+ gen_handoff_fn(proto, o)
+
+
+# script entry point; main() returns None, so sys.exit() yields status 0
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tools/extract_asn1_from_spec.pl b/tools/extract_asn1_from_spec.pl
new file mode 100755
index 0000000..f542632
--- /dev/null
+++ b/tools/extract_asn1_from_spec.pl
@@ -0,0 +1,125 @@
+#!/usr/bin/perl
+#
+# This script extracts the ASN1 definition from TS 36.331/36.355/25.331/38.331/37.355/36.413/38.413/36.423/38.423
+# /38.463/38.473 , and generates asn files that can be processed by asn2wrs
+# First download the specification from 3gpp.org as a word document and open it
+# Then in "view" menu, select normal, draft or web layout (any kind that removes page header and footers)
+# Finally save the document as a text file
+# Example with TS 36.331: "perl extract_asn1_from_spec.pl 36331-xxx.txt"
+# It should generate: EUTRA-RRC-Definitions.asn, EUTRA-UE-Variables.asn and EUTRA-InterNodeDefinitions
+#
+# Copyright 2011 Vincent Helfre and Erwan Yvin
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+use warnings;
+# first (only) argument: the 3GPP spec saved as a plain text file
+$input_file = $ARGV[0];
+# full "3GPP TS ... V..." header line, filled in by extract_spec_version()
+$version = 0;
+
+sub extract_spec_version;
+sub extract_asn1;
+
+open(INPUT_FILE, "< $input_file") or die "Can not open file $input_file";
+
+extract_spec_version();
+
+extract_asn1();
+
+close(INPUT_FILE);
+
+# This subroutine extracts the version of the specification
+sub extract_spec_version {
+ my $line;
+ # scan forward until the "3GPP TS <number> V..." header line is found;
+ # the whole line (including the version number) is kept in $version
+ while($line = <INPUT_FILE>){
+ if($line =~ m/3GPP TS ((25|36|38)\.331|(36|37)\.355|(36|38)\.413|(36|38)\.423|36\.(443|444)|(36|38)\.455|38\.463|38\.473|37\.483) V/){
+ $version = $line;
+ return;
+ }
+ }
+}
+
+# This subroutine copies the text delimited by -- ASN1START and -- ASN1STOP in INPUT_FILE
+# and copies it into OUTPUT_FILE.
+# The OUTPUT_FILE is opened on encounter of the keyword "DEFINITIONS AUTOMATIC TAGS"
+# and closed on encounter of the keyword "END"
+sub extract_asn1 {
+ my $line;
+ my $prev_line;
+ my $is_asn1 = 0; # inside an -- ASN1START .. -- ASN1STOP region
+ my $output_file_name = 0;
+ my $file_name_found = 0; # an OUTPUT_FILE is currently open
+
+ while($line = <INPUT_FILE>){
+ if ($line =~ m/-- ASN1STOP/) {
+ $is_asn1 = 0;
+ }
+
+ # special-case module names that do not match the generic
+ # "DEFINITIONS AUTOMATIC TAGS" detection below
+ if(($file_name_found == 0) && ($line =~ m/^LPP-PDU-Definitions/)){
+ $output_file_name = "LPP-PDU-Definitions.asn";
+ print "generating $output_file_name\n";
+ open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+ $file_name_found = 1;
+ syswrite OUTPUT_FILE,"-- "."$version"."\n";
+ }
+
+ if(($file_name_found == 0) && ($line =~ m/^LPP-Broadcast-Definitions/)){
+ $output_file_name = "LPP-Broadcast-Definitions.asn";
+ print "generating $output_file_name\n";
+ open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+ $file_name_found = 1;
+ syswrite OUTPUT_FILE,"-- "."$version"."\n";
+ }
+
+ if(($file_name_found == 0) && ($line =~ m/SonTransfer-IEs/)){
+ $output_file_name = "S1AP-SonTransfer-IEs.asn";
+ print "generating $output_file_name\n";
+ open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+ $is_asn1 = 1;
+ $file_name_found = 1;
+ syswrite OUTPUT_FILE,"-- "."$version"."\n";
+ }
+
+ # module announced by an OID line: the module name is on the previous line
+ if(($file_name_found == 0) && ($line =~ m/itu-t \(0\) identified-organization \(4\) etsi \(0\) mobileDomain \(0\)/)){
+ ($output_file_name) = ($prev_line =~ m/^([a-zA-Z0-9\-]+)\s/);
+ $output_file_name = "$output_file_name".".asn";
+ print "generating $output_file_name\n";
+ open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+ $is_asn1 = 1;
+ $file_name_found = 1;
+ syswrite OUTPUT_FILE,"-- "."$version"."\n";
+ syswrite OUTPUT_FILE,"$prev_line";
+ }
+
+ # generic case: "<ModuleName> DEFINITIONS AUTOMATIC TAGS ::="
+ if(($file_name_found == 0) && ($line =~ m/DEFINITIONS AUTOMATIC TAGS ::=/)){
+ ($output_file_name) = ($line =~ m/^([a-zA-Z0-9\-]+)\s+DEFINITIONS AUTOMATIC TAGS ::=/);
+ $output_file_name = "$output_file_name".".asn";
+ print "generating $output_file_name\n";
+ open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
+ $is_asn1 = 1;
+ $file_name_found = 1;
+ syswrite OUTPUT_FILE,"-- "."$version"."\n";
+ }
+
+ # "END" at line start terminates the current module / output file
+ if (($line =~ /^END[\r\n]/) && (defined fileno OUTPUT_FILE)){
+ syswrite OUTPUT_FILE,"$line";
+ close(OUTPUT_FILE);
+ $is_asn1 = 0;
+ $file_name_found = 0;
+ }
+
+ if (($is_asn1 == 1) && (defined fileno OUTPUT_FILE)){
+ syswrite OUTPUT_FILE,"$line";
+ }
+
+ # NB: checked after the copy above so the ASN1START marker itself
+ # is not written to the output
+ if ($line =~ m/-- ASN1START/) {
+ $is_asn1 = 1;
+ }
+
+ $prev_line = $line;
+ }
+}
+
diff --git a/tools/fix-encoding-args.pl b/tools/fix-encoding-args.pl
new file mode 100755
index 0000000..04151a2
--- /dev/null
+++ b/tools/fix-encoding-args.pl
@@ -0,0 +1,698 @@
+#!/usr/bin/env perl
+#
+# Copyright 2011, William Meier <wmeier[AT]newsguy.com>
+#
+# A program to fix encoding args for certain Wireshark API function calls
+# from TRUE/FALSE to ENC_?? as appropriate (and possible)
+# - proto_tree_add_item
+# - proto_tree_add_bits_item
+# - proto_tree_add_bits_ret_val
+# - proto_tree_add_bitmask
+# - proto_tree_add_bitmask_text !! ToDo: encoding arg not last arg
+# - tvb_get_bits
+# - tvb_get_bits16
+# - tvb_get_bits24
+# - tvb_get_bits32
+# - tvb_get_bits64
+# - ptvcursor_add
+# - ptvcursor_add_no_advance
+# - ptvcursor_add_with_subtree !! ToDo: encoding arg not last arg
+#
+# ToDo: Rework program so that it can better be used to *validate* encoding-args
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+use strict;
+use warnings;
+
+use Getopt::Long;
+
+# Conversion "Requests"
+
+# Standard conversions
+# endianness booleans -> explicit ENC_ endianness values
+my $searchReplaceFalseTrueHRef =
+ {
+ "FALSE" => "ENC_BIG_ENDIAN",
+ "0" => "ENC_BIG_ENDIAN",
+ "TRUE" => "ENC_LITTLE_ENDIAN",
+ "1" => "ENC_LITTLE_ENDIAN"
+ };
+
+# any legacy encoding arg -> ENC_NA (for types without endianness)
+my $searchReplaceEncNAHRef =
+ {
+ "FALSE" => "ENC_NA",
+ "0" => "ENC_NA",
+ "TRUE" => "ENC_NA",
+ "1" => "ENC_NA",
+ "ENC_LITTLE_ENDIAN" => "ENC_NA",
+ "ENC_BIG_ENDIAN" => "ENC_NA",
+ "ENC_ASCII|ENC_NA" => "ENC_NA",
+ "ENC_ASCII | ENC_NA" => "ENC_NA"
+ };
+
+# register_dissector_table() case-sensitivity argument
+my $searchReplaceDissectorTable =
+ {
+ "FALSE" => "STRING_CASE_SENSITIVE",
+ "0" => "STRING_CASE_SENSITIVE",
+ "BASE_NONE" => "STRING_CASE_SENSITIVE",
+ "TRUE" => "STRING_CASE_INSENSITIVE",
+ "1" => "STRING_CASE_INSENSITIVE"
+ };
+
+# ---------------------------------------------------------------------
+# Conversion "request" structure
+# (
+# [ <list of field types for which this conversion request applies> ],
+# { <hash of desired encoding arg conversions> }
+# )
+
+my @types_NA =
+ (
+ [ qw (FT_NONE FT_BYTES FT_ETHER FT_IPv6 FT_IPXNET FT_OID FT_REL_OID)],
+ $searchReplaceEncNAHRef
+ );
+
+my @types_INT =
+ (
+ [ qw (FT_UINT8 FT_UINT16 FT_UINT24 FT_UINT32 FT_UINT64 FT_INT8
+ FT_INT16 FT_INT24 FT_INT32 FT_INT64 FT_FLOAT FT_DOUBLE)],
+ $searchReplaceFalseTrueHRef
+ );
+
+my @types_MISC =
+ (
+ [ qw (FT_BOOLEAN FT_IPv4 FT_GUID FT_EUI64)],
+ $searchReplaceFalseTrueHRef
+ );
+
+my @types_STRING =
+ (
+ [qw (FT_STRING FT_STRINGZ)],
+ {
+ "FALSE" => "ENC_ASCII",
+ "0" => "ENC_ASCII",
+ "TRUE" => "ENC_ASCII",
+ "1" => "ENC_ASCII",
+ "ENC_LITTLE_ENDIAN" => "ENC_ASCII",
+ "ENC_BIG_ENDIAN" => "ENC_ASCII",
+ "ENC_NA" => "ENC_ASCII",
+
+ "ENC_ASCII|ENC_LITTLE_ENDIAN" => "ENC_ASCII",
+ "ENC_ASCII|ENC_BIG_ENDIAN" => "ENC_ASCII",
+
+ "ENC_UTF_8|ENC_LITTLE_ENDIAN" => "ENC_UTF_8",
+ "ENC_UTF_8|ENC_BIG_ENDIAN" => "ENC_UTF_8",
+
+ "ENC_EBCDIC|ENC_LITTLE_ENDIAN" => "ENC_EBCDIC",
+ "ENC_EBCDIC|ENC_BIG_ENDIAN" => "ENC_EBCDIC",
+ }
+ );
+
+my @types_UINT_STRING =
+ (
+ [qw (FT_UINT_STRING)],
+ {
+ "FALSE" => "ENC_ASCII|ENC_BIG_ENDIAN",
+ "0" => "ENC_ASCII|ENC_BIG_ENDIAN",
+ "TRUE" => "ENC_ASCII|ENC_LITTLE_ENDIAN",
+ "1" => "ENC_ASCII|ENC_LITTLE_ENDIAN",
+ "ENC_BIG_ENDIAN" => "ENC_ASCII|ENC_BIG_ENDIAN",
+ "ENC_LITTLE_ENDIAN" => "ENC_ASCII|ENC_LITTLE_ENDIAN",
+ "ENC_ASCII|ENC_NA" => "ENC_ASCII|ENC_BIG_ENDIAN",
+ "ENC_ASCII" => "ENC_ASCII|ENC_BIG_ENDIAN",
+ "ENC_NA" => "ENC_ASCII|ENC_BIG_ENDIAN"
+ }
+ );
+
+my @types_REG_PROTO =
+ (
+ [ qw (REG_PROTO)],
+ $searchReplaceEncNAHRef
+ );
+
+# ---------------------------------------------------------------------
+
+# functions whose last argument is the encoding arg (for --action=find-all)
+my @findAllFunctionList =
+## proto_tree_add_bitmask_text !! ToDo: encoding arg not last arg
+## ptvcursor_add_with_subtree !! ToDo: encoding Arg not last arg
+ qw (
+ proto_tree_add_item
+ proto_tree_add_bits_item
+ proto_tree_add_bits_ret_val
+ proto_tree_add_bitmask
+ proto_tree_add_bitmask_with_flags
+ tvb_get_bits
+ tvb_get_bits16
+ tvb_get_bits24
+ tvb_get_bits32
+ tvb_get_bits64
+ ptvcursor_add
+ ptvcursor_add_no_advance
+ register_dissector_table
+ );
+
+# ---------------------------------------------------------------------
+#
+# MAIN
+#
+my $writeFlag = ''; # --write: emit <file>.encoding-arg-fixes
+my $helpFlag = '';
+my $action = 'fix-all'; # fix-all | find-all
+
+my $result = GetOptions(
+ 'action=s' => \$action,
+ 'write' => \$writeFlag,
+ 'help|?' => \$helpFlag
+ );
+
+# at least one FILENAME argument is required
+if (!$result || $helpFlag || !$ARGV[0]) {
+ usage();
+}
+
+if (($action ne 'fix-all') && ($action ne 'find-all')) {
+ usage();
+}
+
+# Print the command line synopsis and exit with status 1.
+sub usage {
+ print "\nUsage: $0 [--action=fix-all|find-all] [--write] FILENAME [...]\n\n";
+ print " --action = fix-all (default)\n";
+ print " Fix <certain-fcn-names>() encoding arg when possible in FILENAME(s)\n";
+ print " Fixes (if any) are listed on stdout)\n\n";
+ print " --write create FILENAME.encoding-arg-fixes (original file with fixes)\n";
+ print " (effective only for fix-all)\n";
+ print "\n";
+ print " --action = find-all\n";
+ print " Find all occurrences of <certain-fcn-names>() statements)\n";
+ print " highlighting the 'encoding' arg\n";
+ exit(1);
+}
+
+# Read through the files; fix up encoding parameter of proto_tree_add_item() calls
+# Essentially:
+# For each file {
+# . Create a hash of the hf_index_names & associated field types from the entries in hf[]
+# . For each requested "conversion request" {
+# . . For each hf[] entry hf_index_name with a field type in a set of specified field types {
+# . . . For each proto_tree_add_item() statement
+# . . . . - replace encoding arg in proto_tree_add_item(..., hf_index_name, ..., 'encoding-arg')
+# specific values with new values
+# . . . . - print the statement showing the change
+# . . . }
+# . . }
+# . }
+# . If requested and if replacements done: write new file "orig-filename.encoding-arg-fixes"
+# }
+#
+# Note: The proto_tree_add_item() encoding arg will be converted only if
+# the hf_index_name referenced is in one of the entries in hf[] in the same file
+
+# total number of replacements/matches; doubles as the exit status
+my $found_total = 0;
+
+while (my $fileName = $ARGV[0]) {
+ shift;
+ my $fileContents = '';
+
+ die "No such file: \"$fileName\"\n" if (! -e $fileName);
+
+ # delete leading './'
+ $fileName =~ s{ ^ \. / } {}xo;
+ ##print "$fileName\n";
+
+ # Read in the file (ouch, but it's easier that way)
+ open(FCI, "<", $fileName) || die("Couldn't open $fileName");
+ while (<FCI>) {
+ $fileContents .= $_;
+ }
+ close(FCI);
+
+ # Create a hash of the hf[] entries (name_index_name=>field_type)
+ my $hfArrayEntryFieldTypeHRef = find_hf_array_entries(\$fileContents, $fileName);
+
+ if ($action eq "fix-all") {
+
+ # Find and replace: <fcn_name_pattern>() encoding arg in $fileContents for:
+ # - hf[] entries with specified field types;
+ # - 'proto' as returned from proto_register_protocol()
+ my $fcn_name = "(?:proto_tree_add_item|ptvcursor_add(?:_no_advance)?)";
+ my $found = 0;
+ $found += fix_encoding_args_by_hf_type(1, \@types_NA, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+ $found += fix_encoding_args_by_hf_type(1, \@types_INT, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+ $found += fix_encoding_args_by_hf_type(1, \@types_MISC, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+ $found += fix_encoding_args_by_hf_type(1, \@types_STRING, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+ $found += fix_encoding_args_by_hf_type(1, \@types_UINT_STRING, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+ $found += fix_encoding_args_by_hf_type(1, \@types_REG_PROTO, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
+
+ # Find and replace: alters <fcn_name>() encoding arg in $fileContents
+ $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bits_(?:item|ret_val)", \$fileContents, $fileName);
+ $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bitmask", \$fileContents, $fileName);
+ $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bitmask_with_flags", \$fileContents, $fileName);
+ $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "tvb_get_bits(?:16|24|32|64)?", \$fileContents, $fileName);
+ $found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "tvb_get_(?:ephemeral_)?unicode_string[z]?", \$fileContents, $fileName);
+
+ $found += fix_dissector_table_args(1, $searchReplaceDissectorTable, "register_dissector_table", \$fileContents, $fileName);
+
+ # If desired and if any changes, write out the changed version to a file
+ if (($writeFlag) && ($found > 0)) {
+ open(FCO, ">", $fileName . ".encoding-arg-fixes");
+# open(FCO, ">", $fileName );
+ print FCO "$fileContents";
+ close(FCO);
+ }
+ $found_total += $found;
+ }
+
+ if ($action eq "find-all") {
+ # Find all proto_tree_add_item() statements
+ # and output same highlighting the encoding arg
+ $found_total += find_all(\@findAllFunctionList, \$fileContents, $fileName);
+ }
+
+} # while
+
+exit $found_total;
+
+# ---------------------------------------------------------------------
+# Create a hash containing an entry (hf_index_name => field_type) for each hf[]entry.
+# also: create an entry in the hash for the 'protocol name' variable (proto... => FT_PROTOCOL)
+# returns: ref to the hash
+
+sub find_hf_array_entries {
+ my ($fileContentsRef, $fileName) = @_;
+ # Returns: ref to hash (hf_index_name => field_type,
+ # plus proto_variable_name => "REG_PROTO");
+ # duplicates are mapped to "???" to suppress any substitution for them.
+
+ # The below Regexp is based on one from:
+ # https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811
+ # It is in the public domain.
+ # A complicated regex which matches C-style comments.
+ my $CCommentRegEx = qr{ / [*] [^*]* [*]+ (?: [^/*] [^*]* [*]+ )* / }xo;
+
+ # hf[] entry regex (to extract an hf_index_name and associated field type)
+ my $hfArrayFieldTypeRegEx = qr {
+ \{
+ \s*
+ &\s*([A-Z0-9_\[\]-]+) # &hf
+ \s*,\s*
+ \{\s*
+ .+? # (a bit dangerous)
+ \s*,\s*
+ (FT_[A-Z0-9_]+) # field type
+ \s*,\s*
+ .+?
+ \s*,\s*
+ HFILL # HFILL
+ }xios;
+
+ # create a copy of $fileContents with comments removed
+ my $fileContentsWithoutComments = $$fileContentsRef;
+ $fileContentsWithoutComments =~ s {$CCommentRegEx} []xg;
+
+ # find all the hf[] entries (searching $fileContentsWithoutComments).
+ # Create a hash keyed by the hf_index_name with the associated value being the field_type
+ my %hfArrayEntryFieldType;
+ while ($fileContentsWithoutComments =~ m{ $hfArrayFieldTypeRegEx }xgis) {
+# print "$1 $2\n";
+ if (exists $hfArrayEntryFieldType{$1}) {
+ printf "%-35.35s: ? duplicate hf[] entry: no fixes done for: $1; manual action may be req'd\n", $fileName;
+ $hfArrayEntryFieldType{$1} = "???"; # prevent any substitutions for this hf_index_name
+ } else {
+ $hfArrayEntryFieldType{$1} = $2;
+ }
+ }
+
+ # pre-process contents to fold multiple lines and speed up matching.
+ $fileContentsWithoutComments =~ s/\s*=\s*/=/gs;
+ $fileContentsWithoutComments =~ s/^\s+//g;
+
+ # RegEx to get "proto" variable name
+ my $protoRegEx = qr /
+ ^ # note m modifier below
+ (
+ [a-zA-Z0-9_]+
+ )
+ =
+ proto_register_protocol\b
+ /xom;
+
+ # Find all registered protocols
+ while ($fileContentsWithoutComments =~ m { $protoRegEx }xgom ) {
+ ##print "$1\n";
+ if (exists $hfArrayEntryFieldType{$1}) {
+ printf "%-35.35s: ? duplicate 'proto': no fixes done for: $1; manual action may be req'd\n", $fileName;
+ $hfArrayEntryFieldType{$1} = "???"; # prevent any substitutions for this protocol
+ } else {
+ $hfArrayEntryFieldType{$1} = "REG_PROTO";
+ }
+ }
+
+ return \%hfArrayEntryFieldType;
+}
+
+# ---------------------------------------------------------------------
+# fix_encoding_args
+# Substitute new values for the specified <fcn_name>() encoding arg values
+# when the encoding arg is the *last* arg of the call to fcn_name
+# args:
+# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash);
+# ref to hash containing search (keys) and replacement (values) for encoding arg
+# fcn_name string
+# ref to string containing file contents
+# filename string
+#
+{ # block begin
+
+ # shared variables (shared between fix_encoding_args and its
+ # substitution callback patsubx below)
+ my $fileName;
+ my $searchReplaceHRef;
+ my $found;
+
+ sub fix_encoding_args {
+ (my $subFlag, $searchReplaceHRef, my $fcn_name, my $fileContentsRef, $fileName) = @_;
+
+ my $encArgPat;
+
+ if ($subFlag == 1) {
+ # just match for <fcn_name>() statements which have an encoding arg matching one of the
+ # keys in the searchReplace hash.
+ # Escape any "|" characters in the keys
+ # and then create "alternatives" string containing all the resulting key strings. Ex: "(A|B|C\|D|..."
+ $encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef;
+ } elsif ($subFlag == 3) {
+ # match for <fcn_name>() statements for any value of the encoding parameter
+ # IOW: find all the <fcn_name> statements
+ $encArgPat = qr / [^,)]+? /x;
+ }
+
+ # build the complete pattern
+ my $patRegEx = qr /
+ # part 1: $1
+ (
+ (?:^|=) # don't try to handle fcn_name call when arg of another fcn call
+ \s*
+ $fcn_name \s* \(
+ [^;]+? # a bit dangerous
+ ,\s*
+ )
+
+ # part 2: $2
+ # exact match of pattern (including spaces)
+ ((?-x)$encArgPat)
+
+ # part 3: $3
+ (
+ \s* \)
+ \s* ;
+ )
+ /xms; # m for ^ above
+
+ ##print "$patRegEx\n";
+
+ ## Match and substitute as specified
+ $found = 0;
+
+ $$fileContentsRef =~ s/ $patRegEx /patsubx($1,$2,$3)/xges;
+
+ return $found;
+ }
+
+ # Called from fix_encoding_args to determine replacement string when a regex match is encountered
+ # $_[0]: part 1
+ # $_[1]: part 2: encoding arg
+ # $_[2]: part 3
+ # lookup the desired replacement value for the encoding arg
+ # print match string showing and highlighting the encoding arg replacement
+ # return "replacement" string
+ sub patsubx {
+ $found += 1;
+ my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???";
+ my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]);
+ $str =~ tr/\t\n\r/ /d;
+ printf "%s: $str\n", $fileName;
+ return $_[0] . $substr . $_[2];
+ }
+} # block end
+
+# ---------------------------------------------------------------------
+# fix_encoding_args_by_hf_type
+#
+# Substitute new values for certain proto_tree_add_item() encoding arg
+# values (for specified hf field types)
+# Variants: search for and display for "exceptions" to allowed encoding arg values;
+# search for and display all encoding arg values
+# args:
+# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash);
+# 2: search for "exceptions" to allowed encoding arg values (values in search hash);
+# 3: search for all encoding arg values
+# ref to array containing two elements:
+# - ref to array containing hf[] types to be processed (FT_STRING, etc)
+# - ref to hash containing search (keys) and replacement (values) for encoding arg
+# fcn_name string
+# ref to string containing file contents
+# ref to hfArrayEntries hash (key: hf name; value: field type)
+# filename string
+
+{ # block begin
+
+# shared variables
+ my $fileName;
+ my $searchReplaceHRef;
+ my $found;
+ my $hf_field_type;
+
+ sub fix_encoding_args_by_hf_type {
+
+ (my $subFlag, my $mapArg, my $fcn_name, my $fileContentsRef, my $hfArrayEntryFieldTypeHRef, $fileName) = @_;
+
+ my $hf_index_name;
+ my $hfTypesARef;
+ my $encArgPat;
+
+ $hfTypesARef = $$mapArg[0];
+ $searchReplaceHRef = $$mapArg[1];
+
+ my %hfTypes;
+ @hfTypes{@$hfTypesARef}=();
+
+ # set up the encoding arg match pattern
+ if ($subFlag == 1) {
+ # just match for <fcn_name>() statements which have an encoding arg matching one of the
+ # keys in the searchReplace hash.
+ # Escape any "|" characters in the keys
+ # and then create "alternatives" string containing all the resulting key strings. Ex: "A|B|C\|D|..."
+ $encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef;
+ } elsif ($subFlag == 2) {
+ # Find all the <fcn_name>() statements wherein the encoding arg is a value other than
+ # one of the "replace" values.
+ # Uses zero-length negative-lookahead to find <fcn_name>() statements for which the encoding
+ # arg is something other than one of the provided replace values.
+ # Escape any "|" characters in the values to be matched
+ # and then create "alternatives" string containing all the value strings. Ex: "A|B|C\|D|..."
+ my $match_str = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } values %$searchReplaceHRef;
+ $encArgPat = qr /
+ (?! # negative zero-length look-ahead
+ \s*
+ (?: $match_str ) # alternatives we don't want to match
+ \s*
+ )
+ [^,)]+? # OK: enoding arg is other than one of the alternatives:
+ # match to end of the arg
+ /x;
+ } elsif ($subFlag == 3) {
+ # match for <fcn_name>() statements for any value of the encoding parameter
+ # IOW: find all the proto_tree_add_item statements with an hf entry of the desired types
+ $encArgPat = qr / [^,)]+? /x;
+ }
+
+ my @hf_index_names;
+
+ # For each hf[] entry which matches a type in %hfTypes do replacements
+ $found = 0;
+ foreach my $key (keys %$hfArrayEntryFieldTypeHRef) {
+ $hf_index_name = $key;
+ $hf_field_type = $$hfArrayEntryFieldTypeHRef{$key};
+ ##printf "--> %-35.35s: %s\n", $hf_index_name, $hf_field_type;
+
+ next unless exists $hfTypes{$hf_field_type}; # Do we want to process for this hf[] entry type ?
+
+ ##print "\n$hf_index_name $hf_field_type\n";
+ push @hf_index_names, $hf_index_name;
+ }
+
+ if (@hf_index_names) {
+ # build the complete pattern
+ my $hf_index_names_re = join('|', @hf_index_names);
+ $hf_index_names_re =~ s/\[|\]/\\$&/g; # escape any "[" or "]" characters
+ my $patRegEx = qr /
+ # part 1: $1
+ (
+ $fcn_name \s* \(
+ [^;]+?
+ ,\s*
+ (?:$hf_index_names_re)
+ \s*,
+ [^;]+
+ ,\s*
+ )
+
+ # part 2: $2
+ # exact match of pattern (including spaces)
+ ((?-x)$encArgPat)
+
+ # part 3: $3
+ (
+ \s* \)
+ \s* ;
+ )
+ /xs;
+
+ ##print "\n$patRegEx\n";
+
+ ## Match and substitute as specified
+ $$fileContentsRef =~ s/ $patRegEx /patsub($1,$2,$3)/xges;
+
+ }
+
+ return $found;
+ }
+
+ # Called from fix_encoding_args to determine replacement string when a regex match is encountered
+ # $_[0]: part 1
+ # $_[1]: part 2: encoding arg
+ # $_[2]: part 3
+ # lookup the desired replacement value for the encoding arg
+ # print match string showing and highlighting the encoding arg replacement
+ # return "replacement" string
+ sub patsub {
+ $found += 1;
+ my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???";
+ my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]);
+ $str =~ tr/\t\n\r/ /d;
+ printf "%s: %-17.17s $str\n", $fileName, $hf_field_type . ":";
+ return $_[0] . $substr . $_[2];
+ }
+} # block end
+
+# ---------------------------------------------------------------------
+# fix_dissector_table_args
+# Substitute new values for the specified <fcn_name>() encoding arg values
+# when the encoding arg is the *last* arg of the call to fcn_name
+# args:
+# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash);
+# ref to hash containing search (keys) and replacement (values) for encoding arg
+# fcn_name string
+# ref to string containing file contents
+# filename string
+#
+{ # block begin
+
+ # shared variables
+ my $fileName;
+ my $searchReplaceHRef;
+ my $found;
+
+ sub fix_dissector_table_args {
+ (my $subFlag, $searchReplaceHRef, my $fcn_name, my $fileContentsRef, $fileName) = @_;
+
+ my $encArgPat;
+
+ if ($subFlag == 1) {
+ # just match for <fcn_name>() statements which have an encoding arg matching one of the
+ # keys in the searchReplace hash.
+ # Escape any "|" characters in the keys
+ # and then create "alternatives" string containing all the resulting key strings. Ex: "(A|B|C\|D|..."
+ $encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef;
+ } elsif ($subFlag == 3) {
+ # match for <fcn_name>() statements for any value of the encoding parameter
+ # IOW: find all the <fcn_name> statements
+ $encArgPat = qr / [^,)]+? /x;
+ }
+
+ # build the complete pattern
+ my $patRegEx = qr /
+ # part 1: $1
+ (
+ (?:^|=) # don't try to handle fcn_name call when arg of another fcn call
+ \s*
+ $fcn_name \s* \(
+ [^;]+? # a bit dangerous
+ ,\s*
+ FT_STRING[A-Z]*
+ ,\s*
+ )
+
+ # part 2: $2
+ # exact match of pattern (including spaces)
+ ((?-x)$encArgPat)
+
+ # part 3: $3
+ (
+ \s* \)
+ \s* ;
+ )
+ /xms; # m for ^ above
+
+ ##print "$patRegEx\n";
+
+ ## Match and substitute as specified
+ $found = 0;
+
+ $$fileContentsRef =~ s/ $patRegEx /patsuby($1,$2,$3)/xges;
+
+ return $found;
+ }
+
+ # Called from fix_encoding_args to determine replacement string when a regex match is encountered
+ # $_[0]: part 1
+ # $_[1]: part 2: encoding arg
+ # $_[2]: part 3
+ # lookup the desired replacement value for the encoding arg
+ # print match string showing and highlighting the encoding arg replacement
+ # return "replacement" string
+ sub patsuby {
+ $found += 1;
+ my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???";
+ my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]);
+ $str =~ tr/\t\n\r/ /d;
+ printf "%s: $str\n", $fileName;
+ return $_[0] . $substr . $_[2];
+ }
+} # block end
+
+# ---------------------------------------------------------------------
+# Find all <fcnList> statements
+# and output same highlighting the encoding arg
+# Currently: encoding arg is matched as the *last* arg of the function call
+
+sub find_all {
+ my( $fcnListARef, $fileContentsRef, $fileName) = @_;
+
+ my $found = 0;
+ my $fcnListPat = join "|", @$fcnListARef;
+ my $pat = qr /
+ (
+ (?:$fcnListPat) \s* \(
+ [^;]+
+ , \s*
+ )
+ (
+ [^ \t,)]+?
+ )
+ (
+ \s* \)
+ \s* ;
+ )
+ /xs;
+
+ while ($$fileContentsRef =~ / $pat /xgso) {
+ my $str = "${1}[[${2}]]${3}\n";
+ $str =~ tr/\t\n\r/ /d;
+ $str =~ s/ \s+ / /xg;
+ print "$fileName: $str\n";
+ $found += 1;
+ }
+ return $found;
+}
+
diff --git a/tools/fuzz-test.sh b/tools/fuzz-test.sh
new file mode 100755
index 0000000..b63f647
--- /dev/null
+++ b/tools/fuzz-test.sh
@@ -0,0 +1,317 @@
+#!/bin/bash
+#
+# Fuzz-testing script for TShark
+#
+# This script uses Editcap to add random errors ("fuzz") to a set of
+# capture files specified on the command line. It runs TShark on
+# each fuzzed file and checks for errors. The files are processed
+# repeatedly until an error is found.
+#
+# Copyright 2013 Gerald Combs <gerald@wireshark.org>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+TEST_TYPE="fuzz"
+# shellcheck source=tools/test-common.sh
+. "$( dirname "$0" )"/test-common.sh || exit 1
+
+# Sanity check to make sure we can find our plugins. Zero or less disables.
+MIN_PLUGINS=0
+
+# Did we catch a signal or time out?
+DONE=false
+
+# Currently running children
+RUNNER_PIDS=
+
+# Perform a two-pass analysis on the capture file?
+TWO_PASS=
+
+# Specific config profile ?
+CONFIG_PROFILE=
+
+# Run under valgrind ?
+VALGRIND=0
+
+# Abort on UTF-8 encoding errors
+CHECK_UTF_8="--log-fatal-domains=UTF-8 "
+
+# Run under AddressSanitizer ?
+ASAN=$CONFIGURED_WITH_ASAN
+
+# Don't skip any byte from being changed
+CHANGE_OFFSET=0
+
+# The maximum permitted amount of memory leaked. Eventually this should be
+# worked down to zero, but right now that would fail on every single capture.
+# Only has effect when running under valgrind.
+MAX_LEAK=$(( 1024 * 100 ))
+
+# Our maximum run time.
+RUN_START_SECONDS=$SECONDS
+RUN_MAX_SECONDS=$(( RUN_START_SECONDS + 86400 ))
+
+# To do: add options for file names and limits
+while getopts "2b:C:d:e:agp:P:o:t:U" OPTCHAR ; do
+ case $OPTCHAR in
+ a) ASAN=1 ;;
+ 2) TWO_PASS="-2 " ;;
+ b) WIRESHARK_BIN_DIR=$OPTARG ;;
+ C) CONFIG_PROFILE="-C $OPTARG " ;;
+ d) TMP_DIR=$OPTARG ;;
+ e) ERR_PROB=$OPTARG ;;
+ g) VALGRIND=1 ;;
+ p) MAX_PASSES=$OPTARG ;;
+ P) MIN_PLUGINS=$OPTARG ;;
+ o) CHANGE_OFFSET=$OPTARG ;;
+ t) RUN_MAX_SECONDS=$(( RUN_START_SECONDS + OPTARG )) ;;
+ U) CHECK_UTF_8= ;; # disable
+ *) printf "Unknown option %s\n" "$OPTCHAR"
+ esac
+done
+shift $((OPTIND - 1))
+
+### usually you won't have to change anything below this line ###
+
+ws_bind_exec_paths
+ws_check_exec "$TSHARK" "$EDITCAP" "$CAPINFOS" "$DATE" "$TMP_DIR"
+
+COMMON_ARGS="${CONFIG_PROFILE}${TWO_PASS}${CHECK_UTF_8}"
+KEEP=
+PACKET_RANGE=
+if [ $VALGRIND -eq 1 ]; then
+ RUNNER=$( dirname "$0" )"/valgrind-wireshark.sh"
+ COMMON_ARGS="-b $WIRESHARK_BIN_DIR $COMMON_ARGS"
+ declare -a RUNNER_ARGS=("" "-T")
+ # Valgrind requires more resources, so permit 1.5x memory and 3x time
+ # (1.5x time is too small for a few large captures in the menagerie)
+ MAX_CPU_TIME=$(( 3 * MAX_CPU_TIME ))
+ MAX_VMEM=$(( 3 * MAX_VMEM / 2 ))
+ # Valgrind is slow. Trim captures to the first 10k packets so that
+ # we don't time out.
+ KEEP=-r
+ PACKET_RANGE=1-10000
+else
+ # Not using valgrind, use regular tshark.
+ # TShark arguments (you won't have to change these)
+ # n Disable network object name resolution
+ # V Print a view of the details of the packet rather than a one-line summary of the packet
+ # x Cause TShark to print a hex and ASCII dump of the packet data after printing the summary or details
+ # r Read packet data from the following infile
+ RUNNER="$TSHARK"
+ declare -a RUNNER_ARGS=("-nVxr" "-nr")
+ # Running with a read filter but without generating the tree exposes some
+ # "More than 100000 items in tree" bugs.
+ # Not sure if we want to add even more cycles to the fuzz bot's work load...
+ #declare -a RUNNER_ARGS=("${CONFIG_PROFILE}${TWO_PASS}-nVxr" "${CONFIG_PROFILE}${TWO_PASS}-nr" "-Yframe ${CONFIG_PROFILE}${TWO_PASS}-nr")
+fi
+
+
+# Make sure we have a valid test set
+FOUND=0
+for CF in "$@" ; do
+ if [ "$OSTYPE" == "cygwin" ] ; then
+ CF=$( cygpath --windows "$CF" )
+ fi
+ "$CAPINFOS" "$CF" > /dev/null 2>&1 && FOUND=1
+ if [ $FOUND -eq 1 ] ; then break ; fi
+done
+
+if [ $FOUND -eq 0 ] ; then
+ cat <<FIN
+Error: No valid capture files found.
+
+Usage: $( basename "$0" ) [-2] [-b bin_dir] [-C config_profile] [-d work_dir] [-e error probability] [-o changes offset] [-g] [-a] [-p passes] capture file 1 [capture file 2]...
+FIN
+ exit 1
+fi
+
+PLUGIN_COUNT=$( $TSHARK -G plugins | grep -c dissector )
+if [ "$MIN_PLUGINS" -gt 0 ] && [ "$PLUGIN_COUNT" -lt "$MIN_PLUGINS" ] ; then
+ echo "Warning: Found fewer plugins than expected ($PLUGIN_COUNT vs $MIN_PLUGINS)."
+ exit 1
+fi
+
+if [ $ASAN -ne 0 ]; then
+ echo -n "ASan enabled. Virtual memory limit is "
+ ulimit -v
+else
+ echo "ASan disabled. Virtual memory limit is $MAX_VMEM"
+fi
+
+HOWMANY="forever"
+if [ "$MAX_PASSES" -gt 0 ]; then
+ HOWMANY="$MAX_PASSES passes"
+fi
+echo -n "Running $RUNNER $COMMON_ARGS with args: "
+printf "\"%s\"\n" "${RUNNER_ARGS[@]}"
+echo "($HOWMANY)"
+echo ""
+
+# Clean up on <ctrl>C, etc
+trap_all() {
+ printf '\n\nCaught signal. Exiting.\n'
+ rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"*
+ exit 0
+}
+
+trap_abrt() {
+ for RUNNER_PID in $RUNNER_PIDS ; do
+ kill -ABRT "$RUNNER_PID"
+ done
+ trap_all
+}
+
+trap trap_all HUP INT TERM
+trap trap_abrt ABRT
+
+# Iterate over our capture files.
+PASS=0
+while { [ $PASS -lt "$MAX_PASSES" ] || [ "$MAX_PASSES" -lt 1 ]; } && ! $DONE ; do
+ PASS=$(( PASS+1 ))
+ echo "Pass $PASS:"
+ RUN=0
+
+ for CF in "$@" ; do
+ if $DONE; then
+ break # We caught a signal or timed out
+ fi
+ RUN=$(( RUN + 1 ))
+ if [ $(( RUN % 50 )) -eq 0 ] ; then
+ echo " [Pass $PASS]"
+ fi
+ if [ "$OSTYPE" == "cygwin" ] ; then
+ CF=$( cygpath --windows "$CF" )
+ fi
+
+ "$CAPINFOS" "$CF" > /dev/null 2> "$TMP_DIR/$ERR_FILE"
+ RETVAL=$?
+ if [ $RETVAL -eq 1 ] || [ $RETVAL -eq 2 ] ; then
+ echo "Not a valid capture file"
+ rm -f "$TMP_DIR/$ERR_FILE"
+ continue
+ elif [ $RETVAL -ne 0 ] && ! $DONE ; then
+ # Some other error
+ ws_exit_error
+ fi
+
+ # Choose a random subset of large captures.
+ KEEP=
+ PACKET_RANGE=
+ CF_PACKETS=$( "$CAPINFOS" -T -r -c "$CF" | cut -f2 )
+ if [[ CF_PACKETS -gt $MAX_FUZZ_PACKETS ]] ; then
+ START_PACKET=$(( CF_PACKETS - MAX_FUZZ_PACKETS ))
+ START_PACKET=$( shuf --input-range=1-$START_PACKET --head-count=1 )
+ END_PACKET=$(( START_PACKET + MAX_FUZZ_PACKETS ))
+ KEEP=-r
+ PACKET_RANGE="$START_PACKET-$END_PACKET"
+ printf " Fuzzing packets %d-%d of %d\n" "$START_PACKET" "$END_PACKET" "$CF_PACKETS"
+ fi
+
+ DISSECTOR_BUG=0
+ VG_ERR_CNT=0
+
+ printf " %s: " "$( basename "$CF" )"
+ # shellcheck disable=SC2086
+ "$EDITCAP" -E "$ERR_PROB" -o "$CHANGE_OFFSET" $KEEP "$CF" "$TMP_DIR/$TMP_FILE" $PACKET_RANGE > /dev/null 2>&1
+ RETVAL=$?
+ if [ $RETVAL -ne 0 ] ; then
+ # shellcheck disable=SC2086
+ "$EDITCAP" -E "$ERR_PROB" -o "$CHANGE_OFFSET" $KEEP -T ether "$CF" "$TMP_DIR/$TMP_FILE" $PACKET_RANGE \
+ > /dev/null 2>&1
+ RETVAL=$?
+ if [ $RETVAL -ne 0 ] ; then
+ echo "Invalid format for editcap"
+ continue
+ fi
+ fi
+
+ FILE_START_SECONDS=$SECONDS
+ RUNNER_PIDS=
+ RUNNER_ERR_FILES=
+ for ARGS in "${RUNNER_ARGS[@]}" ; do
+ if $DONE; then
+ break # We caught a signal
+ fi
+ echo -n "($ARGS) "
+
+ # Run in a child process with limits.
+ (
+ # Set some limits to the child processes, e.g. stop it if
+ # it's running longer than MAX_CPU_TIME seconds. (ulimit
+ # is not supported well on cygwin - it shows some warnings -
+ # and the features we use may not all be supported on some
+ # UN*X platforms.)
+ ulimit -S -t "$MAX_CPU_TIME" -s "$MAX_STACK"
+
+ # Allow core files to be generated
+ ulimit -c unlimited
+
+ # Don't enable ulimit -v when using ASAN. See
+ # https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
+ if [ $ASAN -eq 0 ]; then
+ ulimit -S -v "$MAX_VMEM"
+ fi
+
+ # shellcheck disable=SC2016
+ SUBSHELL_PID=$($SHELL -c 'echo $PPID')
+
+ printf 'Command and args: %s %s %s\n' "$RUNNER" "$COMMON_ARGS" "$ARGS" > "$TMP_DIR/$ERR_FILE.$SUBSHELL_PID"
+ # shellcheck disable=SC2086
+ "$RUNNER" $COMMON_ARGS $ARGS "$TMP_DIR/$TMP_FILE" \
+ > /dev/null 2>> "$TMP_DIR/$ERR_FILE.$SUBSHELL_PID"
+ ) &
+ RUNNER_PID=$!
+ RUNNER_PIDS="$RUNNER_PIDS $RUNNER_PID"
+ RUNNER_ERR_FILES="$RUNNER_ERR_FILES $TMP_DIR/$ERR_FILE.$RUNNER_PID"
+
+ if [ $SECONDS -ge $RUN_MAX_SECONDS ] ; then
+ printf "\nStopping after %d seconds.\n" $(( SECONDS - RUN_START_SECONDS ))
+ DONE=true
+ fi
+ done
+
+ for RUNNER_PID in $RUNNER_PIDS ; do
+ wait "$RUNNER_PID"
+ RUNNER_RETVAL=$?
+ mv "$TMP_DIR/$ERR_FILE.$RUNNER_PID" "$TMP_DIR/$ERR_FILE"
+
+ # Uncomment the next two lines to enable dissector bug
+ # checking.
+ #grep -i "dissector bug" $TMP_DIR/$ERR_FILE \
+ # > /dev/null 2>&1 && DISSECTOR_BUG=1
+
+ if [ $VALGRIND -eq 1 ] && ! $DONE; then
+ VG_ERR_CNT=$( grep "ERROR SUMMARY:" "$TMP_DIR/$ERR_FILE" | cut -f4 -d' ' )
+ VG_DEF_LEAKED=$( grep "definitely lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
+ VG_IND_LEAKED=$( grep "indirectly lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
+ VG_TOTAL_LEAKED=$(( VG_DEF_LEAKED + VG_IND_LEAKED ))
+ if [ $RUNNER_RETVAL -ne 0 ] ; then
+ echo "General Valgrind failure."
+ VG_ERR_CNT=1
+ elif [ "$VG_TOTAL_LEAKED" -gt "$MAX_LEAK" ] ; then
+ echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)."
+ echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)." >> "$TMP_DIR/$ERR_FILE"
+ VG_ERR_CNT=1
+ fi
+ if grep -q "Valgrind cannot continue" "$TMP_DIR/$ERR_FILE" ; then
+ echo "Valgrind unable to continue."
+ VG_ERR_CNT=-1
+ fi
+ fi
+
+ if ! $DONE && { [ $RUNNER_RETVAL -ne 0 ] || [ $DISSECTOR_BUG -ne 0 ] || [ $VG_ERR_CNT -ne 0 ]; } ; then
+ # shellcheck disable=SC2086
+ rm -f $RUNNER_ERR_FILES
+ ws_exit_error
+ fi
+ done
+
+ printf " OK (%s seconds)\\n" $(( SECONDS - FILE_START_SECONDS ))
+ rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"
+ done
+done
diff --git a/tools/gen-bugnote b/tools/gen-bugnote
new file mode 100755
index 0000000..786886e
--- /dev/null
+++ b/tools/gen-bugnote
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Given a Wireshark issue ID, fetch its title and prepare an entry suitable
+# for pasting into the release notes. Requires curl and jq.
+#
+# Usage: gen-bugnote <issue number>
+#
+# Copyright 2013 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+gitlab_issue_url_pfx="https://gitlab.com/api/v4/projects/wireshark%2Fwireshark/issues"
+issue_id="${1#\#}" # Strip leading "#"
+
+case "$OSTYPE" in
+ darwin*)
+ clipboard_cmd="pbcopy -Pascii"
+ ;;
+ cygwin*)
+ clipboard_cmd="cat > /dev/clipboard"
+ ;;
+ linux*)
+ clipboard_cmd="xsel --clipboard"
+ ;;
+ *)
+ echo "Unable to copy to clipboard"
+ clipboard_cmd="cat > /dev/null"
+ ;;
+esac
+
+if [ -z "$issue_id" ] ; then
+ echo "Usage: $( basename "$0" ) <issue id>"
+ exit 1
+fi
+
+issue_title=$(
+ curl -s -o - "${gitlab_issue_url_pfx}/$issue_id" \
+ | jq '.title'
+ )
+
+# We can escape backslashes in jq's --raw-output or we can trim quotes off
+# its plain output.
+issue_title="${issue_title%\"}"
+issue_title="${issue_title#\"}"
+trailing_period=""
+if [[ ! ${issue_title: -1} =~ [[:punct:]] ]] ; then
+ trailing_period="."
+fi
+
+printf "* %s%s wsbuglink:${issue_id}[].\\n" "$issue_title" "$trailing_period" \
+ | $clipboard_cmd
+
+echo "Copied $issue_id: $issue_title"
diff --git a/tools/generate-bacnet-vendors.py b/tools/generate-bacnet-vendors.py
new file mode 100755
index 0000000..14fc530
--- /dev/null
+++ b/tools/generate-bacnet-vendors.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+'''
+ Copyright 2023 Jaap Keuter <jaap.keuter@xs4all.nl>
+ based on work by Anish Bhatt <anish@chelsio.com>
+
+SPDX-License-Identifier: GPL-2.0-or-later
+'''
+
+import sys
+import urllib.request, urllib.error, urllib.parse
+from bs4 import BeautifulSoup
+
+req_headers = { 'User-Agent': 'Wireshark generate-bacnet-vendors' }
+try:
+ req = urllib.request.Request("https://bacnet.org/assigned-vendor-ids/", headers=req_headers)
+ response = urllib.request.urlopen(req)
+ lines = response.read().decode()
+ response.close()
+except urllib.error.HTTPError as err:
+ exit_msg("HTTP error fetching {0}: {1}".format(url, err.reason))
+except urllib.error.URLError as err:
+ exit_msg("URL error fetching {0}: {1}".format(url, err.reason))
+except OSError as err:
+ exit_msg("OS error fetching {0}".format(url, err.strerror))
+except Exception:
+ exit_msg("Unexpected error:", sys.exc_info()[0])
+
+soup = BeautifulSoup(lines, "html.parser")
+table = soup.find('table')
+rows = table.findAll('tr')
+
+entry = "static const value_string\nBACnetVendorIdentifiers [] = {"
+
+for tr in rows:
+ cols = tr.findAll('td')
+ for index,td in enumerate(cols[0:2]):
+ text = ''.join(td.find(string=True))
+ if index == 0:
+ entry = " { %4s" % text
+ else:
+ entry += ", \"%s\" }," % text.rstrip()
+ print(entry)
+
+entry = " { 0, NULL }\n};"
+print(entry)
+
diff --git a/tools/generate-dissector.py b/tools/generate-dissector.py
new file mode 100755
index 0000000..4d8ab37
--- /dev/null
+++ b/tools/generate-dissector.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019, Dario Lombardo <lomato@gmail.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# This script generates a Wireshark skeleton dissector, based on the example in the doc/ directory.
+#
+# Example usage:
+#
+# generate-dissector.py --name "My Self" --email "myself@example.com" --protoname "The dumb protocol"
+# --protoshortname DUMB --protoabbrev dumb --license GPL-2.0-or-later --years "2019-2020"
+#
+
+import argparse
+from datetime import datetime
+import os
+
+
+parser = argparse.ArgumentParser(description='The Wireshark Dissector Generator')
+parser.add_argument("--name", help="The author of the dissector", required=True)
+parser.add_argument("--email", help="The email address of the author", required=True)
+parser.add_argument("--protoname", help="The name of the protocol", required=True)
+parser.add_argument("--protoshortname", help="The protocol short name", required=True)
+parser.add_argument("--protoabbrev", help="The protocol abbreviation", required=True)
+parser.add_argument("--license", help="The license for this dissector (please use a SPDX-License-Identifier). If omitted, %(default)s will be used", default="GPL-2.0-or-later")
+parser.add_argument("--years", help="Years of validity for the license. If omitted, the current year will be used", default=str(datetime.now().year))
+parser.add_argument("-f", "--force", action='store_true', help="Force overwriting the dissector file if it already exists")
+parser.add_argument("-p", "--plugin", action='store_true', help="Create as a plugin. Default is to create in epan")
+
+
+def wsdir():
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+
+
+def output_dir(args):
+ if args.plugin:
+ os.makedirs(os.path.join(wsdir(), "plugins/epan/" + args.protoabbrev), exist_ok=True)
+ return os.path.join(wsdir(), "plugins/epan/" + args.protoabbrev)
+ return os.path.join(wsdir(), "epan/dissectors")
+
+
+def output_file(args):
+ return os.path.join(output_dir(args), "packet-" + args.protoabbrev + ".c")
+
+
+def read_skeleton(filename):
+ skeletonfile = os.path.join(wsdir(), "doc/" + filename)
+ print("Reading skeleton file: " + skeletonfile)
+ return open(skeletonfile).read()
+
+
+def replace_fields(buffer, args):
+ print("Replacing fields in skeleton")
+ output = buffer\
+ .replace("YOUR_NAME", args.name)\
+ .replace("YOUR_EMAIL_ADDRESS", args.email)\
+ .replace("PROTONAME", args.protoname)\
+ .replace("PROTOSHORTNAME", args.protoshortname)\
+ .replace("PROTOABBREV", args.protoabbrev)\
+ .replace("FIELDNAME", "Sample Field")\
+ .replace("FIELDABBREV", "sample_field")\
+ .replace("FT_FIELDTYPE", "FT_STRING")\
+ .replace("FIELDDISPLAY", "BASE_NONE")\
+ .replace("FIELDCONVERT", "NULL")\
+ .replace("BITMASK", "0x0")\
+ .replace("FIELDDESCR", "NULL")\
+ .replace("MAX_NEEDED_FOR_HEURISTICS", "1")\
+ .replace("TEST_HEURISTICS_FAIL", "0")\
+ .replace("ENC_xxx", "ENC_NA")\
+ .replace("EXPERTABBREV", "expert")\
+ .replace("PI_GROUP", "PI_PROTOCOL")\
+ .replace("PI_SEVERITY", "PI_ERROR")\
+ .replace("TEST_EXPERT_condition", "0")\
+ .replace("const char *subtree", "\"\"")\
+ .replace("LICENSE", args.license)\
+ .replace("YEARS", args.years)
+
+ return output
+
+
+def write_dissector(buffer, args):
+ ofile = output_file(args)
+ if os.path.isfile(ofile) and not args.force:
+ raise Exception("The file " + ofile + " already exists. You're likely overwriting an existing dissector.")
+ print("Writing output file: " + ofile)
+ return open(ofile, "w").write(buffer)
+
+
+def patch_makefile(args):
+ if args.plugin:
+ cmakefile = os.path.join(wsdir(), "CMakeLists.txt")
+ patchline = "\t\tplugins/epan/" + args.protoabbrev
+ groupstart = "set(PLUGIN_SRC_DIRS"
+ else:
+ cmakefile = os.path.join(wsdir(), "epan/dissectors/CMakeLists.txt")
+ patchline = "\t${CMAKE_CURRENT_SOURCE_DIR}/packet-" + args.protoabbrev + ".c"
+ groupstart = "set(DISSECTOR_SRC"
+ print("Patching makefile: " + cmakefile)
+ output = ""
+ in_group = False
+ patched = False
+ for line in open(cmakefile):
+ line_strip = line.strip()
+ if in_group and line_strip == ")":
+ in_group = False
+ if in_group and not patched and line_strip > patchline:
+ output += patchline + "\n"
+ patched = True
+ if line_strip == groupstart:
+ in_group = True
+ if line_strip != patchline:
+ output += line
+ open(cmakefile, "w").write(output)
+
+
+def write_plugin_makefile(args):
+ if not args.plugin:
+ return True
+ buffer = replace_fields(read_skeleton("CMakeLists-PROTOABBREV.txt"), args)
+ ofile = os.path.join(output_dir(args), "CMakeLists.txt")
+ print("Writing output file: " + ofile)
+ return open(ofile, "w").write(buffer)
+
+
+def print_header():
+ print("")
+ print("**************************************************")
+ print("* Wireshark skeleton dissector generator *")
+ print("* *")
+ print("* Generate a new dissector for your protocol *")
+ print("* starting from the skeleton provided in the *")
+ print("* doc directory. *")
+ print("* *")
+ print("* Copyright 2019 Dario Lombardo *")
+ print("**************************************************")
+ print("")
+
+
+def print_trailer(args):
+ print("")
+ print("The skeleton for the dissector of the " + args.protoshortname + " protocol has been generated.")
+ print("Please review/extend it to match your specific criterias.")
+ print("")
+
+
+if __name__ == '__main__':
+ print_header()
+ args = parser.parse_args()
+ buffer = replace_fields(read_skeleton("packet-PROTOABBREV.c"), args)
+ write_dissector(buffer, args)
+ patch_makefile(args)
+ write_plugin_makefile(args)
+ print_trailer(args)
diff --git a/tools/generate-nl80211-fields.py b/tools/generate-nl80211-fields.py
new file mode 100755
index 0000000..dfa8faa
--- /dev/null
+++ b/tools/generate-nl80211-fields.py
@@ -0,0 +1,373 @@
+#!/usr/bin/env python3
+# Parses the nl80211.h interface and generate appropriate enums and fields
+# (value_string) for packet-netlink-nl80211.c
+#
+# Copyright (c) 2017, Peter Wu <peter@lekensteyn.nl>
+# Copyright (c) 2018, Mikael Kanstrup <mikael.kanstrup@sony.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+#
+# To update the dissector source file, run this from the source directory:
+#
+# python tools/generate-nl80211-fields.py --update
+#
+
+import argparse
+import re
+import requests
+import sys
+
+# Begin of comment, followed by the actual array definition
+HEADER = "/* Definitions from linux/nl80211.h {{{ */\n"
+FOOTER = "/* }}} */\n"
+# Enums to extract from the header file
+EXPORT_ENUMS = {
+ # 'enum_name': ('field_name', field_type', 'field_blurb')
+ 'nl80211_commands': ('Command', 'FT_UINT8', '"Generic Netlink Command"'),
+ 'nl80211_attrs': (None, None, None),
+ 'nl80211_iftype': (None, None, None),
+ 'nl80211_sta_flags': (None, None, None),
+ 'nl80211_sta_p2p_ps_status': ('Attribute Value', 'FT_UINT8', None),
+ 'nl80211_he_gi': (None, None, None),
+ 'nl80211_he_ru_alloc': (None, None, None),
+ 'nl80211_rate_info': (None, None, None),
+ 'nl80211_sta_bss_param': (None, None, None),
+ 'nl80211_sta_info': (None, None, None),
+ 'nl80211_tid_stats': (None, None, None),
+ 'nl80211_txq_stats': (None, None, None),
+ 'nl80211_mpath_flags': (None, None, None),
+ 'nl80211_mpath_info': (None, None, None),
+ 'nl80211_band_iftype_attr': (None, None, None),
+ 'nl80211_band_attr': (None, None, None),
+ 'nl80211_wmm_rule': (None, None, None),
+ 'nl80211_frequency_attr': (None, None, None),
+ 'nl80211_bitrate_attr': (None, None, None),
+ 'nl80211_reg_initiator': ('Attribute Value', 'FT_UINT8', None),
+ 'nl80211_reg_type': ('Attribute Value', 'FT_UINT8', None),
+ 'nl80211_reg_rule_attr': (None, None, None),
+ 'nl80211_sched_scan_match_attr': (None, None, None),
+ 'nl80211_reg_rule_flags': (None, None, None),
+ 'nl80211_dfs_regions': ('Attribute Value', 'FT_UINT8', None),
+ 'nl80211_user_reg_hint_type': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_survey_info': (None, None, None),
+ 'nl80211_mntr_flags': (None, None, None),
+ 'nl80211_mesh_power_mode': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_meshconf_params': (None, None, None),
+ 'nl80211_mesh_setup_params': (None, None, None),
+ 'nl80211_txq_attr': (None, None, None),
+ 'nl80211_ac': (None, None, None),
+ 'nl80211_channel_type': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_key_mode': (None, None, None),
+ 'nl80211_chan_width': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_bss_scan_width': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_bss': (None, None, None),
+ 'nl80211_bss_status': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_auth_type': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_key_type': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_mfp': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_wpa_versions': (None, None, None),
+ 'nl80211_key_default_types': (None, None, None),
+ 'nl80211_key_attributes': (None, None, None),
+ 'nl80211_tx_rate_attributes': (None, None, None),
+ 'nl80211_txrate_gi': (None, None, None),
+ 'nl80211_band': (None, None, None),
+ 'nl80211_ps_state': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_attr_cqm': (None, None, None),
+ 'nl80211_cqm_rssi_threshold_event': (None, None, None),
+ 'nl80211_tx_power_setting': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_packet_pattern_attr': (None, None, None),
+ 'nl80211_wowlan_triggers': (None, None, None),
+ 'nl80211_wowlan_tcp_attrs': (None, None, None),
+ 'nl80211_attr_coalesce_rule': (None, None, None),
+ 'nl80211_coalesce_condition': (None, None, None),
+ 'nl80211_iface_limit_attrs': (None, None, None),
+ 'nl80211_if_combination_attrs': (None, None, None),
+ 'nl80211_plink_state': ('Attribute Value', 'FT_UINT8', None),
+ 'plink_actions': ('Attribute Value', 'FT_UINT8', None),
+ 'nl80211_rekey_data': (None, None, None),
+ 'nl80211_hidden_ssid': (None, None, None),
+ 'nl80211_sta_wme_attr': (None, None, None),
+ 'nl80211_pmksa_candidate_attr': (None, None, None),
+ 'nl80211_tdls_operation': ('Attribute Value', 'FT_UINT8', None),
+ #Reserved for future use 'nl80211_ap_sme_features': (None, None, None),
+ 'nl80211_feature_flags': (None, None, None),
+ 'nl80211_ext_feature_index': (None, None, None),
+ 'nl80211_probe_resp_offload_support_attr': (None, None, None),
+ 'nl80211_connect_failed_reason': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_timeout_reason': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_scan_flags': (None, None, None),
+ 'nl80211_acl_policy': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_smps_mode': ('Attribute Value', 'FT_UINT8', None),
+ 'nl80211_radar_event': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_dfs_state': (None, None, None),
+ 'nl80211_protocol_features': (None, None, None),
+ 'nl80211_crit_proto_id': ('Attribute Value', 'FT_UINT16', None),
+ 'nl80211_rxmgmt_flags': (None, None, None),
+ 'nl80211_tdls_peer_capability': (None, None, None),
+ 'nl80211_sched_scan_plan': (None, None, None),
+ 'nl80211_bss_select_attr': (None, None, None),
+ 'nl80211_nan_function_type': (None, None, None),
+ 'nl80211_nan_publish_type': (None, None, None),
+ 'nl80211_nan_func_term_reason': (None, None, None),
+ 'nl80211_nan_func_attributes': (None, None, None),
+ 'nl80211_nan_srf_attributes': (None, None, None),
+ 'nl80211_nan_match_attributes': (None, None, None),
+ 'nl80211_external_auth_action': ('Attribute Value', 'FT_UINT32', None),
+ 'nl80211_ftm_responder_attributes': (None, None, None),
+ 'nl80211_ftm_responder_stats': (None, None, None),
+ 'nl80211_preamble': (None, None, None),
+ 'nl80211_peer_measurement_type': (None, None, None),
+ 'nl80211_peer_measurement_status': (None, None, None),
+ 'nl80211_peer_measurement_req': (None, None, None),
+ 'nl80211_peer_measurement_resp': (None, None, None),
+ 'nl80211_peer_measurement_peer_attrs': (None, None, None),
+ 'nl80211_peer_measurement_attrs': (None, None, None),
+ 'nl80211_peer_measurement_ftm_capa': (None, None, None),
+ 'nl80211_peer_measurement_ftm_req': (None, None, None),
+ 'nl80211_peer_measurement_ftm_failure_reasons': (None, None, None),
+ 'nl80211_peer_measurement_ftm_resp': (None, None, None),
+ 'nl80211_obss_pd_attributes': (None, None, None),
+}
+# File to be patched
+SOURCE_FILE = "epan/dissectors/packet-netlink-nl80211.c"
+# URL where the latest version can be found
+URL = "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/include/uapi/linux/nl80211.h"
+
def make_enum(name, values, expressions, indent):
    """Render a C enum 'ws_<name>' whose members mirror the kernel values.

    Members carrying an explicit expression keep it; expressions that
    reference other NL80211 symbols are rewritten to their WS_ twins.
    """
    lines = ['enum ws_%s {' % name]
    for member, expr in zip(values, expressions):
        if expr and 'NL80211' in expr:
            expr = 'WS_%s' % expr
        if expr:
            lines.append('%sWS_%s = %s,' % (indent, member, expr))
        else:
            lines.append('%sWS_%s,' % (indent, member))
    lines.append('};')
    return '\n'.join(lines) + '\n'
+
def make_value_string(name, values, indent,):
    """Render a value_string array plus its value_string_ext wrapper."""
    width = 40
    parts = ['static const value_string ws_%s_vals[] = {\n' % name]
    for entry in values:
        left = ('{ WS_%s,' % entry).ljust(width - 1)
        parts.append('%s%s "%s" },\n' % (indent, left, entry))
    parts.append('%s{ 0, NULL }\n' % indent)
    parts.append('};\n')
    parts.append('static value_string_ext ws_%s_vals_ext ='
                 ' VALUE_STRING_EXT_INIT(ws_%s_vals);\n' % (name, name))
    return ''.join(parts)
+
def remove_prefix(prefix, text):
    """Return *text* with a leading *prefix* removed (unchanged if absent)."""
    return text[len(prefix):] if text.startswith(prefix) else text
+
def make_hf_defs(name, indent):
    """Render the hf_ variable declaration for *name* (indent is unused)."""
    return 'static gint hf_%s = -1;' % name
+
def make_hf(name, indent):
    """Render one hf_register_info entry for enum *name*.

    Field name/type/blurb come from EXPORT_ENUMS; None entries fall back
    to the generic attribute defaults.
    """
    field_name, field_type, field_blurb = EXPORT_ENUMS.get(name)

    # Fill in default values for unspecified columns.
    field_name = field_name or 'Attribute Type'
    field_type = field_type or 'FT_UINT16'
    field_blurb = field_blurb or 'NULL'

    # Special treatment of already existing field names.
    rename_fields = {
        'nl80211_attrs': 'nl80211_attr_type',
        'nl80211_commands': 'nl80211_cmd'
    }
    field_abbrev = rename_fields.get(name, name)
    field_abbrev = remove_prefix('nl80211_', field_abbrev)

    pad2 = indent + indent
    pad3 = indent * 3
    lines = [
        pad2 + '{ &hf_%s,' % name,
        pad3 + '{ "%s", "nl80211.%s",' % (field_name, field_abbrev),
        pad3 + ' %s, BASE_DEC | BASE_EXT_STRING,' % (field_type),
        pad3 + ' VALS_EXT_PTR(&ws_%s_vals_ext), 0x00,' % (name),
        pad3 + ' %s, HFILL },' % (field_blurb),
        pad2 + '},',
    ]
    return '\n'.join(lines)
+
def make_ett_defs(name, indent):
    """Render the ett_ variable declaration for *name* (indent is unused)."""
    return 'static gint ett_%s = -1;' % name
+
def make_ett(name, indent):
    """Render the ett_ array element for *name*, double-indented."""
    return '%s&ett_%s,' % (indent * 2, name)
+
class EnumStore(object):
    """Collects the members of one C enum body.

    Sentinel/counter members (NUM_*, *_AFTER_LAST, *_LAST, __*_NUM) end the
    enum, and aliases of earlier members are dropped.
    """

    __RE_ENUM_VALUE = re.compile(
        r'\s+?(?P<value>\w+)(?:\ /\*.*?\*\/)?(?:\s*=\s*(?P<expression>.*?))?(?:\s*,|$)',
        re.MULTILINE | re.DOTALL)

    def __init__(self, name, values):
        self.name = name
        self.values = []
        self.expressions = []
        self.active = True
        self.parse_values(values)

    @staticmethod
    def _is_sentinel(value):
        """True for counter/sentinel members that terminate the enum."""
        return (value.startswith('NUM_')
                or value.endswith('_AFTER_LAST')
                or value.endswith('_LAST')
                or (value.startswith('__') and value.endswith('_NUM')))

    def parse_values(self, values):
        """Parse the enum body text, filling self.values/self.expressions."""
        for match in self.__RE_ENUM_VALUE.finditer(values):
            value = match.group('value')
            expression = match.group('expression')
            if self._is_sentinel(value):
                break
            if expression and expression in self.values:
                # Skip aliases of members collected earlier.
                continue
            self.values.append(value)
            self.expressions.append(expression)

    def finish(self):
        """Return the collected (name, values, expressions) triple."""
        return self.name, self.values, self.expressions
+
# Matches one complete C enum definition; 'enum' captures the name and
# 'values' captures the raw body between the braces.
RE_ENUM = re.compile(
    r'enum\s+?(?P<enum>\w+)\s+?\{(?P<values>.*?)\}\;',
    re.MULTILINE | re.DOTALL)
# Matches C block comments; these are stripped before enum parsing.
RE_COMMENT = re.compile(r'/\*.*?\*/', re.MULTILINE | re.DOTALL)
+
def parse_header(content):
    """Parse nl80211.h text and return (name, values, expressions) triples
    for every enum listed in EXPORT_ENUMS."""
    # Comments would confuse the member parser, so drop them first.
    stripped = RE_COMMENT.sub('', content)
    return [
        EnumStore(m.group('enum'), m.group('values')).finish()
        for m in RE_ENUM.finditer(stripped)
        if m.group('enum') in EXPORT_ENUMS
    ]
+
def parse_source():
    """
    Reads the source file and tries to split it in the parts before, inside and
    after the block.

    Returns a list of exactly three (begin, block, end) string triples, one per
    HEADER/FOOTER-delimited generated section in SOURCE_FILE.
    Raises RuntimeError when the markers are missing or unbalanced.
    """
    begin, block, end = '', '', ''
    parts = []
    # Stages: 1 (before block), 2 (in block, skip), 3 (after block)
    stage = 1
    with open(SOURCE_FILE) as f:
        for line in f:
            # Order matters: the FOOTER line itself must land in 'end',
            # and the HEADER line in 'begin', hence the stage changes
            # before/after the accumulation below.
            if line == FOOTER and stage == 2:
                stage = 3 # End of block
            if stage == 1:
                begin += line
            elif stage == 2:
                block += line
            elif stage == 3:
                end += line
            if line == HEADER and stage == 1:
                stage = 2 # Begin of block
            if line == HEADER and stage == 3:
                stage = 2 # Begin of next code block
                # Close off the finished section and start a fresh triple.
                parts.append((begin, block, end))
                begin, block, end = '', '', ''

    parts.append((begin, block, end))
    # The dissector is expected to contain exactly three generated sections.
    if stage != 3 or len(parts) != 3:
        raise RuntimeError("Could not parse file (in stage %d) (parts %d)" % (stage, len(parts)))
    return parts
+
# Command-line interface; parsing is deferred to main() so the module
# stays importable without side effects.
parser = argparse.ArgumentParser()
parser.add_argument("--update", action="store_true",
                    help="Update %s as needed instead of writing to stdout" % SOURCE_FILE)
parser.add_argument("--indent", default=" " * 4,
                    help="indentation (use \\t for tabs, default 4 spaces)")
parser.add_argument("header_file", nargs="?", default=URL,
                    help="nl80211.h header file (use - for stdin or a HTTP(S) URL, "
                    "default %(default)s)")
+
def main():
    """Fetch or read nl80211.h, generate the C code blocks and either print
    them to stdout or patch them into SOURCE_FILE (--update)."""
    args = parser.parse_args()

    indent = args.indent.replace("\\t", "\t")

    # Fix: the original tuple was ('http:', 'https') — the missing colon made
    # any local file name starting with "https" be treated as a URL.
    if any(args.header_file.startswith(proto) for proto in ('http:', 'https:')):
        r = requests.get(args.header_file)
        r.raise_for_status()
        enums = parse_header(r.text)
    elif args.header_file == "-":
        enums = parse_header(sys.stdin.read())
    else:
        with open(args.header_file) as f:
            enums = parse_header(f.read())

    # Fix: raise instead of assert so the check survives 'python -O'.
    if len(enums) != len(EXPORT_ENUMS):
        raise RuntimeError("Could not parse data, found %d/%d results" %
                           (len(enums), len(EXPORT_ENUMS)))

    code_enums, code_vals, code_hf_defs, code_ett_defs, code_hf, code_ett = '', '', '', '', '', ''
    for enum_name, enum_values, expressions in enums:
        code_enums += make_enum(enum_name, enum_values, expressions, indent) + '\n'
        code_vals += make_value_string(enum_name, enum_values, indent) + '\n'
        code_hf_defs += make_hf_defs(enum_name, indent) + '\n'
        code_ett_defs += make_ett_defs(enum_name, indent) + '\n'
        code_hf += make_hf(enum_name, indent) + '\n'
        code_ett += make_ett(enum_name, indent) + '\n'

    code_top = code_enums + code_vals + code_hf_defs + '\n' + code_ett_defs
    code_top = code_top.rstrip("\n") + "\n"

    # One entry per generated section, in SOURCE_FILE order.
    code = [code_top, code_hf, code_ett]

    if args.update:
        parts = parse_source()

        # Rewrite only when at least one generated section changed.
        update = any(old_code != new_code
                     for (_begin, old_code, _end), new_code in zip(parts, code))
        if not update:
            print("File is up-to-date")
            return
        # Update file
        with open(SOURCE_FILE, "w") as f:
            for (begin, old_code, end), new_code in zip(parts, code):
                f.write(begin)
                f.write(new_code)
                f.write(end)
        print("Updated %s" % SOURCE_FILE)
    else:
        for new_code in code:
            print(new_code)
+
# Script entry point.
if __name__ == '__main__':
    main()
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# tab-width: 8
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 tabstop=8 expandtab:
+# :indentSize=4:tabSize=8:noTabs=true:
+#
diff --git a/tools/generate-sysdig-event.py b/tools/generate-sysdig-event.py
new file mode 100755
index 0000000..67419c8
--- /dev/null
+++ b/tools/generate-sysdig-event.py
@@ -0,0 +1,412 @@
+#!/usr/bin/env python3
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''\
+Generate Sysdig event dissector sections from the sysdig sources.
+
+Reads driver/event_table.c and driver/ppm_events_public.h and generates
+corresponding dissection code in packet-sysdig-event.c. Updates are
+performed in-place in the dissector code.
+
+Requires an Internet connection. Assets are loaded from GitHub over HTTPS, from falcosecurity/libs master.
+'''
+
+import logging
+import os
+import os.path
+import re
+import urllib.request, urllib.error, urllib.parse
+import sys
+
# Base URL for raw files in the falcosecurity/libs repository (master branch).
sysdig_repo_pfx = 'https://raw.githubusercontent.com/falcosecurity/libs/master/'
+
def exit_msg(msg=None, status=1):
    """Write *msg* (when given) followed by the usage docstring to stderr,
    then terminate the process with exit code *status*."""
    parts = [] if msg is None else [msg + '\n\n']
    parts.append(__doc__ + '\n')
    sys.stderr.write(''.join(parts))
    sys.exit(status)
+
def get_url_lines(url):
    '''Open a URL.
    Returns the URL body as a list of lines.

    Exits the program with a diagnostic message on any fetch error.
    '''
    req_headers = { 'User-Agent': 'Wireshark generate-sysdig-event' }
    try:
        req = urllib.request.Request(url, headers=req_headers)
        response = urllib.request.urlopen(req)
        lines = response.read().decode().splitlines()
        response.close()
    except urllib.error.HTTPError as err:
        exit_msg("HTTP error fetching {0}: {1}".format(url, err.reason))
    except urllib.error.URLError as err:
        exit_msg("URL error fetching {0}: {1}".format(url, err.reason))
    except OSError as err:
        # Fix: the original format string had no {1}, silently dropping
        # the strerror argument.
        exit_msg("OS error fetching {0}: {1}".format(url, err.strerror))
    except Exception:
        # Fix: the original passed sys.exc_info()[0] as exit_msg's *status*
        # argument; include it in the message instead.
        exit_msg("Unexpected error fetching {0}: {1}".format(url, sys.exc_info()[0]))

    return lines
+
+
# Fetched once at import time; the script cannot work without it.
ppm_ev_pub_lines = get_url_lines(sysdig_repo_pfx + 'driver/ppm_events_public.h')

# Fix: raw strings — the originals relied on unknown escapes like '\s'
# surviving in non-raw literals, which raises SyntaxWarning on modern Python.
# Matches 'PPME_<NAME>_E|X = <num>,' event enum entries.
ppme_re = re.compile(r'^\s+PPME_([A-Z0-9_]+_[EX])\s*=\s*([0-9]+)\s*,')
# Matches 'PPM_SC_X(<name>, <num>)' syscall code entries.
ppm_sc_x_re = re.compile(r'^\s+PPM_SC_X\s*\(\s*(\S+)\s*,\s*(\d+)\s*\)')

event_info_d = {}
+
def get_event_defines():
    """Map event number -> PPME event name (PPME_ prefix stripped)."""
    events = {}
    for header_line in ppm_ev_pub_lines:
        found = ppme_re.match(header_line)
        if found is None:
            continue
        events[int(found.group(2))] = found.group(1)
    return events
+
def get_syscall_code_defines():
    """Map PPM_SC syscall code number -> syscall name."""
    codes = {}
    for header_line in ppm_ev_pub_lines:
        found = ppm_sc_x_re.match(header_line)
        if found is None:
            continue
        codes[int(found.group(2))] = found.group(1)
    return codes
+
# Fetched once at import time.
ppm_ev_table_lines = get_url_lines(sysdig_repo_pfx + 'driver/event_table.c')

hf_d = {}

# Fix: raw strings for the same SyntaxWarning reason as the regexes above.
# event_info_re captures (event name, declared parameter count).
event_info_re = re.compile(r'^\s+\[\s*PPME_.*\]\s*=\s*{\s*"([A-Za-z0-9_]+)"\s*,[^,]+,[^,]+,\s*([0-9]+)\s*[,{}]')
# event_param_re captures every (name, PT_ type, PF_ format) triple on a line.
event_param_re = re.compile(r'{\s*"([A-Za-z0-9_ ]+)"\s*,\s*PT_([A-Z0-9_]+)\s*,\s*PF_([A-Z0-9_]+)\s*[,}]')
+
def get_event_names():
    '''Return a contiguous list of event names. Names are lower case.'''
    names = []
    for table_line in ppm_ev_table_lines:
        info = event_info_re.match(table_line)
        if info is not None:
            names.append(info.group(1))
    return names
+
+# PT_xxx to FT_xxx
+pt_to_ft = {
+ 'BYTEBUF': 'BYTES',
+ 'CHARBUF': 'STRING',
+ 'ERRNO': 'INT64',
+ 'FD': 'INT64',
+ 'FLAGS8': 'INT8',
+ 'FLAGS16': 'INT16',
+ 'FLAGS32': 'INT32',
+ 'FSPATH': 'STRING',
+ 'FSRELPATH': 'STRING',
+ 'GID': 'INT32',
+ 'MODE': 'INT32',
+ 'PID': 'INT64',
+ 'UID': 'INT32',
+ 'SYSCALLID': 'UINT16',
+}
+
+# FT_xxx to BASE_xxx
+force_param_formats = {
+ 'STRING': 'NONE',
+ 'INT.*': 'DEC',
+}
+
def get_event_params():
    '''Return a list of dictionaries containing event names and parameter info.

    Each dict has the keys event_name, event_num, param_name, param_type
    (FT_* suffix) and param_format (BASE_* suffix).
    '''
    event_param_l = []
    event_num = 0
    # Parameters holding list-like data that are forced to FT_STRING.
    force_string_l = ['args', 'env']
    for line in ppm_ev_table_lines:
        ei = event_info_re.match(line)
        ep = event_param_re.findall(line)
        if ei and ep:
            event_name = ei.group(1)
            src_param_count = int(ei.group(2))
            # Sanity-check the declared parameter count against what was parsed:
            # extra entries are tolerated (warn + truncate), missing ones are fatal.
            if len(ep) != src_param_count:
                err_msg = '{}: found {} parameters. Expected {}. Params: {}'.format(
                    event_name, len(ep), src_param_count, repr(ep))
                if len(ep) > src_param_count:
                    logging.warning(err_msg)
                    del ep[src_param_count:]
                else:
                    raise NameError(err_msg)
            for p in ep:
                # p = (param name, PT_ type suffix, PF_ format suffix)
                if p[0] in force_string_l:
                    param_type = 'STRING'
                elif p[1] in pt_to_ft:
                    param_type = pt_to_ft[p[1]]
                elif p[0] == 'flags' and p[1].startswith('INT') and 'HEX' in p[2]:
                    # Hex-formatted 'flags' fields become unsigned ints.
                    param_type = 'U' + p[1]
                elif 'INT' in p[1]:
                    # Ints
                    param_type = p[1]
                else:
                    print(f"p fallback {p}")
                    # Fall back to bytes
                    param_type = 'BYTES'

                # Derive the display base; 'NA' and BYTES need explicit defaults.
                if p[2] == 'NA':
                    if 'INT' in param_type:
                        param_format = 'DEC'
                    else:
                        param_format = 'NONE'
                elif param_type == 'BYTES':
                    param_format = 'NONE'
                else:
                    param_format = p[2]

                # Apply the forced display bases from force_param_formats.
                for pt_pat, force_pf in force_param_formats.items():
                    if re.match(pt_pat, param_type) and param_format != force_pf:
                        err_msg = 'Forcing {} {} format to {}. Params: {}'.format(
                            event_name, param_type, force_pf, repr(ep))
                        logging.warning(err_msg)
                        param_format = force_pf

                param_d = {
                    'event_name': event_name,
                    'event_num': event_num,
                    # use replace() to account for "plugin ID" param name (ie: param names with space)
                    'param_name': p[0].replace(" ", "_"),
                    'param_type': param_type,
                    'param_format': param_format,
                }
                event_param_l.append(param_d)
        if ei:
            # Event numbers advance per table entry, even for events
            # without parameters.
            event_num += 1
    return event_param_l
+
def param_to_hf_name(param):
    """Build the hf_ variable name for an event parameter record."""
    return 'hf_param_%s_%s' % (param['param_name'], param['param_type'].lower())
+
def param_to_value_string_name(param):
    """Build the value_string array name for an event parameter record."""
    return '%s_%s_vals' % (param['param_name'], param['param_type'].lower())
+
def get_param_desc(param):
    """Return a human-friendly description for an event parameter.

    Lookup order: exact 'event.param' override, then an event-level
    description plus the parameter name, then the bare parameter name.
    XXX This could use some work.
    """
    # Specific descriptions. Event name + parameter name.
    param_descs = {
        'accept.queuepct': 'Accept queue per connection',
        'execve.args': 'Program arguments',
        'execve.comm': 'Command',
        'execve.cwd': 'Current working directory',
    }
    # General descriptions. Event name only.
    event_descs = {
        'ioctl': 'I/O control',
    }

    event_name = param['event_name']
    param_name = param['param_name']
    param_id = '{}.{}'.format(event_name, param_name)
    if param_id in param_descs:
        return param_descs[param_id]
    if event_name in event_descs:
        return '{}: {}'.format(event_descs[event_name], param_name)
    return param_name
+
def main():
    """Regenerate the auto-generated sections of packet-sysdig-event.c.

    Renders C snippets from the sysdig tables (fetched at import time) and
    splices them into the dissector file in place: previously generated
    lines are stripped, then each marker comment is rewritten followed by
    the fresh content.
    """
    logging.basicConfig(format='%(levelname)s: %(message)s')

    # Event list
    event_d = get_event_defines()
    event_nums = list(event_d.keys())
    event_nums.sort()

    event_name_l = get_event_names()
    event_param_l = get_event_params()

    # hf variable name -> parameter record; identical parameters shared by
    # several events collapse into a single header field.
    hf_d = {}
    for param in event_param_l:
        hf_name = param_to_hf_name(param)
        hf_d[hf_name] = param

    # Events with an identical parameter signature share one indexes array.
    idx_id_to_name = { '': 'no' }
    parameter_index_l = []

    for en in range(0, len(event_nums)):
        param_id = ''
        param_l = []
        event_var = event_d[en].lower()
        for param in event_param_l:
            if param['event_num'] == en:
                hf_name = param_to_hf_name(param)
                param_l.append(hf_name)
                param_id += ':' + param['param_name'] + '_' + param['param_type']

        ei_str = ''
        if param_id not in idx_id_to_name:
            idx_id_to_name[param_id] = event_var
            ei_str = 'static int * const {}_indexes[] = {{ &{}, NULL }};'.format(
                event_var,
                ', &'.join(param_l)
            )
        else:
            # Alias the earlier event with the same signature.
            ei_str = '#define {}_indexes {}_indexes'.format(event_var, idx_id_to_name[param_id])

        parameter_index_l.append(ei_str)

    dissector_path = os.path.join(os.path.dirname(__file__),
        '..', 'epan', 'dissectors', 'packet-sysdig-event.c')
    # Fix: the original opened the file for reading and immediately re-opened
    # it for writing without closing the first handle; read it fully first.
    with open(dissector_path, 'r') as dissector_in:
        dissector_lines = list(dissector_in)

    # Strip out old content.
    # Fix: all patterns below are raw strings now; the originals relied on
    # unknown escapes like '\s' surviving non-raw literals (SyntaxWarning
    # on current Python).
    strip_re_l = []
    strip_re_l.append(re.compile(r'^static\s+int\s+hf_param_.*;'))
    strip_re_l.append(re.compile(r'^#define\s+EVT_STR_[A-Z0-9_]+\s+"[A-Za-z0-9_]+"'))
    strip_re_l.append(re.compile(r'^#define\s+EVT_[A-Z0-9_]+\s+[0-9]+'))
    strip_re_l.append(re.compile(r'^\s*{\s*EVT_[A-Z0-9_]+\s*,\s*EVT_STR_[A-Z0-9_]+\s*}'))
    strip_re_l.append(re.compile(r'^static\s+const\s+int\s+\*\s*[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;'))
    strip_re_l.append(re.compile(r'^static\s+int\s*\*\s+const\s+[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;'))
    strip_re_l.append(re.compile(r'^\s*#define\s+[a-z0-9_]+_[ex]_indexes\s+[a-z0-9_]+_indexes'))
    strip_re_l.append(re.compile(r'^\s*\{\s*EVT_[A-Z0-9_]+_[EX]\s*,\s*[a-z0-9_]+_[ex]_indexes\s*}\s*,'))
    strip_re_l.append(re.compile(r'^\s*\{\s*\d+\s*,\s*"\S+"\s*}\s*,\s*//\s*PPM_SC_\S+'))
    strip_re_l.append(re.compile(r'^\s*{\s*&hf_param_.*},')) # Must all be on one line

    for strip_re in strip_re_l:
        dissector_lines = [l for l in dissector_lines if not strip_re.search(l)]

    # Find our value strings
    value_string_re = re.compile(r'static\s+const\s+value_string\s+([A-Za-z0-9_]+_vals)')
    value_string_l = []
    for line in dissector_lines:
        vs = value_string_re.match(line)
        if vs:
            value_string_l.append(vs.group(1))

    # Add in new content after comments.

    header_fields_c = 'Header fields'
    header_fields_re = re.compile(r'/\*\s+' + header_fields_c, flags = re.IGNORECASE)
    header_fields_l = []
    for hf_name in sorted(hf_d.keys()):
        header_fields_l.append('static int {} = -1;'.format(hf_name))

    event_names_c = 'Event names'
    event_names_re = re.compile(r'/\*\s+' + event_names_c, flags = re.IGNORECASE)
    event_names_l = []
    event_str_l = list(set(event_name_l))
    event_str_l.sort()
    for evt_str in event_str_l:
        event_names_l.append('#define EVT_STR_{0:24s} "{1:s}"'.format(evt_str.upper(), evt_str))

    event_definitions_c = 'Event definitions'
    event_definitions_re = re.compile(r'/\*\s+' + event_definitions_c, flags = re.IGNORECASE)
    event_definitions_l = []
    for evt in event_nums:
        event_definitions_l.append('#define EVT_{0:24s} {1:3d}'.format(event_d[evt], evt))

    value_strings_c = 'Value strings'
    # NOTE: rebinds value_string_re; the search-pattern above is no longer
    # needed at this point.
    value_strings_re = re.compile(r'/\*\s+' + value_strings_c, flags = re.IGNORECASE)
    value_strings_l = []
    for evt in event_nums:
        evt_num = 'EVT_{},'.format(event_d[evt])
        evt_str = 'EVT_STR_' + event_name_l[evt].upper()
        value_strings_l.append('    {{ {0:<32s} {1:s} }},'.format(evt_num, evt_str))

    parameter_index_c = 'Parameter indexes'
    parameter_index_re = re.compile(r'/\*\s+' + parameter_index_c, flags = re.IGNORECASE)
    # parameter_index_l defined above.

    event_tree_c = 'Event tree'
    event_tree_re = re.compile(r'/\*\s+' + event_tree_c, flags = re.IGNORECASE)
    event_tree_l = []
    for evt in event_nums:
        evt_num = 'EVT_{}'.format(event_d[evt])
        evt_idx = '{}_indexes'.format(event_d[evt].lower())
        event_tree_l.append('    {{ {}, {} }},'.format(evt_num, evt_idx))

    # Syscall codes
    syscall_code_d = get_syscall_code_defines()
    syscall_code_c = 'Syscall codes'
    syscall_code_re = re.compile(r'/\*\s+' + syscall_code_c, flags = re.IGNORECASE)
    syscall_code_l = []
    for sc_num in syscall_code_d:
        syscall_code_l.append(f'    {{ {sc_num:3}, "{syscall_code_d[sc_num].lower()}" }}, // PPM_SC_{syscall_code_d[sc_num]}')

    header_field_reg_c = 'Header field registration'
    header_field_reg_re = re.compile(r'/\*\s+' + header_field_reg_c, flags = re.IGNORECASE)
    header_field_reg_l = []
    for hf_name in sorted(hf_d.keys()):
        param = hf_d[hf_name]
        event_name = param['event_name']
        param_desc = get_param_desc(param)
        param_name = param['param_name']
        param_type = param['param_type']
        param_format = param['param_format']
        fieldconvert = 'NULL'
        vs_name = param_to_value_string_name(param)
        if vs_name in value_string_l and 'INT' in param_type:
            fieldconvert = 'VALS({})'.format(vs_name)
        header_field_reg_l.append('        {{ &{}, {{ "{}", "sysdig.param.{}.{}", FT_{}, BASE_{}, {}, 0, NULL, HFILL }} }},'.format(
            hf_name,
            param_desc,
            event_name,
            param_name,
            param_type,
            param_format,
            fieldconvert
        ))

    # Splice: copy the (stripped) file through, replacing each marker comment
    # with a fresh comment plus the newly generated section content.
    with open(dissector_path, 'w+') as dissector_f:
        for line in dissector_lines:
            fill_comment = None
            fill_l = []

            if header_fields_re.match(line):
                fill_comment = header_fields_c
                fill_l = header_fields_l
            elif event_names_re.match(line):
                fill_comment = event_names_c
                fill_l = event_names_l
            elif event_definitions_re.match(line):
                fill_comment = event_definitions_c
                fill_l = event_definitions_l
            elif value_strings_re.match(line):
                fill_comment = value_strings_c
                fill_l = value_strings_l
            elif parameter_index_re.match(line):
                fill_comment = parameter_index_c
                fill_l = parameter_index_l
            elif event_tree_re.match(line):
                fill_comment = event_tree_c
                fill_l = event_tree_l
            elif syscall_code_re.match(line):
                fill_comment = syscall_code_c
                fill_l = syscall_code_l
            elif header_field_reg_re.match(line):
                fill_comment = header_field_reg_c
                fill_l = header_field_reg_l

            if fill_comment is not None:
                # Write our comment followed by the content
                print('Generating {}, {:d} lines'.format(fill_comment, len(fill_l)))
                dissector_f.write('/* {}. Automatically generated by tools/{} */\n'.format(
                    fill_comment,
                    os.path.basename(__file__)
                ))
                # Fix: own loop variable instead of shadowing the outer 'line'.
                for fill_line in fill_l:
                    dissector_f.write('{}\n'.format(fill_line))
                # Fill each section only once
                del fill_l[:]
            else:
                # Existing content
                dissector_f.write(line)
+
#
# On with the show
#

# main() returns None, so the process exits with status 0 on success.
if __name__ == "__main__":
    sys.exit(main())
diff --git a/tools/generate_authors.py b/tools/generate_authors.py
new file mode 100755
index 0000000..a74ef1c
--- /dev/null
+++ b/tools/generate_authors.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+
+#
+# Generate the AUTHORS file combining existing AUTHORS file with
+# git commit log.
+#
+# Usage: generate_authors.py AUTHORS.src
+
+# Copyright 2022 Moshe Kaplan
+# Based on generate_authors.pl by Michael Mann
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import argparse
+import io
+import re
+import subprocess
+import sys
+
+
def get_git_authors():
    '''
    Return (name, email) pairs from `git shortlog`, whose lines look like:
      4321  Navin R. Johnson <nrjohnson@example.com>
    The '@' in each email is replaced with '[AT]'.
    '''
    GIT_LINE_REGEX = r"^\s*\d+\s+([^<]*)\s*<([^>]*)>"
    cmd = "git --no-pager shortlog --email --summary HEAD".split(' ')
    git_cmd_output = subprocess.check_output(cmd, universal_newlines=True, encoding='utf-8')

    git_authors = []
    for line in git_cmd_output.splitlines():
        line = line.strip()
        match = re.match(GIT_LINE_REGEX, line)
        # Fix: skip lines that are not shortlog entries instead of crashing
        # on match == None (the original dereferenced the match unchecked).
        if not match:
            continue
        name = match.group(1).strip()
        email = match.group(2).strip()
        # Try to lower how much spam people get:
        email = email.replace('@', '[AT]')
        git_authors.append((name, email))
    return git_authors
+
+
def extract_contributors(authors_content):
    """Collect (name, email) pairs from the '= Contributors =' section of the
    AUTHORS file, skipping the bracketed '{...}' description blocks."""
    contributors_section = authors_content.split("= Contributors =", 1)[1]
    line_re = re.compile(r"^([\w\.\-\'\x80-\xff]+(\s*[\w+\.\-\'\x80-\xff])*)\s+<([^>]*)>")
    found = []
    state = ""
    for line in contributors_section.splitlines():
        match = line_re.match(line)
        if re.search(r'([^\{]*)\{', line):
            # An entry whose '{...}' block starts here; record it and skip
            # everything until the closing brace.
            if match:
                found.append((match.group(1), match.group(3)))
            state = "s_in_bracket"
        elif state == "s_in_bracket":
            if re.search(r'([^\}]*)\}', line):
                state = ""
        elif re.search('<', line):
            if match:
                found.append((match.group(1), match.group(3)))
        elif re.search(r"(e-mail address removed at contributor's request)", line):
            if match:
                found.append((match.group(1), match.group(3)))
    return found
+
+
def generate_git_contributors_text(contributors_emails, git_authors_emails):
    """Format the git authors that are not already credited as contributors,
    one tab-aligned 'name<tabs><email>' line each, joined with newlines."""
    # Track the email addresses seen to avoid listing anyone twice.
    seen = {email.lower() for _name, email in contributors_emails}

    lines = []
    for name, email in git_authors_emails:
        key = email.lower()
        if key in seen:
            continue

        # Skip Gerald, since he's part of the header:
        if email == "gerald[AT]wireshark.org":
            continue

        tab_count = 3
        if len(name) >= 8 * tab_count:
            entry = "{name} <{email}>".format(name=name, email=email)
        else:
            # Pad the name to three tab stops (tab width 8).
            tab_count -= len(name) / 8
            if len(name) % 8:
                tab_count += 1
            entry = "{name}{tabs}<{email}>".format(
                name=name, tabs='\t' * int(tab_count), email=email)

        seen.add(key)
        lines.append(entry)
    return "\n".join(lines)
+
+
# Read the AUTHORS file up to (excluding) the generated git-log section.
def read_authors(parsed_args):
    """Return the AUTHORS content that precedes the '= From git log =' marker."""
    collected = []
    with open(parsed_args.authors[0], 'r', encoding='utf-8') as fh:
        for line in fh:
            if '= From git log =' in line:
                break
            collected.append(line)
    return ''.join(collected)
+
+
def main():
    """Rebuild AUTHORS in place: keep the hand-maintained part, then append
    a freshly generated '= From git log =' section."""
    parser = argparse.ArgumentParser(description="Generate the AUTHORS file combining existing AUTHORS file with git commit log.")
    parser.add_argument("authors", metavar='authors', nargs=1, help="path to AUTHORS file")
    parsed_args = parser.parse_args()

    author_content = read_authors(parsed_args)

    # Emails already credited by hand must not be repeated in the git section.
    contributors_emails = extract_contributors(author_content)
    git_authors_emails = get_git_authors()
    git_contributors_text = generate_git_contributors_text(contributors_emails, git_authors_emails)

    # Assemble and write the final file back in place.
    git_contributor_header = '= From git log =\n\n'
    with open(parsed_args.authors[0], 'w', encoding='utf-8') as fh:
        fh.write(author_content + git_contributor_header + git_contributors_text + '\n')
+
+
# Script entry point.
if __name__ == '__main__':
    main()
diff --git a/tools/generate_cbor_pcap.py b/tools/generate_cbor_pcap.py
new file mode 100755
index 0000000..545b985
--- /dev/null
+++ b/tools/generate_cbor_pcap.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+'''
+Convert a CBOR diagnostic notation file into an HTTP request
+for the encoded cbor.
+This allows straightforward test and debugging of simple pcap files.
+
+ Copyright 2021 Brian Sipos <brian.sipos@gmail.com>
+
+SPDX-License-Identifier: LGPL-2.1-or-later
+'''
+
+from argparse import ArgumentParser
+from io import BytesIO
+import scapy
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, TCP
+from scapy.layers.http import HTTP, HTTPRequest
+from scapy.packet import Raw
+from scapy.utils import wrpcap
+from subprocess import check_output
+import sys
+
+
def main():
    """Build a single-packet pcap wrapping the input data in an HTTP POST.

    The request body is either the raw input (--intype raw) or the input
    converted from CBOR diagnostic notation (--intype cbordiag).
    """
    parser = ArgumentParser()
    parser.add_argument('--content-type', default='application/cbor',
                        help='The request content-type header')
    parser.add_argument('--infile', default='-',
                        help='The diagnostic text input file, or "-" for stdin')
    parser.add_argument('--outfile', default='-',
                        help='The PCAP output file, or "-" for stdout')
    parser.add_argument('--intype', default='cbordiag',
                        choices=['cbordiag', 'raw'],
                        help='The input data type.')
    args = parser.parse_args()

    # First get the CBOR data itself
    infile_name = args.infile.strip()
    if infile_name != '-':
        infile = open(infile_name, 'rb')
    else:
        infile = sys.stdin.buffer

    if args.intype == 'raw':
        cbordata = infile.read()
    elif args.intype == 'cbordiag':
        # The external 'diag2cbor.rb' tool must be on PATH (presumably from
        # the Ruby cbor-diag tools - confirm); its stdout is the encoded CBOR.
        cbordata = check_output('diag2cbor.rb', stdin=infile)

    # Now synthesize an HTTP request with that body
    req = HTTPRequest(
        Method='POST',
        Host='example.com',
        User_Agent='scapy',
        Content_Type=args.content_type,
        Content_Length=str(len(cbordata)),
    ) / Raw(cbordata)

    # Write the request directly into pcap
    outfile_name = args.outfile.strip()
    if outfile_name != '-':
        outfile = open(outfile_name, 'wb')
    else:
        outfile = sys.stdout.buffer

    # NOTE(review): infile/outfile are deliberately left open; the process
    # exits immediately after wrpcap() returns.
    pkt = Ether()/IP()/TCP()/HTTP()/req
    wrpcap(outfile, pkt)

if __name__ == '__main__':
    sys.exit(main())
diff --git a/tools/html2text.py b/tools/html2text.py
new file mode 100755
index 0000000..da290b1
--- /dev/null
+++ b/tools/html2text.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python3
+#
+# html2text.py - converts HTML to text
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from __future__ import unicode_literals
+
+__author__ = "Peter Wu <peter@lekensteyn.nl>"
+__copyright__ = "Copyright 2015, Peter Wu"
+__license__ = "GPL (v2 or later)"
+
+# TODO:
+# multiple list indentation levels (modify bullets?)
+# maybe allow for ascii output instead of utf-8?
+
+import sys
+from textwrap import TextWrapper
+try:
+ from HTMLParser import HTMLParser
+ from htmlentitydefs import name2codepoint
+except ImportError: # Python 3
+ from html.parser import HTMLParser
+ from html.entities import name2codepoint
+ unichr = chr # for html entity handling
+
class TextHTMLParser(HTMLParser):
    """Converts a HTML document to text.

    Text is accumulated block by block in self.text_block and flushed
    (word-wrapped unless inside <pre>) into self.output_buffer; close()
    finally writes the whole buffer, UTF-8 encoded, to stdout.
    """
    def __init__(self):
        try:
            # Python 3.4
            HTMLParser. __init__(self, convert_charrefs=True)
        except Exception:
            HTMLParser. __init__(self)
        # All text, concatenated
        self.output_buffer = ''
        # The current text block which is being constructed
        self.text_block = ''
        # Whether the previous element was terminated with whitespace
        self.need_space = False
        # Whether to prevent word-wrapping the contents (for "pre" tag)
        self.skip_wrap = False
        # Quoting
        self.need_quote = False
        self.quote_stack = []
        # Suffixes
        self.need_suffix = False
        self.suffix_stack = []
        # track list items
        self.list_item_prefix = None
        self.ordered_list_index = None
        self.stack_list_item_prefix = []
        self.stack_ordered_list_index = []
        self.list_indent_level = 0
        self.list_item_indent = ""
        # Indentation (for heading and paragraphs)
        self.indent_levels = [0, 0]
        # Don't dump CSS, scripts, etc.
        self.ignore_tags = ('head', 'style', 'script')
        self.ignore_level = 0
        # href footnotes.
        self.footnotes = []
        self.href = None

    def _wrap_text(self, text):
        """Wraps text, but additionally indent list items."""
        # NOTE(review): the indent unit strings below were reconstructed
        # from a whitespace-mangled source - confirm widths against upstream.
        initial_indent = indent = sum(self.indent_levels) * ' '
        if self.list_item_prefix:
            initial_indent += self.list_item_prefix
            indent += ' '
        kwargs = {
            'width': 72,
            'initial_indent': initial_indent,
            'subsequent_indent': indent
        }
        kwargs['break_on_hyphens'] = False
        wrapper = TextWrapper(**kwargs)
        return '\n'.join(wrapper.wrap(text))

    def _commit_block(self, newline='\n\n'):
        # Flush the pending text block into output_buffer, wrapped unless
        # we are inside a <pre> element.
        text = self.text_block
        if text:
            if not self.skip_wrap:
                text = self._wrap_text(text)
            self.output_buffer += text + newline
            self.text_block = ''
        self.need_space = False

    def handle_starttag(self, tag, attrs):
        # end a block of text on <br>, but also flush list items which are not
        # terminated.
        if tag == 'br' or tag == 'li':
            self._commit_block('\n')
        if tag == 'code':
            self.need_quote = True
            self.quote_stack.append('`')
        if tag == 'pre':
            self.skip_wrap = True
        if tag in ('ol', 'ul'):
            # Entering a (possibly nested) list: remember the enclosing
            # list's numbering/prefix so the end tag can restore it.
            self.list_indent_level += 1
            self.list_item_indent = " " * (self.list_indent_level - 1)
            self.stack_ordered_list_index.append(self.ordered_list_index)
            self.stack_list_item_prefix.append(self.list_item_prefix)
        # Following list items are numbered.
        if tag == 'ol':
            self.ordered_list_index = 1
        if tag == 'ul':
            self.list_item_prefix = self.list_item_indent + ' • '
        if tag == 'li' and self.ordered_list_index:
            self.list_item_prefix = self.list_item_indent + ' %d. ' % (self.ordered_list_index)
            self.ordered_list_index += 1
        if tag[0] == 'h' and len(tag) == 2 and \
           (tag[1] >= '1' and tag[1] <= '6'):
            self.indent_levels = [int(tag[1]) - 1, 0]
        if tag == 'p':
            self.indent_levels[1] = 1
        if tag == 'a':
            try:
                href = [attr[1] for attr in attrs if attr[0] == 'href'][0]
                if '://' in href: # Skip relative URLs and links.
                    self.href = href
            except IndexError:
                self.href = None
        if tag == 'span':
            try:
                el_class = [attr[1] for attr in attrs if attr[0] == 'class'][0]
                if 'menuseq' in el_class:
                    self.need_quote = True
                    self.quote_stack.append('"')
            except IndexError:
                pass
        if tag == 'div':
            try:
                el_class = [attr[1] for attr in attrs if attr[0] == 'class'][0]
                if 'title' in el_class.split(' '):
                    self.need_suffix = True
                    self.suffix_stack.append(':')
            except IndexError:
                pass
        if tag in self.ignore_tags:
            self.ignore_level += 1

    def handle_data(self, data):
        # Append text to the current block, applying any pending quote
        # character and suffix, unless inside an ignored tag.
        quote = ''
        if self.need_quote:
            quote = self.quote_stack[-1]
        suffix = ''
        if self.need_suffix:
            suffix = self.suffix_stack.pop()
        if self.ignore_level > 0:
            return
        elif self.skip_wrap:
            block = data
        else:
            if self.href and data == self.href:
                # This is a self link. Don't create a footnote.
                self.href = None

            # For normal text, fold multiple whitespace and strip
            # leading and trailing spaces for the whole block (but
            # keep spaces in the middle).
            block = quote
            if data.strip() and data[:1].isspace():
                # Keep spaces in the middle
                self.need_space = True
            if self.need_space and data.strip() and self.text_block:
                block = ' ' + quote
            block += ' '.join(data.split()) + suffix
            self.need_space = data[-1:].isspace()
        self.text_block += block
        self.need_quote = False
        self.need_suffix = False

    def handle_endtag(self, tag):
        block_elements = 'p li ul pre ol h1 h2 h3 h4 h5 h6 tr'
        #block_elements += ' dl dd dt'
        if tag in block_elements.split():
            self._commit_block()
        if tag in ('code', 'span'):
            # XXX This span isn't guaranteed to match its opening.
            self.text_block += self.quote_stack.pop()
        if tag in ('ol', 'ul'):
            # Leaving a list: restore the enclosing list's state.
            self.list_indent_level -= 1
            self.list_item_indent = " " * (self.list_indent_level - 1)
            self.ordered_list_index = self.stack_ordered_list_index.pop()
            self.list_item_prefix = self.stack_list_item_prefix.pop()
        if tag == 'pre':
            self.skip_wrap = False
        if tag == 'a' and self.href:
            self.footnotes.append(self.href)
            self.text_block += '[{0}]'.format(len(self.footnotes))
        if tag in self.ignore_tags:
            self.ignore_level -= 1

    def handle_charref(self, name):
        # Numeric character reference, e.g. &#8226;
        self.handle_data(unichr(int(name)))

    def handle_entityref(self, name):
        # Named entity, e.g. &amp;
        self.handle_data(unichr(name2codepoint[name]))

    def close(self):
        HTMLParser.close(self)
        self._commit_block()

        # Emit a numbered "References" section for the hyperlinks
        # collected as footnotes while parsing.
        if len(self.footnotes) > 0:
            self.list_item_prefix = None
            self.indent_levels = [1, 0]
            self.text_block = 'References'
            self._commit_block()
            self.indent_levels = [1, 1]
            footnote_num = 1
            for href in self.footnotes:
                self.text_block += '{0:>2}. {1}\n'.format(footnote_num, href)
                footnote_num += 1
            self._commit_block('\n')

        # Write everything out as UTF-8 bytes; the hasattr() check keeps
        # the Python 2 fallback path working.
        byte_output = self.output_buffer.encode('utf-8')
        if hasattr(sys.stdout, 'buffer'):
            sys.stdout.buffer.write(byte_output)
        else:
            sys.stdout.write(byte_output)
+
+
def main():
    """Feed the given file (or stdin) through TextHTMLParser.

    Input is read as raw bytes and decoded as UTF-8; the converted text is
    written to stdout by TextHTMLParser.close().
    """
    htmlparser = TextHTMLParser()
    if len(sys.argv) > 1 and sys.argv[1] != '-':
        filename = sys.argv[1]
        f = open(filename, 'rb')
    else:
        filename = None
        f = sys.stdin
    try:
        if hasattr(f, 'buffer'):
            # Access raw (byte) buffer in Python 3 instead of decoded one
            f = f.buffer
        # Read stdin as a Unicode string
        htmlparser.feed(f.read().decode('utf-8'))
    finally:
        if filename is not None:
            f.close()
    htmlparser.close()

if __name__ == '__main__':
    sys.exit(main())
diff --git a/tools/idl2deb b/tools/idl2deb
new file mode 100755
index 0000000..18f1b05
--- /dev/null
+++ b/tools/idl2deb
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+
+# idl2deb - quick hack by W. Martin Borgert <debacle@debian.org> to create
+# Debian GNU/Linux packages from idl2wrs modules for Wireshark.
+# Copyright 2003, 2008, W. Martin Borgert
+
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.com>
+# Copyright 1998 Gerald Combs
+
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import optparse
+import os
+import string
+import sys
+import time
+
# Version/copyright banner; printed by optparse for --version (process_opts).
scriptinfo = """idl2deb version 2008-03-10
Copyright 2003, 2008, W. Martin Borgert
Free software, released under the terms of the GPL."""
+
def create_file(preserve, filename, content, mode = None):
    """Create a file with given content.

    :param preserve: when true, an existing file is left untouched.
    :param filename: path of the file to (re)write.
    :param content: full text written to the file.
    :param mode: optional permission bits passed to os.chmod().
    """
    if preserve and os.path.isfile(filename):
        return
    # 'with' guarantees the handle is closed even if write() raises,
    # unlike the previous open()/write()/close() sequence.
    with open(filename, 'w') as f:
        f.write(content)
    if mode:
        os.chmod(filename, mode)
+
def create_files(version, deb, email, idl, name, preserve, iso, rfc):
    """Create all files for the .deb build process.

    :param version: wireshark-dev version, epoch and revision stripped.
    :param deb: full wireshark-dev package version string.
    :param email: maintainer e-mail address.
    :param idl: IDL file name; its lowercased basename names the package.
    :param name: maintainer name.
    :param preserve: when true, files that already exist are kept as-is.
    :param iso: ISO-style timestamp, used in debian/copyright.
    :param rfc: RFC-2822 timestamp, used in debian/changelog.
    """
    base = os.path.basename(idl.lower().split(".idl")[0])

    if not os.path.isdir("packaging/debian"):
        os.mkdir("packaging/debian")

    # NOTE(review): the recipe line in the rules template must start with a
    # literal TAB for make; written as \t here - confirm against upstream.
    create_file(preserve, "packaging/debian/rules", """#!/usr/bin/make -f

include /usr/share/cdbs/1/rules/debhelper.mk
include /usr/share/cdbs/1/class/autotools.mk

PREFIX=`pwd`/packaging/debian/wireshark-giop-%s

binary-post-install/wireshark-giop-%s::
\trm -f $(PREFIX)/usr/lib/wireshark/plugins/%s/*.a
""" % (base, base, version), 0o755)

    create_file(preserve, "packaging/debian/control", """Source: wireshark-giop-%s
Section: net
Priority: optional
Maintainer: %s <%s>
Standards-Version: 3.6.1.0
Build-Depends: wireshark-dev, autotools-dev, debhelper, cdbs

Package: wireshark-giop-%s
Architecture: any
Depends: wireshark (= %s), ${shlibs:Depends}
Description: GIOP dissector for CORBA interface %s
 This package provides a dissector for GIOP (General Inter-ORB
 Protocol) for the Wireshark protocol analyser. It decodes the CORBA
 (Common Object Request Broker Architecture) interfaces described
 in the IDL (Interface Definition Language) file '%s.idl'.
""" % (base, name, email, base, deb, base, base))

    # NOTE(review): Debian changelog format requires exactly two spaces
    # before '*' and before the date; reconstructed here from a
    # whitespace-mangled source - confirm.
    create_file(preserve, "packaging/debian/changelog",
                """wireshark-giop-%s (0.0.1-1) unstable; urgency=low

  * Automatically created package.

 -- %s <%s>  %s
""" % (base, name, email, rfc))

    create_file(preserve, "packaging/debian/copyright",
                """This package has been created automatically by idl2deb on
%s for Debian GNU/Linux.

Wireshark: https://www.wireshark.org/

Copyright:

GPL, as evidenced by existence of GPL license file \"COPYING\".
(the GNU GPL may be viewed on Debian systems in
/usr/share/common-licenses/GPL)
""" % (iso))
+
def get_wrs_version():
    """Detect version of wireshark-dev package.

    Returns (version, deb): *deb* is the raw dpkg version string and
    *version* is that string with any epoch prefix (before ':') and
    Debian revision suffix (after '-') stripped.
    """
    deb = os.popen(
        "dpkg-query -W --showformat='${Version}' wireshark-dev").read()
    # Bug fix: the original called string.find(), a Python-2-only module
    # function that no longer exists under this script's python3 shebang;
    # use the str.find() method instead.
    debv = deb.find("-")
    if debv == -1:
        debv = len(deb)
    version = deb[deb.find(":")+1:debv]
    return version, deb
+
def get_time():
    """Return the current UTC time as an (ISO-like, RFC-2822) string pair."""
    now = time.gmtime()
    iso_fmt = "%Y-%m-%d %H:%M:%S +0000"
    rfc_fmt = "%a, %d %b %Y %H:%M:%S +0000"
    return time.strftime(iso_fmt, now), time.strftime(rfc_fmt, now)
+
def main():
    """Generate the Debian packaging files, then run dpkg-buildpackage."""
    opts = process_opts(sys.argv)
    iso, rfc = get_time()
    version, deb = get_wrs_version()
    create_files(version, deb,
                 opts.email, opts.idl, opts.name, opts.preserve,
                 iso, rfc)
    # Extra options from -d/--dbopts are passed straight through.
    os.system("dpkg-buildpackage " + opts.dbopts)
+
def process_opts(argv):
    """Process command line options.

    Exits with status 1 when the mandatory IDL file option is missing or
    the file is not readable.
    NOTE(review): *argv* is accepted but unused - parse_args() reads
    sys.argv itself.
    """
    parser = optparse.OptionParser(
        version=scriptinfo,
        description="""Example:
%prog -e me@foo.net -i bar.idl -n \"My Name\" -d \"-rfakeroot -uc -us\"""")
    parser.add_option("-d", "--dbopts",
                      default="", metavar="opts",
                      help="options for dpkg-buildpackage")
    parser.add_option("-e", "--email", metavar="address",
                      default="invalid@invalid.invalid",
                      help="use e-mail address")
    parser.add_option("-i", "--idl", metavar="idlfile",
                      help="IDL file to use (mandatory)")
    parser.add_option("-n", "--name", default="No Name",
                      help="use user name", metavar="name")
    parser.add_option("-p", "--preserve", action="store_true",
                      help="do not overwrite files")
    opts, args = parser.parse_args()
    if not opts.idl:
        print("mandatory IDL file parameter missing")
        sys.exit(1)
    if not os.access(opts.idl, os.R_OK):
        print("IDL file not accessible")
        sys.exit(1)
    return opts
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/idl2wrs b/tools/idl2wrs
new file mode 100755
index 0000000..7a51f4b
--- /dev/null
+++ b/tools/idl2wrs
@@ -0,0 +1,114 @@
#!/bin/sh
#
# File : idl2wrs
#
# Author : Frank Singleton (frank.singleton@ericsson.com)
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a simple shell script wrapper for the IDL to
# Wireshark dissector code.
#
# ie: wireshark_be.py and wireshark_gen.py
#
# This file is used to generate "Wireshark" dissectors from IDL descriptions.
# The output language generated is "C". It will generate code to use the
# GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at https://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net/
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#


# Must at least supply an IDL file

if [ $# -lt 1 ]; then
    echo "idl2wrs Error: no IDL file specified."
    echo "Usage: idl2wrs idl_file_name"
    exit 1;
fi

# Check the file name for valid characters.
# Implementation based on Dave Taylor's validalnum shell script from his book,
# "Wicked Cool Shell Scripts", as well as Mark Rushakoff's answer he provided
# to the question posted at stackoverflow.com entitled, "How can I use the
# UNIX shell to count the number of times a letter appears in a text file?"
# (Expansions are quoted so file names containing spaces don't break the test.)
file=$(basename "$1")
compressed="$(echo "$file" | sed 's/[^[:alnum:]._]//g')"
if [ "$compressed" != "$file" ]; then
    echo "idl2wrs Error: Invalid file name: $file"
    exit 1;
fi

# Only allow one '.' at most.
count=$(echo "$compressed" | awk -F. '{c += NF - 1} END {print c}')
if [ "$count" -gt 1 ] ; then
    echo "idl2wrs Error: Invalid file name: $file"
    exit 1;
fi

#
# Run wireshark backend, looking for wireshark_be.py and wireshark_gen.py
# in pythons's "site-packages" directory. If cannot find that, then
# try looking in current directory. If still cannot, then exit with
# error.

if [ -f "$PYTHONPATH/site-packages/wireshark_be.py" ] && [ -f "$PYTHONPATH/site-packages/wireshark_gen.py" ]; then
    exec omniidl -p "$PYTHONPATH/site-packages" -b wireshark_be "$@"
    # not reached: exec replaces the shell (C-style /* */ comments are not
    # valid sh and would have been executed as commands here).
fi

# Try current directory.

if [ -f ./wireshark_be.py ] && [ -f ./wireshark_gen.py ]; then
    exec omniidl -p ./ -b wireshark_be "$@"
    # not reached
fi

# Could not find both wireshark_be.py AND wireshark_gen.py
# So let's just try to run it without -p, hoping that the installation
# set up a valid path.

exec omniidl -b wireshark_be "$@"

# Not reached: a failing exec terminates a non-interactive shell.  The
# diagnostic below is kept as a last resort in case a shell continues.

echo "idl2wrs Error: Could not find both wireshark_be.py AND wireshark_gen.py."
echo "Please ensure you have the PYTHONPATH variable set, or that wireshark_be.py "
echo "and wireshark_gen.py exist in the current directory. "
echo
echo "On this system, PYTHONPATH is : $PYTHONPATH"
echo

exit 2


#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
diff --git a/tools/indexcap.py b/tools/indexcap.py
new file mode 100755
index 0000000..d18e76f
--- /dev/null
+++ b/tools/indexcap.py
@@ -0,0 +1,283 @@
+#!/usr/bin/env python3
+#
+# Tool to index protocols that appears in the given capture files
+#
+# The script list_protos_in_cap.sh does the same thing.
+#
+# Copyright 2009, Kovarththanan Rajaratnam <kovarththanan.rajaratnam@gmail.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+from optparse import OptionParser
+import multiprocessing
+import sys
+import os
+import subprocess
+import re
+import pickle
+import tempfile
+import filecmp
+import random
+
def extract_protos_from_file_proces(tshark, file):
    """Worker: run tshark on one capture and count its protocol layers.

    Returns (file, proto_hash) where proto_hash maps protocol name to the
    number of frames it appeared in; the hash is empty when tshark exits
    non-zero.  Returns None on KeyboardInterrupt so the parent can
    terminate the pool.
    NOTE(review): "proces" is a typo, kept because both pool dispatch
    sites reference this exact name.
    """
    try:
        cmd = [tshark, "-Tfields", "-e", "frame.protocols", "-r", file]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        stdout = stdout.decode('utf-8')
        if p.returncode != 0:
            return (file, {})

        proto_hash = {}
        for line in stdout.splitlines():
            # Each line is a colon-separated protocol chain such as
            # "eth:ip:tcp"; skip anything that does not look like one.
            if not re.match(r'^[\w:-]+$', line):
                continue

            for proto in line.split(':'):
                proto_hash[proto] = 1 + proto_hash.setdefault(proto, 0)

        return (file, proto_hash)
    except KeyboardInterrupt:
        return None
+
def extract_protos_from_file(tshark, num_procs, max_files, cap_files, cap_hash, index_file_name):
    """Scan *cap_files* in parallel for their protocol lists and pickle the
    updated {file: proto_hash} index to *index_file_name*.

    Exits the process: status 0 on success, 1 on KeyboardInterrupt.
    """
    pool = multiprocessing.Pool(num_procs)
    results = [pool.apply_async(extract_protos_from_file_proces, [tshark, file]) for file in cap_files]
    try:
        for (cur_item_idx, result_async) in enumerate(results):
            file_result = result_async.get()
            # Bug fix: the original tested "file_result[1] is {}", an
            # identity comparison against a fresh dict literal that is
            # never true, so failed files were still reported PROCESSED.
            # An empty protocol dict (tshark failure) is a falsy value.
            action = "SKIPPED" if not file_result[1] else "PROCESSED"
            print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result[0], os.path.getsize(file_result[0])))
            cap_hash.update(dict([file_result]))
    except KeyboardInterrupt:
        print("%s was interrupted by user" % (sys.argv[0]))
        pool.terminate()
        exit(1)

    # 'with' closes the index file even if pickling fails.
    with open(index_file_name, "wb") as index_file:
        pickle.dump(cap_hash, index_file)
    exit(0)
+
def dissect_file_process(tshark, tmpdir, file):
    """Worker: fully dissect one capture, redirecting stdout/stderr to
    temp files in *tmpdir*.

    Returns (file, success, stdout_path, stderr_path); the temp files are
    left in place for the caller to diff and delete.  Returns False on
    KeyboardInterrupt.
    NOTE(review): if the first mkstemp() raises, handle_o is unbound in
    the finally block and a NameError masks the original error - confirm
    whether this matters in practice.
    """
    try:
        (handle_o, tmpfile_o) = tempfile.mkstemp(suffix='_stdout', dir=tmpdir)
        (handle_e, tmpfile_e) = tempfile.mkstemp(suffix='_stderr', dir=tmpdir)
        cmd = [tshark, "-nxVr", file]
        p = subprocess.Popen(cmd, stdout=handle_o, stderr=handle_e)
        (stdout, stderr) = p.communicate()
        if p.returncode == 0:
            return (file, True, tmpfile_o, tmpfile_e)
        else:
            return (file, False, tmpfile_o, tmpfile_e)

    except KeyboardInterrupt:
        return False

    finally:
        os.close(handle_o)
        os.close(handle_e)
+
def dissect_files(tshark, tmpdir, num_procs, max_files, cap_files):
    """Dissect every capture in *cap_files* in parallel, printing a
    PASSED/FAILED line per file.

    Success is tshark's exit status as reported by dissect_file_process();
    the stdout/stderr temp files that worker creates remain in *tmpdir*.
    Exits with status 1 on KeyboardInterrupt.
    """
    pool = multiprocessing.Pool(num_procs)
    results = [pool.apply_async(dissect_file_process, [tshark, tmpdir, file]) for file in cap_files]
    try:
        for (cur_item_idx,result_async) in enumerate(results):
            file_result = result_async.get()
            action = "FAILED" if file_result[1] is False else "PASSED"
            print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result[0], os.path.getsize(file_result[0])))
    except KeyboardInterrupt:
        print("%s was interrupted by user" % (sys.argv[0]))
        pool.terminate()
        exit(1)
+
def compare_files(tshark_bin, tmpdir, tshark_cmp, num_procs, max_files, cap_files):
    """Dissect each capture with two tshark builds and diff the results.

    Runs *tshark_bin* and *tshark_cmp* over every file in *cap_files* (in
    parallel, *num_procs* workers) and compares exit status, stdout and
    stderr captures, printing one verdict line per build and file.  The
    temp dump files are removed after each comparison.  Exits with status
    1 on KeyboardInterrupt.
    """
    pool = multiprocessing.Pool(num_procs)
    results_bin = [pool.apply_async(dissect_file_process, [tshark_bin, tmpdir, file]) for file in cap_files]
    results_cmp = [pool.apply_async(dissect_file_process, [tshark_cmp, tmpdir, file]) for file in cap_files]
    try:
        for (cur_item_idx, (result_async_bin, result_async_cmp)) in enumerate(zip(results_bin, results_cmp)):
            file_result_bin = result_async_bin.get()
            file_result_cmp = result_async_cmp.get()
            # Bug fix: these used to be independent 'if' statements with the
            # 'else' attached only to the stderr check, so a matching stderr
            # overwrote an earlier exit-code or stdout failure with PASSED.
            # The first difference found now decides the verdict.
            if file_result_cmp[1] is False or file_result_bin[1] is False:
                action = "FAILED (exitcode)"
            elif not filecmp.cmp(file_result_bin[2], file_result_cmp[2]):
                action = "FAILED (stdout)"
            elif not filecmp.cmp(file_result_bin[3], file_result_cmp[3]):
                action = "FAILED (stderr)"
            else:
                action = "PASSED"
            os.remove(file_result_bin[2])
            os.remove(file_result_cmp[2])
            os.remove(file_result_bin[3])
            os.remove(file_result_cmp[3])

            print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_bin[0], os.path.getsize(file_result_bin[0])))
            print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_cmp[0], os.path.getsize(file_result_cmp[0])))
    except KeyboardInterrupt:
        print("%s was interrupted by user" % (sys.argv[0]))
        pool.terminate()
        exit(1)
+
def list_all_proto(cap_hash):
    """Merge the per-file protocol counters into one global counter dict."""
    totals = {}
    for per_file_counts in cap_hash.values():
        for proto, count in per_file_counts.items():
            totals[proto] = totals.get(proto, 0) + count
    return totals
+
def list_all_files(cap_hash):
    """Return the indexed capture file names as a sorted list."""
    return sorted(cap_hash.keys())
+
def list_all_proto_files(cap_hash, proto_comma_delit):
    """Return the indexed files whose protocol set intersects the
    comma-separated protocol list *proto_comma_delit*."""
    wanted = {entry.strip() for entry in proto_comma_delit.split(',')}
    matches = []
    for file, per_file_counts in cap_hash.items():
        if wanted.intersection(per_file_counts):
            matches.append(file)
    return matches
+
def index_file_action(options):
    """Truthy when the requested action only needs an existing index file.

    Note: like any 'or' chain this returns the first truthy option value
    (which may be a string for --list-all-proto-files), not a bool.
    """
    return (options.list_all_proto or options.list_all_files
            or options.list_all_proto_files or options.dissect_files)
+
def find_capture_files(paths, cap_hash):
    """Expand *paths* (plain files or directory trees) into the list of
    capture files that are not yet present in *cap_hash*."""
    new_files = []
    for path in paths:
        if os.path.isdir(path):
            path = os.path.normpath(path)
            for root, _dirs, names in os.walk(path):
                for name in names:
                    candidate = os.path.join(root, name)
                    if candidate not in cap_hash:
                        new_files.append(candidate)
        elif path not in cap_hash:
            new_files.append(path)
    return new_files
+
def find_tshark_executable(bin_dir):
    """Return the path of an executable tshark[.exe] in *bin_dir*, or None."""
    for candidate in ("tshark.exe", "tshark"):
        path = os.path.join(bin_dir, candidate)
        if os.access(path, os.X_OK):
            return path
    return None
+
def main():
    """Entry point: build or query a pickled index of protocols per capture.

    Depending on the options this either (a) scans capture files with
    tshark and stores {file: {proto: frame_count}} into the index file,
    (b) lists indexed protocols/files, (c) re-dissects indexed files, or
    (d) compares the output of two tshark builds over the indexed files.
    """
    parser = OptionParser(usage="usage: %prog [options] index_file [file_1|dir_1 [.. file_n|dir_n]]")
    parser.add_option("-d", "--dissect-files", dest="dissect_files", default=False, action="store_true",
                      help="Dissect all matching files")
    parser.add_option("-m", "--max-files", dest="max_files", default=sys.maxsize, type="int",
                      help="Max number of files to process")
    parser.add_option("-b", "--binary-dir", dest="bin_dir", default=os.getcwd(),
                      help="Directory containing tshark executable")
    parser.add_option("-c", "--compare-dir", dest="compare_dir", default=None,
                      help="Directory containing tshark executable which is used for comparison")
    parser.add_option("-j", dest="num_procs", default=multiprocessing.cpu_count(), type=int,
                      help="Max number of processes to spawn")
    parser.add_option("-r", "--randomize", default=False, action="store_true",
                      help="Randomize the file list order")
    parser.add_option("", "--list-all-proto", dest="list_all_proto", default=False, action="store_true",
                      help="List all protocols in index file")
    parser.add_option("", "--list-all-files", dest="list_all_files", default=False, action="store_true",
                      help="List all files in index file")
    parser.add_option("", "--list-all-proto-files", dest="list_all_proto_files", default=False,
                      metavar="PROTO_1[, .. PROTO_N]",
                      help="List all files in index file containing the given protocol")

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("index_file is a required argument")

    if len(args) == 1 and not index_file_action(options):
        parser.error("one capture file/directory must be specified")

    if options.dissect_files and not options.list_all_files and not options.list_all_proto_files:
        parser.error("--list-all-files or --list-all-proto-files must be specified")

    if options.dissect_files and not options.compare_dir is None:
        parser.error("--dissect-files and --compare-dir cannot be specified at the same time")

    index_file_name = args.pop(0)
    paths = args
    cap_hash = {}
    try:
        # Load any existing index so already-indexed files are skipped.
        index_file = open(index_file_name, "rb")
        print("index file: %s [OPENED]" % index_file.name)
        cap_hash = pickle.load(index_file)
        index_file.close()
        print("%d files" % len(cap_hash))
    except IOError:
        print("index file: %s [NEW]" % index_file_name)

    if options.list_all_proto:
        print(list_all_proto(cap_hash))
        exit(0)

    indexed_files = []
    if options.list_all_files:
        indexed_files = list_all_files(cap_hash)
        print(indexed_files)

    if options.list_all_proto_files:
        indexed_files = list_all_proto_files(cap_hash, options.list_all_proto_files)
        print(indexed_files)

    tshark_bin = find_tshark_executable(options.bin_dir)
    if not tshark_bin is None:
        print("tshark: %s [FOUND]" % tshark_bin)
    else:
        print("tshark: %s [MISSING]" % tshark_bin)
        exit(1)

    if not options.compare_dir is None:
        # tshark_cmp is only bound here; the compare_files() branch below
        # is reachable only when --compare-dir was given, so this is safe.
        tshark_cmp = find_tshark_executable(options.compare_dir)
        if not tshark_cmp is None:
            print("tshark: %s [FOUND]" % tshark_cmp)
        else:
            print("tshark: %s [MISSING]" % tshark_cmp)
            exit(1)

    if options.dissect_files or options.compare_dir:
        cap_files = indexed_files
    elif options.list_all_proto_files or options.list_all_files:
        exit(0)
    else:
        cap_files = find_capture_files(paths, cap_hash)

    if options.randomize:
        random.shuffle(cap_files)
    else:
        cap_files.sort()

    options.max_files = min(options.max_files, len(cap_files))
    print("%u total files, %u working files" % (len(cap_files), options.max_files))
    cap_files = cap_files[:options.max_files]
    if options.compare_dir or options.dissect_files:
        tmpdir = tempfile.mkdtemp()
        print("Temporary working dir: %s" % tmpdir)
    try:
        if options.compare_dir:
            compare_files(tshark_bin, tmpdir, tshark_cmp, options.num_procs, options.max_files, cap_files)
        elif options.dissect_files:
            dissect_files(tshark_bin, tmpdir, options.num_procs, options.max_files, cap_files)
        else:
            extract_protos_from_file(tshark_bin, options.num_procs, options.max_files, cap_files, cap_hash, index_file_name)
    finally:
        # Dissection may result in a non-empty directory.
        if options.compare_dir:
            os.rmdir(tmpdir)
if __name__ == "__main__":
    main()
diff --git a/tools/json2pcap/json2pcap.py b/tools/json2pcap/json2pcap.py
new file mode 100755
index 0000000..2a059ad
--- /dev/null
+++ b/tools/json2pcap/json2pcap.py
@@ -0,0 +1,686 @@
+#!/usr/bin/env python3
+
+#
+# Copyright 2020, Martin Kacer <kacer.martin[AT]gmail.com> and contributors
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import sys
+import ijson
+import operator
+import copy
+import binascii
+import array
+import argparse
+import string
+import random
+import math
+import hashlib
+import re
+from collections import OrderedDict
+from scapy import all as scapy
+
# Field anonymization class
class AnonymizedField:
    '''
    The Anonymization field object specifying anonymization
    :field arg: field name, optionally with a byte-range suffix such as
                "ip.src_raw[2:6]" (start/end follow Python slice semantics)
    :type arg: anonymization type [0 masking 0xff, 1 anonymization shake_256]
    :start arg: If specified, the anonymization starts at given byte number
    :end arg: If specified, the anonymization ends at given byte number
    '''
    def __init__(self, field, type):
        self.field = field
        self.type = type
        self.start = None
        self.end = None

        # Split an optional "[start:end]" range off the field name.
        match = re.search(r'(\S+)\[(-?\d+)?:(-?\d+)?\]', field)
        if match:
            self.field = match.group(1)
            self.start = match.group(2)
            if self.start is not None:
                self.start = int(self.start)
            self.end = match.group(3)
            if self.end is not None:
                self.end = int(self.end)

    # Returns the new field value after anonymization
    def anonymize_field_shake256(self, field, type, salt):
        """Hash *field* (hex string) with shake_256 keyed by *salt*, producing
        a replacement string of exactly the same length."""
        shake = hashlib.shake_256(str(field + ':' + salt).encode('utf-8'))

        # String type, output should be ASCII
        if type in [26, 27, 28]:
            length = math.ceil(len(field)/4)
            shake_hash = shake.hexdigest(length)
            ret_string = array.array('B', str.encode(shake_hash))
            ret_string = ''.join('{:02x}'.format(x) for x in ret_string)
        # Other types, output could be HEX
        else:
            length = math.ceil(len(field)/2)
            shake_hash = shake.hexdigest(length)
            ret_string = shake_hash

        # Correct the string length
        if len(ret_string) < len(field):
            ret_string = ret_string.ljust(len(field))
        if len(ret_string) > len(field):
            ret_string = ret_string[:len(field)]

        return ret_string

    def anonymize_field(self, _h, _t, salt):
        """Anonymize hex string *_h* within the configured [start:end] range.

        Returns [new_hex, mask] where mask marks anonymized bytes with 'ff'
        and untouched bytes with '0' characters (same length as *_h*).
        """
        # BUG FIX: use explicit None checks so an explicitly requested bound
        # of 0 is honored (a bare truthiness test silently ignored it).
        s = 0
        e = None
        if self.start is not None:
            s = self.start
        if self.end is not None:
            e = self.end
            if e < 0:
                e = len(_h) + e
        else:
            e = len(_h)
        h = _h[s:e]
        if self.type == 0:
            # masking: overwrite the selected bytes with 0xff
            h = 'f' * len(h)
        elif self.type == 1:
            # anonymization: deterministic salted hash of the selected bytes
            h = self.anonymize_field_shake256(h, _t, salt)

        h_mask = '0' * len(_h[0:s]) + 'f' * len(h) + '0' * len(_h[e:])
        h = _h[0:s] + h + _h[e:]
        return [h, h_mask]
+
def make_unique(key, dct):
    """Return *key*, or *key* suffixed with "_<n>", so it is absent from *dct*."""
    candidate = key
    suffix = 0
    while candidate in dct:
        suffix += 1
        candidate = '{}_{}'.format(key, suffix)
    return candidate
+
+
def parse_object_pairs(pairs):
    """object_pairs_hook: build an OrderedDict, renaming duplicate keys."""
    result = OrderedDict()
    for key, value in pairs:
        unique = make_unique(key, result) if key in result else key
        result[unique] = value
    return result
+
+#
+# ********* PY TEMPLATES *********
+#
def read_py_function(name):
    """Return the source text of function *name* from this script file.

    Scans this file line by line and copies everything from the first line
    containing "def <name>" until the next non-empty line at the same
    indentation level.
    """
    captured = ''
    capturing = False
    def_indent = 0

    with open(__file__) as src:
        for line in src:
            cur_indent = len(line) - len(line.lstrip())
            if line.find("def " + name) != -1:
                capturing = True
                def_indent = cur_indent
            elif capturing and cur_indent == def_indent and len(line) > 1:
                capturing = False
            if capturing:
                captured += line
    return captured
+
+py_header = """#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# File generated by json2pcap.py
+# json2pcap.py created by Martin Kacer, 2020
+
+import os
+import binascii
+import array
+import sys
+import subprocess
+from collections import OrderedDict
+from scapy import all as scapy
+
+# *****************************************************
+# * PACKET PAYLOAD GENERATED FROM INPUT PCAP *
+# * Modify this function to edit the packet *
+# *****************************************************
+def main():
+ d = OrderedDict()
+"""
+
+py_footer = """ generate_pcap(d)
+
+# *****************************************************
+# * FUNCTIONS from TEMPLATE *
+# * Do not edit these functions if not required *
+# *****************************************************
+
+"""
+py_footer = py_footer + read_py_function("to_bytes")
+py_footer = py_footer + read_py_function("lsb")
+py_footer = py_footer + read_py_function("multiply_strings")
+py_footer = py_footer + read_py_function("rewrite_frame")
+py_footer = py_footer + read_py_function("assemble_frame")
+py_footer = py_footer + read_py_function("generate_pcap")
+
+py_footer = py_footer + """
+
+if __name__ == '__main__':
+ main()
+"""
+#
+# ***** End of PY TEMPLATES ******
+#
+
+
+
+#
+# ********** FUNCTIONS ***********
+#
+
def raw_flat_collector(node):
    """Recursively yield (key, value) pairs for keys ending in "_raw".

    Walks nested dict-like objects.  Values that are not mappings and are
    not under a *_raw key (e.g. lists, scalars) yield nothing.
    NOTE: parameter renamed from `dict` (which shadowed the builtin);
    every call site passes it positionally.
    """
    if hasattr(node, 'items'):
        for k, v in node.items():
            if k.endswith("_raw"):
                yield k, v
            else:
                yield from raw_flat_collector(v)
+
+
# d - input dictionary, parsed from json
# r - result dictionary
# frame_name - parent protocol name
# frame_position - parent protocol position
def py_generator(d, r, frame_name='frame_raw', frame_position=0):
    """Flatten the tshark JSON field tree *d* into the result dict *r*.

    Each "*_raw" entry becomes
        r[<key>] = [parent_frame, hex, relative_position, length, bitmask, type]
    and "*_tree" nodes are descended into, with the matching "<proto>_raw"
    sibling (if any) becoming the new parent frame.
    """
    # BUG FIX: the original guard read `d is None or d is None`; it clearly
    # intended to also bail out when the result dictionary is missing.
    if d is None or r is None:
        return

    # Only mapping nodes carry fields.
    if not hasattr(d, 'items'):
        return

    def _store(key_name, entry):
        # entry layout: [hex string, absolute position, length, bitmask, type]
        h = entry[0]
        p = entry[1]
        l = entry[2] * 2
        b = entry[3]
        t = entry[4]
        # The hex string length is authoritative when the length field differs.
        if len(h) != l:
            l = len(h)

        # Store the position relative to the parent frame.
        p = p - frame_position

        key = str(key_name).replace('.', '_')
        key = make_unique(key, r)

        fn = frame_name.replace('.', '_')
        if fn == key:
            fn = None
        r[key] = [fn, h, p, l, b, t]

    def _parent_frame(key_name):
        # If a preceding raw protocol frame exists ("<proto>_raw" next to
        # "<proto>_tree"), use it as the parent for the subtree.
        key = key_name
        if key.endswith("_tree") or ("_tree_" in key):
            key = key.replace('_tree', '')
        raw_key = key + "_raw"
        if raw_key in d:
            return raw_key, d[raw_key][1]
        return frame_name, frame_position

    for k, v in d.items():
        # leaf: raw field entries (no recursion)
        if k.endswith("_raw") or "_raw_" in k:
            if isinstance(v[1], (list, tuple)) or isinstance(v[2], (list, tuple)):
                # multiple occurrences of the same raw field
                for _v in v:
                    _store(k, _v)
            else:
                _store(k, v)
        # recursion into subtrees
        elif isinstance(v, dict):
            fn, fp = _parent_frame(k)
            py_generator(v, r, fn, fp)
        elif isinstance(v, (list, tuple)):
            # NOTE(review): as in the original code, fn/fp are computed here
            # but the recursion keeps the *current* parent frame; preserved
            # as-is -- confirm against upstream before changing.
            fn, fp = _parent_frame(k)
            for _v in v:
                py_generator(_v, r, frame_name, frame_position)
+
# To emulate Python 3.2
def to_bytes(n, length, endianess='big'):
    """Return integer *n* as a bytearray of at least *length* bytes."""
    hex_digits = '%x' % n
    padded = ('0' * (len(hex_digits) % 2) + hex_digits).zfill(length * 2)
    raw = bytearray.fromhex(padded)
    return raw if endianess == 'big' else raw[::-1]
+
# Returns the index, counting from 0, of the least significant set bit in x
def lsb(x):
    lowest_set = x & -x
    return lowest_set.bit_length() - 1
+
# Replace parts of original_string by new_string, only if mask in the byte is not ff
def multiply_strings(original_string, new_string, mask):
    """Merge two hex strings byte-wise: keep *new_string*, except where the
    corresponding mask byte is 'ff', where the original byte is restored.
    A mask of None returns *new_string* unchanged."""
    if mask is None:
        return new_string
    merged = new_string
    limit = min(len(original_string), len(new_string), len(mask))
    for pos in range(0, limit, 2):
        if mask[pos:pos + 2] == 'ff':
            merged = merged[:pos] + original_string[pos:pos + 2] + merged[pos + 2:]
    return merged
+
# Rewrite frame
# h - hex bytes
# p - position
# l - length
# b - bitmask
# t - type
# frame_amask - optional, anonymization mask (00 - not anonymized byte, ff - anonymized byte)
def rewrite_frame(frame_raw, h, p, l, b, t, frame_amask=None):
    # Splice hex string h into frame_raw at position p.  All of p/l are in
    # hex-digit units (2 per byte).  Bytes already marked 'ff' in
    # frame_amask are preserved (already anonymized, must not be clobbered).
    if p < 0 or l < 0 or h is None:
        return frame_raw

    # no bitmask: straight substring replacement
    if(b == 0):
        # the hex string length is authoritative if it disagrees with l
        if (len(h) != l):
            l = len(h)
        frame_raw_new = frame_raw[:p] + h + frame_raw[p + l:]
        return multiply_strings(frame_raw, frame_raw_new, frame_amask)
    # bitmask: only replace the bits selected by b
    else:
        # get hex string from frame which will be replaced
        _h = frame_raw[p:p + l]

        # add 0 padding to have correct length
        if (len(_h) % 2 == 1):
            _h = '0' + _h
        if (len(h) % 2 == 1):
            h = '0' + h

        # Only replace bits defined by mask
        # new_hex = (old_hex & !mask) | (new_hex & mask)
        _H = bytearray.fromhex(_h)
        _H = array.array('B', _H)

        # mask bytes for the field, then shifted so they align with the
        # extracted substring (p/2 is the byte offset within the frame)
        M = to_bytes(b, len(_H))
        M = array.array('B', M)
        # shift mask aligned to position
        for i in range(len(M)):
            if (i + p / 2) < len(M):
                M[i] = M[i + int(p / 2)]
            else:
                M[i] = 0x00

        H = bytearray.fromhex(h)
        H = array.array('B', H)

        # for i in range(len(_H)):
        #     print "{0:08b}".format(_H[i]),
        # print
        # for i in range(len(M)):
        #     print "{0:08b}".format(M[i]),
        # print

        # shift each new byte up to the mask's least significant bit, then
        # merge it into the old byte under the mask
        j = 0
        for i in range(len(_H)):
            if (M[i] != 0):
                v = H[j] << lsb(M[i])
                # print "Debug: {0:08b}".format(v),
                _H[i] = (_H[i] & ~M[i]) | (v & M[i])
                # print "Debug: " + str(_H[i]),
                j = j + 1

        # for i in range(len(_H)):
        #     print "{0:08b}".format(_H[i]),
        # print

        masked_h = binascii.hexlify(_H)
        masked_h = masked_h.decode('ascii')

        frame_raw_new = frame_raw[:p] + str(masked_h) + frame_raw[p + l:]
        return multiply_strings(frame_raw, frame_raw_new, frame_amask)
+
+
def assemble_frame(d, frame_time):
    """Re-assemble the top-level frame hex string from field dictionary *d*.

    Child fields are repeatedly folded into their parent frames (via
    rewrite_frame) until only flat top-level entries remain; *d* is consumed
    in the process (merged entries are deleted).

    frame_time is currently unused; kept for interface compatibility.
    """
    # NOTE: the original also read d['frame_raw'][1] into an unused local
    # named `input` (shadowing the builtin); removed.
    isFlat = False
    linux_cooked_header = False
    while not isFlat:
        isFlat = True
        _d = d.copy()
        for key, val in _d.items():
            h = str(val[1])  # hex
            p = val[2] * 2   # position
            l = val[3] * 2   # length
            b = val[4]       # bitmask
            t = val[5]       # type

            if key == "sll_raw":
                linux_cooked_header = True

            # only merge leaf nodes, i.e. nodes that are not the parent of
            # any other entry still present in d
            isParent = False
            for k, v in d.items():
                if v[0] == key:
                    isParent = True
                    isFlat = False
                    break

            if not isParent and val[0] is not None:
                d[val[0]][1] = rewrite_frame(d[val[0]][1], h, p, l, b, t)
                del d[key]

    output = d['frame_raw'][1]

    # for Linux cooked header replace dest MAC and remove two bytes to
    # reconstruct a normal Ethernet frame
    if linux_cooked_header:
        output = "000000000000" + output[6*2:]        # replace dest MAC
        output = output[:12*2] + "" + output[14*2:]   # remove two bytes before Protocol

    return output
+
def generate_pcap(d):
    """Assemble the frame from *d*, report any modifications, and write a
    single-packet pcap next to this script (argv[0] + ".pcap")."""
    # 1. Assemble frame
    before = d['frame_raw'][1]
    after = assemble_frame(d, None)
    print(before)
    print(after)

    # 2. Testing: compare input and output for a not-modified json
    if before != after:
        print("Modified frames: ")
        print(before)
        print(after)
        if len(before) == len(after):
            diff_positions = [i for i in range(len(before)) if before[i] != after[i]]
            print(diff_positions)

    # 3. Generate pcap
    outfile = sys.argv[0] + ".pcap"
    writer = scapy.PcapWriter(outfile, append=False, sync=False)
    packet = scapy.Packet(bytearray.fromhex(after))
    writer.write(packet)
    print("Generated " + outfile)
+
#
# ************ MAIN **************
#
VERSION = "1.1"

# Command-line interface.  NOTE: parsing happens at module import time,
# since this file is a standalone script.  Several user-facing typos in the
# help text are fixed here ("inpout", stray "The", "is no performing", ...).
parser = argparse.ArgumentParser(description="""
json2pcap {version}

Utility to generate pcap from json format.

Packet modification:
In input json it is possible to modify the raw values of decoded fields.
The output pcap will include the modified values. The algorithm of
generating the output pcap is to get all raw hex fields from input json and
then assembling them by layering from longest (less decoded fields) to
shortest (more decoded fields). It means if the modified raw field is
shorter field (more decoded field) it takes precedence against modification
in longer field (less decoded field). If the json includes duplicated raw
fields with same position and length, the behavior is not deterministic.
For manual packet editing it is always possible to remove any not required
raw fields from json, only the frame_raw field is mandatory for reconstruction.

Packet modification with -p switch:
The python script is generated instead of pcap. This python script when
executed will generate the pcap of 1st packet from input json. The
generated code includes the decoded fields and the function to assemble the
packet. This enables to modify the script and programmatically edit or
encode the packet variables. The assembling algorithm is different, because
the decoded packet fields are relative and point to the parent node with their
position (compared to input json which has absolute positions).

Pcap masking and anonymization with -m and -a switch:
The script allows to mask or anonymize the selected json raw fields. If the
fields are selected and located on lower protocol layers, they are not
overwritten by upper fields which are not marked by these switches.
The pcap masking and anonymization can be performed in the following way:

tshark -r orig.pcap -T json -x | \ python json2pcap.py -m "ip.src_raw"
-a "ip.dst_raw" -o anonymized.pcap
In this example the ip.src_raw field is masked with ffffffff by byte values
and ip.dst_raw is hashed by randomly generated salt.

Additionally the following syntax is valid to anonymize portion of field
tshark -r orig.pcap -T json -x | \ python json2pcap.py -m "ip.src_raw[2:]"
-a "ip.dst_raw[:-2]" -o anonymized.pcap
Where the src_ip first byte is preserved and dst_ip last byte is preserved.
And the same can be achieved by
tshark -r orig.pcap -T json -x | \ python json2pcap.py -m "ip.src_raw[2:8]"
-a "ip.dst_raw[0:6]" -o anonymized.pcap

Masking and anonymization limitations are mainly the following:
- In case the tshark is performing reassembling from multiple frames, the
backward pcap reconstruction is not properly performed and can result in
malformed frames.
- The new values in the fields could violate the field format, as the
json2pcap is not performing correct protocol encoding with respect to
allowed values of the target field and field encoding.

""".format(version=VERSION), formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
parser.add_argument('-i', '--infile', nargs='?', help='json generated by tshark -T json -x\nor by tshark -T jsonraw (not preserving frame timestamps).\nIf no input file is specified script reads from stdin.')
parser.add_argument('-o', '--outfile', required=True, help='output pcap filename')
parser.add_argument('-p', '--python', help='generate python payload instead of pcap (only 1st packet)', default=False, action='store_true')
parser.add_argument('-m', '--mask', help='mask the specific raw field (e.g. -m "ip.src_raw" -m "ip.dst_raw[2:6]")', action='append', metavar='MASKED_FIELD')
parser.add_argument('-a', '--anonymize', help='anonymize the specific raw field (e.g. -a "ip.src_raw[2:]" -a "ip.dst_raw[:-2]")', action='append', metavar='ANONYMIZED_FIELD')
parser.add_argument('-s', '--salt', help='salt use for anonymization. If no value is provided it is randomized.', default=None)
parser.add_argument('-v', '--verbose', help='verbose output', default=False, action='store_true')
args = parser.parse_args()
+
# read JSON
infile = args.infile
outfile = args.outfile

# Read from input file, or from stdin when no input file was given
if infile:
    data_file = open(infile)
# Read from pipe
else:
    data_file = sys.stdin

# Parse anonymization fields: maps field name -> AnonymizedField
anonymize = {}
if args.mask:
    for m in args.mask:
        if '_raw' not in m:
            # BUG FIX: report errors on stderr and exit with a non-zero
            # status (the original printed to stdout and exited with 0)
            print("Error: The specified fields by -m switch should be raw fields. " + m + " does not have _raw suffix", file=sys.stderr)
            sys.exit(1)
        af = AnonymizedField(m, 0)
        anonymize[af.field] = af
if args.anonymize:
    for a in args.anonymize:
        if '_raw' not in a:
            print("Error: The specified fields by -a switch should be raw fields. " + a + " does not have _raw suffix", file=sys.stderr)
            sys.exit(1)
        af = AnonymizedField(a, 1)
        anonymize[af.field] = af

input_frame_raw = ''
frame_raw = ''
frame_time = None

# Anonymization salt: use the provided one or generate a random 10-char one
salt = args.salt
if salt is None:
    salt = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
+
# Generate pcap
if args.python is False:
    # Normal mode: stream packets from the JSON input, apply edits /
    # masking / anonymization, and write each frame to the output pcap.
    pcap_out = scapy.PcapWriter(outfile, append=False, sync=False)

    # Iterate over packets in JSON
    for packet in ijson.items(data_file, "item", buf_size=200000):
        _list = []
        linux_cooked_header = False

        # get flat raw fields into _list
        for raw in raw_flat_collector(packet['_source']['layers']):
            if len(raw) >= 2:
                if (raw[0] == "frame_raw"):
                    # top-level frame: remember its hex and timestamp
                    frame_raw = raw[1][0]
                    frame_amask = "0"*len(frame_raw) # initialize anonymization mask
                    input_frame_raw = copy.copy(frame_raw)
                    frame_time = None
                    if 'frame.time_epoch' in packet['_source']['layers']['frame']:
                        frame_time = packet['_source']['layers']['frame']['frame.time_epoch']
                else:
                    # add into value list into raw[5] the field name
                    if isinstance(raw[1], list):
                        raw[1].append(raw[0])
                        _list.append(raw[1])
                    if (raw[0] == "sll_raw"):
                        linux_cooked_header = True

        # sort _list by position, longest (least decoded) fields first, so
        # that shorter (more decoded) fields are rewritten later and win
        sorted_list = sorted(_list, key=operator.itemgetter(1), reverse=False)
        sorted_list = sorted(sorted_list, key=operator.itemgetter(2), reverse=True)
        # print("Debug: " + str(sorted_list))

        # rewrite frame
        for raw in sorted_list:
            if len(raw) >= 6:
                h = str(raw[0]) # hex
                p = raw[1] * 2  # position
                l = raw[2] * 2  # length
                b = raw[3]      # bitmask
                t = raw[4]      # type
                # raw[5]        # field_name (added by script)
                h_mask = h      # hex for anonymization mask

                # anonymize fields
                if (raw[5] in anonymize):
                    [h, h_mask] = anonymize[raw[5]].anonymize_field(h, t, salt)

                # multi-occurrence fields carry lists of positions/lengths
                if (isinstance(p, (list, tuple)) or isinstance(l, (list, tuple))):
                    for r in raw:
                        _h = str(r[0]) # hex
                        _p = r[1] * 2  # position
                        _l = r[2] * 2  # length
                        _b = r[3]      # bitmask
                        _t = r[4]      # type
                        # raw[5]       # field_name (added by script)
                        _h_mask = _h   # hex for anonymization mask

                        # anonymize fields
                        if (raw[5] in anonymize):
                            [_h, _h_mask] = anonymize[raw[5]].anonymize_field(_h, _t, salt)

                        # print("Debug: " + str(raw))
                        frame_raw = rewrite_frame(frame_raw, _h, _p, _l, _b, _t, frame_amask)

                        # update anonymization mask
                        if (raw[5] in anonymize):
                            frame_amask = rewrite_frame(frame_amask, _h_mask, _p, _l, _b, _t)

                else:
                    # print("Debug: " + str(raw))
                    frame_raw = rewrite_frame(frame_raw, h, p, l, b, t, frame_amask)

                    # update anonymization mask
                    if (raw[5] in anonymize):
                        frame_amask = rewrite_frame(frame_amask, h_mask, p, l, b, t)

        # for Linux cooked header replace dest MAC and remove two bytes to reconstruct normal frame using text2pcap
        if (linux_cooked_header):
            frame_raw = "000000000000" + frame_raw[6 * 2:] # replace dest MAC
            frame_raw = frame_raw[:12 * 2] + "" + frame_raw[14 * 2:] # remove two bytes before Protocol

        # Testing: remove comment to compare input and output for not modified json
        if (args.verbose and input_frame_raw != frame_raw):
            print("Modified frames: ")
            s1 = input_frame_raw
            s2 = frame_raw
            print(s1)
            print(s2)
            if (len(s1) == len(s2)):
                d = [i for i in range(len(s1)) if s1[i] != s2[i]]
                print(d)

        new_packet = scapy.Packet(bytearray.fromhex(frame_raw))
        if frame_time:
            new_packet.time = float(frame_time)
        pcap_out.write(new_packet)
+
+# Generate python payload only for first packet
+else:
+ py_outfile = outfile + '.py'
+ f = open(py_outfile, 'w')
+
+ #for packet in json:
+ for packet in ijson.items(data_file, "item", buf_size=200000):
+ f.write(py_header)
+
+ r = OrderedDict({})
+
+ #print "packet = " + str(packet['_source']['layers'])
+ py_generator(packet['_source']['layers'], r)
+
+ for key, value in r.items():
+ f.write(" d['" + key + "'] =",)
+ f.write(" " + str(value) + "\n")
+
+ f.write(py_footer)
+
+ # Currently only first packet is used from pcap
+ f.close
+
+ print("Generated " + py_outfile)
+
+ break
diff --git a/tools/lemon/CMakeLists.txt b/tools/lemon/CMakeLists.txt
new file mode 100644
index 0000000..529eeae
--- /dev/null
+++ b/tools/lemon/CMakeLists.txt
@@ -0,0 +1,46 @@
# CMakeLists.txt
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#

# lemon is a build-time code generator; it must be compilable with the host
# toolchain even when cross-compiling.
add_executable(lemon lemon.c)

# NOTE(review): CMAKE_C_COMPILER is changed *after* add_executable(); in
# most CMake versions modifying the compiler at this point is unsupported
# and may have no effect on the already-declared target -- confirm intent.
if(DEFINED LEMON_C_COMPILER)
	set(CMAKE_C_COMPILER "${LEMON_C_COMPILER}")
	set(CMAKE_C_FLAGS "")
endif()

# To keep lemon.c as close to upstream as possible disable all warnings
if(CMAKE_C_COMPILER_ID MATCHES "MSVC")
	target_compile_options(lemon PRIVATE /w)
else()
	target_compile_options(lemon PRIVATE -w)
endif()
if(CMAKE_C_COMPILER_ID MATCHES "Clang")
	# Disable static analysis for lemon source code. These issues don't
	# affect Wireshark at runtime.
	target_compile_options(lemon PRIVATE -Xclang -analyzer-disable-all-checks)
endif()
# Keep sanitizer instrumentation out of the build-time code generator too.
if(DEFINED NO_SANITIZE_CFLAGS)
	target_compile_options(lemon PRIVATE ${NO_SANITIZE_CFLAGS})
endif()
if(DEFINED NO_SANITIZE_LDFLAGS)
	target_link_options(lemon PRIVATE ${NO_SANITIZE_LDFLAGS})
endif()

#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 8
# tab-width: 8
# indent-tabs-mode: t
# End:
#
# vi: set shiftwidth=8 tabstop=8 noexpandtab:
# :indentSize=8:tabSize=8:noTabs=false:
#
diff --git a/tools/lemon/README b/tools/lemon/README
new file mode 100644
index 0000000..59ed343
--- /dev/null
+++ b/tools/lemon/README
@@ -0,0 +1,52 @@
+The Lemon Parser Generator's home page is: https://www.hwaci.com/sw/lemon/
+Lemon seems now to be maintained at: https://sqlite.org/lemon.html
+
+Documentation is available at: https://sqlite.org/src/doc/trunk/doc/lemon.html
+Git mirror of the upstream Fossil repository: https://github.com/mackyle/sqlite
+
+The lempar.c and lemon.c are taken from sqlite and are modified as little as
+possible to make it easier to synchronize changes. Last updated at:
+
+ commit a913f942cf6b32b85de6428fd542b39458df2a88
+ Author: D. Richard Hipp <drh@hwaci.com>
+ Date: Wed Dec 28 14:03:47 2022 +0000
+
+ Version 3.40.1
+
+To check for changes (adjust "previous commit" accordingly):
+
+ git clone --depth=1000 https://github.com/sqlite/sqlite
+ cd sqlite/tools
+ git log -p 273ee15121.. lemon.c lempar.c
+
+To create a Wireshark version (steps 1-3) and validate the result (steps 4-5):
+1. Copy the two files.
+2. Run ./apply-patches.sh to apply local patches.
+3. Update the commit in this README (to ensure the base is known).
+4. Check for CSA warnings: clang-check -analyze lemon.c --
+5. Build and run lemon: ninja epan/dfilter/grammar.c
+
+To keep the lemon source as pristine as possible from upstream all warnings
+when building lemon itself are disabled. Only patch the lemon source code as
+a last resort.
+
+Warnings for lemon generated code are few in practice with -Wall -Wextra. These
+are preferably selectively disabled in the Wireshark build.
+
+The patches to lemon to silence compiler warnings and static analysis reports
+(for edge cases that cannot occur) are not proposed upstream because that
+process is difficult. From <https://www.sqlite.org/copyright.html>:
+
+ SQLite is open-source, meaning that you can make as many copies of it as you
+ want and do whatever you want with those copies, without limitation. But
+ SQLite is not open-contribution. In order to keep SQLite in the public
+ domain and ensure that the code does not become contaminated with
+ proprietary or licensed content, the project does not accept patches from
+ unknown persons.
+
+A note about the Lemon patches: we have no intention of forking Lemon and
+maintaining it. These patches are written to address static analyzer warnings without
+actually modifying the functionality. If upstream is willing to accept patches,
+then that would be great and the intention is to make it as easy as possible.
+The lemon and lempar patches are dedicated to the public domain, as set forward
+in Creative Commons Zero v1.0 Universal (IANAL, but I hope this is sufficient).
diff --git a/tools/lemon/apply-patches.sh b/tools/lemon/apply-patches.sh
new file mode 100755
index 0000000..e445c87
--- /dev/null
+++ b/tools/lemon/apply-patches.sh
@@ -0,0 +1,16 @@
#!/bin/sh -e
# Patch lemon.c and lempar.c to silence static analyzer warnings.
# See also tools/lemon/README

# Strip trailing whitespace.
# BUG FIX: the original pattern ' \+$' removed only trailing spaces;
# [[:blank:]] also removes trailing tabs, matching the stated intent.
sed -e 's/[[:blank:]]\+$//' -i lemon.c lempar.c

# Apply any local patches shipped in the patches/ directory
if [ -d "patches" ]; then
    for i in patches/*.patch; do
        echo "Applying $i"
        patch --silent -p1 -i "$i"
    done
fi

echo DONE
diff --git a/tools/lemon/lemon.c b/tools/lemon/lemon.c
new file mode 100644
index 0000000..869ac58
--- /dev/null
+++ b/tools/lemon/lemon.c
@@ -0,0 +1,5893 @@
+/*
+** This file contains all sources (including headers) to the LEMON
+** LALR(1) parser generator. The sources have been combined into a
+** single file to make it easy to include LEMON in the source tree
+** and Makefile of another program.
+**
+** The author of this program disclaims copyright.
+*/
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define ISSPACE(X) isspace((unsigned char)(X))
+#define ISDIGIT(X) isdigit((unsigned char)(X))
+#define ISALNUM(X) isalnum((unsigned char)(X))
+#define ISALPHA(X) isalpha((unsigned char)(X))
+#define ISUPPER(X) isupper((unsigned char)(X))
+#define ISLOWER(X) islower((unsigned char)(X))
+
+
+#ifndef __WIN32__
+# if defined(_WIN32) || defined(WIN32)
+# define __WIN32__
+# endif
+#endif
+
+#ifdef __WIN32__
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int access(const char *path, int mode);
+#ifdef __cplusplus
+}
+#endif
+#else
+#include <unistd.h>
+#endif
+
+/* #define PRIVATE static */
+#define PRIVATE
+
+#ifdef TEST
+#define MAXRHS 5 /* Set low to exercise exception code */
+#else
+#define MAXRHS 1000
+#endif
+
+extern void memory_error();
+static int showPrecedenceConflict = 0;
+static char *msort(char*,char**,int(*)(const char*,const char*));
+
+/*
+** Compilers are getting increasingly pedantic about type conversions
+** as C evolves ever closer to Ada.... To work around the latest problems
+** we have to define the following variant of strlen().
+*/
+#define lemonStrlen(X) ((int)strlen(X))
+
+/*
+** Compilers are starting to complain about the use of sprintf() and strcpy(),
+** saying they are unsafe. So we define our own versions of those routines too.
+**
+** There are three routines here: lemon_sprintf(), lemon_vsprintf(), and
+** lemon_addtext(). The first two are replacements for sprintf() and vsprintf().
+** The third is a helper routine for vsnprintf() that adds texts to the end of a
+** buffer, making sure the buffer is always zero-terminated.
+**
+** The string formatter is a minimal subset of stdlib sprintf() supporting only
+** a few simply conversions:
+**
+** %d
+** %s
+** %.*s
+**
+*/
/* Append nIn bytes of zIn (-1 means use strlen) to zBuf at offset *pnUsed,
** space-padded to iWidth (negative iWidth left-justifies).  The buffer is
** kept zero-terminated and *pnUsed is advanced past the added text. */
static void lemon_addtext(
  char *zBuf,          /* The buffer to which text is added */
  int *pnUsed,         /* Slots of the buffer used so far */
  const char *zIn,     /* Text to add */
  int nIn,             /* Bytes of text to add.  -1 to use strlen() */
  int iWidth           /* Field width.  Negative to left justify */
){
  if( nIn<0 ) for(nIn=0; zIn[nIn]; nIn++){}
  while( iWidth>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth--; }  /* right-justify pad */
  if( nIn==0 ) return;
  memcpy(&zBuf[*pnUsed], zIn, nIn);
  *pnUsed += nIn;
  while( (-iWidth)>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth++; }  /* left-justify pad */
  zBuf[*pnUsed] = 0;
}
/* Minimal vsprintf() replacement supporting only %d, %s, %.*s and %%,
** optionally with a field width.  Any other conversion aborts the program.
** Returns the number of bytes written to str. */
static int lemon_vsprintf(char *str, const char *zFormat, va_list ap){
  int i, j, k, c;
  int nUsed = 0;
  const char *z;
  char zTemp[50];
  str[0] = 0;
  for(i=j=0; (c = zFormat[i])!=0; i++){
    if( c=='%' ){
      int iWidth = 0;
      /* flush the literal text accumulated since the last conversion */
      lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0);
      c = zFormat[++i];
      /* optional (possibly negative) field width */
      if( ISDIGIT(c) || (c=='-' && ISDIGIT(zFormat[i+1])) ){
        if( c=='-' ) i++;
        while( ISDIGIT(zFormat[i]) ) iWidth = iWidth*10 + zFormat[i++] - '0';
        if( c=='-' ) iWidth = -iWidth;
        c = zFormat[i];
      }
      if( c=='d' ){
        int v = va_arg(ap, int);
        if( v<0 ){
          lemon_addtext(str, &nUsed, "-", 1, iWidth);
          v = -v;
        }else if( v==0 ){
          lemon_addtext(str, &nUsed, "0", 1, iWidth);
        }
        /* build the decimal digits backwards at the end of zTemp */
        k = 0;
        while( v>0 ){
          k++;
          zTemp[sizeof(zTemp)-k] = (v%10) + '0';
          v /= 10;
        }
        lemon_addtext(str, &nUsed, &zTemp[sizeof(zTemp)-k], k, iWidth);
      }else if( c=='s' ){
        z = va_arg(ap, const char*);
        lemon_addtext(str, &nUsed, z, -1, iWidth);
      }else if( c=='.' && memcmp(&zFormat[i], ".*s", 3)==0 ){
        /* %.*s: length is taken from the argument list */
        i += 2;
        k = va_arg(ap, int);
        z = va_arg(ap, const char*);
        lemon_addtext(str, &nUsed, z, k, iWidth);
      }else if( c=='%' ){
        lemon_addtext(str, &nUsed, "%", 1, 0);
      }else{
        fprintf(stderr, "illegal format\n");
        exit(1);
      }
      j = i+1;
    }
  }
  /* flush any trailing literal text */
  lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0);
  return nUsed;
}
/* sprintf() replacement built on lemon_vsprintf(); supports the same
** minimal conversion subset.  Returns the number of bytes written. */
static int lemon_sprintf(char *str, const char *format, ...){
  va_list ap;
  int rc;
  va_start(ap, format);
  rc = lemon_vsprintf(str, format, ap);
  va_end(ap);
  return rc;
}
/* strcpy() replacement; copies src into dest including the NUL terminator. */
static void lemon_strcpy(char *dest, const char *src){
  while( (*(dest++) = *(src++))!=0 ){}
}
/* strcat() replacement: advance to the end of dest, then copy src there. */
static void lemon_strcat(char *dest, const char *src){
  while( *dest ) dest++;
  lemon_strcpy(dest, src);
}
+
+
+/* a few forward declarations... */
+struct rule;
+struct lemon;
+struct action;
+
+static struct action *Action_new(void);
+static struct action *Action_sort(struct action *);
+
+/********** From the file "build.h" ************************************/
+void FindRulePrecedences(struct lemon*);
+void FindFirstSets(struct lemon*);
+void FindStates(struct lemon*);
+void FindLinks(struct lemon*);
+void FindFollowSets(struct lemon*);
+void FindActions(struct lemon*);
+
+/********* From the file "configlist.h" *********************************/
+void Configlist_init(void);
+struct config *Configlist_add(struct rule *, int);
+struct config *Configlist_addbasis(struct rule *, int);
+void Configlist_closure(struct lemon *);
+void Configlist_sort(void);
+void Configlist_sortbasis(void);
+struct config *Configlist_return(void);
+struct config *Configlist_basis(void);
+void Configlist_eat(struct config *);
+void Configlist_reset(void);
+
+/********* From the file "error.h" ***************************************/
+void ErrorMsg(const char *, int,const char *, ...);
+
+/****** From the file "option.h" ******************************************/
+enum option_type { OPT_FLAG=1, OPT_INT, OPT_DBL, OPT_STR,
+ OPT_FFLAG, OPT_FINT, OPT_FDBL, OPT_FSTR};
+struct s_options {
+ enum option_type type;
+ const char *label;
+ char *arg;
+ const char *message;
+};
+int OptInit(char**,struct s_options*,FILE*);
+int OptNArgs(void);
+char *OptArg(int);
+void OptErr(int);
+void OptPrint(void);
+
+/******** From the file "parse.h" *****************************************/
+void Parse(struct lemon *lemp);
+
+/********* From the file "plink.h" ***************************************/
+struct plink *Plink_new(void);
+void Plink_add(struct plink **, struct config *);
+void Plink_copy(struct plink **, struct plink *);
+void Plink_delete(struct plink *);
+
+/********** From the file "report.h" *************************************/
+void Reprint(struct lemon *);
+void ReportOutput(struct lemon *);
+void ReportTable(struct lemon *, int, int);
+void ReportHeader(struct lemon *);
+void CompressTables(struct lemon *);
+void ResortStates(struct lemon *);
+
+/********** From the file "set.h" ****************************************/
+void SetSize(int); /* All sets will be of size N */
+char *SetNew(void); /* A new set for element 0..N */
+void SetFree(char*); /* Deallocate a set */
+int SetAdd(char*,int); /* Add element to a set */
+int SetUnion(char *,char *); /* A <- A U B, thru element N */
+#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */
+
+/********** From the file "struct.h" *************************************/
+/*
+** Principal data structures for the LEMON parser generator.
+*/
+
+typedef enum {LEMON_FALSE=0, LEMON_TRUE} Boolean;
+
+/* Symbols (terminals and nonterminals) of the grammar are stored
+** in the following: */
+/* The three kinds of grammar symbol. */
+enum symbol_type {
+ TERMINAL,
+ NONTERMINAL,
+ MULTITERMINAL
+};
+/* Operator associativity, used with precedence declarations. */
+enum e_assoc {
+ LEFT,
+ RIGHT,
+ NONE,
+ UNK
+};
+struct symbol {
+ const char *name; /* Name of the symbol */
+ int index; /* Index number for this symbol */
+ enum symbol_type type; /* Symbols are all either TERMINALS or NTs */
+ struct rule *rule; /* Linked list of rules of this (if an NT) */
+ struct symbol *fallback; /* fallback token in case this token doesn't parse */
+ int prec; /* Precedence if defined (-1 otherwise) */
+ enum e_assoc assoc; /* Associativity if precedence is defined */
+ char *firstset; /* First-set for all rules of this symbol */
+ Boolean lambda; /* True if NT and can generate an empty string */
+ int useCnt; /* Number of times used */
+ char *destructor; /* Code which executes whenever this symbol is
+ ** popped from the stack during error processing */
+ int destLineno; /* Line number for start of destructor. Set to
+ ** -1 for duplicate destructors. */
+ char *datatype; /* The data type of information held by this
+ ** object. Only used if type==NONTERMINAL */
+ int dtnum; /* The data type number. In the parser, the value
+ ** stack is a union. The .yy%d element of this
+ ** union is the correct data type for this object */
+ int bContent; /* True if this symbol ever carries content - if
+ ** it is ever more than just syntax */
+ /* The following fields are used by MULTITERMINALs only */
+ int nsubsym; /* Number of constituent symbols in the MULTI */
+ struct symbol **subsym; /* Array of constituent symbols */
+};
+
+/* Each production rule in the grammar is stored in the following
+** structure. Rules are linked both per-LHS (via "nextlhs") and
+** globally (via "next"). */
+struct rule {
+ struct symbol *lhs; /* Left-hand side of the rule */
+ const char *lhsalias; /* Alias for the LHS (NULL if none) */
+ int lhsStart; /* True if left-hand side is the start symbol */
+ int ruleline; /* Line number for the rule */
+ int nrhs; /* Number of RHS symbols */
+ struct symbol **rhs; /* The RHS symbols */
+ const char **rhsalias; /* An alias for each RHS symbol (NULL if none) */
+ int line; /* Line number at which code begins */
+ const char *code; /* The code executed when this rule is reduced */
+ const char *codePrefix; /* Setup code before code[] above */
+ const char *codeSuffix; /* Breakdown code after code[] above */
+ struct symbol *precsym; /* Precedence symbol for this rule */
+ int index; /* An index number for this rule */
+ int iRule; /* Rule number as used in the generated tables */
+ Boolean noCode; /* True if this rule has no associated C code */
+ Boolean codeEmitted; /* True if the code has been emitted already */
+ Boolean canReduce; /* True if this rule is ever reduced */
+ Boolean doesReduce; /* Reduce actions occur after optimization */
+ Boolean neverReduce; /* Reduce is theoretically possible, but prevented
+ ** by actions or other outside implementation */
+ struct rule *nextlhs; /* Next rule with the same LHS */
+ struct rule *next; /* Next rule in the global list */
+};
+
+/* A configuration is a production rule of the grammar together with
+** a mark (dot) showing how much of that rule has been processed so far.
+** Configurations also contain a follow-set which is a list of terminal
+** symbols which are allowed to immediately follow the end of the rule.
+** Every configuration is recorded as an instance of the following: */
+enum cfgstatus {
+ COMPLETE,
+ INCOMPLETE
+};
+struct config {
+ struct rule *rp; /* The rule upon which the configuration is based */
+ int dot; /* The parse point */
+ char *fws; /* Follow-set for this configuration only
+ ** (a SetNew() bit-set indexed by terminal index) */
+ struct plink *fplp; /* Follow-set forward propagation links */
+ struct plink *bplp; /* Follow-set backwards propagation links */
+ struct state *stp; /* Pointer to state which contains this */
+ enum cfgstatus status; /* used during followset and shift computations */
+ struct config *next; /* Next configuration in the state */
+ struct config *bp; /* The next basis configuration */
+};
+
+/* Every possible disposition of a (state, lookahead) pair, including
+** the conflict and resolved-conflict markers assigned by
+** resolve_conflict(). */
+enum e_action {
+ SHIFT,
+ ACCEPT,
+ REDUCE,
+ ERROR,
+ SSCONFLICT, /* A shift/shift conflict */
+ SRCONFLICT, /* Was a reduce, but part of a conflict */
+ RRCONFLICT, /* Was a reduce, but part of a conflict */
+ SH_RESOLVED, /* Was a shift. Precedence resolved conflict */
+ RD_RESOLVED, /* Was reduce. Precedence resolved conflict */
+ NOT_USED, /* Deleted by compression */
+ SHIFTREDUCE /* Shift first, then reduce */
+};
+
+/* Every shift or reduce operation is stored as one of the following */
+struct action {
+ struct symbol *sp; /* The look-ahead symbol */
+ enum e_action type;
+ union {
+ struct state *stp; /* The new state, if a shift */
+ struct rule *rp; /* The rule, if a reduce */
+ } x; /* Discriminated by "type"; see Action_add() */
+ struct symbol *spOpt; /* SHIFTREDUCE optimization to this symbol */
+ struct action *next; /* Next action for this state */
+ struct action *collide; /* Next action with the same hash */
+};
+
+/* Each state of the generated parser's finite state machine
+** is encoded as an instance of the following structure. */
+struct state {
+ struct config *bp; /* The basis configurations for this state */
+ struct config *cfp; /* All configurations in this set */
+ int statenum; /* Sequential number for this state */
+ struct action *ap; /* List of actions for this state */
+ int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */
+ int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */
+ int iDfltReduce; /* Default action is to REDUCE by this rule */
+ struct rule *pDfltReduce;/* The default REDUCE rule. */
+ int autoReduce; /* True if this is an auto-reduce state */
+};
+/* Sentinel value (presumably for iTknOfst/iNtOfst when a state has no
+** such offset -- TODO confirm against table-generation code). */
+#define NO_OFFSET (-2147483647)
+
+/* A followset propagation link indicates that the contents of one
+** configuration followset should be propagated to another whenever
+** the first changes. */
+struct plink {
+ struct config *cfp; /* The configuration to which linked */
+ struct plink *next; /* The next propagate link */
+};
+
+/* The state vector for the entire parser generator is recorded as
+** follows. (LEMON uses no global variables and makes little use of
+** static variables. Fields in the following structure can be thought
+** of as being global variables in the program.) */
+struct lemon {
+ struct state **sorted; /* Table of states sorted by state number */
+ struct rule *rule; /* List of all rules */
+ struct rule *startRule; /* First rule */
+ int nstate; /* Number of states */
+ int nxstate; /* nstate with tail degenerate states removed */
+ int nrule; /* Number of rules */
+ int nruleWithAction; /* Number of rules with actions */
+ int nsymbol; /* Number of terminal and nonterminal symbols */
+ int nterminal; /* Number of terminal symbols */
+ int minShiftReduce; /* Minimum shift-reduce action value */
+ int errAction; /* Error action value */
+ int accAction; /* Accept action value */
+ int noAction; /* No-op action value */
+ int minReduce; /* Minimum reduce action */
+ int maxAction; /* Maximum action value of any kind */
+ struct symbol **symbols; /* Sorted array of pointers to symbols */
+ int errorcnt; /* Number of errors */
+ struct symbol *errsym; /* The error symbol */
+ struct symbol *wildcard; /* Token that matches anything */
+ char *name; /* Name of the generated parser */
+ char *arg; /* Declaration of the 3rd argument to parser */
+ char *ctx; /* Declaration of 2nd argument to constructor */
+ char *tokentype; /* Type of terminal symbols in the parser stack */
+ char *vartype; /* The default type of non-terminal symbols */
+ char *start; /* Name of the start symbol for the grammar */
+ char *stacksize; /* Size of the parser stack */
+ char *include; /* Code to put at the start of the C file */
+ char *error; /* Code to execute when an error is seen */
+ char *overflow; /* Code to execute on a stack overflow */
+ char *failure; /* Code to execute on parser failure */
+ char *accept; /* Code to execute when the parser excepts */
+ char *extracode; /* Code appended to the generated file */
+ char *tokendest; /* Code to execute to destroy token data */
+ char *vardest; /* Code for the default non-terminal destructor */
+ char *filename; /* Name of the input file */
+ char *outname; /* Name of the current output file */
+ char *tokenprefix; /* A prefix added to token names in the .h file */
+ int nconflict; /* Number of parsing conflicts */
+ int nactiontab; /* Number of entries in the yy_action[] table */
+ int nlookaheadtab; /* Number of entries in yy_lookahead[] */
+ int tablesize; /* Total table size of all tables in bytes */
+ int basisflag; /* Print only basis configurations */
+ int printPreprocessed; /* Show preprocessor output on stdout */
+ int has_fallback; /* True if any %fallback is seen in the grammar */
+ int nolinenosflag; /* True if #line statements should not be printed */
+ char *argv0; /* Name of the program */
+};
+
+/* Abort via memory_error() when an allocation (X) comes back NULL. */
+#define MemoryCheck(X) if((X)==0){ \
+ extern void memory_error(); \
+ memory_error(); \
+}
+
+/**************** From the file "table.h" *********************************/
+/*
+** All code in this file has been automatically generated
+** from a specification in the file
+** "table.q"
+** by the associative array code building program "aagen".
+** Do not edit this file! Instead, edit the specification
+** file, then rerun aagen.
+*/
+/*
+** Code for processing tables in the LEMON parser generator.
+*/
+/* Routines for handling strings */
+
+const char *Strsafe(const char *);
+
+void Strsafe_init(void);
+int Strsafe_insert(const char *);
+const char *Strsafe_find(const char *);
+
+/* Routines for handling symbols of the grammar */
+
+struct symbol *Symbol_new(const char *);
+int Symbolcmpp(const void *, const void *);
+void Symbol_init(void);
+int Symbol_insert(struct symbol *, const char *);
+struct symbol *Symbol_find(const char *);
+struct symbol *Symbol_Nth(int);
+int Symbol_count(void);
+struct symbol **Symbol_arrayof(void);
+
+/* Routines to manage the state table */
+
+/* Note: Configcmp takes char* rather than struct config* so it can be
+** passed where a generic comparator is expected. */
+int Configcmp(const char *, const char *);
+struct state *State_new(void);
+void State_init(void);
+int State_insert(struct state *, struct config *);
+struct state *State_find(struct config *);
+struct state **State_arrayof(void);
+
+/* Routines used for efficiency in Configlist_add */
+
+void Configtable_init(void);
+int Configtable_insert(struct config *);
+struct config *Configtable_find(struct config *);
+void Configtable_clear(int(*)(struct config *));
+
+/****************** From the file "action.c" *******************************/
+/*
+** Routines processing parser actions in the LEMON parser generator.
+*/
+
+/* Allocate a new parser action.
+**
+** Actions are carved out of a static free list that is replenished in
+** batches of 100 via calloc() whenever it runs dry. The batches are
+** never returned to the system; they live for the life of the process.
+** On allocation failure an error is printed and the program exits. */
+static struct action *Action_new(void){
+ static struct action *actionfreelist = 0;
+ struct action *newaction;
+
+ if( actionfreelist==0 ){
+ int i;
+ int amt = 100;
+ actionfreelist = (struct action *)calloc(amt, sizeof(struct action));
+ if( actionfreelist==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new parser action.");
+ exit(1);
+ }
+ /* Thread the fresh batch into a singly linked free list. */
+ for(i=0; i<amt-1; i++) actionfreelist[i].next = &actionfreelist[i+1];
+ actionfreelist[amt-1].next = 0;
+ }
+ newaction = actionfreelist;
+ actionfreelist = actionfreelist->next;
+ return newaction;
+}
+
+/* Compare two actions for sorting purposes. Return negative, zero, or
+** positive if the first action is less than, equal to, or greater than
+** the second.
+**
+** Sort key: lookahead symbol index, then action type, then (for
+** REDUCE/SHIFTREDUCE) rule index. Final tie-break is the relative
+** address of the two actions, which keeps the ordering deterministic
+** for otherwise-equal entries.
+*/
+static int actioncmp(
+ struct action *ap1,
+ struct action *ap2
+){
+ int rc;
+ rc = ap1->sp->index - ap2->sp->index;
+ if( rc==0 ){
+ rc = (int)ap1->type - (int)ap2->type;
+ }
+ if( rc==0 && (ap1->type==REDUCE || ap1->type==SHIFTREDUCE) ){
+ rc = ap1->x.rp->index - ap2->x.rp->index;
+ }
+ if( rc==0 ){
+ rc = (int) (ap2 - ap1);
+ }
+ return rc;
+}
+
+/* Sort parser actions.
+**
+** Sorts the linked list of actions "ap" with the external merge-sort
+** helper msort(), ordered by actioncmp(). The casts adapt the typed
+** list/comparator to msort()'s generic char* interface. */
+static struct action *Action_sort(
+ struct action *ap
+){
+ ap = (struct action *)msort((char *)ap,(char **)&ap->next,
+ (int(*)(const char*,const char*))actioncmp);
+ return ap;
+}
+
+/* Prepend a new action to the list *app.
+**
+** "type" selects which member of the x union is set from "arg":
+** a struct state* for SHIFT, otherwise a struct rule*. */
+void Action_add(
+ struct action **app, /* IN/OUT: head of the action list */
+ enum e_action type, /* Kind of action being added */
+ struct symbol *sp, /* The look-ahead symbol */
+ char *arg /* state* (SHIFT) or rule* (others), type-punned */
+){
+ struct action *newaction;
+ newaction = Action_new();
+ newaction->next = *app;
+ *app = newaction;
+ newaction->type = type;
+ newaction->sp = sp;
+ newaction->spOpt = 0;
+ if( type==SHIFT ){
+ newaction->x.stp = (struct state *)arg;
+ }else{
+ newaction->x.rp = (struct rule *)arg;
+ }
+}
+/********************** New code to implement the "acttab" module ***********/
+/*
+** This module implements routines used to construct the yy_action[] table.
+*/
+
+/*
+** The state of the yy_action table under construction is an instance of
+** the following structure.
+**
+** The yy_action table maps the pair (state_number, lookahead) into an
+** action_number. The table is an array of integers pairs. The state_number
+** determines an initial offset into the yy_action array. The lookahead
+** value is then added to this initial offset to get an index X into the
+** yy_action array. If the aAction[X].lookahead equals the value of the
+** of the lookahead input, then the value of the action_number output is
+** aAction[X].action. If the lookaheads do not match then the
+** default action for the state_number is returned.
+**
+** All actions associated with a single state_number are first entered
+** into aLookahead[] using multiple calls to acttab_action(). Then the
+** actions for that single state_number are placed into the aAction[]
+** array with a single call to acttab_insert(). The acttab_insert() call
+** also resets the aLookahead[] array in preparation for the next
+** state number.
+*/
+/* One (lookahead, action) pair in the table under construction. */
+struct lookahead_action {
+ int lookahead; /* Value of the lookahead token */
+ int action; /* Action to take on the given lookahead */
+};
+typedef struct acttab acttab;
+struct acttab {
+ int nAction; /* Number of used slots in aAction[] */
+ int nActionAlloc; /* Slots allocated for aAction[] */
+ struct lookahead_action
+ *aAction, /* The yy_action[] table under construction */
+ *aLookahead; /* A single new transaction set */
+ int mnLookahead; /* Minimum aLookahead[].lookahead */
+ int mnAction; /* Action associated with mnLookahead */
+ int mxLookahead; /* Maximum aLookahead[].lookahead */
+ int nLookahead; /* Used slots in aLookahead[] */
+ int nLookaheadAlloc; /* Slots allocated in aLookahead[] */
+ int nterminal; /* Number of terminal symbols */
+ int nsymbol; /* total number of symbols */
+};
+
+/* Return the number of entries in the yy_action table */
+#define acttab_lookahead_size(X) ((X)->nAction)
+
+/* The value for the N-th entry in yy_action */
+#define acttab_yyaction(X,N) ((X)->aAction[N].action)
+
+/* The value for the N-th entry in yy_lookahead */
+#define acttab_yylookahead(X,N) ((X)->aAction[N].lookahead)
+
+/* Free all memory associated with the given acttab.
+** p must be non-NULL; both internal arrays (which may be NULL) and
+** the acttab itself are released. */
+void acttab_free(acttab *p){
+ free( p->aAction );
+ free( p->aLookahead );
+ free( p );
+}
+
+/* Allocate a new acttab structure, zero-initialized except for the
+** symbol counts. Exits the program on allocation failure. */
+acttab *acttab_alloc(int nsymbol, int nterminal){
+ acttab *p = (acttab *) calloc( 1, sizeof(*p) );
+ if( p==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new acttab.");
+ exit(1);
+ }
+ /* NOTE(review): redundant -- calloc() above already zeroed *p. */
+ memset(p, 0, sizeof(*p));
+ p->nsymbol = nsymbol;
+ p->nterminal = nterminal;
+ return p;
+}
+
+/* Add a new action to the current transaction set.
+**
+** This routine is called once for each lookahead for a particular
+** state. The aLookahead[] array grows in increments of 25 slots as
+** needed, and the running minimum/maximum lookahead (plus the action
+** paired with the minimum) are maintained for use by acttab_insert().
+** Exits the program if the array cannot be grown.
+*/
+void acttab_action(acttab *p, int lookahead, int action){
+ if( p->nLookahead>=p->nLookaheadAlloc ){
+ p->nLookaheadAlloc += 25;
+ p->aLookahead = (struct lookahead_action *) realloc( p->aLookahead,
+ sizeof(p->aLookahead[0])*p->nLookaheadAlloc );
+ if( p->aLookahead==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ }
+ if( p->nLookahead==0 ){
+ /* First entry of a new transaction set: seed min/max trackers. */
+ p->mxLookahead = lookahead;
+ p->mnLookahead = lookahead;
+ p->mnAction = action;
+ }else{
+ if( p->mxLookahead<lookahead ) p->mxLookahead = lookahead;
+ if( p->mnLookahead>lookahead ){
+ p->mnLookahead = lookahead;
+ p->mnAction = action;
+ }
+ }
+ p->aLookahead[p->nLookahead].lookahead = lookahead;
+ p->aLookahead[p->nLookahead].action = action;
+ p->nLookahead++;
+}
+
+/*
+** Add the transaction set built up with prior calls to acttab_action()
+** into the current action table. Then reset the transaction set back
+** to an empty set in preparation for a new round of acttab_action() calls.
+**
+** Return the offset into the action table of the new transaction.
+**
+** If the makeItSafe parameter is true, then the offset is chosen so that
+** it is impossible to overread the yy_lookaside[] table regardless of
+** the lookaside token. This is done for the terminal symbols, as they
+** come from external inputs and can contain syntax errors. When makeItSafe
+** is false, there is more flexibility in selecting offsets, resulting in
+** a smaller table. For non-terminal symbols, which are never syntax errors,
+** makeItSafe can be false.
+*/
+int acttab_insert(acttab *p, int makeItSafe){
+ int i, j, k, n, end;
+ assert( p->nLookahead>0 );
+
+ /* Make sure we have enough space to hold the expanded action table
+ ** in the worst case. The worst case occurs if the transaction set
+ ** must be appended to the current action table
+ */
+ n = p->nsymbol + 1;
+ if( p->nAction + n >= p->nActionAlloc ){
+ int oldAlloc = p->nActionAlloc;
+ p->nActionAlloc = p->nAction + n + p->nActionAlloc + 20;
+ p->aAction = (struct lookahead_action *) realloc( p->aAction,
+ sizeof(p->aAction[0])*p->nActionAlloc);
+ if( p->aAction==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ /* Mark all freshly allocated slots as empty (lookahead/action = -1). */
+ for(i=oldAlloc; i<p->nActionAlloc; i++){
+ p->aAction[i].lookahead = -1;
+ p->aAction[i].action = -1;
+ }
+ }
+
+ /* Scan the existing action table looking for an offset that is a
+ ** duplicate of the current transaction set. Fall out of the loop
+ ** if and when the duplicate is found.
+ **
+ ** i is the index in p->aAction[] where p->mnLookahead is inserted.
+ */
+ end = makeItSafe ? p->mnLookahead : 0;
+ for(i=p->nAction-1; i>=end; i--){
+ if( p->aAction[i].lookahead==p->mnLookahead ){
+ /* All lookaheads and actions in the aLookahead[] transaction
+ ** must match against the candidate aAction[i] entry. */
+ if( p->aAction[i].action!=p->mnAction ) continue;
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ if( k<0 || k>=p->nAction ) break;
+ if( p->aLookahead[j].lookahead!=p->aAction[k].lookahead ) break;
+ if( p->aLookahead[j].action!=p->aAction[k].action ) break;
+ }
+ if( j<p->nLookahead ) continue;
+
+ /* No possible lookahead value that is not in the aLookahead[]
+ ** transaction is allowed to match aAction[i] */
+ n = 0;
+ for(j=0; j<p->nAction; j++){
+ if( p->aAction[j].lookahead<0 ) continue;
+ if( p->aAction[j].lookahead==j+p->mnLookahead-i ) n++;
+ }
+ if( n==p->nLookahead ){
+ break; /* An exact match is found at offset i */
+ }
+ }
+ }
+
+ /* If no existing offsets exactly match the current transaction, find an
+ ** an empty offset in the aAction[] table in which we can add the
+ ** aLookahead[] transaction.
+ */
+ if( i<end ){
+ /* Look for holes in the aAction[] table that fit the current
+ ** aLookahead[] transaction. Leave i set to the offset of the hole.
+ ** If no holes are found, i is left at p->nAction, which means the
+ ** transaction will be appended. */
+ i = makeItSafe ? p->mnLookahead : 0;
+ for(; i<p->nActionAlloc - p->mxLookahead; i++){
+ if( p->aAction[i].lookahead<0 ){
+ /* Every entry of the transaction must land in an empty slot... */
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ if( k<0 ) break;
+ if( p->aAction[k].lookahead>=0 ) break;
+ }
+ if( j<p->nLookahead ) continue;
+ /* ...and no existing entry may alias this offset. */
+ for(j=0; j<p->nAction; j++){
+ if( p->aAction[j].lookahead==j+p->mnLookahead-i ) break;
+ }
+ if( j==p->nAction ){
+ break; /* Fits in empty slots */
+ }
+ }
+ }
+ }
+ /* Insert transaction set at index i. */
+#if 0
+ printf("Acttab:");
+ for(j=0; j<p->nLookahead; j++){
+ printf(" %d", p->aLookahead[j].lookahead);
+ }
+ printf(" inserted at %d\n", i);
+#endif
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ p->aAction[k] = p->aLookahead[j];
+ if( k>=p->nAction ) p->nAction = k+1;
+ }
+ if( makeItSafe && i+p->nterminal>=p->nAction ) p->nAction = i+p->nterminal+1;
+ p->nLookahead = 0; /* Reset the transaction set for the next state */
+
+ /* Return the offset that is added to the lookahead in order to get the
+ ** index into yy_action of the action */
+ return i - p->mnLookahead;
+}
+
+/*
+** Return the size of the action table without the trailing syntax error
+** entries (entries whose lookahead is negative, i.e. still unused).
+*/
+int acttab_action_size(acttab *p){
+ int n = p->nAction;
+ while( n>0 && p->aAction[n-1].lookahead<0 ){ n--; }
+ return n;
+}
+
+/********************** From the file "build.c" *****************************/
+/*
+** Routines to construct the finite state machine for the LEMON
+** parser generator.
+*/
+
+/* Find a precedence symbol of every rule in the grammar.
+**
+** Those rules which have a precedence symbol coded in the input
+** grammar using the "[symbol]" construct will already have the
+** rp->precsym field filled. Other rules take as their precedence
+** symbol the first RHS symbol with a defined precedence. If there
+** are not RHS symbols with a defined precedence, the precedence
+** symbol field is left blank.
+*/
+void FindRulePrecedences(struct lemon *xp)
+{
+ struct rule *rp;
+ for(rp=xp->rule; rp; rp=rp->next){
+ if( rp->precsym==0 ){
+ int i, j;
+ for(i=0; i<rp->nrhs && rp->precsym==0; i++){
+ struct symbol *sp = rp->rhs[i];
+ if( sp->type==MULTITERMINAL ){
+ /* For a multi-terminal, use the first sub-symbol that has
+ ** a defined precedence. */
+ for(j=0; j<sp->nsubsym; j++){
+ if( sp->subsym[j]->prec>=0 ){
+ rp->precsym = sp->subsym[j];
+ break;
+ }
+ }
+ }else if( sp->prec>=0 ){
+ rp->precsym = rp->rhs[i];
+ }
+ }
+ }
+ }
+ return;
+}
+
+/* Find all nonterminals which will generate the empty string.
+** Then go back and compute the first sets of every nonterminal.
+** The first set is the set of all terminal symbols which can begin
+** a string generated by that nonterminal.
+**
+** Both phases iterate to a fixed point: they repeat until a full pass
+** makes no further progress.
+*/
+void FindFirstSets(struct lemon *lemp)
+{
+ int i, j;
+ struct rule *rp;
+ int progress;
+
+ for(i=0; i<lemp->nsymbol; i++){
+ lemp->symbols[i]->lambda = LEMON_FALSE;
+ }
+ /* Only nonterminals (indices >= nterminal) get first-sets. */
+ for(i=lemp->nterminal; i<lemp->nsymbol; i++){
+ lemp->symbols[i]->firstset = SetNew();
+ }
+
+ /* First compute all lambdas */
+ do{
+ progress = 0;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ if( rp->lhs->lambda ) continue;
+ for(i=0; i<rp->nrhs; i++){
+ struct symbol *sp = rp->rhs[i];
+ assert( sp->type==NONTERMINAL || sp->lambda==LEMON_FALSE );
+ if( sp->lambda==LEMON_FALSE ) break;
+ }
+ /* LHS is lambda if every RHS symbol is lambda (or nrhs==0). */
+ if( i==rp->nrhs ){
+ rp->lhs->lambda = LEMON_TRUE;
+ progress = 1;
+ }
+ }
+ }while( progress );
+
+ /* Now compute all first sets */
+ do{
+ struct symbol *s1, *s2;
+ progress = 0;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ s1 = rp->lhs;
+ for(i=0; i<rp->nrhs; i++){
+ s2 = rp->rhs[i];
+ if( s2->type==TERMINAL ){
+ progress += SetAdd(s1->firstset,s2->index);
+ break;
+ }else if( s2->type==MULTITERMINAL ){
+ for(j=0; j<s2->nsubsym; j++){
+ progress += SetAdd(s1->firstset,s2->subsym[j]->index);
+ }
+ break;
+ }else if( s1==s2 ){
+ /* Self-reference contributes nothing new; continue past it
+ ** only if it can derive the empty string. */
+ if( s1->lambda==LEMON_FALSE ) break;
+ }else{
+ progress += SetUnion(s1->firstset,s2->firstset);
+ if( s2->lambda==LEMON_FALSE ) break;
+ }
+ }
+ }
+ }while( progress );
+ return;
+}
+
+/* Compute all LR(0) states for the grammar. Links
+** are added to between some states so that the LR(1) follow sets
+** can be computed later.
+**
+** Errors (missing or misused start symbol) are reported via ErrorMsg()
+** and counted in lemp->errorcnt; only a completely absent start rule
+** aborts the program.
+*/
+PRIVATE struct state *getstate(struct lemon *); /* forward reference */
+void FindStates(struct lemon *lemp)
+{
+ struct symbol *sp;
+ struct rule *rp;
+
+ Configlist_init();
+
+ /* Find the start symbol */
+ if( lemp->start ){
+ sp = Symbol_find(lemp->start);
+ if( sp==0 ){
+ ErrorMsg(lemp->filename,0,
+ "The specified start symbol \"%s\" is not "
+ "in a nonterminal of the grammar. \"%s\" will be used as the start "
+ "symbol instead.",lemp->start,lemp->startRule->lhs->name);
+ lemp->errorcnt++;
+ sp = lemp->startRule->lhs;
+ }
+ }else if( lemp->startRule ){
+ sp = lemp->startRule->lhs;
+ }else{
+ ErrorMsg(lemp->filename,0,"Internal error - no start rule\n");
+ exit(1);
+ }
+
+ /* Make sure the start symbol doesn't occur on the right-hand side of
+ ** any rule. Report an error if it does. (YACC would generate a new
+ ** start symbol in this case.) */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ int i;
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhs[i]==sp ){ /* FIX ME: Deal with multiterminals */
+ ErrorMsg(lemp->filename,0,
+ "The start symbol \"%s\" occurs on the "
+ "right-hand side of a rule. This will result in a parser which "
+ "does not work properly.",sp->name);
+ lemp->errorcnt++;
+ }
+ }
+ }
+
+ /* The basis configuration set for the first state
+ ** is all rules which have the start symbol as their
+ ** left-hand side */
+ for(rp=sp->rule; rp; rp=rp->nextlhs){
+ struct config *newcfp;
+ rp->lhsStart = 1;
+ newcfp = Configlist_addbasis(rp,0);
+ /* Terminal 0 seeds the follow-set of each start configuration. */
+ SetAdd(newcfp->fws,0);
+ }
+
+ /* Compute the first state. All other states will be
+ ** computed automatically during the computation of the first one.
+ ** The returned pointer to the first state is not used. */
+ (void)getstate(lemp);
+ return;
+}
+
+/* Return a pointer to a state which is described by the configuration
+** list which has been built from calls to Configlist_add.
+**
+** If a state with the same basis already exists it is reused (after
+** merging the propagation links); otherwise a new state is created and
+** its successors are computed recursively via buildshifts().
+*/
+PRIVATE void buildshifts(struct lemon *, struct state *); /* Forwd ref */
+PRIVATE struct state *getstate(struct lemon *lemp)
+{
+ struct config *cfp, *bp;
+ struct state *stp;
+
+ /* Extract the sorted basis of the new state. The basis was constructed
+ ** by prior calls to "Configlist_addbasis()". */
+ Configlist_sortbasis();
+ bp = Configlist_basis();
+
+ /* Get a state with the same basis */
+ stp = State_find(bp);
+ if( stp ){
+ /* A state with the same basis already exists! Copy all the follow-set
+ ** propagation links from the state under construction into the
+ ** preexisting state, then return a pointer to the preexisting state */
+ struct config *x, *y;
+ for(x=bp, y=stp->bp; x && y; x=x->bp, y=y->bp){
+ Plink_copy(&y->bplp,x->bplp);
+ Plink_delete(x->fplp);
+ x->fplp = x->bplp = 0;
+ }
+ /* Discard the duplicate configuration list. */
+ cfp = Configlist_return();
+ Configlist_eat(cfp);
+ }else{
+ /* This really is a new state. Construct all the details */
+ Configlist_closure(lemp); /* Compute the configuration closure */
+ Configlist_sort(); /* Sort the configuration closure */
+ cfp = Configlist_return(); /* Get a pointer to the config list */
+ stp = State_new(); /* A new state structure */
+ MemoryCheck(stp);
+ stp->bp = bp; /* Remember the configuration basis */
+ stp->cfp = cfp; /* Remember the configuration closure */
+ stp->statenum = lemp->nstate++; /* Every state gets a sequence number */
+ stp->ap = 0; /* No actions, yet. */
+ State_insert(stp,stp->bp); /* Add to the state table */
+ buildshifts(lemp,stp); /* Recursively compute successor states */
+ }
+ return stp;
+}
+
+/*
+** Return true if two symbols are the same.
+**
+** Ordinary symbols are compared by identity. Two MULTITERMINALs are
+** equal only if their sub-symbol arrays match element-for-element.
+*/
+int same_symbol(struct symbol *a, struct symbol *b)
+{
+ int i;
+ if( a==b ) return 1;
+ if( a->type!=MULTITERMINAL ) return 0;
+ if( b->type!=MULTITERMINAL ) return 0;
+ if( a->nsubsym!=b->nsubsym ) return 0;
+ for(i=0; i<a->nsubsym; i++){
+ if( a->subsym[i]!=b->subsym[i] ) return 0;
+ }
+ return 1;
+}
+
+/* Construct all successor states to the given state. A "successor"
+** state is any state which can be reached by a shift action.
+**
+** For each shiftable symbol a basis set is assembled from all
+** configurations sharing that symbol after the dot, getstate() resolves
+** it to a (new or existing) state, and a SHIFT action is recorded.
+*/
+PRIVATE void buildshifts(struct lemon *lemp, struct state *stp)
+{
+ struct config *cfp; /* For looping thru the config closure of "stp" */
+ struct config *bcfp; /* For the inner loop on config closure of "stp" */
+ struct config *newcfg; /* */
+ struct symbol *sp; /* Symbol following the dot in configuration "cfp" */
+ struct symbol *bsp; /* Symbol following the dot in configuration "bcfp" */
+ struct state *newstp; /* A pointer to a successor state */
+
+ /* Each configuration becomes complete after it contributes to a successor
+ ** state. Initially, all configurations are incomplete */
+ for(cfp=stp->cfp; cfp; cfp=cfp->next) cfp->status = INCOMPLETE;
+
+ /* Loop through all configurations of the state "stp" */
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){
+ if( cfp->status==COMPLETE ) continue; /* Already used by inner loop */
+ if( cfp->dot>=cfp->rp->nrhs ) continue; /* Can't shift this config */
+ Configlist_reset(); /* Reset the new config set */
+ sp = cfp->rp->rhs[cfp->dot]; /* Symbol after the dot */
+
+ /* For every configuration in the state "stp" which has the symbol "sp"
+ ** following its dot, add the same configuration to the basis set under
+ ** construction but with the dot shifted one symbol to the right. */
+ for(bcfp=cfp; bcfp; bcfp=bcfp->next){
+ if( bcfp->status==COMPLETE ) continue; /* Already used */
+ if( bcfp->dot>=bcfp->rp->nrhs ) continue; /* Can't shift this one */
+ bsp = bcfp->rp->rhs[bcfp->dot]; /* Get symbol after dot */
+ if( !same_symbol(bsp,sp) ) continue; /* Must be same as for "cfp" */
+ bcfp->status = COMPLETE; /* Mark this config as used */
+ newcfg = Configlist_addbasis(bcfp->rp,bcfp->dot+1);
+ Plink_add(&newcfg->bplp,bcfp);
+ }
+
+ /* Get a pointer to the state described by the basis configuration set
+ ** constructed in the preceding loop */
+ newstp = getstate(lemp);
+
+ /* The state "newstp" is reached from the state "stp" by a shift action
+ ** on the symbol "sp" */
+ if( sp->type==MULTITERMINAL ){
+ /* A multi-terminal generates one SHIFT per constituent symbol,
+ ** all targeting the same successor state. */
+ int i;
+ for(i=0; i<sp->nsubsym; i++){
+ Action_add(&stp->ap,SHIFT,sp->subsym[i],(char*)newstp);
+ }
+ }else{
+ Action_add(&stp->ap,SHIFT,sp,(char *)newstp);
+ }
+ }
+}
+
+/*
+** Construct the propagation links.
+**
+** First attaches each configuration to its containing state, then
+** mirrors every backward propagation link (bplp) as a forward link
+** (fplp) on the referenced configuration; only the forward links are
+** used by FindFollowSets().
+*/
+void FindLinks(struct lemon *lemp)
+{
+ int i;
+ struct config *cfp, *other;
+ struct state *stp;
+ struct plink *plp;
+
+ /* Housekeeping detail:
+ ** Add to every propagate link a pointer back to the state to
+ ** which the link is attached. */
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){
+ cfp->stp = stp;
+ }
+ }
+
+ /* Convert all backlinks into forward links. Only the forward
+ ** links are used in the follow-set computation. */
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){
+ for(plp=cfp->bplp; plp; plp=plp->next){
+ other = plp->cfp;
+ Plink_add(&other->fplp,cfp);
+ }
+ }
+ }
+}
+
+/* Compute all followsets.
+**
+** A followset is the set of all symbols which can come immediately
+** after a configuration.
+**
+** Worklist-style fixed-point iteration: a configuration whose
+** follow-set changes is re-marked INCOMPLETE so it is revisited on the
+** next pass; the loop ends when a pass makes no changes.
+*/
+void FindFollowSets(struct lemon *lemp)
+{
+ int i;
+ struct config *cfp;
+ struct plink *plp;
+ int progress;
+ int change;
+
+ for(i=0; i<lemp->nstate; i++){
+ assert( lemp->sorted[i]!=0 );
+ for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+ cfp->status = INCOMPLETE;
+ }
+ }
+
+ do{
+ progress = 0;
+ for(i=0; i<lemp->nstate; i++){
+ assert( lemp->sorted[i]!=0 );
+ for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+ if( cfp->status==COMPLETE ) continue;
+ /* Propagate this configuration's follow-set along its
+ ** forward links (built by FindLinks()). */
+ for(plp=cfp->fplp; plp; plp=plp->next){
+ change = SetUnion(plp->cfp->fws,cfp->fws);
+ if( change ){
+ plp->cfp->status = INCOMPLETE;
+ progress = 1;
+ }
+ }
+ cfp->status = COMPLETE;
+ }
+ }
+ }while( progress );
+}
+
+static int resolve_conflict(struct action *,struct action *);
+
+/* Compute the reduce actions, and resolve conflicts.
+**
+** Adds REDUCE actions from completed configurations' follow-sets,
+** installs the ACCEPT action on the initial state, sorts each state's
+** action list so conflicting actions on the same lookahead become
+** adjacent, resolves those conflicts, and finally reports every rule
+** that can never be reduced.
+*/
+void FindActions(struct lemon *lemp)
+{
+ int i,j;
+ struct config *cfp;
+ struct state *stp;
+ struct symbol *sp;
+ struct rule *rp;
+
+ /* Add all of the reduce actions
+ ** A reduce action is added for each element of the followset of
+ ** a configuration which has its dot at the extreme right.
+ */
+ for(i=0; i<lemp->nstate; i++){ /* Loop over all states */
+ stp = lemp->sorted[i];
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){ /* Loop over all configurations */
+ if( cfp->rp->nrhs==cfp->dot ){ /* Is dot at extreme right? */
+ for(j=0; j<lemp->nterminal; j++){
+ if( SetFind(cfp->fws,j) ){
+ /* Add a reduce action to the state "stp" which will reduce by the
+ ** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */
+ Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp);
+ }
+ }
+ }
+ }
+ }
+
+ /* Add the accepting token */
+ if( lemp->start ){
+ sp = Symbol_find(lemp->start);
+ if( sp==0 ){
+ if( lemp->startRule==0 ){
+ fprintf(stderr, "internal error on source line %d: no start rule\n",
+ __LINE__);
+ exit(1);
+ }
+ sp = lemp->startRule->lhs;
+ }
+ }else{
+ sp = lemp->startRule->lhs;
+ }
+ /* Add to the first state (which is always the starting state of the
+ ** finite state machine) an action to ACCEPT if the lookahead is the
+ ** start nonterminal. */
+ Action_add(&lemp->sorted[0]->ap,ACCEPT,sp,0);
+
+ /* Resolve conflicts */
+ for(i=0; i<lemp->nstate; i++){
+ struct action *ap, *nap;
+ stp = lemp->sorted[i];
+ /* assert( stp->ap ); */
+ stp->ap = Action_sort(stp->ap);
+ for(ap=stp->ap; ap && ap->next; ap=ap->next){
+ for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){
+ /* The two actions "ap" and "nap" have the same lookahead.
+ ** Figure out which one should be used */
+ lemp->nconflict += resolve_conflict(ap,nap);
+ }
+ }
+ }
+
+ /* Report an error for each rule that can never be reduced. */
+ for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = LEMON_FALSE;
+ for(i=0; i<lemp->nstate; i++){
+ struct action *ap;
+ for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
+ if( ap->type==REDUCE ) ap->x.rp->canReduce = LEMON_TRUE;
+ }
+ }
+ for(rp=lemp->rule; rp; rp=rp->next){
+ if( rp->canReduce ) continue;
+ ErrorMsg(lemp->filename,rp->ruleline,"This rule can not be reduced.\n");
+ lemp->errorcnt++;
+ }
+}
+
+/* Resolve a conflict between the two given actions.  If the
+** conflict can't be resolved, return non-zero.
+**
+** NO LONGER TRUE:
+**   To resolve a conflict, first look to see if either action
+**   is on an error rule.  In that case, take the action which
+**   is not associated with the error rule.  If neither or both
+**   actions are associated with an error rule, then try to
+**   use precedence to resolve the conflict.
+**
+** If either action is a SHIFT, then it must be apx.  This
+** function won't work if apx->type==REDUCE and apy->type==SHIFT.
+**
+** Returns the number of unresolved conflicts found (0 or 1); the caller
+** accumulates this into lemp->nconflict.
+*/
+static int resolve_conflict(
+  struct action *apx,
+  struct action *apy
+){
+  struct symbol *spx, *spy;
+  int errcnt = 0;
+  assert( apx->sp==apy->sp );  /* Otherwise there would be no conflict */
+  if( apx->type==SHIFT && apy->type==SHIFT ){
+    apy->type = SSCONFLICT;
+    errcnt++;
+  }
+  if( apx->type==SHIFT && apy->type==REDUCE ){
+    spx = apx->sp;
+    spy = apy->x.rp->precsym;
+    if( spy==0 || spx->prec<0 || spy->prec<0 ){
+      /* Not enough precedence information. */
+      apy->type = SRCONFLICT;
+      errcnt++;
+    }else if( spx->prec>spy->prec ){    /* higher precedence wins */
+      apy->type = RD_RESOLVED;
+    }else if( spx->prec<spy->prec ){
+      apx->type = SH_RESOLVED;
+    }else if( spx->prec==spy->prec && spx->assoc==RIGHT ){ /* Use operator */
+      apy->type = RD_RESOLVED;                             /* associativity */
+    }else if( spx->prec==spy->prec && spx->assoc==LEFT ){  /* to break tie */
+      apx->type = SH_RESOLVED;
+    }else{
+      assert( spx->prec==spy->prec && spx->assoc==NONE );
+      apx->type = ERROR;
+    }
+  }else if( apx->type==REDUCE && apy->type==REDUCE ){
+    spx = apx->x.rp->precsym;
+    spy = apy->x.rp->precsym;
+    if( spx==0 || spy==0 || spx->prec<0 ||
+    spy->prec<0 || spx->prec==spy->prec ){
+      apy->type = RRCONFLICT;
+      errcnt++;
+    }else if( spx->prec>spy->prec ){
+      apy->type = RD_RESOLVED;
+    }else if( spx->prec<spy->prec ){
+      apx->type = RD_RESOLVED;
+    }
+  }else{
+    assert(
+      apx->type==SH_RESOLVED ||
+      apx->type==RD_RESOLVED ||
+      apx->type==SSCONFLICT ||
+      apx->type==SRCONFLICT ||
+      apx->type==RRCONFLICT ||
+      apy->type==SH_RESOLVED ||
+      apy->type==RD_RESOLVED ||
+      apy->type==SSCONFLICT ||
+      apy->type==SRCONFLICT ||
+      apy->type==RRCONFLICT
+    );
+    /* The REDUCE/SHIFT case cannot happen because SHIFTs come before
+    ** REDUCEs on the list.  If we reach this point it must be because
+    ** the parser conflict had already been resolved. */
+  }
+  return errcnt;
+}
+/********************* From the file "configlist.c" *************************/
+/*
+** Routines for processing a configuration list and building a state
+** in the LEMON parser generator.
+**
+** Two singly linked lists are maintained at once: "current" (all
+** configurations, threaded through ->next) and "basis" (basis
+** configurations only, threaded through ->bp).
+*/
+
+static struct config *freelist = 0;      /* List of free configurations */
+static struct config *current = 0;       /* Top of list of configurations */
+static struct config **currentend = 0;   /* Last on list of configs */
+static struct config *basis = 0;         /* Top of list of basis configs */
+static struct config **basisend = 0;     /* End of list of basis configs */
+
+/* Return a pointer to a new, zero-initialized configuration.
+** Returns NULL on allocation failure (callers check for 0).
+** NOTE(review): the freelist populated by deleteconfig() below does not
+** appear to be consulted here — presumably intentional; confirm. */
+PRIVATE struct config *newconfig(void){
+  return (struct config*)calloc(1, sizeof(struct config));
+}
+
+/* The configuration "old" is no longer used.  Push it onto the freelist,
+** reusing its ->next link; the memory itself is not released. */
+PRIVATE void deleteconfig(struct config *old)
+{
+  old->next = freelist;
+  freelist = old;
+}
+
+/* Initialize the configuration list builder: empty both the "current"
+** and "basis" lists and set up the configuration hash table. */
+void Configlist_init(void){
+  current = 0;
+  currentend = &current;
+  basis = 0;
+  basisend = &basis;
+  Configtable_init();
+  return;
+}
+
+/* Reset the configuration list builder for the next state: empty both
+** lists and clear (not re-create) the configuration hash table. */
+void Configlist_reset(void){
+  current = 0;
+  currentend = &current;
+  basis = 0;
+  basisend = &basis;
+  Configtable_clear(0);
+  return;
+}
+
+/* Add another configuration to the configuration list.  If a
+** configuration with the same (rule, dot) pair already exists, the
+** existing one is returned instead of creating a duplicate. */
+struct config *Configlist_add(
+  struct rule *rp,    /* The rule */
+  int dot             /* Index into the RHS of the rule where the dot goes */
+){
+  struct config *cfp, model;
+
+  assert( currentend!=0 );
+  model.rp = rp;
+  model.dot = dot;
+  cfp = Configtable_find(&model);
+  if( cfp==0 ){
+    cfp = newconfig();
+    cfp->rp = rp;
+    cfp->dot = dot;
+    cfp->fws = SetNew();
+    cfp->stp = 0;
+    cfp->fplp = cfp->bplp = 0;
+    cfp->next = 0;
+    cfp->bp = 0;
+    *currentend = cfp;
+    currentend = &cfp->next;
+    Configtable_insert(cfp);
+  }
+  return cfp;
+}
+
+/* Add a basis configuration to the configuration list.  A new
+** configuration is appended to BOTH lists: "current" (via ->next) and
+** "basis" (via ->bp).  Duplicates by (rule, dot) are returned as-is. */
+struct config *Configlist_addbasis(struct rule *rp, int dot)
+{
+  struct config *cfp, model;
+
+  assert( basisend!=0 );
+  assert( currentend!=0 );
+  model.rp = rp;
+  model.dot = dot;
+  cfp = Configtable_find(&model);
+  if( cfp==0 ){
+    cfp = newconfig();
+    cfp->rp = rp;
+    cfp->dot = dot;
+    cfp->fws = SetNew();
+    cfp->stp = 0;
+    cfp->fplp = cfp->bplp = 0;
+    cfp->next = 0;
+    cfp->bp = 0;
+    *currentend = cfp;
+    currentend = &cfp->next;
+    *basisend = cfp;
+    basisend = &cfp->bp;
+    Configtable_insert(cfp);
+  }
+  return cfp;
+}
+
+/* Compute the closure of the configuration list: for every configuration
+** whose dot precedes a nonterminal, add a configuration for each rule of
+** that nonterminal with the dot at position 0.  Terminals following the
+** dot seed the new configuration's follow set; if everything after the
+** nonterminal can derive lambda, a propagation link is recorded instead. */
+void Configlist_closure(struct lemon *lemp)
+{
+  struct config *cfp, *newcfp;
+  struct rule *rp, *newrp;
+  struct symbol *sp, *xsp;
+  int i, dot;
+
+  assert( currentend!=0 );
+  for(cfp=current; cfp; cfp=cfp->next){
+    rp = cfp->rp;
+    dot = cfp->dot;
+    if( dot>=rp->nrhs ) continue;
+    sp = rp->rhs[dot];
+    if( sp->type==NONTERMINAL ){
+      if( sp->rule==0 && sp!=lemp->errsym ){
+        ErrorMsg(lemp->filename,rp->line,"Nonterminal \"%s\" has no rules.",
+          sp->name);
+        lemp->errorcnt++;
+      }
+      for(newrp=sp->rule; newrp; newrp=newrp->nextlhs){
+        newcfp = Configlist_add(newrp,0);
+        for(i=dot+1; i<rp->nrhs; i++){
+          xsp = rp->rhs[i];
+          if( xsp->type==TERMINAL ){
+            SetAdd(newcfp->fws,xsp->index);
+            break;
+          }else if( xsp->type==MULTITERMINAL ){
+            int k;
+            for(k=0; k<xsp->nsubsym; k++){
+              SetAdd(newcfp->fws, xsp->subsym[k]->index);
+            }
+            break;
+          }else{
+            SetUnion(newcfp->fws,xsp->firstset);
+            if( xsp->lambda==LEMON_FALSE ) break;
+          }
+        }
+        /* Reached the end of the RHS without hitting a non-lambda symbol:
+        ** the follow set must be propagated from cfp later. */
+        if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp);
+      }
+    }
+  }
+  return;
+}
+
+/* Sort the "current" configuration list in place using Configcmp.
+** currentend is invalidated (set to 0) since the list may be reordered. */
+void Configlist_sort(void){
+  current = (struct config*)msort((char*)current,(char**)&(current->next),
+                                  Configcmp);
+  currentend = 0;
+  return;
+}
+
+/* Sort the basis configuration list.  Note that this deliberately walks
+** the "current" list along its ->bp links (the basis chain is threaded
+** through the same nodes) — this matches upstream lemon.  basisend is
+** invalidated afterwards. */
+void Configlist_sortbasis(void){
+  basis = (struct config*)msort((char*)current,(char**)&(current->bp),
+                                Configcmp);
+  basisend = 0;
+  return;
+}
+
+/* Return a pointer to the head of the "current" configuration list and
+** reset the list.  Ownership of the list passes to the caller. */
+struct config *Configlist_return(void){
+  struct config *old;
+  old = current;
+  current = 0;
+  currentend = 0;
+  return old;
+}
+
+/* Return a pointer to the head of the basis configuration list and
+** reset the list.  Ownership of the list passes to the caller. */
+struct config *Configlist_basis(void){
+  struct config *old;
+  old = basis;
+  basis = 0;
+  basisend = 0;
+  return old;
+}
+
+/* Free all elements of the given configuration list: release each node's
+** follow set and push the node onto the freelist.  The propagation links
+** (fplp/bplp) must already have been deleted by the caller. */
+void Configlist_eat(struct config *cfp)
+{
+  struct config *nextcfp;
+  for(; cfp; cfp=nextcfp){
+    nextcfp = cfp->next;
+    assert( cfp->fplp==0 );
+    assert( cfp->bplp==0 );
+    if( cfp->fws ) SetFree(cfp->fws);
+    deleteconfig(cfp);
+  }
+  return;
+}
+/***************** From the file "error.c" *********************************/
+/*
+** Code for printing error message.
+*/
+
+/* Print "filename:lineno: " followed by the printf-style formatted
+** message and a newline, all to stderr. */
+void ErrorMsg(const char *filename, int lineno, const char *format, ...){
+  va_list ap;
+  fprintf(stderr, "%s:%d: ", filename, lineno);
+  va_start(ap, format);
+  vfprintf(stderr,format,ap);
+  va_end(ap);
+  fprintf(stderr, "\n");
+}
+/**************** From the file "main.c" ************************************/
+/*
+** Main program file for the LEMON parser generator.
+*/
+
+/* Report an out-of-memory condition and abort.  This function
+** is used mostly by the "MemoryCheck" macro in struct.h.
+** Does not return. */
+void memory_error(void){
+  fprintf(stderr,"Out of memory.  Aborting...\n");
+  exit(1);
+}
+
+static int nDefine = 0;      /* Number of -D options on the command line */
+static char **azDefine = 0;  /* Name of the -D macros */
+
+/* This routine is called with the argument to each -D command-line option.
+** Add the macro defined to the azDefine array.  Any "=VALUE" suffix is
+** stripped by overwriting the '=' with a NUL, so only the macro name is
+** stored.  Exits on allocation failure.
+*/
+static void handle_D_option(char *z){
+  char **paz;
+  nDefine++;
+  azDefine = (char **) realloc(azDefine, sizeof(azDefine[0])*nDefine);
+  if( azDefine==0 ){
+    fprintf(stderr,"out of memory\n");
+    exit(1);
+  }
+  paz = &azDefine[nDefine-1];
+  *paz = (char *) malloc( lemonStrlen(z)+1 );
+  if( *paz==0 ){
+    fprintf(stderr,"out of memory\n");
+    exit(1);
+  }
+  lemon_strcpy(*paz, z);
+  for(z=*paz; *z && *z!='='; z++){}
+  *z = 0;
+}
+
+/* Remember the name of the output directory (the -d option).
+** The string is duplicated into malloc'd storage; exits on OOM.
+*/
+static char *outputDir = NULL;
+static void handle_d_option(char *z){
+  outputDir = (char *) malloc( lemonStrlen(z)+1 );
+  if( outputDir==0 ){
+    fprintf(stderr,"out of memory\n");
+    exit(1);
+  }
+  lemon_strcpy(outputDir, z);
+}
+
+/* Remember the name of a user-supplied template file (the -T option).
+** The string is duplicated into malloc'd storage; aborts on OOM. */
+static char *user_templatename = NULL;
+static void handle_T_option(char *z){
+  user_templatename = (char *) malloc( lemonStrlen(z)+1 );
+  if( user_templatename==0 ){
+    memory_error();
+  }
+  lemon_strcpy(user_templatename, z);
+}
+
+/* Merge together two lists of rules ordered by rule.iRule into a single
+** list, also ordered by iRule.  Either input list may be NULL. */
+static struct rule *Rule_merge(struct rule *pA, struct rule *pB){
+  struct rule *pFirst = 0;
+  struct rule **ppPrev = &pFirst;
+  while( pA && pB ){
+    if( pA->iRule<pB->iRule ){
+      *ppPrev = pA;
+      ppPrev = &pA->next;
+      pA = pA->next;
+    }else{
+      *ppPrev = pB;
+      ppPrev = &pB->next;
+      pB = pB->next;
+    }
+  }
+  if( pA ){
+    *ppPrev = pA;
+  }else{
+    *ppPrev = pB;
+  }
+  return pFirst;
+}
+
+/*
+** Sort a list of rules in order of increasing iRule value.
+**
+** This is a bottom-up merge sort over a linked list: x[i] holds a sorted
+** sublist of roughly 2^i elements; singletons are merged upward until an
+** empty slot is found, and all slots are merged at the end.
+*/
+static struct rule *Rule_sort(struct rule *rp){
+  unsigned int i;
+  struct rule *pNext;
+  struct rule *x[32];
+  memset(x, 0, sizeof(x));
+  while( rp ){
+    pNext = rp->next;
+    rp->next = 0;
+    for(i=0; i<sizeof(x)/sizeof(x[0])-1 && x[i]; i++){
+      rp = Rule_merge(x[i], rp);
+      x[i] = 0;
+    }
+    x[i] = rp;
+    rp = pNext;
+  }
+  rp = 0;
+  for(i=0; i<sizeof(x)/sizeof(x[0]); i++){
+    rp = Rule_merge(x[i], rp);
+  }
+  return rp;
+}
+
+/* forward reference */
+static const char *minimum_size_type(int lwr, int upr, int *pnByte);
+
+/* Print a single line of the "Parser Stats" output: the label, a run of
+** dots padding to a fixed column, then the value. */
+static void stats_line(const char *zLabel, int iValue){
+  int nLabel = lemonStrlen(zLabel);
+  printf("  %s%.*s %5d\n", zLabel,
+         35-nLabel, "................................",
+         iValue);
+}
+
+/* The main program.  Parse the command line and do it...
+**
+** Exits with 0 on success; 1 on errors or unresolved parsing conflicts.
+*/
+int main(int argc, char **argv){
+  static int version = 0;
+  static int rpflag = 0;
+  static int basisflag = 0;
+  static int compress = 0;
+  static int quiet = 0;
+  static int statistics = 0;
+  static int mhflag = 0;
+  static int nolinenosflag = 0;
+  static int noResort = 0;
+  static int sqlFlag = 0;
+  static int printPP = 0;
+
+  /* Command-line option table consumed by OptInit()/handleflags(). */
+  static struct s_options options[] = {
+    {OPT_FLAG, "b", (char*)&basisflag, "Print only the basis in report."},
+    {OPT_FLAG, "c", (char*)&compress, "Don't compress the action table."},
+    {OPT_FSTR, "d", (char*)&handle_d_option, "Output directory.  Default '.'"},
+    {OPT_FSTR, "D", (char*)handle_D_option, "Define an %ifdef macro."},
+    {OPT_FLAG, "E", (char*)&printPP, "Print input file after preprocessing."},
+    {OPT_FSTR, "f", 0, "Ignored.  (Placeholder for -f compiler options.)"},
+    {OPT_FLAG, "g", (char*)&rpflag, "Print grammar without actions."},
+    {OPT_FSTR, "I", 0, "Ignored.  (Placeholder for '-I' compiler options.)"},
+    {OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file."},
+    {OPT_FLAG, "l", (char*)&nolinenosflag, "Do not print #line statements."},
+    {OPT_FSTR, "O", 0, "Ignored.  (Placeholder for '-O' compiler options.)"},
+    {OPT_FLAG, "p", (char*)&showPrecedenceConflict,
+                    "Show conflicts resolved by precedence rules"},
+    {OPT_FLAG, "q", (char*)&quiet, "(Quiet) Don't print the report file."},
+    {OPT_FLAG, "r", (char*)&noResort, "Do not sort or renumber states"},
+    {OPT_FLAG, "s", (char*)&statistics,
+                                   "Print parser stats to standard output."},
+    {OPT_FLAG, "S", (char*)&sqlFlag,
+                    "Generate the *.sql file describing the parser tables."},
+    {OPT_FLAG, "x", (char*)&version, "Print the version number."},
+    {OPT_FSTR, "T", (char*)handle_T_option, "Specify a template file."},
+    {OPT_FSTR, "W", 0, "Ignored.  (Placeholder for '-W' compiler options.)"},
+    {OPT_FLAG,0,0,0}
+  };
+  int i;
+  int exitcode;
+  struct lemon lem;
+  struct rule *rp;
+
+  (void)argc;
+  OptInit(argv,options,stderr);
+  if( version ){
+     printf("Lemon version 1.0\n");
+     exit(0);
+  }
+  if( OptNArgs()!=1 ){
+    fprintf(stderr,"Exactly one filename argument is required.\n");
+    exit(1);
+  }
+  memset(&lem, 0, sizeof(lem));
+  lem.errorcnt = 0;
+
+  /* Initialize the machine */
+  Strsafe_init();
+  Symbol_init();
+  State_init();
+  lem.argv0 = argv[0];
+  lem.filename = OptArg(0);
+  lem.basisflag = basisflag;
+  lem.nolinenosflag = nolinenosflag;
+  lem.printPreprocessed = printPP;
+  Symbol_new("$");
+
+  /* Parse the input file */
+  Parse(&lem);
+  if( lem.printPreprocessed || lem.errorcnt ) exit(lem.errorcnt);
+  if( lem.nrule==0 ){
+    fprintf(stderr,"Empty grammar.\n");
+    exit(1);
+  }
+  lem.errsym = Symbol_find("error");
+
+  /* Count and index the symbols of the grammar */
+  Symbol_new("{default}");
+  lem.nsymbol = Symbol_count();
+  lem.symbols = Symbol_arrayof();
+  for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
+  qsort(lem.symbols,lem.nsymbol,sizeof(struct symbol*), Symbolcmpp);
+  for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
+  /* After sorting, MULTITERMINALs and "{default}" sit at the end; trim
+  ** them off of nsymbol, then count the leading terminals (upper-case). */
+  while( lem.symbols[i-1]->type==MULTITERMINAL ){ i--; }
+  assert( strcmp(lem.symbols[i-1]->name,"{default}")==0 );
+  lem.nsymbol = i - 1;
+  for(i=1; ISUPPER(lem.symbols[i]->name[0]); i++);
+  lem.nterminal = i;
+
+  /* Assign sequential rule numbers.  Start with 0.  Put rules that have no
+  ** reduce action C-code associated with them last, so that the switch()
+  ** statement that selects reduction actions will have a smaller jump table.
+  */
+  for(i=0, rp=lem.rule; rp; rp=rp->next){
+    rp->iRule = rp->code ? i++ : -1;
+  }
+  lem.nruleWithAction = i;
+  for(rp=lem.rule; rp; rp=rp->next){
+    if( rp->iRule<0 ) rp->iRule = i++;
+  }
+  lem.startRule = lem.rule;
+  lem.rule = Rule_sort(lem.rule);
+
+  /* Generate a reprint of the grammar, if requested on the command line */
+  if( rpflag ){
+    Reprint(&lem);
+  }else{
+    /* Initialize the size for all follow and first sets */
+    SetSize(lem.nterminal+1);
+
+    /* Find the precedence for every production rule (that has one) */
+    FindRulePrecedences(&lem);
+
+    /* Compute the lambda-nonterminals and the first-sets for every
+    ** nonterminal */
+    FindFirstSets(&lem);
+
+    /* Compute all LR(0) states.  Also record follow-set propagation
+    ** links so that the follow-set can be computed later */
+    lem.nstate = 0;
+    FindStates(&lem);
+    lem.sorted = State_arrayof();
+
+    /* Tie up loose ends on the propagation links */
+    FindLinks(&lem);
+
+    /* Compute the follow set of every reducible configuration */
+    FindFollowSets(&lem);
+
+    /* Compute the action tables */
+    FindActions(&lem);
+
+    /* Compress the action tables */
+    if( compress==0 ) CompressTables(&lem);
+
+    /* Reorder and renumber the states so that states with fewer choices
+    ** occur at the end.  This is an optimization that helps make the
+    ** generated parser tables smaller. */
+    if( noResort==0 ) ResortStates(&lem);
+
+    /* Generate a report of the parser generated.  (the "y.output" file) */
+    if( !quiet ) ReportOutput(&lem);
+
+    /* Generate the source code for the parser */
+    ReportTable(&lem, mhflag, sqlFlag);
+
+    /* Produce a header file for use by the scanner.  (This step is
+    ** omitted if the "-m" option is used because makeheaders will
+    ** generate the file for us.) */
+    if( !mhflag ) ReportHeader(&lem);
+  }
+  if( statistics ){
+    printf("Parser statistics:\n");
+    stats_line("terminal symbols", lem.nterminal);
+    stats_line("non-terminal symbols", lem.nsymbol - lem.nterminal);
+    stats_line("total symbols", lem.nsymbol);
+    stats_line("rules", lem.nrule);
+    stats_line("states", lem.nxstate);
+    stats_line("conflicts", lem.nconflict);
+    stats_line("action table entries", lem.nactiontab);
+    stats_line("lookahead table entries", lem.nlookaheadtab);
+    stats_line("total table size (bytes)", lem.tablesize);
+  }
+  if( lem.nconflict > 0 ){
+    fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict);
+  }
+
+  /* return 0 on success, 1 on failure. */
+  exitcode = ((lem.errorcnt > 0) || (lem.nconflict > 0)) ? 1 : 0;
+  exit(exitcode);
+  return (exitcode);  /* unreachable; silences compilers that want a return */
+}
+/******************** From the file "msort.c" *******************************/
+/*
+** A generic merge-sort program.
+**
+** USAGE:
+** Let "ptr" be a pointer to some structure which is at the head of
+** a null-terminated list.  Then to sort the list call:
+**
+**     ptr = msort(ptr,&(ptr->next),cmpfnc);
+**
+** In the above, "cmpfnc" is a pointer to a function which compares
+** two instances of the structure and returns an integer, as in
+** strcmp.  The second argument is a pointer to the pointer to the
+** second element of the linked list.  This address is used to compute
+** the offset to the "next" field within the structure.  The offset to
+** the "next" field must be constant for all structures in the list.
+**
+** The function returns a new pointer which is the head of the list
+** after sorting.
+**
+** ALGORITHM:
+** Merge-sort.
+*/
+
+/*
+** Return a pointer to the next structure in the linked list.
+** "offset" (a local in the functions below) is the byte offset of the
+** list's "next" field within the structure.
+*/
+#define NEXT(A) (*(char**)(((char*)A)+offset))
+
+/*
+** Inputs:
+**   a:       A sorted, null-terminated linked list.  (May be null).
+**   b:       A sorted, null-terminated linked list.  (May be null).
+**   cmp:     A pointer to the comparison function.
+**   offset:  Offset in the structure to the "next" field.
+**
+** Return Value:
+**   A pointer to the head of a sorted list containing the elements
+**   of both a and b.
+**
+** Side effects:
+**   The "next" pointers for elements in the lists a and b are
+**   changed.
+*/
+static char *merge(
+  char *a,
+  char *b,
+  int (*cmp)(const char*,const char*),
+  int offset
+){
+  char *ptr, *head;
+
+  if( a==0 ){
+    head = b;
+  }else if( b==0 ){
+    head = a;
+  }else{
+    /* Pick the smaller head, then splice from whichever list is smaller
+    ** until one runs out; the merge is stable (<= keeps a's element). */
+    if( (*cmp)(a,b)<=0 ){
+      ptr = a;
+      a = NEXT(a);
+    }else{
+      ptr = b;
+      b = NEXT(b);
+    }
+    head = ptr;
+    while( a && b ){
+      if( (*cmp)(a,b)<=0 ){
+        NEXT(ptr) = a;
+        ptr = a;
+        a = NEXT(a);
+      }else{
+        NEXT(ptr) = b;
+        ptr = b;
+        b = NEXT(b);
+      }
+    }
+    if( a ) NEXT(ptr) = a;
+    else    NEXT(ptr) = b;
+  }
+  return head;
+}
+
+/*
+** Inputs:
+**   list:      Pointer to a singly-linked list of structures.
+**   next:      Pointer to pointer to the second element of the list.
+**   cmp:       A comparison function.
+**
+** Return Value:
+**   A pointer to the head of a sorted list containing the elements
+**   originally in list.
+**
+** Side effects:
+**   The "next" pointers for elements in list are changed.
+**
+** Bottom-up merge sort: set[i] holds a sorted sublist of ~2^i elements,
+** so LISTSIZE of 30 supports lists of up to about 2^30 elements.
+*/
+#define LISTSIZE 30
+static char *msort(
+  char *list,
+  char **next,
+  int (*cmp)(const char*,const char*)
+){
+  unsigned long offset;
+  char *ep;
+  char *set[LISTSIZE];
+  int i;
+  /* Derive the byte offset of the "next" field from the two pointers. */
+  offset = (unsigned long)((char*)next - (char*)list);
+  for(i=0; i<LISTSIZE; i++) set[i] = 0;
+  while( list ){
+    ep = list;
+    list = NEXT(list);
+    NEXT(ep) = 0;
+    for(i=0; i<LISTSIZE-1 && set[i]!=0; i++){
+      ep = merge(ep,set[i],cmp,offset);
+      set[i] = 0;
+    }
+    set[i] = ep;
+  }
+  ep = 0;
+  for(i=0; i<LISTSIZE; i++) if( set[i] ) ep = merge(set[i],ep,cmp,offset);
+  return ep;
+}
+/************************ From the file "option.c" **************************/
+static char **g_argv;                /* The command line, set by OptInit() */
+static struct s_options *op;         /* The option descriptor table */
+static FILE *errstream;              /* Where to print error messages */
+
+/* True if X looks like an option: starts with '-' or '+', or contains '='. */
+#define ISOPT(X) ((X)[0]=='-'||(X)[0]=='+'||strchr((X),'=')!=0)
+
+/*
+** Print the command line with a caret pointing to the k-th character
+** of the n-th field.
+*/
+static void errline(int n, int k, FILE *err)
+{
+  int spcnt, i;
+  if( g_argv[0] ){
+    fprintf(err,"%s",g_argv[0]);
+    spcnt = lemonStrlen(g_argv[0]) + 1;
+  }else{
+    spcnt = 0;
+  }
+  /* Echo arguments up to the n-th, accumulating the caret column. */
+  for(i=1; i<n && g_argv[i]; i++){
+    fprintf(err," %s",g_argv[i]);
+    spcnt += lemonStrlen(g_argv[i])+1;
+  }
+  spcnt += k;
+  for(; g_argv[i]; i++) fprintf(err," %s",g_argv[i]);
+  if( spcnt<20 ){
+    fprintf(err,"\n%*s^-- here\n",spcnt,"");
+  }else{
+    fprintf(err,"\n%*shere --^\n",spcnt-7,"");
+  }
+}
+
+/*
+** Return the index of the N-th non-switch argument.  Return -1
+** if N is out of range.  Everything after a "--" argument is treated
+** as a non-switch argument.
+*/
+static int argindex(int n)
+{
+  int i;
+  int dashdash = 0;
+  if( g_argv!=0 && *g_argv!=0 ){
+    for(i=1; g_argv[i]; i++){
+      if( dashdash || !ISOPT(g_argv[i]) ){
+        if( n==0 ) return i;
+        n--;
+      }
+      if( strcmp(g_argv[i],"--")==0 ) dashdash = 1;
+    }
+  }
+  return -1;
+}
+
+static char emsg[] = "Command line syntax error: ";
+
+/*
+** Process a flag command line argument (one starting with '-' or '+').
+** The option label is matched as a prefix; '-' sets the flag to 1 and
+** '+' sets it to 0.  Returns the number of errors encountered (0 or 1).
+*/
+static int handleflags(int i, FILE *err)
+{
+  int v;
+  int errcnt = 0;
+  int j;
+  for(j=0; op[j].label; j++){
+    if( strncmp(&g_argv[i][1],op[j].label,lemonStrlen(op[j].label))==0 ) break;
+  }
+  v = g_argv[i][0]=='-' ? 1 : 0;
+  if( op[j].label==0 ){
+    if( err ){
+      fprintf(err,"%sundefined option.\n",emsg);
+      errline(i,1,err);
+    }
+    errcnt++;
+  }else if( op[j].arg==0 ){
+    /* Ignore this option */
+  }else if( op[j].type==OPT_FLAG ){
+    *((int*)op[j].arg) = v;
+  }else if( op[j].type==OPT_FFLAG ){
+    (*(void(*)(int))(op[j].arg))(v);
+  }else if( op[j].type==OPT_FSTR ){
+    /* OPT_FSTR flags take the rest of the argument (after "-X") as value */
+    (*(void(*)(char *))(op[j].arg))(&g_argv[i][2]);
+  }else{
+    if( err ){
+      fprintf(err,"%smissing argument on switch.\n",emsg);
+      errline(i,1,err);
+    }
+    errcnt++;
+  }
+  return errcnt;
+}
+
+/*
+** Process a command line switch which has an argument ("name=value").
+** The '=' is temporarily overwritten with a NUL to match the option name,
+** then restored.  The value is parsed per the option's type and either
+** stored through op[j].arg or passed to the callback it points to.
+** Returns the number of errors encountered.
+*/
+static int handleswitch(int i, FILE *err)
+{
+  int lv = 0;
+  double dv = 0.0;
+  char *sv = 0, *end;
+  char *cp;
+  int j;
+  int errcnt = 0;
+  cp = strchr(g_argv[i],'=');
+  assert( cp!=0 );
+  *cp = 0;
+  for(j=0; op[j].label; j++){
+    if( strcmp(g_argv[i],op[j].label)==0 ) break;
+  }
+  *cp = '=';
+  if( op[j].label==0 ){
+    if( err ){
+      fprintf(err,"%sundefined option.\n",emsg);
+      errline(i,0,err);
+    }
+    errcnt++;
+  }else{
+    cp++;
+    /* First pass: parse/validate the value text by type. */
+    switch( op[j].type ){
+      case OPT_FLAG:
+      case OPT_FFLAG:
+        if( err ){
+          fprintf(err,"%soption requires an argument.\n",emsg);
+          errline(i,0,err);
+        }
+        errcnt++;
+        break;
+      case OPT_DBL:
+      case OPT_FDBL:
+        dv = strtod(cp,&end);
+        if( *end ){
+          if( err ){
+            fprintf(err,
+               "%sillegal character in floating-point argument.\n",emsg);
+            errline(i,(int)((char*)end-(char*)g_argv[i]),err);
+          }
+          errcnt++;
+        }
+        break;
+      case OPT_INT:
+      case OPT_FINT:
+        lv = strtol(cp,&end,0);
+        if( *end ){
+          if( err ){
+            fprintf(err,"%sillegal character in integer argument.\n",emsg);
+            errline(i,(int)((char*)end-(char*)g_argv[i]),err);
+          }
+          errcnt++;
+        }
+        break;
+      case OPT_STR:
+      case OPT_FSTR:
+        sv = cp;
+        break;
+    }
+    /* Second pass: deliver the parsed value. */
+    switch( op[j].type ){
+      case OPT_FLAG:
+      case OPT_FFLAG:
+        break;
+      case OPT_DBL:
+        *(double*)(op[j].arg) = dv;
+        break;
+      case OPT_FDBL:
+        (*(void(*)(double))(op[j].arg))(dv);
+        break;
+      case OPT_INT:
+        *(int*)(op[j].arg) = lv;
+        break;
+      case OPT_FINT:
+        (*(void(*)(int))(op[j].arg))((int)lv);
+        break;
+      case OPT_STR:
+        *(char**)(op[j].arg) = sv;
+        break;
+      case OPT_FSTR:
+        (*(void(*)(char *))(op[j].arg))(sv);
+        break;
+    }
+  }
+  return errcnt;
+}
+
+/* Initialize option processing: record the argv vector, option table,
+** and error stream, then process every flag ('-'/'+') and "name=value"
+** switch on the command line.  On any error, print usage and exit(1).
+** Always returns 0 when it returns at all. */
+int OptInit(char **a, struct s_options *o, FILE *err)
+{
+  int errcnt = 0;
+  g_argv = a;
+  op = o;
+  errstream = err;
+  if( g_argv && *g_argv && op ){
+    int i;
+    for(i=1; g_argv[i]; i++){
+      if( g_argv[i][0]=='+' || g_argv[i][0]=='-' ){
+        errcnt += handleflags(i,err);
+      }else if( strchr(g_argv[i],'=') ){
+        errcnt += handleswitch(i,err);
+      }
+    }
+  }
+  if( errcnt>0 ){
+    fprintf(err,"Valid command line options for \"%s\" are:\n",*a);
+    OptPrint();
+    exit(1);
+  }
+  return 0;
+}
+
+/* Return the number of non-switch command line arguments (everything
+** after "--" counts as a non-switch argument). */
+int OptNArgs(void){
+  int cnt = 0;
+  int dashdash = 0;
+  int i;
+  if( g_argv!=0 && g_argv[0]!=0 ){
+    for(i=1; g_argv[i]; i++){
+      if( dashdash || !ISOPT(g_argv[i]) ) cnt++;
+      if( strcmp(g_argv[i],"--")==0 ) dashdash = 1;
+    }
+  }
+  return cnt;
+}
+
+/* Return the N-th non-switch argument, or 0 if N is out of range. */
+char *OptArg(int n)
+{
+  int i;
+  i = argindex(n);
+  return i>=0 ? g_argv[i] : 0;
+}
+
+/* Print the command line with a caret under the N-th non-switch
+** argument.  No-op if N is out of range. */
+void OptErr(int n)
+{
+  int i;
+  i = argindex(n);
+  if( i>=0 ) errline(i,0,errstream);
+}
+
+/* Print a usage summary of all options to the error stream, with the
+** descriptions aligned in a column sized to the longest option label. */
+void OptPrint(void){
+  int i;
+  int max, len;
+  max = 0;
+  /* First pass: find the widest "label<placeholder>" string. */
+  for(i=0; op[i].label; i++){
+    len = lemonStrlen(op[i].label) + 1;
+    switch( op[i].type ){
+      case OPT_FLAG:
+      case OPT_FFLAG:
+        break;
+      case OPT_INT:
+      case OPT_FINT:
+        len += 9;       /* length of "<integer>" */
+        break;
+      case OPT_DBL:
+      case OPT_FDBL:
+        len += 6;       /* length of "<real>" */
+        break;
+      case OPT_STR:
+      case OPT_FSTR:
+        len += 8;       /* length of "<string>" */
+        break;
+    }
+    if( len>max ) max = len;
+  }
+  /* Second pass: print each option padded to that width. */
+  for(i=0; op[i].label; i++){
+    switch( op[i].type ){
+      case OPT_FLAG:
+      case OPT_FFLAG:
+        fprintf(errstream,"  -%-*s  %s\n",max,op[i].label,op[i].message);
+        break;
+      case OPT_INT:
+      case OPT_FINT:
+        fprintf(errstream,"  -%s<integer>%*s  %s\n",op[i].label,
+          (int)(max-lemonStrlen(op[i].label)-9),"",op[i].message);
+        break;
+      case OPT_DBL:
+      case OPT_FDBL:
+        fprintf(errstream,"  -%s<real>%*s  %s\n",op[i].label,
+          (int)(max-lemonStrlen(op[i].label)-6),"",op[i].message);
+        break;
+      case OPT_STR:
+      case OPT_FSTR:
+        fprintf(errstream,"  -%s<string>%*s  %s\n",op[i].label,
+          (int)(max-lemonStrlen(op[i].label)-8),"",op[i].message);
+        break;
+    }
+  }
+}
+/*********************** From the file "parse.c" ****************************/
+/*
+** Input file parser for the LEMON parser generator.
+*/
+
+/* The state of the parser.  parseonetoken() is a hand-written state
+** machine driven by one token at a time; these are its states. */
+enum e_state {
+  INITIALIZE,
+  WAITING_FOR_DECL_OR_RULE,
+  WAITING_FOR_DECL_KEYWORD,
+  WAITING_FOR_DECL_ARG,
+  WAITING_FOR_PRECEDENCE_SYMBOL,
+  WAITING_FOR_ARROW,
+  IN_RHS,
+  LHS_ALIAS_1,
+  LHS_ALIAS_2,
+  LHS_ALIAS_3,
+  RHS_ALIAS_1,
+  RHS_ALIAS_2,
+  PRECEDENCE_MARK_1,
+  PRECEDENCE_MARK_2,
+  RESYNC_AFTER_RULE_ERROR,
+  RESYNC_AFTER_DECL_ERROR,
+  WAITING_FOR_DESTRUCTOR_SYMBOL,
+  WAITING_FOR_DATATYPE_SYMBOL,
+  WAITING_FOR_FALLBACK_ID,
+  WAITING_FOR_WILDCARD_ID,
+  WAITING_FOR_CLASS_ID,
+  WAITING_FOR_CLASS_TOKEN,
+  WAITING_FOR_TOKEN_NAME
+};
+/* All context carried between successive parseonetoken() calls. */
+struct pstate {
+  char *filename;       /* Name of the input file */
+  int tokenlineno;      /* Linenumber at which current token starts */
+  int errorcnt;         /* Number of errors so far */
+  char *tokenstart;     /* Text of current token */
+  struct lemon *gp;     /* Global state vector */
+  enum e_state state;        /* The state of the parser */
+  struct symbol *fallback;   /* The fallback token */
+  struct symbol *tkclass;    /* Token class symbol */
+  struct symbol *lhs;        /* Left-hand side of current rule */
+  const char *lhsalias;      /* Alias for the LHS */
+  int nrhs;                  /* Number of right-hand side symbols seen */
+  struct symbol *rhs[MAXRHS];  /* RHS symbols */
+  const char *alias[MAXRHS];   /* Aliases for each RHS symbol (or NULL) */
+  struct rule *prevrule;     /* Previous rule parsed */
+  const char *declkeyword;   /* Keyword of a declaration */
+  char **declargslot;        /* Where the declaration argument should be put */
+  int insertLineMacro;       /* Add #line before declaration insert */
+  int *decllinenoslot;       /* Where to write declaration line number */
+  enum e_assoc declassoc;    /* Assign this association to decl arguments */
+  int preccounter;           /* Assign this precedence to decl arguments */
+  struct rule *firstrule;    /* Pointer to first rule in the grammar */
+  struct rule *lastrule;     /* Pointer to the most recently parsed rule */
+};
+
+/* Parse a single token */
+static void parseonetoken(struct pstate *psp)
+{
+ const char *x;
+ x = Strsafe(psp->tokenstart); /* Save the token permanently */
+#if 0
+ printf("%s:%d: Token=[%s] state=%d\n",psp->filename,psp->tokenlineno,
+ x,psp->state);
+#endif
+ switch( psp->state ){
+ case INITIALIZE:
+ psp->prevrule = 0;
+ psp->preccounter = 0;
+ psp->firstrule = psp->lastrule = 0;
+ psp->gp->nrule = 0;
+ /* fall through */
+ case WAITING_FOR_DECL_OR_RULE:
+ if( x[0]=='%' ){
+ psp->state = WAITING_FOR_DECL_KEYWORD;
+ }else if( ISLOWER(x[0]) ){
+ psp->lhs = Symbol_new(x);
+ psp->nrhs = 0;
+ psp->lhsalias = 0;
+ psp->state = WAITING_FOR_ARROW;
+ }else if( x[0]=='{' ){
+ if( psp->prevrule==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "There is no prior rule upon which to attach the code "
+ "fragment which begins on this line.");
+ psp->errorcnt++;
+ }else if( psp->prevrule->code!=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Code fragment beginning on this line is not the first "
+ "to follow the previous rule.");
+ psp->errorcnt++;
+ }else if( strcmp(x, "{NEVER-REDUCE")==0 ){
+ psp->prevrule->neverReduce = 1;
+ }else{
+ psp->prevrule->line = psp->tokenlineno;
+ psp->prevrule->code = &x[1];
+ psp->prevrule->noCode = 0;
+ }
+ }else if( x[0]=='[' ){
+ psp->state = PRECEDENCE_MARK_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Token \"%s\" should be either \"%%\" or a nonterminal name.",
+ x);
+ psp->errorcnt++;
+ }
+ break;
+ case PRECEDENCE_MARK_1:
+ if( !ISUPPER(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "The precedence symbol must be a terminal.");
+ psp->errorcnt++;
+ }else if( psp->prevrule==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "There is no prior rule to assign precedence \"[%s]\".",x);
+ psp->errorcnt++;
+ }else if( psp->prevrule->precsym!=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Precedence mark on this line is not the first "
+ "to follow the previous rule.");
+ psp->errorcnt++;
+ }else{
+ psp->prevrule->precsym = Symbol_new(x);
+ }
+ psp->state = PRECEDENCE_MARK_2;
+ break;
+ case PRECEDENCE_MARK_2:
+ if( x[0]!=']' ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \"]\" on precedence mark.");
+ psp->errorcnt++;
+ }
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ break;
+ case WAITING_FOR_ARROW:
+ if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+ psp->state = IN_RHS;
+ }else if( x[0]=='(' ){
+ psp->state = LHS_ALIAS_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Expected to see a \":\" following the LHS symbol \"%s\".",
+ psp->lhs->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_1:
+ if( ISALPHA(x[0]) ){
+ psp->lhsalias = x;
+ psp->state = LHS_ALIAS_2;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "\"%s\" is not a valid alias for the LHS \"%s\"\n",
+ x,psp->lhs->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_2:
+ if( x[0]==')' ){
+ psp->state = LHS_ALIAS_3;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_3:
+ if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+ psp->state = IN_RHS;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \"->\" following: \"%s(%s)\".",
+ psp->lhs->name,psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case IN_RHS:
+ if( x[0]=='.' ){
+ struct rule *rp;
+ rp = (struct rule *)calloc( sizeof(struct rule) +
+ sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs, 1);
+ if( rp==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Can't allocate enough memory for this rule.");
+ psp->errorcnt++;
+ psp->prevrule = 0;
+ }else{
+ int i;
+ rp->ruleline = psp->tokenlineno;
+ rp->rhs = (struct symbol**)&rp[1];
+ rp->rhsalias = (const char**)&(rp->rhs[psp->nrhs]);
+ for(i=0; i<psp->nrhs; i++){
+ rp->rhs[i] = psp->rhs[i];
+ rp->rhsalias[i] = psp->alias[i];
+ if( rp->rhsalias[i]!=0 ){ rp->rhs[i]->bContent = 1; }
+ }
+ rp->lhs = psp->lhs;
+ rp->lhsalias = psp->lhsalias;
+ rp->nrhs = psp->nrhs;
+ rp->code = 0;
+ rp->noCode = 1;
+ rp->precsym = 0;
+ rp->index = psp->gp->nrule++;
+ rp->nextlhs = rp->lhs->rule;
+ rp->lhs->rule = rp;
+ rp->next = 0;
+ if( psp->firstrule==0 ){
+ psp->firstrule = psp->lastrule = rp;
+ }else{
+ psp->lastrule->next = rp;
+ psp->lastrule = rp;
+ }
+ psp->prevrule = rp;
+ }
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( ISALPHA(x[0]) ){
+ if( psp->nrhs>=MAXRHS ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Too many symbols on RHS of rule beginning at \"%s\".",
+ x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }else{
+ psp->rhs[psp->nrhs] = Symbol_new(x);
+ psp->alias[psp->nrhs] = 0;
+ psp->nrhs++;
+ }
+ }else if( (x[0]=='|' || x[0]=='/') && psp->nrhs>0 && ISUPPER(x[1]) ){
+ struct symbol *msp = psp->rhs[psp->nrhs-1];
+ if( msp->type!=MULTITERMINAL ){
+ struct symbol *origsp = msp;
+ msp = (struct symbol *) calloc(1,sizeof(*msp));
+ memset(msp, 0, sizeof(*msp));
+ msp->type = MULTITERMINAL;
+ msp->nsubsym = 1;
+ msp->subsym = (struct symbol **) calloc(1,sizeof(struct symbol*));
+ msp->subsym[0] = origsp;
+ msp->name = origsp->name;
+ psp->rhs[psp->nrhs-1] = msp;
+ }
+ msp->nsubsym++;
+ msp->subsym = (struct symbol **) realloc(msp->subsym,
+ sizeof(struct symbol*)*msp->nsubsym);
+ msp->subsym[msp->nsubsym-1] = Symbol_new(&x[1]);
+ if( ISLOWER(x[1]) || ISLOWER(msp->subsym[0]->name[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Cannot form a compound containing a non-terminal");
+ psp->errorcnt++;
+ }
+ }else if( x[0]=='(' && psp->nrhs>0 ){
+ psp->state = RHS_ALIAS_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal character on RHS of rule: \"%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case RHS_ALIAS_1:
+ if( ISALPHA(x[0]) ){
+ psp->alias[psp->nrhs-1] = x;
+ psp->state = RHS_ALIAS_2;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "\"%s\" is not a valid alias for the RHS symbol \"%s\"\n",
+ x,psp->rhs[psp->nrhs-1]->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case RHS_ALIAS_2:
+ if( x[0]==')' ){
+ psp->state = IN_RHS;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case WAITING_FOR_DECL_KEYWORD:
+ if( ISALPHA(x[0]) ){
+ psp->declkeyword = x;
+ psp->declargslot = 0;
+ psp->decllinenoslot = 0;
+ psp->insertLineMacro = 1;
+ psp->state = WAITING_FOR_DECL_ARG;
+ if( strcmp(x,"name")==0 ){
+ psp->declargslot = &(psp->gp->name);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"include")==0 ){
+ psp->declargslot = &(psp->gp->include);
+ }else if( strcmp(x,"code")==0 ){
+ psp->declargslot = &(psp->gp->extracode);
+ }else if( strcmp(x,"token_destructor")==0 ){
+ psp->declargslot = &psp->gp->tokendest;
+ }else if( strcmp(x,"default_destructor")==0 ){
+ psp->declargslot = &psp->gp->vardest;
+ }else if( strcmp(x,"token_prefix")==0 ){
+ psp->declargslot = &psp->gp->tokenprefix;
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"syntax_error")==0 ){
+ psp->declargslot = &(psp->gp->error);
+ }else if( strcmp(x,"parse_accept")==0 ){
+ psp->declargslot = &(psp->gp->accept);
+ }else if( strcmp(x,"parse_failure")==0 ){
+ psp->declargslot = &(psp->gp->failure);
+ }else if( strcmp(x,"stack_overflow")==0 ){
+ psp->declargslot = &(psp->gp->overflow);
+ }else if( strcmp(x,"extra_argument")==0 ){
+ psp->declargslot = &(psp->gp->arg);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"extra_context")==0 ){
+ psp->declargslot = &(psp->gp->ctx);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"token_type")==0 ){
+ psp->declargslot = &(psp->gp->tokentype);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"default_type")==0 ){
+ psp->declargslot = &(psp->gp->vartype);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"stack_size")==0 ){
+ psp->declargslot = &(psp->gp->stacksize);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"start_symbol")==0 ){
+ psp->declargslot = &(psp->gp->start);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"left")==0 ){
+ psp->preccounter++;
+ psp->declassoc = LEFT;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"right")==0 ){
+ psp->preccounter++;
+ psp->declassoc = RIGHT;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"nonassoc")==0 ){
+ psp->preccounter++;
+ psp->declassoc = NONE;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"destructor")==0 ){
+ psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL;
+ }else if( strcmp(x,"type")==0 ){
+ psp->state = WAITING_FOR_DATATYPE_SYMBOL;
+ }else if( strcmp(x,"fallback")==0 ){
+ psp->fallback = 0;
+ psp->state = WAITING_FOR_FALLBACK_ID;
+ }else if( strcmp(x,"token")==0 ){
+ psp->state = WAITING_FOR_TOKEN_NAME;
+ }else if( strcmp(x,"wildcard")==0 ){
+ psp->state = WAITING_FOR_WILDCARD_ID;
+ }else if( strcmp(x,"token_class")==0 ){
+ psp->state = WAITING_FOR_CLASS_ID;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Unknown declaration keyword: \"%%%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal declaration keyword: \"%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
+ case WAITING_FOR_DESTRUCTOR_SYMBOL:
+ if( !ISALPHA(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Symbol name missing after %%destructor keyword");
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ psp->declargslot = &sp->destructor;
+ psp->decllinenoslot = &sp->destLineno;
+ psp->insertLineMacro = 1;
+ psp->state = WAITING_FOR_DECL_ARG;
+ }
+ break;
+ case WAITING_FOR_DATATYPE_SYMBOL:
+ if( !ISALPHA(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Symbol name missing after %%type keyword");
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ struct symbol *sp = Symbol_find(x);
+ if((sp) && (sp->datatype)){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Symbol %%type \"%s\" already defined", x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ if (!sp){
+ sp = Symbol_new(x);
+ }
+ psp->declargslot = &sp->datatype;
+ psp->insertLineMacro = 0;
+ psp->state = WAITING_FOR_DECL_ARG;
+ }
+ }
+ break;
+ case WAITING_FOR_PRECEDENCE_SYMBOL:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( ISUPPER(x[0]) ){
+ struct symbol *sp;
+ sp = Symbol_new(x);
+ if( sp->prec>=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Symbol \"%s\" has already be given a precedence.",x);
+ psp->errorcnt++;
+ }else{
+ sp->prec = psp->preccounter;
+ sp->assoc = psp->declassoc;
+ }
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Can't assign a precedence to \"%s\".",x);
+ psp->errorcnt++;
+ }
+ break;
+ case WAITING_FOR_DECL_ARG:
+ if( x[0]=='{' || x[0]=='\"' || ISALNUM(x[0]) ){
+ const char *zOld, *zNew;
+ char *zBuf, *z;
+ int nOld, n, nLine = 0, nNew, nBack;
+ int addLineMacro;
+ char zLine[50];
+ zNew = x;
+ if( zNew[0]=='"' || zNew[0]=='{' ) zNew++;
+ nNew = lemonStrlen(zNew);
+ if( *psp->declargslot ){
+ zOld = *psp->declargslot;
+ }else{
+ zOld = "";
+ }
+ nOld = lemonStrlen(zOld);
+ n = nOld + nNew + 20;
+ addLineMacro = !psp->gp->nolinenosflag
+ && psp->insertLineMacro
+ && psp->tokenlineno>1
+ && (psp->decllinenoslot==0 || psp->decllinenoslot[0]!=0);
+ if( addLineMacro ){
+ for(z=psp->filename, nBack=0; *z; z++){
+ if( *z=='\\' ) nBack++;
+ }
+ lemon_sprintf(zLine, "#line %d ", psp->tokenlineno);
+ nLine = lemonStrlen(zLine);
+ n += nLine + lemonStrlen(psp->filename) + nBack;
+ }
+ *psp->declargslot = (char *) realloc(*psp->declargslot, n);
+ zBuf = *psp->declargslot + nOld;
+ if( addLineMacro ){
+ if( nOld && zBuf[-1]!='\n' ){
+ *(zBuf++) = '\n';
+ }
+ memcpy(zBuf, zLine, nLine);
+ zBuf += nLine;
+ *(zBuf++) = '"';
+ for(z=psp->filename; *z; z++){
+ if( *z=='\\' ){
+ *(zBuf++) = '\\';
+ }
+ *(zBuf++) = *z;
+ }
+ *(zBuf++) = '"';
+ *(zBuf++) = '\n';
+ }
+ if( psp->decllinenoslot && psp->decllinenoslot[0]==0 ){
+ psp->decllinenoslot[0] = psp->tokenlineno;
+ }
+ memcpy(zBuf, zNew, nNew);
+ zBuf += nNew;
+ *zBuf = 0;
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal argument to %%%s: %s",psp->declkeyword,x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
+ case WAITING_FOR_FALLBACK_ID:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( !ISUPPER(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%fallback argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ if( psp->fallback==0 ){
+ psp->fallback = sp;
+ }else if( sp->fallback ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "More than one fallback assigned to token %s", x);
+ psp->errorcnt++;
+ }else{
+ sp->fallback = psp->fallback;
+ psp->gp->has_fallback = 1;
+ }
+ }
+ break;
+ case WAITING_FOR_TOKEN_NAME:
+ /* Tokens do not have to be declared before use. But they can be
+ ** in order to control their assigned integer number. The number for
+ ** each token is assigned when it is first seen. So by including
+ **
+ ** %token ONE TWO THREE.
+ **
+ ** early in the grammar file, that assigns small consecutive values
+ ** to each of the tokens ONE TWO and THREE.
+ */
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( !ISUPPER(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%token argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ }else{
+ (void)Symbol_new(x);
+ }
+ break;
+ case WAITING_FOR_WILDCARD_ID:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( !ISUPPER(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%wildcard argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ if( psp->gp->wildcard==0 ){
+ psp->gp->wildcard = sp;
+ }else{
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "Extra wildcard to token: %s", x);
+ psp->errorcnt++;
+ }
+ }
+ break;
+ case WAITING_FOR_CLASS_ID:
+ if( !ISLOWER(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%token_class must be followed by an identifier: %s", x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else if( Symbol_find(x) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "Symbol \"%s\" already used", x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ psp->tkclass = Symbol_new(x);
+ psp->tkclass->type = MULTITERMINAL;
+ psp->state = WAITING_FOR_CLASS_TOKEN;
+ }
+ break;
+ case WAITING_FOR_CLASS_TOKEN:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( ISUPPER(x[0]) || ((x[0]=='|' || x[0]=='/') && ISUPPER(x[1])) ){
+ struct symbol *msp = psp->tkclass;
+ msp->nsubsym++;
+ msp->subsym = (struct symbol **) realloc(msp->subsym,
+ sizeof(struct symbol*)*msp->nsubsym);
+ if( !ISUPPER(x[0]) ) x++;
+ msp->subsym[msp->nsubsym-1] = Symbol_new(x);
+ }else{
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%token_class argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
+ case RESYNC_AFTER_RULE_ERROR:
+/* if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+** break; */
+ case RESYNC_AFTER_DECL_ERROR:
+ if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+ if( x[0]=='%' ) psp->state = WAITING_FOR_DECL_KEYWORD;
+ break;
+ }
+}
+
+/* The text in the input is part of the argument to an %ifdef or %ifndef.
+** Evaluate the text as a boolean expression. Return true or false.
+**
+** Grammar understood here: bare identifiers (true iff the name appears in
+** the global azDefine[] list), "!" negation, short-circuiting "&&" and
+** "||", and parenthesized subexpressions (evaluated by recursion).
+** On a syntax error: if lineno>0, print a diagnostic and exit(1);
+** otherwise (recursive calls pass lineno==-1) return the negative of
+** (error-offset+1) so the outermost invocation can point at the bad
+** character in its own coordinate space.
+*/
+static int eval_preprocessor_boolean(char *z, int lineno){
+ int neg = 0;    /* True if an odd number of "!" precedes the next term */
+ int res = 0;    /* Value of the expression evaluated so far */
+ int okTerm = 1; /* True if a term (not a binary operator) may come next */
+ int i;
+ for(i=0; z[i]!=0; i++){
+ if( ISSPACE(z[i]) ) continue;
+ if( z[i]=='!' ){
+ if( !okTerm ) goto pp_syntax_error;
+ neg = !neg;
+ continue;
+ }
+ if( z[i]=='|' && z[i+1]=='|' ){
+ if( okTerm ) goto pp_syntax_error;
+ if( res ) return 1; /* Short-circuit: left operand already true */
+ i++;
+ okTerm = 1;
+ continue;
+ }
+ if( z[i]=='&' && z[i+1]=='&' ){
+ if( okTerm ) goto pp_syntax_error;
+ if( !res ) return 0; /* Short-circuit: left operand already false */
+ i++;
+ okTerm = 1;
+ continue;
+ }
+ if( z[i]=='(' ){
+ int k;
+ int n = 1; /* Parenthesis nesting depth */
+ if( !okTerm ) goto pp_syntax_error;
+ for(k=i+1; z[k]; k++){
+ if( z[k]==')' ){
+ n--;
+ if( n==0 ){
+ /* Temporarily null-terminate at the ")" and recurse on the interior */
+ z[k] = 0;
+ res = eval_preprocessor_boolean(&z[i+1], -1);
+ z[k] = ')';
+ if( res<0 ){
+ /* Inner call reported an error at offset -res-1; rebase it to this level */
+ i = i-res;
+ goto pp_syntax_error;
+ }
+ i = k;
+ break;
+ }
+ }else if( z[k]=='(' ){
+ n++;
+ }else if( z[k]==0 ){
+ i = k;
+ goto pp_syntax_error;
+ }
+ }
+ if( neg ){
+ res = !res;
+ neg = 0;
+ }
+ okTerm = 0;
+ continue;
+ }
+ if( ISALPHA(z[i]) ){
+ int j, k, n;
+ if( !okTerm ) goto pp_syntax_error;
+ /* Scan to the end of the identifier ([A-Za-z0-9_]*) */
+ for(k=i+1; ISALNUM(z[k]) || z[k]=='_'; k++){}
+ n = k - i;
+ res = 0;
+ /* The identifier is true iff it exactly matches a -D define name */
+ for(j=0; j<nDefine; j++){
+ if( strncmp(azDefine[j],&z[i],n)==0 && azDefine[j][n]==0 ){
+ res = 1;
+ break;
+ }
+ }
+ i = k-1;
+ if( neg ){
+ res = !res;
+ neg = 0;
+ }
+ okTerm = 0;
+ continue;
+ }
+ goto pp_syntax_error;
+ }
+ return res;
+
+pp_syntax_error:
+ if( lineno>0 ){
+ fprintf(stderr, "%%if syntax error on line %d.\n", lineno);
+ fprintf(stderr, " %.*s <-- syntax error here\n", i+1, z);
+ exit(1);
+ }else{
+ return -(i+1);
+ }
+}
+
+/* Run the preprocessor over the input file text. The global variables
+** azDefine[0] through azDefine[nDefine-1] contains the names of all defined
+** macros. This routine looks for "%ifdef" and "%ifndef" and "%endif" and
+** comments them out. Text in between is also commented out as appropriate.
+**
+** "Commenting out" means overwriting characters with spaces while keeping
+** every newline, so that line numbers reported by the later token scan
+** still match the original file.
+*/
+static void preprocess_input(char *z){
+ int i, j, k;
+ int exclude = 0;      /* >0 inside an excluded region; counts %if nesting */
+ int start = 0;        /* Offset at which the current excluded region began */
+ int lineno = 1;
+ int start_lineno = 1; /* Line on which the current excluded region began */
+ for(i=0; z[i]; i++){
+ if( z[i]=='\n' ) lineno++;
+ /* Directives are recognized only at the start of a line */
+ if( z[i]!='%' || (i>0 && z[i-1]!='\n') ) continue;
+ if( strncmp(&z[i],"%endif",6)==0 && ISSPACE(z[i+6]) ){
+ if( exclude ){
+ exclude--;
+ if( exclude==0 ){
+ /* Leaving the excluded region: blank it out, preserving newlines */
+ for(j=start; j<i; j++) if( z[j]!='\n' ) z[j] = ' ';
+ }
+ }
+ /* Blank out the directive line itself */
+ for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
+ }else if( strncmp(&z[i],"%else",5)==0 && ISSPACE(z[i+5]) ){
+ if( exclude==1){
+ /* Flip from excluded to included (only at the outermost level) */
+ exclude = 0;
+ for(j=start; j<i; j++) if( z[j]!='\n' ) z[j] = ' ';
+ }else if( exclude==0 ){
+ /* Flip from included to excluded */
+ exclude = 1;
+ start = i;
+ start_lineno = lineno;
+ }
+ for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
+ }else if( strncmp(&z[i],"%ifdef ",7)==0
+ || strncmp(&z[i],"%if ",4)==0
+ || strncmp(&z[i],"%ifndef ",8)==0 ){
+ if( exclude ){
+ /* Already inside an excluded region: just track nesting depth */
+ exclude++;
+ }else{
+ int isNot;
+ int iBool;
+ for(j=i; z[j] && !ISSPACE(z[j]); j++){}
+ iBool = j;
+ isNot = (j==i+7); /* Directive of length 7 is "%ifndef" (negated form) */
+ while( z[j] && z[j]!='\n' ){ j++; }
+ /* Temporarily null-terminate the argument text for evaluation */
+ k = z[j];
+ z[j] = 0;
+ exclude = eval_preprocessor_boolean(&z[iBool], lineno);
+ z[j] = k;
+ if( !isNot ) exclude = !exclude;
+ if( exclude ){
+ start = i;
+ start_lineno = lineno;
+ }
+ }
+ for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
+ }
+ }
+ if( exclude ){
+ fprintf(stderr,"unterminated %%ifdef starting on line %d\n", start_lineno);
+ exit(1);
+ }
+}
+
+/* In spite of its name, this function is really a scanner. It reads
+** in the entire input file (all at once) then tokenizes it. Each
+** token is passed to the function "parseonetoken" which builds all
+** the appropriate data structures in the global state vector "gp".
+**
+** On any I/O failure gp->errorcnt is incremented and the function
+** returns without scanning. Files larger than 100,000,000 bytes are
+** rejected. After scanning, gp->rule and gp->errorcnt are copied out
+** of the local parse state.
+*/
+void Parse(struct lemon *gp)
+{
+ struct pstate ps;
+ FILE *fp;
+ char *filebuf;
+ unsigned int filesize;
+ int lineno;
+ int c;
+ char *cp, *nextcp;
+ int startline = 0; /* NOTE(review): never updated below, so the
+ ** unterminated-string error always reports line 0 -- confirm upstream */
+
+ memset(&ps, '\0', sizeof(ps));
+ ps.gp = gp;
+ ps.filename = gp->filename;
+ ps.errorcnt = 0;
+ ps.state = INITIALIZE;
+
+ /* Begin by reading the input file */
+ fp = fopen(ps.filename,"rb");
+ if( fp==0 ){
+ ErrorMsg(ps.filename,0,"Can't open this file for reading.");
+ gp->errorcnt++;
+ return;
+ }
+ fseek(fp,0,2); /* 2 == SEEK_END: measure the file size */
+ filesize = ftell(fp);
+ rewind(fp);
+ filebuf = (char *)malloc( filesize+1 ); /* +1 for the NUL terminator */
+ if( filesize>100000000 || filebuf==0 ){
+ ErrorMsg(ps.filename,0,"Input file too large.");
+ free(filebuf);
+ gp->errorcnt++;
+ fclose(fp);
+ return;
+ }
+ if( fread(filebuf,1,filesize,fp)!=filesize ){
+ ErrorMsg(ps.filename,0,"Can't read in all %d bytes of this file.",
+ filesize);
+ free(filebuf);
+ gp->errorcnt++;
+ fclose(fp);
+ return;
+ }
+ fclose(fp);
+ filebuf[filesize] = 0;
+
+ /* Make an initial pass through the file to handle %ifdef and %ifndef */
+ preprocess_input(filebuf);
+ if( gp->printPreprocessed ){
+ printf("%s\n", filebuf);
+ return; /* NOTE(review): filebuf is not freed on this path */
+ }
+
+ /* Now scan the text of the input file */
+ lineno = 1;
+ for(cp=filebuf; (c= *cp)!=0; ){
+ if( c=='\n' ) lineno++; /* Keep track of the line number */
+ if( ISSPACE(c) ){ cp++; continue; } /* Skip all white space */
+ if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments */
+ cp+=2;
+ while( (c= *cp)!=0 && c!='\n' ) cp++;
+ continue;
+ }
+ if( c=='/' && cp[1]=='*' ){ /* Skip C style comments */
+ cp+=2;
+ if( (*cp)=='/' ) cp++; /* So that the "/" of "/*/" does not close the comment */
+ while( (c= *cp)!=0 && (c!='/' || cp[-1]!='*') ){
+ if( c=='\n' ) lineno++;
+ cp++;
+ }
+ if( c ) cp++;
+ continue;
+ }
+ ps.tokenstart = cp; /* Mark the beginning of the token */
+ ps.tokenlineno = lineno; /* Linenumber on which token begins */
+ if( c=='\"' ){ /* String literals */
+ cp++;
+ while( (c= *cp)!=0 && c!='\"' ){
+ if( c=='\n' ) lineno++;
+ cp++;
+ }
+ if( c==0 ){
+ ErrorMsg(ps.filename,startline,
+ "String starting on this line is not terminated before "
+ "the end of the file.");
+ ps.errorcnt++;
+ nextcp = cp;
+ }else{
+ nextcp = cp+1;
+ }
+ }else if( c=='{' ){ /* A block of C code */
+ int level;
+ cp++;
+ /* Scan to the matching "}", honoring nested braces and skipping
+ ** braces that occur inside comments and string/char literals */
+ for(level=1; (c= *cp)!=0 && (level>1 || c!='}'); cp++){
+ if( c=='\n' ) lineno++;
+ else if( c=='{' ) level++;
+ else if( c=='}' ) level--;
+ else if( c=='/' && cp[1]=='*' ){ /* Skip comments */
+ int prevc;
+ cp = &cp[2];
+ prevc = 0;
+ while( (c= *cp)!=0 && (c!='/' || prevc!='*') ){
+ if( c=='\n' ) lineno++;
+ prevc = c;
+ cp++;
+ }
+ }else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */
+ cp = &cp[2];
+ while( (c= *cp)!=0 && c!='\n' ) cp++;
+ if( c ) lineno++;
+ }else if( c=='\'' || c=='\"' ){ /* String and character literals */
+ int startchar, prevc;
+ startchar = c;
+ prevc = 0; /* Tracks backslash escapes inside the literal */
+ for(cp++; (c= *cp)!=0 && (c!=startchar || prevc=='\\'); cp++){
+ if( c=='\n' ) lineno++;
+ if( prevc=='\\' ) prevc = 0;
+ else prevc = c;
+ }
+ }
+ }
+ if( c==0 ){
+ ErrorMsg(ps.filename,ps.tokenlineno,
+ "C code starting on this line is not terminated before "
+ "the end of the file.");
+ ps.errorcnt++;
+ nextcp = cp;
+ }else{
+ nextcp = cp+1;
+ }
+ }else if( ISALNUM(c) ){ /* Identifiers */
+ while( (c= *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++;
+ nextcp = cp;
+ }else if( c==':' && cp[1]==':' && cp[2]=='=' ){ /* The operator "::=" */
+ cp += 3;
+ nextcp = cp;
+ }else if( (c=='/' || c=='|') && ISALPHA(cp[1]) ){
+ /* "|SYMBOL" or "/SYMBOL": multi-terminal continuation token */
+ cp += 2;
+ while( (c = *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++;
+ nextcp = cp;
+ }else{ /* All other (one character) operators */
+ cp++;
+ nextcp = cp;
+ }
+ c = *cp;
+ *cp = 0; /* Null terminate the token */
+ parseonetoken(&ps); /* Parse the token */
+ *cp = (char)c; /* Restore the buffer */
+ cp = nextcp;
+ }
+ free(filebuf); /* Release the buffer after parsing */
+ gp->rule = ps.firstrule;
+ gp->errorcnt = ps.errorcnt;
+}
+/*************************** From the file "plink.c" *********************/
+/*
+** Routines processing configuration follow-set propagation links
+** in the LEMON parser generator.
+*/
+/* Freelist of allocated-but-unused plink objects. Plink_new() draws from
+** this list, replenishing it in batches; Plink_delete() returns links to
+** it. Links are recycled but never freed back to the system. */
+static struct plink *plink_freelist = 0;
+
+/* Allocate a new plink */
+struct plink *Plink_new(void){
+ struct plink *newlink;
+
+ if( plink_freelist==0 ){
+ int i;
+ int amt = 100; /* Batch size: allocate 100 links at a time */
+ plink_freelist = (struct plink *)calloc( amt, sizeof(struct plink) );
+ if( plink_freelist==0 ){
+ fprintf(stderr,
+ "Unable to allocate memory for a new follow-set propagation link.\n");
+ exit(1);
+ }
+ /* Thread the fresh batch into a singly linked freelist */
+ for(i=0; i<amt-1; i++) plink_freelist[i].next = &plink_freelist[i+1];
+ plink_freelist[amt-1].next = 0;
+ }
+ newlink = plink_freelist;
+ plink_freelist = plink_freelist->next;
+ return newlink;
+}
+
+/* Add a plink to a plink list.
+**
+** A new link referencing configuration "cfp" is pushed onto the head of
+** the list rooted at *plpp. */
+void Plink_add(struct plink **plpp, struct config *cfp)
+{
+ struct plink *newlink;
+ newlink = Plink_new();
+ newlink->next = *plpp;
+ *plpp = newlink;
+ newlink->cfp = cfp;
+}
+
+/* Transfer every plink on the list "from" to the list "to".
+**
+** Despite the name this is a move, not a copy: each node of "from" is
+** relinked onto the head of *to, leaving "from" consumed. */
+void Plink_copy(struct plink **to, struct plink *from)
+{
+ struct plink *nextpl;
+ while( from ){
+ nextpl = from->next;
+ from->next = *to;
+ *to = from;
+ from = nextpl;
+ }
+}
+
+/* Delete every plink on the list.
+**
+** Nodes are returned to plink_freelist for reuse rather than free()d. */
+void Plink_delete(struct plink *plp)
+{
+ struct plink *nextpl;
+
+ while( plp ){
+ nextpl = plp->next;
+ plp->next = plink_freelist;
+ plink_freelist = plp;
+ plp = nextpl;
+ }
+}
+/*********************** From the file "report.c" **************************/
+/*
+** Procedures for generating reports and tables in the LEMON parser generator.
+*/
+
+/* Generate a filename with the given suffix. Space to hold the
+** name comes from malloc() and must be freed by the calling
+** function.
+**
+** The name is built from lemp->filename with its extension replaced by
+** "suffix". If the global outputDir is set, only the basename of the
+** input is used and the result is placed under outputDir.
+*/
+PRIVATE char *file_makename(struct lemon *lemp, const char *suffix)
+{
+ char *name;
+ char *cp;
+ char *filename = lemp->filename;
+ int sz;
+
+ if( outputDir ){
+ /* Strip any directory component; output goes into outputDir instead */
+ cp = strrchr(filename, '/');
+ if( cp ) filename = cp + 1;
+ }
+ sz = lemonStrlen(filename);
+ sz += lemonStrlen(suffix);
+ if( outputDir ) sz += lemonStrlen(outputDir) + 1;
+ sz += 5; /* Slack for the separator, terminator, and rounding */
+ name = (char*)malloc( sz );
+ if( name==0 ){
+ fprintf(stderr,"Can't allocate space for a filename.\n");
+ exit(1);
+ }
+ name[0] = 0;
+ if( outputDir ){
+ lemon_strcpy(name, outputDir);
+ lemon_strcat(name, "/");
+ }
+ lemon_strcat(name,filename);
+ cp = strrchr(name,'.');
+ if( cp ) *cp = 0; /* Chop off the old extension before appending suffix */
+ lemon_strcat(name,suffix);
+ return name;
+}
+
+/* Open a file with a name based on the name of the input file,
+** but with a different (specified) suffix, and return a pointer
+** to the stream.
+**
+** The generated name replaces lemp->outname (any previous value is
+** freed). A failed open is reported as an error only for write modes;
+** for read modes the NULL stream is returned silently. */
+PRIVATE FILE *file_open(
+ struct lemon *lemp,
+ const char *suffix,
+ const char *mode
+){
+ FILE *fp;
+
+ if( lemp->outname ) free(lemp->outname);
+ lemp->outname = file_makename(lemp, suffix);
+ fp = fopen(lemp->outname,mode);
+ if( fp==0 && *mode=='w' ){
+ fprintf(stderr,"Can't open file \"%s\".\n",lemp->outname);
+ lemp->errorcnt++;
+ return 0;
+ }
+ return fp;
+}
+
+/* Print the text of a rule to "out" in the form "lhs ::= rhs1 rhs2 ...".
+** A MULTITERMINAL right-hand-side symbol is rendered as "A|B|C".
+** No trailing newline is emitted; the caller supplies punctuation.
+*/
+void rule_print(FILE *out, struct rule *rp){
+ int i, j;
+ fprintf(out, "%s",rp->lhs->name);
+ /* if( rp->lhsalias ) fprintf(out,"(%s)",rp->lhsalias); */
+ fprintf(out," ::=");
+ for(i=0; i<rp->nrhs; i++){
+ struct symbol *sp = rp->rhs[i];
+ if( sp->type==MULTITERMINAL ){
+ fprintf(out," %s", sp->subsym[0]->name);
+ for(j=1; j<sp->nsubsym; j++){
+ fprintf(out,"|%s", sp->subsym[j]->name);
+ }
+ }else{
+ fprintf(out," %s", sp->name);
+ }
+ /* if( rp->rhsalias[i] ) fprintf(out,"(%s)",rp->rhsalias[i]); */
+ }
+}
+
+/* Duplicate the input file without comments and without actions
+** on rules.
+**
+** Output goes to stdout: first a multi-column symbol table (indices and
+** names) as "//" comment lines, then each rule followed by ".", with
+** its precedence symbol in brackets when present. */
+void Reprint(struct lemon *lemp)
+{
+ struct rule *rp;
+ struct symbol *sp;
+ int i, j, maxlen, len, ncolumns, skip;
+ printf("// Reprint of input file \"%s\".\n// Symbols:\n",lemp->filename);
+ /* Size the columns to the longest symbol name, targeting 76 chars/line */
+ maxlen = 10;
+ for(i=0; i<lemp->nsymbol; i++){
+ sp = lemp->symbols[i];
+ len = lemonStrlen(sp->name);
+ if( len>maxlen ) maxlen = len;
+ }
+ ncolumns = 76/(maxlen+5);
+ if( ncolumns<1 ) ncolumns = 1;
+ skip = (lemp->nsymbol + ncolumns - 1)/ncolumns; /* Rows per column */
+ for(i=0; i<skip; i++){
+ printf("//");
+ /* Emit row i: symbols i, i+skip, i+2*skip, ... (column-major layout) */
+ for(j=i; j<lemp->nsymbol; j+=skip){
+ sp = lemp->symbols[j];
+ assert( sp->index==j );
+ printf(" %3d %-*.*s",j,maxlen,maxlen,sp->name);
+ }
+ printf("\n");
+ }
+ for(rp=lemp->rule; rp; rp=rp->next){
+ rule_print(stdout, rp);
+ printf(".");
+ if( rp->precsym ) printf(" [%s]",rp->precsym->name);
+ /* if( rp->code ) printf("\n %s",rp->code); */
+ printf("\n");
+ }
+}
+
+/* Print a single rule.
+**
+** iCursor is the position of the parse "dot": a "*" is printed before
+** RHS symbol iCursor (or after the last symbol when iCursor==nrhs).
+** Pass a negative iCursor to suppress the cursor entirely. */
+void RulePrint(FILE *fp, struct rule *rp, int iCursor){
+ struct symbol *sp;
+ int i, j;
+ fprintf(fp,"%s ::=",rp->lhs->name);
+ for(i=0; i<=rp->nrhs; i++){
+ if( i==iCursor ) fprintf(fp," *");
+ if( i==rp->nrhs ) break;
+ sp = rp->rhs[i];
+ if( sp->type==MULTITERMINAL ){
+ fprintf(fp," %s", sp->subsym[0]->name);
+ for(j=1; j<sp->nsubsym; j++){
+ fprintf(fp,"|%s",sp->subsym[j]->name);
+ }
+ }else{
+ fprintf(fp," %s", sp->name);
+ }
+ }
+}
+
+/* Print the rule for a configuration, with the parse dot at the
+** configuration's current position.
+*/
+void ConfigPrint(FILE *fp, struct config *cfp){
+ RulePrint(fp, cfp->rp, cfp->dot);
+}
+
+/* #define TEST */
+/* The two K&R-style debug printers below are compiled out; they are
+** referenced only from the matching "#if 0" section in ReportOutput(). */
+#if 0
+/* Print a set */
+PRIVATE void SetPrint(out,set,lemp)
+FILE *out;
+char *set;
+struct lemon *lemp;
+{
+ int i;
+ char *spacer;
+ spacer = "";
+ fprintf(out,"%12s[","");
+ for(i=0; i<lemp->nterminal; i++){
+ if( SetFind(set,i) ){
+ fprintf(out,"%s%s",spacer,lemp->symbols[i]->name);
+ spacer = " ";
+ }
+ }
+ fprintf(out,"]\n");
+}
+
+/* Print a plink chain */
+PRIVATE void PlinkPrint(out,plp,tag)
+FILE *out;
+struct plink *plp;
+char *tag;
+{
+ while( plp ){
+ fprintf(out,"%12s%s (state %2d) ","",tag,plp->cfp->stp->statenum);
+ ConfigPrint(out,plp->cfp);
+ fprintf(out,"\n");
+ plp = plp->next;
+ }
+}
+#endif
+
+/* Print an action to the given file descriptor. Return FALSE if
+** nothing was actually printed.
+**
+** The symbol name is right-justified in a field "indent" characters
+** wide ("%*s"). Actions resolved away by precedence are printed only
+** when the global showPrecedenceConflict flag is set; NOT_USED actions
+** are never printed. No trailing newline is emitted. */
+int PrintAction(
+ struct action *ap, /* The action to print */
+ FILE *fp, /* Print the action here */
+ int indent /* Indent by this amount */
+){
+ int result = 1;
+ switch( ap->type ){
+ case SHIFT: {
+ struct state *stp = ap->x.stp;
+ fprintf(fp,"%*s shift %-7d",indent,ap->sp->name,stp->statenum);
+ break;
+ }
+ case REDUCE: {
+ struct rule *rp = ap->x.rp;
+ fprintf(fp,"%*s reduce %-7d",indent,ap->sp->name,rp->iRule);
+ RulePrint(fp, rp, -1);
+ break;
+ }
+ case SHIFTREDUCE: {
+ struct rule *rp = ap->x.rp;
+ fprintf(fp,"%*s shift-reduce %-7d",indent,ap->sp->name,rp->iRule);
+ RulePrint(fp, rp, -1);
+ break;
+ }
+ case ACCEPT:
+ fprintf(fp,"%*s accept",indent,ap->sp->name);
+ break;
+ case ERROR:
+ fprintf(fp,"%*s error",indent,ap->sp->name);
+ break;
+ case SRCONFLICT:
+ case RRCONFLICT:
+ fprintf(fp,"%*s reduce %-7d ** Parsing conflict **",
+ indent,ap->sp->name,ap->x.rp->iRule);
+ break;
+ case SSCONFLICT:
+ fprintf(fp,"%*s shift %-7d ** Parsing conflict **",
+ indent,ap->sp->name,ap->x.stp->statenum);
+ break;
+ case SH_RESOLVED:
+ if( showPrecedenceConflict ){
+ fprintf(fp,"%*s shift %-7d -- dropped by precedence",
+ indent,ap->sp->name,ap->x.stp->statenum);
+ }else{
+ result = 0;
+ }
+ break;
+ case RD_RESOLVED:
+ if( showPrecedenceConflict ){
+ fprintf(fp,"%*s reduce %-7d -- dropped by precedence",
+ indent,ap->sp->name,ap->x.rp->iRule);
+ }else{
+ result = 0;
+ }
+ break;
+ case NOT_USED:
+ result = 0;
+ break;
+ }
+ if( result && ap->spOpt ){
+ fprintf(fp," /* because %s==%s */", ap->sp->name, ap->spOpt->name);
+ }
+ return result;
+}
+
+/* Generate the "*.out" log file.
+**
+** For each state this writes its configurations (basis configurations
+** only when lemp->basisflag is set) and its actions, then three summary
+** sections: all symbols with first-sets, symbols that never carry
+** semantic content, and all rules with their precedence. Failure to
+** open the output file is reported by file_open(); this function then
+** simply returns. */
+void ReportOutput(struct lemon *lemp)
+{
+ int i, n;
+ struct state *stp;
+ struct config *cfp;
+ struct action *ap;
+ struct rule *rp;
+ FILE *fp;
+
+ fp = file_open(lemp,".out","wb");
+ if( fp==0 ) return;
+ for(i=0; i<lemp->nxstate; i++){
+ stp = lemp->sorted[i];
+ fprintf(fp,"State %d:\n",stp->statenum);
+ if( lemp->basisflag ) cfp=stp->bp;
+ else cfp=stp->cfp;
+ while( cfp ){
+ char buf[20];
+ if( cfp->dot==cfp->rp->nrhs ){
+ /* Completed configuration: tag it with the rule number */
+ lemon_sprintf(buf,"(%d)",cfp->rp->iRule);
+ fprintf(fp," %5s ",buf);
+ }else{
+ fprintf(fp," ");
+ }
+ ConfigPrint(fp,cfp);
+ fprintf(fp,"\n");
+#if 0
+ SetPrint(fp,cfp->fws,lemp);
+ PlinkPrint(fp,cfp->fplp,"To ");
+ PlinkPrint(fp,cfp->bplp,"From");
+#endif
+ if( lemp->basisflag ) cfp=cfp->bp;
+ else cfp=cfp->next;
+ }
+ fprintf(fp,"\n");
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( PrintAction(ap,fp,30) ) fprintf(fp,"\n");
+ }
+ fprintf(fp,"\n");
+ }
+ fprintf(fp, "----------------------------------------------------\n");
+ fprintf(fp, "Symbols:\n");
+ fprintf(fp, "The first-set of non-terminals is shown after the name.\n\n");
+ for(i=0; i<lemp->nsymbol; i++){
+ int j;
+ struct symbol *sp;
+
+ sp = lemp->symbols[i];
+ fprintf(fp, " %3d: %s", i, sp->name);
+ if( sp->type==NONTERMINAL ){
+ fprintf(fp, ":");
+ if( sp->lambda ){
+ fprintf(fp, " <lambda>");
+ }
+ for(j=0; j<lemp->nterminal; j++){
+ if( sp->firstset && SetFind(sp->firstset, j) ){
+ fprintf(fp, " %s", lemp->symbols[j]->name);
+ }
+ }
+ }
+ if( sp->prec>=0 ) fprintf(fp," (precedence=%d)", sp->prec);
+ fprintf(fp, "\n");
+ }
+ fprintf(fp, "----------------------------------------------------\n");
+ fprintf(fp, "Syntax-only Symbols:\n");
+ fprintf(fp, "The following symbols never carry semantic content.\n\n");
+ /* n tracks the current output-line width; wrap at 75 columns */
+ for(i=n=0; i<lemp->nsymbol; i++){
+ int w;
+ struct symbol *sp = lemp->symbols[i];
+ if( sp->bContent ) continue;
+ w = (int)strlen(sp->name);
+ if( n>0 && n+w>75 ){
+ fprintf(fp,"\n");
+ n = 0;
+ }
+ if( n>0 ){
+ fprintf(fp, " ");
+ n++;
+ }
+ fprintf(fp, "%s", sp->name);
+ n += w;
+ }
+ if( n>0 ) fprintf(fp, "\n");
+ fprintf(fp, "----------------------------------------------------\n");
+ fprintf(fp, "Rules:\n");
+ for(rp=lemp->rule; rp; rp=rp->next){
+ fprintf(fp, "%4d: ", rp->iRule);
+ rule_print(fp, rp);
+ fprintf(fp,".");
+ if( rp->precsym ){
+ fprintf(fp," [%s precedence=%d]",
+ rp->precsym->name, rp->precsym->prec);
+ }
+ fprintf(fp,"\n");
+ }
+ fclose(fp);
+ return;
+}
+
+/* Search for the file "name" which is in the same directory as
+** the executable.
+**
+** If argv0 contains a directory separator, look only in that directory.
+** Otherwise try each directory on $PATH (default ".:/bin:/usr/bin"),
+** stopping at the first candidate for which access(path,modemask)
+** succeeds. Returns a malloc()ed path that the caller must free, or 0
+** on allocation failure. NOTE(review): if no candidate passes the
+** access() test, the last candidate tried is still returned. */
+PRIVATE char *pathsearch(char *argv0, char *name, int modemask)
+{
+ const char *pathlist;
+ char *pathbufptr = 0;
+ char *pathbuf = 0;
+ char *path,*cp;
+ char c;
+
+#ifdef __WIN32__
+ cp = strrchr(argv0,'\\');
+#else
+ cp = strrchr(argv0,'/');
+#endif
+ if( cp ){
+ /* argv0 has a directory part: look only next to the executable */
+ c = *cp;
+ *cp = 0;
+ path = (char *)malloc( lemonStrlen(argv0) + lemonStrlen(name) + 2 );
+ if( path ) lemon_sprintf(path,"%s/%s",argv0,name);
+ *cp = c; /* Restore argv0 */
+ }else{
+ pathlist = getenv("PATH");
+ if( pathlist==0 ) pathlist = ".:/bin:/usr/bin";
+ pathbuf = (char *) malloc( lemonStrlen(pathlist) + 1 );
+ path = (char *)malloc( lemonStrlen(pathlist)+lemonStrlen(name)+2 );
+ if( (pathbuf != 0) && (path!=0) ){
+ pathbufptr = pathbuf; /* Keep the original pointer so it can be freed */
+ lemon_strcpy(pathbuf, pathlist);
+ while( *pathbuf ){
+ /* Split off the next ":"-delimited directory */
+ cp = strchr(pathbuf,':');
+ if( cp==0 ) cp = &pathbuf[lemonStrlen(pathbuf)];
+ c = *cp;
+ *cp = 0;
+ lemon_sprintf(path,"%s/%s",pathbuf,name);
+ *cp = c;
+ if( c==0 ) pathbuf[0] = 0;
+ else pathbuf = &cp[1];
+ if( access(path,modemask)==0 ) break;
+ }
+ }
+ free(pathbufptr);
+ }
+ return path;
+}
+
+/* Given an action, compute the integer value for that action
+** which is to be put in the action table of the generated machine.
+** Return negative if no action should be generated.
+**
+** Encoding: SHIFT actions use the destination state number directly;
+** REDUCE and SHIFTREDUCE actions are offset by lemp->minReduce or
+** lemp->minShiftReduce respectively; ERROR and ACCEPT use the fixed
+** lemp->errAction / lemp->accAction codes. */
+PRIVATE int compute_action(struct lemon *lemp, struct action *ap)
+{
+ int act;
+ switch( ap->type ){
+ case SHIFT: act = ap->x.stp->statenum; break;
+ case SHIFTREDUCE: {
+ /* Since a SHIFT is inherent after a prior REDUCE, convert any
+ ** SHIFTREDUCE action with a nonterminal on the LHS into a simple
+ ** REDUCE action: */
+ if( ap->sp->index>=lemp->nterminal
+ && (lemp->errsym==0 || ap->sp->index!=lemp->errsym->index)
+ ){
+ act = lemp->minReduce + ap->x.rp->iRule;
+ }else{
+ act = lemp->minShiftReduce + ap->x.rp->iRule;
+ }
+ break;
+ }
+ case REDUCE: act = lemp->minReduce + ap->x.rp->iRule; break;
+ case ERROR: act = lemp->errAction; break;
+ case ACCEPT: act = lemp->accAction; break;
+ default: act = -1; break;
+ }
+ return act;
+}
+
+#define LINESIZE 1000
+/* The next cluster of routines are for reading the template file
+** and writing the results to the generated parser */
+/* The first function transfers data from "in" to "out" until
+** a line is seen which begins with "%%". The line number is
+** tracked.
+**
+** if name!=0, then any word that begin with "Parse" is changed to
+** begin with *name instead.
+*/
+PRIVATE void tplt_xfer(char *name, FILE *in, FILE *out, int *lineno)
+{
+ int i, iStart;
+ char line[LINESIZE];
+ while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
+ (*lineno)++;
+ iStart = 0; /* Start of the not-yet-emitted portion of the line */
+ if( name ){
+ for(i=0; line[i]; i++){
+ /* "Parse" must begin a word: previous char (if any) non-alpha */
+ if( line[i]=='P' && strncmp(&line[i],"Parse",5)==0
+ && (i==0 || !ISALPHA(line[i-1]))
+ ){
+ /* Flush text before the match, then substitute the name */
+ if( i>iStart ) fprintf(out,"%.*s",i-iStart,&line[iStart]);
+ fprintf(out,"%s",name);
+ i += 4; /* Skip the rest of "Parse"; loop's i++ passes the 'e' */
+ iStart = i+1;
+ }
+ }
+ }
+ fprintf(out,"%s",&line[iStart]);
+ }
+}
+
+/* Skip forward past the header of the template file to the first "%%",
+** consuming (and discarding) lines from "in" while keeping *lineno
+** up to date.
+*/
+PRIVATE void tplt_skip_header(FILE *in, int *lineno)
+{
+ char line[LINESIZE];
+ while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
+ (*lineno)++;
+ }
+}
+
+/* The next function finds the template file and opens it, returning
+** a pointer to the opened file, or 0 on error (lemp->errorcnt is then
+** incremented).
+**
+** Search order: (1) the template named on the command line via the
+** global user_templatename, (2) "<inputname>.lt" next to the grammar
+** file, (3) "lempar.c" in the current directory, (4) "lempar.c" found
+** via pathsearch() next to the executable / on $PATH. The 004 mode
+** passed to access() tests for read permission. */
+PRIVATE FILE *tplt_open(struct lemon *lemp)
+{
+ static char templatename[] = "lempar.c";
+ char buf[1000];
+ FILE *in;
+ char *tpltname;
+ char *toFree = 0;
+ char *cp;
+
+ /* first, see if user specified a template filename on the command line. */
+ if (user_templatename != 0) {
+ if( access(user_templatename,004)==-1 ){
+ fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
+ user_templatename);
+ lemp->errorcnt++;
+ return 0;
+ }
+ in = fopen(user_templatename,"rb");
+ if( in==0 ){
+ fprintf(stderr,"Can't open the template file \"%s\".\n",
+ user_templatename);
+ lemp->errorcnt++;
+ return 0;
+ }
+ return in;
+ }
+
+ /* Derive "<inputname>.lt" by replacing the grammar file's extension */
+ cp = strrchr(lemp->filename,'.');
+ if( cp ){
+ lemon_sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
+ }else{
+ lemon_sprintf(buf,"%s.lt",lemp->filename);
+ }
+ if( access(buf,004)==0 ){
+ tpltname = buf;
+ }else if( access(templatename,004)==0 ){
+ tpltname = templatename;
+ }else{
+ /* pathsearch() result is malloc()ed; remember it so it can be freed */
+ toFree = tpltname = pathsearch(lemp->argv0,templatename,0);
+ }
+ if( tpltname==0 ){
+ fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
+ templatename);
+ lemp->errorcnt++;
+ return 0;
+ }
+ in = fopen(tpltname,"rb");
+ if( in==0 ){
+ fprintf(stderr,"Can't open the template file \"%s\".\n",tpltname);
+ lemp->errorcnt++;
+ }
+ free(toFree);
+ return in;
+}
+
+/* Print a #line directive line to the output file. */
+PRIVATE void tplt_linedir(FILE *out, int lineno, char *filename)
+{
+ fprintf(out,"#line %d \"",lineno);
+ while( *filename ){
+ if( *filename == '\\' ) putc('\\',out);
+ putc(*filename,out);
+ filename++;
+ }
+ fprintf(out,"\"\n");
+}
+
+/* Print a string to the file and keep the linenumber up to date */
+PRIVATE void tplt_print(FILE *out, struct lemon *lemp, char *str, int *lineno)
+{
+ if( str==0 ) return;
+ while( *str ){
+ putc(*str,out);
+ if( *str=='\n' ) (*lineno)++;
+ str++;
+ }
+ if( str[-1]!='\n' ){
+ putc('\n',out);
+ (*lineno)++;
+ }
+ if (!lemp->nolinenosflag) {
+ (*lineno)++; tplt_linedir(out,*lineno,lemp->outname);
+ }
+ return;
+}
+
/*
** The following routine emits code for the destructor for the
** symbol sp.  The generated code is written to "out" and *lineno is
** kept in sync with the number of lines emitted.  Within the
** destructor text, "$$" is replaced by a reference to the symbol's
** minor-value union member.
*/
void emit_destructor_code(
  FILE *out,          /* Write the generated destructor code here */
  struct symbol *sp,  /* Emit the destructor for this symbol */
  struct lemon *lemp, /* Global parser-generator state */
  int *lineno         /* Output line number; kept up to date */
){
  char *cp = 0;       /* The user-supplied destructor text */

  if( sp->type==TERMINAL ){
    /* All terminals share the %token_destructor code */
    cp = lemp->tokendest;
    if( cp==0 ) return;
    fprintf(out,"{\n"); (*lineno)++;
  }else if( sp->destructor ){
    /* This nonterminal has its own %destructor */
    cp = sp->destructor;
    fprintf(out,"{\n"); (*lineno)++;
    if( !lemp->nolinenosflag ){
      (*lineno)++;
      tplt_linedir(out,sp->destLineno,lemp->filename);
    }
  }else if( lemp->vardest ){
    /* Fall back to the %default_destructor */
    cp = lemp->vardest;
    if( cp==0 ) return;
    fprintf(out,"{\n"); (*lineno)++;
  }else{
    assert( 0 );  /* Cannot happen */
  }
  for(; *cp; cp++){
    if( *cp=='$' && cp[1]=='$' ){
      /* Substitute "$$" with the minor value being destroyed */
      fprintf(out,"(yypminor->yy%d)",sp->dtnum);
      cp++;
      continue;
    }
    if( *cp=='\n' ) (*lineno)++;
    fputc(*cp,out);
  }
  fprintf(out,"\n"); (*lineno)++;
  if (!lemp->nolinenosflag) {
    /* Re-point #line at the generated file after user-supplied code */
    (*lineno)++; tplt_linedir(out,*lineno,lemp->outname);
  }
  fprintf(out,"}\n"); (*lineno)++;
  return;
}
+
+/*
+** Return TRUE (non-zero) if the given symbol has a destructor.
+*/
+int has_destructor(struct symbol *sp, struct lemon *lemp)
+{
+ int ret;
+ if( sp->type==TERMINAL ){
+ ret = lemp->tokendest!=0;
+ }else{
+ ret = lemp->vardest!=0 || sp->destructor!=0;
+ }
+ return ret;
+}
+
/*
** Append text to a dynamically allocated string.  If zText is 0 then
** reset the string to be empty again.  Always return the complete text
** of the string (which is overwritten with each call).
**
** n bytes of zText are stored.  If n==0 then all of zText up to the first
** \000 terminator is stored.  zText can contain up to two instances of
** %d.  The values of p1 and p2 are written into the first and second
** %d.
**
** If n==-1, then the previous character is overwritten.
**
** NOTE: the accumulator lives in static variables, so this routine is
** not reentrant and successive results share the same storage.
*/
PRIVATE char *append_str(const char *zText, int n, int p1, int p2){
  static char empty[1] = { 0 };
  static char *z = 0;        /* The accumulated string */
  static int alloced = 0;    /* Bytes of space allocated for z */
  static int used = 0;       /* Bytes of z currently in use */
  int c;
  char zInt[40];             /* Scratch buffer for %d expansion */
  if( zText==0 ){
    /* Reset request: clear the accumulator, return prior contents */
    if( used==0 && z!=0 ) z[0] = 0;
    used = 0;
    return z;
  }
  if( n<=0 ){
    if( n<0 ){
      /* n==-1: back up over the previously appended character */
      used += n;
      assert( used>=0 );
    }
    n = lemonStrlen(zText);
  }
  /* Grow the buffer if needed; sizeof(zInt)*2 leaves headroom for both
  ** possible %d expansions */
  if( (int) (n+sizeof(zInt)*2+used) >= alloced ){
    alloced = n + sizeof(zInt)*2 + used + 200;
    z = (char *) realloc(z,  alloced);
  }
  if( z==0 ) return empty;
  while( n-- > 0 ){
    c = *(zText++);
    if( c=='%' && n>0 && zText[0]=='d' ){
      /* Expand "%d": p1 is consumed first, then p2 on the next one */
      lemon_sprintf(zInt, "%d", p1);
      p1 = p2;
      lemon_strcpy(&z[used], zInt);
      used += lemonStrlen(&z[used]);
      zText++;
      n--;
    }else{
      z[used++] = (char)c;
    }
  }
  z[used] = 0;
  return z;
}
+
+/*
+** Write and transform the rp->code string so that symbols are expanded.
+** Populate the rp->codePrefix and rp->codeSuffix strings, as appropriate.
+**
+** Return 1 if the expanded code requires that "yylhsminor" local variable
+** to be defined.
+*/
+PRIVATE int translate_code(struct lemon *lemp, struct rule *rp){
+ char *cp, *xp;
+ int i;
+ int rc = 0; /* True if yylhsminor is used */
+ int dontUseRhs0 = 0; /* If true, use of left-most RHS label is illegal */
+ const char *zSkip = 0; /* The zOvwrt comment within rp->code, or NULL */
+ char lhsused = 0; /* True if the LHS element has been used */
+ char lhsdirect; /* True if LHS writes directly into stack */
+ char used[MAXRHS]; /* True for each RHS element which is used */
+ char zLhs[50]; /* Convert the LHS symbol into this string */
+ char zOvwrt[900]; /* Comment that to allow LHS to overwrite RHS */
+
+ for(i=0; i<rp->nrhs; i++) used[i] = 0;
+ lhsused = 0;
+
+ if( rp->code==0 ){
+ static char newlinestr[2] = { '\n', '\0' };
+ rp->code = newlinestr;
+ rp->line = rp->ruleline;
+ rp->noCode = 1;
+ }else{
+ rp->noCode = 0;
+ }
+
+
+ if( rp->nrhs==0 ){
+ /* If there are no RHS symbols, then writing directly to the LHS is ok */
+ lhsdirect = 1;
+ }else if( rp->rhsalias[0]==0 ){
+ /* The left-most RHS symbol has no value. LHS direct is ok. But
+ ** we have to call the destructor on the RHS symbol first. */
+ lhsdirect = 1;
+ if( has_destructor(rp->rhs[0],lemp) ){
+ append_str(0,0,0,0);
+ append_str(" yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0,
+ rp->rhs[0]->index,1-rp->nrhs);
+ rp->codePrefix = Strsafe(append_str(0,0,0,0));
+ rp->noCode = 0;
+ }
+ }else if( rp->lhsalias==0 ){
+ /* There is no LHS value symbol. */
+ lhsdirect = 1;
+ }else if( strcmp(rp->lhsalias,rp->rhsalias[0])==0 ){
+ /* The LHS symbol and the left-most RHS symbol are the same, so
+ ** direct writing is allowed */
+ lhsdirect = 1;
+ lhsused = 1;
+ used[0] = 1;
+ if( rp->lhs->dtnum!=rp->rhs[0]->dtnum ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "%s(%s) and %s(%s) share the same label but have "
+ "different datatypes.",
+ rp->lhs->name, rp->lhsalias, rp->rhs[0]->name, rp->rhsalias[0]);
+ lemp->errorcnt++;
+ }
+ }else{
+ lemon_sprintf(zOvwrt, "/*%s-overwrites-%s*/",
+ rp->lhsalias, rp->rhsalias[0]);
+ zSkip = strstr(rp->code, zOvwrt);
+ if( zSkip!=0 ){
+ /* The code contains a special comment that indicates that it is safe
+ ** for the LHS label to overwrite left-most RHS label. */
+ lhsdirect = 1;
+ }else{
+ lhsdirect = 0;
+ }
+ }
+ if( lhsdirect ){
+ sprintf(zLhs, "yymsp[%d].minor.yy%d",1-rp->nrhs,rp->lhs->dtnum);
+ }else{
+ rc = 1;
+ sprintf(zLhs, "yylhsminor.yy%d",rp->lhs->dtnum);
+ }
+
+ append_str(0,0,0,0);
+
+ /* This const cast is wrong but harmless, if we're careful. */
+ for(cp=(char *)rp->code; *cp; cp++){
+ if( cp==zSkip ){
+ append_str(zOvwrt,0,0,0);
+ cp += lemonStrlen(zOvwrt)-1;
+ dontUseRhs0 = 1;
+ continue;
+ }
+ if( ISALPHA(*cp) && (cp==rp->code || (!ISALNUM(cp[-1]) && cp[-1]!='_')) ){
+ char saved;
+ for(xp= &cp[1]; ISALNUM(*xp) || *xp=='_'; xp++);
+ saved = *xp;
+ *xp = 0;
+ if( rp->lhsalias && strcmp(cp,rp->lhsalias)==0 ){
+ append_str(zLhs,0,0,0);
+ cp = xp;
+ lhsused = 1;
+ }else{
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhsalias[i] && strcmp(cp,rp->rhsalias[i])==0 ){
+ if( i==0 && dontUseRhs0 ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label %s used after '%s'.",
+ rp->rhsalias[0], zOvwrt);
+ lemp->errorcnt++;
+ }else if( cp!=rp->code && cp[-1]=='@' ){
+ /* If the argument is of the form @X then substituted
+ ** the token number of X, not the value of X */
+ append_str("yymsp[%d].major",-1,i-rp->nrhs+1,0);
+ }else{
+ struct symbol *sp = rp->rhs[i];
+ int dtnum;
+ if( sp->type==MULTITERMINAL ){
+ dtnum = sp->subsym[0]->dtnum;
+ }else{
+ dtnum = sp->dtnum;
+ }
+ append_str("yymsp[%d].minor.yy%d",0,i-rp->nrhs+1, dtnum);
+ }
+ cp = xp;
+ used[i] = 1;
+ break;
+ }
+ }
+ }
+ *xp = saved;
+ }
+ append_str(cp, 1, 0, 0);
+ } /* End loop */
+
+ /* Main code generation completed */
+ cp = append_str(0,0,0,0);
+ if( cp && cp[0] ) rp->code = Strsafe(cp);
+ append_str(0,0,0,0);
+
+ /* Check to make sure the LHS has been used */
+ if( rp->lhsalias && !lhsused ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label \"%s\" for \"%s(%s)\" is never used.",
+ rp->lhsalias,rp->lhs->name,rp->lhsalias);
+ lemp->errorcnt++;
+ }
+
+ /* Generate destructor code for RHS minor values which are not referenced.
+ ** Generate error messages for unused labels and duplicate labels.
+ */
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhsalias[i] ){
+ if( i>0 ){
+ int j;
+ if( rp->lhsalias && strcmp(rp->lhsalias,rp->rhsalias[i])==0 ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "%s(%s) has the same label as the LHS but is not the left-most "
+ "symbol on the RHS.",
+ rp->rhs[i]->name, rp->rhsalias[i]);
+ lemp->errorcnt++;
+ }
+ for(j=0; j<i; j++){
+ if( rp->rhsalias[j] && strcmp(rp->rhsalias[j],rp->rhsalias[i])==0 ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label %s used for multiple symbols on the RHS of a rule.",
+ rp->rhsalias[i]);
+ lemp->errorcnt++;
+ break;
+ }
+ }
+ }
+ if( !used[i] ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label %s for \"%s(%s)\" is never used.",
+ rp->rhsalias[i],rp->rhs[i]->name,rp->rhsalias[i]);
+ lemp->errorcnt++;
+ }
+ }else if( i>0 && has_destructor(rp->rhs[i],lemp) ){
+ append_str(" yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0,
+ rp->rhs[i]->index,i-rp->nrhs+1);
+ }
+ }
+
+ /* If unable to write LHS values directly into the stack, write the
+ ** saved LHS value now. */
+ if( lhsdirect==0 ){
+ append_str(" yymsp[%d].minor.yy%d = ", 0, 1-rp->nrhs, rp->lhs->dtnum);
+ append_str(zLhs, 0, 0, 0);
+ append_str(";\n", 0, 0, 0);
+ }
+
+ /* Suffix code generation complete */
+ cp = append_str(0,0,0,0);
+ if( cp && cp[0] ){
+ rp->codeSuffix = Strsafe(cp);
+ rp->noCode = 0;
+ }
+
+ return rc;
+}
+
/*
** Generate code which executes when the rule "rp" is reduced.  Write
** the code to "out".  Make sure lineno stays up-to-date.
**
** Emits, in order: the codePrefix (destructor calls produced by
** translate_code), a #line directive pointing at the grammar source,
** the user action code, a #line directive pointing back at the
** generated file, and finally the codeSuffix.
*/
PRIVATE void emit_code(
  FILE *out,          /* Write the generated action code here */
  struct rule *rp,    /* The rule whose reduce action is emitted */
  struct lemon *lemp, /* Global parser-generator state */
  int *lineno         /* Output line number; kept up to date */
){
  const char *cp;

  /* Setup code prior to the #line directive */
  if( rp->codePrefix && rp->codePrefix[0] ){
    fprintf(out, "{%s", rp->codePrefix);
    for(cp=rp->codePrefix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
  }

  /* Generate code to do the reduce action */
  if( rp->code ){
    if( !lemp->nolinenosflag ){
      (*lineno)++;
      tplt_linedir(out,rp->line,lemp->filename);  /* Point at the grammar */
    }
    fprintf(out,"{%s",rp->code);
    for(cp=rp->code; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
    fprintf(out,"}\n"); (*lineno)++;
    if( !lemp->nolinenosflag ){
      (*lineno)++;
      tplt_linedir(out,*lineno,lemp->outname);    /* Back to generated file */
    }
  }

  /* Generate breakdown code that occurs after the #line directive */
  if( rp->codeSuffix && rp->codeSuffix[0] ){
    fprintf(out, "%s", rp->codeSuffix);
    for(cp=rp->codeSuffix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
  }

  /* Close the brace that was opened along with codePrefix */
  if( rp->codePrefix ){
    fprintf(out, "}\n"); (*lineno)++;
  }

  return;
}
+
/*
** Print the definition of the union used for the parser's data stack.
** This union contains fields for every possible data type for tokens
** and nonterminals.  In the process of computing and printing this
** union, also set the ".dtnum" field of every terminal and nonterminal
** symbol.
**
** Distinct datatypes are deduplicated via an open-addressing hash
** table so that symbols sharing a type also share a union member.
*/
void print_stack_union(
  FILE *out,                  /* The output stream */
  struct lemon *lemp,         /* The main info structure for this parser */
  int *plineno,               /* Pointer to the line number */
  int mhflag                  /* True if generating makeheaders output */
){
  int lineno;                 /* The line number of the output */
  char **types;               /* A hash table of datatypes */
  int arraysize;              /* Size of the "types" array */
  int maxdtlength;            /* Maximum length of any ".datatype" field. */
  char *stddt;                /* Standardized name for a datatype */
  int i,j;                    /* Loop counters */
  unsigned hash;              /* For hashing the name of a type */
  const char *name;           /* Name of the parser */

  /* Allocate and initialize types[] and allocate stddt[] */
  arraysize = lemp->nsymbol * 2;
  types = (char**)calloc( arraysize, sizeof(char*) );
  if( types==0 ){
    fprintf(stderr,"Out of memory.\n");
    exit(1);
  }
  for(i=0; i<arraysize; i++) types[i] = 0;
  maxdtlength = 0;
  if( lemp->vartype ){
    maxdtlength = lemonStrlen(lemp->vartype);
  }
  for(i=0; i<lemp->nsymbol; i++){
    int len;
    struct symbol *sp = lemp->symbols[i];
    if( sp->datatype==0 ) continue;
    len = lemonStrlen(sp->datatype);
    if( len>maxdtlength ) maxdtlength = len;
  }
  stddt = (char*)malloc( maxdtlength*2 + 1 );
  if( stddt==0 ){
    fprintf(stderr,"Out of memory.\n");
    exit(1);
  }

  /* Build a hash table of datatypes.  The ".dtnum" field of each symbol
  ** is filled in with the hash index plus 1.  A ".dtnum" value of 0 is
  ** used for terminal symbols.  If there is no %default_type defined then
  ** 0 is also used as the .dtnum value for nonterminals which do not specify
  ** a datatype using the %type directive.
  */
  for(i=0; i<lemp->nsymbol; i++){
    struct symbol *sp = lemp->symbols[i];
    char *cp;
    if( sp==lemp->errsym ){
      /* The error symbol gets a dtnum beyond every hash slot */
      sp->dtnum = arraysize+1;
      continue;
    }
    if( sp->type!=NONTERMINAL || (sp->datatype==0 && lemp->vartype==0) ){
      sp->dtnum = 0;
      continue;
    }
    cp = sp->datatype;
    if( cp==0 ) cp = lemp->vartype;
    j = 0;
    /* Copy the datatype into stddt[], trimming leading and trailing
    ** whitespace so equivalent spellings hash identically */
    while( ISSPACE(*cp) ) cp++;
    while( *cp ) stddt[j++] = *cp++;
    while( j>0 && ISSPACE(stddt[j-1]) ) j--;
    stddt[j] = 0;
    if( lemp->tokentype && strcmp(stddt, lemp->tokentype)==0 ){
      /* Same type as tokens: reuse the yy0 member */
      sp->dtnum = 0;
      continue;
    }
    hash = 0;
    for(j=0; stddt[j]; j++){
      hash = hash*53 + stddt[j];
    }
    hash = (hash & 0x7fffffff)%arraysize;
    /* Linear probing; table is 2*nsymbol entries, so it never fills */
    while( types[hash] ){
      if( strcmp(types[hash],stddt)==0 ){
        sp->dtnum = hash + 1;
        break;
      }
      hash++;
      if( hash>=(unsigned)arraysize ) hash = 0;
    }
    if( types[hash]==0 ){
      sp->dtnum = hash + 1;
      types[hash] = (char*)malloc( lemonStrlen(stddt)+1 );
      if( types[hash]==0 ){
        fprintf(stderr,"Out of memory.\n");
        exit(1);
      }
      lemon_strcpy(types[hash],stddt);
    }
  }

  /* Print out the definition of YYTOKENTYPE and YYMINORTYPE */
  name = lemp->name ? lemp->name : "Parse";
  lineno = *plineno;
  if( mhflag ){ fprintf(out,"#if INTERFACE\n"); lineno++; }
  fprintf(out,"#define %sTOKENTYPE %s\n",name,
    lemp->tokentype?lemp->tokentype:"void*");  lineno++;
  if( mhflag ){ fprintf(out,"#endif\n"); lineno++; }
  fprintf(out,"typedef union {\n"); lineno++;
  fprintf(out,"  int yyinit;\n"); lineno++;
  fprintf(out,"  %sTOKENTYPE yy0;\n",name); lineno++;
  for(i=0; i<arraysize; i++){
    if( types[i]==0 ) continue;
    fprintf(out,"  %s yy%d;\n",types[i],i+1); lineno++;
    free(types[i]);
  }
  if( lemp->errsym && lemp->errsym->useCnt ){
    fprintf(out,"  int yy%d;\n",lemp->errsym->dtnum); lineno++;
  }
  free(stddt);
  free(types);
  fprintf(out,"} YYMINORTYPE;\n"); lineno++;
  *plineno = lineno;
}
+
/*
** Return the name of a C datatype able to represent values between
** lwr and upr, inclusive.  If pnByte!=NULL then also write the sizeof
** for that type (1, 2, or 4) into *pnByte.
**
** The signed ranges are deliberately conservative (e.g. -127..127 for
** signed char) so the result is valid on any conforming C platform.
*/
static const char *minimum_size_type(int lwr, int upr, int *pnByte){
  const char *zName;
  int size;
  if( lwr>=0 ){
    /* Non-negative range: pick the smallest unsigned type */
    if( upr<=255 ){
      zName = "unsigned char";
      size = 1;
    }else if( upr<65535 ){
      zName = "unsigned short int";
      size = 2;
    }else{
      zName = "unsigned int";
      size = 4;
    }
  }else if( lwr>=-127 && upr<=127 ){
    zName = "signed char";
    size = 1;
  }else if( lwr>=-32767 && upr<32767 ){
    zName = "short";
    size = 2;
  }else{
    zName = "int";
    size = 4;
  }
  if( pnByte ) *pnByte = size;
  return zName;
}
+
/*
** Each state contains a set of token actions and a set of
** nonterminal actions.  Each of these sets makes an instance
** of the following structure.  An array of these structures is used
** to order the creation of entries in the yy_action[] table.
*/
struct axset {
  struct state *stp;   /* A pointer to a state */
  int isTkn;           /* True to use tokens.  False for non-terminals */
  int nAction;         /* Number of actions */
  int iOrder;          /* Original order of action sets */
};
+
+/*
+** Compare to axset structures for sorting purposes
+*/
+static int axset_compare(const void *a, const void *b){
+ struct axset *p1 = (struct axset*)a;
+ struct axset *p2 = (struct axset*)b;
+ int c;
+ c = p2->nAction - p1->nAction;
+ if( c==0 ){
+ c = p1->iOrder - p2->iOrder;
+ }
+ assert( c!=0 || p1==p2 );
+ return c;
+}
+
+/*
+** Write text on "out" that describes the rule "rp".
+*/
+static void writeRuleText(FILE *out, struct rule *rp){
+ int j;
+ fprintf(out,"%s ::=", rp->lhs->name);
+ for(j=0; j<rp->nrhs; j++){
+ struct symbol *sp = rp->rhs[j];
+ if( sp->type!=MULTITERMINAL ){
+ fprintf(out," %s", sp->name);
+ }else{
+ int k;
+ fprintf(out," %s", sp->subsym[0]->name);
+ for(k=1; k<sp->nsubsym; k++){
+ fprintf(out,"|%s",sp->subsym[k]->name);
+ }
+ }
+ }
+}
+
+
+/* Generate C source code for the parser */
+void ReportTable(
+ struct lemon *lemp,
+ int mhflag, /* Output in makeheaders format if true */
+ int sqlFlag /* Generate the *.sql file too */
+){
+ FILE *out, *in, *sql;
+ int lineno;
+ struct state *stp;
+ struct action *ap;
+ struct rule *rp;
+ struct acttab *pActtab;
+ int i, j, n, sz;
+ int nLookAhead;
+ int szActionType; /* sizeof(YYACTIONTYPE) */
+ int szCodeType; /* sizeof(YYCODETYPE) */
+ const char *name;
+ int mnTknOfst, mxTknOfst;
+ int mnNtOfst, mxNtOfst;
+ struct axset *ax;
+ char *prefix;
+
+ lemp->minShiftReduce = lemp->nstate;
+ lemp->errAction = lemp->minShiftReduce + lemp->nrule;
+ lemp->accAction = lemp->errAction + 1;
+ lemp->noAction = lemp->accAction + 1;
+ lemp->minReduce = lemp->noAction + 1;
+ lemp->maxAction = lemp->minReduce + lemp->nrule;
+
+ in = tplt_open(lemp);
+ if( in==0 ) return;
+ out = file_open(lemp,".c","wb");
+ if( out==0 ){
+ fclose(in);
+ return;
+ }
+ if( sqlFlag==0 ){
+ sql = 0;
+ }else{
+ sql = file_open(lemp, ".sql", "wb");
+ if( sql==0 ){
+ fclose(in);
+ fclose(out);
+ return;
+ }
+ fprintf(sql,
+ "BEGIN;\n"
+ "CREATE TABLE symbol(\n"
+ " id INTEGER PRIMARY KEY,\n"
+ " name TEXT NOT NULL,\n"
+ " isTerminal BOOLEAN NOT NULL,\n"
+ " fallback INTEGER REFERENCES symbol"
+ " DEFERRABLE INITIALLY DEFERRED\n"
+ ");\n"
+ );
+ for(i=0; i<lemp->nsymbol; i++){
+ fprintf(sql,
+ "INSERT INTO symbol(id,name,isTerminal,fallback)"
+ "VALUES(%d,'%s',%s",
+ i, lemp->symbols[i]->name,
+ i<lemp->nterminal ? "TRUE" : "FALSE"
+ );
+ if( lemp->symbols[i]->fallback ){
+ fprintf(sql, ",%d);\n", lemp->symbols[i]->fallback->index);
+ }else{
+ fprintf(sql, ",NULL);\n");
+ }
+ }
+ fprintf(sql,
+ "CREATE TABLE rule(\n"
+ " ruleid INTEGER PRIMARY KEY,\n"
+ " lhs INTEGER REFERENCES symbol(id),\n"
+ " txt TEXT\n"
+ ");\n"
+ "CREATE TABLE rulerhs(\n"
+ " ruleid INTEGER REFERENCES rule(ruleid),\n"
+ " pos INTEGER,\n"
+ " sym INTEGER REFERENCES symbol(id)\n"
+ ");\n"
+ );
+ for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
+ assert( i==rp->iRule );
+ fprintf(sql,
+ "INSERT INTO rule(ruleid,lhs,txt)VALUES(%d,%d,'",
+ rp->iRule, rp->lhs->index
+ );
+ writeRuleText(sql, rp);
+ fprintf(sql,"');\n");
+ for(j=0; j<rp->nrhs; j++){
+ struct symbol *sp = rp->rhs[j];
+ if( sp->type!=MULTITERMINAL ){
+ fprintf(sql,
+ "INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n",
+ i,j,sp->index
+ );
+ }else{
+ int k;
+ for(k=0; k<sp->nsubsym; k++){
+ fprintf(sql,
+ "INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n",
+ i,j,sp->subsym[k]->index
+ );
+ }
+ }
+ }
+ }
+ fprintf(sql, "COMMIT;\n");
+ }
+ lineno = 1;
+
+ fprintf(out,
+ "/* This file is automatically generated by Lemon from input grammar\n"
+ "** source file \"%s\". */\n", lemp->filename); lineno += 2;
+
+ /* The first %include directive begins with a C-language comment,
+ ** then skip over the header comment of the template file
+ */
+ if( lemp->include==0 ) lemp->include = "";
+ for(i=0; ISSPACE(lemp->include[i]); i++){
+ if( lemp->include[i]=='\n' ){
+ lemp->include += i+1;
+ i = -1;
+ }
+ }
+ if( lemp->include[0]=='/' ){
+ tplt_skip_header(in,&lineno);
+ }else{
+ tplt_xfer(lemp->name,in,out,&lineno);
+ }
+
+ /* Generate the include code, if any */
+ tplt_print(out,lemp,lemp->include,&lineno);
+ if( mhflag ){
+ char *incName = file_makename(lemp, ".h");
+ fprintf(out,"#include \"%s\"\n", incName); lineno++;
+ free(incName);
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate #defines for all tokens */
+ if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
+ else prefix = "";
+ if( mhflag ){
+ fprintf(out,"#if INTERFACE\n"); lineno++;
+ }else{
+ fprintf(out,"#ifndef %s%s\n", prefix, lemp->symbols[1]->name);
+ }
+ for(i=1; i<lemp->nterminal; i++){
+ fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ lineno++;
+ }
+ fprintf(out,"#endif\n"); lineno++;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the defines */
+ fprintf(out,"#define YYCODETYPE %s\n",
+ minimum_size_type(0, lemp->nsymbol, &szCodeType)); lineno++;
+ fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol); lineno++;
+ fprintf(out,"#define YYACTIONTYPE %s\n",
+ minimum_size_type(0,lemp->maxAction,&szActionType)); lineno++;
+ if( lemp->wildcard ){
+ fprintf(out,"#define YYWILDCARD %d\n",
+ lemp->wildcard->index); lineno++;
+ }
+ print_stack_union(out,lemp,&lineno,mhflag);
+ fprintf(out, "#ifndef YYSTACKDEPTH\n"); lineno++;
+ if( lemp->stacksize ){
+ fprintf(out,"#define YYSTACKDEPTH %s\n",lemp->stacksize); lineno++;
+ }else{
+ fprintf(out,"#define YYSTACKDEPTH 100\n"); lineno++;
+ }
+ fprintf(out, "#endif\n"); lineno++;
+ if( mhflag ){
+ fprintf(out,"#if INTERFACE\n"); lineno++;
+ }
+ name = lemp->name ? lemp->name : "Parse";
+ if( lemp->arg && lemp->arg[0] ){
+ i = lemonStrlen(lemp->arg);
+ while( i>=1 && ISSPACE(lemp->arg[i-1]) ) i--;
+ while( i>=1 && (ISALNUM(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--;
+ fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++;
+ fprintf(out,"#define %sARG_PDECL ,%s\n",name,lemp->arg); lineno++;
+ fprintf(out,"#define %sARG_PARAM ,%s\n",name,&lemp->arg[i]); lineno++;
+ fprintf(out,"#define %sARG_FETCH %s=yypParser->%s;\n",
+ name,lemp->arg,&lemp->arg[i]); lineno++;
+ fprintf(out,"#define %sARG_STORE yypParser->%s=%s;\n",
+ name,&lemp->arg[i],&lemp->arg[i]); lineno++;
+ }else{
+ fprintf(out,"#define %sARG_SDECL\n",name); lineno++;
+ fprintf(out,"#define %sARG_PDECL\n",name); lineno++;
+ fprintf(out,"#define %sARG_PARAM\n",name); lineno++;
+ fprintf(out,"#define %sARG_FETCH\n",name); lineno++;
+ fprintf(out,"#define %sARG_STORE\n",name); lineno++;
+ }
+ if( lemp->ctx && lemp->ctx[0] ){
+ i = lemonStrlen(lemp->ctx);
+ while( i>=1 && ISSPACE(lemp->ctx[i-1]) ) i--;
+ while( i>=1 && (ISALNUM(lemp->ctx[i-1]) || lemp->ctx[i-1]=='_') ) i--;
+ fprintf(out,"#define %sCTX_SDECL %s;\n",name,lemp->ctx); lineno++;
+ fprintf(out,"#define %sCTX_PDECL ,%s\n",name,lemp->ctx); lineno++;
+ fprintf(out,"#define %sCTX_PARAM ,%s\n",name,&lemp->ctx[i]); lineno++;
+ fprintf(out,"#define %sCTX_FETCH %s=yypParser->%s;\n",
+ name,lemp->ctx,&lemp->ctx[i]); lineno++;
+ fprintf(out,"#define %sCTX_STORE yypParser->%s=%s;\n",
+ name,&lemp->ctx[i],&lemp->ctx[i]); lineno++;
+ }else{
+ fprintf(out,"#define %sCTX_SDECL\n",name); lineno++;
+ fprintf(out,"#define %sCTX_PDECL\n",name); lineno++;
+ fprintf(out,"#define %sCTX_PARAM\n",name); lineno++;
+ fprintf(out,"#define %sCTX_FETCH\n",name); lineno++;
+ fprintf(out,"#define %sCTX_STORE\n",name); lineno++;
+ }
+ if( mhflag ){
+ fprintf(out,"#endif\n"); lineno++;
+ }
+ if( lemp->errsym && lemp->errsym->useCnt ){
+ fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++;
+ fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++;
+ }
+ if( lemp->has_fallback ){
+ fprintf(out,"#define YYFALLBACK 1\n"); lineno++;
+ }
+
+ /* Compute the action table, but do not output it yet. The action
+ ** table must be computed before generating the YYNSTATE macro because
+ ** we need to know how many states can be eliminated.
+ */
+ ax = (struct axset *) calloc(lemp->nxstate*2, sizeof(ax[0]));
+ if( ax==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ for(i=0; i<lemp->nxstate; i++){
+ stp = lemp->sorted[i];
+ ax[i*2].stp = stp;
+ ax[i*2].isTkn = 1;
+ ax[i*2].nAction = stp->nTknAct;
+ ax[i*2+1].stp = stp;
+ ax[i*2+1].isTkn = 0;
+ ax[i*2+1].nAction = stp->nNtAct;
+ }
+ mxTknOfst = mnTknOfst = 0;
+ mxNtOfst = mnNtOfst = 0;
+ /* In an effort to minimize the action table size, use the heuristic
+ ** of placing the largest action sets first */
+ for(i=0; i<lemp->nxstate*2; i++) ax[i].iOrder = i;
+ qsort(ax, lemp->nxstate*2, sizeof(ax[0]), axset_compare);
+ pActtab = acttab_alloc(lemp->nsymbol, lemp->nterminal);
+ for(i=0; i<lemp->nxstate*2 && ax[i].nAction>0; i++){
+ stp = ax[i].stp;
+ if( ax[i].isTkn ){
+ for(ap=stp->ap; ap; ap=ap->next){
+ int action;
+ if( ap->sp->index>=lemp->nterminal ) continue;
+ action = compute_action(lemp, ap);
+ if( action<0 ) continue;
+ acttab_action(pActtab, ap->sp->index, action);
+ }
+ stp->iTknOfst = acttab_insert(pActtab, 1);
+ if( stp->iTknOfst<mnTknOfst ) mnTknOfst = stp->iTknOfst;
+ if( stp->iTknOfst>mxTknOfst ) mxTknOfst = stp->iTknOfst;
+ }else{
+ for(ap=stp->ap; ap; ap=ap->next){
+ int action;
+ if( ap->sp->index<lemp->nterminal ) continue;
+ if( ap->sp->index==lemp->nsymbol ) continue;
+ action = compute_action(lemp, ap);
+ if( action<0 ) continue;
+ acttab_action(pActtab, ap->sp->index, action);
+ }
+ stp->iNtOfst = acttab_insert(pActtab, 0);
+ if( stp->iNtOfst<mnNtOfst ) mnNtOfst = stp->iNtOfst;
+ if( stp->iNtOfst>mxNtOfst ) mxNtOfst = stp->iNtOfst;
+ }
+#if 0 /* Uncomment for a trace of how the yy_action[] table fills out */
+ { int jj, nn;
+ for(jj=nn=0; jj<pActtab->nAction; jj++){
+ if( pActtab->aAction[jj].action<0 ) nn++;
+ }
+ printf("%4d: State %3d %s n: %2d size: %5d freespace: %d\n",
+ i, stp->statenum, ax[i].isTkn ? "Token" : "Var ",
+ ax[i].nAction, pActtab->nAction, nn);
+ }
+#endif
+ }
+ free(ax);
+
+ /* Mark rules that are actually used for reduce actions after all
+ ** optimizations have been applied
+ */
+ for(rp=lemp->rule; rp; rp=rp->next) rp->doesReduce = LEMON_FALSE;
+ for(i=0; i<lemp->nxstate; i++){
+ for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
+ if( ap->type==REDUCE || ap->type==SHIFTREDUCE ){
+ ap->x.rp->doesReduce = 1;
+ }
+ }
+ }
+
+ /* Finish rendering the constants now that the action table has
+ ** been computed */
+ fprintf(out,"#define YYNSTATE %d\n",lemp->nxstate); lineno++;
+ fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++;
+ fprintf(out,"#define YYNRULE_WITH_ACTION %d\n",lemp->nruleWithAction);
+ lineno++;
+ fprintf(out,"#define YYNTOKEN %d\n",lemp->nterminal); lineno++;
+ fprintf(out,"#define YY_MAX_SHIFT %d\n",lemp->nxstate-1); lineno++;
+ i = lemp->minShiftReduce;
+ fprintf(out,"#define YY_MIN_SHIFTREDUCE %d\n",i); lineno++;
+ i += lemp->nrule;
+ fprintf(out,"#define YY_MAX_SHIFTREDUCE %d\n", i-1); lineno++;
+ fprintf(out,"#define YY_ERROR_ACTION %d\n", lemp->errAction); lineno++;
+ fprintf(out,"#define YY_ACCEPT_ACTION %d\n", lemp->accAction); lineno++;
+ fprintf(out,"#define YY_NO_ACTION %d\n", lemp->noAction); lineno++;
+ fprintf(out,"#define YY_MIN_REDUCE %d\n", lemp->minReduce); lineno++;
+ i = lemp->minReduce + lemp->nrule;
+ fprintf(out,"#define YY_MAX_REDUCE %d\n", i-1); lineno++;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Now output the action table and its associates:
+ **
+ ** yy_action[] A single table containing all actions.
+ ** yy_lookahead[] A table containing the lookahead for each entry in
+ ** yy_action. Used to detect hash collisions.
+ ** yy_shift_ofst[] For each state, the offset into yy_action for
+ ** shifting terminals.
+ ** yy_reduce_ofst[] For each state, the offset into yy_action for
+ ** shifting non-terminals after a reduce.
+ ** yy_default[] Default action for each state.
+ */
+
+ /* Output the yy_action table */
+ lemp->nactiontab = n = acttab_action_size(pActtab);
+ lemp->tablesize += n*szActionType;
+ fprintf(out,"#define YY_ACTTAB_COUNT (%d)\n", n); lineno++;
+ fprintf(out,"static const YYACTIONTYPE yy_action[] = {\n"); lineno++;
+ for(i=j=0; i<n; i++){
+ int action = acttab_yyaction(pActtab, i);
+ if( action<0 ) action = lemp->noAction;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", action);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_lookahead table */
+ lemp->nlookaheadtab = n = acttab_lookahead_size(pActtab);
+ lemp->tablesize += n*szCodeType;
+ fprintf(out,"static const YYCODETYPE yy_lookahead[] = {\n"); lineno++;
+ for(i=j=0; i<n; i++){
+ int la = acttab_yylookahead(pActtab, i);
+ if( la<0 ) la = lemp->nsymbol;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", la);
+ if( j==9 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ /* Add extra entries to the end of the yy_lookahead[] table so that
+ ** yy_shift_ofst[]+iToken will always be a valid index into the array,
+ ** even for the largest possible value of yy_shift_ofst[] and iToken. */
+ nLookAhead = lemp->nterminal + lemp->nactiontab;
+ while( i<nLookAhead ){
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", lemp->nterminal);
+ if( j==9 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ i++;
+ }
+ if( j>0 ){ fprintf(out, "\n"); lineno++; }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_shift_ofst[] table */
+ n = lemp->nxstate;
+ while( n>0 && lemp->sorted[n-1]->iTknOfst==NO_OFFSET ) n--;
+ fprintf(out, "#define YY_SHIFT_COUNT (%d)\n", n-1); lineno++;
+ fprintf(out, "#define YY_SHIFT_MIN (%d)\n", mnTknOfst); lineno++;
+ fprintf(out, "#define YY_SHIFT_MAX (%d)\n", mxTknOfst); lineno++;
+ fprintf(out, "static const %s yy_shift_ofst[] = {\n",
+ minimum_size_type(mnTknOfst, lemp->nterminal+lemp->nactiontab, &sz));
+ lineno++;
+ lemp->tablesize += n*sz;
+ for(i=j=0; i<n; i++){
+ int ofst;
+ stp = lemp->sorted[i];
+ ofst = stp->iTknOfst;
+ if( ofst==NO_OFFSET ) ofst = lemp->nactiontab;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", ofst);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_reduce_ofst[] table */
+ n = lemp->nxstate;
+ while( n>0 && lemp->sorted[n-1]->iNtOfst==NO_OFFSET ) n--;
+ fprintf(out, "#define YY_REDUCE_COUNT (%d)\n", n-1); lineno++;
+ fprintf(out, "#define YY_REDUCE_MIN (%d)\n", mnNtOfst); lineno++;
+ fprintf(out, "#define YY_REDUCE_MAX (%d)\n", mxNtOfst); lineno++;
+ fprintf(out, "static const %s yy_reduce_ofst[] = {\n",
+ minimum_size_type(mnNtOfst-1, mxNtOfst, &sz)); lineno++;
+ lemp->tablesize += n*sz;
+ for(i=j=0; i<n; i++){
+ int ofst;
+ stp = lemp->sorted[i];
+ ofst = stp->iNtOfst;
+ if( ofst==NO_OFFSET ) ofst = mnNtOfst - 1;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", ofst);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the default action table */
+ fprintf(out, "static const YYACTIONTYPE yy_default[] = {\n"); lineno++;
+ n = lemp->nxstate;
+ lemp->tablesize += n*szActionType;
+ for(i=j=0; i<n; i++){
+ stp = lemp->sorted[i];
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ if( stp->iDfltReduce<0 ){
+ fprintf(out, " %4d,", lemp->errAction);
+ }else{
+ fprintf(out, " %4d,", stp->iDfltReduce + lemp->minReduce);
+ }
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the table of fallback tokens.
+ */
+ if( lemp->has_fallback ){
+ int mx = lemp->nterminal - 1;
+ /* 2019-08-28: Generate fallback entries for every token to avoid
+ ** having to do a range check on the index */
+ /* while( mx>0 && lemp->symbols[mx]->fallback==0 ){ mx--; } */
+ lemp->tablesize += (mx+1)*szCodeType;
+ for(i=0; i<=mx; i++){
+ struct symbol *p = lemp->symbols[i];
+ if( p->fallback==0 ){
+ fprintf(out, " 0, /* %10s => nothing */\n", p->name);
+ }else{
+ fprintf(out, " %3d, /* %10s => %s */\n", p->fallback->index,
+ p->name, p->fallback->name);
+ }
+ lineno++;
+ }
+ }
+ tplt_xfer(lemp->name, in, out, &lineno);
+
+ /* Generate a table containing the symbolic name of every symbol
+ */
+ for(i=0; i<lemp->nsymbol; i++){
+ fprintf(out," /* %4d */ \"%s\",\n",i, lemp->symbols[i]->name); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate a table containing a text string that describes every
+ ** rule in the rule set of the grammar. This information is used
+ ** when tracing REDUCE actions.
+ */
+ for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
+ assert( rp->iRule==i );
+ fprintf(out," /* %3d */ \"", i);
+ writeRuleText(out, rp);
+ fprintf(out,"\",\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes every time a symbol is popped from
+ ** the stack while processing errors or while destroying the parser.
+ ** (In other words, generate the %destructor actions)
+ */
+ if( lemp->tokendest ){
+ int once = 1;
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type!=TERMINAL ) continue;
+ if( once ){
+ fprintf(out, " /* TERMINAL Destructor */\n"); lineno++;
+ once = 0;
+ }
+ fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++;
+ }
+ for(i=0; i<lemp->nsymbol && lemp->symbols[i]->type!=TERMINAL; i++);
+ if( i<lemp->nsymbol ){
+ emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ }
+ if( lemp->vardest ){
+ struct symbol *dflt_sp = 0;
+ int once = 1;
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type==TERMINAL ||
+ sp->index<=0 || sp->destructor!=0 ) continue;
+ if( once ){
+ fprintf(out, " /* Default NON-TERMINAL Destructor */\n");lineno++;
+ once = 0;
+ }
+ fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++;
+ dflt_sp = sp;
+ }
+ if( dflt_sp!=0 ){
+ emit_destructor_code(out,dflt_sp,lemp,&lineno);
+ }
+ fprintf(out," break;\n"); lineno++;
+ }
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue;
+ if( sp->destLineno<0 ) continue; /* Already emitted */
+ fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++;
+
+ /* Combine duplicate destructors into a single case */
+ for(j=i+1; j<lemp->nsymbol; j++){
+ struct symbol *sp2 = lemp->symbols[j];
+ if( sp2 && sp2->type!=TERMINAL && sp2->destructor
+ && sp2->dtnum==sp->dtnum
+ && strcmp(sp->destructor,sp2->destructor)==0 ){
+ fprintf(out," case %d: /* %s */\n",
+ sp2->index, sp2->name); lineno++;
+ sp2->destLineno = -1; /* Avoid emitting this destructor again */
+ }
+ }
+
+ emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes whenever the parser stack overflows */
+ tplt_print(out,lemp,lemp->overflow,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the tables of rule information. yyRuleInfoLhs[] and
+ ** yyRuleInfoNRhs[].
+ **
+ ** Note: This code depends on the fact that rules are number
+ ** sequentially beginning with 0.
+ */
+ for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
+ fprintf(out," %4d, /* (%d) ", rp->lhs->index, i);
+ rule_print(out, rp);
+ fprintf(out," */\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+ for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
+ fprintf(out," %3d, /* (%d) ", -rp->nrhs, i);
+ rule_print(out, rp);
+ fprintf(out," */\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+  /* Generate code which executes during each REDUCE action */
+ i = 0;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ i += translate_code(lemp, rp);
+ }
+ if( i ){
+ fprintf(out," YYMINORTYPE yylhsminor;\n"); lineno++;
+ }
+ /* First output rules other than the default: rule */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ struct rule *rp2; /* Other rules with the same action */
+ if( rp->codeEmitted ) continue;
+ if( rp->noCode ){
+ /* No C code actions, so this will be part of the "default:" rule */
+ continue;
+ }
+ fprintf(out," case %d: /* ", rp->iRule);
+ writeRuleText(out, rp);
+ fprintf(out, " */\n"); lineno++;
+ for(rp2=rp->next; rp2; rp2=rp2->next){
+ if( rp2->code==rp->code && rp2->codePrefix==rp->codePrefix
+ && rp2->codeSuffix==rp->codeSuffix ){
+ fprintf(out," case %d: /* ", rp2->iRule);
+ writeRuleText(out, rp2);
+ fprintf(out," */ yytestcase(yyruleno==%d);\n", rp2->iRule); lineno++;
+ rp2->codeEmitted = 1;
+ }
+ }
+ emit_code(out,rp,lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ rp->codeEmitted = 1;
+ }
+ /* Finally, output the default: rule. We choose as the default: all
+ ** empty actions. */
+ fprintf(out," default:\n"); lineno++;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ if( rp->codeEmitted ) continue;
+ assert( rp->noCode );
+ fprintf(out," /* (%d) ", rp->iRule);
+ writeRuleText(out, rp);
+ if( rp->neverReduce ){
+ fprintf(out, " (NEVER REDUCES) */ assert(yyruleno!=%d);\n",
+ rp->iRule); lineno++;
+ }else if( rp->doesReduce ){
+ fprintf(out, " */ yytestcase(yyruleno==%d);\n", rp->iRule); lineno++;
+ }else{
+ fprintf(out, " (OPTIMIZED OUT) */ assert(yyruleno!=%d);\n",
+ rp->iRule); lineno++;
+ }
+ }
+ fprintf(out," break;\n"); lineno++;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes if a parse fails */
+ tplt_print(out,lemp,lemp->failure,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes when a syntax error occurs */
+ tplt_print(out,lemp,lemp->error,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes when the parser accepts its input */
+ tplt_print(out,lemp,lemp->accept,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Append any addition code the user desires */
+ tplt_print(out,lemp,lemp->extracode,&lineno);
+
+ acttab_free(pActtab);
+ fclose(in);
+ fclose(out);
+ if( sql ) fclose(sql);
+ return;
+}
+
+/* Generate a header file for the parser */
+void ReportHeader(struct lemon *lemp)
+{
+  FILE *out, *in;
+  const char *prefix;      /* Token-name prefix from %token_prefix, or "" */
+  char line[LINESIZE];     /* A line read back from the existing header */
+  char pattern[LINESIZE];  /* The line we expect at that position */
+  int i;
+
+  if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
+  else prefix = "";
+  /* First compare the existing .h file (if any) against what would be
+  ** written.  If they match line-for-line, skip the rewrite so the file's
+  ** timestamp is preserved and dependent rebuilds are avoided. */
+  in = file_open(lemp,".h","rb");
+  if( in ){
+    int nextChar;
+    for(i=1; i<lemp->nterminal && fgets(line,LINESIZE,in); i++){
+      lemon_sprintf(pattern,"#define %s%-30s %3d\n",
+        prefix,lemp->symbols[i]->name,i);
+      if( strcmp(line,pattern) ) break;
+    }
+    nextChar = fgetc(in);   /* EOF expected: the old file has no extra lines */
+    fclose(in);
+    if( i==lemp->nterminal && nextChar==EOF ){
+      /* No change in the file. Don't rewrite it. */
+      return;
+    }
+  }
+  out = file_open(lemp,".h","wb");
+  if( out ){
+    /* One #define per terminal symbol; index 0 is not emitted */
+    for(i=1; i<lemp->nterminal; i++){
+      fprintf(out,"#define %s%-30s %3d\n",prefix,lemp->symbols[i]->name,i);
+    }
+    fclose(out);
+  }
+  return;
+}
+
+/* Reduce the size of the action tables, if possible, by making use
+** of defaults.
+**
+** In this version, we take the most frequent REDUCE action and make
+** it the default. Except, there is no default if the wildcard token
+** is a possible look-ahead.
+*/
+void CompressTables(struct lemon *lemp)
+{
+  struct state *stp;
+  struct action *ap, *ap2, *nextap;
+  struct rule *rp, *rp2, *rbest;
+  int nbest, n;         /* nbest = popularity of rbest, the best candidate */
+  int i;
+  int usesWildcard;     /* True if this state can SHIFT the wildcard token */
+
+  for(i=0; i<lemp->nstate; i++){
+    stp = lemp->sorted[i];
+    nbest = 0;
+    rbest = 0;
+    usesWildcard = 0;
+
+    /* Find the REDUCE rule used by the largest number of actions in
+    ** this state; that rule (rbest) becomes the default-action candidate. */
+    for(ap=stp->ap; ap; ap=ap->next){
+      if( ap->type==SHIFT && ap->sp==lemp->wildcard ){
+        usesWildcard = 1;
+      }
+      if( ap->type!=REDUCE ) continue;
+      rp = ap->x.rp;
+      if( rp->lhsStart ) continue;
+      if( rp==rbest ) continue;
+      n = 1;
+      for(ap2=ap->next; ap2; ap2=ap2->next){
+        if( ap2->type!=REDUCE ) continue;
+        rp2 = ap2->x.rp;
+        if( rp2==rbest ) continue;
+        if( rp2==rp ) n++;
+      }
+      if( n>nbest ){
+        nbest = n;
+        rbest = rp;
+      }
+    }
+
+    /* Do not make a default if the number of rules to default
+    ** is not at least 1 or if the wildcard token is a possible
+    ** lookahead.
+    */
+    if( nbest<1 || usesWildcard ) continue;
+
+
+    /* Combine matching REDUCE actions into a single default */
+    for(ap=stp->ap; ap; ap=ap->next){
+      if( ap->type==REDUCE && ap->x.rp==rbest ) break;
+    }
+    assert( ap );
+    ap->sp = Symbol_new("{default}");
+    for(ap=ap->next; ap; ap=ap->next){
+      if( ap->type==REDUCE && ap->x.rp==rbest ) ap->type = NOT_USED;
+    }
+    stp->ap = Action_sort(stp->ap);
+
+    /* If no SHIFT and no non-default REDUCE remains, the state always
+    ** reduces by rbest and can be auto-reduced. */
+    for(ap=stp->ap; ap; ap=ap->next){
+      if( ap->type==SHIFT ) break;
+      if( ap->type==REDUCE && ap->x.rp!=rbest ) break;
+    }
+    if( ap==0 ){
+      stp->autoReduce = 1;
+      stp->pDfltReduce = rbest;
+    }
+  }
+
+  /* Make a second pass over all states and actions. Convert
+  ** every action that is a SHIFT to an autoReduce state into
+  ** a SHIFTREDUCE action.
+  */
+  for(i=0; i<lemp->nstate; i++){
+    stp = lemp->sorted[i];
+    for(ap=stp->ap; ap; ap=ap->next){
+      struct state *pNextState;
+      if( ap->type!=SHIFT ) continue;
+      pNextState = ap->x.stp;
+      if( pNextState->autoReduce && pNextState->pDfltReduce!=0 ){
+        ap->type = SHIFTREDUCE;
+        ap->x.rp = pNextState->pDfltReduce;
+      }
+    }
+  }
+
+  /* If a SHIFTREDUCE action specifies a rule that has a single RHS term
+  ** (meaning that the SHIFTREDUCE will land back in the state where it
+  ** started) and if there is no C-code associated with the reduce action,
+  ** then we can go ahead and convert the action to be the same as the
+  ** action for the RHS of the rule.
+  */
+  for(i=0; i<lemp->nstate; i++){
+    stp = lemp->sorted[i];
+    for(ap=stp->ap; ap; ap=nextap){
+      nextap = ap->next;
+      if( ap->type!=SHIFTREDUCE ) continue;
+      rp = ap->x.rp;
+      if( rp->noCode==0 ) continue;
+      if( rp->nrhs!=1 ) continue;
+#if 1
+      /* Only apply this optimization to non-terminals.  It would be OK to
+      ** apply it to terminal symbols too, but that makes the parser tables
+      ** larger. */
+      if( ap->sp->index<lemp->nterminal ) continue;
+#endif
+      /* If we reach this point, it means the optimization can be applied */
+      nextap = ap;   /* Re-examine this same action on the next iteration */
+      for(ap2=stp->ap; ap2 && (ap2==ap || ap2->sp!=rp->lhs); ap2=ap2->next){}
+      assert( ap2!=0 );
+      ap->spOpt = ap2->sp;
+      ap->type = ap2->type;
+      ap->x = ap2->x;
+    }
+  }
+}
+
+
+/*
+** Compare two states for sorting purposes. The smaller state is the
+** one with the most non-terminal actions. If they have the same number
+** of non-terminal actions, then the smaller is the one with the most
+** token actions.
+*/
+static int stateResortCompare(const void *a, const void *b){
+  /* qsort() callback: order states so that those with the most
+  ** non-terminal actions sort first, breaking ties by token-action
+  ** count and finally by the (unique) original state number. */
+  const struct state *pLeft = *(const struct state**)a;
+  const struct state *pRight = *(const struct state**)b;
+  int delta;
+
+  delta = pRight->nNtAct - pLeft->nNtAct;
+  if( delta ) return delta;
+  delta = pRight->nTknAct - pLeft->nTknAct;
+  if( delta ) return delta;
+  delta = pRight->statenum - pLeft->statenum;
+  assert( delta!=0 );   /* statenum values are distinct, so never equal */
+  return delta;
+}
+
+
+/*
+** Renumber and resort states so that states with fewer choices
+** occur at the end. Except, keep state 0 as the first state.
+*/
+void ResortStates(struct lemon *lemp)
+{
+  int i;
+  struct state *stp;
+  struct action *ap;
+
+  /* Count token and non-terminal actions per state; these counts drive
+  ** the sort order used below. */
+  for(i=0; i<lemp->nstate; i++){
+    stp = lemp->sorted[i];
+    stp->nTknAct = stp->nNtAct = 0;
+    stp->iDfltReduce = -1; /* Init dflt action to "syntax error" */
+    stp->iTknOfst = NO_OFFSET;
+    stp->iNtOfst = NO_OFFSET;
+    for(ap=stp->ap; ap; ap=ap->next){
+      int iAction = compute_action(lemp,ap);
+      if( iAction>=0 ){
+        if( ap->sp->index<lemp->nterminal ){
+          stp->nTknAct++;
+        }else if( ap->sp->index<lemp->nsymbol ){
+          stp->nNtAct++;
+        }else{
+          /* Symbols with index >= nsymbol (e.g. the "{default}" symbol
+          ** created by CompressTables) carry the default-reduce action */
+          assert( stp->autoReduce==0 || stp->pDfltReduce==ap->x.rp );
+          stp->iDfltReduce = iAction;
+        }
+      }
+    }
+  }
+  /* Sort from index 1 so that state 0 always stays first */
+  qsort(&lemp->sorted[1], lemp->nstate-1, sizeof(lemp->sorted[0]),
+        stateResortCompare);
+  for(i=0; i<lemp->nstate; i++){
+    lemp->sorted[i]->statenum = i;
+  }
+  /* Trailing autoReduce states need no table entries; exclude them
+  ** from nxstate */
+  lemp->nxstate = lemp->nstate;
+  while( lemp->nxstate>1 && lemp->sorted[lemp->nxstate-1]->autoReduce ){
+    lemp->nxstate--;
+  }
+}
+
+
+/***************** From the file "set.c" ************************************/
+/*
+** Set manipulation routines for the LEMON parser generator.
+*/
+
+static int size = 0;   /* Number of slots in every set allocated by SetNew() */
+
+/* Set the set size */
+void SetSize(int n)
+{
+  size = n+1;   /* One extra slot so indices 0..n are all in range */
+}
+
+/* Allocate a new set */
+char *SetNew(void){
+  char *s;
+  /* A set is a zero-initialized byte-per-element membership table of
+  ** "size" slots (see SetSize()). */
+  s = (char*)calloc( size, 1);
+  if( s==0 ){
+    memory_error();   /* Does not return on allocation failure */
+  }
+  return s;
+}
+
+/* Deallocate a set */
+void SetFree(char *s)
+{
+  /* Release a set previously obtained from SetNew() */
+  free(s);
+}
+
+/* Add a new element to the set. Return TRUE if the element was added
+** and FALSE if it was already there. */
+int SetAdd(char *s, int e)
+{
+  /* The set is a byte-per-element membership table; slot e is 1 iff
+  ** element e is a member. */
+  assert( e>=0 && e<size );
+  if( s[e] ) return 0;   /* Already a member: no change */
+  s[e] = 1;
+  return 1;              /* Newly inserted */
+}
+
+/* Add every element of s2 to s1. Return TRUE if s1 changes. */
+int SetUnion(char *s1, char *s2)
+{
+  /* Merge every member of s2 into s1, reporting whether s1 gained
+  ** any new members. */
+  int i;
+  int changed = 0;
+  for(i=0; i<size; i++){
+    if( s2[i] && !s1[i] ){
+      s1[i] = 1;
+      changed = 1;
+    }
+  }
+  return changed;
+}
+/********************** From the file "table.c" ****************************/
+/*
+** All code in this file has been automatically generated
+** from a specification in the file
+** "table.q"
+** by the associative array code building program "aagen".
+** Do not edit this file! Instead, edit the specification
+** file, then rerun aagen.
+*/
+/*
+** Code for processing tables in the LEMON parser generator.
+*/
+
+PRIVATE unsigned strhash(const char *x)
+{
+  /* Simple multiplicative string hash (multiplier 13); relies on
+  ** unsigned overflow wrap-around, which is well-defined in C. */
+  unsigned h = 0;
+  while( *x ) h = h*13 + *(x++);
+  return h;
+}
+
+/* Works like strdup, sort of. Save a string in malloced memory, but
+** keep strings in a table so that the same string is not in more
+** than one place.
+*/
+const char *Strsafe(const char *y)
+{
+  const char *z;   /* The interned copy to return */
+  char *cpy;       /* Freshly malloced copy, if y was not seen before */
+
+  if( y==0 ) return 0;
+  z = Strsafe_find(y);
+  /* Not interned yet: copy the string and register it in the table so
+  ** future calls with an equal string return this same pointer. */
+  if( z==0 && (cpy=(char *)malloc( lemonStrlen(y)+1 ))!=0 ){
+    lemon_strcpy(cpy,y);
+    z = cpy;
+    Strsafe_insert(z);
+  }
+  MemoryCheck(z);   /* Aborts if both lookup and allocation failed */
+  return z;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x1".
+*/
+struct s_x1 {
+  int size;               /* The number of available slots. */
+                          /*   Must be a power of 2 greater than or */
+                          /*   equal to 1 */
+  int count;              /* Number of slots currently filled */
+  struct s_x1node *tbl;  /* The data stored here */
+  struct s_x1node **ht;  /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x1".
+*/
+typedef struct s_x1node {
+  const char *data;        /* The data */
+  struct s_x1node *next;   /* Next entry with the same hash */
+  struct s_x1node **from;  /* Previous link (for O(1) unlinking) */
+} x1node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x1 *x1a;
+
+/* Allocate a new associative array */
+void Strsafe_init(void){
+  if( x1a ) return;   /* Idempotent: already initialized */
+  x1a = (struct s_x1*)malloc( sizeof(struct s_x1) );
+  if( x1a ){
+    x1a->size = 1024;
+    x1a->count = 0;
+    /* One allocation holds both arrays: tbl[0..1023] followed by
+    ** the ht[] bucket pointers. */
+    x1a->tbl = (x1node*)calloc(1024, sizeof(x1node) + sizeof(x1node*));
+    if( x1a->tbl==0 ){
+      free(x1a);
+      x1a = 0;
+    }else{
+      int i;
+      x1a->ht = (x1node**)&(x1a->tbl[1024]);
+      for(i=0; i<1024; i++) x1a->ht[i] = 0;
+    }
+  }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Strsafe_insert(const char *data)
+{
+  x1node *np;
+  unsigned h;    /* Bucket index: ph masked to the current table size */
+  unsigned ph;   /* Full hash of the key */
+
+  if( x1a==0 ) return 0;
+  ph = strhash(data);
+  h = ph & (x1a->size-1);   /* size is a power of 2, so mask == modulo */
+  np = x1a->ht[h];
+  while( np ){
+    if( strcmp(np->data,data)==0 ){
+      /* An existing entry with the same key is found. */
+      /* Fail because overwrite is not allowed. */
+      return 0;
+    }
+    np = np->next;
+  }
+  if( x1a->count>=x1a->size ){
+    /* Need to make the hash table bigger: double the size and rehash
+    ** every existing entry into the new bucket array. */
+    int i,arrSize;
+    struct s_x1 array;
+    array.size = arrSize = x1a->size*2;
+    array.count = x1a->count;
+    array.tbl = (x1node*)calloc(arrSize, sizeof(x1node) + sizeof(x1node*));
+    if( array.tbl==0 ) return 0;  /* Fail due to malloc failure */
+    array.ht = (x1node**)&(array.tbl[arrSize]);
+    for(i=0; i<arrSize; i++) array.ht[i] = 0;
+    for(i=0; i<x1a->count; i++){
+      x1node *oldnp, *newnp;
+      oldnp = &(x1a->tbl[i]);
+      h = strhash(oldnp->data) & (arrSize-1);
+      newnp = &(array.tbl[i]);
+      if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+      newnp->next = array.ht[h];
+      newnp->data = oldnp->data;
+      newnp->from = &(array.ht[h]);
+      array.ht[h] = newnp;
+    }
+    /* free(x1a->tbl); // This program was originally for 16-bit machines.
+    ** Don't worry about freeing memory on modern platforms. */
+    *x1a = array;
+  }
+  /* Insert the new data at the head of its bucket chain */
+  h = ph & (x1a->size-1);
+  np = &(x1a->tbl[x1a->count++]);
+  np->data = data;
+  if( x1a->ht[h] ) x1a->ht[h]->from = &(np->next);
+  np->next = x1a->ht[h];
+  x1a->ht[h] = np;
+  np->from = &(x1a->ht[h]);
+  return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+const char *Strsafe_find(const char *key)
+{
+  unsigned h;   /* Bucket index for the key */
+  x1node *np;
+
+  if( x1a==0 ) return 0;   /* Table never initialized */
+  h = strhash(key) & (x1a->size-1);
+  np = x1a->ht[h];
+  /* Walk the collision chain comparing full keys */
+  while( np ){
+    if( strcmp(np->data,key)==0 ) break;
+    np = np->next;
+  }
+  return np ? np->data : 0;
+}
+
+/* Return a pointer to the (terminal or nonterminal) symbol "x".
+** Create a new symbol if this is the first time "x" has been seen.
+*/
+struct symbol *Symbol_new(const char *x)
+{
+  struct symbol *sp;
+
+  sp = Symbol_find(x);
+  if( sp==0 ){
+    sp = (struct symbol *)calloc(1, sizeof(struct symbol) );
+    MemoryCheck(sp);
+    sp->name = Strsafe(x);
+    /* A leading upper-case letter marks a terminal (token); anything
+    ** else is a non-terminal. */
+    sp->type = ISUPPER(*x) ? TERMINAL : NONTERMINAL;
+    sp->rule = 0;
+    sp->fallback = 0;
+    sp->prec = -1;          /* -1 means no precedence assigned yet */
+    sp->assoc = UNK;
+    sp->firstset = 0;
+    sp->lambda = LEMON_FALSE;
+    sp->destructor = 0;
+    sp->destLineno = 0;
+    sp->datatype = 0;
+    sp->useCnt = 0;
+    Symbol_insert(sp,sp->name);
+  }
+  sp->useCnt++;   /* Every lookup counts as a use */
+  return sp;
+}
+
+/* Compare two symbols for sorting purposes. Return negative,
+** zero, or positive if a is less then, equal to, or greater
+** than b.
+**
+** Symbols that begin with upper case letters (terminals or tokens)
+** must sort before symbols that begin with lower case letters
+** (non-terminals). And MULTITERMINAL symbols (created using the
+** %token_class directive) must sort at the very end. Other than
+** that, the order does not matter.
+**
+** We find experimentally that leaving the symbols in their original
+** order (the order they appeared in the grammar file) gives the
+** smallest parser tables in SQLite.
+*/
+int Symbolcmpp(const void *_a, const void *_b)
+{
+  const struct symbol *a = *(const struct symbol **) _a;
+  const struct symbol *b = *(const struct symbol **) _b;
+  /* Rank 1: terminals (name starts upper-case, <= 'Z');
+  ** rank 2: non-terminals; rank 3: MULTITERMINAL symbols.
+  ** Within a rank, preserve the original index order. */
+  int i1 = a->type==MULTITERMINAL ? 3 : a->name[0]>'Z' ? 2 : 1;
+  int i2 = b->type==MULTITERMINAL ? 3 : b->name[0]>'Z' ? 2 : 1;
+  return i1==i2 ? a->index - b->index : i1 - i2;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x2".
+*/
+struct s_x2 {
+  int size;               /* The number of available slots. */
+                          /*   Must be a power of 2 greater than or */
+                          /*   equal to 1 */
+  int count;              /* Number of slots currently filled */
+  struct s_x2node *tbl;  /* The data stored here */
+  struct s_x2node **ht;  /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x2".
+*/
+typedef struct s_x2node {
+  struct symbol *data;     /* The data */
+  const char *key;         /* The key */
+  struct s_x2node *next;   /* Next entry with the same hash */
+  struct s_x2node **from;  /* Previous link (for O(1) unlinking) */
+} x2node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x2 *x2a;
+
+/* Allocate a new associative array */
+void Symbol_init(void){
+  if( x2a ) return;   /* Idempotent: already initialized */
+  x2a = (struct s_x2*)malloc( sizeof(struct s_x2) );
+  if( x2a ){
+    x2a->size = 128;
+    x2a->count = 0;
+    /* One allocation holds both arrays: tbl[0..127] followed by
+    ** the ht[] bucket pointers. */
+    x2a->tbl = (x2node*)calloc(128, sizeof(x2node) + sizeof(x2node*));
+    if( x2a->tbl==0 ){
+      free(x2a);
+      x2a = 0;
+    }else{
+      int i;
+      x2a->ht = (x2node**)&(x2a->tbl[128]);
+      for(i=0; i<128; i++) x2a->ht[i] = 0;
+    }
+  }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Symbol_insert(struct symbol *data, const char *key)
+{
+  x2node *np;
+  unsigned h;    /* Bucket index: ph masked to the current table size */
+  unsigned ph;   /* Full hash of the key */
+
+  if( x2a==0 ) return 0;
+  ph = strhash(key);
+  h = ph & (x2a->size-1);   /* size is a power of 2, so mask == modulo */
+  np = x2a->ht[h];
+  while( np ){
+    if( strcmp(np->key,key)==0 ){
+      /* An existing entry with the same key is found. */
+      /* Fail because overwrite is not allowed. */
+      return 0;
+    }
+    np = np->next;
+  }
+  if( x2a->count>=x2a->size ){
+    /* Need to make the hash table bigger: double the size and rehash
+    ** every existing entry into the new bucket array. */
+    int i,arrSize;
+    struct s_x2 array;
+    array.size = arrSize = x2a->size*2;
+    array.count = x2a->count;
+    array.tbl = (x2node*)calloc(arrSize, sizeof(x2node) + sizeof(x2node*));
+    if( array.tbl==0 ) return 0;  /* Fail due to malloc failure */
+    array.ht = (x2node**)&(array.tbl[arrSize]);
+    for(i=0; i<arrSize; i++) array.ht[i] = 0;
+    for(i=0; i<x2a->count; i++){
+      x2node *oldnp, *newnp;
+      oldnp = &(x2a->tbl[i]);
+      h = strhash(oldnp->key) & (arrSize-1);
+      newnp = &(array.tbl[i]);
+      if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+      newnp->next = array.ht[h];
+      newnp->key = oldnp->key;
+      newnp->data = oldnp->data;
+      newnp->from = &(array.ht[h]);
+      array.ht[h] = newnp;
+    }
+    /* free(x2a->tbl); // This program was originally written for 16-bit
+    ** machines. Don't worry about freeing this trivial amount of memory
+    ** on modern platforms. Just leak it. */
+    *x2a = array;
+  }
+  /* Insert the new data at the head of its bucket chain */
+  h = ph & (x2a->size-1);
+  np = &(x2a->tbl[x2a->count++]);
+  np->key = key;
+  np->data = data;
+  if( x2a->ht[h] ) x2a->ht[h]->from = &(np->next);
+  np->next = x2a->ht[h];
+  x2a->ht[h] = np;
+  np->from = &(x2a->ht[h]);
+  return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct symbol *Symbol_find(const char *key)
+{
+  unsigned h;   /* Bucket index for the key */
+  x2node *np;
+
+  if( x2a==0 ) return 0;   /* Table never initialized */
+  h = strhash(key) & (x2a->size-1);
+  np = x2a->ht[h];
+  /* Walk the collision chain comparing full keys */
+  while( np ){
+    if( strcmp(np->key,key)==0 ) break;
+    np = np->next;
+  }
+  return np ? np->data : 0;
+}
+
+/* Return the n-th data. Return NULL if n is out of range. */
+struct symbol *Symbol_Nth(int n)
+{
+  /* Entries are stored in insertion order in x2a->tbl[]; n is 1-based. */
+  if( x2a==0 ) return 0;               /* Table never initialized */
+  if( n<1 || n>x2a->count ) return 0;  /* Index out of range */
+  return x2a->tbl[n-1].data;
+}
+
+/* Return the size of the array */
+int Symbol_count()
+{
+  /* Number of symbols stored; 0 if the table was never initialized */
+  return x2a ? x2a->count : 0;
+}
+
+/* Return an array of pointers to all data in the table.
+** The array is obtained from malloc. Return NULL if memory allocation
+** problems, or if the array is empty. */
+struct symbol **Symbol_arrayof()
+{
+  struct symbol **array;
+  int i,arrSize;
+  if( x2a==0 ) return 0;
+  arrSize = x2a->count;
+  /* Caller owns the returned array (from calloc); the symbols themselves
+  ** remain owned by the table. */
+  array = (struct symbol **)calloc(arrSize, sizeof(struct symbol *));
+  if( array ){
+    for(i=0; i<arrSize; i++) array[i] = x2a->tbl[i].data;
+  }
+  return array;
+}
+
+/* Compare two configurations */
+int Configcmp(const char *_a,const char *_b)
+{
+  /* Order configurations by rule index, then by dot position.
+  ** Parameters are typed char* to fit a generic comparison callback. */
+  const struct config *a = (struct config *) _a;
+  const struct config *b = (struct config *) _b;
+  int x;
+  x = a->rp->index - b->rp->index;
+  if( x==0 ) x = a->dot - b->dot;
+  return x;
+}
+
+/* Compare two states */
+PRIVATE int statecmp(struct config *a, struct config *b)
+{
+  int rc;
+  /* Compare the two basis-config lists element by element (rule index,
+  ** then dot position), stopping at the first difference. */
+  for(rc=0; rc==0 && a && b;  a=a->bp, b=b->bp){
+    rc = a->rp->index - b->rp->index;
+    if( rc==0 ) rc = a->dot - b->dot;
+  }
+  /* If one list is a prefix of the other, the longer list sorts larger */
+  if( rc==0 ){
+    if( a ) rc = 1;
+    if( b ) rc = -1;
+  }
+  return rc;
+}
+
+/* Hash a state */
+PRIVATE unsigned statehash(struct config *a)
+{
+  /* Fold every basis configuration (rule index and dot position) into
+  ** a single multiplicative hash; relies on unsigned wrap-around. */
+  unsigned h=0;
+  while( a ){
+    h = h*571 + a->rp->index*37 + a->dot;
+    a = a->bp;
+  }
+  return h;
+}
+
+/* Allocate a new state structure */
+struct state *State_new()
+{
+  struct state *newstate;
+  /* Zero-initialized so all fields start as 0/NULL */
+  newstate = (struct state *)calloc(1, sizeof(struct state) );
+  MemoryCheck(newstate);   /* Aborts on allocation failure */
+  return newstate;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x3".
+*/
+struct s_x3 {
+  int size;               /* The number of available slots. */
+                          /*   Must be a power of 2 greater than or */
+                          /*   equal to 1 */
+  int count;              /* Number of slots currently filled */
+  struct s_x3node *tbl;  /* The data stored here */
+  struct s_x3node **ht;  /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x3".
+*/
+typedef struct s_x3node {
+  struct state *data;      /* The data */
+  struct config *key;      /* The key */
+  struct s_x3node *next;   /* Next entry with the same hash */
+  struct s_x3node **from;  /* Previous link (for O(1) unlinking) */
+} x3node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x3 *x3a;
+
+/* Allocate a new associative array */
+void State_init(void){
+  if( x3a ) return;   /* Idempotent: already initialized */
+  x3a = (struct s_x3*)malloc( sizeof(struct s_x3) );
+  if( x3a ){
+    x3a->size = 128;
+    x3a->count = 0;
+    /* One allocation holds both arrays: tbl[0..127] followed by
+    ** the ht[] bucket pointers. */
+    x3a->tbl = (x3node*)calloc(128, sizeof(x3node) + sizeof(x3node*));
+    if( x3a->tbl==0 ){
+      free(x3a);
+      x3a = 0;
+    }else{
+      int i;
+      x3a->ht = (x3node**)&(x3a->tbl[128]);
+      for(i=0; i<128; i++) x3a->ht[i] = 0;
+    }
+  }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int State_insert(struct state *data, struct config *key)
+{
+  x3node *np;
+  unsigned h;    /* Bucket index: ph masked to the current table size */
+  unsigned ph;   /* Full hash of the key */
+
+  if( x3a==0 ) return 0;
+  ph = statehash(key);
+  h = ph & (x3a->size-1);   /* size is a power of 2, so mask == modulo */
+  np = x3a->ht[h];
+  while( np ){
+    if( statecmp(np->key,key)==0 ){
+      /* An existing entry with the same key is found. */
+      /* Fail because overwrite is not allowed. */
+      return 0;
+    }
+    np = np->next;
+  }
+  if( x3a->count>=x3a->size ){
+    /* Need to make the hash table bigger: double the size and rehash
+    ** every existing entry into the new bucket array. */
+    int i,arrSize;
+    struct s_x3 array;
+    array.size = arrSize = x3a->size*2;
+    array.count = x3a->count;
+    array.tbl = (x3node*)calloc(arrSize, sizeof(x3node) + sizeof(x3node*));
+    if( array.tbl==0 ) return 0;  /* Fail due to malloc failure */
+    array.ht = (x3node**)&(array.tbl[arrSize]);
+    for(i=0; i<arrSize; i++) array.ht[i] = 0;
+    for(i=0; i<x3a->count; i++){
+      x3node *oldnp, *newnp;
+      oldnp = &(x3a->tbl[i]);
+      h = statehash(oldnp->key) & (arrSize-1);
+      newnp = &(array.tbl[i]);
+      if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+      newnp->next = array.ht[h];
+      newnp->key = oldnp->key;
+      newnp->data = oldnp->data;
+      newnp->from = &(array.ht[h]);
+      array.ht[h] = newnp;
+    }
+    /* Unlike the string/symbol/config tables, the old array is actually
+    ** freed here after the rehash. */
+    free(x3a->tbl);
+    *x3a = array;
+  }
+  /* Insert the new data at the head of its bucket chain */
+  h = ph & (x3a->size-1);
+  np = &(x3a->tbl[x3a->count++]);
+  np->key = key;
+  np->data = data;
+  if( x3a->ht[h] ) x3a->ht[h]->from = &(np->next);
+  np->next = x3a->ht[h];
+  x3a->ht[h] = np;
+  np->from = &(x3a->ht[h]);
+  return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct state *State_find(struct config *key)
+{
+  unsigned h;   /* Bucket index for the key */
+  x3node *np;
+
+  if( x3a==0 ) return 0;   /* Table never initialized */
+  h = statehash(key) & (x3a->size-1);
+  np = x3a->ht[h];
+  /* Walk the collision chain comparing full basis-config lists */
+  while( np ){
+    if( statecmp(np->key,key)==0 ) break;
+    np = np->next;
+  }
+  return np ? np->data : 0;
+}
+
+/* Return an array of pointers to all data in the table.
+** The array is obtained from malloc. Return NULL if memory allocation
+** problems, or if the array is empty. */
+struct state **State_arrayof(void)
+{
+  struct state **array;
+  int i,arrSize;
+  if( x3a==0 ) return 0;
+  arrSize = x3a->count;
+  /* Caller owns the returned array (from calloc); the states themselves
+  ** remain owned by the table. */
+  array = (struct state **)calloc(arrSize, sizeof(struct state *));
+  if( array ){
+    for(i=0; i<arrSize; i++) array[i] = x3a->tbl[i].data;
+  }
+  return array;
+}
+
+/* Hash a configuration */
+PRIVATE unsigned confighash(struct config *a)
+{
+  /* Hash a single configuration from its rule index and dot position.
+  ** Written in the same form as statehash() but for one element only. */
+  unsigned h=0;
+  h = h*571 + a->rp->index*37 + a->dot;
+  return h;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x4".
+*/
+struct s_x4 {
+  int size;               /* The number of available slots. */
+                          /*   Must be a power of 2 greater than or */
+                          /*   equal to 1 */
+  int count;              /* Number of slots currently filled */
+  struct s_x4node *tbl;  /* The data stored here */
+  struct s_x4node **ht;  /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x4".
+*/
+typedef struct s_x4node {
+  struct config *data;     /* The data (also acts as the key) */
+  struct s_x4node *next;   /* Next entry with the same hash */
+  struct s_x4node **from;  /* Previous link (for O(1) unlinking) */
+} x4node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x4 *x4a;
+
+/* Allocate a new associative array */
+void Configtable_init(void){
+  if( x4a ) return;   /* Idempotent: already initialized */
+  x4a = (struct s_x4*)malloc( sizeof(struct s_x4) );
+  if( x4a ){
+    x4a->size = 64;
+    x4a->count = 0;
+    /* One allocation holds both arrays: tbl[0..63] followed by
+    ** the ht[] bucket pointers. */
+    x4a->tbl = (x4node*)calloc(64, sizeof(x4node) + sizeof(x4node*));
+    if( x4a->tbl==0 ){
+      free(x4a);
+      x4a = 0;
+    }else{
+      int i;
+      x4a->ht = (x4node**)&(x4a->tbl[64]);
+      for(i=0; i<64; i++) x4a->ht[i] = 0;
+    }
+  }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Configtable_insert(struct config *data)
+{
+  x4node *np;
+  unsigned h;    /* Bucket index: ph masked to the current table size */
+  unsigned ph;   /* Full hash of the key */
+
+  if( x4a==0 ) return 0;
+  ph = confighash(data);
+  h = ph & (x4a->size-1);   /* size is a power of 2, so mask == modulo */
+  np = x4a->ht[h];
+  while( np ){
+    if( Configcmp((const char *) np->data,(const char *) data)==0 ){
+      /* An existing entry with the same key is found. */
+      /* Fail because overwrite is not allowed. */
+      return 0;
+    }
+    np = np->next;
+  }
+  if( x4a->count>=x4a->size ){
+    /* Need to make the hash table bigger: double the size and rehash
+    ** every existing entry into the new bucket array. */
+    int i,arrSize;
+    struct s_x4 array;
+    array.size = arrSize = x4a->size*2;
+    array.count = x4a->count;
+    array.tbl = (x4node*)calloc(arrSize, sizeof(x4node) + sizeof(x4node*));
+    if( array.tbl==0 ) return 0;  /* Fail due to malloc failure */
+    array.ht = (x4node**)&(array.tbl[arrSize]);
+    for(i=0; i<arrSize; i++) array.ht[i] = 0;
+    for(i=0; i<x4a->count; i++){
+      x4node *oldnp, *newnp;
+      oldnp = &(x4a->tbl[i]);
+      h = confighash(oldnp->data) & (arrSize-1);
+      newnp = &(array.tbl[i]);
+      if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+      newnp->next = array.ht[h];
+      newnp->data = oldnp->data;
+      newnp->from = &(array.ht[h]);
+      array.ht[h] = newnp;
+    }
+    /* free(x4a->tbl); // This code was originally written for 16-bit machines.
+    ** On modern machines, don't worry about freeing this trivial amount of
+    ** memory. */
+    *x4a = array;
+  }
+  /* Insert the new data at the head of its bucket chain */
+  h = ph & (x4a->size-1);
+  np = &(x4a->tbl[x4a->count++]);
+  np->data = data;
+  if( x4a->ht[h] ) x4a->ht[h]->from = &(np->next);
+  np->next = x4a->ht[h];
+  x4a->ht[h] = np;
+  np->from = &(x4a->ht[h]);
+  return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct config *Configtable_find(struct config *key)
+{
+  /* Bucket index.  Declared unsigned to match confighash()'s return type
+  ** and the "unsigned h" used by the sibling *_find() routines (was
+  ** "int"; the mask keeps the value in range either way, but the signed
+  ** declaration invited an implicit unsigned->signed conversion). */
+  unsigned h;
+  x4node *np;
+
+  if( x4a==0 ) return 0;   /* Table never initialized */
+  h = confighash(key) & (x4a->size-1);
+  np = x4a->ht[h];
+  /* Walk the collision chain comparing full configurations */
+  while( np ){
+    if( Configcmp((const char *) np->data,(const char *) key)==0 ) break;
+    np = np->next;
+  }
+  return np ? np->data : 0;
+}
+
+/* Remove all data from the table. Pass each data to the function "f"
+** as it is removed. ("f" may be null to avoid this step.) */
+void Configtable_clear(int(*f)(struct config *))
+{
+  int i;
+  if( x4a==0 || x4a->count==0 ) return;   /* Nothing to clear */
+  /* Hand each stored configuration to the callback before dropping it */
+  if( f ) for(i=0; i<x4a->count; i++) (*f)(x4a->tbl[i].data);
+  /* Empty all buckets and reset the count; the arrays are reused */
+  for(i=0; i<x4a->size; i++) x4a->ht[i] = 0;
+  x4a->count = 0;
+  return;
+}
diff --git a/tools/lemon/lempar.c b/tools/lemon/lempar.c
new file mode 100644
index 0000000..fcb72b8
--- /dev/null
+++ b/tools/lemon/lempar.c
@@ -0,0 +1,1068 @@
+/*
+** 2000-05-29
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Driver template for the LEMON parser generator.
+**
+** The "lemon" program processes an LALR(1) input grammar file, then uses
+** this template to construct a parser. The "lemon" program inserts text
+** at each "%%" line. Also, any "P-a-r-s-e" identifier prefix (without the
+** interstitial "-" characters) contained in this template is changed into
+** the value of the %name directive from the grammar. Otherwise, the content
+** of this template is copied straight through into the generated parser
+** source file.
+**
+** The following is the concatenation of all %include directives from the
+** input grammar file:
+*/
+/************ Begin %include sections from the grammar ************************/
+%%
+/**************** End of %include directives **********************************/
+/* These constants specify the various numeric values for terminal symbols.
+***************** Begin token definitions *************************************/
+%%
+/**************** End token definitions ***************************************/
+
+/* The next section is a series of control #defines that determine
+** various aspects of the generated parser.
+** YYCODETYPE is the data type used to store the integer codes
+** that represent terminal and non-terminal symbols.
+** "unsigned char" is used if there are fewer than
+** 256 symbols. Larger types otherwise.
+** YYNOCODE is a number of type YYCODETYPE that is not used for
+** any terminal or nonterminal symbol.
+** YYFALLBACK If defined, this indicates that one or more tokens
+** (also known as: "terminal symbols") have fall-back
+** values which should be used if the original symbol
+** would not parse. This permits keywords to sometimes
+** be used as identifiers, for example.
+** YYACTIONTYPE is the data type used for "action codes" - numbers
+** that indicate what to do in response to the next
+** token.
+** ParseTOKENTYPE is the data type used for minor type for terminal
+** symbols. Background: A "minor type" is a semantic
+** value associated with a terminal or non-terminal
+** symbols. For example, for an "ID" terminal symbol,
+** the minor type might be the name of the identifier.
+** Each non-terminal can have a different minor type.
+** Terminal symbols all have the same minor type, though.
+** This macros defines the minor type for terminal
+** symbols.
+** YYMINORTYPE is the data type used for all minor types.
+** This is typically a union of many types, one of
+** which is ParseTOKENTYPE. The entry in the union
+** for terminal symbols is called "yy0".
+** YYSTACKDEPTH is the maximum depth of the parser's stack. If
+** zero the stack is dynamically sized using realloc()
+** ParseARG_SDECL A static variable declaration for the %extra_argument
+** ParseARG_PDECL A parameter declaration for the %extra_argument
+** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter
+** ParseARG_STORE Code to store %extra_argument into yypParser
+** ParseARG_FETCH Code to extract %extra_argument from yypParser
+** ParseCTX_* As ParseARG_ except for %extra_context
+** YYERRORSYMBOL is the code number of the error symbol. If not
+** defined, then do no error processing.
+** YYNSTATE the combined number of states.
+** YYNRULE the number of rules in the grammar
+** YYNTOKEN Number of terminal symbols
+** YY_MAX_SHIFT Maximum value for shift actions
+** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions
+** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions
+** YY_ERROR_ACTION The yy_action[] code for syntax error
+** YY_ACCEPT_ACTION The yy_action[] code for accept
+** YY_NO_ACTION The yy_action[] code for no-op
+** YY_MIN_REDUCE Minimum value for reduce actions
+** YY_MAX_REDUCE Maximum value for reduce actions
+*/
+#ifndef INTERFACE
+# define INTERFACE 1
+#endif
+/************* Begin control #defines *****************************************/
+%%
+/************* End control #defines *******************************************/
+#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
+
+/* Define the yytestcase() macro to be a no-op if is not already defined
+** otherwise.
+**
+** Applications can choose to define yytestcase() in the %include section
+** to a macro that can assist in verifying code coverage. For production
+** code the yytestcase() macro should be turned off. But it is useful
+** for testing.
+*/
+#ifndef yytestcase
+# define yytestcase(X)
+#endif
+
+
+/* Next are the tables used to determine what action to take based on the
+** current state and lookahead token. These tables are used to implement
+** functions that take a state number and lookahead value and return an
+** action integer.
+**
+** Suppose the action integer is N. Then the action is determined as
+** follows
+**
+** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead
+** token onto the stack and goto state N.
+**
+** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then
+** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE.
+**
+** N == YY_ERROR_ACTION A syntax error has occurred.
+**
+** N == YY_ACCEPT_ACTION The parser accepts its input.
+**
+** N == YY_NO_ACTION No such action. Denotes unused
+** slots in the yy_action[] table.
+**
+** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE
+** and YY_MAX_REDUCE
+**
+** The action table is constructed as a single large table named yy_action[].
+** Given state S and lookahead X, the action is computed as either:
+**
+** (A) N = yy_action[ yy_shift_ofst[S] + X ]
+** (B) N = yy_default[S]
+**
+** The (A) formula is preferred. The B formula is used instead if
+** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X.
+**
+** The formulas above are for computing the action when the lookahead is
+** a terminal symbol. If the lookahead is a non-terminal (as occurs after
+** a reduce action) then the yy_reduce_ofst[] array is used in place of
+** the yy_shift_ofst[] array.
+**
+** The following are the tables generated in this section:
+**
+** yy_action[] A single table containing all actions.
+** yy_lookahead[] A table containing the lookahead for each entry in
+** yy_action. Used to detect hash collisions.
+** yy_shift_ofst[] For each state, the offset into yy_action for
+** shifting terminals.
+** yy_reduce_ofst[] For each state, the offset into yy_action for
+** shifting non-terminals after a reduce.
+** yy_default[] Default action for each state.
+**
+*********** Begin parsing tables **********************************************/
+%%
+/********** End of lemon-generated parsing tables *****************************/
+
+/* The next table maps tokens (terminal symbols) into fallback tokens.
+** If a construct like the following:
+**
+** %fallback ID X Y Z.
+**
+** appears in the grammar, then ID becomes a fallback token for X, Y,
+** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
+** but it does not parse, the type of the token is changed to ID and
+** the parse is retried before an error is thrown.
+**
+** This feature can be used, for example, to cause some keywords in a language
+** to revert to identifiers if the keyword does not apply in the context where
+** it appears.
+*/
+#ifdef YYFALLBACK
+static const YYCODETYPE yyFallback[] = {
+%%
+};
+#endif /* YYFALLBACK */
+
+/* The following structure represents a single element of the
+** parser's stack. Information stored includes:
+**
+** + The state number for the parser at this level of the stack.
+**
+** + The value of the token stored at this level of the stack.
+** (In other words, the "major" token.)
+**
+** + The semantic value stored at this level of the stack. This is
+** the information used by the action routines in the grammar.
+** It is sometimes called the "minor" token.
+**
+** After the "shift" half of a SHIFTREDUCE action, the stateno field
+** actually contains the reduce action for the second half of the
+** SHIFTREDUCE.
+*/
+struct yyStackEntry {
+ YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */
+ YYCODETYPE major; /* The major token value. This is the code
+ ** number for the token at this stack level */
+ YYMINORTYPE minor; /* The user-supplied minor token value. This
+ ** is the value of the token */
+};
+/* Shorthand so the rest of the file can omit the "struct" keyword. */
+typedef struct yyStackEntry yyStackEntry;
+
+/* The state of the parser is completely contained in an instance of
+** the following structure */
+struct yyParser {
+ yyStackEntry *yytos; /* Pointer to top element of the stack */
+#ifdef YYTRACKMAXSTACKDEPTH
+ int yyhwm; /* High-water mark of the stack */
+#endif
+#ifndef YYNOERRORRECOVERY
+ int yyerrcnt; /* Shifts left before leaving error-recovery mode */
+#endif
+ ParseARG_SDECL /* A place to hold %extra_argument */
+ ParseCTX_SDECL /* A place to hold %extra_context */
+#if YYSTACKDEPTH<=0
+ int yystksz; /* Current size of the stack */
+ yyStackEntry *yystack; /* The parser's stack */
+ yyStackEntry yystk0; /* First stack entry */
+#else
+ yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */
+ yyStackEntry *yystackEnd; /* Last entry in the stack */
+#endif
+};
+typedef struct yyParser yyParser;
+
+#include <assert.h>
+#ifndef NDEBUG
+#include <stdio.h>
+static FILE *yyTraceFILE = 0;
+static char *yyTracePrompt = 0;
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Turn parser tracing on by giving a stream to which to write the trace
+** and a prompt to preface each trace message. Tracing is turned off
+** by making either argument NULL
+**
+** Inputs:
+** <ul>
+** <li> A FILE* to which trace output should be written.
+** If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+** line of trace output. If NULL, then tracing is
+** turned off.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+void ParseTrace(FILE *TraceFILE, char *zTracePrompt){
+ yyTraceFILE = TraceFILE;
+ yyTracePrompt = zTracePrompt;
+ /* Tracing requires both a stream and a prompt; a NULL for either
+ ** disables tracing entirely. */
+ if( yyTraceFILE==0 ) yyTracePrompt = 0;
+ else if( yyTracePrompt==0 ) yyTraceFILE = 0;
+}
+#endif /* NDEBUG */
+
+#if defined(YYCOVERAGE) || !defined(NDEBUG)
+/* For tracing shifts, the names of all terminals and nonterminals
+** are required. The following table supplies these names */
+static const char *const yyTokenName[] = {
+%%
+};
+#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
+
+#ifndef NDEBUG
+/* For tracing reduce actions, the names of all rules are required.
+*/
+static const char *const yyRuleName[] = {
+%%
+};
+#endif /* NDEBUG */
+
+
+#if YYSTACKDEPTH<=0
+/*
+** Try to increase the size of the parser stack. Return the number
+** of errors. Return 0 on success.
+*/
+static int yyGrowStack(yyParser *p){
+ int newSize;
+ int idx;
+ yyStackEntry *pNew;
+
+ newSize = p->yystksz*2 + 100;
+ /* Remember the current stack depth so yytos can be re-based after the
+ ** stack moves in memory. */
+ idx = p->yytos ? (int)(p->yytos - p->yystack) : 0;
+ if( p->yystack==&p->yystk0 ){
+ /* The stack still lives in the embedded one-entry buffer yystk0;
+ ** switch to a heap allocation and copy that first entry over. */
+ pNew = malloc(newSize*sizeof(pNew[0]));
+ if( pNew ) pNew[0] = p->yystk0;
+ }else{
+ pNew = realloc(p->yystack, newSize*sizeof(pNew[0]));
+ }
+ if( pNew ){
+ p->yystack = pNew;
+ p->yytos = &p->yystack[idx];
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n",
+ yyTracePrompt, p->yystksz, newSize);
+ }
+#endif
+ p->yystksz = newSize;
+ }
+ /* Nonzero (error) only when the allocation failed. */
+ return pNew==0;
+}
+#endif
+
+/* Datatype of the argument to the memory allocator passed as the
+** second argument to ParseAlloc() below. This can be changed by
+** putting an appropriate #define in the %include section of the input
+** grammar.
+*/
+#ifndef YYMALLOCARGTYPE
+# define YYMALLOCARGTYPE size_t
+#endif
+
+/* Initialize a new parser that has already been allocated.
+*/
+void ParseInit(void *yypRawParser ParseCTX_PDECL){
+ yyParser *yypParser = (yyParser*)yypRawParser;
+ ParseCTX_STORE
+#ifdef YYTRACKMAXSTACKDEPTH
+ yypParser->yyhwm = 0;
+#endif
+#if YYSTACKDEPTH<=0
+ yypParser->yytos = NULL;
+ yypParser->yystack = NULL;
+ yypParser->yystksz = 0;
+ if( yyGrowStack(yypParser) ){
+ /* Initial heap allocation failed: fall back to the embedded
+ ** one-entry stack so the parser is still usable. */
+ yypParser->yystack = &yypParser->yystk0;
+ yypParser->yystksz = 1;
+ }
+#endif
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt = -1;
+#endif
+ /* Stack slot 0 is a sentinel holding the start state. */
+ yypParser->yytos = yypParser->yystack;
+ yypParser->yystack[0].stateno = 0;
+ yypParser->yystack[0].major = 0;
+#if YYSTACKDEPTH>0
+ yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1];
+#endif
+}
+
+#ifndef Parse_ENGINEALWAYSONSTACK
+/*
+** This function allocates a new parser.
+** The only argument is a pointer to a function which works like
+** malloc.
+**
+** Inputs:
+** A pointer to the function used to allocate memory.
+**
+** Outputs:
+** A pointer to a parser. This pointer is used in subsequent calls
+** to Parse and ParseFree.
+*/
+void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){
+ yyParser *yypParser;
+ yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) );
+ if( yypParser ){
+ /* Capture the %extra_context, then delegate the rest to ParseInit. */
+ ParseCTX_STORE
+ ParseInit(yypParser ParseCTX_PARAM);
+ }
+ /* NULL on allocation failure. */
+ return (void*)yypParser;
+}
+#endif /* Parse_ENGINEALWAYSONSTACK */
+
+
+/* The following function deletes the "minor type" or semantic value
+** associated with a symbol. The symbol can be either a terminal
+** or nonterminal. "yymajor" is the symbol code, and "yypminor" is
+** a pointer to the value to be deleted. The code used to do the
+** deletions is derived from the %destructor and/or %token_destructor
+** directives of the input grammar.
+*/
+static void yy_destructor(
+ yyParser *yypParser, /* The parser */
+ YYCODETYPE yymajor, /* Type code for object to destroy */
+ YYMINORTYPE *yypminor /* The object to be destroyed */
+){
+ ParseARG_FETCH
+ ParseCTX_FETCH
+ switch( yymajor ){
+ /* Here is inserted the actions which take place when a
+ ** terminal or non-terminal is destroyed. This can happen
+ ** when the symbol is popped from the stack during a
+ ** reduce or during error processing or when a parser is
+ ** being destroyed before it is finished parsing.
+ **
+ ** Note: during a reduce, the only symbols destroyed are those
+ ** which appear on the RHS of the rule, but which are *not* used
+ ** inside the C code.
+ */
+/********* Begin destructor definitions ***************************************/
+/* The "%%" below is replaced by lemon with case labels generated from the
+** grammar's %destructor and %token_destructor directives. */
+%%
+/********* End destructor definitions *****************************************/
+ default: break; /* If no destructor action specified: do nothing */
+ }
+}
+
+/*
+** Pop the parser's stack once.
+**
+** If there is a destructor routine associated with the token which
+** is popped from the stack, then call it.
+*/
+static void yy_pop_parser_stack(yyParser *pParser){
+ yyStackEntry *yytos;
+ /* Caller must never pop the slot-0 sentinel entry. */
+ assert( pParser->yytos!=0 );
+ assert( pParser->yytos > pParser->yystack );
+ yytos = pParser->yytos--;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sPopping %s\n",
+ yyTracePrompt,
+ yyTokenName[yytos->major]);
+ }
+#endif
+ yy_destructor(pParser, yytos->major, &yytos->minor);
+}
+
+/*
+** Clear all secondary memory allocations from the parser
+*/
+void ParseFinalize(void *p){
+ yyParser *pParser = (yyParser*)p;
+ /* Run destructors for everything still on the stack. */
+ while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser);
+#if YYSTACKDEPTH<=0
+ /* The stack is heap-allocated unless it is still the embedded yystk0. */
+ if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack);
+#endif
+}
+
+#ifndef Parse_ENGINEALWAYSONSTACK
+/*
+** Deallocate and destroy a parser. Destructors are called for
+** all stack elements before shutting the parser down.
+**
+** If the YYPARSEFREENEVERNULL macro exists (for example because it
+** is defined in a %include section of the input grammar) then it is
+** assumed that the input pointer is never NULL.
+*/
+void ParseFree(
+ void *p, /* The parser to be deleted */
+ void (*freeProc)(void*) /* Function used to reclaim memory */
+){
+#ifndef YYPARSEFREENEVERNULL
+ if( p==0 ) return;
+#endif
+ /* Pop and destruct all remaining stack entries, then release the object
+ ** with the caller-supplied deallocator (the mirror of ParseAlloc). */
+ ParseFinalize(p);
+ (*freeProc)(p);
+}
+#endif /* Parse_ENGINEALWAYSONSTACK */
+
+/*
+** Return the peak depth of the stack for a parser.
+*/
+#ifdef YYTRACKMAXSTACKDEPTH
+int ParseStackPeak(void *p){
+ yyParser *pParser = (yyParser*)p;
+ /* yyhwm is maintained by yy_shift/yy_reduce; only available when
+ ** YYTRACKMAXSTACKDEPTH is defined. */
+ return pParser->yyhwm;
+}
+#endif
+
+/* This array of booleans keeps track of the parser statement
+** coverage. The element yycoverage[X][Y] is set when the parser
+** is in state X and has a lookahead token Y. In a well-tested
+** system, every element of this matrix should end up being set.
+*/
+#if defined(YYCOVERAGE)
+static unsigned char yycoverage[YYNSTATE][YYNTOKEN];
+#endif
+
+/*
+** Write into out a description of every state/lookahead combination that
+**
+** (1) has not been used by the parser, and
+** (2) is not a syntax error.
+**
+** Return the number of missed state/lookahead combinations.
+*/
+#if defined(YYCOVERAGE)
+int ParseCoverage(FILE *out){
+ int stateno, iLookAhead, i;
+ int nMissed = 0;
+ for(stateno=0; stateno<YYNSTATE; stateno++){
+ i = yy_shift_ofst[stateno];
+ for(iLookAhead=0; iLookAhead<YYNTOKEN; iLookAhead++){
+ /* Only state/lookahead pairs with a real action (not a hash miss)
+ ** are candidates for coverage. */
+ if( yy_lookahead[i+iLookAhead]!=iLookAhead ) continue;
+ if( yycoverage[stateno][iLookAhead]==0 ) nMissed++;
+ if( out ){
+ fprintf(out,"State %d lookahead %s %s\n", stateno,
+ yyTokenName[iLookAhead],
+ yycoverage[stateno][iLookAhead] ? "ok" : "missed");
+ }
+ }
+ }
+ return nMissed;
+}
+#endif
+
+/*
+** Find the appropriate action for a parser given the terminal
+** look-ahead token iLookAhead.
+*/
+static YYACTIONTYPE yy_find_shift_action(
+ YYCODETYPE iLookAhead, /* The look-ahead token */
+ YYACTIONTYPE stateno /* Current state number */
+){
+ int i;
+
+ /* Values above YY_MAX_SHIFT already encode the action directly. */
+ if( stateno>YY_MAX_SHIFT ) return stateno;
+ assert( stateno <= YY_SHIFT_COUNT );
+#if defined(YYCOVERAGE)
+ yycoverage[stateno][iLookAhead] = 1;
+#endif
+ /* Loop only repeats when a %fallback token substitution occurs. */
+ do{
+ i = yy_shift_ofst[stateno];
+ assert( i>=0 );
+ assert( i<=YY_ACTTAB_COUNT );
+ assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD );
+ assert( iLookAhead!=YYNOCODE );
+ assert( iLookAhead < YYNTOKEN );
+ i += iLookAhead;
+ assert( i<(int)YY_NLOOKAHEAD );
+ if( yy_lookahead[i]!=iLookAhead ){
+#ifdef YYFALLBACK
+ YYCODETYPE iFallback; /* Fallback token */
+ assert( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0]) );
+ iFallback = yyFallback[iLookAhead];
+ if( iFallback!=0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
+ }
+#endif
+ assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+ iLookAhead = iFallback;
+ continue;
+ }
+#endif
+#ifdef YYWILDCARD
+ {
+ int j = i - iLookAhead + YYWILDCARD;
+ assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) );
+ if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead],
+ yyTokenName[YYWILDCARD]);
+ }
+#endif /* NDEBUG */
+ return yy_action[j];
+ }
+ }
+#endif /* YYWILDCARD */
+ /* Hash miss with no fallback/wildcard: use the state's default action. */
+ return yy_default[stateno];
+ }else{
+ assert( i>=0 && i<(int)(sizeof(yy_action)/sizeof(yy_action[0])) );
+ return yy_action[i];
+ }
+ }while(1);
+}
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+*/
+static YYACTIONTYPE yy_find_reduce_action(
+ YYACTIONTYPE stateno, /* Current state number */
+ YYCODETYPE iLookAhead /* The look-ahead token */
+){
+ int i;
+ /* With an error symbol defined, out-of-range lookups fall back to the
+ ** default action; otherwise they cannot occur and are asserted away. */
+#ifdef YYERRORSYMBOL
+ if( stateno>YY_REDUCE_COUNT ){
+ return yy_default[stateno];
+ }
+#else
+ assert( stateno<=YY_REDUCE_COUNT );
+#endif
+ i = yy_reduce_ofst[stateno];
+ assert( iLookAhead!=YYNOCODE );
+ i += iLookAhead;
+#ifdef YYERRORSYMBOL
+ if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
+ return yy_default[stateno];
+ }
+#else
+ assert( i>=0 && i<YY_ACTTAB_COUNT );
+ assert( yy_lookahead[i]==iLookAhead );
+#endif
+ return yy_action[i];
+}
+
+/*
+** The following routine is called if the stack overflows.
+*/
+static void yyStackOverflow(yyParser *yypParser){
+ ParseARG_FETCH
+ ParseCTX_FETCH
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
+ }
+#endif
+ /* Unwind the whole stack, running destructors for each entry. */
+ while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will execute if the parser
+ ** stack ever overflows */
+/******** Begin %stack_overflow code ******************************************/
+%%
+/******** End %stack_overflow code ********************************************/
+ ParseARG_STORE /* Suppress warning about unused %extra_argument var */
+ ParseCTX_STORE
+}
+
+/*
+** Print tracing information for a SHIFT action
+*/
+#ifndef NDEBUG
+static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){
+ if( yyTraceFILE ){
+ /* States below YYNSTATE are plain shifts; larger values encode a
+ ** pending reduce (the second half of a SHIFTREDUCE action). */
+ if( yyNewState<YYNSTATE ){
+ fprintf(yyTraceFILE,"%s%s '%s', go to state %d\n",
+ yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
+ yyNewState);
+ }else{
+ fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n",
+ yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
+ yyNewState - YY_MIN_REDUCE);
+ }
+ }
+}
+#else
+# define yyTraceShift(X,Y,Z)
+#endif
+
+/*
+** Perform a shift action.
+*/
+static void yy_shift(
+ yyParser *yypParser, /* The parser to be shifted */
+ YYACTIONTYPE yyNewState, /* The new state to shift in */
+ YYCODETYPE yyMajor, /* The major token to shift in */
+ ParseTOKENTYPE yyMinor /* The minor token to shift in */
+){
+ yyStackEntry *yytos;
+ yypParser->yytos++;
+#ifdef YYTRACKMAXSTACKDEPTH
+ if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+ yypParser->yyhwm++;
+ assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
+ }
+#endif
+#if YYSTACKDEPTH>0
+ /* Fixed-size stack: overflow is fatal for this parse. */
+ if( yypParser->yytos>yypParser->yystackEnd ){
+ yypParser->yytos--;
+ yyStackOverflow(yypParser);
+ return;
+ }
+#else
+ /* Dynamically sized stack: try to grow before declaring overflow. */
+ if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
+ if( yyGrowStack(yypParser) ){
+ yypParser->yytos--;
+ yyStackOverflow(yypParser);
+ return;
+ }
+ }
+#endif
+ /* For the shift half of a SHIFTREDUCE action, stateno stores the
+ ** pending reduce action instead of a state number. */
+ if( yyNewState > YY_MAX_SHIFT ){
+ yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
+ }
+ yytos = yypParser->yytos;
+ yytos->stateno = yyNewState;
+ yytos->major = yyMajor;
+ yytos->minor.yy0 = yyMinor;
+ yyTraceShift(yypParser, yyNewState, "Shift");
+}
+
+/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side
+** of that rule */
+static const YYCODETYPE yyRuleInfoLhs[] = {
+%%
+};
+
+/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
+** of symbols on the right-hand side of that rule. */
+static const signed char yyRuleInfoNRhs[] = {
+%%
+};
+
+static void yy_accept(yyParser*); /* Forward Declaration */
+
+/*
+** Perform a reduce action and the shift that must immediately
+** follow the reduce.
+**
+** The yyLookahead and yyLookaheadToken parameters provide reduce actions
+** access to the lookahead token (if any). The yyLookahead will be YYNOCODE
+** if the lookahead token has already been consumed. As this procedure is
+** only called from one place, optimizing compilers will in-line it, which
+** means that the extra parameters have no performance impact.
+*/
+static YYACTIONTYPE yy_reduce(
+ yyParser *yypParser, /* The parser */
+ unsigned int yyruleno, /* Number of the rule by which to reduce */
+ int yyLookahead, /* Lookahead token, or YYNOCODE if none */
+ ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */
+ ParseCTX_PDECL /* %extra_context */
+){
+ int yygoto; /* The next state */
+ YYACTIONTYPE yyact; /* The next action */
+ yyStackEntry *yymsp; /* The top of the parser's stack */
+ int yysize; /* Amount to pop the stack */
+ ParseARG_FETCH
+ (void)yyLookahead;
+ (void)yyLookaheadToken;
+ yymsp = yypParser->yytos;
+
+ switch( yyruleno ){
+ /* Beginning here are the reduction cases. A typical example
+ ** follows:
+ ** case 0:
+ ** #line <lineno> <grammarfile>
+ ** { ... } // User supplied code
+ ** #line <lineno> <thisfile>
+ ** break;
+ */
+/********** Begin reduce actions **********************************************/
+%%
+/********** End reduce actions ************************************************/
+ };
+ assert( yyruleno<sizeof(yyRuleInfoLhs)/sizeof(yyRuleInfoLhs[0]) );
+ yygoto = yyRuleInfoLhs[yyruleno];
+ yysize = yyRuleInfoNRhs[yyruleno];
+ yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto);
+
+ /* There are no SHIFTREDUCE actions on nonterminals because the table
+ ** generator has simplified them to pure REDUCE actions. */
+ assert( !(yyact>YY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) );
+
+ /* It is not possible for a REDUCE to be followed by an error */
+ assert( yyact!=YY_ERROR_ACTION );
+
+ /* yysize is the negated RHS length (see yyRuleInfoNRhs), so this single
+ ** pointer adjustment pops the RHS symbols and pushes the LHS. */
+ yymsp += yysize+1;
+ yypParser->yytos = yymsp;
+ yymsp->stateno = (YYACTIONTYPE)yyact;
+ yymsp->major = (YYCODETYPE)yygoto;
+ yyTraceShift(yypParser, yyact, "... then shift");
+ return yyact;
+}
+
+/*
+** The following code executes when the parse fails
+*/
+#ifndef YYNOERRORRECOVERY
+static void yy_parse_failed(
+ yyParser *yypParser /* The parser */
+){
+ ParseARG_FETCH
+ ParseCTX_FETCH
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
+ }
+#endif
+ /* Unwind the stack, running destructors, before invoking the grammar's
+ ** %parse_failure code. */
+ while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will be executed whenever the
+ ** parser fails */
+/************ Begin %parse_failure code ***************************************/
+%%
+/************ End %parse_failure code *****************************************/
+ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
+ ParseCTX_STORE
+}
+#endif /* YYNOERRORRECOVERY */
+
+/*
+** The following code executes when a syntax error first occurs.
+*/
+static void yy_syntax_error(
+ yyParser *yypParser, /* The parser */
+ int yymajor, /* The major type of the error token */
+ ParseTOKENTYPE yyminor /* The minor type of the error token */
+){
+ ParseARG_FETCH
+ ParseCTX_FETCH
+/* Expose the offending token's minor value to the %syntax_error code as
+** the conventional name TOKEN. */
+#define TOKEN yyminor
+/************ Begin %syntax_error code ****************************************/
+%%
+/************ End %syntax_error code ******************************************/
+ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
+ ParseCTX_STORE
+}
+
+/*
+** The following is executed when the parser accepts
+*/
+static void yy_accept(
+ yyParser *yypParser /* The parser */
+){
+ ParseARG_FETCH
+ ParseCTX_FETCH
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
+ }
+#endif
+#ifndef YYNOERRORRECOVERY
+ /* Reset error-recovery state so a reused parser starts clean. */
+ yypParser->yyerrcnt = -1;
+#endif
+ /* On accept only the slot-0 sentinel remains on the stack. */
+ assert( yypParser->yytos==yypParser->yystack );
+ /* Here code is inserted which will be executed whenever the
+ ** parser accepts */
+/*********** Begin %parse_accept code *****************************************/
+%%
+/*********** End %parse_accept code *******************************************/
+ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
+ ParseCTX_STORE
+}
+
+/* The main parser program.
+** The first argument is a pointer to a structure obtained from
+** "ParseAlloc" which describes the current state of the parser.
+** The second argument is the major token number. The third is
+** the minor token. The fourth optional argument is whatever the
+** user wants (and specified in the grammar) and is available for
+** use by the action routines.
+**
+** Inputs:
+** <ul>
+** <li> A pointer to the parser (an opaque structure.)
+** <li> The major token number.
+** <li> The minor token number.
+** <li> An optional argument of a grammar-specified type.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+void Parse(
+ void *yyp, /* The parser */
+ int yymajor, /* The major token code number */
+ ParseTOKENTYPE yyminor /* The value for the token */
+ ParseARG_PDECL /* Optional %extra_argument parameter */
+){
+ YYMINORTYPE yyminorunion;
+ YYACTIONTYPE yyact; /* The parser action. */
+#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
+ int yyendofinput; /* True if we are at the end of input */
+#endif
+#ifdef YYERRORSYMBOL
+ int yyerrorhit = 0; /* True if yymajor has invoked an error */
+#endif
+ yyParser *yypParser = (yyParser*)yyp; /* The parser */
+ ParseCTX_FETCH
+ ParseARG_STORE
+
+ assert( yypParser->yytos!=0 );
+#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
+ /* Token code 0 is the end-of-input marker. */
+ yyendofinput = (yymajor==0);
+#endif
+
+ /* Start from the state (or pending reduce) stored on top of the stack. */
+ yyact = yypParser->yytos->stateno;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ if( yyact < YY_MIN_REDUCE ){
+ fprintf(yyTraceFILE,"%sInput '%s' in state %d\n",
+ yyTracePrompt,yyTokenName[yymajor],yyact);
+ }else{
+ fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n",
+ yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE);
+ }
+ }
+#endif
+
+ while(1){ /* Exit by "break" */
+ assert( yypParser->yytos>=yypParser->yystack );
+ assert( yyact==yypParser->yytos->stateno );
+ yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact);
+ if( yyact >= YY_MIN_REDUCE ){
+ unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */
+#ifndef NDEBUG
+ assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) );
+ if( yyTraceFILE ){
+ int yysize = yyRuleInfoNRhs[yyruleno];
+ if( yysize ){
+ fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
+ yyTracePrompt,
+ yyruleno, yyRuleName[yyruleno],
+ yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action",
+ yypParser->yytos[yysize].stateno);
+ }else{
+ fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n",
+ yyTracePrompt, yyruleno, yyRuleName[yyruleno],
+ yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action");
+ }
+ }
+#endif /* NDEBUG */
+
+ /* Check that the stack is large enough to grow by a single entry
+ ** if the RHS of the rule is empty. This ensures that there is room
+ ** enough on the stack to push the LHS value */
+ if( yyRuleInfoNRhs[yyruleno]==0 ){
+#ifdef YYTRACKMAXSTACKDEPTH
+ if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+ yypParser->yyhwm++;
+ assert( yypParser->yyhwm ==
+ (int)(yypParser->yytos - yypParser->yystack));
+ }
+#endif
+#if YYSTACKDEPTH>0
+ if( yypParser->yytos>=yypParser->yystackEnd ){
+ yyStackOverflow(yypParser);
+ break;
+ }
+#else
+ if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
+ if( yyGrowStack(yypParser) ){
+ yyStackOverflow(yypParser);
+ break;
+ }
+ }
+#endif
+ }
+ yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor ParseCTX_PARAM);
+ }else if( yyact <= YY_MAX_SHIFTREDUCE ){
+ /* A shift (or shift-reduce) consumes the token: leave the loop. */
+ yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt--;
+#endif
+ break;
+ }else if( yyact==YY_ACCEPT_ACTION ){
+ yypParser->yytos--;
+ yy_accept(yypParser);
+ return;
+ }else{
+ assert( yyact == YY_ERROR_ACTION );
+ yyminorunion.yy0 = yyminor;
+#ifdef YYERRORSYMBOL
+ int yymx;
+#endif
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
+ }
+#endif
+#ifdef YYERRORSYMBOL
+ /* A syntax error has occurred.
+ ** The response to an error depends upon whether or not the
+ ** grammar defines an error token "ERROR".
+ **
+ ** This is what we do if the grammar does define ERROR:
+ **
+ ** * Call the %syntax_error function.
+ **
+ ** * Begin popping the stack until we enter a state where
+ ** it is legal to shift the error symbol, then shift
+ ** the error symbol.
+ **
+ ** * Set the error count to three.
+ **
+ ** * Begin accepting and shifting new tokens. No new error
+ ** processing will occur until three tokens have been
+ ** shifted successfully.
+ **
+ */
+ if( yypParser->yyerrcnt<0 ){
+ yy_syntax_error(yypParser,yymajor,yyminor);
+ }
+ yymx = yypParser->yytos->major;
+ if( yymx==YYERRORSYMBOL || yyerrorhit ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sDiscard input token %s\n",
+ yyTracePrompt,yyTokenName[yymajor]);
+ }
+#endif
+ yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion);
+ yymajor = YYNOCODE;
+ }else{
+ while( yypParser->yytos > yypParser->yystack ){
+ yyact = yy_find_reduce_action(yypParser->yytos->stateno,
+ YYERRORSYMBOL);
+ if( yyact<=YY_MAX_SHIFTREDUCE ) break;
+ yy_pop_parser_stack(yypParser);
+ }
+ if( yypParser->yytos <= yypParser->yystack || yymajor==0 ){
+ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+ yy_parse_failed(yypParser);
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt = -1;
+#endif
+ yymajor = YYNOCODE;
+ }else if( yymx!=YYERRORSYMBOL ){
+ yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor);
+ }
+ }
+ yypParser->yyerrcnt = 3;
+ yyerrorhit = 1;
+ if( yymajor==YYNOCODE ) break;
+ yyact = yypParser->yytos->stateno;
+#elif defined(YYNOERRORRECOVERY)
+ /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to
+ ** do any kind of error recovery. Instead, simply invoke the syntax
+ ** error routine and continue going as if nothing had happened.
+ **
+ ** Applications can set this macro (for example inside %include) if
+ ** they intend to abandon the parse upon the first syntax error seen.
+ */
+ yy_syntax_error(yypParser,yymajor, yyminor);
+ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+ break;
+#else /* YYERRORSYMBOL is not defined */
+ /* This is what we do if the grammar does not define ERROR:
+ **
+ ** * Report an error message, and throw away the input token.
+ **
+ ** * If the input token is $, then fail the parse.
+ **
+ ** As before, subsequent error messages are suppressed until
+ ** three input tokens have been successfully shifted.
+ */
+ if( yypParser->yyerrcnt<=0 ){
+ yy_syntax_error(yypParser,yymajor, yyminor);
+ }
+ yypParser->yyerrcnt = 3;
+ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+ if( yyendofinput ){
+ yy_parse_failed(yypParser);
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt = -1;
+#endif
+ }
+ break;
+#endif
+ }
+ }
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ yyStackEntry *i;
+ char cDiv = '[';
+ fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt);
+ for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){
+ fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]);
+ cDiv = ' ';
+ }
+ fprintf(yyTraceFILE,"]\n");
+ }
+#endif
+ return;
+}
+
+/*
+** Return the fallback token corresponding to canonical token iToken, or
+** 0 if iToken has no fallback.
+*/
+int ParseFallback(int iToken){
+#ifdef YYFALLBACK
+ assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) );
+ return yyFallback[iToken];
+#else
+ /* No %fallback directives in the grammar: every token maps to 0. */
+ (void)iToken;
+ return 0;
+#endif
+}
diff --git a/tools/lex.py b/tools/lex.py
new file mode 100644
index 0000000..103cf96
--- /dev/null
+++ b/tools/lex.py
@@ -0,0 +1,1074 @@
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# -----------------------------------------------------------------------------

# __tabversion__ is written into generated lextab modules; readtab() refuses
# tables whose recorded version does not match it.
__version__ = '3.8'
__tabversion__ = '3.8'

import re
import sys
import types
import copy
import os
import inspect

# This tuple contains known string types (kept for Python 2/3 portability)
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0: types.StringType is gone, so this branch is taken
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
class LexError(Exception):
    """Raised when no lexing rule matches and no default error handler
    (t_error) is defined.  ``text`` holds the unconsumed remainder of the
    input string."""

    def __init__(self, message, s):
        # Exception.__init__ stores (message,) in self.args for us.
        super().__init__(message)
        self.text = s
+
+
+# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single token produced by the lexer.

    Attributes (set by Lexer.token / rule functions, not __init__):
    ``type``, ``value``, ``lineno``, ``lexpos``.
    """

    def __str__(self):
        fields = (self.type, self.value, self.lineno, self.lexpos)
        return 'LexToken(%s,%r,%d,%d)' % fields

    # repr() and str() intentionally render the same text.
    __repr__ = __str__
+
+
+# This object is a stand-in for a logging object created by the
+# logging module.
+
class PlyLogger(object):
    """Minimal stand-in for a ``logging`` logger that writes straight to a
    file-like object; ``info`` and ``debug`` emit with no severity prefix."""

    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        self.f.write('%s\n' % (msg % args))

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: %s\n' % (msg % args))

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: %s\n' % (msg % args))

    # Lower-severity methods share critical's plain formatting.
    info = critical
    debug = critical
+
+
+# Null logger is used when no output is generated. Does nothing.
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    # Any attribute access (info, error, warning, ...) returns the logger
    # itself, and calling it is also a no-op returning itself, so arbitrary
    # chained logging calls silently succeed.
    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self
+
+
+# -----------------------------------------------------------------------------
+# === Lexing Engine ===
+#
+# The following Lexer class implements the lexer runtime. There are only
+# a few public methods and attributes:
+#
+# input() - Store a new string in the lexer
+# token() - Get the next token
+# clone() - Clone the lexer
+#
+# lineno - Current line number
+# lexpos - Current position in the input string
+# -----------------------------------------------------------------------------
+
class Lexer:
    """The lexer runtime.

    Public interface: input() to load a string, token() to fetch the next
    LexToken (or None at end of input), clone() to copy the lexer, plus the
    state-management helpers begin()/push_state()/pop_state()/current_state()
    and skip().  ``lineno`` and ``lexpos`` track position; rule functions are
    expected to maintain ``lineno`` themselves.
    """

    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode

    def clone(self, object=None):
        """Return a shallow copy of the lexer.  If *object* is given, rule
        functions are rebound to same-named methods of that object."""
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            # String rules / ignored groups carry no function
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        """Write the compiled lexer tables to <outputdir>/<lextab>.py so a
        later optimize-mode run can load them with readtab()."""
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion = %s\n' % repr(__tabversion__))
            tf.write('_lextokens = %s\n' % repr(self.lextokens))
            tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        """Load previously written tables from module (or module name)
        *tabfile*, resolving saved function names through *fdict*.
        Raises ImportError if the table's version stamp doesn't match."""
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lextokens_all = self.lextokens | set(self.lexliterals)
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        # NOTE(review): txtitem is never appended to, so lexstateretext ends up
        # holding empty lists here (matches upstream PLY behavior).
        for statename, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))

            self.lexstatere[statename] = titem
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        """Store string *s* as the input to tokenize and reset lexpos."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        """Switch to lexer state *state*, swapping in its regexes, ignore
        set, and error/eof handlers."""
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n

    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None at end of input.

        Tries each master regex of the current state in order; a matching
        group maps (via lexindexfunc) to either a plain token type or a
        rule function that is called to post-process/replace the token.
        Falls back to single-character literals, then to the t_error rule,
        and finally raises LexError.
        """
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos         # This is here in case user has updated lexpos.
                    lexignore = self.lexignore   # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value   # literal tokens use the character itself as type
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        # End of input: give the optional t_eof rule a chance to supply a token
        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
+
+# -----------------------------------------------------------------------------
+# ==== Lex Builder ===
+#
+# The functions and classes below are used to collect lexing information
+# and build a Lexer object from it.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# _get_regex(func)
+#
+# Returns the regular expression assigned to a function either as a doc string
+# or as a .regex attribute attached by the @TOKEN decorator.
+# -----------------------------------------------------------------------------
+def _get_regex(func):
+ return getattr(func, 'regex', func.__doc__)
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table of the frame *levels* up the call stack:
    that frame's globals, overlaid with its locals when they differ
    (i.e. when the caller is not module-level code)."""
    frame = sys._getframe(levels)
    env = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        env.update(frame.f_locals)
    return env
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+def _funcs_to_names(funclist, namelist):
+ result = []
+ for f, name in zip(funclist, namelist):
+ if f and f[0]:
+ result.append((name, f[1]))
+ else:
+ result.append(f)
+ return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+def _names_to_funcs(namelist, fdict):
+ result = []
+ for n in namelist:
+ if n and n[0]:
+ result.append((fdict[n[0]], n[1]))
+ else:
+ result.append(n)
+ return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression. Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+def _form_master_re(relist, reflags, ldict, toknames):
+ if not relist:
+ return []
+ regex = '|'.join(relist)
+ try:
+ lexre = re.compile(regex, re.VERBOSE | reflags)
+
+ # Build the index to function map for the matching engine
+ lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
+ lexindexnames = lexindexfunc[:]
+
+ for f, i in lexre.groupindex.items():
+ handle = ldict.get(f, None)
+ if type(handle) in (types.FunctionType, types.MethodType):
+ lexindexfunc[i] = (handle, toknames[f])
+ lexindexnames[i] = f
+ elif handle is not None:
+ lexindexnames[i] = f
+ if f.find('ignore_') > 0:
+ lexindexfunc[i] = (None, None)
+ else:
+ lexindexfunc[i] = (None, toknames[f])
+
+ return [(lexre, lexindexfunc)], [regex], [lexindexnames]
+ except Exception:
+ m = int(len(relist)/2)
+ if m == 0:
+ m = 1
+ llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
+ rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
+ return (llist+rlist), (lre+rre), (lnames+rnames)
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token. For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+def _statetoken(s, names):
+ nonstate = 1
+ parts = s.split('_')
+ for i, part in enumerate(parts[1:], 1):
+ if part not in names and part != 'ANY':
+ break
+
+ if i > 1:
+ states = tuple(parts[1:i])
+ else:
+ states = ('INITIAL',)
+
+ if 'ANY' in states:
+ states = tuple(names)
+
+ tokenname = '_'.join(parts[i:])
+ return (states, tokenname)
+
+
+# -----------------------------------------------------------------------------
+# LexerReflect()
+#
+# This class represents information needed to build a lexer as extracted from a
+# user's input file.
+# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Extracts and validates the lexing specification (tokens, literals,
    states, and t_* rules) from a symbol dictionary *ldict*, typically a
    module's namespace.  get_all() collects everything; validate_all()
    checks it and returns True if any error was logged."""

    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict            # symbol table to scan for the spec
        self.error_func = None
        self.tokens = []
        self.reflags = reflags        # extra flags OR'ed into each re.compile
        self.stateinfo = {'INITIAL': 'inclusive'}
        self.modules = set()          # modules containing rules, for duplicate checks
        self.error = False            # sticky error flag
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        """Run all validators; returns True when any problem was found."""
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        """Check token names are identifiers; warn on duplicates."""
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        """Each literal must be a single-character string."""
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True

        except TypeError:
            # literals was not iterable at all
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        """Read the optional 'states' declaration: a sequence of
        (statename, 'inclusive'|'exclusive') tuples merged into stateinfo."""
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)

    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                # Function rule: special token names route to dedicated tables
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)

                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number (definition order controls match priority)
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length (longest pattern first)
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions

            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                # Bound methods take self plus the token argument
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    # Compile each rule in isolation to pinpoint bad patterns
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        # '#' starts a comment under re.VERBOSE unless escaped
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
                    if (c.match('')):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------

    def validate_module(self, module):
        lines, linen = inspect.getsourcelines(module)

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
+# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
    """Build a Lexer from rule definitions and return it.

    Rules are taken from *object* if given, else *module*, else the caller's
    namespace.  With optimize=True, precompiled tables are loaded from (and
    written back to) the *lextab* module instead of validating the rules.
    Side effects: sets the module-level ``lexer``, ``token`` and ``input``
    globals to the new lexer and its bound methods.  Raises SyntaxError if
    validation fails.
    """

    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object:
        module = object

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        # No module given: harvest the caller's namespace
        ldict = get_caller_module_dict(2)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    # Optimize mode: try loading prebuilt tables; fall through and rebuild
    # from scratch if the lextab module is missing or stale.
    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens   = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states   = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first (already sorted by line number)
        for fname, f in linfo.funcsym[state]:
            line = f.__code__.co_firstlineno
            file = f.__code__.co_filename
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules (already sorted longest-pattern-first)
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions

    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error and ignore handling
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
        except IOError as e:
            # Table write failure is non-fatal; the lexer still works
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
def runmain(lexer=None, data=None):
    """Run a lexer as a main program: tokenize *data* (or the file named in
    sys.argv[1], or standard input) and print one (type,value,lineno,lexpos)
    tuple per line.  Uses the module-global lexer built by lex() when no
    *lexer* is supplied."""
    if not data:
        try:
            with open(sys.argv[1]) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    _input = lexer.input if lexer else input
    _input(data)
    _token = lexer.token if lexer else token

    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to set the regex expression on a function
+# when its docstring might need to be set in an alternative way
+# -----------------------------------------------------------------------------
+
def TOKEN(r):
    """Decorator attaching a regex to a token rule function, as an
    alternative to putting the pattern in its docstring.  *r* may be a
    pattern string or another rule function whose regex is reused."""
    def set_regex(f):
        f.regex = _get_regex(r) if callable(r) else r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
+
diff --git a/tools/licensecheck.pl b/tools/licensecheck.pl
new file mode 100755
index 0000000..0778153
--- /dev/null
+++ b/tools/licensecheck.pl
@@ -0,0 +1,874 @@
+#!/usr/bin/perl
+# -*- tab-width: 8; indent-tabs-mode: t; cperl-indent-level: 4 -*-
+# This script was originally based on the script of the same name from
+# the KDE SDK (by dfaure@kde.org)
+#
+# This version is
+# Copyright (C) 2007, 2008 Adam D. Barratt
+# Copyright (C) 2012 Francesco Poli
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Originally copied from Debian's devscripts. A more modern version of
+# this can be found at
+# https://anonscm.debian.org/git/pkg-perl/packages/licensecheck.git/
+
+=head1 NAME
+
+licensecheck - simple license checker for source files
+
+=head1 SYNOPSIS
+
+B<licensecheck> B<--help>|B<--version>
+
+B<licensecheck> [B<--no-conf>] [B<--verbose>] [B<--copyright>]
+[B<-l>|B<--lines=>I<N>] [B<-i>|B<--ignore=>I<regex>] [B<-c>|B<--check=>I<regex>]
+[B<-m>|B<--machine>] [B<-r>|B<--recursive>] [B<-e>|B<--encoding=>I<...>]
+I<list of files and directories to check>
+
+=head1 DESCRIPTION
+
+B<licensecheck> attempts to determine the license that applies to each file
+passed to it, by searching the start of the file for text belonging to
+various licenses.
+
+If any of the arguments passed are directories, B<licensecheck> will add
+the files contained within to the list of files to process.
+
+=head1 OPTIONS
+
+=over 4
+
+=item B<--verbose>, B<--no-verbose>
+
+Specify whether to output the text being processed from each file before
+the corresponding license information.
+
+Default is to be quiet.
+
+=item B<-l=>I<N>, B<--lines=>I<N>
+
+Specify the number of lines of each file's header which should be parsed
+for license information. (Default is 60).
+
+=item B<--tail=>I<N>
+
+By default, the last 5k bytes of each file are parsed to get license
+information. You may use this option to set the size of this parsed chunk.
+You may set this value to 0 to avoid parsing the end of the file.
+
+=item B<-i=>I<regex>, B<--ignore=>I<regex>
+
+When processing the list of files and directories, the regular
+expression specified by this option will be used to indicate those which
+should not be considered (e.g. backup files, VCS metadata).
+
+=item B<-r>, B<--recursive>
+
+Specify that the contents of directories should be added
+recursively.
+
+=item B<-c=>I<regex>, B<--check=>I<regex>
+
+Specify a pattern against which filenames will be matched in order to
+decide which files to check the license of.
+
+The default includes common source files.
+
+=item B<-s>, B<--skipped>
+
+Specify whether to show skipped files, i.e. files found which do not
+match the check regexp (see C<--check> option). Default is to not show
+skipped files.
+
+Note that ignored files (like C<.git> or C<.svn>) are not shown even when
+this option is used.
+
+=item B<--copyright>
+
+Also display copyright text found within the file
+
+=item B<-e> B<--encoding>
+
+Specifies input encoding of source files. By default, input files are
+not decoded. When encoding is specified, license and copyright
+information are printed on STDOUT as utf8, or garbage if you got the
+encoding wrong.
+
+=item B<-m>, B<--machine>
+
+Display the information in a machine readable way, i.e. in the form
+<file><tab><license>[<tab><copyright>] so that it can be easily sorted
+and/or filtered, e.g. with the B<awk> and B<sort> commands.
+Note that using the B<--verbose> option will kill the readability.
+
+=item B<--no-conf>, B<--noconf>
+
+Do not read any configuration files. This can only be used as the first
+option given on the command line.
+
+=back
+
+=head1 CONFIGURATION VARIABLES
+
+The two configuration files F</etc/devscripts.conf> and
+F<~/.devscripts> are sourced by a shell in that order to set
+configuration variables. Command line options can be used to override
+configuration file settings. Environment variable settings are
+ignored for this purpose. The currently recognised variables are:
+
+=over 4
+
+=item B<LICENSECHECK_VERBOSE>
+
+If this is set to I<yes>, then it is the same as the B<--verbose> command
+line parameter being used. The default is I<no>.
+
+=item B<LICENSECHECK_PARSELINES>
+
+If this is set to a positive number then the specified number of lines
+at the start of each file will be read whilst attempting to determine
+the license(s) in use. This is equivalent to the B<--lines> command line
+option.
+
+=back
+
+=head1 LICENSE
+
+This code is copyright by Adam D. Barratt <I<adam@adam-barratt.org.uk>>,
+all rights reserved; based on a script of the same name from the KDE
+SDK, which is copyright by <I<dfaure@kde.org>>.
+This program comes with ABSOLUTELY NO WARRANTY.
+You are free to redistribute this code under the terms of the GNU
+General Public License, version 2 or later.
+
+=head1 AUTHOR
+
+Adam D. Barratt <adam@adam-barratt.org.uk>
+
+=cut
+
+# see https://stackoverflow.com/questions/6162484/why-does-modern-perl-avoid-utf-8-by-default/6163129#6163129
+use v5.14;
+use utf8;
+
+use strict;
+use autodie;
+use warnings;
+use warnings qw< FATAL utf8 >;
+
+use Getopt::Long qw(:config gnu_getopt);
+use File::Basename;
+use File::stat;
+use IO::File;
+use Fcntl qw/:seek/;
+
+binmode STDOUT, ':utf8';
+
+my $progname = basename($0);  # script name used in usage/error messages
+
+# From dpkg-source
+my $default_ignore_regex = qr!
+# Ignore general backup files
+~$|
+# Ignore emacs recovery files
+(?:^|/)\.#|
+# Ignore vi swap files
+(?:^|/)\..*\.swp$|
+# Ignore baz-style junk files or directories
+(?:^|/),,.*(?:$|/.*$)|
+# File-names that should be ignored (never directories)
+(?:^|/)(?:DEADJOE|\.cvsignore|\.arch-inventory|\.bzrignore|\.gitignore)$|
+# File or directory names that should be ignored
+(?:^|/)(?:CVS|RCS|\.pc|\.deps|\{arch\}|\.arch-ids|\.svn|\.hg|_darcs|\.git|
+\.shelf|_MTN|\.bzr(?:\.backup|tags)?)(?:$|/.*$)
+!x;
+
+# The original Debian version checks Markdown (.md and .markdown) files.
+# If we add those extensions back, we should add Asciidoctor (.adoc) as
+# well, and add SPDX IDs to all of those files.
+my $default_check_regex =
+    qr!
+    \.( # search for file suffix
+    c(c|pp|xx)? # c and c++
+    |h(h|pp|xx)? # header files for c and c++
+    |S
+    |css|less # HTML css and similar
+    |f(77|90)?
+    |go
+    |groovy
+    |lisp
+    |scala
+    |clj
+    |p(l|m)?6?|t|xs|pod6? # perl5 or perl6
+    |sh
+    |php
+    |py(|x)
+    |rb
+    |java
+    |js
+    |vala
+    |el
+    |sc(i|e)
+    |cs
+    |pas
+    |inc
+    |dtd|xsl
+    |mod
+    |m
+    |tex
+    |mli?
+    |(c|l)?hs
+    )
+    $
+    !x;
+
+# also used to cleanup
+my $copyright_indicator_regex
+    = qr!
+    (?:copyright # The full word
+    |copr\. # Legally-valid abbreviation
+    |\xc2\xa9 # UTF-8 byte sequence for the copyright sign
+    |\x{00a9} # Unicode character COPYRIGHT SIGN
+    #|© # Unicode character COPYRIGHT SIGN
+    |\(c\) # Legally-null representation of sign
+    )
+    !lix;
+
+my $copyright_indicator_regex_with_capture = qr!$copyright_indicator_regex(?::\s*|\s+)(\S.*)$!lix;
+
+# avoid ditching things like <info@foo.com>
+my $copyright_disindicator_regex
+    = qr{
+    \b(?:info(?:rmation)?(?!@) # Discussing copyright information
+    |(notice|statement|claim|string)s? # Discussing the notice
+    |is|in|to # Part of a sentence
+    |(holder|owner)s? # Part of a sentence
+    |ownership # Part of a sentence
+    )\b
+    }ix;
+
+my $copyright_predisindicator_regex
+    = qr!(
+    ^[#]define\s+.*\(c\) # #define foo(c) -- not copyright
+    )!ix;
+
+my $modified_conf_msg;  # filled in by the config-file reader below; shown by help()
+
+my %OPT=(  # command-line options; defaults overridden by config files and GetOptions below
+    verbose => '',
+    lines => '',
+    noconf => '',
+    ignore => '',
+    check => '',
+    recursive => 0,
+    copyright => 0,
+    machine => 0,
+    text => 0,
+    skipped => 0,
+);
+
+my $def_lines = 60;  # default number of header lines scanned per file
+my $def_tail = 5000; # roughly 60 lines of 80 chars
+
+# Read configuration files and then command line
+# This is boilerplate
+
+if (@ARGV and $ARGV[0] =~ /^--no-?conf$/) {  # --no-conf must be the first argument
+    $modified_conf_msg = " (no configuration files read)";
+    shift;
+} else {
+    my @config_files = ('/etc/devscripts.conf', '~/.devscripts');
+    my %config_vars = (
+        'LICENSECHECK_VERBOSE' => 'no',
+        'LICENSECHECK_PARSELINES' => $def_lines,
+    );
+    my %config_default = %config_vars;  # keep pristine defaults to detect overrides
+
+    my $shell_cmd;
+    # Set defaults
+    foreach my $var (keys %config_vars) {
+        $shell_cmd .= qq[$var="$config_vars{$var}";\n];
+    }
+    $shell_cmd .= 'for file in ' . join(" ", @config_files) . "; do\n";
+    $shell_cmd .= '[ -f $file ] && . $file; done;' . "\n";
+    # Read back values
+    foreach my $var (keys %config_vars) { $shell_cmd .= "echo \$$var;\n" }
+    my $shell_out = `/bin/bash -c '$shell_cmd'`;  # config files are sourced by a shell, per the POD
+    @config_vars{keys %config_vars} = split /\n/, $shell_out, -1;
+
+    # Check validity
+    $config_vars{'LICENSECHECK_VERBOSE'} =~ /^(yes|no)$/
+        or $config_vars{'LICENSECHECK_VERBOSE'} = 'no';
+    $config_vars{'LICENSECHECK_PARSELINES'} =~ /^[1-9][0-9]*$/
+        or $config_vars{'LICENSECHECK_PARSELINES'} = $def_lines;
+
+    foreach my $var (sort keys %config_vars) {  # record which settings differ from defaults
+        if ($config_vars{$var} ne $config_default{$var}) {
+            $modified_conf_msg .= " $var=$config_vars{$var}\n";
+        }
+    }
+    $modified_conf_msg ||= " (none)\n";
+    chomp $modified_conf_msg;
+
+    $OPT{'verbose'} = $config_vars{'LICENSECHECK_VERBOSE'} eq 'yes' ? 1 : 0;
+    $OPT{'lines'} = $config_vars{'LICENSECHECK_PARSELINES'};
+}
+
+GetOptions(\%OPT,
+    "help|h",
+    "check|c=s",
+    "copyright",
+    "encoding|e=s",
+    "ignore|i=s",
+    "lines|l=i",
+    "machine|m",
+    "noconf|no-conf",
+    "recursive|r",
+    "skipped|s",
+    "tail=i",  # FIX: --tail takes a byte count per the POD and help(); bare "tail" made it a boolean flag
+    "text|t",
+    "verbose!",
+    "version|v",
+) or die "Usage: $progname [options] filelist\nRun $progname --help for more details\n";
+
+$OPT{'lines'} = $def_lines if $OPT{'lines'} !~ /^[1-9][0-9]*$/;
+my $ignore_regex = length($OPT{ignore}) ? qr/$OPT{ignore}/ : $default_ignore_regex;
+
+my $check_regex = $default_check_regex;
+$check_regex = qr/$OPT{check}/ if length $OPT{check};
+
+if ($OPT{'noconf'}) {  # reached only when --no-conf was NOT first (first-arg case is handled above)
+    fatal("--no-conf is only acceptable as the first command-line option!");
+}
+if ($OPT{'help'}) { help(); exit 0; }
+if ($OPT{'version'}) { version(); exit 0; }
+
+if ($OPT{text}) {
+    warn "$0 warning: option -text is deprecated\n"; # remove -text end 2015
+}
+
+die "Usage: $progname [options] filelist\nRun $progname --help for more details\n" unless @ARGV;
+
+$OPT{'lines'} = $def_lines if not defined $OPT{'lines'};
+
+my @files = ();
+my @find_args = ();
+my $files_count = @ARGV;
+
+push @find_args, qw(-maxdepth 1) unless $OPT{'recursive'};
+push @find_args, qw(-follow -type f -print);
+
+while (@ARGV) {
+    my $file = shift @ARGV;
+
+    if (-d $file) {
+        open my $FIND, '-|', 'find', $file, @find_args
+            or die "$progname: couldn't exec find: $!\n";
+
+        while (my $found = <$FIND>) {
+            chomp ($found);
+            # Silently skip empty files or ignored files
+            next if -z $found or $found =~ $ignore_regex;
+            if ( not $check_regex or $found =~ $check_regex ) {
+                # Name matches the check regex: queue it for license scanning
+                push @files, $found ;
+            }
+            else {
+                warn "skipped file $found\n" if $OPT{skipped};
+            }
+        }
+        close $FIND;
+    }
+    elsif ($file =~ $ignore_regex) {
+        # Silently skip ignored files
+        next;
+    }
+    elsif ( $files_count == 1 or not $check_regex or $file =~ $check_regex ) {
+        push @files, $file;
+    }
+    else {
+        warn "skipped file $file\n" if $OPT{skipped};
+    }
+}
+
+while (@files) {  # For each queued file: scan the header, fall back to the tail, then report.
+    my $file = shift @files;
+    my $content = '';
+    my $copyright_match;  # NOTE(review): declared but unused in this loop (parse_copyright has its own)
+    my $copyright = '';
+
+    my $st = stat $file;
+
+    my $enc = $OPT{encoding} ;
+    my $mode = $enc ? "<:encoding($enc)" : '<';
+    # need to use "<" when encoding is unknown otherwise we break compatibility
+    my $fh = IO::File->new ($file ,$mode) or die "Unable to access $file\n";
+
+    while ( my $line = $fh->getline ) {  # read at most $OPT{lines} header lines
+        last if ($fh->input_line_number > $OPT{'lines'});
+        $content .= $line;
+    }
+
+    my %copyrights = extract_copyright($content);
+
+    print qq(----- $file header -----\n$content----- end header -----\n\n)
+        if $OPT{'verbose'};
+
+    my $license = parselicense(clean_cruft_and_spaces(clean_comments($content)));
+    $copyright = join(" / ", reverse sort values %copyrights);
+
+    if ( not $copyright and $license eq 'UNKNOWN') {  # header told us nothing: try the file tail
+        my $position = $fh->tell; # See IO::Seekable
+        my $tail_size = $OPT{tail} // $def_tail;
+        my $jump = $st->size - $tail_size;
+        $jump = $position if $jump < $position;  # never seek back into the already-read header
+
+        my $tail = ''; # FIX: initialize so a skipped tail read doesn't interpolate undef below
+        if ( $tail_size and $jump < $st->size) {
+            $fh->seek($jump, SEEK_SET) ; # also IO::Seekable
+            $tail .= join('',$fh->getlines);
+        }
+
+        print qq(----- $file tail -----\n$tail----- end tail -----\n\n)
+            if $OPT{'verbose'};
+
+        %copyrights = extract_copyright($tail);
+        $license = parselicense(clean_cruft_and_spaces(clean_comments($tail)));
+        $copyright = join(" / ", reverse sort values %copyrights);
+    }
+
+    $fh->close;
+
+    if ($OPT{'machine'}) {  # tab-separated: <file><tab><license>[<tab><copyright>]
+        print "$file\t$license";
+        print "\t" . ($copyright or "*No copyright*") if $OPT{'copyright'};
+        print "\n";
+    } else {
+        print "$file: ";
+        print "*No copyright* " unless $copyright;
+        print $license . "\n";
+        print " [Copyright: " . $copyright . "]\n"
+            if $copyright and $OPT{'copyright'};
+        print "\n" if $OPT{'copyright'};
+    }
+}
+
+sub extract_copyright {  # Scan cleaned text line by line; return a case-deduplicated hash of copyright statements.
+    my $content = shift;
+    my @c = split /\n/, clean_comments($content);
+
+    my %copyrights;
+    my $lines_after_copyright_block = 0;
+
+    my $in_copyright_block = 0;  # state shared with parse_copyright via reference
+    while (@c) {
+        my $line = shift @c ;
+        my $copyright_match = parse_copyright($line, \$in_copyright_block) ;
+        if ($copyright_match) {
+            while (@c and $copyright_match =~ /\d[,.]?\s*$/) {
+                # looks like copyright end with a year, assume the owner is on next line(s)
+                $copyright_match .= ' '. shift @c;
+            }
+            $copyright_match =~ s/\s+/ /g;
+            $copyright_match =~ s/\s*$//;
+            $copyrights{lc("$copyright_match")} = "$copyright_match";  # lowercase key dedups case variants
+        }
+        elsif (scalar keys %copyrights) {
+            # skip remaining lines if a copyright block was found more than 5 lines ago.
+            # so a copyright block may contain up to 5 blank lines, but no more
+            last if $lines_after_copyright_block++ > 5;
+        }
+    }
+    return %copyrights;
+}
+
+sub parse_copyright {  # Extract a copyright statement from one line, or '' if none; tracks block state via ref.
+    my $data = shift ;
+    my $in_copyright_block_ref = shift;
+    my $copyright = '';
+    my $match;
+
+    if ( $data !~ $copyright_predisindicator_regex) {  # skip false positives like "#define foo(c)"
+        #print "match against ->$data<-\n";
+        if ($data =~ $copyright_indicator_regex_with_capture) {
+            $match = $1;
+            $$in_copyright_block_ref = 1;
+            # Ignore lines matching "see foo for copyright information" etc.
+            if ($match !~ $copyright_disindicator_regex) {
+                # De-cruft
+                $match =~ s/$copyright_indicator_regex//igx;
+                $match =~ s/^\s+//;
+                $match =~ s/\s*\bby\b\s*/ /;
+                $match =~ s/([,.])?\s*$//;
+                $match =~ s/\s{2,}/ /g;
+                $match =~ s/\\//g; # de-cruft nroff files
+                $match =~ s/\s*[*#]\s*$//;
+                $copyright = $match;
+            }
+        }
+        elsif ($$in_copyright_block_ref and $data =~ /^\d{2,}[,\s]+/) {
+            # following lines beginning with a year are supposed to be
+            # continued copyright blocks
+            $copyright = $data;
+        }
+        else {
+            $$in_copyright_block_ref = 0;  # any other line ends the block
+        }
+    }
+
+    return $copyright;
+}
+
+sub clean_comments {  # Strip comment leaders so license text can be matched as plain prose.
+    local $_ = shift or return q{};
+
+    # Remove generic comments: look for 4 or more lines beginning with
+    # regular comment pattern and trim it. Fall back to old algorithm
+    # if no such pattern found.
+    my @matches = m/^\s*((?:[^a-zA-Z0-9\s]{1,3}|\bREM\b))\s\w/mg;
+    if (@matches >= 4) {
+        my $comment_re = qr/\s*[\Q$matches[0]\E]{1,3}\s*/;  # first leader found, metachars quoted
+        s/^$comment_re//mg;
+    }
+
+    # Remove Fortran comments
+    s/^[cC] //gm;
+
+    # Remove C / C++ comments
+    s#(\*/|/[/*])##g;
+
+    return $_;
+}
+
+sub clean_cruft_and_spaces {  # Flatten whitespace and drop unusual characters to simplify regex matching.
+    local $_ = shift or return q{};
+
+    tr/\t\r\n/ /;  # tabs/newlines become single spaces
+
+    # this also removes quotes
+    tr% A-Za-z.+,@:;0-9\(\)/-%%cd;
+    tr/ //s;  # squeeze runs of spaces
+
+    return $_;
+}
+
+sub help {  # Print usage text; the heredoc interpolates current defaults and config-file overrides.
+    print <<"EOF";
+Usage: $progname [options] filename [filename ...]
+Valid options are:
+   --help, -h             Display this message
+   --version, -v          Display version and copyright info
+   --no-conf, --noconf    Don't read devscripts config files; must be
+                          the first option given
+   --verbose              Display the header of each file before its
+                            license information
+   --skipped, -s          Show skipped files
+   --lines, -l            Specify how many lines of the file header
+                          should be parsed for license information
+                          (Default: $def_lines)
+   --tail                 Specify how many bytes to parse at end of file
+                          (Default: $def_tail)
+   --check, -c            Specify a pattern indicating which files should
+                          be checked
+                          (Default: '$default_check_regex')
+   --machine, -m          Display in a machine readable way (good for awk)
+   --recursive, -r        Add the contents of directories recursively
+   --copyright            Also display the file's copyright
+   --ignore, -i           Specify that files / directories matching the
+                          regular expression should be ignored when
+                          checking files
+                          (Default: '$default_ignore_regex')
+
+Default settings modified by devscripts configuration files:
+$modified_conf_msg
+EOF
+}
+
+sub version {  # Print version/copyright banner; heredoc content is user-facing and left untouched.
+    print <<"EOF";
+This is $progname, from the Debian devscripts package, version 2.16.2
+Copyright (C) 2007, 2008 by Adam D. Barratt <adam\@adam-barratt.org.uk>; based
+on a script of the same name from the KDE SDK by <dfaure\@kde.org>.
+
+This program comes with ABSOLUTELY NO WARRANTY.
+You are free to redistribute this code under the terms of the
+GNU General Public License, version 2, or (at your option) any
+later version.
+EOF
+}
+
+sub parselicense {  # Map cleaned header/tail text to a license label; returns "UNKNOWN" if nothing matched.
+    my ($licensetext) = @_;
+
+    my $gplver = "";  # version suffix like " (v2 or later)" shared by the GPL-family branches below
+    my $extrainfo = "";
+    my $license = "";  # later matches PREPEND to this, so order of the checks matters
+
+    if ($licensetext =~ /version ([^ ]+)(?: of the License)?,? or(?: \(at your option\))? version (\d(?:[.-]\d+)*)/) {
+        $gplver = " (v$1 or v$2)";
+    } elsif ($licensetext =~ /version ([^, ]+?)[.,]? (?:\(?only\)?.? )?(?:of the GNU (Affero )?(Lesser |Library )?General Public License )?(as )?published by the Free Software Foundation/i or
+        $licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License (?:as )?published by the Free Software Foundation[;,] version ([^, ]+?)[.,]? /i) {
+
+        $gplver = " (v$1)";
+    } elsif ($licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License\s*(?:[(),GPL]+)\s*version (\d+(?:\.\d+)?)[ \.]/i) {
+        $gplver = " (v$1)";
+    } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or (?:\(at your option\) )?any later version/) {
+        $gplver = " (v$1 or later)";
+    } elsif ($licensetext =~ /GPL\sas\spublished\sby\sthe\sFree\sSoftware\sFoundation,\sversion\s([\d.]+)/i ) {
+        $gplver = " (v$1)";
+    } elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0-or-later/i ){
+        $gplver = " (v$1 or later)";
+    } elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0[^+]/i ) {
+        $gplver = " (v$1)";
+    } elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0\+/i ) {
+        $gplver = " (v$1 or later)";
+    } elsif ($licensetext =~ /SPDX-License-Identifier:\s+LGPL-([1-9])\.[0-1]\-or-later/i ) {
+        $gplver = " (v$1 or later)";
+    }
+
+    if ($licensetext =~ /(?:675 Mass Ave|59 Temple Place|51 Franklin Steet|02139|02111-1307)/i) {
+        $extrainfo = " (with incorrect FSF address)$extrainfo";
+    }
+
+    if ($licensetext =~ /permission (?:is (also granted|given))? to link (the code of )?this program with (any edition of )?(Qt|the Qt library)/i) {
+        $extrainfo = " (with Qt exception)$extrainfo"
+    }
+
+    if ($licensetext =~ /As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice/) {
+        $extrainfo = " (with Bison parser exception)$extrainfo";
+    }
+
+    # exclude blurb found in boost license text
+    if ($licensetext =~ /(All changes made in this file will be lost|DO NOT (EDIT|delete this file)|Generated (automatically|by|from)|generated.*file)/i
+        and $licensetext !~ /unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor/) {
+        $license = "GENERATED FILE";
+    }
+
+    if ($licensetext =~ /(are made available|(is free software.? )?you can redistribute (it|them) and(?:\/|\s+)or modify (it|them)|is licensed) under the terms of (version [^ ]+ of )?the (GNU (Library |Lesser )General Public License|LGPL)/i) {
+        $license = "LGPL$gplver$extrainfo $license";
+    }
+    # For Perl modules handled by Dist::Zilla
+    elsif ($licensetext =~ /this is free software,? licensed under:? (?:the )?(?:GNU (?:Library |Lesser )General Public License|LGPL),? version ([\d\.]+)/i) {
+        $license = "LGPL (v$1) $license";
+    }
+
+    if ($licensetext =~ /is free software.? you can redistribute (it|them) and(?:\/|\s+)or modify (it|them) under the terms of the (GNU Affero General Public License|AGPL)/i) {
+        $license = "AGPL$gplver$extrainfo $license";
+    }
+
+    if ($licensetext =~ /(is free software.? )?you (can|may) redistribute (it|them) and(?:\/|\s+)or modify (it|them) under the terms of (?:version [^ ]+ (?:\(?only\)? )?of )?the GNU General Public License/i) {
+        $license = "GPL$gplver$extrainfo $license";
+    }
+
+    if ($licensetext =~ /is distributed under the terms of the GNU General Public License,/
+        and length $gplver) {
+        $license = "GPL$gplver$extrainfo $license";
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+GPL/i and length $gplver) {
+        $license = "GPL$gplver$extrainfo $license";
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+GPL-2.0-or-later/i and length $gplver) {
+        $license = "GPL$gplver$extrainfo";
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+LGPL/i and length $gplver) {
+        $license = "LGPL$gplver$extrainfo $license";
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+Zlib/i) {
+        $license = "zlib/libpng $license";
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-3-Clause/i) {
+        $license = 'BSD (3 clause)';
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-2-Clause/i) {
+        $license = 'BSD (2 clause)';
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-1-Clause/i) {
+        $license = 'BSD (1 clause)';
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+MIT/i) {
+        $license = 'MIT/X11 (BSD like)';
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+ISC/i) {
+        $license = 'ISC';
+    }
+
+    if ($licensetext =~ /(?:is|may be)\s(?:(?:distributed|used).*?terms|being\s+released).*?\b(L?GPL)\b/) {
+        my $v = $gplver || ' (unversioned/unknown version)';
+        $license = "$1$v $license";
+    }
+
+    if ($licensetext =~ /the rights to distribute and use this software as governed by the terms of the Lisp Lesser General Public License|\bLLGPL\b/ ) {
+        $license = "LLGPL $license";
+    }
+
+    if ($licensetext =~ /This file is part of the .*Qt GUI Toolkit. This file may be distributed under the terms of the Q Public License as defined/) {
+        $license = "QPL (part of Qt) $license";
+    } elsif ($licensetext =~ /may (be distributed|redistribute it) under the terms of the Q Public License/) {
+        $license = "QPL $license";
+    }
+
+    if ($licensetext =~ /opensource\.org\/licenses\/mit-license\.php/) {
+        $license = "MIT/X11 (BSD like) $license";
+    } elsif ($licensetext =~ /Permission is hereby granted, free of charge, to any person obtaining a copy of this software and(\/or)? associated documentation files \(the (Software|Materials)\), to deal in the (Software|Materials)/) {
+        $license = "MIT/X11 (BSD like) $license";
+    } elsif ($licensetext =~ /Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy, modify, and distribute this software and its documentation for any purpose/) {
+        $license = "MIT/X11 (BSD like) $license";
+    }
+
+    if ($licensetext =~ /Permission to use, copy, modify, and(\/or)? distribute this software for any purpose with or without fee is hereby granted, provided.*copyright notice.*permission notice.*all copies/) {
+        $license = "ISC $license";
+    }
+
+    if ($licensetext =~ /THIS SOFTWARE IS PROVIDED .*AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY/) {
+        if ($licensetext =~ /All advertising materials mentioning features or use of this software must display the following acknowledge?ment.*This product includes software developed by/i) {
+            $license = "BSD (4 clause) $license";
+        } elsif ($licensetext =~ /(The name(?:\(s\))? .*? may not|Neither the (names? .*?|authors?) nor the names of( (its|their|other|any))? contributors may) be used to endorse or promote products derived from this software/i) {
+            $license = "BSD (3 clause) $license";
+        } elsif ($licensetext =~ /Redistributions in binary form must reproduce the above copyright notice/i) {
+            $license = "BSD (2 clause) $license";
+        } else {
+            $license = "BSD $license";
+        }
+    }
+
+    if ($licensetext =~ /Mozilla Public License,? (?:(?:Version|v\.)\s+)?(\d+(?:\.\d+)?)/) {
+        $license = "MPL (v$1) $license";
+    }
+    elsif ($licensetext =~ /Mozilla Public License,? \((?:Version|v\.) (\d+(?:\.\d+)?)\)/) {
+        $license = "MPL (v$1) $license";
+    }
+
+    # match when either:
+    # - the text *begins* with "The Artistic license v2.0" which is (hopefully) the actual artistic license v2.0 text.
+    # - a license grant is found. i.e something like "this is free software, licensed under the artistic license v2.0"
+    if ($licensetext =~ /(?:^\s*|(?:This is free software, licensed|Released|be used|use and modify this (?:module|software)) under (?:the terms of )?)[Tt]he Artistic License ([v\d.]*\d)/) {
+        $license = "Artistic (v$1) $license";
+    }
+
+    if ($licensetext =~ /is free software under the Artistic [Ll]icense/) {
+        $license = "Artistic $license";
+    }
+
+    if ($licensetext =~ /This program is free software; you can redistribute it and\/or modify it under the same terms as Perl itself/) {
+        $license = "Perl $license";
+    }
+
+    if ($licensetext =~ /under the Apache License, Version ([^ ]+)/) {
+        $license = "Apache (v$1) $license";
+    }
+
+    if ($licensetext =~ /(THE BEER-WARE LICENSE)/i) {
+        $license = "Beerware $license";
+    }
+
+    if ($licensetext =~ /distributed under the terms of the FreeType project/i) {
+        $license = "FreeType $license"; # aka FTL see https://www.freetype.org/license.html
+    }
+
+    if ($licensetext =~ /This source file is subject to version ([^ ]+) of the PHP license/) {
+        $license = "PHP (v$1) $license";
+    }
+
+    if ($licensetext =~ /under the terms of the CeCILL /) {
+        $license = "CeCILL $license";
+    }
+
+    if ($licensetext =~ /under the terms of the CeCILL-([^ ]+) /) {
+        $license = "CeCILL-$1 $license";
+    }
+
+    if ($licensetext =~ /under the SGI Free Software License B/) {
+        $license = "SGI Free Software License B $license";
+    }
+
+    if ($licensetext =~ /is in the public domain/i) {
+        $license = "Public domain $license";
+    }
+
+    if ($licensetext =~ /terms of the Common Development and Distribution License(, Version ([^(]+))? \(the License\)/) {
+        $license = "CDDL " . ($1 ? "(v$2) " : '') . $license;
+    }
+
+    if ($licensetext =~ /Microsoft Permissive License \(Ms-PL\)/) {
+        $license = "Ms-PL $license";
+    }
+
+    if ($licensetext =~ /Licensed under the Academic Free License version ([\d.]+)/) {
+        $license = $1 ? "AFL-$1" : "AFL";
+    }
+
+    if ($licensetext =~ /This program and the accompanying materials are made available under the terms of the Eclipse Public License v?([\d.]+)/) {
+        $license = $1 ? "EPL-$1" : "EPL";
+    }
+
+    # quotes were removed by clean_comments function
+    if ($licensetext =~ /Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license \(the Software\)/ or
+        $licensetext =~ /Boost Software License([ ,-]+Version ([^ ]+)?(\.))/i) {
+        $license = "BSL " . ($1 ? "(v$2) " : '') . $license;
+    }
+
+    if ($licensetext =~ /PYTHON SOFTWARE FOUNDATION LICENSE (VERSION ([^ ]+))/i) {
+        $license = "PSF " . ($1 ? "(v$2) " : '') . $license;
+    }
+
+    if ($licensetext =~ /The origin of this software must not be misrepresented.*Altered source versions must be plainly marked as such.*This notice may not be removed or altered from any source distribution/ or
+        $licensetext =~ /see copyright notice in zlib\.h/) {
+        $license = "zlib/libpng $license";
+    } elsif ($licensetext =~ /This code is released under the libpng license/) {
+        $license = "libpng $license";
+    }
+
+    if ($licensetext =~ /Do What The Fuck You Want To Public License, Version ([^, ]+)/i) {
+        $license = "WTFPL (v$1) $license";
+    }
+
+    if ($licensetext =~ /Do what The Fuck You Want To Public License/i) {
+        $license = "WTFPL $license";
+    }
+
+    if ($licensetext =~ /(License WTFPL|Under (the|a) WTFPL)/i) {
+        $license = "WTFPL $license";
+    }
+
+    if ($licensetext =~ /SPDX-License-Identifier:\s+\(([a-zA-Z0-9-\.]+)\s+OR\s+([a-zA-Z0-9-\.]+)\)/i) {
+        my $license1 = $1;
+        my $license2 = $2;
+        $license = parselicense("SPDX-License-Identifier: $license1") . ";" . parselicense("SPDX-License-Identifier: $license2");
+    }
+
+    $license = "UNKNOWN" if (!length($license));
+
+    # Remove trailing spaces.
+    $license =~ s/\s+$//;
+
+    return $license;
+}
+
+sub fatal {  # die() with a message prefixed by the program name and the caller's line number.
+    my ($pack,$file,$line);
+    ($pack,$file,$line) = caller();
+    (my $msg = "$progname: fatal error at line $line:\n@_\n") =~ tr/\0//d;  # strip NULs from interpolated args
+    $msg =~ s/\n\n$/\n/;
+    die $msg;
+}
diff --git a/tools/list_protos_in_cap.sh b/tools/list_protos_in_cap.sh
new file mode 100755
index 0000000..0ddfdd1
--- /dev/null
+++ b/tools/list_protos_in_cap.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+# List the protocols (dissectors) used in capture file(s)
+#
+# The Python script indexcap.py does the same thing.
+#
+# This script extracts the protocol names contained in a given capture file.
+# This is useful for generating a "database" (flat file :-)) of in what file
+# a given protocol can be found.
+#
+# Output consists of the file name followed by the protocols, for example:
+# /path/to/the/file.pcap eth ip sctp
+#
+# Copyright 2012 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Directory containing binaries. Default current directory.
+WS_BIN_PATH=${WS_BIN_PATH:-.}
+
+# Tweak the following to your liking. Editcap must support "-E".
+TSHARK="$WS_BIN_PATH/tshark"
+CAPINFOS="$WS_BIN_PATH/capinfos"
+
+if [ "$WS_BIN_PATH" = "." ]; then
+    export WIRESHARK_RUN_FROM_BUILD_DIRECTORY=
+fi
+
+NOTFOUND=0
+for i in "$TSHARK" "$CAPINFOS"
+do
+    if [ ! -x "$i" ]    # FIX: quote so paths containing spaces don't word-split (SC2086)
+    then
+        echo "Couldn't find $i" 1>&2
+        NOTFOUND=1
+    fi
+done
+if [ $NOTFOUND -eq 1 ]
+then
+    exit 1
+fi
+
+# Make sure we have at least one file
+FOUND=0
+for CF in "$@"
+do
+    if [ "$OSTYPE" == "cygwin" ]
+    then
+        CF=`cygpath --windows "$CF"`
+    fi
+    "$CAPINFOS" "$CF" > /dev/null 2>&1 && FOUND=1
+    if [ $FOUND -eq 1 ]
+    then
+        break
+    fi
+done
+
+if [ $FOUND -eq 0 ] ; then
+    cat <<FIN
+Error: No valid capture files found.
+
+Usage: `basename $0` capture file 1 [capture file 2]...
+FIN
+    exit 1
+fi
+
+for CF in "$@" ; do
+    if [ "$OSTYPE" == "cygwin" ] ; then
+        CF=`cygpath --windows "$CF"`
+    fi
+
+    if [ ! -f "$CF" ] ; then
+        echo "Doesn't exist or not a file: $CF" 1>&2
+        continue
+    fi
+
+    "$CAPINFOS" "$CF" > /dev/null
+    RETVAL=$?
+    if [ $RETVAL -ne 0 ] ; then
+        echo "Not a valid capture file (or some other problem)" 1>&2
+        continue
+    fi
+
+    printf "%s: " "$CF"
+
+    # Extract the protocol names.  FIX below: quote $TSHARK (path may contain spaces).
+    "$TSHARK" -T fields -eframe.protocols -nr "$CF" 2>/dev/null | \
+        tr ':\r' '\n' | sort -u | tr '\n\r' ' '
+
+    printf "\n"
+done
+
diff --git a/tools/macos-setup-brew.sh b/tools/macos-setup-brew.sh
new file mode 100755
index 0000000..17c92ce
--- /dev/null
+++ b/tools/macos-setup-brew.sh
@@ -0,0 +1,173 @@
+#!/bin/bash
+# Copyright 2014, Evan Huus (See AUTHORS file)
+#
+# Enhance (2016) by Alexis La Goutte (For use with Travis CI)
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+set -e -u -o pipefail
+
+eval "$(brew shellenv)"
+
+HOMEBREW_NO_AUTO_UPDATE=${HOMEBREW_NO_AUTO_UPDATE:-}
+
+# Print the command-line help for this script.
+function print_usage() {
+    printf "\\nUtility to setup a macOS system for Wireshark Development using Homebrew.\\n"
+    printf "The basic usage installs the needed software\\n\\n"
+    printf "Usage: %s [--install-optional] [--install-dmg-deps] [...other options...]\\n" "$0"
+    printf "\\t--install-optional: install optional software as well\\n"
+    printf "\\t--install-doc-deps: install packages required to build the documentation\\n"
+    printf "\\t--install-dmg-deps: install packages required to build the .dmg file\\n"
+    printf "\\t--install-sparkle-deps: install the Sparkle automatic updater\\n"
+    printf "\\t--install-test-deps: install packages required to run the test suite\\n"
+    printf "\\t--install-all: install everything\\n"
+    # Unrecognized arguments are appended to the "brew install" package
+    # list below, not passed to apt -- this is the Homebrew setup script.
+    printf "\\t[other]: other options are passed as-is to brew\\n"
+}
+
+INSTALLED_FORMULAE=$( brew list --formulae )
+# Install every requested formula that "brew list" does not already report
+# as installed, so repeated runs of this script stay cheap.
+function install_formulae() {
+    local -a to_install=()
+    local formula
+    for formula in "$@" ; do
+        if ! grep --word-regexp "$formula" > /dev/null 2>&1 <<<"$INSTALLED_FORMULAE" ; then
+            to_install+=( "$formula" )
+        fi
+    done
+    if [ "${#to_install[@]}" -eq 0 ]; then
+        printf "Nothing to install.\n"
+    else
+        brew install "${to_install[@]}"
+    fi
+}
+
+# Option flags; each defaults to off and is switched on by the matching
+# --install-* argument below.
+INSTALL_OPTIONAL=0
+INSTALL_DOC_DEPS=0
+INSTALL_DMG_DEPS=0
+INSTALL_SPARKLE_DEPS=0
+INSTALL_TEST_DEPS=0
+OPTIONS=()
+for arg; do
+    case $arg in
+    --help)
+        print_usage
+        exit 0
+        ;;
+    --install-optional)
+        INSTALL_OPTIONAL=1
+        ;;
+    --install-doc-deps)
+        INSTALL_DOC_DEPS=1
+        ;;
+    --install-dmg-deps)
+        INSTALL_DMG_DEPS=1
+        ;;
+    --install-sparkle-deps)
+        INSTALL_SPARKLE_DEPS=1
+        ;;
+    --install-test-deps)
+        INSTALL_TEST_DEPS=1
+        ;;
+    --install-all)
+        # Implies every other --install-* flag.
+        INSTALL_OPTIONAL=1
+        INSTALL_DOC_DEPS=1
+        INSTALL_DMG_DEPS=1
+        INSTALL_SPARKLE_DEPS=1
+        INSTALL_TEST_DEPS=1
+        ;;
+    *)
+        # Anything unrecognized is collected and later appended to the
+        # "brew install" package list.
+        OPTIONS+=("$arg")
+        ;;
+    esac
+done
+
+# Tools used to drive the build itself.
+BUILD_LIST=(
+    ccache
+    cmake
+    ninja
+)
+
+# Libraries needed for any build.
+# Qt isn't technically required, but...
+REQUIRED_LIST=(
+    c-ares
+    glib
+    libgcrypt
+    pcre2
+    qt6
+    speexdsp
+)
+
+# Optional feature libraries, installed with --install-optional.
+ADDITIONAL_LIST=(
+    brotli
+    gettext
+    gnutls
+    libilbc
+    libmaxminddb
+    libnghttp2
+    libnghttp3
+    libsmi
+    libssh
+    libxml2
+    lua@5.1
+    lz4
+    minizip
+    opus
+    snappy
+    spandsp
+    zstd
+)
+
+# Documentation toolchain, installed with --install-doc-deps.
+DOC_DEPS_LIST=(
+    asciidoctor
+    docbook
+    docbook-xsl
+)
+
+# Start from the always-needed packages...
+ACTUAL_LIST=( "${BUILD_LIST[@]}" "${REQUIRED_LIST[@]}" )
+
+# Now arrange for optional support libraries
+if [ $INSTALL_OPTIONAL -ne 0 ] ; then
+    ACTUAL_LIST+=( "${ADDITIONAL_LIST[@]}" )
+fi
+
+if [ $INSTALL_DOC_DEPS -ne 0 ] ; then
+    ACTUAL_LIST+=( "${DOC_DEPS_LIST[@]}" )
+fi
+
+# ...plus any extra formula names passed on the command line.
+if (( ${#OPTIONS[@]} != 0 )); then
+    ACTUAL_LIST+=( "${OPTIONS[@]}" )
+fi
+
+install_formulae "${ACTUAL_LIST[@]}"
+
+# Remaining dependencies are not Homebrew formulae: dmgbuild and pytest
+# come from pip, Sparkle is a Homebrew cask.
+if [ $INSTALL_DMG_DEPS -ne 0 ] ; then
+    pip3 install dmgbuild
+fi
+
+if [ $INSTALL_SPARKLE_DEPS -ne 0 ] ; then
+    # "brew cask install <token>" was removed in Homebrew 2.6 (December
+    # 2020); casks are installed with "brew install --cask" now.
+    brew install --cask sparkle
+fi
+
+if [ $INSTALL_TEST_DEPS -ne 0 ] ; then
+    pip3 install pytest pytest-xdist
+fi
+
+# Uncomment to add PNG compression utilities used by compress-pngs:
+# brew install advancecomp optipng oxipng pngcrush
+
+# Uncomment to enable generation of documentation
+# brew install asciidoctor
+
+exit 0
+#
+# Editor modelines
+#
+# Local Variables:
+# c-basic-offset: 4
+# tab-width: 8
+# indent-tabs-mode: nil
+# End:
+#
+# ex: set shiftwidth=4 tabstop=8 expandtab:
+# :indentSize=4:tabSize=8:noTabs=true:
+#
diff --git a/tools/macos-setup.sh b/tools/macos-setup.sh
new file mode 100755
index 0000000..ec25bf7
--- /dev/null
+++ b/tools/macos-setup.sh
@@ -0,0 +1,3865 @@
+#!/bin/bash
+# Setup development environment on macOS (tested with 10.6.8 and Xcode
+# 3.2.6 and with 10.12.4 and Xcode 8.3).
+#
+# Copyright 2011 Michael Tuexen, Joerg Mayer, Guy Harris (see AUTHORS file)
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Extended globbing; presumably used by pattern matches later in this
+# script (remainder not shown here).
+shopt -s extglob
+
+#
+# Get the major version of Darwin, so we can check the major macOS
+# version.
+#
+DARWIN_MAJOR_VERSION=`uname -r | sed 's/\([0-9]*\).*/\1/'`
+
+#
+# The minimum supported version of Qt is 5.9, so the minimum supported version
+# of macOS is OS X 10.10 (Yosemite), aka Darwin 14.0.
+#
+if [[ $DARWIN_MAJOR_VERSION -lt 14 ]]; then
+    echo "This script does not support any versions of macOS before Yosemite" 1>&2
+    exit 1
+fi
+
+#
+# Get the processor architecture of Darwin. Currently supported: arm, i386
+#
+DARWIN_PROCESSOR_ARCH=`uname -p`
+
+if [ "$DARWIN_PROCESSOR_ARCH" != "arm" -a "$DARWIN_PROCESSOR_ARCH" != "i386" ]; then
+    echo "This script does not support this processor architecture" 1>&2
+    exit 1
+fi
+
+#
+# Versions of packages to download and install.
+# Setting a *_VERSION variable to empty disables that package below.
+#
+
+#
+# We use curl, but older versions of curl in older macOS releases can't
+# handle some sites - including the xz site.
+#
+# If the version of curl in the system is older than 7.54.0, download
+# curl and install it.
+#
+current_curl_version=`curl --version | sed -n 's/curl \([0-9.]*\) .*/\1/p'`
+current_curl_major_version="`expr $current_curl_version : '\([0-9][0-9]*\).*'`"
+current_curl_minor_version="`expr $current_curl_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+if [[ $current_curl_major_version -lt 7 ||
+      ($current_curl_major_version -eq 7 &&
+       $current_curl_minor_version -lt 54) ]]; then
+    CURL_VERSION=${CURL_VERSION-7.60.0}
+fi
+
+#
+# Some packages need xz to unpack their current source.
+# XXX: tar, since macOS 10.9, can uncompress xz'ed tarballs,
+# so perhaps we could get rid of this now?
+#
+XZ_VERSION=5.2.5
+
+#
+# Some packages need lzip to unpack their current source.
+#
+LZIP_VERSION=1.21
+
+#
+# The version of libPCRE on Catalina is insufficient to build glib due to
+# missing UTF-8 support.
+#
+PCRE_VERSION=8.45
+
+#
+# CMake is required to do the build - and to build some of the
+# dependencies.
+#
+CMAKE_VERSION=${CMAKE_VERSION-3.21.4}
+
+#
+# Ninja isn't required, as make is provided with Xcode, but it is
+# claimed to build faster than make.
+# Comment it out if you don't want it.
+#
+NINJA_VERSION=${NINJA_VERSION-1.10.2}
+
+#
+# The following libraries and tools are required even to build only TShark.
+#
+GETTEXT_VERSION=0.21
+GLIB_VERSION=2.76.6
+if [ "$GLIB_VERSION" ]; then
+    # Pre-split major/minor/micro components for URL and option handling.
+    GLIB_MAJOR_VERSION="`expr $GLIB_VERSION : '\([0-9][0-9]*\).*'`"
+    GLIB_MINOR_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+    GLIB_DOTDOT_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+    GLIB_MAJOR_MINOR_VERSION=$GLIB_MAJOR_VERSION.$GLIB_MINOR_VERSION
+    GLIB_MAJOR_MINOR_DOTDOT_VERSION=$GLIB_MAJOR_VERSION.$GLIB_MINOR_VERSION.$GLIB_DOTDOT_VERSION
+fi
+PKG_CONFIG_VERSION=0.29.2
+#
+# libgpg-error is required for libgcrypt.
+#
+LIBGPG_ERROR_VERSION=1.39
+#
+# libgcrypt is required.
+#
+LIBGCRYPT_VERSION=1.8.7
+#
+# libpcre2 is required.
+#
+PCRE2_VERSION=10.39
+
+#
+# One or more of the following libraries are required to build Wireshark.
+#
+# To override the version of Qt call the script with some of the variables
+# set to the new values. Setting the variable to empty will disable building
+# the toolkit and will uninstall any version previously installed by the
+# script, e.g.
+# "QT_VERSION=5.10.1 ./macos-setup.sh"
+# will build and install with QT 5.10.1.
+#
+QT_VERSION=${QT_VERSION-6.2.4}
+
+if [ "$QT_VERSION" ]; then
+    QT_MAJOR_VERSION="`expr $QT_VERSION : '\([0-9][0-9]*\).*'`"
+    QT_MINOR_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+    QT_DOTDOT_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+    QT_MAJOR_MINOR_VERSION=$QT_MAJOR_VERSION.$QT_MINOR_VERSION
+    QT_MAJOR_MINOR_DOTDOT_VERSION=$QT_MAJOR_VERSION.$QT_MINOR_VERSION.$QT_DOTDOT_VERSION
+fi
+
+#
+# The following libraries are optional.
+# Comment them out if you don't want them, but note that some of
+# the optional libraries are required by other optional libraries.
+#
+LIBSMI_VERSION=0.4.8
+GNUTLS_VERSION=3.7.8
+if [ "$GNUTLS_VERSION" ]; then
+    #
+    # We'll be building GnuTLS, so we may need some additional libraries.
+    # We assume GnuTLS can work with Nettle; newer versions *only* use
+    # Nettle, not libgcrypt.
+    #
+    GNUTLS_MAJOR_VERSION="`expr $GNUTLS_VERSION : '\([0-9][0-9]*\).*'`"
+    GNUTLS_MINOR_VERSION="`expr $GNUTLS_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+    NETTLE_VERSION=3.9.1
+
+    #
+    # And, in turn, Nettle requires GMP.
+    #
+    GMP_VERSION=6.3.0
+
+    #
+    # And p11-kit
+    P11KIT_VERSION=0.25.0
+
+    # Which requires libtasn1
+    LIBTASN1_VERSION=4.19.0
+fi
+# Use 5.2.4, not 5.3, for now; lua_bitop.c hasn't been ported to 5.3
+# yet, and we need to check for compatibility issues (we'd want Lua
+# scripts to work with 5.1, 5.2, and 5.3, as long as they only use Lua
+# features present in all three versions)
+LUA_VERSION=5.2.4
+SNAPPY_VERSION=1.1.10
+ZSTD_VERSION=1.5.5
+LIBXML2_VERSION=2.11.5
+LZ4_VERSION=1.9.4
+SBC_VERSION=2.0
+CARES_VERSION=1.19.1
+LIBSSH_VERSION=0.10.5
+# mmdbresolve
+MAXMINDDB_VERSION=1.4.3
+NGHTTP2_VERSION=1.56.0
+NGHTTP3_VERSION=0.15.0
+SPANDSP_VERSION=0.0.6
+SPEEXDSP_VERSION=1.2.1
+if [ "$SPANDSP_VERSION" ]; then
+    #
+    # SpanDSP depends on libtiff.
+    #
+    LIBTIFF_VERSION=3.8.1
+fi
+BCG729_VERSION=1.1.1
+# libilbc 3.0.0 & later link with abseil, which is released under Apache 2.0
+ILBC_VERSION=2.0.2
+OPUS_VERSION=1.4
+
+#
+# Is /usr/bin/python3 a working version of Python? It may be, as it
+# might be a wrapper that runs the Python 3 that's part of Xcode.
+#
+if /usr/bin/python3 --version >/dev/null 2>&1
+then
+    #
+    # Yes - don't bother installing Python 3 from elsewhere
+    #
+    :
+else
+    #
+    # No - install a Python package.
+    #
+    PYTHON3_VERSION=3.9.5
+fi
+BROTLI_VERSION=1.0.9
+# minizip
+ZLIB_VERSION=1.3
+# Uncomment to enable automatic updates using Sparkle
+#SPARKLE_VERSION=2.1.0
+
+#
+# Asciidoctor is required to build the documentation.
+#
+ASCIIDOCTOR_VERSION=${ASCIIDOCTOR_VERSION-2.0.16}
+ASCIIDOCTORPDF_VERSION=${ASCIIDOCTORPDF_VERSION-1.6.1}
+
+#
+# GNU autotools. They're not supplied with the macOS versions we
+# support, and we currently use them for minizip.
+#
+AUTOCONF_VERSION=2.71
+AUTOMAKE_VERSION=1.16.5
+LIBTOOL_VERSION=2.4.6
+
+# Build and install curl $CURL_VERSION from source, if a version was
+# requested (CURL_VERSION set above) and it isn't already marked done.
+# $no_build, $MAKE_BUILD_OPTS and $DO_MAKE_INSTALL are set elsewhere in
+# this script.
+install_curl() {
+    if [ "$CURL_VERSION" -a ! -f curl-$CURL_VERSION-done ] ; then
+        echo "Downloading, building, and installing curl:"
+        [ -f curl-$CURL_VERSION.tar.bz2 ] || curl -L -O https://curl.haxx.se/download/curl-$CURL_VERSION.tar.bz2 || exit 1
+        $no_build && echo "Skipping installation" && return
+        bzcat curl-$CURL_VERSION.tar.bz2 | tar xf - || exit 1
+        cd curl-$CURL_VERSION
+        ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        # Marker file: presence means "this version is installed".
+        touch curl-$CURL_VERSION-done
+    fi
+}
+
+# Uninstall the previously installed curl; with "-r", also delete the
+# downloaded tarball and the unpacked source tree.
+uninstall_curl() {
+    if [ ! -z "$installed_curl_version" ] ; then
+        echo "Uninstalling curl:"
+        cd curl-$installed_curl_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm curl-$installed_curl_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf curl-$installed_curl_version
+            rm -rf curl-$installed_curl_version.tar.bz2
+        fi
+
+        installed_curl_version=""
+    fi
+}
+
+# Build and install xz $XZ_VERSION (provides liblzma, used by libxml2 and
+# Wireshark itself, hence the deployment-target/SDK flags).
+install_xz() {
+    if [ "$XZ_VERSION" -a ! -f xz-$XZ_VERSION-done ] ; then
+        echo "Downloading, building, and installing xz:"
+        [ -f xz-$XZ_VERSION.tar.bz2 ] || curl -L -O https://tukaani.org/xz/xz-$XZ_VERSION.tar.bz2 || exit 1
+        $no_build && echo "Skipping installation" && return
+        bzcat xz-$XZ_VERSION.tar.bz2 | tar xf - || exit 1
+        cd xz-$XZ_VERSION
+        #
+        # This builds and installs liblzma, which libxml2 uses, and
+        # Wireshark uses liblzma, so we need to build this with
+        # all the minimum-deployment-version and SDK stuff.
+        #
+        CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch xz-$XZ_VERSION-done
+    fi
+}
+
+# Uninstall xz; with "-r", also delete the tarball and source tree.
+uninstall_xz() {
+    if [ ! -z "$installed_xz_version" ] ; then
+        echo "Uninstalling xz:"
+        cd xz-$installed_xz_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm xz-$installed_xz_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf xz-$installed_xz_version
+            rm -rf xz-$installed_xz_version.tar.bz2
+        fi
+
+        installed_xz_version=""
+    fi
+}
+
+# Build and install lzip $LZIP_VERSION (used to unpack some source tarballs).
+install_lzip() {
+    if [ "$LZIP_VERSION" -a ! -f lzip-$LZIP_VERSION-done ] ; then
+        echo "Downloading, building, and installing lzip:"
+        [ -f lzip-$LZIP_VERSION.tar.gz ] || curl -L -O https://download.savannah.gnu.org/releases/lzip/lzip-$LZIP_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat lzip-$LZIP_VERSION.tar.gz | tar xf - || exit 1
+        cd lzip-$LZIP_VERSION
+        ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch lzip-$LZIP_VERSION-done
+    fi
+}
+
+# Uninstall lzip; with "-r", also delete the tarball and source tree.
+uninstall_lzip() {
+    if [ ! -z "$installed_lzip_version" ] ; then
+        echo "Uninstalling lzip:"
+        cd lzip-$installed_lzip_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm lzip-$installed_lzip_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf lzip-$installed_lzip_version
+            rm -rf lzip-$installed_lzip_version.tar.gz
+        fi
+
+        installed_lzip_version=""
+    fi
+}
+
+# Build and install the legacy PCRE (PCRE1) $PCRE_VERSION; needed on
+# Catalina whose system libPCRE lacks the UTF-8 support glib wants.
+install_pcre() {
+    if [ "$PCRE_VERSION" -a ! -f pcre-$PCRE_VERSION-done ] ; then
+        echo "Downloading, building, and installing pcre:"
+        [ -f pcre-$PCRE_VERSION.tar.bz2 ] || curl -L -O https://sourceforge.net/projects/pcre/files/pcre/$PCRE_VERSION/pcre-$PCRE_VERSION.tar.bz2 || exit 1
+        $no_build && echo "Skipping installation" && return
+        bzcat pcre-$PCRE_VERSION.tar.bz2 | tar xf - || exit 1
+        cd pcre-$PCRE_VERSION
+        # Unicode property support is off by default in PCRE1.
+        ./configure --enable-unicode-properties || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch pcre-$PCRE_VERSION-done
+    fi
+}
+
+# Uninstall pcre; with "-r", also delete the tarball and source tree.
+uninstall_pcre() {
+    if [ ! -z "$installed_pcre_version" ] ; then
+        echo "Uninstalling pcre:"
+        cd pcre-$installed_pcre_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm pcre-$installed_pcre_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf pcre-$installed_pcre_version
+            rm -rf pcre-$installed_pcre_version.tar.bz2
+        fi
+
+        installed_pcre_version=""
+    fi
+}
+
+# Build and install pcre2 $PCRE2_VERSION with a CMake out-of-tree build.
+install_pcre2() {
+    if [ "$PCRE2_VERSION" -a ! -f "pcre2-$PCRE2_VERSION-done" ] ; then
+        echo "Downloading, building, and installing pcre2:"
+        # Use $PCRE2_VERSION in the remote file name too; this previously
+        # hard-coded "pcre2-10.39.tar.bz2", which silently fetched the
+        # wrong tarball whenever PCRE2_VERSION was overridden.
+        [ -f "pcre2-$PCRE2_VERSION.tar.bz2" ] || curl -L -O "https://github.com/PhilipHazel/pcre2/releases/download/pcre2-$PCRE2_VERSION/pcre2-$PCRE2_VERSION.tar.bz2" || exit 1
+        $no_build && echo "Skipping installation" && return
+        bzcat "pcre2-$PCRE2_VERSION.tar.bz2" | tar xf - || exit 1
+        cd "pcre2-$PCRE2_VERSION"
+        mkdir build_dir
+        cd build_dir
+        # https://github.com/Homebrew/homebrew-core/blob/master/Formula/pcre2.rb
+        # https://github.com/microsoft/vcpkg/blob/master/ports/pcre2/portfile.cmake
+        MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" \
+            $DO_CMAKE -DBUILD_STATIC_LIBS=OFF -DBUILD_SHARED_LIBS=ON -DPCRE2_SUPPORT_JIT=ON -DPCRE2_SUPPORT_UNICODE=ON .. || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../..
+        touch "pcre2-$PCRE2_VERSION-done"
+    fi
+}
+
+# Remove an installed pcre2 by replaying the CMake install manifest (this
+# build has no "make uninstall" target).
+uninstall_pcre2() {
+    if [ -n "$installed_pcre2_version" ] && [ -s "pcre2-$installed_pcre2_version/build_dir/install_manifest.txt" ] ; then
+        echo "Uninstalling pcre2:"
+        # PCRE2 10.39 installs pcre2unicode.3 twice, so this will return an error.
+        # The trailing "echo" guarantees the last manifest line (which has
+        # no newline) is still read.
+        while read -r ; do $DO_RM -v "$REPLY" ; done < <(cat "pcre2-$installed_pcre2_version/build_dir/install_manifest.txt"; echo)
+        rm "pcre2-$installed_pcre2_version-done"
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf "pcre2-$installed_pcre2_version"
+            rm -rf "pcre2-$installed_pcre2_version.tar.bz2"
+        fi
+
+        installed_pcre2_version=""
+    fi
+}
+
+# Build and install GNU autoconf $AUTOCONF_VERSION.
+install_autoconf() {
+    if [ "$AUTOCONF_VERSION" -a ! -f autoconf-$AUTOCONF_VERSION-done ] ; then
+        echo "Downloading, building and installing GNU autoconf..."
+        [ -f autoconf-$AUTOCONF_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/autoconf/autoconf-$AUTOCONF_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat autoconf-$AUTOCONF_VERSION.tar.xz | tar xf - || exit 1
+        cd autoconf-$AUTOCONF_VERSION
+        ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch autoconf-$AUTOCONF_VERSION-done
+    fi
+}
+
+# Uninstall autoconf, first removing its dependents (libtool, automake).
+uninstall_autoconf() {
+    if [ ! -z "$installed_autoconf_version" ] ; then
+        #
+        # automake and libtool depend on this, so uninstall them.
+        #
+        uninstall_libtool "$@"
+        uninstall_automake "$@"
+
+        echo "Uninstalling GNU autoconf:"
+        cd autoconf-$installed_autoconf_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm autoconf-$installed_autoconf_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf autoconf-$installed_autoconf_version
+            rm -rf autoconf-$installed_autoconf_version.tar.xz
+        fi
+
+        installed_autoconf_version=""
+    fi
+}
+
+# Build and install GNU automake $AUTOMAKE_VERSION.
+install_automake() {
+    if [ "$AUTOMAKE_VERSION" -a ! -f automake-$AUTOMAKE_VERSION-done ] ; then
+        echo "Downloading, building and installing GNU automake..."
+        [ -f automake-$AUTOMAKE_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/automake/automake-$AUTOMAKE_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat automake-$AUTOMAKE_VERSION.tar.xz | tar xf - || exit 1
+        cd automake-$AUTOMAKE_VERSION
+        ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch automake-$AUTOMAKE_VERSION-done
+    fi
+}
+
+# Uninstall automake, first removing libtool which (maybe) depends on it.
+uninstall_automake() {
+    if [ ! -z "$installed_automake_version" ] ; then
+        #
+        # libtool depends on this(?), so uninstall it.
+        #
+        uninstall_libtool "$@"
+
+        echo "Uninstalling GNU automake:"
+        cd automake-$installed_automake_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm automake-$installed_automake_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf automake-$installed_automake_version
+            rm -rf automake-$installed_automake_version.tar.xz
+        fi
+
+        installed_automake_version=""
+    fi
+}
+
+# Build and install GNU libtool $LIBTOOL_VERSION, prefixed "g" (glibtool)
+# to avoid clashing with Apple's /usr/bin/libtool.
+install_libtool() {
+    if [ "$LIBTOOL_VERSION" -a ! -f libtool-$LIBTOOL_VERSION-done ] ; then
+        echo "Downloading, building and installing GNU libtool..."
+        [ -f libtool-$LIBTOOL_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/libtool/libtool-$LIBTOOL_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat libtool-$LIBTOOL_VERSION.tar.xz | tar xf - || exit 1
+        cd libtool-$LIBTOOL_VERSION
+        ./configure --program-prefix=g || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch libtool-$LIBTOOL_VERSION-done
+    fi
+}
+
+# Uninstall libtool; the g-prefixed names are restored first so that
+# "make uninstall" matches what was installed.
+uninstall_libtool() {
+    if [ ! -z "$installed_libtool_version" ] ; then
+        echo "Uninstalling GNU libtool:"
+        cd libtool-$installed_libtool_version
+        $DO_MV /usr/local/bin/glibtool /usr/local/bin/libtool
+        $DO_MV /usr/local/bin/glibtoolize /usr/local/bin/libtoolize
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm libtool-$installed_libtool_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libtool-$installed_libtool_version
+            rm -rf libtool-$installed_libtool_version.tar.xz
+        fi
+
+        installed_libtool_version=""
+    fi
+}
+
+# Install a prebuilt Ninja binary (no source build) into /usr/local/bin.
+install_ninja() {
+    if [ "$NINJA_VERSION" -a ! -f ninja-$NINJA_VERSION-done ] ; then
+        echo "Downloading and installing Ninja:"
+        #
+        # Download the zipball, unpack it, and move the binary to
+        # /usr/local/bin.
+        #
+        [ -f ninja-mac-v$NINJA_VERSION.zip ] || curl -L -o ninja-mac-v$NINJA_VERSION.zip https://github.com/ninja-build/ninja/releases/download/v$NINJA_VERSION/ninja-mac.zip || exit 1
+        $no_build && echo "Skipping installation" && return
+        unzip ninja-mac-v$NINJA_VERSION.zip
+        sudo mv ninja /usr/local/bin
+        touch ninja-$NINJA_VERSION-done
+    fi
+}
+
+# Remove the Ninja binary; with "-r", also delete the downloaded zipball.
+uninstall_ninja() {
+    if [ ! -z "$installed_ninja_version" ]; then
+        echo "Uninstalling Ninja:"
+        sudo rm /usr/local/bin/ninja
+        rm ninja-$installed_ninja_version-done
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            rm -f ninja-mac-v$installed_ninja_version.zip
+        fi
+
+        installed_ninja_version=""
+    fi
+}
+
+# Install Asciidoctor via RubyGems (no source download/build step).
+install_asciidoctor() {
+    if [ ! -f asciidoctor-${ASCIIDOCTOR_VERSION}-done ]; then
+        echo "Downloading and installing Asciidoctor:"
+        sudo gem install -V asciidoctor --version "=${ASCIIDOCTOR_VERSION}"
+        touch asciidoctor-${ASCIIDOCTOR_VERSION}-done
+    fi
+}
+
+# Uninstall the gem-installed Asciidoctor.
+uninstall_asciidoctor() {
+    if [ ! -z "$installed_asciidoctor_version" ]; then
+        echo "Uninstalling Asciidoctor:"
+        sudo gem uninstall -V asciidoctor --version "=${installed_asciidoctor_version}"
+        rm asciidoctor-$installed_asciidoctor_version-done
+
+        ##if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+        #
+        # Get rid of the previously downloaded and unpacked version,
+        # whatever it might happen to be called.
+        #
+        ##    rm -f asciidoctor-$installed_asciidoctor_version
+        ##fi
+        installed_asciidoctor_version=""
+    fi
+}
+
+# Install Asciidoctor-pdf via RubyGems.
+install_asciidoctorpdf() {
+    if [ ! -f asciidoctorpdf-${ASCIIDOCTORPDF_VERSION}-done ]; then
+        ## XXX gem does not track dependencies that are installed for asciidoctor-pdf
+        ## record them for uninstallation
+        ## ttfunk, pdf-core, prawn, prawn-table, Ascii85, ruby-rc4, hashery, afm, pdf-reader, prawn-templates, public_suffix, addressable, css_parser, prawn-svg, prawn-icon, safe_yaml, thread_safe, polyglot, treetop, asciidoctor-pdf
+        echo "Downloading and installing Asciidoctor-pdf:"
+        sudo gem install -V asciidoctor-pdf --version "=${ASCIIDOCTORPDF_VERSION}"
+        touch asciidoctorpdf-${ASCIIDOCTORPDF_VERSION}-done
+    fi
+}
+
+# Uninstall the gem-installed Asciidoctor-pdf.
+uninstall_asciidoctorpdf() {
+    if [ ! -z "$installed_asciidoctorpdf_version" ]; then
+        # NOTE(review): this message says "Asciidoctor" but the gem being
+        # removed is asciidoctor-pdf.
+        echo "Uninstalling Asciidoctor:"
+        sudo gem uninstall -V asciidoctor-pdf --version "=${installed_asciidoctorpdf_version}"
+        ## XXX uninstall dependencies
+        rm asciidoctorpdf-$installed_asciidoctorpdf_version-done
+
+        ##if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+        #
+        # Get rid of the previously downloaded and unpacked version,
+        # whatever it might happen to be called.
+        #
+        ##    rm -f asciidoctorpdf-$installed_asciidoctorpdf_version
+        ##fi
+        installed_asciidoctorpdf_version=""
+    fi
+}
+
+# Install a binary CMake distribution: drag-install the official DMG into
+# /Applications and symlink its command-line tools into /usr/local/bin.
+install_cmake() {
+    if [ ! -f cmake-$CMAKE_VERSION-done ]; then
+        echo "Downloading and installing CMake:"
+        CMAKE_MAJOR_VERSION="`expr $CMAKE_VERSION : '\([0-9][0-9]*\).*'`"
+        CMAKE_MINOR_VERSION="`expr $CMAKE_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        CMAKE_MAJOR_MINOR_VERSION=$CMAKE_MAJOR_VERSION.$CMAKE_MINOR_VERSION
+
+        #
+        # NOTE: the "64" in "Darwin64" doesn't mean "64-bit-only"; the
+        # package in question supports both 32-bit and 64-bit x86.
+        #
+        case "$CMAKE_MAJOR_VERSION" in
+
+        0|1|2)
+            # Quote the whole message (it previously read
+            # `echo "..." is too old 1>&2`) and bail out instead of
+            # falling through and touching the -done marker for a
+            # version that was never installed.
+            echo "CMake $CMAKE_VERSION is too old" 1>&2
+            return 1
+            ;;
+
+        3)
+            #
+            # Download the DMG and do a drag install, where "drag" means
+            # "mv".
+            #
+            # 3.1.1 to 3.19.1 have a Darwin-x86_64 DMG.
+            # 3.19.2 has a macos-universal DMG for 10.10 and later
+            # 3.19.3 and later have a macos-universal DMG for 10.13 and later,
+            # and a macos10.10-universal DMG for 10.10 and later.
+            #
+            if [ "$CMAKE_MINOR_VERSION" -lt 5 ]; then
+                # Bail out here too; continuing would try to download a
+                # DMG with $type unset.
+                echo "CMake $CMAKE_VERSION is too old" 1>&2
+                return 1
+            elif [ "$CMAKE_MINOR_VERSION" -lt 19 -o \
+                 "$CMAKE_VERSION" = 3.19.0 -o \
+                 "$CMAKE_VERSION" = 3.19.1 ]; then
+                type="Darwin-x86_64"
+            elif [ "$CMAKE_VERSION" = 3.19.2 -o \
+                 "$DARWIN_MAJOR_VERSION" -ge 17 ]; then
+                type="macos-universal"
+            else
+                # The artifact CMake actually publishes for 3.19.3+ on
+                # pre-10.13 systems is "macos10.10-universal" (this
+                # previously said "macos10.0", which doesn't exist).
+                type="macos10.10-universal"
+            fi
+            [ -f cmake-$CMAKE_VERSION-$type.dmg ] || curl -L -O https://cmake.org/files/v$CMAKE_MAJOR_MINOR_VERSION/cmake-$CMAKE_VERSION-$type.dmg || exit 1
+            $no_build && echo "Skipping installation" && return
+            sudo hdiutil attach cmake-$CMAKE_VERSION-$type.dmg || exit 1
+            sudo ditto /Volumes/cmake-$CMAKE_VERSION-$type/CMake.app /Applications/CMake.app || exit 1
+
+            #
+            # Plant the appropriate symbolic links in /usr/local/bin.
+            # It's a drag-install, so there's no installer to make them,
+            # and the CMake code to put them in place is lame, as
+            #
+            #    1) it defaults to /usr/bin, not /usr/local/bin;
+            #    2) it doesn't request the necessary root privileges;
+            #    3) it can't be run from the command line;
+            #
+            # so we do it ourselves.
+            #
+            for i in ccmake cmake cmake-gui cmakexbuild cpack ctest
+            do
+                sudo ln -s /Applications/CMake.app/Contents/bin/$i /usr/local/bin/$i
+            done
+            sudo hdiutil detach /Volumes/cmake-$CMAKE_VERSION-$type
+            ;;
+
+        *)
+            ;;
+        esac
+        touch cmake-$CMAKE_VERSION-done
+    fi
+}
+
+# Remove the drag-installed CMake.app and its /usr/local/bin symlinks;
+# with "-r", also delete any downloaded DMG.
+uninstall_cmake() {
+    if [ ! -z "$installed_cmake_version" ]; then
+        echo "Uninstalling CMake:"
+        installed_cmake_major_version="`expr $installed_cmake_version : '\([0-9][0-9]*\).*'`"
+        case "$installed_cmake_major_version" in
+
+        0|1|2)
+            echo "CMake $installed_cmake_version is too old" 1>&2
+            ;;
+
+        3)
+            sudo rm -rf /Applications/CMake.app
+            for i in ccmake cmake cmake-gui cmakexbuild cpack ctest
+            do
+                sudo rm -f /usr/local/bin/$i
+            done
+            rm cmake-$installed_cmake_version-done
+            ;;
+        esac
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version,
+            # whatever it might happen to be called.  Cover every DMG
+            # name CMake has shipped (including macos10.10-universal,
+            # the name used for 3.19.3+ on pre-10.13 systems, which was
+            # previously never cleaned up; the macos10.0 name is kept
+            # for files fetched by older runs of this script).
+            #
+            rm -f cmake-$installed_cmake_version-Darwin-x86_64.dmg
+            rm -f cmake-$installed_cmake_version-macos-universal.dmg
+            rm -f cmake-$installed_cmake_version-macos10.10-universal.dmg
+            rm -f cmake-$installed_cmake_version-macos10.0-universal.dmg
+        fi
+
+        installed_cmake_version=""
+    fi
+}
+
+install_meson() {
+    #
+    # Install Meson with pip3 if we don't have it already.
+    #
+    if $MESON --version >/dev/null 2>&1
+    then
+        # We have it.  No marker file is written in this case, so
+        # uninstall_meson below will leave a pre-existing Meson alone.
+        :
+    else
+        sudo pip3 install meson
+        touch meson-done
+    fi
+}
+
+uninstall_meson() {
+    #
+    # If we installed Meson, uninstall it with pip3.
+    # NOTE(review): "pip3 uninstall" prompts for confirmation; consider
+    # adding -y if this script must run unattended.
+    #
+    if [ -f meson-done ] ; then
+        sudo pip3 uninstall meson
+        rm -f meson-done
+    fi
+}
+
+install_pytest() {
+    #
+    # Install pytest with pip3 if we don't have it already.
+    #
+    if python3 -m pytest --version >/dev/null 2>&1
+    then
+        # We have it.
+        :
+    else
+        sudo pip3 install pytest pytest-xdist
+        touch pytest-done
+    fi
+}
+
+uninstall_pytest() {
+    #
+    # If we installed pytest, uninstall it with pip3.
+    #
+    if [ -f pytest-done ] ; then
+        sudo pip3 uninstall pytest pytest-xdist
+        rm -f pytest-done
+    fi
+}
+
+install_gettext() {
+ if [ ! -f gettext-$GETTEXT_VERSION-done ] ; then
+ echo "Downloading, building, and installing GNU gettext:"
+ [ -f gettext-$GETTEXT_VERSION.tar.gz ] || curl -L -O https://ftp.gnu.org/pub/gnu/gettext/gettext-$GETTEXT_VERSION.tar.gz || exit 1
+ $no_build && echo "Skipping installation" && return
+ gzcat gettext-$GETTEXT_VERSION.tar.gz | tar xf - || exit 1
+ cd gettext-$GETTEXT_VERSION
+
+ #
+ # This is annoying.
+ #
+ # GNU gettext's configuration script checks for the presence of an
+ # implementation of iconv(). Not only does it check whether iconv()
+ # is available, *but* it checks for certain behavior *not* specified
+ # by POSIX that the GNU implementation provides, namely that an
+ # attempt to convert the UTF-8 for the EURO SYMBOL chaaracter to
+ # ISO 8859-1 results in an error.
+ #
+ # macOS, prior to Sierra, provided the GNU iconv library (as it's
+ # a POSIX API).
+ #
+ # Sierra appears to have picked up an implementation from FreeBSD
+ # (that implementation originated with the CITRUS project:
+ #
+ # http://citrus.bsdclub.org
+ #
+ # with additional work done to integrate it into NetBSD, and then
+ # adopted by FreeBSD with further work done).
+ #
+ # That implementation does *NOT* return an error in that case; instead,
+ # it transliterates the EURO SYMBOL to "EUR".
+ #
+ # Both behaviors conform to POSIX.
+ #
+ # This causes GNU gettext's configure script to conclude that it
+ # should not say iconv() is available. That, unfortunately, causes
+ # the build to fail with a linking error when trying to build
+ # libtextstyle (a library for which we have no use, that is offered
+ # as a separate library by the GNU project:
+ #
+ # https://www.gnu.org/software/gettext/libtextstyle/manual/libtextstyle.html
+ #
+ # and that is presumably bundled in GNU gettext because some gettext
+ # tool depends on it). The failure appears to be due to:
+ #
+ # libtextstyle's exported symbols file is generated from a
+ # template and a script that passes through only symbols
+ # that appear in a header file that declares the symbol
+ # as extern;
+ #
+ # one such header file declares iconv_ostream_create, but only
+ # if HAVE_ICONV is defined.
+ #
+ # the source file that defines iconv_ostream_create does so
+ # only if HAVE_ICONV is defined;
+ #
+ # the aforementioned script pays *NO ATTENTION* to #ifdefs,
+ # so it will include iconv_ostream_create in the list of
+ # symbols to export regardless of whether a working iconv()
+ # was found;
+ #
+ # the linker failing because it was told to export a symbol
+ # that doesn't exist.
+ #
+ # This is a collection of multiple messes:
+ #
+ # 1) not all versions of iconv() defaulting to "return an error
+ # if the target character set doesn't have a character that
+ # corresponds to the source character" and not offering a way
+ # to force that behavior;
+ #
+ # 2) either some parts of GNU gettext - and libraries bundled
+ # with it, for some mysterious reason - depending on the GNU
+ # behavior rather than assuming only what POSIX specifies, and
+ # the configure script checking for the GNU behavior and not
+ # setting HAVE_ICONV if it's not found;
+ #
+ # 3) the process for building the exported symbols file not
+ # removing symbols that won't exist in the build due to
+ # a "working" iconv() not being found;
+ #
+ # 4) the file that would define iconv_ostream_create() not
+ # defining as an always-failing stub if HAVE_ICONV isn't
+ # defined;
+ #
+ # 5) macOS's linker failing if a symbol is specified in an
+ # exported symbols file but not found, while other linkers
+ # just ignore it? (I add this because I'm a bit surprised
+ # that this has not been fixed, as I suspect it would fail
+ # on FreeBSD and possibly NetBSD as well, as I think their
+ # iconv()s also default to transliterating rather than failing
+ # if an input character has no corresponding character in
+ # the output encoding.)
+ #
+ # The Homebrew folks are aware of this and have reported it to
+ # Apple as a "feedback", for what that's worth:
+ #
+ # https://github.com/Homebrew/homebrew-core/commit/af3b4da5a096db3d9ee885e99ed29b33dec1f1c4
+ #
+ # We adopt their fix, which is to run the configure script with
+ # "am_cv_func_iconv_works=y" as one of the arguments if it's
+ # running on Sonoma; in at least one test, doing so on Ventura
+ # caused the build to fail.
+ #
+ if [[ $DARWIN_MAJOR_VERSION -ge 23 ]]; then
+ workaround_arg="am_cv_func_iconv_works=y"
+ else
+ workaround_arg=
+ fi
+ CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure $workaround_arg || exit 1
+ make $MAKE_BUILD_OPTS || exit 1
+ $DO_MAKE_INSTALL || exit 1
+ cd ..
+ touch gettext-$GETTEXT_VERSION-done
+ fi
+}
+
+# Uninstall gettext, first removing GLib which depends on it; with "-r",
+# also delete the tarball and source tree.
+uninstall_gettext() {
+    if [ ! -z "$installed_gettext_version" ] ; then
+        #
+        # GLib depends on this, so uninstall it.
+        #
+        uninstall_glib "$@"
+
+        echo "Uninstalling GNU gettext:"
+        cd gettext-$installed_gettext_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm gettext-$installed_gettext_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf gettext-$installed_gettext_version
+            rm -rf gettext-$installed_gettext_version.tar.gz
+        fi
+
+        installed_gettext_version=""
+    fi
+}
+
+install_pkg_config() {
+    # Download, build, and install pkg-config from source, using its
+    # bundled copy of GLib (--with-internal-glib) so there's no circular
+    # dependency on the GLib we build later.  A -done marker file makes
+    # this idempotent across runs.
+    if [ -f pkg-config-$PKG_CONFIG_VERSION-done ] ; then
+        return
+    fi
+    echo "Downloading, building, and installing pkg-config:"
+    if [ ! -f pkg-config-$PKG_CONFIG_VERSION.tar.gz ] ; then
+        curl -L -O https://pkgconfig.freedesktop.org/releases/pkg-config-$PKG_CONFIG_VERSION.tar.gz || exit 1
+    fi
+    $no_build && echo "Skipping installation" && return
+    gzcat pkg-config-$PKG_CONFIG_VERSION.tar.gz | tar xf - || exit 1
+    cd pkg-config-$PKG_CONFIG_VERSION
+    ./configure --with-internal-glib || exit 1
+    make $MAKE_BUILD_OPTS || exit 1
+    $DO_MAKE_INSTALL || exit 1
+    cd ..
+    touch pkg-config-$PKG_CONFIG_VERSION-done
+}
+
+uninstall_pkg_config() {
+    # Undo install_pkg_config: "make uninstall" in the unpacked source
+    # tree, clean it, and remove the -done marker.  With "-r", also
+    # delete the source tree and the downloaded tarball.
+    if [ ! -z "$installed_pkg_config_version" ] ; then
+        echo "Uninstalling pkg-config:"
+        cd pkg-config-$installed_pkg_config_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm pkg-config-$installed_pkg_config_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf pkg-config-$installed_pkg_config_version
+            rm -rf pkg-config-$installed_pkg_config_version.tar.gz
+        fi
+
+        installed_pkg_config_version=""
+    fi
+}
+
+install_glib() {
+    # Download, build, and install GLib.  Handles the autotools-to-Meson
+    # build-system switch at GLib 2.59, and, if the OS supplies libffi
+    # but pkg-config can't find it, generates and installs a libffi.pc
+    # so GLib's configuration can locate it.
+    if [ ! -f glib-$GLIB_VERSION-done ] ; then
+        echo "Downloading, building, and installing GLib:"
+        glib_dir=`expr $GLIB_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
+        #
+        # Starting with GLib 2.28.8, xz-compressed tarballs are available.
+        #
+        [ -f glib-$GLIB_VERSION.tar.xz ] || curl -L -O https://download.gnome.org/sources/glib/$glib_dir/glib-$GLIB_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat glib-$GLIB_VERSION.tar.xz | tar xf - || exit 1
+        cd glib-$GLIB_VERSION
+        #
+        # First, determine where the system include files are.
+        # (It's not necessarily /usr/include.)  There's a bit of a
+        # greasy hack here; pre-5.x versions of the developer tools
+        # don't support the --show-sdk-path option, and will produce
+        # no output, so includedir will be set to /usr/include
+        # (in those older versions of the developer tools, there is
+        # a /usr/include directory).
+        #
+        # We need this for several things we do later.
+        #
+        includedir=`SDKROOT="$SDKPATH" xcrun --show-sdk-path 2>/dev/null`/usr/include
+        #
+        # GLib's configuration procedure, whether autotools-based or
+        # Meson-based, really likes to use pkg-config to find libraries,
+        # including libffi.
+        #
+        # At least some versions of macOS provide libffi, but, as macOS
+        # doesn't provide pkg-config, they don't provide a .pc file for
+        # it, so the autotools-based configuration needs some trickery
+        # to get it to find the OS-supplied libffi, and the Meson-based
+        # configuration simply won't find it at all.
+        #
+        # So, if we have a system-provided libffi, but pkg-config
+        # doesn't find libffi, we construct a .pc file for that libffi,
+        # and install it in /usr/local/lib/pkgconfig.
+        #
+        # First, check whether pkg-config finds libffi but thinks its
+        # header files are in a non-existent directory.  That probably
+        # means that we generated the .pc file when some SDK was the
+        # appropriate choice, but Xcode has been updated since then
+        # and that SDK is no longer present.  If so, we remove it,
+        # so that we will regenerate it if necessary, rather than
+        # trying to build with a bogus include directory.  (Yes, this
+        # can happen, and has happened, causing mysterious build
+        # failures when "#include <ffi.h>" fails.)
+        #
+        if pkg-config libffi ; then
+            # We have a .pc file for libffi; what does it say the
+            # include directory is?
+            incldir=`pkg-config --variable=includedir libffi`
+            if [ ! -z "$incldir" -a ! -d "$incldir" ] ; then
+                # Bogus - remove it, assuming
+                $DO_RM /usr/local/lib/pkgconfig/libffi.pc
+            fi
+        fi
+        if pkg-config libffi ; then
+            # It found libffi; no need to install a .pc file, and we
+            # don't want to overwrite what's there already.
+            :
+        elif [ ! -e $includedir/ffi/ffi.h ] ; then
+            # We don't appear to have libffi as part of the system, so
+            # let the configuration process figure out what to do.
+            #
+            # We test for the header file, not the library, because, in
+            # Big Sur and later, there's no guarantee that, for a system
+            # shared library, there's a corresponding dylib file in
+            # /usr/lib.
+            :
+        else
+            #
+            # We have libffi, but pkg-config didn't find it; generate
+            # and install the .pc file.
+            #
+
+            #
+            # Now generate the .pc file.
+            #
+            # We generate the contents of the .pc file by using cat with
+            # a here document containing a template for the file and
+            # piping that to a sed command that replaces @INCLUDEDIR@ in
+            # the template with the include directory we discovered
+            # above, so that the .pc file gives the compiler flags
+            # necessary to find the libffi headers (which are *not*
+            # necessarily in /usr/include, as per the above).
+            #
+            # The EOF marker for the here document is in quotes, to tell
+            # the shell not to do shell expansion, as .pc files use a
+            # syntax to refer to .pc file variables that looks like the
+            # syntax to refer to shell variables.
+            #
+            # The writing of the libffi.pc file is a greasy hack - the
+            # process of generating the contents of the .pc file writes
+            # to the standard output, but running the last process in
+            # the pipeline as root won't allow the shell that's
+            # *running* it to open the .pc file if we don't have write
+            # permission on /usr/local/lib/pkgconfig, so we need a
+            # program that creates a file and then reads from the
+            # standard input and writes to that file.  UN*Xes have a
+            # program that does that; it's called "tee". :-)
+            #
+            # However, it *also* writes the file to the standard output,
+            # so we redirect that to /dev/null when we run it.
+            #
+            cat <<"EOF" | sed "s;@INCLUDEDIR@;$includedir;" | $DO_TEE_TO_PC_FILE /usr/local/lib/pkgconfig/libffi.pc >/dev/null
+prefix=/usr
+libdir=${prefix}/lib
+includedir=@INCLUDEDIR@
+
+Name: ffi
+Description: Library supporting Foreign Function Interfaces
+Version: 3.2.9999
+Libs: -L${libdir} -lffi
+Cflags: -I${includedir}/ffi
+EOF
+        fi
+
+        #
+        # GLib 2.59.1 and later use Meson+Ninja as the build system.
+        #
+        case $GLIB_MAJOR_VERSION in
+
+        1)
+            echo "GLib $GLIB_VERSION" is too old 1>&2
+            ;;
+
+        *)
+            case $GLIB_MINOR_VERSION in
+
+            [0-9]|1[0-9]|2[0-9]|3[0-7])
+                echo "GLib $GLIB_VERSION" is too old 1>&2
+                ;;
+
+            3[8-9]|4[0-9]|5[0-8])
+                if [ ! -f ./configure ]; then
+                    LIBTOOLIZE=glibtoolize ./autogen.sh
+                fi
+                #
+                # At least with the version of Xcode that comes with
+                # Leopard, /usr/include/ffi/fficonfig.h doesn't define
+                # MACOSX, which causes the build of GLib to fail for at
+                # least some versions of GLib.  If we don't find
+                # "#define.*MACOSX" in /usr/include/ffi/fficonfig.h,
+                # explicitly define it.
+                #
+                # While we're at it, suppress -Wformat-nonliteral to
+                # avoid a case where clang's stricter rules on when not
+                # to complain about non-literal format arguments cause
+                # it to complain about code that's safe but it wasn't
+                # told that.  See my comment #25 in GNOME bug 691608:
+                #
+                # https://bugzilla.gnome.org/show_bug.cgi?id=691608#c25
+                #
+                if grep -qs '#define.*MACOSX' $includedir/ffi/fficonfig.h
+                then
+                    # It's defined, nothing to do
+                    CFLAGS="$CFLAGS -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+                else
+                    CFLAGS="$CFLAGS -DMACOSX -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -DMACOSX -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+                fi
+                make $MAKE_BUILD_OPTS || exit 1
+                $DO_MAKE_INSTALL || exit 1
+                ;;
+
+            59|[6-9][0-9]|[1-9][0-9][0-9])
+                #
+                # 2.59.0 doesn't require Meson and Ninja, but it
+                # supports it, and I'm too lazy to add a dot-dot
+                # version check.
+                #
+                # Disable tests to work around
+                #
+                # https://gitlab.gnome.org/GNOME/glib/-/issues/2902
+                #
+                CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" $MESON -Dtests=false _build || exit 1
+                ninja $MAKE_BUILD_OPTS -C _build || exit 1
+                $DO_NINJA_INSTALL || exit 1
+                ;;
+            *)
+                echo "Glib's put out 1000 2.x releases?" 1>&2
+                ;;
+
+            esac
+        esac
+        cd ..
+        touch glib-$GLIB_VERSION-done
+    fi
+}
+
+uninstall_glib() {
+    # Undo install_glib.  Picks the uninstall procedure that matches the
+    # build system used for the *installed* version (autotools "make
+    # uninstall" up to 2.58, Meson+Ninja from 2.59 on).  With "-r", also
+    # delete the source tree and the downloaded tarball.
+    if [ ! -z "$installed_glib_version" ] ; then
+        echo "Uninstalling GLib:"
+        cd glib-$installed_glib_version
+        installed_glib_major_version="`expr $installed_glib_version : '\([0-9][0-9]*\).*'`"
+        installed_glib_minor_version="`expr $installed_glib_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        installed_glib_dotdot_version="`expr $installed_glib_version : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        installed_glib_major_minor_version=$installed_glib_major_version.$installed_glib_minor_version
+        installed_glib_major_minor_dotdot_version=$installed_glib_major_version.$installed_glib_minor_version.$installed_glib_dotdot_version
+        #
+        # GLib 2.59.1 and later use Meson+Ninja as the build system.
+        #
+        case $installed_glib_major_version in
+
+        1)
+            $DO_MAKE_UNINSTALL || exit 1
+            #
+            # This appears to delete dependencies out from under other
+            # Makefiles in the tree, causing it to fail.  At least until
+            # that gets fixed, if it ever gets fixed, we just ignore the
+            # exit status of "make distclean"
+            #
+            # make distclean || exit 1
+            make distclean || echo "Ignoring make distclean failure" 1>&2
+            ;;
+
+        *)
+            case $installed_glib_minor_version in
+
+            [0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-8])
+                $DO_MAKE_UNINSTALL || exit 1
+                #
+                # This appears to delete dependencies out from under other
+                # Makefiles in the tree, causing it to fail.  At least until
+                # that gets fixed, if it ever gets fixed, we just ignore the
+                # exit status of "make distclean"
+                #
+                # make distclean || exit 1
+                make distclean || echo "Ignoring make distclean failure" 1>&2
+                ;;
+
+            59|[6-9][0-9]|[1-9][0-9][0-9])
+                #
+                # 2.59.0 doesn't require Meson and Ninja, but it
+                # supports it, and I'm too lazy to add a dot-dot
+                # version check.
+                #
+                $DO_NINJA_UNINSTALL || exit 1
+                #
+                # For Meson+Ninja, we do the build in an _build
+                # subdirectory, so the equivalent of "make distclean"
+                # is just to remove the directory tree.
+                #
+                rm -rf _build
+                ;;
+
+            *)
+                echo "Glib's put out 1000 2.x releases?" 1>&2
+                ;;
+            esac
+        esac
+        cd ..
+        rm glib-$installed_glib_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf glib-$installed_glib_version
+            rm -rf glib-$installed_glib_version.tar.xz
+        fi
+
+        installed_glib_version=""
+    fi
+}
+
+install_qt() {
+    # Download and install a Qt offline installer, if QT_VERSION is set
+    # and it hasn't been installed yet.  Offline installers only exist
+    # for Qt 5.9 through 5.14; for anything else we print a diagnostic
+    # and, in the 5.x case, return without attempting a bogus download.
+    if [ "$QT_VERSION" -a ! -f qt-$QT_VERSION-done ]; then
+        echo "Downloading and installing Qt:"
+        #
+        # What you get for this URL might just be a 302 Found reply, so use
+        # -L so we get redirected.
+        #
+        # 5.0 - 5.1:  qt-mac-opensource-{version}-clang-offline.dmg
+        # 5.2.0:      qt-mac-opensource-{version}.dmg
+        # 5.2.1:      qt-opensource-mac-x64-clang-{version}.dmg
+        # 5.3 - 5.8:  qt-opensource-mac-x64-clang-{version}.dmg
+        # 5.9 - 5.14: qt-opensource-mac-x64-{version}.dmg
+        # 5.15 - 6.0: Offline installers no longer provided.
+        # ( https://download.qt.io/archive/qt/5.15/5.15.0/OFFLINE_README.txt )
+        # XXX: We need a different approach for QT >= 5.15. One option would be to
+        # install https://github.com/miurahr/aqtinstall, either permanently or into
+        # a temporary venv.
+        #
+        case $QT_MAJOR_VERSION in
+
+        1|2|3|4)
+            echo "Qt $QT_VERSION" is too old 1>&2
+            ;;
+
+        5)
+            case $QT_MINOR_VERSION in
+
+            0|1|2|3|4|5|6|7|8)
+                echo "Qt $QT_VERSION" is too old 1>&2
+                ;;
+
+            9|10|11|12|13|14)
+                QT_VOLUME=qt-opensource-mac-x64-$QT_VERSION
+                ;;
+            *)
+                echo "The Qt Company no longer provides open source offline installers for Qt $QT_VERSION" 1>&2
+                ;;
+
+            esac
+            #
+            # If no installer volume name was chosen above (the version
+            # is either too old or has no offline installer), don't fall
+            # through and try to fetch and mount an empty-named .dmg;
+            # just give up here.
+            #
+            if [ -z "$QT_VOLUME" ] ; then
+                return 1
+            fi
+            [ -f $QT_VOLUME.dmg ] || curl -L -O https://download.qt.io/archive/qt/$QT_MAJOR_MINOR_VERSION/$QT_MAJOR_MINOR_DOTDOT_VERSION/$QT_VOLUME.dmg || exit 1
+            $no_build && echo "Skipping installation" && return
+            sudo hdiutil attach $QT_VOLUME.dmg || exit 1
+
+            #
+            # Run the installer executable directly, so that we wait for
+            # it to finish.  Then unmount the volume.
+            #
+            /Volumes/$QT_VOLUME/$QT_VOLUME.app/Contents/MacOS/$QT_VOLUME
+            sudo hdiutil detach /Volumes/$QT_VOLUME
+            touch qt-$QT_VERSION-done
+            ;;
+        *)
+            echo "The Qt Company no longer provides open source offline installers for Qt $QT_VERSION" 1>&2
+            ;;
+        esac
+    fi
+}
+
+uninstall_qt() {
+    # Undo install_qt: remove the installed Qt tree from $HOME and the
+    # -done marker.  With "-r", also delete the downloaded installer
+    # image, whose name depends on the installed Qt version.
+    if [ ! -z "$installed_qt_version" ] ; then
+        echo "Uninstalling Qt:"
+        rm -rf $HOME/Qt$installed_qt_version
+        rm qt-$installed_qt_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded version.
+            #
+            # 5.0 - 5.1:  qt-mac-opensource-{version}-clang-offline.dmg
+            # 5.2.0:      qt-mac-opensource-{version}.dmg
+            # 5.2.1:      qt-opensource-mac-x64-clang-{version}.dmg
+            # 5.3 - 5.8:  qt-opensource-mac-x64-clang-{version}.dmg
+            # 5.9 - 5.14: qt-opensource-mac-x64-{version}.dmg
+            #
+            installed_qt_major_version="`expr $installed_qt_version : '\([0-9][0-9]*\).*'`"
+            installed_qt_minor_version="`expr $installed_qt_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+            installed_qt_dotdot_version="`expr $installed_qt_version : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+            # NOTE(review): if no case below matches (e.g. Qt >= 5.15),
+            # installed_qt_volume stays unset and the "rm -f" after the
+            # esac is a harmless no-op.
+            case $installed_qt_major_version in
+
+            1|2|3|4)
+                echo "Qt $installed_qt_version" is too old 1>&2
+                ;;
+
+            5*)
+                case $installed_qt_minor_version in
+
+                0|1|2|3|4|5)
+                    echo "Qt $installed_qt_version" is too old 1>&2
+                    ;;
+
+                6|7|8)
+                    installed_qt_volume=qt-opensource-mac-x64-clang-$installed_qt_version.dmg
+                    ;;
+
+                9|10|11|12|13|14)
+                    installed_qt_volume=qt-opensource-mac-x64-$installed_qt_version.dmg
+                    ;;
+                esac
+            esac
+            rm -f $installed_qt_volume
+        fi
+
+        installed_qt_version=""
+    fi
+}
+
+install_libsmi() {
+    # Download, build, and install libsmi if LIBSMI_VERSION is set and it
+    # hasn't been installed yet (tracked by a -done marker file).
+    if [ -z "$LIBSMI_VERSION" ] || [ -f libsmi-$LIBSMI_VERSION-done ] ; then
+        return
+    fi
+    echo "Downloading, building, and installing libsmi:"
+    if [ ! -f libsmi-$LIBSMI_VERSION.tar.gz ] ; then
+        curl -L -O https://www.ibr.cs.tu-bs.de/projects/libsmi/download/libsmi-$LIBSMI_VERSION.tar.gz || exit 1
+    fi
+    $no_build && echo "Skipping installation" && return
+    gzcat libsmi-$LIBSMI_VERSION.tar.gz | tar xf - || exit 1
+    cd libsmi-$LIBSMI_VERSION
+    # Pass the deployment-target and SDK flags so the result matches the
+    # rest of the build.
+    CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+    make $MAKE_BUILD_OPTS || exit 1
+    $DO_MAKE_INSTALL || exit 1
+    cd ..
+    touch libsmi-$LIBSMI_VERSION-done
+}
+
+uninstall_libsmi() {
+    # Undo install_libsmi: "make uninstall" in the unpacked source tree,
+    # clean it, and remove the -done marker.  With "-r", also delete the
+    # source tree and the downloaded tarball.
+    if [ ! -z "$installed_libsmi_version" ] ; then
+        echo "Uninstalling libsmi:"
+        cd libsmi-$installed_libsmi_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm libsmi-$installed_libsmi_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libsmi-$installed_libsmi_version
+            rm -rf libsmi-$installed_libsmi_version.tar.gz
+        fi
+
+        installed_libsmi_version=""
+    fi
+}
+
+install_libgpg_error() {
+    # Download, build, and install libgpg-error (a prerequisite of
+    # libgcrypt), if LIBGPG_ERROR_VERSION is set and it hasn't been
+    # installed yet (tracked by a -done marker file).
+    if [ "$LIBGPG_ERROR_VERSION" -a ! -f libgpg-error-$LIBGPG_ERROR_VERSION-done ] ; then
+        echo "Downloading, building, and installing libgpg-error:"
+        [ -f libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/libgpg-error/libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 || exit 1
+        $no_build && echo "Skipping installation" && return
+        bzcat libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 | tar xf - || exit 1
+        cd libgpg-error-$LIBGPG_ERROR_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch libgpg-error-$LIBGPG_ERROR_VERSION-done
+    fi
+}
+
+uninstall_libgpg_error() {
+    # Undo install_libgpg_error.  libgcrypt is built against it, so
+    # uninstall libgcrypt first; then "make uninstall", clean, and remove
+    # the -done marker.  With "-r", also delete the source tree and the
+    # downloaded tarball.
+    if [ ! -z "$installed_libgpg_error_version" ] ; then
+        #
+        # libgcrypt depends on this, so uninstall it.
+        #
+        uninstall_libgcrypt "$@"
+
+        echo "Uninstalling libgpg-error:"
+        cd libgpg-error-$installed_libgpg_error_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm libgpg-error-$installed_libgpg_error_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libgpg-error-$installed_libgpg_error_version
+            rm -rf libgpg-error-$installed_libgpg_error_version.tar.bz2
+        fi
+
+        installed_libgpg_error_version=""
+    fi
+}
+
+install_libgcrypt() {
+    # Download, build, and install libgcrypt, if LIBGCRYPT_VERSION is set
+    # and it hasn't been installed yet.  Requires libgpg-error to have
+    # been selected for installation.
+    if [ "$LIBGCRYPT_VERSION" -a ! -f libgcrypt-$LIBGCRYPT_VERSION-done ] ; then
+        #
+        # libgpg-error is required for libgcrypt.
+        #
+        # Quote the operand: with an empty/unset LIBGPG_ERROR_VERSION an
+        # unquoted $LIBGPG_ERROR_VERSION would leave "test" with the
+        # single argument "-z", which only works by accident.
+        #
+        if [ -z "$LIBGPG_ERROR_VERSION" ]
+        then
+            echo "libgcrypt requires libgpg-error, but you didn't install libgpg-error." 1>&2
+            exit 1
+        fi
+
+        echo "Downloading, building, and installing libgcrypt:"
+        [ -f libgcrypt-$LIBGCRYPT_VERSION.tar.gz ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/libgcrypt/libgcrypt-$LIBGCRYPT_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat libgcrypt-$LIBGCRYPT_VERSION.tar.gz | tar xf - || exit 1
+        cd libgcrypt-$LIBGCRYPT_VERSION
+        #
+        # The assembler language code is not compatible with the macOS
+        # x86 assembler (or is it an x86-64 vs. x86-32 issue?).
+        #
+        # libgcrypt expects gnu89, not c99/gnu99, semantics for
+        # "inline".  See, for example:
+        #
+        #    https://lists.freebsd.org/pipermail/freebsd-ports-bugs/2010-October/198809.html
+        #
+        CFLAGS="$CFLAGS -std=gnu89 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-asm || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch libgcrypt-$LIBGCRYPT_VERSION-done
+    fi
+}
+
+uninstall_libgcrypt() {
+    # Undo install_libgcrypt: "make uninstall" in the unpacked source
+    # tree, clean it, and remove the -done marker.  With "-r", also
+    # delete the source tree and the downloaded tarball.
+    if [ ! -z "$installed_libgcrypt_version" ] ; then
+        echo "Uninstalling libgcrypt:"
+        cd libgcrypt-$installed_libgcrypt_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm libgcrypt-$installed_libgcrypt_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libgcrypt-$installed_libgcrypt_version
+            rm -rf libgcrypt-$installed_libgcrypt_version.tar.gz
+        fi
+
+        installed_libgcrypt_version=""
+    fi
+}
+
+install_gmp() {
+    # Download, build, and install GMP as a fat binary, if GMP_VERSION is
+    # set and it hasn't been installed yet.  Works around an Xcode 15
+    # linker incompatibility by falling back to the classic ld64.
+    if [ "$GMP_VERSION" -a ! -f gmp-$GMP_VERSION-done ] ; then
+        echo "Downloading, building, and installing GMP:"
+        [ -f gmp-$GMP_VERSION.tar.lz ] || curl -L -O https://gmplib.org/download/gmp/gmp-$GMP_VERSION.tar.lz || exit 1
+        $no_build && echo "Skipping installation" && return
+        lzip -c -d gmp-$GMP_VERSION.tar.lz | tar xf - || exit 1
+        cd gmp-$GMP_VERSION
+        #
+        # Create a fat binary: https://gmplib.org/manual/Notes-for-Package-Builds.html
+        #
+        # According to
+        #
+        #   https://www.mail-archive.com/gmp-bugs@gmplib.org/msg01492.html
+        #
+        # and other pages, the Shiny New Linker in Xcode 15 causes this
+        # build to fail with "ld: branch8 out of range 384833 in
+        # ___gmpn_add_nc_x86_64"; linking with -ld64 is a workaround.
+        #
+        # For now, link with -ld64 on Xcode 15 and later.
+        #
+        XCODE_VERSION=`xcodebuild -version | sed -n 's;Xcode \(.*\);\1;p'`
+        XCODE_MAJOR_VERSION="`expr $XCODE_VERSION : '\([0-9][0-9]*\).*'`"
+        XCODE_MINOR_VERSION="`expr $XCODE_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        XCODE_DOTDOT_VERSION="`expr $XCODE_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        if [ "$XCODE_MAJOR_VERSION" -ge 15 ]
+        then
+            LD64_FLAG="-ld64"
+        else
+            LD64_FLAG=""
+        fi
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS $LD64_FLAG" ./configure --enable-fat || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch gmp-$GMP_VERSION-done
+    fi
+}
+
+uninstall_gmp() {
+    # Undo install_gmp.  Nettle is built against GMP, so uninstall Nettle
+    # first; then "make uninstall", clean, and remove the -done marker.
+    # With "-r", also delete the source tree and the downloaded tarball.
+    if [ ! -z "$installed_gmp_version" ] ; then
+        #
+        # Nettle depends on this, so uninstall it.
+        #
+        uninstall_nettle "$@"
+
+        echo "Uninstalling GMP:"
+        cd gmp-$installed_gmp_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm gmp-$installed_gmp_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf gmp-$installed_gmp_version
+            rm -rf gmp-$installed_gmp_version.tar.lz
+        fi
+
+        installed_gmp_version=""
+    fi
+}
+
+install_libtasn1() {
+    # Download, build, and install libtasn1 if LIBTASN1_VERSION is set
+    # and it hasn't been installed yet (tracked by a -done marker file).
+    if [ -z "$LIBTASN1_VERSION" ] || [ -f libtasn1-$LIBTASN1_VERSION-done ] ; then
+        return
+    fi
+    echo "Downloading, building, and installing libtasn1:"
+    if [ ! -f libtasn1-$LIBTASN1_VERSION.tar.gz ] ; then
+        curl -L -O https://ftpmirror.gnu.org/libtasn1/libtasn1-$LIBTASN1_VERSION.tar.gz || exit 1
+    fi
+    $no_build && echo "Skipping installation" && return
+    gzcat libtasn1-$LIBTASN1_VERSION.tar.gz | tar xf - || exit 1
+    cd libtasn1-$LIBTASN1_VERSION
+    # Pass the deployment-target and SDK flags so the result matches the
+    # rest of the build.
+    CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+    make $MAKE_BUILD_OPTS || exit 1
+    $DO_MAKE_INSTALL || exit 1
+    cd ..
+    touch libtasn1-$LIBTASN1_VERSION-done
+}
+
+uninstall_libtasn1() {
+    # Undo install_libtasn1.  p11-kit is built against it, so uninstall
+    # p11-kit first; then "make uninstall", clean, and remove the -done
+    # marker.  With "-r", also delete the source tree and the downloaded
+    # tarball.
+    if [ ! -z "$installed_libtasn1_version" ] ; then
+        #
+        # p11-kit depends on this, so uninstall it.
+        #
+        uninstall_p11_kit "$@"
+
+        echo "Uninstalling libtasn1:"
+        cd libtasn1-$installed_libtasn1_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm libtasn1-$installed_libtasn1_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libtasn1-$installed_libtasn1_version
+            rm -rf libtasn1-$installed_libtasn1_version.tar.gz
+        fi
+
+        installed_libtasn1_version=""
+    fi
+}
+
+install_p11_kit() {
+    # Download, build, and install p11-kit (without libffi and without
+    # trust paths), if P11KIT_VERSION is set and it hasn't been installed
+    # yet (tracked by a -done marker file).
+    if [ "$P11KIT_VERSION" -a ! -f p11-kit-$P11KIT_VERSION-done ] ; then
+        echo "Downloading, building, and installing p11-kit:"
+        [ -f p11-kit-$P11KIT_VERSION.tar.xz ] || curl -L -O https://github.com/p11-glue/p11-kit/releases/download/$P11KIT_VERSION/p11-kit-$P11KIT_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat p11-kit-$P11KIT_VERSION.tar.xz | tar xf - || exit 1
+        cd p11-kit-$P11KIT_VERSION
+        #
+        # Prior to Catalina, the libffi that's supplied with macOS
+        # doesn't support ffi_closure_alloc() or ffi_prep_closure_loc(),
+        # both of which are required by p11-kit if built with libffi.
+        #
+        # According to
+        #
+        #   https://p11-glue.github.io/p11-glue/p11-kit/manual/devel-building.html
+        #
+        # libffi is used "for sharing of PKCS#11 modules between
+        # multiple callers in the same process. It is highly recommended
+        # that this dependency be treated as a required dependency.",
+        # but it's not clear that this matters to us, so we just
+        # configure p11-kit not to use libffi.
+        #
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -L/usr/local/lib" LIBS=-lintl ./configure --without-libffi --without-trust-paths || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch p11-kit-$P11KIT_VERSION-done
+    fi
+}
+
+uninstall_p11_kit() {
+    # Undo install_p11_kit.  Nettle is built against it, so uninstall
+    # Nettle first; then "make uninstall", clean, and remove the -done
+    # marker.  With "-r", also delete the source tree and the downloaded
+    # tarball.
+    if [ ! -z "$installed_p11_kit_version" ] ; then
+        #
+        # Nettle depends on this, so uninstall it.
+        #
+        uninstall_nettle "$@"
+
+        echo "Uninstalling p11-kit:"
+        cd p11-kit-$installed_p11_kit_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm p11-kit-$installed_p11_kit_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf p11-kit-$installed_p11_kit_version
+            rm -rf p11-kit-$installed_p11_kit_version.tar.xz
+        fi
+
+        installed_p11_kit_version=""
+    fi
+}
+
+install_nettle() {
+    # Download, build, and install Nettle, if NETTLE_VERSION is set and
+    # it hasn't been installed yet (tracked by a -done marker file).
+    # /usr/local include/lib paths are added so it finds the GMP we
+    # installed earlier.
+    if [ "$NETTLE_VERSION" -a ! -f nettle-$NETTLE_VERSION-done ] ; then
+        echo "Downloading, building, and installing Nettle:"
+        [ -f nettle-$NETTLE_VERSION.tar.gz ] || curl -L -O https://ftp.gnu.org/gnu/nettle/nettle-$NETTLE_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat nettle-$NETTLE_VERSION.tar.gz | tar xf - || exit 1
+        cd nettle-$NETTLE_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -I/usr/local/include" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -L/usr/local/lib" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch nettle-$NETTLE_VERSION-done
+    fi
+}
+
+uninstall_nettle() {
+    # Undo install_nettle.  GnuTLS is built against Nettle, so uninstall
+    # GnuTLS first; then "make uninstall", clean, and remove the -done
+    # marker.  With "-r", also delete the source tree and the downloaded
+    # tarball.
+    if [ ! -z "$installed_nettle_version" ] ; then
+        #
+        # GnuTLS depends on this, so uninstall it.
+        #
+        uninstall_gnutls "$@"
+
+        echo "Uninstalling Nettle:"
+        cd nettle-$installed_nettle_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm nettle-$installed_nettle_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf nettle-$installed_nettle_version
+            rm -rf nettle-$installed_nettle_version.tar.gz
+        fi
+
+        installed_nettle_version=""
+    fi
+}
+
+install_gnutls() {
+    # Download, build, and install GnuTLS, if GNUTLS_VERSION is set and
+    # it hasn't been installed yet.  Requires Nettle to have been
+    # selected for installation.
+    if [ "$GNUTLS_VERSION" -a ! -f gnutls-$GNUTLS_VERSION-done ] ; then
+        #
+        # GnuTLS requires Nettle.
+        #
+        # Quote the operand: with an empty/unset NETTLE_VERSION an
+        # unquoted $NETTLE_VERSION would leave "test" with the single
+        # argument "-z", which only works by accident.
+        #
+        if [ -z "$NETTLE_VERSION" ]
+        then
+            echo "GnuTLS requires Nettle, but you didn't install Nettle" 1>&2
+            exit 1
+        fi
+
+        echo "Downloading, building, and installing GnuTLS:"
+        if [[ $GNUTLS_MAJOR_VERSION -ge 3 ]]
+        then
+            #
+            # Starting with GnuTLS 3.x, the tarballs are compressed with
+            # xz rather than bzip2.
+            #
+            [ -f gnutls-$GNUTLS_VERSION.tar.xz ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/gnutls/v$GNUTLS_MAJOR_VERSION.$GNUTLS_MINOR_VERSION/gnutls-$GNUTLS_VERSION.tar.xz || exit 1
+            $no_build && echo "Skipping installation" && return
+            xzcat gnutls-$GNUTLS_VERSION.tar.xz | tar xf - || exit 1
+        else
+            [ -f gnutls-$GNUTLS_VERSION.tar.bz2 ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/gnutls/v$GNUTLS_MAJOR_VERSION.$GNUTLS_MINOR_VERSION/gnutls-$GNUTLS_VERSION.tar.bz2 || exit 1
+            $no_build && echo "Skipping installation" && return
+            bzcat gnutls-$GNUTLS_VERSION.tar.bz2 | tar xf - || exit 1
+        fi
+        cd gnutls-$GNUTLS_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -I /usr/local/include" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -I/usr/local/include/" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -L/usr/local/lib" ./configure --with-included-unistring --disable-guile || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch gnutls-$GNUTLS_VERSION-done
+    fi
+}
+
+uninstall_gnutls() {
+    # Undo install_gnutls: "make uninstall" in the unpacked source tree,
+    # clean it, and remove the -done marker.  With "-r", also delete the
+    # source tree and the downloaded tarball.
+    if [ ! -z "$installed_gnutls_version" ] ; then
+        echo "Uninstalling GnuTLS:"
+        cd gnutls-$installed_gnutls_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm gnutls-$installed_gnutls_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            # install_gnutls downloads a .tar.xz for GnuTLS 3.x and a
+            # .tar.bz2 for older releases, so remove whichever exists.
+            #
+            rm -rf gnutls-$installed_gnutls_version
+            rm -rf gnutls-$installed_gnutls_version.tar.bz2
+            rm -rf gnutls-$installed_gnutls_version.tar.xz
+        fi
+
+        installed_gnutls_version=""
+    fi
+}
+
+install_lua() {
+    # Download, build, and install Lua (using its "macosx" Makefile
+    # target), if LUA_VERSION is set and it hasn't been installed yet
+    # (tracked by a -done marker file).
+    if [ "$LUA_VERSION" -a ! -f lua-$LUA_VERSION-done ] ; then
+        echo "Downloading, building, and installing Lua:"
+        [ -f lua-$LUA_VERSION.tar.gz ] || curl -L -O https://www.lua.org/ftp/lua-$LUA_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat lua-$LUA_VERSION.tar.gz | tar xf - || exit 1
+        cd lua-$LUA_VERSION
+        make MYCFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" MYLDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" $MAKE_BUILD_OPTS macosx || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch lua-$LUA_VERSION-done
+    fi
+}
+
+uninstall_lua() {
+    # Undo install_lua.  Lua's Makefile has no "make uninstall" target,
+    # so the installed files are removed by hand from /usr/local; then
+    # "make clean" the source tree and remove the -done marker.  With
+    # "-r", also delete the source tree and the downloaded tarball.
+    if [ ! -z "$installed_lua_version" ] ; then
+        echo "Uninstalling Lua:"
+        #
+        # Lua has no "make uninstall", so just remove stuff manually.
+        # There's no configure script, so there's no need for
+        # "make distclean", either; just do "make clean".
+        #
+        (cd /usr/local/bin; $DO_RM -f lua luac)
+        (cd /usr/local/include; $DO_RM -f lua.h luaconf.h lualib.h lauxlib.h lua.hpp)
+        (cd /usr/local/lib; $DO_RM -f liblua.a)
+        (cd /usr/local/man/man1; $DO_RM -f lua.1 luac.1)
+        cd lua-$installed_lua_version
+        make clean || exit 1
+        cd ..
+        rm lua-$installed_lua_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf lua-$installed_lua_version
+            rm -rf lua-$installed_lua_version.tar.gz
+        fi
+
+        installed_lua_version=""
+    fi
+}
+
+install_snappy() {
+    # Download, build (with CMake, as a shared library), and install
+    # snappy, if SNAPPY_VERSION is set and it hasn't been installed yet
+    # (tracked by a -done marker file).
+    if [ "$SNAPPY_VERSION" -a ! -f snappy-$SNAPPY_VERSION-done ] ; then
+        echo "Downloading, building, and installing snappy:"
+        [ -f snappy-$SNAPPY_VERSION.tar.gz ] || curl -L -o snappy-$SNAPPY_VERSION.tar.gz https://github.com/google/snappy/archive/$SNAPPY_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat snappy-$SNAPPY_VERSION.tar.gz | tar xf - || exit 1
+        cd snappy-$SNAPPY_VERSION
+        if [ "$SNAPPY_VERSION" = "1.1.10" ] ; then
+            # This patch corresponds to https://github.com/google/snappy/commit/27f34a580be4a3becf5f8c0cba13433f53c21337
+            patch -p0 <${topdir}/macosx-support-lib-patches/snappy-signed.patch || exit 1
+        fi
+        mkdir build_dir
+        cd build_dir
+        #
+        # Build a shared library, because we'll be linking libwireshark,
+        # which is a C library, with libsnappy, and libsnappy is a C++
+        # library and requires the C++ run time; the shared library
+        # will carry that dependency with it, so linking with it should
+        # Just Work.
+        #
+        MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE -DBUILD_SHARED_LIBS=YES -DSNAPPY_BUILD_BENCHMARKS=NO -DSNAPPY_BUILD_TESTS=NO ../ || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../..
+        touch snappy-$SNAPPY_VERSION-done
+    fi
+}
+
+uninstall_snappy() {
+    # Undo install_snappy.  CMake has no "make uninstall", so remove the
+    # files listed in install_manifest.txt if it exists, otherwise fall
+    # back to a hard-coded list of what snappy installs.  With "-r",
+    # also delete the source tree and the downloaded tarball.
+    if [ ! -z "$installed_snappy_version" ] ; then
+        echo "Uninstalling snappy:"
+        cd snappy-$installed_snappy_version
+        #
+        # snappy uses cmake and doesn't support "make uninstall";
+        # just remove what we know it installs.
+        #
+        # $DO_MAKE_UNINSTALL || exit 1
+        if [ -s build_dir/install_manifest.txt ] ; then
+            while read -r ; do $DO_RM -v "$REPLY" ; done < <(cat build_dir/install_manifest.txt; echo)
+        else
+            $DO_RM -f /usr/local/lib/libsnappy.1.1.8.dylib \
+                      /usr/local/lib/libsnappy.1.dylib \
+                      /usr/local/lib/libsnappy.dylib \
+                      /usr/local/include/snappy-c.h \
+                      /usr/local/include/snappy-sinksource.h \
+                      /usr/local/include/snappy-stubs-public.h \
+                      /usr/local/include/snappy.h \
+                      /usr/local/lib/cmake/Snappy/SnappyConfig.cmake \
+                      /usr/local/lib/cmake/Snappy/SnappyConfigVersion.cmake \
+                      /usr/local/lib/cmake/Snappy/SnappyTargets-noconfig.cmake \
+                      /usr/local/lib/cmake/Snappy/SnappyTargets.cmake || exit 1
+        fi
+        #
+        # snappy uses cmake and doesn't support "make distclean";
+        # just remove the entire build directory.
+        #
+        # make distclean || exit 1
+        rm -rf build_dir || exit 1
+        cd ..
+        rm snappy-$installed_snappy_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf snappy-$installed_snappy_version
+            rm -rf snappy-$installed_snappy_version.tar.gz
+        fi
+
+        installed_snappy_version=""
+    fi
+}
+
+install_zstd() {
+    # Download, build, and install zstd if ZSTD_VERSION is set and it
+    # hasn't been installed yet (tracked by a -done marker file).  zstd
+    # has no configure script; its Makefile builds directly.
+    if [ -z "$ZSTD_VERSION" ] || [ -f zstd-$ZSTD_VERSION-done ] ; then
+        return
+    fi
+    echo "Downloading, building, and installing zstd:"
+    if [ ! -f zstd-$ZSTD_VERSION.tar.gz ] ; then
+        curl -L -O https://github.com/facebook/zstd/releases/download/v$ZSTD_VERSION/zstd-$ZSTD_VERSION.tar.gz || exit 1
+    fi
+    $no_build && echo "Skipping installation" && return
+    gzcat zstd-$ZSTD_VERSION.tar.gz | tar xf - || exit 1
+    cd zstd-$ZSTD_VERSION
+    make $MAKE_BUILD_OPTS || exit 1
+    $DO_MAKE_INSTALL || exit 1
+    cd ..
+    touch zstd-$ZSTD_VERSION-done
+}
+
+uninstall_zstd() {
+    # Undo install_zstd: "make uninstall" and "make clean" in the
+    # unpacked source tree and remove the -done marker.  With "-r",
+    # also delete the source tree and the downloaded tarball.
+    if [ ! -z "$installed_zstd_version" ] ; then
+        echo "Uninstalling zstd:"
+        cd zstd-$installed_zstd_version
+        $DO_MAKE_UNINSTALL || exit 1
+        #
+        # zstd has no configure script, so there's no need for
+        # "make distclean", and the Makefile supplied with it
+        # has no "make distclean" rule; just do "make clean".
+        #
+        make clean || exit 1
+        cd ..
+        rm zstd-$installed_zstd_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf zstd-$installed_zstd_version
+            rm -rf zstd-$installed_zstd_version.tar.gz
+        fi
+
+        installed_zstd_version=""
+    fi
+}
+
+# Download, build, and install libxml2, unless it is not requested
+# or already installed.
+install_libxml2() {
+    if [ "$LIBXML2_VERSION" -a ! -f libxml2-$LIBXML2_VERSION-done ] ; then
+        echo "Downloading, building, and installing libxml2:"
+        LIBXML2_MAJOR_VERSION="`expr $LIBXML2_VERSION : '\([0-9][0-9]*\).*'`"
+        LIBXML2_MINOR_VERSION="`expr $LIBXML2_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+        LIBXML2_MAJOR_MINOR_VERSION=$LIBXML2_MAJOR_VERSION.$LIBXML2_MINOR_VERSION
+        #
+        # The release tarballs are .tar.xz; test for the same name we
+        # download (the check previously looked for .tar.gz, which never
+        # exists, so a cached tarball was always re-downloaded).
+        #
+        [ -f libxml2-$LIBXML2_VERSION.tar.xz ] || curl -L -O https://download.gnome.org/sources/libxml2/$LIBXML2_MAJOR_MINOR_VERSION/libxml2-$LIBXML2_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat libxml2-$LIBXML2_VERSION.tar.xz | tar xf - || exit 1
+        cd libxml2-$LIBXML2_VERSION
+        #
+        # At least on macOS 12.0.1 with Xcode 13.1, when we build
+        # libxml2, the linker complains that we don't have the right
+        # to link with the Python framework, so don't build with
+        # Python.
+        #
+        CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --without-python || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch libxml2-$LIBXML2_VERSION-done
+    fi
+}
+
+# Uninstall libxml2 if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_libxml2() {
+    if [ ! -z "$installed_libxml2_version" ] ; then
+        echo "Uninstalling libxml2:"
+        cd libxml2-$installed_libxml2_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm libxml2-$installed_libxml2_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libxml2-$installed_libxml2_version
+            rm -rf libxml2-$installed_libxml2_version.tar.xz
+        fi
+
+        installed_libxml2_version=""
+    fi
+}
+
+# Download, build, and install lz4, unless it is not requested
+# or already installed.
+install_lz4() {
+    if [ "$LZ4_VERSION" -a ! -f lz4-$LZ4_VERSION-done ] ; then
+        echo "Downloading, building, and installing lz4:"
+        #
+        # lz4 switched from sequentially numbered releases, named rN,
+        # to vX.Y.Z-numbered releases.
+        #
+        # The old sequentially-numbered releases were in tarballs
+        # at https://github.com/lz4/lz4/archive/rN.tar.gz, which
+        # extract into an lz4-rN directory.
+        #
+        # The new vX.Y.Z-numbered releases are in tarballs at
+        # https://github.com/lz4/lz4/archive/vX.Y.Z.tar.gz, which
+        # extract into an lz4-X.Y.Z directory - no, not lz4-vX.Y.Z,
+        # just lz4-X.Y.Z.
+        #
+        # We expect LZ4_VERSION to be set to rN for the sequentially-
+        # numbered releases and X.Y.Z - not vX.Y.Z - for the vX.Y.Z-
+        # numbered releases.  We also tell Curl to download the tarball
+        # with a name that corresponds to the name of the target
+        # directory, so that it begins with "lz4-" and ends with either
+        # "rN" or "X.Y.Z", to match what almost all of the other
+        # support libraries do.
+        #
+        if [[ "$LZ4_VERSION" == r* ]]
+        then
+            [ -f lz4-$LZ4_VERSION.tar.gz ] || curl -L -o lz4-$LZ4_VERSION.tar.gz https://github.com/lz4/lz4/archive/$LZ4_VERSION.tar.gz || exit 1
+        else
+            [ -f lz4-$LZ4_VERSION.tar.gz ] || curl -L -o lz4-$LZ4_VERSION.tar.gz https://github.com/lz4/lz4/archive/v$LZ4_VERSION.tar.gz || exit 1
+        fi
+        $no_build && echo "Skipping installation" && return
+        gzcat lz4-$LZ4_VERSION.tar.gz | tar xf - || exit 1
+        cd lz4-$LZ4_VERSION
+        #
+        # No configure script here, but it appears that if MOREFLAGS is
+        # set, that's added to CFLAGS, and those are combined with LDFLAGS
+        # and CXXFLAGS into FLAGS, which is used when building source
+        # files and libraries.
+        #
+        MOREFLAGS="-D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch lz4-$LZ4_VERSION-done
+    fi
+}
+
+# Uninstall lz4 if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_lz4() {
+    if [ ! -z "$installed_lz4_version" ] ; then
+        echo "Uninstalling lz4:"
+        cd lz4-$installed_lz4_version
+        $DO_MAKE_UNINSTALL || exit 1
+        #
+        # lz4's Makefile doesn't support "make distclean"; just do
+        # "make clean".  Perhaps not using autotools means that
+        # there's no need for "make distclean".
+        #
+        # make distclean || exit 1
+        make clean || exit 1
+        cd ..
+        rm lz4-$installed_lz4_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            # "make install" apparently causes some stuff to be
+            # modified in the build tree, so, as it's done as
+            # root, that leaves stuff owned by root in the build
+            # tree.  Therefore, we have to remove the build tree
+            # as root.
+            #
+            sudo rm -rf lz4-$installed_lz4_version
+            rm -rf lz4-$installed_lz4_version.tar.gz
+        fi
+
+        installed_lz4_version=""
+    fi
+}
+
+# Download, build, and install sbc, unless it is not requested
+# or already installed.
+install_sbc() {
+    if [ "$SBC_VERSION" -a ! -f sbc-$SBC_VERSION-done ] ; then
+        echo "Downloading, building, and installing sbc:"
+        [ -f sbc-$SBC_VERSION.tar.gz ] || curl -L -O https://www.kernel.org/pub/linux/bluetooth/sbc-$SBC_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat sbc-$SBC_VERSION.tar.gz | tar xf - || exit 1
+        cd sbc-$SBC_VERSION
+        if [ "$DARWIN_PROCESSOR_ARCH" = "arm" ] ; then
+            # On Arm, add -U__ARM_NEON__ to CFLAGS so the NEON code
+            # paths are not selected.
+            CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS -U__ARM_NEON__" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-tools --disable-tester --disable-shared || exit 1
+        else
+            CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-tools --disable-tester --disable-shared || exit 1
+        fi
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch sbc-$SBC_VERSION-done
+    fi
+}
+
+# Uninstall sbc if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_sbc() {
+    if [ ! -z "$installed_sbc_version" ] ; then
+        echo "Uninstalling sbc:"
+        cd sbc-$installed_sbc_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm sbc-$installed_sbc_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf sbc-$installed_sbc_version
+            rm -rf sbc-$installed_sbc_version.tar.gz
+        fi
+
+        installed_sbc_version=""
+    fi
+}
+
+# Download, build, and install the MaxMindDB API (libmaxminddb), unless
+# it is not requested or already installed.
+install_maxminddb() {
+    if [ "$MAXMINDDB_VERSION" -a ! -f maxminddb-$MAXMINDDB_VERSION-done ] ; then
+        echo "Downloading, building, and installing MaxMindDB API:"
+        [ -f libmaxminddb-$MAXMINDDB_VERSION.tar.gz ] || curl -L -O https://github.com/maxmind/libmaxminddb/releases/download/$MAXMINDDB_VERSION/libmaxminddb-$MAXMINDDB_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat libmaxminddb-$MAXMINDDB_VERSION.tar.gz | tar xf - || exit 1
+        cd libmaxminddb-$MAXMINDDB_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch maxminddb-$MAXMINDDB_VERSION-done
+    fi
+}
+
+# Uninstall the MaxMindDB API if a previously-installed version is
+# recorded; with "-r", also remove the tarball and unpacked source tree.
+uninstall_maxminddb() {
+    if [ ! -z "$installed_maxminddb_version" ] ; then
+        echo "Uninstalling MaxMindDB API:"
+        cd libmaxminddb-$installed_maxminddb_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm maxminddb-$installed_maxminddb_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf libmaxminddb-$installed_maxminddb_version
+            rm -rf libmaxminddb-$installed_maxminddb_version.tar.gz
+        fi
+
+        installed_maxminddb_version=""
+    fi
+}
+
+# Download, build, and install c-ares, unless it is not requested
+# or already installed.
+install_c_ares() {
+    if [ "$CARES_VERSION" -a ! -f c-ares-$CARES_VERSION-done ] ; then
+        echo "Downloading, building, and installing C-Ares API:"
+        [ -f c-ares-$CARES_VERSION.tar.gz ] || curl -L -O https://c-ares.org/download/c-ares-$CARES_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat c-ares-$CARES_VERSION.tar.gz | tar xf - || exit 1
+        cd c-ares-$CARES_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch c-ares-$CARES_VERSION-done
+    fi
+}
+
+# Uninstall c-ares if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_c_ares() {
+    if [ ! -z "$installed_cares_version" ] ; then
+        echo "Uninstalling C-Ares API:"
+        cd c-ares-$installed_cares_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm c-ares-$installed_cares_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf c-ares-$installed_cares_version
+            rm -rf c-ares-$installed_cares_version.tar.gz
+        fi
+
+        installed_cares_version=""
+    fi
+}
+
+install_libssh() {
+ if [ "$LIBSSH_VERSION" -a ! -f libssh-$LIBSSH_VERSION-done ] ; then
+ echo "Downloading, building, and installing libssh:"
+ LIBSSH_MAJOR_VERSION="`expr $LIBSSH_VERSION : '\([0-9][0-9]*\).*'`"
+ LIBSSH_MINOR_VERSION="`expr $LIBSSH_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
+ LIBSSH_MAJOR_MINOR_VERSION=$LIBSSH_MAJOR_VERSION.$LIBSSH_MINOR_VERSION
+ [ -f libssh-$LIBSSH_VERSION.tar.xz ] || curl -L -O https://www.libssh.org/files/$LIBSSH_MAJOR_MINOR_VERSION/libssh-$LIBSSH_VERSION.tar.xz
+ $no_build && echo "Skipping installation" && return
+ xzcat libssh-$LIBSSH_VERSION.tar.xz | tar xf - || exit 1
+ cd libssh-$LIBSSH_VERSION
+ mkdir build
+ cd build
+ MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE -DWITH_GCRYPT=1 ../ || exit 1
+ make $MAKE_BUILD_OPTS || exit 1
+ $DO_MAKE_INSTALL || exit 1
+ cd ../..
+ touch libssh-$LIBSSH_VERSION-done
+ fi
+}
+
+# Uninstall libssh if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_libssh() {
+    if [ ! -z "$installed_libssh_version" ] ; then
+        echo "Uninstalling libssh:"
+        cd libssh-$installed_libssh_version
+        #
+        # libssh uses cmake and doesn't support "make uninstall";
+        # just remove what we know it installs.
+        #
+        # $DO_MAKE_UNINSTALL || exit 1
+        $DO_RM -rf /usr/local/lib/libssh* \
+                   /usr/local/include/libssh \
+                   /usr/local/lib/pkgconfig/libssh* \
+                   /usr/local/lib/cmake/libssh || exit 1
+        #
+        # libssh uses cmake and doesn't support "make distclean";
+        # just remove the entire build directory.
+        #
+        # make distclean || exit 1
+        rm -rf build || exit 1
+        cd ..
+        rm libssh-$installed_libssh_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            # libssh is downloaded as a .tar.xz, not a .tar.gz, so
+            # remove the name we actually downloaded.
+            #
+            rm -rf libssh-$installed_libssh_version
+            rm -rf libssh-$installed_libssh_version.tar.xz
+        fi
+
+        installed_libssh_version=""
+    fi
+}
+
+# Download, build, and install nghttp2 (library only), unless it is not
+# requested or already installed.
+install_nghttp2() {
+    if [ "$NGHTTP2_VERSION" -a ! -f nghttp2-$NGHTTP2_VERSION-done ] ; then
+        echo "Downloading, building, and installing nghttp2:"
+        [ -f nghttp2-$NGHTTP2_VERSION.tar.xz ] || curl -L -O https://github.com/nghttp2/nghttp2/releases/download/v$NGHTTP2_VERSION/nghttp2-$NGHTTP2_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat nghttp2-$NGHTTP2_VERSION.tar.xz | tar xf - || exit 1
+        cd nghttp2-$NGHTTP2_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-lib-only || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch nghttp2-$NGHTTP2_VERSION-done
+    fi
+}
+
+# Uninstall nghttp2 if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_nghttp2() {
+    if [ ! -z "$installed_nghttp2_version" ] ; then
+        echo "Uninstalling nghttp2:"
+        cd nghttp2-$installed_nghttp2_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm nghttp2-$installed_nghttp2_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf nghttp2-$installed_nghttp2_version
+            rm -rf nghttp2-$installed_nghttp2_version.tar.xz
+        fi
+
+        installed_nghttp2_version=""
+    fi
+}
+
+# Download, build, and install nghttp3 (library only), unless it is not
+# requested or already installed.
+install_nghttp3() {
+    if [ "$NGHTTP3_VERSION" -a ! -f nghttp3-$NGHTTP3_VERSION-done ] ; then
+        echo "Downloading, building, and installing nghttp3:"
+        [ -f nghttp3-$NGHTTP3_VERSION.tar.xz ] || curl -L -O https://github.com/ngtcp2/nghttp3/releases/download/v$NGHTTP3_VERSION/nghttp3-$NGHTTP3_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        xzcat nghttp3-$NGHTTP3_VERSION.tar.xz | tar xf - || exit 1
+        cd nghttp3-$NGHTTP3_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-lib-only || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch nghttp3-$NGHTTP3_VERSION-done
+    fi
+}
+
+# Uninstall nghttp3 if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_nghttp3() {
+    if [ ! -z "$installed_nghttp3_version" ] ; then
+        echo "Uninstalling nghttp3:"
+        cd nghttp3-$installed_nghttp3_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm nghttp3-$installed_nghttp3_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf nghttp3-$installed_nghttp3_version
+            rm -rf nghttp3-$installed_nghttp3_version.tar.xz
+        fi
+
+        installed_nghttp3_version=""
+    fi
+}
+
+# Download, build, and install libtiff, unless it is not requested
+# or already installed.  Older releases move to the "old" directory
+# on the download site, so try both locations.
+install_libtiff() {
+    if [ "$LIBTIFF_VERSION" -a ! -f tiff-$LIBTIFF_VERSION-done ] ; then
+        echo "Downloading, building, and installing libtiff:"
+        [ -f tiff-$LIBTIFF_VERSION.tar.gz ] ||
+            curl --fail -L -O https://download.osgeo.org/libtiff/tiff-$LIBTIFF_VERSION.tar.gz ||
+            curl --fail -L -O https://download.osgeo.org/libtiff/old/tiff-$LIBTIFF_VERSION.tar.gz ||
+            exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat tiff-$LIBTIFF_VERSION.tar.gz | tar xf - || exit 1
+        cd tiff-$LIBTIFF_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch tiff-$LIBTIFF_VERSION-done
+    fi
+}
+
+# Uninstall libtiff if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_libtiff() {
+    if [ ! -z "$installed_libtiff_version" ] ; then
+        echo "Uninstalling libtiff:"
+        cd tiff-$installed_libtiff_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm tiff-$installed_libtiff_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf tiff-$installed_libtiff_version
+            rm -rf tiff-$installed_libtiff_version.tar.gz
+        fi
+
+        installed_libtiff_version=""
+    fi
+}
+
+# Download, patch, build, and install SpanDSP, unless it is not
+# requested or already installed.
+install_spandsp() {
+    if [ "$SPANDSP_VERSION" -a ! -f spandsp-$SPANDSP_VERSION-done ] ; then
+        echo "Downloading, building, and installing SpanDSP:"
+        [ -f spandsp-$SPANDSP_VERSION.tar.gz ] || curl -L -O https://www.soft-switch.org/downloads/spandsp/spandsp-$SPANDSP_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat spandsp-$SPANDSP_VERSION.tar.gz | tar xf - || exit 1
+        cd spandsp-$SPANDSP_VERSION
+        #
+        # Don't use -Wunused-but-set-variable, as it's not supported
+        # by all the gcc versions in the versions of Xcode that we
+        # support.
+        #
+        patch -p0 <${topdir}/macosx-support-lib-patches/spandsp-configure-patch || exit 1
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch spandsp-$SPANDSP_VERSION-done
+    fi
+}
+
+# Uninstall SpanDSP if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_spandsp() {
+    if [ ! -z "$installed_spandsp_version" ] ; then
+        echo "Uninstalling SpanDSP:"
+        cd spandsp-$installed_spandsp_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm spandsp-$installed_spandsp_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf spandsp-$installed_spandsp_version
+            rm -rf spandsp-$installed_spandsp_version.tar.gz
+        fi
+
+        installed_spandsp_version=""
+    fi
+}
+
+# Download, build, and install SpeexDSP, unless it is not requested
+# or already installed.
+install_speexdsp() {
+    if [ "$SPEEXDSP_VERSION" -a ! -f speexdsp-$SPEEXDSP_VERSION-done ] ; then
+        echo "Downloading, building, and installing SpeexDSP:"
+        [ -f speexdsp-$SPEEXDSP_VERSION.tar.gz ] || curl -L -O https://ftp.osuosl.org/pub/xiph/releases/speex/speexdsp-$SPEEXDSP_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat speexdsp-$SPEEXDSP_VERSION.tar.gz | tar xf - || exit 1
+        cd speexdsp-$SPEEXDSP_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch speexdsp-$SPEEXDSP_VERSION-done
+    fi
+}
+
+# Uninstall SpeexDSP if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_speexdsp() {
+    if [ ! -z "$installed_speexdsp_version" ] ; then
+        echo "Uninstalling SpeexDSP:"
+        cd speexdsp-$installed_speexdsp_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm speexdsp-$installed_speexdsp_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf speexdsp-$installed_speexdsp_version
+            rm -rf speexdsp-$installed_speexdsp_version.tar.gz
+        fi
+
+        installed_speexdsp_version=""
+    fi
+}
+
+# Download, build (with CMake in build_dir), and install bcg729, unless
+# it is not requested or already installed.
+install_bcg729() {
+    if [ "$BCG729_VERSION" -a ! -f bcg729-$BCG729_VERSION-done ] ; then
+        echo "Downloading, building, and installing bcg729:"
+        [ -f bcg729-$BCG729_VERSION.tar.gz ] || curl -L -O https://gitlab.linphone.org/BC/public/bcg729/-/archive/$BCG729_VERSION/bcg729-$BCG729_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat bcg729-$BCG729_VERSION.tar.gz | tar xf - || exit 1
+        cd bcg729-$BCG729_VERSION
+        mkdir build_dir
+        cd build_dir
+        MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE ../ || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../..
+        touch bcg729-$BCG729_VERSION-done
+    fi
+}
+
+# Uninstall bcg729 if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_bcg729() {
+    if [ ! -z "$installed_bcg729_version" ] ; then
+        echo "Uninstalling bcg729:"
+        cd bcg729-$installed_bcg729_version
+        #
+        # bcg729 uses cmake on macOS and doesn't support "make uninstall";
+        # just remove what we know it installs.
+        #
+        # $DO_MAKE_UNINSTALL || exit 1
+        $DO_RM -rf /usr/local/share/Bcg729 \
+                   /usr/local/lib/libbcg729* \
+                   /usr/local/include/bcg729 \
+                   /usr/local/lib/pkgconfig/libbcg729* || exit 1
+        #
+        # bcg729 uses cmake on macOS and doesn't support "make distclean";
+        # just remove the entire build directory.
+        #
+        # make distclean || exit 1
+        rm -rf build_dir || exit 1
+        cd ..
+        rm bcg729-$installed_bcg729_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf bcg729-$installed_bcg729_version
+            rm -rf bcg729-$installed_bcg729_version.tar.gz
+        fi
+
+        installed_bcg729_version=""
+    fi
+}
+
+install_ilbc() {
+ if [ -n "$ILBC_VERSION" ] && [ ! -f ilbc-$ILBC_VERSION-done ] ; then
+ echo "Downloading, building, and installing iLBC:"
+ [ -f libilbc-$ILBC_VERSION.tar.bz ] || curl --location --remote-name https://github.com/TimothyGu/libilbc/releases/download/v$ILBC_VERSION/libilbc-$ILBC_VERSION.tar.bz2 || exit 1
+ $no_build && echo "Skipping installation" && return
+ bzcat libilbc-$ILBC_VERSION.tar.bz2 | tar xf - || exit 1
+ cd libilbc-$ILBC_VERSION || exit 1
+ CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+ make $MAKE_BUILD_OPTS || exit 1
+ $DO_MAKE_INSTALL || exit 1
+ cd ..
+ touch ilbc-$ILBC_VERSION-done
+ fi
+}
+
+# Uninstall iLBC if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_ilbc() {
+    if [ -n "$installed_ilbc_version" ] ; then
+        echo "Uninstalling iLBC:"
+        cd "libilbc-$installed_ilbc_version" || exit 1
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm "ilbc-$installed_ilbc_version-done"
+
+        if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf "libilbc-$installed_ilbc_version"
+            rm -rf "libilbc-$installed_ilbc_version.tar.bz2"
+        fi
+
+        installed_ilbc_version=""
+    fi
+}
+
+# Download, build, and install opus, unless it is not requested
+# or already installed.
+install_opus() {
+    if [ "$OPUS_VERSION" -a ! -f opus-$OPUS_VERSION-done ] ; then
+        echo "Downloading, building, and installing opus:"
+        [ -f opus-$OPUS_VERSION.tar.gz ] || curl -L -O https://downloads.xiph.org/releases/opus/opus-$OPUS_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat opus-$OPUS_VERSION.tar.gz | tar xf - || exit 1
+        cd opus-$OPUS_VERSION
+        CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ..
+        touch opus-$OPUS_VERSION-done
+    fi
+}
+
+# Uninstall opus if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_opus() {
+    if [ ! -z "$installed_opus_version" ] ; then
+        echo "Uninstalling opus:"
+        cd opus-$installed_opus_version
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ..
+        rm opus-$installed_opus_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf opus-$installed_opus_version
+            rm -rf opus-$installed_opus_version.tar.gz
+        fi
+
+        installed_opus_version=""
+    fi
+}
+
+# Download and install the python.org Python 3 installer package, unless
+# it is not requested or already installed.  Also sets MESON to the path
+# where pip3 will install the Meson script for whichever Python is used.
+install_python3() {
+    # The macos11 installer can be deployed to older versions, down to
+    # 10.9 (Mavericks), but is still considered experimental so continue
+    # to use the 64-bit installer (10.9) on earlier releases for now.
+    local macver=x10.9
+    if [[ $DARWIN_MAJOR_VERSION -gt 19 ]]; then
+        # The macos11 installer is required for Arm-based Macs, which require
+        # macOS 11 Big Sur. Note that the package name is "11.0" (no x) for
+        # 3.9.1 but simply "11" for 3.9.2 (and later)
+        if [[ $PYTHON3_VERSION = 3.9.1 ]]; then
+            macver=11.0
+        else
+            macver=11
+        fi
+    fi
+    if [ "$PYTHON3_VERSION" -a ! -f python3-$PYTHON3_VERSION-done ] ; then
+        echo "Downloading and installing python3:"
+        [ -f python-$PYTHON3_VERSION-macos$macver.pkg ] || curl -L -O https://www.python.org/ftp/python/$PYTHON3_VERSION/python-$PYTHON3_VERSION-macos$macver.pkg || exit 1
+        $no_build && echo "Skipping installation" && return
+        sudo installer -target / -pkg python-$PYTHON3_VERSION-macos$macver.pkg || exit 1
+        touch python3-$PYTHON3_VERSION-done
+
+        #
+        # On macOS, the pip3 installed from Python packages appears to
+        # install scripts /Library/Frameworks/Python.framework/Versions/M.N/bin,
+        # where M.N is the major and minor version of Python (the dot-dot
+        # release is irrelevant).
+        #
+        # Strip off any dot-dot component in $PYTHON3_VERSION.
+        #
+        python_version=`echo $PYTHON3_VERSION | sed 's/\([1-9][0-9]*\.[1-9][0-9]*\).*/\1/'`
+        #
+        # Now treat Meson as being in the directory in question.
+        #
+        MESON="/Library/Frameworks/Python.framework/Versions/$python_version/bin/meson"
+    else
+        #
+        # We're using the Python 3 that's in /usr/bin, the pip3 for
+        # which installs scripts in /usr/local/bin, so, when we
+        # install Meson, look for it there.
+        #
+        MESON=/usr/local/bin/meson
+    fi
+}
+
+# Uninstall the python.org Python 3 framework, applications, symlinks,
+# and pkgutil receipts; with "-r", also remove the downloaded installer
+# packages (all the package-name variants we may have downloaded).
+uninstall_python3() {
+    # Major version (e.g. "3.7")
+    local PYTHON_VERSION=${installed_python3_version%.*}
+    if [ ! -z "$installed_python3_version" ] ; then
+        echo "Uninstalling python3:"
+        frameworkdir="/Library/Frameworks/Python.framework/Versions/$PYTHON_VERSION"
+        sudo rm -rf "$frameworkdir"
+        sudo rm -rf "/Applications/Python $PYTHON_VERSION"
+        sudo find /usr/local/bin -maxdepth 1 -lname "*$frameworkdir/bin/*" -delete
+        # Remove three symlinks and empty directories. Removing directories
+        # might fail if for some reason multiple versions are installed.
+        sudo rm /Library/Frameworks/Python.framework/Headers
+        sudo rm /Library/Frameworks/Python.framework/Python
+        sudo rm /Library/Frameworks/Python.framework/Resources
+        sudo rmdir /Library/Frameworks/Python.framework/Versions
+        sudo rmdir /Library/Frameworks/Python.framework
+        sudo pkgutil --forget org.python.Python.PythonApplications-$PYTHON_VERSION
+        sudo pkgutil --forget org.python.Python.PythonDocumentation-$PYTHON_VERSION
+        sudo pkgutil --forget org.python.Python.PythonFramework-$PYTHON_VERSION
+        sudo pkgutil --forget org.python.Python.PythonUnixTools-$PYTHON_VERSION
+        rm python3-$installed_python3_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -f python-$installed_python3_version-macos11.pkg
+            rm -f python-$installed_python3_version-macos11.0.pkg
+            rm -f python-$installed_python3_version-macosx10.9.pkg
+            rm -f python-$installed_python3_version-macosx10.6.pkg
+        fi
+
+        installed_python3_version=""
+    fi
+}
+
+# Download, build (with CMake in build_dir), and install brotli, unless
+# it is not requested or already installed.
+install_brotli() {
+    if [ "$BROTLI_VERSION" -a ! -f brotli-$BROTLI_VERSION-done ] ; then
+        echo "Downloading, building, and installing brotli:"
+        [ -f brotli-$BROTLI_VERSION.tar.gz ] || curl -L -o brotli-$BROTLI_VERSION.tar.gz https://github.com/google/brotli/archive/v$BROTLI_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat brotli-$BROTLI_VERSION.tar.gz | tar xf - || exit 1
+        cd brotli-$BROTLI_VERSION
+        mkdir build_dir
+        cd build_dir
+        MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" $DO_CMAKE ../ || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../..
+        touch brotli-$BROTLI_VERSION-done
+    fi
+}
+
+# Uninstall brotli if a previously-installed version is recorded; with
+# "-r", also remove the downloaded tarball and unpacked source tree.
+uninstall_brotli() {
+    if [ ! -z "$installed_brotli_version" ] ; then
+        echo "Uninstalling brotli:"
+        cd brotli-$installed_brotli_version
+        #
+        # brotli uses cmake on macOS and doesn't support "make uninstall";
+        # just remove what we know it installs.
+        #
+        # $DO_MAKE_UNINSTALL || exit 1
+        $DO_RM -rf /usr/local/bin/brotli \
+                   /usr/local/lib/libbrotli* \
+                   /usr/local/include/brotli \
+                   /usr/local/lib/pkgconfig/libbrotli* || exit 1
+        #
+        # brotli uses cmake on macOS and doesn't support "make distclean";
+        # just remove the entire build directory.
+        #
+        # make distclean || exit 1
+        rm -rf build_dir || exit 1
+        cd ..
+        rm brotli-$installed_brotli_version-done
+
+        if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf brotli-$installed_brotli_version
+            rm -rf brotli-$installed_brotli_version.tar.gz
+        fi
+
+        installed_brotli_version=""
+    fi
+}
+
+# Download zlib and build/install its contrib/minizip library, unless
+# zlib is not requested or minizip is already installed.
+install_minizip() {
+    if [ "$ZLIB_VERSION" ] && [ ! -f minizip-$ZLIB_VERSION-done ] ; then
+        echo "Downloading, building, and installing zlib for minizip:"
+        [ -f zlib-$ZLIB_VERSION.tar.gz ] || curl -L -o zlib-$ZLIB_VERSION.tar.gz https://zlib.net/zlib-$ZLIB_VERSION.tar.gz || exit 1
+        $no_build && echo "Skipping installation" && return
+        gzcat zlib-$ZLIB_VERSION.tar.gz | tar xf - || exit 1
+        #
+        # minizip ships both with a minimal Makefile that doesn't
+        # support "make install", "make uninstall", or "make distclean",
+        # and with a Makefile.am file that, if we do an autoreconf,
+        # gives us a configure script, and a Makefile.in that, if we run
+        # the configure script, gives us a Makefile that supports all of
+        # those targets, and that installs a pkg-config .pc file for
+        # minizip.
+        #
+        # So that's what we do.
+        #
+        cd zlib-$ZLIB_VERSION/contrib/minizip || exit 1
+        # Bail out if autoreconf fails, as the configure script won't
+        # exist (or will be stale) if it did.
+        LIBTOOLIZE=glibtoolize autoreconf --force --install || exit 1
+        CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
+        make $MAKE_BUILD_OPTS || exit 1
+        $DO_MAKE_INSTALL || exit 1
+        cd ../../..
+        touch minizip-$ZLIB_VERSION-done
+    fi
+}
+
+# Uninstall minizip (built from the zlib source tree) if a previously-
+# installed version is recorded; with "-r", also remove the zlib tarball
+# and unpacked source tree.
+uninstall_minizip() {
+    if [ -n "$installed_minizip_version" ] ; then
+        echo "Uninstalling minizip:"
+        cd zlib-$installed_minizip_version/contrib/minizip
+        $DO_MAKE_UNINSTALL || exit 1
+        make distclean || exit 1
+        cd ../../..
+
+        rm minizip-$installed_minizip_version-done
+
+        if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then
+            #
+            # Get rid of the previously downloaded and unpacked version.
+            #
+            rm -rf zlib-$installed_minizip_version
+            rm -rf zlib-$installed_minizip_version.tar.gz
+        fi
+
+        installed_minizip_version=""
+    fi
+}
+
+# Download the Sparkle binary release and unpack it into
+# /usr/local/Sparkle-x.y.z, unless it is not requested or already
+# installed.  No build step is needed; the release is prebuilt.
+install_sparkle() {
+    if [ "$SPARKLE_VERSION" ] && [ ! -f sparkle-$SPARKLE_VERSION-done ] ; then
+        echo "Downloading and installing Sparkle:"
+        #
+        # Download the tarball and unpack it in /usr/local/Sparkle-x.y.z
+        #
+        [ -f Sparkle-$SPARKLE_VERSION.tar.xz ] || curl -L -o Sparkle-$SPARKLE_VERSION.tar.xz https://github.com/sparkle-project/Sparkle/releases/download/$SPARKLE_VERSION/Sparkle-$SPARKLE_VERSION.tar.xz || exit 1
+        $no_build && echo "Skipping installation" && return
+        test -d "/usr/local/Sparkle-$SPARKLE_VERSION" || sudo mkdir "/usr/local/Sparkle-$SPARKLE_VERSION"
+        sudo tar -C "/usr/local/Sparkle-$SPARKLE_VERSION" -xpof Sparkle-$SPARKLE_VERSION.tar.xz
+        touch sparkle-$SPARKLE_VERSION-done
+    fi
+}
+
+# Uninstall Sparkle by removing its /usr/local install directory; with
+# "-r", also remove the downloaded tarball.
+uninstall_sparkle() {
+    if [ -n "$installed_sparkle_version" ]; then
+        echo "Uninstalling Sparkle:"
+        sudo rm -rf "/usr/local/Sparkle-$installed_sparkle_version"
+        if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then
+            rm -f "Sparkle-$installed_sparkle_version.tar.xz"
+        fi
+
+        installed_sparkle_version=""
+    fi
+}
+
+install_all() {
+ #
+ # Check whether the versions we have installed are the versions
+ # requested; if not, uninstall the installed versions.
+ #
+ if [ ! -z "$installed_brotli_version" -a \
+ "$installed_brotli_version" != "$BROTLI_VERSION" ] ; then
+ echo "Installed brotli version is $installed_brotli_version"
+ if [ -z "$BROTLI_VERSION" ] ; then
+ echo "brotli is not requested"
+ else
+ echo "Requested brotli version is $BROTLI_VERSION"
+ fi
+ uninstall_brotli -r
+ fi
+
+ if [ ! -z "$installed_python3_version" -a \
+ "$installed_python3_version" != "$PYTHON3_VERSION" ] ; then
+ echo "Installed python3 version is $installed_python3_version"
+ if [ -z "$PYTHON3_VERSION" ] ; then
+ echo "python3 is not requested"
+ else
+ echo "Requested python3 version is $PYTHON3_VERSION"
+ fi
+ uninstall_python3 -r
+ fi
+
+ if [ ! -z "$installed_bcg729_version" -a \
+ "$installed_bcg729_version" != "$BCG729_VERSION" ] ; then
+ echo "Installed bcg729 version is $installed_bcg729_version"
+ if [ -z "$BCG729_VERSION" ] ; then
+ echo "bcg729 is not requested"
+ else
+ echo "Requested bcg729 version is $BCG729_VERSION"
+ fi
+ uninstall_bcg729 -r
+ fi
+
+ if [ -n "$installed_ilbc_version" ] \
+ && [ "$installed_ilbc_version" != "$ILBC_VERSION" ] ; then
+ echo "Installed iLBC version is $installed_ilbc_version"
+ if [ -z "$ILBC_VERSION" ] ; then
+ echo "iLBC is not requested"
+ else
+ echo "Requested iLBC version is $ILBC_VERSION"
+ fi
+ uninstall_ilbc -r
+ fi
+
+ if [ -n "$installed_opus_version" ] \
+ && [ "$installed_opus_version" != "$OPUS_VERSION" ] ; then
+ echo "Installed opus version is $installed_opus_version"
+ if [ -z "$OPUS_VERSION" ] ; then
+ echo "opus is not requested"
+ else
+ echo "Requested opus version is $OPUS_VERSION"
+ fi
+ uninstall_opus -r
+ fi
+
+ if [ ! -z "$installed_spandsp_version" -a \
+ "$installed_spandsp_version" != "$SPANDSP_VERSION" ] ; then
+ echo "Installed SpanDSP version is $installed_spandsp_version"
+ if [ -z "$SPANDSP_VERSION" ] ; then
+ echo "spandsp is not requested"
+ else
+ echo "Requested SpanDSP version is $SPANDSP_VERSION"
+ fi
+ uninstall_spandsp -r
+ fi
+
+ if [ ! -z "$installed_speexdsp_version" -a \
+ "$installed_speexdsp_version" != "$SPEEXDSP_VERSION" ] ; then
+ echo "Installed SpeexDSP version is $installed_speexdsp_version"
+ if [ -z "$SPEEXDSP_VERSION" ] ; then
+ echo "speexdsp is not requested"
+ else
+ echo "Requested SpeexDSP version is $SPEEXDSP_VERSION"
+ fi
+ uninstall_speexdsp -r
+ fi
+
+ if [ ! -z "$installed_libtiff_version" -a \
+ "$installed_libtiff_version" != "$LIBTIFF_VERSION" ] ; then
+ echo "Installed libtiff version is $installed_libtiff_version"
+ if [ -z "$LIBTIFF_VERSION" ] ; then
+ echo "libtiff is not requested"
+ else
+ echo "Requested libtiff version is $LIBTIFF_VERSION"
+ fi
+ uninstall_libtiff -r
+ fi
+
+ if [ ! -z "$installed_nghttp2_version" -a \
+ "$installed_nghttp2_version" != "$NGHTTP2_VERSION" ] ; then
+ echo "Installed nghttp2 version is $installed_nghttp2_version"
+ if [ -z "$NGHTTP2_VERSION" ] ; then
+ echo "nghttp2 is not requested"
+ else
+ echo "Requested nghttp2 version is $NGHTTP2_VERSION"
+ fi
+ uninstall_nghttp2 -r
+ fi
+
+ if [ ! -z "$installed_nghttp3_version" -a \
+ "$installed_nghttp3_version" != "$NGHTTP3_VERSION" ] ; then
+ echo "Installed nghttp3 version is $installed_nghttp3_version"
+ if [ -z "$NGHTTP3_VERSION" ] ; then
+ echo "nghttp3 is not requested"
+ else
+ echo "Requested nghttp3 version is $NGHTTP3_VERSION"
+ fi
+ uninstall_nghttp3 -r
+ fi
+
+ if [ ! -z "$installed_libssh_version" -a \
+ "$installed_libssh_version" != "$LIBSSH_VERSION" ] ; then
+ echo "Installed libssh version is $installed_libssh_version"
+ if [ -z "$LIBSSH_VERSION" ] ; then
+ echo "libssh is not requested"
+ else
+ echo "Requested libssh version is $LIBSSH_VERSION"
+ fi
+ uninstall_libssh -r
+ fi
+
+ if [ ! -z "$installed_cares_version" -a \
+ "$installed_cares_version" != "$CARES_VERSION" ] ; then
+ echo "Installed C-Ares version is $installed_cares_version"
+ if [ -z "$CARES_VERSION" ] ; then
+ echo "C-Ares is not requested"
+ else
+ echo "Requested C-Ares version is $CARES_VERSION"
+ fi
+ uninstall_c_ares -r
+ fi
+
+ if [ ! -z "$installed_maxminddb_version" -a \
+ "$installed_maxminddb_version" != "$MAXMINDDB_VERSION" ] ; then
+ echo "Installed MaxMindDB API version is $installed_maxminddb_version"
+ if [ -z "$MAXMINDDB_VERSION" ] ; then
+ echo "MaxMindDB is not requested"
+ else
+ echo "Requested MaxMindDB version is $MAXMINDDB_VERSION"
+ fi
+ uninstall_maxminddb -r
+ fi
+
+ if [ ! -z "$installed_sbc_version" -a \
+ "$installed_sbc_version" != "$SBC_VERSION" ] ; then
+ echo "Installed SBC version is $installed_sbc_version"
+ if [ -z "$SBC_VERSION" ] ; then
+ echo "SBC is not requested"
+ else
+ echo "Requested SBC version is $SBC_VERSION"
+ fi
+ uninstall_sbc -r
+ fi
+
+ if [ ! -z "$installed_lz4_version" -a \
+ "$installed_lz4_version" != "$LZ4_VERSION" ] ; then
+ echo "Installed LZ4 version is $installed_lz4_version"
+ if [ -z "$LZ4_VERSION" ] ; then
+ echo "LZ4 is not requested"
+ else
+ echo "Requested LZ4 version is $LZ4_VERSION"
+ fi
+ uninstall_lz4 -r
+ fi
+
+ if [ ! -z "$installed_libxml2_version" -a \
+ "$installed_libxml2_version" != "$LIBXML2_VERSION" ] ; then
+ echo "Installed libxml2 version is $installed_libxml2_version"
+ if [ -z "$LIBXML2_VERSION" ] ; then
+ echo "libxml2 is not requested"
+ else
+ echo "Requested libxml2 version is $LIBXML2_VERSION"
+ fi
+ uninstall_libxml2 -r
+ fi
+
+ if [ ! -z "$installed_snappy_version" -a \
+ "$installed_snappy_version" != "$SNAPPY_VERSION" ] ; then
+ echo "Installed SNAPPY version is $installed_snappy_version"
+ if [ -z "$SNAPPY_VERSION" ] ; then
+ echo "SNAPPY is not requested"
+ else
+ echo "Requested SNAPPY version is $SNAPPY_VERSION"
+ fi
+ uninstall_snappy -r
+ fi
+
+ if [ ! -z "$installed_lua_version" -a \
+ "$installed_lua_version" != "$LUA_VERSION" ] ; then
+ echo "Installed Lua version is $installed_lua_version"
+ if [ -z "$LUA_VERSION" ] ; then
+ echo "Lua is not requested"
+ else
+ echo "Requested Lua version is $LUA_VERSION"
+ fi
+ uninstall_lua -r
+ fi
+
+ if [ ! -z "$installed_gnutls_version" -a \
+ "$installed_gnutls_version" != "$GNUTLS_VERSION" ] ; then
+ echo "Installed GnuTLS version is $installed_gnutls_version"
+ if [ -z "$GNUTLS_VERSION" ] ; then
+ echo "GnuTLS is not requested"
+ else
+ echo "Requested GnuTLS version is $GNUTLS_VERSION"
+ fi
+ uninstall_gnutls -r
+ fi
+
+ if [ ! -z "$installed_nettle_version" -a \
+ "$installed_nettle_version" != "$NETTLE_VERSION" ] ; then
+ echo "Installed Nettle version is $installed_nettle_version"
+ if [ -z "$NETTLE_VERSION" ] ; then
+ echo "Nettle is not requested"
+ else
+ echo "Requested Nettle version is $NETTLE_VERSION"
+ fi
+ uninstall_nettle -r
+ fi
+
+ if [ ! -z "$installed_gmp_version" -a \
+ "$installed_gmp_version" != "$GMP_VERSION" ] ; then
+ echo "Installed GMP version is $installed_gmp_version"
+ if [ -z "$GMP_VERSION" ] ; then
+ echo "GMP is not requested"
+ else
+ echo "Requested GMP version is $GMP_VERSION"
+ fi
+ uninstall_gmp -r
+ fi
+
+ if [ ! -z "$installed_p11_kit_version" -a \
+ "$installed_p11_kit_version" != "$P11KIT_VERSION" ] ; then
+ echo "Installed p11-kit version is $installed_p11_kit_version"
+ if [ -z "$P11KIT_VERSION" ] ; then
+ echo "p11-kit is not requested"
+ else
+ echo "Requested p11-kit version is $P11KIT_VERSION"
+ fi
+ uninstall_p11_kit -r
+ fi
+
+ if [ ! -z "$installed_libtasn1_version" -a \
+ "$installed_libtasn1_version" != "$LIBTASN1_VERSION" ] ; then
+ echo "Installed libtasn1 version is $installed_libtasn1_version"
+ if [ -z "$LIBTASN1_VERSION" ] ; then
+ echo "libtasn1 is not requested"
+ else
+ echo "Requested libtasn1 version is $LIBTASN1_VERSION"
+ fi
+ uninstall_libtasn1 -r
+ fi
+
+ if [ ! -z "$installed_libgcrypt_version" -a \
+ "$installed_libgcrypt_version" != "$LIBGCRYPT_VERSION" ] ; then
+ echo "Installed libgcrypt version is $installed_libgcrypt_version"
+ if [ -z "$LIBGCRYPT_VERSION" ] ; then
+ echo "libgcrypt is not requested"
+ else
+ echo "Requested libgcrypt version is $LIBGCRYPT_VERSION"
+ fi
+ uninstall_libgcrypt -r
+ fi
+
+ if [ ! -z "$installed_libgpg_error_version" -a \
+ "$installed_libgpg_error_version" != "$LIBGPG_ERROR_VERSION" ] ; then
+ echo "Installed libgpg-error version is $installed_libgpg_error_version"
+ if [ -z "$LIBGPG_ERROR_VERSION" ] ; then
+ echo "libgpg-error is not requested"
+ else
+ echo "Requested libgpg-error version is $LIBGPG_ERROR_VERSION"
+ fi
+ uninstall_libgpg_error -r
+ fi
+
+ if [ ! -z "$installed_libsmi_version" -a \
+ "$installed_libsmi_version" != "$LIBSMI_VERSION" ] ; then
+ echo "Installed libsmi version is $installed_libsmi_version"
+ if [ -z "$LIBSMI_VERSION" ] ; then
+ echo "libsmi is not requested"
+ else
+ echo "Requested libsmi version is $LIBSMI_VERSION"
+ fi
+ uninstall_libsmi -r
+ fi
+
+ if [ ! -z "$installed_qt_version" -a \
+ "$installed_qt_version" != "$QT_VERSION" ] ; then
+ echo "Installed Qt version is $installed_qt_version"
+ if [ -z "$QT_VERSION" ] ; then
+ echo "Qt is not requested"
+ else
+ echo "Requested Qt version is $QT_VERSION"
+ fi
+ uninstall_qt -r
+ fi
+
+ if [ ! -z "$installed_glib_version" -a \
+ "$installed_glib_version" != "$GLIB_VERSION" ] ; then
+ echo "Installed GLib version is $installed_glib_version"
+ if [ -z "$GLIB_VERSION" ] ; then
+ echo "GLib is not requested"
+ else
+ echo "Requested GLib version is $GLIB_VERSION"
+ fi
+ uninstall_glib -r
+ fi
+
+ if [ ! -z "$installed_pkg_config_version" -a \
+ "$installed_pkg_config_version" != "$PKG_CONFIG_VERSION" ] ; then
+ echo "Installed pkg-config version is $installed_pkg_config_version"
+ if [ -z "$PKG_CONFIG_VERSION" ] ; then
+ echo "pkg-config is not requested"
+ else
+ echo "Requested pkg-config version is $PKG_CONFIG_VERSION"
+ fi
+ uninstall_pkg_config -r
+ fi
+
+ if [ ! -z "$installed_gettext_version" -a \
+ "$installed_gettext_version" != "$GETTEXT_VERSION" ] ; then
+ echo "Installed GNU gettext version is $installed_gettext_version"
+ if [ -z "$GETTEXT_VERSION" ] ; then
+ echo "GNU gettext is not requested"
+ else
+ echo "Requested GNU gettext version is $GETTEXT_VERSION"
+ fi
+ uninstall_gettext -r
+ fi
+
+ if [ ! -z "$installed_ninja_version" -a \
+ "$installed_ninja_version" != "$NINJA_VERSION" ] ; then
+ echo "Installed Ninja version is $installed_ninja_version"
+ if [ -z "$NINJA_VERSION" ] ; then
+ echo "Ninja is not requested"
+ else
+ echo "Requested Ninja version is $NINJA_VERSION"
+ fi
+ uninstall_ninja -r
+ fi
+
+ if [ ! -z "$installed_asciidoctorpdf_version" -a \
+ "$installed_asciidoctorpdf_version" != "$ASCIIDOCTORPDF_VERSION" ] ; then
+ echo "Installed Asciidoctor-pdf version is $installed_asciidoctorpdf_version"
+ if [ -z "$ASCIIDOCTORPDF_VERSION" ] ; then
+ echo "Asciidoctor-pdf is not requested"
+ else
+ echo "Requested Asciidoctor-pdf version is $ASCIIDOCTORPDF_VERSION"
+ fi
+ # XXX - really remove this?
+ # Or should we remember it as installed only if this script
+ # installed it?
+ #
+ uninstall_asciidoctorpdf -r
+ fi
+
+ if [ ! -z "$installed_asciidoctor_version" -a \
+ "$installed_asciidoctor_version" != "$ASCIIDOCTOR_VERSION" ] ; then
+ echo "Installed Asciidoctor version is $installed_asciidoctor_version"
+ if [ -z "$ASCIIDOCTOR_VERSION" ] ; then
+ echo "Asciidoctor is not requested"
+ else
+ echo "Requested Asciidoctor version is $ASCIIDOCTOR_VERSION"
+ fi
+ # XXX - really remove this?
+ # Or should we remember it as installed only if this script
+ # installed it?
+ #
+ uninstall_asciidoctor -r
+ fi
+
+ if [ ! -z "$installed_cmake_version" -a \
+ "$installed_cmake_version" != "$CMAKE_VERSION" ] ; then
+ echo "Installed CMake version is $installed_cmake_version"
+ if [ -z "$CMAKE_VERSION" ] ; then
+ echo "CMake is not requested"
+ else
+ echo "Requested CMake version is $CMAKE_VERSION"
+ fi
+ uninstall_cmake -r
+ fi
+
+ if [ ! -z "$installed_libtool_version" -a \
+ "$installed_libtool_version" != "$LIBTOOL_VERSION" ] ; then
+ echo "Installed GNU libtool version is $installed_libtool_version"
+ if [ -z "$LIBTOOL_VERSION" ] ; then
+ echo "GNU libtool is not requested"
+ else
+ echo "Requested GNU libtool version is $LIBTOOL_VERSION"
+ fi
+ uninstall_libtool -r
+ fi
+
+ if [ ! -z "$installed_automake_version" -a \
+ "$installed_automake_version" != "$AUTOMAKE_VERSION" ] ; then
+ echo "Installed GNU automake version is $installed_automake_version"
+ if [ -z "$AUTOMAKE_VERSION" ] ; then
+ echo "GNU automake is not requested"
+ else
+ echo "Requested GNU automake version is $AUTOMAKE_VERSION"
+ fi
+ uninstall_automake -r
+ fi
+
+ if [ ! -z "$installed_autoconf_version" -a \
+ "$installed_autoconf_version" != "$AUTOCONF_VERSION" ] ; then
+ echo "Installed GNU autoconf version is $installed_autoconf_version"
+ if [ -z "$AUTOCONF_VERSION" ] ; then
+ echo "GNU autoconf is not requested"
+ else
+ echo "Requested GNU autoconf version is $AUTOCONF_VERSION"
+ fi
+ uninstall_autoconf -r
+ fi
+
+ if [ ! -z "$installed_pcre_version" -a \
+ "$installed_pcre_version" != "$PCRE_VERSION" ] ; then
+ echo "Installed pcre version is $installed_pcre_version"
+ if [ -z "$PCRE_VERSION" ] ; then
+ echo "pcre is not requested"
+ else
+ echo "Requested pcre version is $PCRE_VERSION"
+ fi
+ uninstall_pcre -r
+ fi
+
+ if [ -n "$installed_pcre2_version" -a \
+ "$installed_pcre2_version" != "$PCRE2_VERSION" ] ; then
+ echo "Installed pcre2 version is $installed_pcre2_version"
+ if [ -z "$PCRE2_VERSION" ] ; then
+ echo "pcre2 is not requested"
+ else
+ echo "Requested pcre2 version is $PCRE2_VERSION"
+ fi
+ uninstall_pcre2 -r
+ fi
+
+ if [ ! -z "$installed_lzip_version" -a \
+ "$installed_lzip_version" != "$LZIP_VERSION" ] ; then
+ echo "Installed lzip version is $installed_lzip_version"
+ if [ -z "$LZIP_VERSION" ] ; then
+ echo "lzip is not requested"
+ else
+ echo "Requested lzip version is $LZIP_VERSION"
+ fi
+ uninstall_lzip -r
+ fi
+
+ if [ ! -z "$installed_xz_version" -a \
+ "$installed_xz_version" != "$XZ_VERSION" ] ; then
+ echo "Installed xz version is $installed_xz_version"
+ if [ -z "$XZ_VERSION" ] ; then
+ echo "xz is not requested"
+ else
+ echo "Requested xz version is $XZ_VERSION"
+ fi
+ uninstall_xz -r
+ fi
+
+ if [ ! -z "$installed_curl_version" -a \
+ "$installed_curl_version" != "$CURL_VERSION" ] ; then
+ echo "Installed curl version is $installed_curl_version"
+ if [ -z "$CURL_VERSION" ] ; then
+ echo "curl is not requested"
+ else
+ echo "Requested curl version is $CURL_VERSION"
+ fi
+ uninstall_curl -r
+ fi
+
+ if [ ! -z "$installed_minizip_version" -a \
+ "$installed_minizip_version" != "$ZLIB_VERSION" ] ; then
+ echo "Installed minizip (zlib) version is $installed_minizip_version"
+ if [ -z "$ZLIB_VERSION" ] ; then
+ echo "minizip is not requested"
+ else
+ echo "Requested minizip (zlib) version is $ZLIB_VERSION"
+ fi
+ uninstall_minizip -r
+ fi
+
+ if [ ! -z "$installed_sparkle_version" -a \
+ "$installed_sparkle_version" != "$SPARKLE_VERSION" ] ; then
+ echo "Installed Sparkle version is $installed_sparkle_version"
+ if [ -z "$SPARKLE_VERSION" ] ; then
+ echo "Sparkle is not requested"
+ else
+ echo "Requested Sparkle version is $SPARKLE_VERSION"
+ fi
+ uninstall_sparkle -r
+ fi
+
+ #
+ # Start with curl: we may need it to download and install xz.
+ #
+ install_curl
+
+ #
+    # Now install xz: it is the sole download format of glib later than 2.31.2.
+ #
+ install_xz
+
+ install_lzip
+
+ install_pcre
+
+ install_autoconf
+
+ install_automake
+
+ install_libtool
+
+ install_cmake
+
+ install_pcre2
+
+ #
+ # Install Python 3 now; not only is it needed for the Wireshark
+ # build process, it's also needed for the Meson build system,
+ # which newer versions of GLib use as their build system.
+ #
+ install_python3
+
+ #
+ # Now install Meson and pytest.
+ #
+ install_meson
+
+ install_pytest
+
+ install_ninja
+
+ install_asciidoctor
+
+ install_asciidoctorpdf
+
+ #
+ # Start with GNU gettext; GLib requires it, and macOS doesn't have it
+ # or a BSD-licensed replacement.
+ #
+ # At least on Lion with Xcode 4, _FORTIFY_SOURCE gets defined as 2
+ # by default, which causes, for example, stpncpy to be defined as
+ # a hairy macro that collides with the GNU gettext configure script's
+ # attempts to workaround AIX's lack of a declaration for stpncpy,
+ # with the result being a huge train wreck. Define _FORTIFY_SOURCE
+ # as 0 in an attempt to keep the trains on separate tracks.
+ #
+ install_gettext
+
+ #
+ # GLib depends on pkg-config.
+ # By default, pkg-config depends on GLib; we break the dependency cycle
+ # by configuring pkg-config to use its own internal version of GLib.
+ #
+ install_pkg_config
+
+ install_glib
+
+ #
+ # Now we have reached a point where we can build everything but
+ # the GUI (Wireshark).
+ #
+ install_qt
+
+ #
+ # Now we have reached a point where we can build everything including
+ # the GUI (Wireshark), but not with any optional features such as
+ # SNMP OID resolution, some forms of decryption, Lua scripting, playback
+ # of audio, or MaxMindDB mapping of IP addresses.
+ #
+ # We now conditionally download optional libraries to support them;
+ # the default is to download them all.
+ #
+
+ install_libsmi
+
+ install_libgpg_error
+
+ install_libgcrypt
+
+ install_gmp
+
+ install_libtasn1
+
+ install_p11_kit
+
+ install_nettle
+
+ install_gnutls
+
+ install_lua
+
+ install_snappy
+
+ install_zstd
+
+ install_libxml2
+
+ install_lz4
+
+ install_sbc
+
+ install_maxminddb
+
+ install_c_ares
+
+ install_libssh
+
+ install_nghttp2
+
+ install_nghttp3
+
+ install_libtiff
+
+ install_spandsp
+
+ install_speexdsp
+
+ install_bcg729
+
+ install_ilbc
+
+ install_opus
+
+ install_brotli
+
+ install_minizip
+
+ install_sparkle
+}
+
+uninstall_all() {
+ if [ -d "${MACOSX_SUPPORT_LIBS}" ]
+ then
+ cd "${MACOSX_SUPPORT_LIBS}"
+
+ #
+ # Uninstall items in the reverse order from the order in which they're
+ # installed. Only uninstall if the download/build/install process
+ # completed; uninstall the version that appears in the name of
+ # the -done file.
+ #
+ # We also do a "make distclean", so that we don't have leftovers from
+ # old configurations.
+ #
+ uninstall_sparkle
+
+ uninstall_minizip
+
+ uninstall_brotli
+
+ uninstall_opus
+
+ uninstall_ilbc
+
+ uninstall_bcg729
+
+ uninstall_speexdsp
+
+ uninstall_spandsp
+
+ uninstall_libtiff
+
+ uninstall_nghttp2
+
+ uninstall_nghttp3
+
+ uninstall_libssh
+
+ uninstall_c_ares
+
+ uninstall_maxminddb
+
+ uninstall_snappy
+
+ uninstall_zstd
+
+ uninstall_libxml2
+
+ uninstall_lz4
+
+ uninstall_sbc
+
+ uninstall_lua
+
+ uninstall_gnutls
+
+ uninstall_nettle
+
+ uninstall_p11_kit
+
+ uninstall_libtasn1
+
+ uninstall_gmp
+
+ uninstall_libgcrypt
+
+ uninstall_libgpg_error
+
+ uninstall_libsmi
+
+ uninstall_qt
+
+ uninstall_glib
+
+ uninstall_pkg_config
+
+ uninstall_gettext
+
+ uninstall_ninja
+
+ #
+ # XXX - really remove this?
+ # Or should we remember it as installed only if this script
+ # installed it?
+ #
+ uninstall_asciidoctorpdf
+
+ uninstall_asciidoctor
+
+ uninstall_pytest
+
+ uninstall_meson
+
+ uninstall_python3
+
+ uninstall_cmake
+
+ uninstall_libtool
+
+ uninstall_automake
+
+ uninstall_autoconf
+
+ uninstall_pcre
+
+ uninstall_lzip
+
+ uninstall_xz
+
+ uninstall_curl
+ fi
+}
+
+#
+# Do we have permission to write in /usr/local?
+#
+# If so, assume we have permission to write in its subdirectories.
+# (If that's not the case, this test needs to check the subdirectories
+# as well.)
+#
+# If not, do "make install", "make uninstall", "ninja install",
+# "ninja uninstall", the removes for dependencies that don't support
+# "make uninstall" or "ninja uninstall", the renames of [g]libtool*,
+# and the writing of a libffi .pc file with sudo.
+#
+if [ -w /usr/local ]
+then
+ DO_MAKE_INSTALL="make install"
+ DO_MAKE_UNINSTALL="make uninstall"
+ DO_NINJA_INSTALL="ninja -C _build install"
+ DO_NINJA_UNINSTALL="ninja -C _build uninstall"
+ DO_TEE_TO_PC_FILE="tee"
+ DO_RM="rm"
+ DO_MV="mv"
+else
+ DO_MAKE_INSTALL="sudo make install"
+ DO_MAKE_UNINSTALL="sudo make uninstall"
+ DO_NINJA_INSTALL="sudo ninja -C _build install"
+ DO_NINJA_UNINSTALL="sudo ninja -C _build uninstall"
+ DO_TEE_TO_PC_FILE="sudo tee"
+ DO_RM="sudo rm"
+ DO_MV="sudo mv"
+fi
+
+#
+# When building with CMake, don't build libraries with an install path
+# that begins with @rpath because that will cause binaries linked with it
+# to use that path as the library to look for, and that will cause the
+# run-time linker, at least on macOS 14 and later, not to find the library
+# in /usr/local/lib unless you explicitly set DYLD_LIBRARY_PATH to include
+# /usr/local/lib. That means that you get "didn't find libpcre" errors if
+# you try to run binaries from a build unless you set DYLD_LIBRARY_PATH to
+# include /usr/local/lib.
+#
+# However, setting CMAKE_MACOSX_RPATH to OFF causes the installed
+# library just to have the file name of the library as its install
+# name. It needs to be the full installed path of the library in
+# order to make running binaries from the build directory work, so
+# we set CMAKE_INSTALL_NAME_DIR to /usr/local/lib.
+#
+# packaging/macosx/osx-app.sh will convert *all* libraries in
+# the app bundle to have an @rpath install name, so this won't
+# break anything there; it just fixes the ability to run from the
+# build directory.
+#
+DO_CMAKE="cmake -DCMAKE_MACOSX_RPATH=OFF -DCMAKE_INSTALL_NAME_DIR=/usr/local/lib"
+
+# This script is meant to be run in the source root. The following
+# code will attempt to get you there, but is not perfect (particularly
+# if someone copies the script).
+
+topdir=`pwd`/`dirname $0`/..
+cd $topdir
+
+# Preference of the support libraries directory:
+# ${MACOSX_SUPPORT_LIBS}
+# ../macosx-support-libs
+# ./macosx-support-libs (default if none exists)
+if [ ! -d "${MACOSX_SUPPORT_LIBS}" ]; then
+ unset MACOSX_SUPPORT_LIBS
+fi
+if [ -d ../macosx-support-libs ]; then
+ MACOSX_SUPPORT_LIBS=${MACOSX_SUPPORT_LIBS-../macosx-support-libs}
+else
+ MACOSX_SUPPORT_LIBS=${MACOSX_SUPPORT_LIBS-./macosx-support-libs}
+fi
+
+#
+# If we have SDKs available, the default target OS is the major version
+# of the one we're running; get that and strip off the third component
+# if present.
+#
+for i in /Developer/SDKs \
+ /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \
+ /Library/Developer/CommandLineTools/SDKs
+do
+ if [ -d "$i" ]
+ then
+ min_osx_target=`sw_vers -productVersion | sed 's/\([0-9]*\)\.\([0-9]*\)\.[0-9]*/\1.\2/'`
+ break
+ fi
+done
+
+#
+# Parse command-line flags:
+#
+# -h - print help.
+# -t <target> - build libraries so that they'll work on the specified
+# version of macOS and later versions.
+# -u - do an uninstall.
+# -n - download all packages, but don't build or install.
+#
+
+no_build=false
+
+while getopts ht:un name
+do
+ case $name in
+ u)
+ do_uninstall=yes
+ ;;
+ n)
+ no_build=true
+ ;;
+ t)
+ min_osx_target="$OPTARG"
+ ;;
+ h|?)
+ echo "Usage: macos-setup.sh [ -t <target> ] [ -u ] [ -n ]" 1>&1
+ exit 0
+ ;;
+ esac
+done
+
+#
+# Get the version numbers of installed packages, if any.
+#
+if [ -d "${MACOSX_SUPPORT_LIBS}" ]
+then
+ cd "${MACOSX_SUPPORT_LIBS}"
+
+ installed_xz_version=`ls xz-*-done 2>/dev/null | sed 's/xz-\(.*\)-done/\1/'`
+ installed_lzip_version=`ls lzip-*-done 2>/dev/null | sed 's/lzip-\(.*\)-done/\1/'`
+ installed_pcre_version=`ls pcre-*-done 2>/dev/null | sed 's/pcre-\(.*\)-done/\1/'`
+ installed_pcre2_version=$(ls pcre2-*-done 2>/dev/null | sed 's/pcre2-\(.*\)-done/\1/')
+ installed_autoconf_version=`ls autoconf-*-done 2>/dev/null | sed 's/autoconf-\(.*\)-done/\1/'`
+ installed_automake_version=`ls automake-*-done 2>/dev/null | sed 's/automake-\(.*\)-done/\1/'`
+ installed_libtool_version=`ls libtool-*-done 2>/dev/null | sed 's/libtool-\(.*\)-done/\1/'`
+ installed_cmake_version=`ls cmake-*-done 2>/dev/null | sed 's/cmake-\(.*\)-done/\1/'`
+ installed_ninja_version=`ls ninja-*-done 2>/dev/null | sed 's/ninja-\(.*\)-done/\1/'`
+ installed_asciidoctor_version=`ls asciidoctor-*-done 2>/dev/null | sed 's/asciidoctor-\(.*\)-done/\1/'`
+ installed_asciidoctorpdf_version=`ls asciidoctorpdf-*-done 2>/dev/null | sed 's/asciidoctorpdf-\(.*\)-done/\1/'`
+ installed_gettext_version=`ls gettext-*-done 2>/dev/null | sed 's/gettext-\(.*\)-done/\1/'`
+ installed_pkg_config_version=`ls pkg-config-*-done 2>/dev/null | sed 's/pkg-config-\(.*\)-done/\1/'`
+ installed_glib_version=`ls glib-*-done 2>/dev/null | sed 's/glib-\(.*\)-done/\1/'`
+ installed_qt_version=`ls qt-*-done 2>/dev/null | sed 's/qt-\(.*\)-done/\1/'`
+ installed_libsmi_version=`ls libsmi-*-done 2>/dev/null | sed 's/libsmi-\(.*\)-done/\1/'`
+ installed_libgpg_error_version=`ls libgpg-error-*-done 2>/dev/null | sed 's/libgpg-error-\(.*\)-done/\1/'`
+ installed_libgcrypt_version=`ls libgcrypt-*-done 2>/dev/null | sed 's/libgcrypt-\(.*\)-done/\1/'`
+ installed_gmp_version=`ls gmp-*-done 2>/dev/null | sed 's/gmp-\(.*\)-done/\1/'`
+ installed_libtasn1_version=`ls libtasn1-*-done 2>/dev/null | sed 's/libtasn1-\(.*\)-done/\1/'`
+ installed_p11_kit_version=`ls p11-kit-*-done 2>/dev/null | sed 's/p11-kit-\(.*\)-done/\1/'`
+ installed_nettle_version=`ls nettle-*-done 2>/dev/null | sed 's/nettle-\(.*\)-done/\1/'`
+ installed_gnutls_version=`ls gnutls-*-done 2>/dev/null | sed 's/gnutls-\(.*\)-done/\1/'`
+ installed_lua_version=`ls lua-*-done 2>/dev/null | sed 's/lua-\(.*\)-done/\1/'`
+ installed_snappy_version=`ls snappy-*-done 2>/dev/null | sed 's/snappy-\(.*\)-done/\1/'`
+ installed_zstd_version=`ls zstd-*-done 2>/dev/null | sed 's/zstd-\(.*\)-done/\1/'`
+ installed_libxml2_version=`ls libxml2-*-done 2>/dev/null | sed 's/libxml2-\(.*\)-done/\1/'`
+ installed_lz4_version=`ls lz4-*-done 2>/dev/null | sed 's/lz4-\(.*\)-done/\1/'`
+ installed_sbc_version=`ls sbc-*-done 2>/dev/null | sed 's/sbc-\(.*\)-done/\1/'`
+ installed_maxminddb_version=`ls maxminddb-*-done 2>/dev/null | sed 's/maxminddb-\(.*\)-done/\1/'`
+ installed_cares_version=`ls c-ares-*-done 2>/dev/null | sed 's/c-ares-\(.*\)-done/\1/'`
+ installed_libssh_version=`ls libssh-*-done 2>/dev/null | sed 's/libssh-\(.*\)-done/\1/'`
+ installed_nghttp2_version=`ls nghttp2-*-done 2>/dev/null | sed 's/nghttp2-\(.*\)-done/\1/'`
+ installed_nghttp3_version=`ls nghttp3-*-done 2>/dev/null | sed 's/nghttp3-\(.*\)-done/\1/'`
+ installed_libtiff_version=`ls tiff-*-done 2>/dev/null | sed 's/tiff-\(.*\)-done/\1/'`
+ installed_spandsp_version=`ls spandsp-*-done 2>/dev/null | sed 's/spandsp-\(.*\)-done/\1/'`
+ installed_speexdsp_version=`ls speexdsp-*-done 2>/dev/null | sed 's/speexdsp-\(.*\)-done/\1/'`
+ installed_bcg729_version=`ls bcg729-*-done 2>/dev/null | sed 's/bcg729-\(.*\)-done/\1/'`
+ installed_ilbc_version=`ls ilbc-*-done 2>/dev/null | sed 's/ilbc-\(.*\)-done/\1/'`
+ installed_opus_version=`ls opus-*-done 2>/dev/null | sed 's/opus-\(.*\)-done/\1/'`
+ installed_python3_version=`ls python3-*-done 2>/dev/null | sed 's/python3-\(.*\)-done/\1/'`
+ installed_brotli_version=`ls brotli-*-done 2>/dev/null | sed 's/brotli-\(.*\)-done/\1/'`
+ installed_minizip_version=`ls minizip-*-done 2>/dev/null | sed 's/minizip-\(.*\)-done/\1/'`
+ installed_sparkle_version=`ls sparkle-*-done 2>/dev/null | sed 's/sparkle-\(.*\)-done/\1/'`
+
+ cd $topdir
+fi
+
+if [ "$do_uninstall" = "yes" ]
+then
+ uninstall_all
+ exit 0
+fi
+
+#
+# Configure scripts tend to set CFLAGS and CXXFLAGS to "-g -O2" if
+# invoked without CFLAGS or CXXFLAGS being set in the environment.
+#
+# However, we *are* setting them in the environment, for our own
+# nefarious purposes, so start them out as "-g -O2".
+#
+CFLAGS="-g -O2"
+CXXFLAGS="-g -O2"
+
+# if no make options are present, set default options
+if [ -z "$MAKE_BUILD_OPTS" ] ; then
+ # by default use 1.5x number of cores for parallel build
+ MAKE_BUILD_OPTS="-j $(( $(sysctl -n hw.logicalcpu) * 3 / 2))"
+fi
+
+#
+# If we have a target release, look for the oldest SDK that's for an
+# OS equal to or later than that one, and build libraries against it
+# rather than against the headers and, more importantly, libraries
+# that come with the OS, so that we don't end up with support libraries
+# that only work on the OS version on which we built them, not earlier
+# versions of the same release, or earlier releases if the minimum is
+# earlier.
+#
+if [ ! -z "$min_osx_target" ]
+then
+ #
+ # Get the major and minor version of the target release.
+ # We assume it'll be a while before there's a macOS 100. :-)
+ #
+ case "$min_osx_target" in
+
+ [1-9][0-9].*)
+ #
+ # major.minor.
+ #
+ min_osx_target_major=`echo "$min_osx_target" | sed -n 's/\([1-9][0-9]*\)\..*/\1/p'`
+ min_osx_target_minor=`echo "$min_osx_target" | sed -n 's/[1-9][0-9]*\.\(.*\)/\1/p'`
+ ;;
+
+ [1-9][0-9])
+ #
+ # Just a major version number was specified; make the minor
+ # version 0.
+ #
+ min_osx_target_major="$min_osx_target"
+ min_osx_target_minor=0
+ ;;
+
+ *)
+ echo "macosx-setup.sh: Invalid target release $min_osx_target" 1>&2
+ exit 1
+ ;;
+ esac
+
+ #
+ # Search each directory that might contain SDKs.
+ #
+ sdkpath=""
+ for sdksdir in /Developer/SDKs \
+ /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \
+ /Library/Developer/CommandLineTools/SDKs
+ do
+ #
+ # Get a list of all the SDKs.
+ #
+ if ! test -d "$sdksdir"
+ then
+ #
+ # There is no directory with that name.
+ # Move on to the next one in the list, if any.
+ #
+ continue
+ fi
+
+ #
+ # Get a list of all the SDKs in that directory, if any.
+ # We assume it'll be a while before there's a macOS 100. :-)
+ #
+ sdklist=`(cd "$sdksdir"; ls -d MacOSX[1-9][0-9].[0-9]*.sdk 2>/dev/null)`
+
+ for sdk in $sdklist
+ do
+ #
+ # Get the major and minor version for this SDK.
+ #
+ sdk_major=`echo "$sdk" | sed -n 's/MacOSX\([1-9][0-9]*\)\..*\.sdk/\1/p'`
+ sdk_minor=`echo "$sdk" | sed -n 's/MacOSX[1-9][0-9]*\.\(.*\)\.sdk/\1/p'`
+
+ #
+ # Is it for the deployment target or some later release?
+ # Starting with major 11, the minor version no longer matters.
+ #
+ if test "$sdk_major" -gt "$min_osx_target_major" -o \
+ \( "$sdk_major" -eq "$min_osx_target_major" -a \
+ \( "$sdk_major" -ge 11 -o \
+ "$sdk_minor" -ge "$min_osx_target_minor" \) \)
+ then
+ #
+ # Yes, use it.
+ #
+ sdkpath="$sdksdir/$sdk"
+ break 2
+ fi
+ done
+ done
+
+ if [ -z "$sdkpath" ]
+ then
+ echo "macos-setup.sh: Couldn't find an SDK for macOS $min_osx_target or later" 1>&2
+ exit 1
+ fi
+
+ SDKPATH="$sdkpath"
+ echo "Using the $sdk_major.$sdk_minor SDK"
+
+ #
+ # Make sure there are links to /usr/local/include and /usr/local/lib
+ # in the SDK's usr/local.
+ #
+ if [ ! -e $SDKPATH/usr/local/include ]
+ then
+ if [ ! -d $SDKPATH/usr/local ]
+ then
+ sudo mkdir $SDKPATH/usr/local
+ fi
+ sudo ln -s /usr/local/include $SDKPATH/usr/local/include
+ fi
+ if [ ! -e $SDKPATH/usr/local/lib ]
+ then
+ if [ ! -d $SDKPATH/usr/local ]
+ then
+ sudo mkdir $SDKPATH/usr/local
+ fi
+ sudo ln -s /usr/local/lib $SDKPATH/usr/local/lib
+ fi
+
+ #
+ # Set the minimum OS version for which to build to the specified
+ # minimum target OS version, so we don't, for example, end up using
+    # linker features supported by the OS version on which we're building
+ # but not by the target version.
+ #
+ VERSION_MIN_FLAGS="-mmacosx-version-min=$min_osx_target"
+
+ #
+ # Compile and link against the SDK.
+ #
+ SDKFLAGS="-isysroot $SDKPATH"
+
+fi
+
+export CFLAGS
+export CXXFLAGS
+
+#
+# You need Xcode or the command-line tools installed to get the compilers (xcrun checks both).
+#
+ if [ ! -x /usr/bin/xcrun ]; then
+ echo "Please install Xcode (app or command line) first (should be available on DVD or from the Mac App Store)."
+ exit 1
+fi
+
+if [ "$QT_VERSION" ]; then
+ #
+ # We need Xcode, not just the command-line tools, installed to build
+ # Qt.
+ #
+ # At least with Xcode 8, /usr/bin/xcodebuild --help fails if only
+ # the command-line tools are installed and succeeds if Xcode is
+ # installed. Unfortunately, it fails *with* Xcode 3, but
+ # /usr/bin/xcodebuild -version works with that and with Xcode 8.
+ # Hopefully it fails with only the command-line tools installed.
+ #
+ if /usr/bin/xcodebuild -version >/dev/null 2>&1; then
+ :
+ elif qmake --version >/dev/null 2>&1; then
+ :
+ else
+ echo "Please install Xcode first (should be available on DVD or from the Mac App Store)."
+ echo "The command-line build tools are not sufficient to build Qt."
+ echo "Alternatively build QT according to: https://gist.github.com/shoogle/750a330c851bd1a924dfe1346b0b4a08#:~:text=MacOS%2FQt%5C%20Creator-,Go%20to%20Qt%20Creator%20%3E%20Preferences%20%3E%20Build%20%26%20Run%20%3E%20Kits,for%20both%20compilers%2C%20not%20gcc%20."
+ exit 1
+ fi
+fi
+
+export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
+
+#
+# Do all the downloads and untarring in a subdirectory, so all that
+# stuff can be removed once we've installed the support libraries.
+
+if [ ! -d "${MACOSX_SUPPORT_LIBS}" ]
+then
+ mkdir "${MACOSX_SUPPORT_LIBS}" || exit 1
+fi
+cd "${MACOSX_SUPPORT_LIBS}"
+
+install_all
+
+echo ""
+
+#
+# Indicate what paths to use for pkg-config and cmake.
+#
+pkg_config_path=/usr/local/lib/pkgconfig
+if [ "$QT_VERSION" ]; then
+ qt_base_path=$HOME/Qt$QT_VERSION/$QT_VERSION/clang_64
+ pkg_config_path="$pkg_config_path":"$qt_base_path/lib/pkgconfig"
+ CMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH":"$qt_base_path/lib/cmake"
+fi
+
+if $no_build; then
+ echo "All required dependencies downloaded. Run without -n to install them."
+ exit 0
+fi
+
+if [ "$QT_VERSION" ]; then
+ if [ -f qt-$QT_VERSION-done ]; then
+ echo "You are now prepared to build Wireshark."
+ else
+ echo "Qt was not installed; you will have to install it in order to build the"
+ echo "Wireshark application, but you can build all the command-line tools in"
+ echo "the Wireshark distribution."
+ echo ""
+ echo "See section 2.1.1. \"Build environment setup\" of the Wireshark Developer's"
+ echo "Guide for instructions on how to install Qt."
+ fi
+else
+ echo "You did not install Qt; you will have to install it in order to build"
+ echo "the Wireshark application, but you can build all the command-line tools in"
+ echo "the Wireshark distribution."
+fi
+echo
+echo "To build:"
+echo
+echo "export PKG_CONFIG_PATH=$pkg_config_path"
+echo "export CMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH"
+echo "export PATH=$PATH:$qt_base_path/bin"
+echo
+echo "mkdir build; cd build"
+if [ ! -z "$NINJA_VERSION" ]; then
+ echo "cmake -G Ninja .."
+ echo "ninja wireshark_app_bundle logray_app_bundle # (Modify as needed)"
+ echo "ninja install/strip"
+else
+ echo "cmake .."
+ echo "make $MAKE_BUILD_OPTS wireshark_app_bundle logray_app_bundle # (Modify as needed)"
+ echo "make install/strip"
+fi
+echo
+echo "Make sure you are allowed capture access to the network devices"
+echo "See: https://gitlab.com/wireshark/wireshark/-/wikis/CaptureSetup/CapturePrivileges"
+echo
+
+exit 0
diff --git a/tools/make-authors-csv.py b/tools/make-authors-csv.py
new file mode 100755
index 0000000..7652803
--- /dev/null
+++ b/tools/make-authors-csv.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+#
+# Generate the authors.csv file.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''\
+Remove tasks from individual author entries from the AUTHORS file
+for use in the "About" dialog.
+'''
+
+import io
+import re
+import sys
+
+
+def remove_tasks(stdinu8):
+    # Read the AUTHORS file from the given UTF-8 text stream and return a
+    # list of its lines with the brace-delimited ("{ ... }") per-author task
+    # blocks removed.  Text preceding an opening '{' on the same line is kept.
+    in_subinfo = False
+    all_lines = []
+
+    # Assume the first line is blank and skip it. make-authors-short.pl
+    # skipped over the UTF-8 BOM as well. Do we need to do that here?
+
+    stdinu8.readline()
+
+    for line in stdinu8:
+
+        sub_m = re.search(r'(.*?)\s*\{', line)
+        if sub_m:
+            in_subinfo = True
+            all_lines.append(sub_m.group(1))
+        elif '}' in line:
+            in_subinfo = False
+            # Keep the line following the closing '}' unless it is blank.
+            nextline = next(stdinu8)
+            if not re.match(r'^\s*$', nextline):
+                # if '{' in nextline:
+                #     stderru8.write("No blank line after '}', found " + nextline)
+                all_lines.append(nextline)
+        elif in_subinfo:
+            # Drop lines inside a task block.
+            continue
+        else:
+            all_lines.append(line)
+    return all_lines
+
+
+def main():
+    # Read AUTHORS on stdin and emit "name,mail" CSV rows on stdout.
+    # Wrap the standard streams so we always use UTF-8 regardless of locale.
+    stdinu8 = io.TextIOWrapper(sys.stdin.buffer, encoding='utf8')
+    stdoutu8 = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
+    # NOTE(review): stderru8 is currently unused; only the commented-out
+    # diagnostic in remove_tasks() referenced it.
+    stderru8 = io.TextIOWrapper(sys.stderr.buffer, encoding='utf8')
+
+    lines = remove_tasks(stdinu8)
+    # Match "Name <mail>" or "Name (mail)"; "[AT]" obfuscation is accepted
+    # inside the address part and undone below.
+    patt = re.compile("(.*)[<(]([\\s'a-zA-Z0-9._%+-]+(\\[[Aa][Tt]\\])?[a-zA-Z0-9._%+-]+)[>)]")
+
+    for line in lines:
+        match = patt.match(line)
+        if match:
+            name = match.group(1).strip()
+            mail = match.group(2).strip().replace("[AT]", "@")
+            stdoutu8.write("{},{}\n".format(name, mail))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/make-enterprises.py b/tools/make-enterprises.py
new file mode 100755
index 0000000..1b2b2d0
--- /dev/null
+++ b/tools/make-enterprises.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python3
+# create the enterprises.c file from
+# https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers
+# or an offline copy
+#
+# Copyright 2022 by Moshe Kaplan
+# Based on make-sminmpec.pl by Gerald Combs
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 2004 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import argparse
+import re
+import urllib.request
+
+
+# Default output path, relative to the top of the source tree.
+ENTERPRISES_CFILE = os.path.join('epan', 'enterprises.c')
+
+# Upstream IANA registry of Private Enterprise Numbers.
+ENTERPRISE_NUMBERS_URL = "https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers"
+
+# A registry entry starts with its decimal number in column 0.
+DECIMAL_PATTERN = r"^(\d+)"
+# up to three spaces because of formatting errors in the source
+# NOTE(review): the pattern only allows one optional leading space, which
+# does not match the comment above — confirm which is intended.
+ORGANIZATION_PATTERN = r"^ ?(\S.*)"
+FORMERLY_PATTERN = r" \(((formerly|previously) .*)\)"
+
+
+# C helper appended verbatim to the generated file: bounds-checked lookup.
+LOOKUP_FUNCTION = r"""
+const char* global_enterprises_lookup(uint32_t value)
+{
+    if (value > table.max_idx) {
+        return NULL;
+    }
+    else return table.values[value];
+}
+"""
+
+# C helper appended verbatim: dump every non-NULL entry to a FILE*.
+DUMP_FUNCTION = r"""
+void global_enterprises_dump(FILE *fp)
+{
+    for (size_t idx = 0; idx <= table.max_idx; idx++) {
+        if (table.values[idx] != NULL) {
+            fprintf(fp, "%zu\t%s\n", idx, table.values[idx]);
+        }
+    }
+}
+"""
+
+# This intermediate format is no longer written to a file - returned as string
+def generate_enterprise_entries(file_content):
+    # Parse the IANA enterprise-numbers registry text.  Returns a tuple
+    # (entries, last_updated_line): `entries` is one "<decimal>\t<org>"
+    # line per assignment; `last_updated_line` is a C comment recording
+    # the registry's "last updated" stamp for the generated file header.
+    # We only care about the "Decimal" and "Organization",
+    # not the contact or email
+    org_lines = []
+    last_updated = ""
+    end_seen = False
+    for line in file_content.splitlines():
+        decimal_match = re.match(DECIMAL_PATTERN, line)
+        if decimal_match:
+            decimal = decimal_match.group(0)
+        elif re.match(ORGANIZATION_PATTERN, line):
+            organization = line.strip()
+            if organization.lower() == "unassigned":
+                continue
+            # Turn "(formerly ...)" details into a tab-separated comment.
+            organization = re.sub(FORMERLY_PATTERN, r"\t# \1", organization)
+            org_lines += [decimal + "\t" + organization]
+        elif "last updated" in line.lower():
+            last_updated = line
+        elif "end of document" in line.lower():
+            end_seen = True
+
+    # Guard against a truncated download.
+    if not end_seen:
+        raise Exception('"End of Document" not found. Truncated source file?')
+
+    last_updated_line = "/* " + last_updated + " */\n\n"
+    output = "\n".join(org_lines) + "\n"
+    return (output,last_updated_line)
+
+class CFile:
+    # Writer for the generated enterprises C file.  The header is written
+    # on construction; mappings are collected via addMapping(); the table
+    # and helper functions are written from __del__ when the object is
+    # garbage-collected.
+    # NOTE(review): relying on __del__ for finalization is fragile (not
+    # guaranteed to run on interpreter abort); an explicit close()/context
+    # manager would be safer.
+    def __init__(self, filename, last_updated_line):
+        self.filename = filename
+        self.f = open(filename, 'w')
+        self.mappings = {}      # enterprise number -> organization name
+        self.highest_num = 0    # largest enterprise number seen
+
+        # Write file header
+        self.f.write('/* ' + os.path.basename(self.filename) + '\n')
+        self.f.write(' *\n')
+        self.f.write(' * Wireshark - Network traffic analyzer\n')
+        self.f.write(' * By Gerald Combs <gerald@wireshark.org>\n')
+        self.f.write(' * Copyright 1998 Gerald Combs\n')
+        self.f.write(' *\n')
+        self.f.write(' * Do not edit - this file is automatically generated\n')
+        self.f.write(' * SPDX-License-Identifier: GPL-2.0-or-later\n')
+        self.f.write(' */\n\n')
+        self.f.write(last_updated_line)
+
+        # Include header files
+        self.f.write('#include "config.h"\n\n')
+        self.f.write('#include <stddef.h>\n')
+        self.f.write('#include "enterprises.h"\n')
+        self.f.write('\n\n')
+
+    def __del__(self):
+        # Emit the table type, the table itself, and the helper functions.
+        self.f.write('typedef struct\n')
+        self.f.write('{\n')
+        self.f.write('    uint32_t max_idx;\n')
+        self.f.write('    const char* values[' + str(self.highest_num+1) + '];\n')
+        self.f.write('} global_enterprises_table_t;\n\n')
+
+        # Write static table
+        self.f.write('static global_enterprises_table_t table =\n')
+        self.f.write('{\n')
+        # Largest index
+        self.f.write('    ' + str(self.highest_num) + ',\n')
+        self.f.write('    {\n')
+        # Entries (read from dict)
+        for n in range(0, self.highest_num+1):
+            if n not in self.mappings:
+                # There are some gaps, write a NULL entry so can lookup by index
+                line = '        NULL'
+            else:
+                line = '        "' + self.mappings[n] + '"'
+            # Add comma.
+            if n < self.highest_num:
+                line += ','
+            # Add number as aligned comment.
+            line += ' '*(90-len(line)) + '// ' + str(n)
+
+            self.f.write(line+'\n')
+
+        # End of array
+        self.f.write('    }\n')
+        # End of struct
+        self.f.write('};\n')
+        print('Re-generated', self.filename)
+
+        # Lookup function
+        self.f.write(LOOKUP_FUNCTION)
+
+        # Dump function
+        self.f.write(DUMP_FUNCTION)
+
+    # Add an individual mapping to the function
+    def addMapping(self, num, name):
+        # Handle some escapings
+        name = name.replace('\\', '\\\\')
+        # NOTE(review): inside a C string literal '""' closes and reopens the
+        # literal (adjacent literals concatenate), dropping the quote from the
+        # output; '\\"' may be what is intended here — confirm.
+        name = name.replace('"', '""')
+
+        # Record.
+        self.mappings[num] = name
+        self.highest_num = num if num>self.highest_num else self.highest_num
+
+
+
+def main():
+    # Fetch (or read from --infile) the IANA enterprise-numbers registry
+    # and regenerate the C lookup table in `outfile`.
+    parser = argparse.ArgumentParser(description="Create the {} file.".format(ENTERPRISES_CFILE))
+    parser.add_argument('--infile')
+    parser.add_argument('outfile', nargs='?', default=ENTERPRISES_CFILE)
+    parsed_args = parser.parse_args()
+
+    # Read data from file or webpage
+    if parsed_args.infile:
+        with open(parsed_args.infile, encoding='utf-8') as fh:
+            data = fh.read()
+    else:
+        with urllib.request.urlopen(ENTERPRISE_NUMBERS_URL) as f:
+            if f.status != 200:
+                # NOTE(review): concatenating str + int (f.status) would raise
+                # TypeError on this path; urlopen() also normally raises
+                # HTTPError for non-2xx responses before reaching here.
+                raise Exception("request for " + ENTERPRISE_NUMBERS_URL + " failed with result code " + f.status)
+            data = f.read().decode('utf-8')
+
+    # Find bits we need and generate enterprise entries
+    enterprises_content,last_updated_line = generate_enterprise_entries(data)
+
+    # Now write to a C file the contents (which is faster than parsing the global file at runtime).
+    c_file = CFile(parsed_args.outfile, last_updated_line)
+
+    mapping_re = re.compile(r'^(\d+)\s+(.*)$')
+    for line in enterprises_content.splitlines():
+        match = mapping_re.match(line)
+        if match:
+            num, name = match.group(1), match.group(2)
+            # Strip any comments and/or trailing whitespace
+            idx = name.find('#')
+            if idx != -1:
+                name = name[0:idx]
+            name = name.rstrip()
+            # Add the entry; the table is written when c_file is finalized.
+            c_file.addMapping(int(num), name)
+
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/make-enums.py b/tools/make-enums.py
new file mode 100755
index 0000000..b6a2835
--- /dev/null
+++ b/tools/make-enums.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021, João Valverde <j@v6e.pt>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+#
+# Uses pyclibrary to parse C headers for enums and integer macro
+# definitions. Exports that data to a C file for the introspection API.
+#
+# Requires: https://github.com/MatthieuDartiailh/pyclibrary
+#
+
+import os
+import sys
+import argparse
+from pyclibrary import CParser
+
+def parse_files(infiles, outfile):
+    # Parse the given C headers with pyclibrary and write a C source file
+    # containing a ws_enum_t table of every integer macro/enum value found.
+
+    print("Input: {}".format(infiles))
+    print("Output: '{}'".format(outfile))
+
+    parser = CParser(infiles)
+
+    source = """\
+/*
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Generated automatically from %s. It can be re-created by running
+ * "tools/make-enums.py" from the top source directory.
+ *
+ * It is fine to edit this file by hand. Particularly if a symbol
+ * disappears from the API it can just be removed here. There is no
+ * requirement to re-run the generator script.
+ *
+ */
+""" % (os.path.basename(sys.argv[0]))
+
+    for f in infiles:
+        source += '#include <{}>\n'.format(f)
+
+    source += """
+#define ENUM(arg) { #arg, arg }
+
+static ws_enum_t all_enums[] = {
+"""
+
+    # Emit only integer-valued definitions, sorted for a stable diff.
+    definitions = parser.defs['values']
+    symbols = list(definitions.keys())
+    symbols.sort()
+
+    for s in symbols:
+        if isinstance(definitions[s], int):
+            source += '    ENUM({}),\n'.format(s)
+
+    # NULL terminator entry for the table.
+    source += """\
+    { NULL, 0 },
+};
+"""
+
+    try:
+        fh = open(outfile, 'w')
+    except OSError:
+        sys.exit('Unable to write ' + outfile + '.\n')
+
+    fh.write(source)
+    fh.close()
+
+# Generate the introspection enum tables for libwireshark and libwiretap.
+epan_files = [
+    "epan/address.h",
+    "epan/ipproto.h",
+    "epan/proto.h",
+    "epan/ftypes/ftypes.h",
+    "epan/stat_groups.h",
+]
+parse_files(epan_files, "epan/introspection-enums.c")
+
+wtap_files = [
+    "wiretap/wtap.h",
+]
+parse_files(wtap_files, "wiretap/introspection-enums.c")
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/make-isobus.py b/tools/make-isobus.py
new file mode 100644
index 0000000..ce0259c
--- /dev/null
+++ b/tools/make-isobus.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python3
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+'''Update the "packet-isobus-parameters.h" file.
+Make-isobus creates a file containing isobus parameters
+from the databases at isobus.net.
+'''
+
+import csv
+import io
+import os
+import sys
+import urllib.request, urllib.error, urllib.parse
+import zipfile
+
+def exit_msg(msg=None, status=1):
+    # Print an optional error message plus this script's usage (the module
+    # docstring) to stderr, then exit with the given status.
+    if msg is not None:
+        sys.stderr.write(msg + '\n\n')
+    sys.stderr.write(__doc__ + '\n')
+    sys.exit(status)
+
+def open_url_zipped(url):
+    '''Open a URL of a zipped file.
+
+    `url` is a (base, filename) sequence joined with '/'.  Returns a
+    zipfile.ZipFile wrapping the downloaded archive; exits via exit_msg()
+    on any download error.
+    '''
+
+    url_path = '/'.join(url)
+
+    req_headers = { 'User-Agent': 'Wireshark make-isobus' }
+    try:
+        req = urllib.request.Request(url_path, headers=req_headers)
+        response = urllib.request.urlopen(req)
+        body = response.read()
+    except Exception:
+        exit_msg('Error opening ' + url_path)
+
+    return zipfile.ZipFile(io.BytesIO(body))
+
+def main():
+    # Download the isobus.net CSV export, sanity-check each table's size,
+    # and regenerate epan/dissectors/packet-isobus-parameters.h.
+    this_dir = os.path.dirname(__file__)  # NOTE(review): currently unused
+    isobus_output_path = os.path.join('epan', 'dissectors', 'packet-isobus-parameters.h')
+
+    isobus_zip_url = [ "https://www.isobus.net/isobus/attachments/", "isoExport_csv.zip"]
+
+    # Member names inside the downloaded zip archive.
+    isobus_files = {
+        'indust' : 'Industry Groups.csv',
+        'glblfcts' : 'Global NAME Functions.csv',
+        'igfcts' :'IG Specific NAME Function.csv',
+        'manuf' : 'Manufacturer IDs.csv',
+        'pgn_spns' : 'SPNs and PGNs.csv'
+    }
+
+    zipf = open_url_zipped(isobus_zip_url)
+
+    # Industries csv
+    min_total = 4 # typically 8
+    f = zipf.read(isobus_files['indust'])
+    lines = f.decode('UTF-8', 'replace').splitlines()
+
+    if len(lines) < min_total:
+        exit_msg("{}: Not enough entries ({})".format(isobus_files['indust'], len(lines)))
+
+    indust_csv = csv.reader(lines)
+    # Skip the header row.
+    next(indust_csv)
+
+    # Global Name Functions csv
+    min_total = 50 # XXX as of 2023-10-18
+    f = zipf.read(isobus_files['glblfcts'])
+    lines = f.decode('UTF-8', 'replace').splitlines()
+
+    if len(lines) < min_total:
+        exit_msg("{}: Not enough entries ({})".format(isobus_files['glblfcts'], len(lines)))
+
+    glbl_name_functions_csv = csv.reader(lines)
+    next(glbl_name_functions_csv)
+
+    # Specific Name Functions csv
+    min_total = 200 # 295 as of 2023-10-18
+    f = zipf.read(isobus_files['igfcts'])
+    lines = f.decode('UTF-8', 'replace').splitlines()
+
+    if len(lines) < min_total:
+        exit_msg("{}: Not enough entries ({})".format(isobus_files['igfcts'], len(lines)))
+
+    # key: 256 * industry-group id + vehicle-system id
+    vehicle_system_names = {}
+    # key: 65536 * industry-group id + 256 * vehicle-system id + function id
+    specific_functions = {}
+
+    specific_functions_csv = csv.reader(lines)
+    next(specific_functions_csv)
+    for row in specific_functions_csv:
+        ig_id, vs_id, vs_name, f_id, f_name = row[:5]
+        new_id = int(ig_id) * 256 + int(vs_id)
+        if len(vs_name) > 50:
+            if new_id != 539: # 539: Weeders ...
+                print(f"shortening {new_id}: {vs_name} -> {vs_name[:36]}")
+            vs_name = vs_name[:36]
+        vehicle_system_names[new_id] = vs_name
+
+        #vehicle_system_names.setdefault(ig_id, {}).setdefault(vs_id, vs_name)
+        new_id2 = 256 * new_id + int(f_id)
+        specific_functions[new_id2] = f_name
+
+    # Manufacturers csv
+    min_total = 1000 # 1396 as of 2023-10-18
+    f = zipf.read(isobus_files['manuf'])
+    lines = f.decode('UTF-8', 'replace').splitlines()
+
+    if len(lines) < min_total:
+        exit_msg("{}: Not enough entries ({})".format(isobus_files['manuf'], len(lines)))
+
+    manuf_csv = csv.reader(lines)
+    next(manuf_csv)
+
+    # PGN SPN csv
+    min_total = 20000 # 23756 as of 2023-10-18
+    f = zipf.read(isobus_files['pgn_spns'])
+    lines = f.decode('UTF-8', 'replace').splitlines()
+
+    if len(lines) < min_total:
+        exit_msg("{}: Not enough entries ({})".format(isobus_files['pgn_spns'], len(lines)))
+
+    pgn_names = {}
+
+    pgn_spn_csv = csv.reader(lines)
+    next(pgn_spn_csv)
+    for row in pgn_spn_csv:
+        try:
+            pgn_id, pgn_name, = row[:2]
+            if not pgn_name.startswith("Proprietary B"):
+                pgn_names[int(pgn_id)] = pgn_name.replace("\"","'")
+        # NOTE(review): bare except silently drops malformed rows; catching
+        # ValueError explicitly would be safer.
+        except:
+            pass
+
+    # prepare output file
+    try:
+        output_fd = io.open(isobus_output_path, 'w', encoding='UTF-8')
+    except Exception:
+        exit_msg("Couldn't open ({}) ".format(isobus_output_path))
+
+    output_fd.write('''/*
+ * This file was generated by running ./tools/make-isobus.py.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * The ISOBUS public listings available from:
+ * <https://www.isobus.net/isobus/attachments/isoExport_csv.zip>
+ *
+ */
+
+#ifndef __PACKET_ISOBUS_PARAMETERS_H__
+#define __PACKET_ISOBUS_PARAMETERS_H__
+
+''')
+
+    # Write Industries
+    output_fd.write("static const value_string _isobus_industry_groups[] = {\n")
+
+    for row in sorted(indust_csv, key=lambda x: int(x[0])):
+        output_fd.write(f"    {{ {row[0]}, \"{row[1]}\" }},\n")
+
+    output_fd.write("    { 0, NULL }\n")
+    output_fd.write("};\n")
+    output_fd.write("static value_string_ext isobus_industry_groups_ext = VALUE_STRING_EXT_INIT(_isobus_industry_groups);\n\n");
+
+    # Write Vehicle System Names
+    output_fd.write("/* key: 256 * Industry-Group-ID + Vehicle-Group-ID */\n")
+    output_fd.write("static const value_string _isobus_vehicle_systems[] = {\n")
+
+    for key in sorted(vehicle_system_names):
+        output_fd.write(f"    {{ {hex(key)}, \"{vehicle_system_names[key]}\" }},\n")
+
+    output_fd.write("    { 0, NULL }\n")
+    output_fd.write("};\n")
+    output_fd.write("static value_string_ext isobus_vehicle_systems_ext = VALUE_STRING_EXT_INIT(_isobus_vehicle_systems);\n\n");
+
+    # Write Global Name Functions
+    output_fd.write("static const value_string _isobus_global_name_functions[] = {\n")
+
+    for row in sorted(glbl_name_functions_csv, key=lambda x: int(x[0])):
+        output_fd.write(f"    {{ {row[0]}, \"{row[1]}\" }},\n")
+
+    output_fd.write("    { 0, NULL }\n")
+    output_fd.write("};\n")
+    output_fd.write("static value_string_ext isobus_global_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_global_name_functions);\n\n");
+
+    # IG Specific Global Name Functions
+    output_fd.write("/* key: 65536 * Industry-Group-ID + 256 * Vehicle-System-ID + Function-ID */\n")
+    output_fd.write("static const value_string _isobus_ig_specific_name_functions[] = {\n")
+
+    for key in sorted(specific_functions):
+        output_fd.write(f"    {{ {hex(key)}, \"{specific_functions[key]}\" }},\n")
+
+    output_fd.write("    { 0, NULL }\n")
+    output_fd.write("};\n")
+    output_fd.write("static value_string_ext isobus_ig_specific_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_ig_specific_name_functions);\n\n");
+
+    # Write Manufacturers
+    output_fd.write("static const value_string _isobus_manufacturers[] = {\n")
+
+    for row in sorted(manuf_csv, key=lambda x: int(x[0])):
+        output_fd.write(f"    {{ {row[0]}, \"{row[1]}\" }},\n")
+
+    output_fd.write("    { 0, NULL }\n")
+    output_fd.write("};\n")
+    output_fd.write("static value_string_ext isobus_manufacturers_ext = VALUE_STRING_EXT_INIT(_isobus_manufacturers);\n\n");
+
+    # PGN Names
+    output_fd.write("static const value_string _isobus_pgn_names[] = {\n")
+
+    for key in sorted(pgn_names):
+        output_fd.write(f"    {{ {key}, \"{pgn_names[key]}\" }},\n")
+
+    output_fd.write("    { 0, NULL }\n")
+    output_fd.write("};\n")
+    output_fd.write("static value_string_ext isobus_pgn_names_ext = VALUE_STRING_EXT_INIT(_isobus_pgn_names);\n\n");
+
+    output_fd.write("#endif /* __PACKET_ISOBUS_PARAMETERS_H__ */")
+if __name__ == '__main__':
+    main()
diff --git a/tools/make-manuf.py b/tools/make-manuf.py
new file mode 100755
index 0000000..22f3aa0
--- /dev/null
+++ b/tools/make-manuf.py
@@ -0,0 +1,401 @@
+#!/usr/bin/env python3
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+'''Update the "manuf" file.
+
+Make-manuf creates a file containing ethernet OUIs and their company
+IDs from the databases at IEEE.
+'''
+
+import csv
+import html
+import io
+import os
+import re
+import sys
+import urllib.request, urllib.error, urllib.parse
+
+# PyICU is optional: when available, shorten() truncates names by grapheme
+# cluster instead of by code point.
+have_icu = False
+try:
+    # Use the grapheme or segments module instead?
+    import icu
+    have_icu = True
+except ImportError:
+    pass
+
+def exit_msg(msg=None, status=1):
+    # Print an optional error message plus this script's usage (the module
+    # docstring) to stderr, then exit with the given status.
+    if msg is not None:
+        sys.stderr.write(msg + '\n\n')
+    sys.stderr.write(__doc__ + '\n')
+    sys.exit(status)
+
+def open_url(url):
+    '''Open a URL (a [base, filename] pair) and return its body as a str.
+
+    If a command-line argument was given, it is treated as a local
+    directory holding a copy of url[1] and the file is read from there
+    instead of being fetched over the network.
+    '''
+
+    if len(sys.argv) > 1:
+        url_path = os.path.join(sys.argv[1], url[1])
+        url_fd = open(url_path)
+        body = url_fd.read()
+        url_fd.close()
+    else:
+        url_path = '/'.join(url)
+
+        req_headers = { 'User-Agent': 'Wireshark make-manuf' }
+        try:
+            req = urllib.request.Request(url_path, headers=req_headers)
+            response = urllib.request.urlopen(req)
+            body = response.read().decode('UTF-8', 'replace')
+        except Exception:
+            exit_msg('Error opening ' + url_path)
+
+    return body
+
+# These are applied after punctuation has been removed.
+# More examples at https://en.wikipedia.org/wiki/Incorporation_(business)
+# Joined into a single alternation and stripped case-insensitively in
+# shorten().
+general_terms = '|'.join([
+    ' a +s\\b', # A/S and A.S. but not "As" as in "Connect As".
+    ' ab\\b', # Also follows "Oy", which is covered below.
+    ' ag\\b',
+    ' b ?v\\b',
+    ' closed joint stock company\\b',
+    ' co\\b',
+    ' company\\b',
+    ' corp\\b',
+    ' corporation\\b',
+    ' corporate\\b',
+    ' de c ?v\\b', # Follows "S.A.", which is covered separately below.
+    ' gmbh\\b',
+    ' holding\\b',
+    ' inc\\b',
+    ' incorporated\\b',
+    ' jsc\\b',
+    ' kg\\b',
+    ' k k\\b', # "K.K." as in "kabushiki kaisha", but not "K+K" as in "K+K Messtechnik".
+    ' limited\\b',
+    ' llc\\b',
+    ' ltd\\b',
+    ' n ?v\\b',
+    ' oao\\b',
+    ' of\\b',
+    ' open joint stock company\\b',
+    ' ooo\\b',
+    ' oü\\b',
+    ' oy\\b',
+    ' oyj\\b',
+    ' plc\\b',
+    ' pty\\b',
+    ' pvt\\b',
+    ' s ?a ?r ?l\\b',
+    ' s ?a\\b',
+    ' s ?p ?a\\b',
+    ' sp ?k\\b',
+    ' s ?r ?l\\b',
+    ' systems\\b',
+    '\\bthe\\b',
+    ' zao\\b',
+    ' z ?o ?o\\b'
+    ])
+
+# Chinese company names tend to start with the location, skip it (non-exhaustive list).
+skip_start = [
+    'shengzen',
+    'shenzhen',
+    'beijing',
+    'shanghai',
+    'wuhan',
+    'hangzhou',
+    'guangxi',
+    'guangdong',
+    'chengdu',
+]
+
+# Special cases handled directly
+special_case = {
+    "Advanced Micro Devices": "AMD",
+    "杭州德澜科技有限公司": "DelanTech" # 杭州德澜科技有限公司(HangZhou Delan Technology Co.,Ltd)
+}
+
+def shorten(manuf):
+ '''Convert a long manufacturer name to abbreviated and short names'''
+ # Normalize whitespace.
+ manuf = ' '.join(manuf.split())
+ orig_manuf = manuf
+ # Convert all caps to title case
+ if manuf.isupper():
+ manuf = manuf.title()
+ # Remove the contents of parenthesis as ancillary data
+ manuf = re.sub(r"\(.*\)", '', manuf)
+ # Remove the contents of fullwidth parenthesis (mostly in Asian names)
+ manuf = re.sub(r"(.*)", '', manuf)
+ # Remove "a" before removing punctuation ("Aruba, a Hewlett [...]" etc.)
+ manuf = manuf.replace(" a ", " ")
+ # Remove any punctuation
+ # XXX Use string.punctuation? Note that it includes '-' and '*'.
+ manuf = re.sub(r"[\"',./:()+-]", ' ', manuf)
+ # XXX For some reason including the double angle brackets in the above
+ # regex makes it bomb
+ manuf = re.sub(r"[«»“”]", ' ', manuf)
+ # & isn't needed when Standalone
+ manuf = manuf.replace(" & ", " ")
+ # Remove business types and other general terms ("the", "inc", "plc", etc.)
+ plain_manuf = re.sub(general_terms, '', manuf, flags=re.IGNORECASE)
+ # ...but make sure we don't remove everything.
+ if not all(s == ' ' for s in plain_manuf):
+ manuf = plain_manuf
+
+ manuf = manuf.strip()
+
+ # Check for special case
+ if manuf in special_case.keys():
+ manuf = special_case[manuf]
+
+ # XXX: Some of the entries have Chinese city or other location
+ # names written with spaces between each character, like
+ # Bei jing, Wu Han, Shen Zhen, etc. We should remove that too.
+ split = manuf.split()
+ if len(split) > 1 and split[0].lower() in skip_start:
+ manuf = ' '.join(split[1:])
+
+ # Remove all spaces
+ manuf = re.sub(r'\s+', '', manuf)
+
+ if len(manuf) < 1:
+ sys.stderr.write('Manufacturer "{}" shortened to nothing.\n'.format(orig_manuf))
+ sys.exit(1)
+
+ # Truncate names to a reasonable length, say, 12 characters. If
+ # the string contains UTF-8, this may be substantially more than
+ # 12 bytes. It might also be less than 12 visible characters. Plain
+ # Python slices Unicode strings by code point, which is better
+ # than raw bytes but not as good as grapheme clusters. PyICU
+ # supports grapheme clusters. https://bugs.python.org/issue30717
+ #
+
+ # Truncate by code points
+ trunc_len = 12
+
+ if have_icu:
+ # Truncate by grapheme clusters
+ bi_ci = icu.BreakIterator.createCharacterInstance(icu.Locale('en_US'))
+ bi_ci.setText(manuf)
+ bounds = list(bi_ci)
+ bounds = bounds[0:trunc_len]
+ trunc_len = bounds[-1]
+
+ manuf = manuf[:trunc_len]
+
+ if manuf.lower() == orig_manuf.lower():
+ # Original manufacturer name was short and simple.
+ return [manuf, None]
+
+ mixed_manuf = orig_manuf
+ # At least one entry has whitespace in front of a period.
+ mixed_manuf = re.sub(r'\s+\.', '.', mixed_manuf)
+ #If company is all caps, convert to mixed case (so it doesn't look like we're screaming the company name)
+ if mixed_manuf.upper() == mixed_manuf:
+ mixed_manuf = mixed_manuf.title()
+
+ return [manuf, mixed_manuf]
+
+# Registry kinds, matching the enum names emitted into the C table.
+MA_L = 'MA_L'
+MA_M = 'MA_M'
+MA_S = 'MA_S'
+
+def prefix_to_oui(prefix, prefix_map):
+    # Convert a hex registry assignment (e.g. "0050C2DD6") into a
+    # colon-separated OUI string, appending "/<bits>" for masked (MA-M /
+    # MA-S) assignments.  Masked prefixes are also recorded in
+    # `prefix_map`, keyed by their 24-bit parent OUI.  Returns
+    # (oui_string, kind).
+    pfx_len = int(len(prefix) * 8 / 2)
+    prefix24 = prefix[:6]
+    oui24 = ':'.join(hi + lo for hi, lo in zip(prefix24[0::2], prefix24[1::2]))
+
+    if pfx_len == 24:
+        # 24-bit OUI assignment, no mask
+        return oui24, MA_L
+
+    # Other lengths which require a mask.
+    oui = prefix.ljust(12, '0')
+    oui = ':'.join(hi + lo for hi, lo in zip(oui[0::2], oui[1::2]))
+    if pfx_len == 28:
+        kind = MA_M
+    elif pfx_len == 36:
+        kind = MA_S
+    # NOTE(review): any other prefix length would leave `kind` unbound
+    # (NameError); the IEEE registries only publish 24/28/36-bit blocks.
+    prefix_map[oui24] = kind
+
+    return '{}/{:d}'.format(oui, int(pfx_len)), kind
+
+def main():
+    # Regenerate epan/manuf-data.c from the IEEE OUI/CID/IAB/MA-M/MA-S
+    # registries (or from local copies when a directory is passed on the
+    # command line; see open_url()).
+    this_dir = os.path.dirname(__file__)  # NOTE(review): currently unused
+    manuf_path = os.path.join('epan', 'manuf-data.c')
+
+    ieee_d = {
+        'OUI': { 'url': ["https://standards-oui.ieee.org/oui/", "oui.csv"], 'min_entries': 1000 },
+        'CID': { 'url': ["https://standards-oui.ieee.org/cid/", "cid.csv"], 'min_entries': 75 },
+        'IAB': { 'url': ["https://standards-oui.ieee.org/iab/", "iab.csv"], 'min_entries': 1000 },
+        'OUI28': { 'url': ["https://standards-oui.ieee.org/oui28/", "mam.csv"], 'min_entries': 1000 },
+        'OUI36': { 'url': ["https://standards-oui.ieee.org/oui36/", "oui36.csv"], 'min_entries': 1000 },
+    }
+    # Collected entries per registry kind: oui -> [short, mixed] names.
+    oui_d = {
+        MA_L: { '00:00:00' : ['00:00:00', 'Officially Xerox, but 0:0:0:0:0:0 is more common'] },
+        MA_M: {},
+        MA_S: {},
+    }
+
+    min_total = 35000; # 35830 as of 2018-09-05
+    total_added = 0
+
+    # Add IEEE entries from each of their databases
+    ieee_db_l = ['OUI', 'OUI28', 'OUI36', 'CID', 'IAB']
+
+    # map a 24-bit prefix to MA-M/MA-S or none (MA-L by default)
+    prefix_map = {}
+
+    for db in ieee_db_l:
+        db_url = ieee_d[db]['url']
+        ieee_d[db]['skipped'] = 0
+        ieee_d[db]['added'] = 0
+        ieee_d[db]['total'] = 0
+        print('Merging {} data from {}'.format(db, db_url))
+        body = open_url(db_url)
+        ieee_csv = csv.reader(body.splitlines())
+
+        # Pop the title row.
+        next(ieee_csv)
+        for ieee_row in ieee_csv:
+            #Registry,Assignment,Organization Name,Organization Address
+            #IAB,0050C2DD6,Transas Marine Limited,Datavagen 37 Askim Vastra Gotaland SE 436 32
+            oui, kind = prefix_to_oui(ieee_row[1].upper(), prefix_map)
+            manuf = ieee_row[2].strip()
+            # The Organization Name field occasionally contains HTML entities. Undo them.
+            manuf = html.unescape(manuf)
+            # "Watts A\S"
+            manuf = manuf.replace('\\', '/')
+            if manuf == 'IEEE Registration Authority':
+                continue
+            if manuf == 'Private':
+                continue
+            if oui in oui_d[kind]:
+                action = 'Skipping'
+                print('{} - {} IEEE "{}" in favor of "{}"'.format(oui, action, manuf, oui_d[kind][oui]))
+                ieee_d[db]['skipped'] += 1
+            else:
+                oui_d[kind][oui] = shorten(manuf)
+                ieee_d[db]['added'] += 1
+            ieee_d[db]['total'] += 1
+
+        if ieee_d[db]['total'] < ieee_d[db]['min_entries']:
+            exit_msg("Too few {} entries. Got {}, wanted {}".format(db, ieee_d[db]['total'], ieee_d[db]['min_entries']))
+        total_added += ieee_d[db]['total']
+
+    if total_added < min_total:
+        exit_msg("Too few total entries ({})".format(total_added))
+
+    try:
+        manuf_fd = io.open(manuf_path, 'w', encoding='UTF-8')
+    except Exception:
+        # NOTE(review): the file is opened for *writing*; the message says
+        # "for reading".
+        exit_msg("Couldn't open manuf file for reading ({}) ".format(manuf_path))
+
+    manuf_fd.write('''/*
+ * This file was generated by running ./tools/make-manuf.py.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * The data below has been assembled from the following sources:
+ *
+ * The IEEE public OUI listings available from:
+ * <http://standards-oui.ieee.org/oui/oui.csv>
+ * <http://standards-oui.ieee.org/cid/cid.csv>
+ * <http://standards-oui.ieee.org/iab/iab.csv>
+ * <http://standards-oui.ieee.org/oui28/mam.csv>
+ * <http://standards-oui.ieee.org/oui36/oui36.csv>
+ *
+ */
+
+''')
+
+    # Write the prefix map
+    manuf_fd.write("static const manuf_registry_t ieee_registry_table[] = {\n")
+    keys = list(prefix_map.keys())
+    keys.sort()
+    for oui in keys:
+        manuf_fd.write("    {{ {{ 0x{}, 0x{}, 0x{} }}, {} }},\n".format(oui[0:2], oui[3:5], oui[6:8], prefix_map[oui]))
+    manuf_fd.write("};\n\n")
+
+    # write the MA-L table
+    manuf_fd.write("static const manuf_oui24_t global_manuf_oui24_table[] = {\n")
+    keys = list(oui_d[MA_L].keys())
+    keys.sort()
+    for oui in keys:
+        short = oui_d[MA_L][oui][0]
+        if oui_d[MA_L][oui][1]:
+            long = oui_d[MA_L][oui][1]
+        else:
+            long = short
+        line = "    {{ {{ 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], short)
+        # Pad so the long-name column lines up.
+        sep = 44 - len(line)
+        if sep <= 0:
+            sep = 0
+        line += sep * ' '
+        line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
+        manuf_fd.write(line)
+    manuf_fd.write("};\n\n")
+
+    # write the MA-M table
+    manuf_fd.write("static const manuf_oui28_t global_manuf_oui28_table[] = {\n")
+    keys = list(oui_d[MA_M].keys())
+    keys.sort()
+    for oui in keys:
+        short = oui_d[MA_M][oui][0]
+        if oui_d[MA_M][oui][1]:
+            long = oui_d[MA_M][oui][1]
+        else:
+            long = short
+        line = "    {{ {{ 0x{}, 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], oui[9:11], short)
+        sep = 50 - len(line)
+        if sep <= 0:
+            sep = 0
+        line += sep * ' '
+        line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
+        manuf_fd.write(line)
+    manuf_fd.write("};\n\n")
+
+    #write the MA-S table
+    manuf_fd.write("static const manuf_oui36_t global_manuf_oui36_table[] = {\n")
+    keys = list(oui_d[MA_S].keys())
+    keys.sort()
+    for oui in keys:
+        short = oui_d[MA_S][oui][0]
+        if oui_d[MA_S][oui][1]:
+            long = oui_d[MA_S][oui][1]
+        else:
+            long = short
+        line = "    {{ {{ 0x{}, 0x{}, 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], oui[9:11], oui[12:14], short)
+        sep = 56 - len(line)
+        if sep <= 0:
+            sep = 0
+        line += sep * ' '
+        line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
+        manuf_fd.write(line)
+    manuf_fd.write("};\n")
+
+    manuf_fd.close()
+
+    # Summary statistics.
+    for db in ieee_d:
+        print('{:<20}: {}'.format('IEEE ' + db + ' added', ieee_d[db]['added']))
+    print('{:<20}: {}'.format('Total added', total_added))
+
+    print()
+    for db in ieee_d:
+        print('{:<20}: {}'.format('IEEE ' + db + ' total', ieee_d[db]['total']))
+
+    print()
+    for db in ieee_d:
+        print('{:<20}: {}'.format('IEEE ' + db + ' skipped', ieee_d[db]['skipped']))
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/make-no-reassembly-profile.py b/tools/make-no-reassembly-profile.py
new file mode 100755
index 0000000..cd68155
--- /dev/null
+++ b/tools/make-no-reassembly-profile.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+#
+# Generate preferences for a "No Reassembly" profile.
+# By Gerald Combs <gerald@wireshark.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''Generate preferences for a "No Reassembly" profile.'''
+
+import argparse
+import os.path
+import re
+import subprocess
+import sys
+
+MIN_PLUGINS = 10
+
+def main():
+    # Generate a "No Reassembly" profile: dump TShark's default preferences
+    # and flip every reassembly/desegment boolean preference to FALSE.
+    parser = argparse.ArgumentParser(description='No reassembly profile generator')
+    parser.add_argument('-p', '--program-path', default=os.path.curdir, help='Path to TShark.')
+    parser.add_argument('-v', '--verbose', action='store_const', const=True, default=False, help='Verbose output.')
+    args = parser.parse_args()
+
+    this_dir = os.path.dirname(__file__)
+    profile_path = os.path.join(this_dir, '..', 'resources', 'share', 'wireshark', 'profiles', 'No Reassembly', 'preferences')
+
+    tshark_path = os.path.join(args.program_path, 'tshark')
+    if not os.path.isfile(tshark_path):
+        print('tshark not found at {}\n'.format(tshark_path))
+        parser.print_usage()
+        sys.exit(1)
+
+    # Make sure plugin prefs are present.
+    cp = subprocess.run([tshark_path, '-G', 'plugins'], stdout=subprocess.PIPE, check=True, encoding='utf-8')
+    plugin_lines = cp.stdout.splitlines()
+    # NOTE(review): '\s' in a non-raw string is a deprecated escape since
+    # Python 3.6; these patterns (and rd_pref_re below) should use r'...'.
+    dissector_count = len(tuple(filter(lambda p: re.search('\sdissector\s', p), plugin_lines)))
+    if dissector_count < MIN_PLUGINS:
+        print('Found {} plugins but require {}.'.format(dissector_count, MIN_PLUGINS))
+        sys.exit(1)
+
+    # Matches commented-out default prefs like "#tcp.desegment_tcp_streams: TRUE".
+    rd_pref_re = re.compile('^#\s*(.*(reassembl|desegment)\S*):\s*TRUE')
+    out_prefs = [
+        '# Generated by ' + os.path.basename(__file__), '',
+        '####### Protocols ########', '',
+    ]
+    cp = subprocess.run([tshark_path, '-G', 'defaultprefs'], stdout=subprocess.PIPE, check=True, encoding='utf-8')
+    pref_lines = cp.stdout.splitlines()
+    for pref_line in pref_lines:
+        m = rd_pref_re.search(pref_line)
+        if m:
+            rd_pref = m.group(1) + ': FALSE'
+            if args.verbose is True:
+                print(rd_pref)
+            out_prefs.append(rd_pref)
+
+    # Sanity-check that tshark produced plausible output.
+    if len(pref_lines) < 5000:
+        print("Too few preference lines.")
+        sys.exit(1)
+
+    if len(out_prefs) < 150:
+        print("Too few changed preferences.")
+        sys.exit(1)
+
+    with open(profile_path, 'w') as profile_f:
+        for pref_line in out_prefs:
+            profile_f.write(pref_line + '\n')
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/make-packet-dcm.py b/tools/make-packet-dcm.py
new file mode 100755
index 0000000..028bde4
--- /dev/null
+++ b/tools/make-packet-dcm.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python3
+import os.path
+import sys
+import itertools
+import lxml.etree
+
+# This utility scrapes the DICOM standard document in DocBook format, finds the appropriate tables,
+# and extracts the data needed to build the lists of DICOM attributes, UIDs and value representations.
+
+# If the files part05.xml, part06.xml and part07.xml exist in the current directory, use them.
+# Otherwise, download the current release from the current DICOM official sources.
+if os.path.exists("part05.xml"):
+ print("Using local part05 docbook.", file=sys.stderr)
+ part05 = lxml.etree.parse("part05.xml")
+else:
+ print("Downloading part05 docbook...", file=sys.stderr)
+ part05 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part05/part05.xml")
+if os.path.exists("part06.xml"):
+ print("Using local part06 docbook.", file=sys.stderr)
+ part06 = lxml.etree.parse("part06.xml")
+else:
+ print("Downloading part06 docbook...", file=sys.stderr)
+ part06 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part06/part06.xml")
+if os.path.exists("part07.xml"):
+ print("Using local part07 docbook.", file=sys.stderr)
+ part07 = lxml.etree.parse("part07.xml")
+else:
+ print("Downloading part07 docbook...", file=sys.stderr)
+ part07 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part07/part07.xml")
+dbns = {'db':'http://docbook.org/ns/docbook', 'xml':'http://www.w3.org/XML/1998/namespace'}
+
+# When displaying the dissected packets, some attributes are nice to include in the description of their parent.
+include_in_parent = {"Patient Position",
+ "ROI Number",
+ "ROI Name",
+ "Contour Geometric Type",
+ "Observation Number",
+ "ROI Observation Label",
+ "RT ROI Interpreted Type",
+ "Dose Reference Structure Type",
+ "Dose Reference Description",
+ "Dose Reference Type",
+ "Target Prescription Dose",
+ "Tolerance Table Label",
+ "Beam Limiting Device Position Tolerance",
+ "Number of Fractions Planned",
+ "Treatment Machine Name",
+ "RT Beam Limiting Device Type",
+ "Beam Number",
+ "Beam Name",
+ "Beam Type",
+ "Radiation Type",
+ "Wedge Type",
+ "Wedge ID",
+ "Wedge Angle",
+ "Material ID",
+ "Block Tray ID",
+ "Block Name",
+ "Applicator ID",
+ "Applicator Type",
+ "Control Point Index",
+ "Nominal Beam Energy",
+ "Cumulative Meterset Weight",
+ "Patient Setup Number"}
+
+# Data elements are listed in three tables in Part 6:
+# * Table 6-1. Registry of DICOM Data Elements
+# * Table 7-1. Registry of DICOM File Meta Elements
+# * Table 8-1. Registry of DICOM Directory Structuring Elements
+# All three tables are in the same format and can be merged for processing.
+
+# The Command data elements (used only in networking), are listed in two tables in Part 7:
+# * Table E.1-1. Command Fields
+# * Table E.2-1. Retired Command Fields
+# The Retired Command Fields are missing the last column. For processing here,
+# we just add a last column with "RET", and they can be parsed with the same
+# code as for the Data elements.
+
+data_element_tables=["table_6-1", "table_7-1", "table_8-1"]
+
+def get_trs(document, table_id):
+ return document.findall(f"//db:table[@xml:id='{table_id}']/db:tbody/db:tr",
+ namespaces=dbns)
+
+data_trs = sum((get_trs(part06, table_id) for table_id in data_element_tables), [])
+cmd_trs = get_trs(part07, "table_E.1-1")
+retired_cmd_trs = get_trs(part07, "table_E.2-1")
+
+def get_texts_in_row(tr):
+ tds = tr.findall("db:td", namespaces=dbns)
+ texts = [" ".join(x.replace('\u200b', '').replace('\u00b5', 'u').strip() for x in td.itertext() if x.strip() != '') for td in tds]
+ return texts
+
+data_rows = [get_texts_in_row(x) for x in data_trs]
+retired_cmd_rows = [get_texts_in_row(x) for x in retired_cmd_trs]
+cmd_rows = ([get_texts_in_row(x) for x in cmd_trs] +
+ [x + ["RET"] for x in retired_cmd_rows])
+
+def parse_tag(tag):
+ # To handle some old cases where "x" is included as part of the tag number
+ tag = tag.replace("x", "0")
+ return f"0x{tag[1:5]}{tag[6:10]}"
+def parse_ret(ret):
+ if ret.startswith("RET"):
+ return -1
+ else:
+ return 0
+def include_in_parent_bit(name):
+ if name in include_in_parent:
+ return -1
+ else:
+ return 0
+def text_for_row(row):
+ return f' {{ {parse_tag(row[0])}, "{row[1]}", "{row[3]}", "{row[4]}", {parse_ret(row[5])}, {include_in_parent_bit(row[1])}}},'
+
+def text_for_rows(rows):
+ return "\n".join(text_for_row(row) for row in rows)
+
+vrs = {i+1: get_texts_in_row(x)[0].split(maxsplit=1) for i,x in enumerate(get_trs(part05, "table_6.2-1"))}
+
+
+# Table A-1. UID Values
+uid_trs = get_trs(part06, "table_A-1")
+uid_rows = [get_texts_in_row(x) for x in uid_trs]
+
+def uid_define_name(uid):
+ if uid[1] == "(Retired)":
+ return f'"{uid[0]}"'
+ uid_type = uid[3]
+ uid_name = uid[1]
+ uid_name = re.sub(":.*", "", uid[1])
+ if uid_name.endswith(uid_type):
+ uid_name = uid_name[:-len(uid_type)].strip()
+ return f"DCM_UID_{definify(uid_type)}_{definify(uid_name)}"
+
+import re
+def definify(s):
+ return re.sub('[^A-Z0-9]+', '_', re.sub(' +', ' ', re.sub('[^-A-Z0-9 ]+', '', s.upper())))
+
+uid_rows = sorted(uid_rows, key=lambda uid_row: [int(i) for i in uid_row[0].split(".")])
+packet_dcm_h = """/* packet-dcm.h
+ * Definitions for DICOM dissection
+ * Copyright 2003, Rich Coe <richcoe2@gmail.com>
+ * Copyright 2008-2018, David Aggeler <david_aggeler@hispeed.ch>
+ *
+ * DICOM communication protocol: https://www.dicomstandard.org/current/
+ *
+ * Generated automatically by """ + os.path.basename(sys.argv[0]) + """ from the following sources:
+ *
+ * """ + part05.find("./db:subtitle", namespaces=dbns).text + """
+ * """ + part06.find("./db:subtitle", namespaces=dbns).text + """
+ * """ + part07.find("./db:subtitle", namespaces=dbns).text + """
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef __PACKET_DCM_H__
+#define __PACKET_DCM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+""" + "\n".join(f"#define DCM_VR_{vr[0]} {i:2d} /* {vr[1]:25s} */" for i,vr in vrs.items()) + """
+
+/* Following must be in the same order as the definitions above */
+static const gchar* dcm_tag_vr_lookup[] = {
+ " ",
+ """ + ",\n ".join(",".join(f'"{x[1][0]}"' for x in j[1]) for j in itertools.groupby(vrs.items(), lambda i: (i[0]-1)//8)) + """
+};
+
+
+/* ---------------------------------------------------------------------
+ * DICOM Tag Definitions
+ *
+ * Some Tags can have different VRs
+ *
+ * Group 1000 is not supported, multiple tags with same description (retired anyhow)
+ * Group 7Fxx is not supported, multiple tags with same description (retired anyhow)
+ *
+ * Tags (0020,3100 to 0020, 31FF) not supported, multiple tags with same description (retired anyhow)
+ *
+ * Repeating groups (50xx & 60xx) are manually added. Declared as 5000 & 6000
+ */
+
+typedef struct dcm_tag {
+ const guint32 tag;
+ const gchar *description;
+ const gchar *vr;
+ const gchar *vm;
+ const gboolean is_retired;
+ const gboolean add_to_summary; /* Add to parent's item description */
+} dcm_tag_t;
+
+static dcm_tag_t dcm_tag_data[] = {
+
+ /* Command Tags */
+""" + text_for_rows(cmd_rows) + """
+
+ /* Data Tags */
+""" + text_for_rows(data_rows) + """
+};
+
+/* ---------------------------------------------------------------------
+ * DICOM UID Definitions
+
+ * Part 6 lists following different UID Types (2006-2008)
+
+ * Application Context Name
+ * Coding Scheme
+ * DICOM UIDs as a Coding Scheme
+ * LDAP OID
+ * Meta SOP Class
+ * SOP Class
+ * Service Class
+ * Transfer Syntax
+ * Well-known Print Queue SOP Instance
+ * Well-known Printer SOP Instance
+ * Well-known SOP Instance
+ * Well-known frame of reference
+ */
+
+typedef struct dcm_uid {
+ const gchar *value;
+ const gchar *name;
+ const gchar *type;
+} dcm_uid_t;
+
+""" + "\n".join(f'#define {uid_define_name(uid)} "{uid[0]}"'
+ for uid in uid_rows if uid[1] != '(Retired)') + """
+
+static dcm_uid_t dcm_uid_data[] = {
+""" + "\n".join(f' {{ {uid_define_name(uid)}, "{uid[1]}", "{uid[3]}"}},'
+ for uid in uid_rows)+ """
+};
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* packet-dcm.h */"""
+
+print(packet_dcm_h)
diff --git a/tools/make-pci-ids.py b/tools/make-pci-ids.py
new file mode 100755
index 0000000..0a77f76
--- /dev/null
+++ b/tools/make-pci-ids.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python3
+#
+# make-pci-ids - Creates a file containing PCI IDs.
+# It uses the database from
+# https://github.com/pciutils/pciids/raw/master/pci.ids
+# to create our file epan/dissectors/pci-ids.c
+#
+# Wireshark - Network traffic analyzer
+#
+# By Caleb Chiu <caleb.chiu@macnica.com>
+# Copyright 2021
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+import string
+import sys
+import urllib.request, urllib.error, urllib.parse
+
+OUTPUT_FILE = "epan/pci-ids.c"
+
+MIN_VENDOR_COUNT = 2250 # 2261 on 2021-11-01
+MIN_DEVICE_COUNT = 33000 # 33724 on 2021-11-01
+
+CODE_PREFIX = """\
+ *
+ * Generated by tools/make-pci-ids.py
+ * By Caleb Chiu <caleb.chiu@macnica.com>
+ * Copyright 2021
+ *
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <config.h>
+
+#include <stddef.h>
+
+#include "pci-ids.h"
+
+typedef struct
+{
+ uint16_t vid;
+ uint16_t did;
+ uint16_t svid;
+ uint16_t ssid;
+ char *name;
+
+} pci_id_t;
+
+typedef struct
+{
+ uint16_t vid;
+ uint16_t count;
+ pci_id_t *ids_ptr;
+
+} pci_vid_index_t;
+
+"""
+
+CODE_POSTFIX = """
+static pci_vid_index_t *get_vid_index(uint16_t vid)
+{
+ uint32_t start_index = 0;
+ uint32_t end_index = 0;
+ uint32_t idx = 0;
+
+ end_index = sizeof(pci_vid_index)/sizeof(pci_vid_index[0]);
+
+ while(start_index != end_index)
+ {
+ if(end_index - start_index == 1)
+ {
+ if(pci_vid_index[start_index].vid == vid)
+ return &pci_vid_index[start_index];
+
+ break;
+ }
+
+ idx = (start_index + end_index)/2;
+
+ if(pci_vid_index[idx].vid < vid)
+ start_index = idx;
+ else
+ if(pci_vid_index[idx].vid > vid)
+ end_index = idx;
+ else
+ return &pci_vid_index[idx];
+
+ }
+
+ return NULL;
+
+}
+
+const char *pci_id_str(uint16_t vid, uint16_t did, uint16_t svid, uint16_t ssid)
+{
+ unsigned int i;
+ static char *not_found = \"Not found\";
+ pci_vid_index_t *index_ptr;
+ pci_id_t *ids_ptr;
+
+ index_ptr = get_vid_index(vid);
+
+ if(index_ptr == NULL)
+ return not_found;
+
+ ids_ptr = index_ptr->ids_ptr;
+ for(i = 0; i < index_ptr->count; ids_ptr++, i++)
+ if(vid == ids_ptr->vid &&
+ did == ids_ptr->did &&
+ svid == ids_ptr->svid &&
+ ssid == ids_ptr->ssid)
+ return ids_ptr->name;
+ return not_found;
+
+}
+"""
+
+
+id_list=[]
+count_list=[]
+
+
+def exit_msg(msg=None, status=1):
+ if msg is not None:
+ sys.stderr.write(msg + '\n')
+ sys.exit(status)
+
+
+def main():
+ req_headers = { 'User-Agent': 'Wireshark make-pci-ids' }
+ req = urllib.request.Request('https://github.com/pciutils/pciids/raw/master/pci.ids', headers=req_headers)
+ response = urllib.request.urlopen(req)
+ lines = response.read().decode('UTF-8', 'replace').splitlines()
+
+ out_lines = '''\
+/* pci-ids.c
+ *
+ * pci-ids.c is based on the pci.ids of The PCI ID Repository at
+ * https://pci-ids.ucw.cz/, fetched indirectly via
+ * https://github.com/pciutils/pciids
+'''
+ vid = -1
+ did = -1
+ svid = -1
+ entries = 0
+ line_num = 0
+
+ for line in lines:
+ line = line.strip('\n')
+ line_num += 1
+
+ if line_num <= 15:
+ line = line.replace('#', ' ', 1)
+ line = line.lstrip()
+ line = line.replace("GNU General Public License", "GPL")
+ if line:
+ line = ' * ' + line
+ else:
+ line = ' *' + line
+ out_lines += line + '\n'
+ if line_num == 15:
+ out_lines += CODE_PREFIX
+
+ line = line.replace("\\","\\\\")
+ line = line.replace("\"","\\\"")
+ line = line.replace("?","?-")
+ tabs = len(line) - len(line.lstrip('\t'))
+ if tabs == 0:
+ #print line
+ words = line.split(" ", 1)
+ if len(words) < 2:
+ continue
+ if len(words[0]) != 4:
+ continue
+ if all(c in string.hexdigits for c in words[0]):
+ hex_int = int(words[0], 16)
+ if vid != -1:
+ out_lines += "}; /* pci_vid_%04X[] */\n\n" % (vid)
+ count_list.append(entries)
+ vid = hex_int
+ entries = 1
+ did = -1
+ svid = -1
+ ssid = -1
+ out_lines += "pci_id_t pci_vid_%04X[] = {\n" % (vid)
+ out_lines += "{0x%04X, 0xFFFF, 0xFFFF, 0xFFFF, \"%s(0x%04X)\"},\n" % (vid, words[1].strip(), vid)
+ id_list.append(vid)
+ continue
+
+ if tabs == 1:
+ line = line.strip('\t')
+ words = line.split(" ", 1)
+ if len(words) < 2:
+ continue
+ if len(words[0]) != 4:
+ continue
+ if all(c in string.hexdigits for c in words[0]):
+ hex_int = int(words[0], 16)
+ did = hex_int
+ svid = -1
+ ssid = -1
+ out_lines += "{0x%04X, 0x%04X, 0xFFFF, 0xFFFF, \"%s(0x%04X)\"},\n" % (vid, did, words[1].strip(), did)
+ entries += 1
+ continue
+
+ if tabs == 2:
+ line = line.strip('\t')
+ words = line.split(" ", 2)
+ if len(words[0]) != 4:
+ continue
+ if all(c in string.hexdigits for c in words[0]):
+ hex_int = int(words[0], 16)
+ svid = hex_int
+
+ if all(c in string.hexdigits for c in words[1]):
+ hex_int = int(words[1], 16)
+ ssid = hex_int
+
+ out_lines += "{0x%04X, 0x%04X, 0x%04X, 0x%04X, \"%s(0x%04X-0x%04X)\"},\n" % (vid, did, svid, ssid, words[2].strip(), svid, ssid)
+ entries += 1
+ svid = -1
+ ssid = -1
+ continue
+
+ out_lines += "}; /* pci_vid_%04X[] */\n" % (vid)
+ count_list.append(entries)
+
+ out_lines += "\npci_vid_index_t pci_vid_index[] = {\n"
+
+ vendor_count = len(id_list)
+ device_count = 0
+ for i in range(vendor_count):
+ out_lines += "{0x%04X, %d, pci_vid_%04X },\n" % (id_list[i], count_list[i], id_list[i])
+ device_count += count_list[i]
+
+ out_lines += "}; /* We have %d VIDs */\n" % (vendor_count)
+
+ out_lines += CODE_POSTFIX
+
+ if vendor_count < MIN_VENDOR_COUNT:
+        exit_msg(f'Too few vendors. Wanted {MIN_VENDOR_COUNT}, got {vendor_count}.')
+
+ if device_count < MIN_DEVICE_COUNT:
+        exit_msg(f'Too few devices. Wanted {MIN_DEVICE_COUNT}, got {device_count}.')
+
+ with open(OUTPUT_FILE, "w", encoding="utf-8") as pci_ids_f:
+ pci_ids_f.write(out_lines)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/make-plugin-reg.py b/tools/make-plugin-reg.py
new file mode 100755
index 0000000..2b9bc34
--- /dev/null
+++ b/tools/make-plugin-reg.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+#
+# Looks for registration routines in the plugins
+# and assembles C code to call all the routines.
+# A new "plugin.c" file will be written in the current directory.
+#
+
+import os
+import sys
+import re
+
+#
+# The first argument is the directory in which the source files live.
+#
+srcdir = sys.argv[1]
+#
+# The second argument is either "plugin", "plugin_wtap", "plugin_codec",
+# or "plugin_tap".
+#
+registertype = sys.argv[2]
+#
+# All subsequent arguments are the files to scan.
+#
+files = sys.argv[3:]
+
+final_filename = "plugin.c"
+preamble = """\
+/*
+ * Do not modify this file. Changes will be overwritten.
+ *
+ * Generated automatically from %s.
+ */
+""" % (os.path.basename(sys.argv[0]))
+
+# Create the proper list of filenames
+filenames = []
+for file in files:
+ if os.path.isfile(file):
+ filenames.append(file)
+ else:
+ filenames.append(os.path.join(srcdir, file))
+
+if len(filenames) < 1:
+ print("No files found")
+ sys.exit(1)
+
+
+# Look through all files, applying the regex to each line.
+# If the pattern matches, save the "symbol" section to the
+# appropriate set.
+regs = {
+ 'proto_reg': set(),
+ 'handoff_reg': set(),
+ 'wtap_register': set(),
+ 'codec_register': set(),
+ 'register_tap_listener': set(),
+ }
+
+# For those that don't know Python, r"" indicates a raw string,
+# devoid of Python escapes.
+proto_regex = r"\bproto_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+handoff_regex = r"\bproto_reg_handoff_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+wtap_reg_regex = r"\bwtap_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+codec_reg_regex = r"\bcodec_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+tap_reg_regex = r"\bregister_tap_listener_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
+
+# This table drives the pattern-matching and symbol-harvesting
+patterns = [
+ ( 'proto_reg', re.compile(proto_regex, re.MULTILINE | re.ASCII) ),
+ ( 'handoff_reg', re.compile(handoff_regex, re.MULTILINE | re.ASCII) ),
+ ( 'wtap_register', re.compile(wtap_reg_regex, re.MULTILINE | re.ASCII) ),
+ ( 'codec_register', re.compile(codec_reg_regex, re.MULTILINE | re.ASCII) ),
+ ( 'register_tap_listener', re.compile(tap_reg_regex, re.MULTILINE | re.ASCII) ),
+ ]
+
+# Grep
+for filename in filenames:
+ file = open(filename)
+ # Read the whole file into memory
+ contents = file.read()
+ for action in patterns:
+ regex = action[1]
+ for match in regex.finditer(contents):
+ symbol = match.group("symbol")
+ sym_type = action[0]
+ regs[sym_type].add(symbol)
+ # We're done with the file contents
+ del contents
+ file.close()
+
+# Make sure we actually processed something
+if (len(regs['proto_reg']) < 1 and len(regs['wtap_register']) < 1 and len(regs['codec_register']) < 1 and len(regs['register_tap_listener']) < 1):
+ print("No plugin registrations found")
+ sys.exit(1)
+
+# Convert the sets into sorted lists to make the output pretty
+regs['proto_reg'] = sorted(regs['proto_reg'])
+regs['handoff_reg'] = sorted(regs['handoff_reg'])
+regs['wtap_register'] = sorted(regs['wtap_register'])
+regs['codec_register'] = sorted(regs['codec_register'])
+regs['register_tap_listener'] = sorted(regs['register_tap_listener'])
+
+reg_code = ""
+
+reg_code += preamble
+
+reg_code += """
+#include "config.h"
+
+#include <gmodule.h>
+
+/* plugins are DLLs on Windows */
+#define WS_BUILD_DLL
+#include "ws_symbol_export.h"
+
+"""
+
+if registertype == "plugin":
+ reg_code += "#include \"epan/proto.h\"\n\n"
+if registertype == "plugin_wtap":
+ reg_code += "#include \"wiretap/wtap.h\"\n\n"
+if registertype == "plugin_codec":
+ reg_code += "#include \"wsutil/codecs.h\"\n\n"
+if registertype == "plugin_tap":
+ reg_code += "#include \"epan/tap.h\"\n\n"
+
+for symbol in regs['proto_reg']:
+ reg_code += "void proto_register_%s(void);\n" % (symbol)
+for symbol in regs['handoff_reg']:
+ reg_code += "void proto_reg_handoff_%s(void);\n" % (symbol)
+for symbol in regs['wtap_register']:
+ reg_code += "void wtap_register_%s(void);\n" % (symbol)
+for symbol in regs['codec_register']:
+ reg_code += "void codec_register_%s(void);\n" % (symbol)
+for symbol in regs['register_tap_listener']:
+ reg_code += "void register_tap_listener_%s(void);\n" % (symbol)
+
+reg_code += """
+WS_DLL_PUBLIC_DEF const gchar plugin_version[] = PLUGIN_VERSION;
+WS_DLL_PUBLIC_DEF const int plugin_want_major = VERSION_MAJOR;
+WS_DLL_PUBLIC_DEF const int plugin_want_minor = VERSION_MINOR;
+
+WS_DLL_PUBLIC void plugin_register(void);
+
+void plugin_register(void)
+{
+"""
+
+if registertype == "plugin":
+ for symbol in regs['proto_reg']:
+ reg_code +=" static proto_plugin plug_%s;\n\n" % (symbol)
+ reg_code +=" plug_%s.register_protoinfo = proto_register_%s;\n" % (symbol, symbol)
+ if symbol in regs['handoff_reg']:
+ reg_code +=" plug_%s.register_handoff = proto_reg_handoff_%s;\n" % (symbol, symbol)
+ else:
+ reg_code +=" plug_%s.register_handoff = NULL;\n" % (symbol)
+ reg_code += " proto_register_plugin(&plug_%s);\n" % (symbol)
+if registertype == "plugin_wtap":
+ for symbol in regs['wtap_register']:
+ reg_code += " static wtap_plugin plug_%s;\n\n" % (symbol)
+ reg_code += " plug_%s.register_wtap_module = wtap_register_%s;\n" % (symbol, symbol)
+ reg_code += " wtap_register_plugin(&plug_%s);\n" % (symbol)
+if registertype == "plugin_codec":
+ for symbol in regs['codec_register']:
+ reg_code += " static codecs_plugin plug_%s;\n\n" % (symbol)
+ reg_code += " plug_%s.register_codec_module = codec_register_%s;\n" % (symbol, symbol)
+ reg_code += " codecs_register_plugin(&plug_%s);\n" % (symbol)
+if registertype == "plugin_tap":
+ for symbol in regs['register_tap_listener']:
+ reg_code += " static tap_plugin plug_%s;\n\n" % (symbol)
+ reg_code += " plug_%s.register_tap_listener = register_tap_listener_%s;\n" % (symbol, symbol)
+ reg_code += " tap_register_plugin(&plug_%s);\n" % (symbol)
+
+reg_code += "}\n"
+
+try:
+ fh = open(final_filename, 'w')
+ fh.write(reg_code)
+ fh.close()
+except OSError:
+ sys.exit('Unable to write ' + final_filename + '.\n')
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/make-regs.py b/tools/make-regs.py
new file mode 100755
index 0000000..376b3c6
--- /dev/null
+++ b/tools/make-regs.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python3
+#
+# Looks for registration routines in the source files
+# and assembles C code to call all the routines.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+import sys
+import re
+
+preamble = """\
+/*
+ * Do not modify this file. Changes will be overwritten.
+ *
+ * Generated automatically using \"make-regs.py\".
+ */
+
+"""
+
+def gen_prototypes(funcs):
+ output = ""
+ for f in funcs:
+ output += "void {}(void);\n".format(f)
+ return output
+
+def gen_array(funcs, name):
+ output = "{}[] = {{\n".format(name)
+ for f in funcs:
+ output += " {{ \"{0}\", {0} }},\n".format(f)
+ output += " { NULL, NULL }\n};\n"
+ return output
+
+def scan_files(infiles, regs):
+ for path in infiles:
+ with open(path, 'r', encoding='utf8') as f:
+ source = f.read()
+ for array, regex in regs:
+ matches = re.findall(regex, source)
+ array.extend(matches)
+
+def make_dissectors(outfile, infiles):
+ protos = []
+ protos_regex = r"void\s+(proto_register_[\w]+)\s*\(\s*void\s*\)\s*{"
+ handoffs = []
+ handoffs_regex = r"void\s+(proto_reg_handoff_[\w]+)\s*\(\s*void\s*\)\s*{"
+
+ scan_files(infiles, [(protos, protos_regex), (handoffs, handoffs_regex)])
+
+ if len(protos) < 1:
+ sys.exit("No protocol registrations found.")
+
+ protos.sort()
+ handoffs.sort()
+
+ output = preamble
+ output += """\
+#include "dissectors.h"
+
+const unsigned long dissector_reg_proto_count = {0};
+const unsigned long dissector_reg_handoff_count = {1};
+
+""".format(len(protos), len(handoffs))
+
+ output += gen_prototypes(protos)
+ output += "\n"
+ output += gen_array(protos, "dissector_reg_t dissector_reg_proto")
+ output += "\n"
+ output += gen_prototypes(handoffs)
+ output += "\n"
+ output += gen_array(handoffs, "dissector_reg_t dissector_reg_handoff")
+
+ with open(outfile, "w") as f:
+ f.write(output)
+
+ print("Found {0} registrations and {1} handoffs.".format(len(protos), len(handoffs)))
+
+def make_wtap_modules(outfile, infiles):
+ wtap_modules = []
+ wtap_modules_regex = r"void\s+(register_[\w]+)\s*\(\s*void\s*\)\s*{"
+
+ scan_files(infiles, [(wtap_modules, wtap_modules_regex)])
+
+ if len(wtap_modules) < 1:
+ sys.exit("No wiretap registrations found.")
+
+ wtap_modules.sort()
+
+ output = preamble
+ output += """\
+#include "wtap_modules.h"
+
+const unsigned wtap_module_count = {0};
+
+""".format(len(wtap_modules))
+
+ output += gen_prototypes(wtap_modules)
+ output += "\n"
+ output += gen_array(wtap_modules, "wtap_module_reg_t wtap_module_reg")
+
+ with open(outfile, "w") as f:
+ f.write(output)
+
+ print("Found {0} registrations.".format(len(wtap_modules)))
+
+def make_taps(outfile, infiles):
+ taps = []
+ taps_regex = r"void\s+(register_tap_listener_[\w]+)\s*\(\s*void\s*\)\s*{"
+
+ scan_files(infiles, [(taps, taps_regex)])
+
+ if len(taps) < 1:
+ sys.exit("No tap registrations found.")
+
+ taps.sort()
+
+ output = preamble
+ output += """\
+#include "ui/taps.h"
+
+const unsigned long tap_reg_listener_count = {0};
+
+""".format(len(taps))
+
+ output += gen_prototypes(taps)
+ output += "\n"
+ output += gen_array(taps, "tap_reg_t tap_reg_listener")
+
+ with open(outfile, "w") as f:
+ f.write(output)
+
+ print("Found {0} registrations.".format(len(taps)))
+
+
+def print_usage():
+ sys.exit("Usage: {0} <dissectors|taps> <outfile> <infiles...|@filelist>\n".format(sys.argv[0]))
+
+if __name__ == "__main__":
+ if len(sys.argv) < 4:
+ print_usage()
+
+ mode = sys.argv[1]
+ outfile = sys.argv[2]
+ if sys.argv[3].startswith("@"):
+ with open(sys.argv[3][1:]) as f:
+ infiles = [l.strip() for l in f.readlines()]
+ else:
+ infiles = sys.argv[3:]
+
+ if mode == "dissectors":
+ make_dissectors(outfile, infiles)
+ elif mode == "wtap_modules":
+ make_wtap_modules(outfile, infiles)
+ elif mode == "taps":
+ make_taps(outfile, infiles)
+ else:
+ print_usage()
diff --git a/tools/make-services.py b/tools/make-services.py
new file mode 100755
index 0000000..e608af7
--- /dev/null
+++ b/tools/make-services.py
@@ -0,0 +1,292 @@
+#!/usr/bin/env python3
+#
+# Parses the CSV version of the IANA Service Name and Transport Protocol Port Number Registry
+# and generates a services(5) file.
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 2013 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+iana_svc_url = 'https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv'
+
+__doc__ = '''\
+Usage: make-services.py [url]
+
+url defaults to
+ %s
+''' % (iana_svc_url)
+
+import sys
+import getopt
+import csv
+import re
+import collections
+import urllib.request, urllib.error, urllib.parse
+import codecs
+
+services_file = 'epan/services-data.c'
+
+exclude_services = [
+ '^spr-itunes',
+ '^spl-itunes',
+ '^shilp',
+ ]
+
+min_source_lines = 14000 # Size was ~ 14800 on 2017-07-20
+
+def parse_port(port_str):
+
+ p = port_str.split('-')
+ try:
+ if len(p) == 1:
+ return tuple([int(p[0])])
+ if len(p) == 2:
+ return tuple([int(p[0]), int(p[1])])
+ except ValueError:
+ pass
+ return ()
+
+def port_to_str(port):
+ if len(port) == 2:
+ return str(port[0]) + '-' + str(port[1])
+ return str(port[0])
+
+def parse_rows(svc_fd):
+ port_reader = csv.reader(svc_fd)
+ count = 0
+
+ # Header positions as of 2013-08-06
+ headers = next(port_reader)
+
+ try:
+ sn_pos = headers.index('Service Name')
+ except Exception:
+ sn_pos = 0
+ try:
+ pn_pos = headers.index('Port Number')
+ except Exception:
+ pn_pos = 1
+ try:
+ tp_pos = headers.index('Transport Protocol')
+ except Exception:
+ tp_pos = 2
+ try:
+ desc_pos = headers.index('Description')
+ except Exception:
+ desc_pos = 3
+
+ services_map = {}
+
+ for row in port_reader:
+ service = row[sn_pos]
+ port = parse_port(row[pn_pos])
+ proto = row[tp_pos]
+ description = row[desc_pos]
+ count += 1
+
+ if len(service) < 1 or not port or len(proto) < 1:
+ continue
+
+ if re.search('|'.join(exclude_services), service):
+ continue
+
+ # max 15 chars
+ service = service[:15].rstrip()
+
+ # replace blanks (for some non-standard long names)
+ service = service.replace(" ", "-")
+
+ description = description.replace("\n", "")
+ description = re.sub("IANA assigned this well-formed service .+$", "", description)
+ description = re.sub(" +", " ", description)
+ description = description.strip()
+ if description == service or description == service.replace("-", " "):
+ description = None
+
+ if not port in services_map:
+ services_map[port] = collections.OrderedDict()
+
+ # Remove some duplicates (first entry wins)
+ proto_exists = False
+ for k in services_map[port].keys():
+ if proto in services_map[port][k]:
+ proto_exists = True
+ break
+ if proto_exists:
+ continue
+
+ if not service in services_map[port]:
+ services_map[port][service] = [description]
+ services_map[port][service].append(proto)
+
+ if count < min_source_lines:
+ exit_msg('Not enough parsed data')
+
+ return services_map
+
+def compile_body(d):
+ keys = list(d.keys())
+ keys.sort()
+ body = []
+
+ for port in keys:
+ for serv in d[port].keys():
+ line = [port, d[port][serv][1:], serv]
+ description = d[port][serv][0]
+ if description:
+ line.append(description)
+ body.append(line)
+
+ return body
+
+def add_entry(table, port, service_name, description):
+ table.append([int(port), service_name, description])
+
+
+ # body = [(port-range,), [proto-list], service-name, optional-description]
+ # table = [port-number, service-name, optional-description]
+def compile_tables(body):
+
+ body.sort()
+ tcp_udp_table = []
+ tcp_table = []
+ udp_table = []
+ sctp_table = []
+ dccp_table = []
+
+ for entry in body:
+ if len(entry) == 4:
+ port_range, proto_list, service_name, description = entry
+ else:
+ port_range, proto_list, service_name = entry
+ description = None
+
+ for port in port_range:
+ if 'tcp' in proto_list and 'udp' in proto_list:
+ add_entry(tcp_udp_table, port, service_name, description)
+ else:
+ if 'tcp' in proto_list:
+ add_entry(tcp_table, port, service_name, description)
+ if 'udp' in proto_list:
+ add_entry(udp_table, port, service_name, description)
+ if 'sctp' in proto_list:
+ add_entry(sctp_table, port, service_name, description)
+ if 'dccp' in proto_list:
+ add_entry(dccp_table, port, service_name, description)
+
+ return tcp_udp_table, tcp_table, udp_table, sctp_table, dccp_table
+
+
+def exit_msg(msg=None, status=1):
+ if msg is not None:
+ sys.stderr.write(msg + '\n\n')
+ sys.stderr.write(__doc__ + '\n')
+ sys.exit(status)
+
+def main(argv):
+ if sys.version_info[0] < 3:
+ print("This requires Python 3")
+ sys.exit(2)
+
+ try:
+ opts, _ = getopt.getopt(argv, "h", ["help"])
+ except getopt.GetoptError:
+ exit_msg()
+ for opt, _ in opts:
+ if opt in ("-h", "--help"):
+ exit_msg(None, 0)
+
+ if (len(argv) > 0):
+ svc_url = argv[0]
+ else:
+ svc_url = iana_svc_url
+
+ try:
+ if not svc_url.startswith('http'):
+ svc_fd = open(svc_url)
+ else:
+ req = urllib.request.urlopen(svc_url)
+ svc_fd = codecs.getreader('utf8')(req)
+ except Exception:
+ exit_msg('Error opening ' + svc_url)
+
+ body = parse_rows(svc_fd)
+
+ out = open(services_file, 'w')
+ out.write('''\
+/*
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This is a local copy of the IANA port-numbers file.
+ *
+ * Wireshark uses it to resolve port numbers into human readable
+ * service names, e.g. TCP port 80 -> http.
+ *
+ * It is subject to copyright and being used with IANA's permission:
+ * https://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html
+ *
+ * The original file can be found at:
+ * %s
+ */
+
+''' % (iana_svc_url))
+
+ body = compile_body(body)
+ # body = [(port-range,), [proto-list], service-name, optional-description]
+
+ max_port = 0
+
+ tcp_udp, tcp, udp, sctp, dccp = compile_tables(body)
+
+ def write_entry(f, e, max_port):
+ line = " {{ {}, \"{}\", ".format(*e)
+ sep_len = 32 - len(line)
+ if sep_len <= 0:
+ sep_len = 1
+ line += ' ' * sep_len
+ if len(e) == 3 and e[2]:
+ line += "\"{}\" }},\n".format(e[2].replace('"', '\\"'))
+ else:
+ line += "\"\" },\n"
+ f.write(line)
+ if int(e[0]) > int(max_port):
+ return e[0]
+ return max_port
+
+ out.write("static ws_services_entry_t global_tcp_udp_services_table[] = {\n")
+ for e in tcp_udp:
+ max_port = write_entry(out, e, max_port)
+ out.write("};\n\n")
+
+ out.write("static ws_services_entry_t global_tcp_services_table[] = {\n")
+ for e in tcp:
+ max_port = write_entry(out, e, max_port)
+ out.write("};\n\n")
+
+ out.write("static ws_services_entry_t global_udp_services_table[] = {\n")
+ for e in udp:
+ max_port = write_entry(out, e, max_port)
+ out.write("};\n\n")
+
+ out.write("static ws_services_entry_t global_sctp_services_table[] = {\n")
+ for e in sctp:
+ max_port = write_entry(out, e, max_port)
+ out.write("};\n\n")
+
+ out.write("static ws_services_entry_t global_dccp_services_table[] = {\n")
+ for e in dccp:
+ max_port = write_entry(out, e, max_port)
+ out.write("};\n\n")
+
+ out.write("static const uint16_t _services_max_port = {};\n".format(max_port))
+
+ out.close()
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/tools/make-tls-ct-logids.py b/tools/make-tls-ct-logids.py
new file mode 100755
index 0000000..0b74c51
--- /dev/null
+++ b/tools/make-tls-ct-logids.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+# Generate the array of Certificate Transparency Log ID to description mappings
+# for the TLS dissector.
+#
+# To update the TLS dissector source file, run this from the source directory:
+#
+# python3 tools/make-tls-ct-logids.py --update
+#
+
+import argparse
+from base64 import b64decode, b64encode
+from enum import Enum
+import itertools
+import os
+import requests
+from hashlib import sha256
+
+
+# Begin of comment, followed by the actual array definition
+HEADER = "/* Generated by tools/make-tls-ct-logids.py\n"
+# See also https://www.certificate-transparency.org/known-logs
+CT_JSON_URL = 'https://www.gstatic.com/ct/log_list/v3/all_logs_list.json'
+# File to be patched
+SOURCE_FILE = os.path.join('epan', 'dissectors', 'packet-tls-utils.c')
+
+# Maximum elements per line in the value array. 11 is chosen because it results
+# in output consistent with clang-format.
+BYTES_PER_LINE = 11
+
class SourceStage(Enum):
    # Parser states used by parse_source() when splitting packet-tls-utils.c
    # into the parts before, inside and after the generated array.
    BEGIN = 1
    IN_METAINFO = 2
    IN_BLOCK = 3
    END = 4
+
+
def escape_c(s):
    """Escape a string for safe embedding in a C string literal."""
    # Backslash is handled first so newly added escape characters are not
    # themselves re-escaped by the quote pass.
    for plain, escaped in (('\\', '\\\\'), ('"', '\\"')):
        s = s.replace(plain, escaped)
    return s
+
+
def byteshex(b):
    """Format a byte string as space-separated '0xNN,' C array tokens."""
    # '#04x' == '0x' prefix plus zero-padding to two hex digits.
    return " ".join(format(octet, "#04x") + "," for octet in bytearray(b))
+
+
def process_json(obj, lastmod):
    """Convert the CT log-list JSON into (metainfo comment, C array block)."""
    # Flatten each operator's log list into one list of log entries.
    logs = list(itertools.chain(*[op['logs'] for op in obj['operators']]))
    metainfo, block = HEADER, ''
    metainfo += " * Last-Modified %s, %s entries. */\n" % (lastmod, len(logs))
    block += "static const bytes_string ct_logids[] = {\n"
    for entry in logs:
        desc = entry["description"]
        pubkey_der = b64decode(entry["key"])
        # RFC 6962: the log ID is the SHA-256 hash of the log's DER-encoded
        # public key.
        key_id = sha256(pubkey_der).digest()
        block += ' { (const uint8_t[]){\n'
        for offset in range(0, len(key_id), BYTES_PER_LINE):
            block += ' %s\n' % \
                byteshex(key_id[offset:offset+BYTES_PER_LINE])
        block += ' },\n'
        block += ' %d, "%s" },\n' % (len(key_id), escape_c(desc))
    # NULL terminator entry expected by the dissector's lookup code.
    block += " { NULL, 0, NULL }\n"
    block += "};\n"
    return metainfo, block
+
+
def parse_source(source_path):
    """
    Reads the source file and tries to split it in the parts before, inside and
    after the block.
    """
    begin, metainfo, block, end = '', '', '', ''
    # Stages: BEGIN (before block), IN_METAINFO, IN_BLOCK (skip), END
    stage = SourceStage.BEGIN
    with open(source_path) as f:
        for line in f:
            # The comment emitted by this script marks the start of the
            # generated region.
            if line.startswith('/* Generated by '):
                stage = SourceStage.IN_METAINFO


            if stage == SourceStage.BEGIN:
                begin += line
            elif stage == SourceStage.IN_METAINFO:
                metainfo += line
            elif stage == SourceStage.IN_BLOCK:
                block += line
                # The closing brace of the array ends the generated block.
                if line.startswith('}'):
                    stage = SourceStage.END
            elif stage == SourceStage.END:
                end += line

            # Checked *after* the accumulation above so that the
            # Last-Modified line itself remains part of the metainfo section.
            if line.startswith(' * Last-Modified '):
                stage = SourceStage.IN_BLOCK

    if stage != SourceStage.END:
        raise RuntimeError("Could not parse file (in stage %s)" % stage.name)
    return begin, metainfo, block, end
+
+
# Command-line interface; parsed in main().
parser = argparse.ArgumentParser()
parser.add_argument("--update", action="store_true",
                    help="Update %s as needed instead of writing to stdout" % SOURCE_FILE)
+
+
def main():
    # Fetch the current log list from Google and regenerate the C array.
    args = parser.parse_args()
    this_dir = os.path.dirname(__file__)
    r = requests.get(CT_JSON_URL)
    j_metainfo, j_block = process_json(r.json(), lastmod=r.headers['Last-Modified'])
    source_path = os.path.join(this_dir, '..', SOURCE_FILE)

    if args.update:
        s_begin, _, s_block, s_end = parse_source(source_path)
        # Only rewrite when the array itself changed; a new Last-Modified
        # header alone does not warrant touching the file.
        if s_block == j_block:
            print("File is up-to-date")
        else:
            with open(source_path, "w") as f:
                f.write(s_begin)
                f.write(j_metainfo)
                f.write(j_block)
                f.write(s_end)
            print("Updated %s" % source_path)
    else:
        print(j_metainfo, j_block)


if __name__ == '__main__':
    main()
diff --git a/tools/make-usb.py b/tools/make-usb.py
new file mode 100755
index 0000000..6540803
--- /dev/null
+++ b/tools/make-usb.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+#
+# make-usb - Creates a file containing vendor and product ids.
# It uses the databases from
+# - The USB ID Repository: https://usb-ids.gowdy.us (http://www.linux-usb.org), mirrored at Sourceforge
+# - libgphoto2 from gPhoto: https://github.com/gphoto/libgphoto2 (http://gphoto.org), available at GitHub
+# to create our file epan/dissectors/usb.c
+
+import re
+import sys
+import urllib.request, urllib.error, urllib.parse
+
# Parser states for the usb.ids file.
MODE_IDLE = 0
MODE_VENDOR_PRODUCT = 1
# Sanity thresholds: abort if the downloaded database looks truncated.
MIN_VENDORS = 3400 # 3409 as of 2020-11-15
MIN_PRODUCTS = 20000 # 20361 as of 2020-11-15

mode = MODE_IDLE

req_headers = { 'User-Agent': 'Wireshark make-usb' }
req = urllib.request.Request('https://sourceforge.net/p/linux-usb/repo/HEAD/tree/trunk/htdocs/usb.ids?format=raw', headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode('UTF-8', 'replace').splitlines()

# vendor id -> name, and (vendor id + product id) -> name.
vendors = dict()
products = dict()
vendors_str="static const value_string usb_vendors_vals[] = {\n"
products_str="static const value_string usb_products_vals[] = {\n"

# Escape backslashes, quotes, control characters and non-ASCII characters.
escapes = {}
for i in range(256):
    if i in b'\\"':
        escapes[i] = '\\%c' % i
    elif i in range(0x20, 0x80) or i in b'\t':
        escapes[i] = chr(i)
    else:
        # Octal escape for control and non-ASCII bytes.
        escapes[i] = '\\%03o' % i
+
# Parse the usb.ids vendor/product section into the two dicts.
for utf8line in lines:
    # Convert single backslashes to double (escaped) backslashes, escape quotes, etc.
    utf8line = utf8line.rstrip()
    # Collapse runs of '?' left by the 'replace' decoding fallback.
    # Raw strings here: "\?" and "\t" in plain literals are invalid escape
    # sequences and raise SyntaxWarning on modern Python.
    utf8line = re.sub(r"\?+", "?", utf8line)
    line = ''.join(escapes[byte] for byte in utf8line.encode('utf8'))

    if line == "# Vendors, devices and interfaces. Please keep sorted.":
        mode = MODE_VENDOR_PRODUCT
        continue
    elif line == "# List of known device classes, subclasses and protocols":
        mode = MODE_IDLE
        continue

    if mode == MODE_VENDOR_PRODUCT:
        # Vendor lines start with a 4-digit hex ID in column 0; product
        # lines are tab-indented and belong to the most recent vendor.
        if re.match(r"^[0-9a-f]{4}", line):
            last_vendor = line[:4]
            vendors[last_vendor] = line[4:].strip()
        elif re.match(r"^\t[0-9a-f]{4}", line):
            line = line.strip()
            product = "%s%s" % (last_vendor, line[:4])
            products[product] = line[4:].strip()
+
req = urllib.request.Request('https://raw.githubusercontent.com/gphoto/libgphoto2/master/camlibs/ptp2/library.c', headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode('UTF-8', 'replace').splitlines()

mode = MODE_IDLE

# Scan libgphoto2's PTP camera table for additional product names.
for line in lines:
    # The table of interest is the models[] array; skip until we reach it.
    if mode == MODE_IDLE and re.match(r".*\bmodels\[\]", line):
        mode = MODE_VENDOR_PRODUCT
        continue

    if mode == MODE_VENDOR_PRODUCT and re.match(r"};", line):
        mode = MODE_IDLE

    if mode == MODE_IDLE:
        continue

    # Sample entry: {"Vendor:Model", 0xVVVV, 0xPPPP, ...},
    m = re.match(r"\s*{\"(.*):(.*)\",\s*0x([0-9a-fA-F]{4}),\s*0x([0-9a-fA-F]{4}),.*},", line)
    if m is not None:
        manuf = m.group(1).strip()
        # Drop parenthesized annotations from the model name.
        model = re.sub(r"\(.*\)", "", m.group(2)).strip()
        product = m.group(3) + m.group(4)
        products[product] = ' '.join((manuf, model))

req = urllib.request.Request('https://raw.githubusercontent.com/gphoto/libgphoto2/master/camlibs/ptp2/music-players.h', headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode('UTF-8', 'replace').splitlines()

# Scan libgphoto2's MTP music-player table as well.
for line in lines:
    m = re.match(r"\s*{\s*\"(.*)\",\s*0x([0-9a-fA-F]{4}),\s*\"(.*)\",\s*0x([0-9a-fA-F]{4}),", line)
    if m is not None:
        manuf = m.group(1).strip()
        model = m.group(3).strip()
        product = m.group(2) + m.group(4)
        products[product] = ' '.join((manuf, model))


# Bail out rather than generate a visibly truncated table.
if (len(vendors) < MIN_VENDORS):
    sys.stderr.write("Not enough vendors: %d\n" % len(vendors))
    sys.exit(1)

if (len(products) < MIN_PRODUCTS):
    sys.stderr.write("Not enough products: %d\n" % len(products))
    sys.exit(1)
+
# Emit the value_string entries sorted by numeric ID, as the dissector's
# extended value_string lookup expects.
for v in sorted(vendors):
    vendors_str += " { 0x%s, \"%s\" },\n"%(v,vendors[v])

vendors_str += """ { 0, NULL }\n};
value_string_ext ext_usb_vendors_vals = VALUE_STRING_EXT_INIT(usb_vendors_vals);
"""

for p in sorted(products):
    products_str += " { 0x%s, \"%s\" },\n"%(p,products[p])

products_str += """ { 0, NULL }\n};
value_string_ext ext_usb_products_vals = VALUE_STRING_EXT_INIT(usb_products_vals);
"""

# Fixed boilerplate header for the generated dissector source.
header="""/* usb.c
 * USB vendor id and product ids
 * This file was generated by running python ./tools/make-usb.py
 * Don't change it directly.
 *
 * Copyright 2012, Michal Labedzki for Tieto Corporation
 *
 * Other values imported from libghoto2/camlibs/ptp2/library.c, music-players.h
 *
 * Copyright (C) 2001-2005 Mariusz Woloszyn <emsi@ipartners.pl>
 * Copyright (C) 2003-2013 Marcus Meissner <marcus@jet.franken.de>
 * Copyright (C) 2005 Hubert Figuiere <hfiguiere@teaser.fr>
 * Copyright (C) 2009 Axel Waggershauser <awagger@web.de>
 * Copyright (C) 2005-2007 Richard A. Low <richard@wentnet.com>
 * Copyright (C) 2005-2012 Linus Walleij <triad@df.lth.se>
 * Copyright (C) 2007 Ted Bullock
 * Copyright (C) 2012 Sony Mobile Communications AB
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

/*
 * XXX We should probably parse a USB ID file at program start instead
 * of generating this file.
 */

#include "config.h"
#include <epan/packet.h>
"""
+
# Write the generated dissector source. A with-block guarantees the file is
# flushed and closed even if one of the writes raises.
with open('epan/dissectors/usb.c', 'w') as f:
    f.write(header)
    f.write("\n")
    f.write(vendors_str)
    f.write("\n\n")
    f.write(products_str)
    f.write("\n")

print("Success!")
diff --git a/tools/make-version.py b/tools/make-version.py
new file mode 100755
index 0000000..4adc7b2
--- /dev/null
+++ b/tools/make-version.py
@@ -0,0 +1,459 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022 by Moshe Kaplan
+# Based on make-version.pl by Jörg Mayer
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# See below for usage.
+#
+# If run with the "-r" or "--set-release" argument the VERSION macro in
+# CMakeLists.txt will have the version_extra template appended to the
+# version number. vcs_version.h will _not_ be generated if either argument is
+# present.
+#
+# make-version.py is called during the build to update vcs_version.h in the build
+# directory. To set a fixed version, use something like:
+#
+# cmake -DVCSVERSION_OVERRIDE="Git v3.1.0 packaged as 3.1.0-1"
+#
+
+# XXX - We're pretty dumb about the "{vcsinfo}" substitution, and about having
+# spaces in the package format.
+
+import argparse
+import os
+import os.path
+import re
+import shlex
+import shutil
+import sys
+import subprocess
+
+GIT_ABBREV_LENGTH = 12
+
+# `git archive` will use an 'export-subst' entry in .gitattributes to replace
+# the $Format strings with `git log --pretty=format:` placeholders.
+# The output will look something like the following:
+# GIT_EXPORT_SUBST_H = '51315cf37cdf6c0add1b1c99cb7941aac4489a6f'
+# GIT_EXPORT_SUBST_D = 'HEAD -> master, upstream/master, upstream/HEAD'
+# If the text "$Format" is still present, it means that
+# git archive did not replace the $Format string, which
+# means that this not a git archive.
+GIT_EXPORT_SUBST_H = '40459284278611128aac5cef35a563218933f8da'
+GIT_EXPORT_SUBST_D = 'tag: wireshark-4.2.2, tag: v4.2.2, refs/merge-requests/13920/head, refs/keep-around/40459284278611128aac5cef35a563218933f8da'
+IS_GIT_ARCHIVE = not GIT_EXPORT_SUBST_H.startswith('$Format')
+
+
def update_cmakelists_txt(src_dir, set_version, repo_data):
    """Rewrite the version components in the top-level CMakeLists.txt.

    Does nothing when no explicit version was requested and there is no
    extra package string to record.
    """
    if not set_version and repo_data['package_string'] == "":
        return

    cmake_filepath = os.path.join(src_dir, "CMakeLists.txt")

    with open(cmake_filepath, encoding='utf-8') as fh:
        cmake_contents = fh.read()

    MAJOR_PATTERN = r"^set *\( *PROJECT_MAJOR_VERSION *\d+ *\)$"
    MINOR_PATTERN = r"^set *\( *PROJECT_MINOR_VERSION *\d+ *\)$"
    PATCH_PATTERN = r"^set *\( *PROJECT_PATCH_VERSION *\d+ *\)$"
    VERSION_EXTENSION_PATTERN = r"^set *\( *PROJECT_VERSION_EXTENSION .*?$"

    new_cmake_contents = cmake_contents
    new_cmake_contents = re.sub(MAJOR_PATTERN,
                                f"set(PROJECT_MAJOR_VERSION {repo_data['version_major']})",
                                new_cmake_contents,
                                flags=re.MULTILINE)
    new_cmake_contents = re.sub(MINOR_PATTERN,
                                f"set(PROJECT_MINOR_VERSION {repo_data['version_minor']})",
                                new_cmake_contents,
                                flags=re.MULTILINE)
    new_cmake_contents = re.sub(PATCH_PATTERN,
                                f"set(PROJECT_PATCH_VERSION {repo_data['version_patch']})",
                                new_cmake_contents,
                                flags=re.MULTILINE)
    new_cmake_contents = re.sub(VERSION_EXTENSION_PATTERN,
                                f"set(PROJECT_VERSION_EXTENSION \"{repo_data['package_string']}\")",
                                new_cmake_contents,
                                flags=re.MULTILINE)

    with open(cmake_filepath, mode='w', encoding='utf-8') as fh:
        fh.write(new_cmake_contents)
    print(cmake_filepath + " has been updated.")
+
+
def update_debian_changelog(src_dir, repo_data):
    """Replace the first line of packaging/debian/changelog with the new
    'wireshark (x.y.z[extra]) unstable; urgency=low' header."""

    deb_changelog_filepath = os.path.join(src_dir, "packaging", "debian", "changelog")
    with open(deb_changelog_filepath, encoding='utf-8') as fh:
        changelog_contents = fh.read()

    CHANGELOG_PATTERN = r"^.*"
    text_replacement = f"wireshark ({repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}{repo_data['package_string']}) unstable; urgency=low"
    # Note: Only need to replace the first line, so we don't use re.MULTILINE or re.DOTALL
    new_changelog_contents = re.sub(CHANGELOG_PATTERN, text_replacement, changelog_contents)
    with open(deb_changelog_filepath, mode='w', encoding='utf-8') as fh:
        fh.write(new_changelog_contents)
    print(deb_changelog_filepath + " has been updated.")
+
+
def create_version_file(version_f, repo_data):
    'Write the version to the specified file handle'
    # Assemble "major.minor.patch" plus any extra package string.
    components = (repo_data['version_major'], repo_data['version_minor'],
                  repo_data['version_patch'], repo_data['package_string'])
    version_f.write("{}.{}.{}{}\n".format(*components))
    print(version_f.name + " has been created.")
+
+
def update_attributes_asciidoc(src_dir, repo_data):
    # Read docbook/attributes.adoc, then write it back out with an updated
    # wireshark-version replacement line.
    asiidoc_filepath = os.path.join(src_dir, "docbook", "attributes.adoc")
    with open(asiidoc_filepath, encoding='utf-8') as fh:
        asciidoc_contents = fh.read()

    # Sample line (without quotes): ":wireshark-version: 2.3.1"
    ASCIIDOC_PATTERN = r"^:wireshark-version:.*$"
    text_replacement = f":wireshark-version: {repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}"

    new_asciidoc_contents = re.sub(ASCIIDOC_PATTERN, text_replacement, asciidoc_contents, flags=re.MULTILINE)

    with open(asiidoc_filepath, mode='w', encoding='utf-8') as fh:
        fh.write(new_asciidoc_contents)
    print(asiidoc_filepath + " has been updated.")
+
+
def update_docinfo_asciidoc(src_dir, repo_data):
    """Update the '<subtitle>For Wireshark x.y</subtitle>' line in the
    developer's-guide and user's-guide docinfo files."""
    doc_paths = []
    doc_paths += [os.path.join(src_dir, 'docbook', 'wsdg_src', 'developer-guide-docinfo.xml')]
    doc_paths += [os.path.join(src_dir, 'docbook', 'wsug_src', 'user-guide-docinfo.xml')]

    for doc_path in doc_paths:
        with open(doc_path, encoding='utf-8') as fh:
            doc_contents = fh.read()

        # Sample line (without quotes): "<subtitle>For Wireshark 1.2</subtitle>"
        # The version separator dot is escaped; the previous pattern used a
        # bare '.', which matches any character.
        DOC_PATTERN = r"^<subtitle>For Wireshark \d+\.\d+<\/subtitle>$"
        text_replacement = f"<subtitle>For Wireshark {repo_data['version_major']}.{repo_data['version_minor']}</subtitle>"

        new_doc_contents = re.sub(DOC_PATTERN, text_replacement, doc_contents, flags=re.MULTILINE)

        with open(doc_path, mode='w', encoding='utf-8') as fh:
            fh.write(new_doc_contents)
        print(doc_path + " has been updated.")
+
+
def update_cmake_lib_releases(src_dir, repo_data):
    # Read CMakeLists.txt for each library, then write back out an updated version.
    # Only the patch component of each library VERSION is bumped.
    dir_paths = []
    dir_paths += [os.path.join(src_dir, 'epan')]
    dir_paths += [os.path.join(src_dir, 'wiretap')]

    for dir_path in dir_paths:
        cmakelists_filepath = os.path.join(dir_path, "CMakeLists.txt")
        with open(cmakelists_filepath, encoding='utf-8') as fh:
            cmakelists_contents = fh.read()

        # Sample line (without quotes; note leading tab: " VERSION "0.0.0" SOVERSION 0")
        VERSION_PATTERN = r'^(\s*VERSION\s+"\d+\.\d+\.)\d+'
        replacement_text = f"\\g<1>{repo_data['version_patch']}"
        new_cmakelists_contents = re.sub(VERSION_PATTERN,
                                         replacement_text,
                                         cmakelists_contents,
                                         flags=re.MULTILINE)

        with open(cmakelists_filepath, mode='w', encoding='utf-8') as fh:
            fh.write(new_cmakelists_contents)
        print(cmakelists_filepath + " has been updated.")
+
+
# Update distributed files that contain any version information
def update_versioned_files(src_dir, set_version, repo_data):
    """Propagate the version into every distributed file that embeds it.

    The asciidoc/docinfo/library files only change when an explicit version
    was requested (--set-version).
    """
    update_cmakelists_txt(src_dir, set_version, repo_data)
    update_debian_changelog(src_dir, repo_data)
    if set_version:
        update_attributes_asciidoc(src_dir, repo_data)
        update_docinfo_asciidoc(src_dir, repo_data)
        update_cmake_lib_releases(src_dir, repo_data)
+
+
def generate_version_h(repo_data):
    """Return the new contents of vcs_version.h for the given repo metadata."""
    if not repo_data.get('enable_vcsversion'):
        return "/* #undef VCSVERSION */\n"

    description = repo_data.get('git_description')
    if description:
        # The git describe output already names the base tag and commit ID,
        # which pins down the source tree; the branch adds nothing.
        return f'#define VCSVERSION "{description}"\n'

    if repo_data.get('last_change') and repo_data.get('num_commits'):
        version_string = "v{}.{}.{}".format(repo_data['version_major'],
                                            repo_data['version_minor'],
                                            repo_data['version_patch'])
        return f'#define VCSVERSION "{version_string}-Git-{repo_data["num_commits"]}"\n'

    commit = repo_data.get('commit_id')
    if commit:
        return f'#define VCSVERSION "Git commit {commit}"\n'

    # No usable VCS information at all.
    return '#define VCSVERSION "Git Rev Unknown from unknown"\n'
+
+
def print_VCS_REVISION(version_file, repo_data, set_vcs):
    # Write the version control system's version to $version_file.
    # Don't change the file if it is not needed.
    #
    # XXX - We might want to add VCSVERSION to CMakeLists.txt so that it can
    # generate vcs_version.h independently.

    new_version_h = generate_version_h(repo_data)

    # Compare against the existing file so unchanged content does not touch
    # the mtime and trigger a rebuild of everything including vcs_version.h.
    needs_update = True
    if os.path.exists(version_file):
        with open(version_file, encoding='utf-8') as fh:
            current_version_h = fh.read()
        if current_version_h == new_version_h:
            needs_update = False

    # With --set-release/--set-version the header must not be (re)generated.
    if not set_vcs:
        return

    if needs_update:
        with open(version_file, mode='w', encoding='utf-8') as fh:
            fh.write(new_version_h)
        print(version_file + " has been updated.")
    elif not repo_data['enable_vcsversion']:
        print(version_file + " disabled.")
    else:
        print(version_file + " unchanged.")
    return
+
+
def get_version(cmakelists_file_data):
    """Extract the (major, minor, patch) version strings from the text of the
    top-level CMakeLists.txt.

    Sample input lines:
        set(PROJECT_MAJOR_VERSION 3)
        set(PROJECT_MINOR_VERSION 7)
        set(PROJECT_PATCH_VERSION 2)
    """
    found = []
    for component in ('MAJOR', 'MINOR', 'PATCH'):
        pattern = r"^set *\( *PROJECT_%s_VERSION *(\d+) *\)$" % component
        match = re.search(pattern, cmakelists_file_data, re.MULTILINE)
        if not match:
            raise Exception("Couldn't get %s version" % component.lower())
        found.append(match.groups()[0])
    return tuple(found)
+
+
def read_git_archive(tagged_version_extra, untagged_version_extra):
    # Reads key data from the git repo.
    # For git archives, this does not need to access the source directory because
    # `git archive` will use an 'export-subst' entry in .gitattributes to replace
    # the value for GIT_EXPORT_SUBST_H in the script.
    # Returns a dictionary with key values from the repository

    # GIT_EXPORT_SUBST_D is the "%D" ref list substituted at archive time;
    # any "tag: vN..." entry means the archive was made from a release tag.
    is_tagged = False
    for git_ref in GIT_EXPORT_SUBST_D.split(r', '):
        match = re.match(r'^tag: (v[1-9].+)', git_ref)
        if match:
            is_tagged = True
            vcs_tag = match.groups()[0]

    if is_tagged:
        print(f"We are on tag {vcs_tag}.")
        package_string = tagged_version_extra
    else:
        print("We are not tagged.")
        package_string = untagged_version_extra

    # Always 0 commits for a git archive
    num_commits = 0

    # Assume a full commit hash, abbreviate it.
    commit_id = GIT_EXPORT_SUBST_H[:GIT_ABBREV_LENGTH]
    package_string = package_string.replace("{vcsinfo}", str(num_commits) + "-" + commit_id)

    repo_data = {}
    repo_data['commit_id'] = commit_id
    repo_data['enable_vcsversion'] = True
    repo_data['info_source'] = "git archive"
    repo_data['is_tagged'] = is_tagged
    repo_data['num_commits'] = num_commits
    repo_data['package_string'] = package_string
    return repo_data
+
+
def read_git_repo(src_dir, tagged_version_extra, untagged_version_extra):
    """Read version metadata (describe output, commit count, tag state) from
    the git checkout at src_dir and return it as a dict.

    Returns an empty dict when git is not installed.
    """

    # shutil.which() returns None (not '') when the command is missing; the
    # previous "!= ''" comparison made this check always succeed.
    IS_GIT_INSTALLED = shutil.which('git') is not None
    if not IS_GIT_INSTALLED:
        print("Git unavailable. Git revision will be missing from version string.", file=sys.stderr)
        return {}

    GIT_DIR = os.path.join(src_dir, '.git')
    # Check whether to include VCS version information in vcs_version.h
    enable_vcsversion = True
    git_get_commondir_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" rev-parse --git-common-dir')
    git_commondir = subprocess.check_output(git_get_commondir_cmd, universal_newlines=True).strip()
    if git_commondir and os.path.exists(f"{git_commondir}{os.sep}wireshark-disable-versioning"):
        print("Header versioning disabled using git override.")
        enable_vcsversion = False

    git_last_changetime_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" log -1 --pretty=format:%at')
    git_last_changetime = subprocess.check_output(git_last_changetime_cmd, universal_newlines=True).strip()

    # Commits since last annotated tag.
    # Output could be something like: v3.7.2rc0-64-g84d83a8292cb
    # Or g84d83a8292cb
    git_last_annotated_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" describe --abbrev={GIT_ABBREV_LENGTH} --long --always --match "v[1-9]*"')
    git_last_annotated = subprocess.check_output(git_last_annotated_cmd, universal_newlines=True).strip()
    parts = git_last_annotated.split('-')
    git_description = git_last_annotated
    if len(parts) > 1:
        num_commits = int(parts[1])
    else:
        # Bare commit hash: no reachable annotated tag.
        num_commits = 0
    commit_id = parts[-1]

    # Pull a trailing "rcN" out of the base tag, if any.
    release_candidate = ''
    RC_PATTERN = r'^v\d+\.\d+\.\d+(rc\d+)$'
    match = re.match(RC_PATTERN, parts[0])
    if match:
        release_candidate = match.groups()[0]

    # This command is expected to fail if the version is not tagged
    try:
        git_vcs_tag_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" describe --exact-match --match "v[1-9]*"')
        git_vcs_tag = subprocess.check_output(git_vcs_tag_cmd, stderr=subprocess.DEVNULL, universal_newlines=True).strip()
        is_tagged = True
    except subprocess.CalledProcessError:
        is_tagged = False

    git_timestamp = ""
    if num_commits == 0:
        # Get the timestamp; format is similar to: 2022-06-27 23:09:20 -0400
        # Note: This doesn't appear to be used, only checked for command success
        git_timestamp_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" log --format="%ad" -n 1 --date=iso')
        git_timestamp = subprocess.check_output(git_timestamp_cmd, universal_newlines=True).strip()

    if is_tagged:
        print(f"We are on tag {git_vcs_tag}.")
        package_string = tagged_version_extra
    else:
        print("We are not tagged.")
        package_string = untagged_version_extra

    package_string = release_candidate + package_string.replace("{vcsinfo}", str(num_commits) + "-" + commit_id)

    repo_data = {}
    repo_data['commit_id'] = commit_id
    repo_data['enable_vcsversion'] = enable_vcsversion
    repo_data['git_timestamp'] = git_timestamp
    repo_data['git_description'] = git_description
    repo_data['info_source'] = "Command line (git)"
    repo_data['is_tagged'] = is_tagged
    repo_data['last_change'] = git_last_changetime
    repo_data['num_commits'] = num_commits
    repo_data['package_string'] = package_string
    return repo_data
+
+
def parse_versionstring(version_arg):
    """argparse type-checker: split 'x.y.z' into its three numeric parts.

    Raises argparse.ArgumentTypeError for any malformed input.
    """
    parts = version_arg.split('.')
    if len(parts) != 3:
        raise argparse.ArgumentTypeError(
            "Version must have three numbers of the form x.y.z. You entered: " + version_arg)
    for label, part in zip(('Major', 'Minor', 'Patch'), parts):
        try:
            int(part)
        except ValueError:
            raise argparse.ArgumentTypeError(
                f"{label} version must be a number! {label} version was '{part}'")
    return parts
+
+
def read_repo_info(src_dir, tagged_version_extra, untagged_version_extra):
    """Collect version metadata from the source tree (git archive or git
    checkout), plus the x.y.z version parsed from CMakeLists.txt."""
    if IS_GIT_ARCHIVE:
        repo_data = read_git_archive(tagged_version_extra, untagged_version_extra)
    elif os.path.exists(src_dir + os.sep + '.git') and not os.path.exists(os.path.join(src_dir, '.git', 'svn')):
        # A plain git checkout; git-svn clones are not supported.
        repo_data = read_git_repo(src_dir, tagged_version_extra, untagged_version_extra)
    else:
        raise Exception(src_dir + " does not appear to be a git repo or git archive!")

    cmake_path = os.path.join(src_dir, "CMakeLists.txt")
    with open(cmake_path, encoding='utf-8') as fh:
        version_major, version_minor, version_patch = get_version(fh.read())
    repo_data['version_major'] = version_major
    repo_data['version_minor'] = version_minor
    repo_data['version_patch'] = version_patch

    return repo_data
+
+
# CMakeLists.txt calls this with no arguments to create vcs_version.h
# AppVeyor calls this with --set-release --untagged-version-extra=-{vcsinfo}-AppVeyor --tagged-version-extra=-AppVeyor
# .gitlab-ci calls this with --set-release
# Release checklist requires --set-version
def main():
    parser = argparse.ArgumentParser(description='Wireshark file and package versions')
    # --set-version and --set-release are mutually exclusive; either one
    # suppresses vcs_version.h generation (see set_vcs below).
    action_group = parser.add_mutually_exclusive_group()
    action_group.add_argument('--set-version', '-v', metavar='<x.y.z>', type=parse_versionstring, help='Set the major, minor, and patch versions in the top-level CMakeLists.txt, docbook/attributes.adoc, packaging/debian/changelog, and the CMakeLists.txt for all libraries to the provided version number')
    action_group.add_argument('--set-release', '-r', action='store_true', help='Set the extra release information in the top-level CMakeLists.txt based on either default or command-line specified options.')
    setrel_group = parser.add_argument_group()
    setrel_group.add_argument('--tagged-version-extra', '-t', default="", help="Extra version information format to use when a tag is found. No format \
(an empty string) is used by default.")
    setrel_group.add_argument('--untagged-version-extra', '-u', default='-{vcsinfo}', help='Extra version information format to use when no tag is found. The format "-{vcsinfo}" (the number of commits and commit ID) is used by default.')
    parser.add_argument('--version-file', '-f', metavar='<file>', type=argparse.FileType('w'), help='path to version file')
    parser.add_argument("src_dir", metavar='src_dir', nargs=1, help="path to source code")
    args = parser.parse_args()

    if args.version_file and not args.set_release:
        sys.stderr.write('Error: --version-file must be used with --set-release.\n')
        sys.exit(1)

    src_dir = args.src_dir[0]

    if args.set_version:
        # Explicit version: no need to consult the repository at all.
        repo_data = {}
        repo_data['version_major'] = args.set_version[0]
        repo_data['version_minor'] = args.set_version[1]
        repo_data['version_patch'] = args.set_version[2]
        repo_data['package_string'] = ''
    else:
        repo_data = read_repo_info(src_dir, args.tagged_version_extra, args.untagged_version_extra)

    # vcs_version.h is only (re)written during normal builds.
    set_vcs = not (args.set_release or args.set_version)
    VERSION_FILE = 'vcs_version.h'
    print_VCS_REVISION(VERSION_FILE, repo_data, set_vcs)

    if args.set_release or args.set_version:
        update_versioned_files(src_dir, args.set_version, repo_data)

    if args.version_file:
        create_version_file(args.version_file, repo_data)



if __name__ == "__main__":
    main()
diff --git a/tools/make_charset_table.c b/tools/make_charset_table.c
new file mode 100644
index 0000000..27d921a
--- /dev/null
+++ b/tools/make_charset_table.c
@@ -0,0 +1,125 @@
+/* make_charset_table.c
+ * sample program to generate tables for charsets.c using iconv
+ *
+ * public domain
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <iconv.h>
+
+#define UNREPL 0xFFFD
+
int main(int argc, char **argv) {
    /* for now only UCS-2 */
    uint16_t table[0x100];

    iconv_t conv;
    const char *charset;
    int i, j;

    /* 0x00 ... 0x7F same as ASCII? */
    int ascii_based = 1;
    /* 0x00 ... 0x9F same as ISO? */
    int iso_based = 1;

    if (argc != 2) {
        printf("usage: %s <charset>\n", argv[0]);
        return 1;
    }

    charset = argv[1];

    /* Probe once up front so an unknown charset name fails early. */
    conv = iconv_open("UCS-2", charset);
    if (conv == (iconv_t) -1) {
        perror("iconv_open");
        return 2;
    }
    iconv_close(conv);

    /* Convert every possible input byte individually. */
    for (i = 0x00; i < 0x100; i++) {
        unsigned char in[1], out[2];
        size_t inlen = 1, outlen = 2;

        char *inbuf = (char *) in;
        char *outbuf = (char *) out;

        size_t ret;

        in[0] = i;

        /* A fresh descriptor per byte keeps each conversion stateless. */
        conv = iconv_open("UCS-2BE", charset);

        if (conv == (iconv_t) -1) {
            /* shouldn't fail now */
            perror("iconv_open");
            return 2;
        }

        ret = iconv(conv, &inbuf, &inlen, &outbuf, &outlen);

        if (ret == (size_t) -1 && errno == EILSEQ) {
            /* Byte has no mapping: record U+FFFD REPLACEMENT CHARACTER. */
            table[i] = UNREPL;
            iconv_close(conv);
            continue;
        }

        if (ret == (size_t) -1) {
            perror("iconv");
            iconv_close(conv);
            return 4;
        }

        iconv_close(conv);

        if (ret != 0 || inlen != 0 || outlen != 0) {
            fprintf(stderr, "%d: smth went wrong: %zu %zu %zu\n", i, ret, inlen, outlen);
            return 3;
        }

        /* UCS-2BE: out[0] is the high byte, out[1] the low byte. */
        if (i < 0x80 && (out[0] != 0 || out[1] != i))
            ascii_based = 0;

        if (i < 0xA0 && (out[0] != 0 || out[1] != i))
            iso_based = 0;

        table[i] = (out[0] << 8) | out[1];
    }

    /* iso_based not supported */
    iso_based = 0;

    printf("/* generated by %s %s */\n", argv[0], charset);

    /* Skip the leading identity range if the charset extends ASCII/ISO,
     * so the emitted table only covers the bytes that differ. */
    if (iso_based)
        i = 0xA0;
    else if (ascii_based)
        i = 0x80;
    else
        i = 0;

    printf("const gunichar2 charset_table_%s[0x%x] = {\n", charset, 0x100 - i);
    while (i < 0x100) {
        int start = i;

        printf(" ");

        /* Eight entries per output row. */
        for (j = 0; j < 8; j++, i++) {
            if (table[i] == UNREPL)
                printf("UNREPL, ");
            else
                printf("0x%.4x, ", table[i]);
        }

        /* Alternate start/end range markers every other row. */
        if ((start & 0xf) == 0)
            printf(" /* 0x%.2X - */", start);
        else
            printf(" /* - 0x%.2X */", i - 1);

        printf("\n");
    }
    printf("};\n");

    return 0;
}
diff --git a/tools/mingw-rpm-setup.sh b/tools/mingw-rpm-setup.sh
new file mode 100755
index 0000000..602c0fb
--- /dev/null
+++ b/tools/mingw-rpm-setup.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# Setup development environment on Fedora Linux for MinGW-w64
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+
# Print the help text. The installer is dnf (see the bottom of this script);
# the previous text said "pacman"/"--noconfirm", left over from the Arch
# setup script this was derived from.
function print_usage() {
    printf "\\nUtility to setup a Fedora MinGW-w64 system for Wireshark development.\\n"
    printf "The basic usage installs the needed software\\n\\n"
    printf "Usage: %s [...other options...]\\n" "$0"
    printf "\\t--install-all: install everything\\n"
    printf "\\t[other]: other options are passed as-is to dnf\\n"
    printf "\\tPass -y to bypass any \"are you sure?\" messages.\\n"
}
+
# Collect unrecognized arguments and forward them to the installer.
OPTIONS=
for arg; do
    case $arg in
        --help)
            print_usage
            exit 0
            ;;
        --install-all)
            # Everything is already in BASIC_LIST; nothing extra to add.
            ;;
        *)
            OPTIONS="$OPTIONS $arg"
            ;;
    esac
done

# Cross toolchain, Qt6, and the libraries/tools needed for a MinGW build.
BASIC_LIST="mingw64-gcc \
    mingw64-gcc-c++ \
    mingw64-glib2 \
    mingw64-libgcrypt \
    mingw64-c-ares \
    mingw64-qt6-qtbase \
    mingw64-qt6-qt5compat \
    mingw64-qt6-qtmultimedia \
    mingw64-qt6-qttools \
    mingw64-speexdsp \
    mingw32-nsis \
    mingw64-nsis \
    mingw64-gnutls \
    mingw64-brotli \
    mingw64-minizip \
    mingw64-opus \
    mingw64-wpcap \
    mingw64-libxml2 \
    ninja-build \
    flex \
    lemon \
    asciidoctor \
    libxslt \
    docbook-style-xsl \
    ccache \
    git \
    patch \
    cmake
    cmake-rpm-macros"

ACTUAL_LIST=$BASIC_LIST

# Word splitting is intentional: both variables hold space-separated lists.
# shellcheck disable=SC2086
dnf install $ACTUAL_LIST $OPTIONS
diff --git a/tools/msnchat b/tools/msnchat
new file mode 100755
index 0000000..c2fcaab
--- /dev/null
+++ b/tools/msnchat
@@ -0,0 +1,315 @@
+#!/usr/bin/env python
+"""
+Process packet capture files and produce a nice HTML
+report of MSN Chat sessions.
+
+Copyright (c) 2003 by Gilbert Ramirez <gram@alumni.rice.edu>
+
+SPDX-License-Identifier: GPL-2.0-or-later
+"""
+
+import os
+import re
+import sys
+import array
+import string
+import WiresharkXML
+import getopt
+
+# By default we output the HTML to stdout
+out_fh = sys.stdout
+
+class MSNMessage:
+ pass
+
+class MSN_MSG(MSNMessage):
+ def __init__(self, timestamp, user, message):
+ self.timestamp = timestamp
+ self.user = user
+ self.message = message
+
+
+class Conversation:
+ """Keeps track of a single MSN chat session"""
+
+ re_MSG_out = re.compile("MSG (?P<TrID>\d+) (?P<ACKTYPE>[UNA]) (?P<len>\d+)")
+ re_MSG_in = re.compile("MSG (?P<user>\S+)@(?P<domain>\S+) (?P<alias>\S+) (?P<len>\d+)")
+
+ USER_NOT_FOUND = -1
+ DEFAULT_USER = None
+
+
+ DEFAULT_USER_COLOR = "#0000ff"
+ USER_COLORS = [ "#ff0000", "#00ff00",
+ "#800000", "#008000", "#000080" ]
+
+ DEFAULT_USER_TEXT_COLOR = "#000000"
+ USER_TEXT_COLOR = "#000080"
+
+ def __init__(self):
+ self.packets = []
+ self.messages = []
+
+ def AddPacket(self, packet):
+ self.packets.append(packet)
+
+ def Summarize(self):
+ for packet in self.packets:
+ msg = self.CreateMSNMessage(packet)
+ if msg:
+ self.messages.append(msg)
+ else:
+ #XXX
+ pass
+
+
+ def CreateMSNMessage(self, packet):
+ msnms = packet.get_items("msnms")[0]
+
+ # Check the first line in the msnms transmission for the user
+ child = msnms.children[0]
+ user = self.USER_NOT_FOUND
+
+ m = self.re_MSG_out.search(child.show)
+ if m:
+ user = self.DEFAULT_USER
+
+ else:
+ m = self.re_MSG_in.search(child.show)
+ if m:
+ user = m.group("alias")
+
+ if user == self.USER_NOT_FOUND:
+ print >> sys.stderr, "No match for", child.show
+ sys.exit(1)
+ return None
+
+ msg = ""
+
+ i = 5
+ check_trailing = 0
+ if len(msnms.children) > 5:
+ check_trailing = 1
+
+ while i < len(msnms.children):
+ msg += msnms.children[i].show
+ if check_trailing:
+ j = msg.find("MSG ")
+ if j >= 0:
+ msg = msg[:j]
+ i += 5
+ else:
+ i += 6
+ else:
+ i += 6
+
+ timestamp = packet.get_items("frame.time")[0].get_show()
+ i = timestamp.rfind(".")
+ timestamp = timestamp[:i]
+
+ return MSN_MSG(timestamp, user, msg)
+
+ def MsgToHTML(self, text):
+ bytes = array.array("B")
+
+ new_string = text
+ i = new_string.find("\\")
+
+ while i > -1:
+ # At the end?
+ if i == len(new_string) - 1:
+ # Just let the default action
+ # copy everything to 'bytes'
+ break
+
+ if new_string[i+1] in string.digits:
+ left = new_string[:i]
+ bytes.fromstring(left)
+
+ right = new_string[i+4:]
+
+ oct_string = new_string[i+1:i+4]
+ char = int(oct_string, 8)
+ bytes.append(char)
+
+ new_string = right
+
+ # ignore \r and \n
+ elif new_string[i+1] in "rn":
+ copy_these = new_string[:i]
+ bytes.fromstring(copy_these)
+ new_string = new_string[i+2:]
+
+ else:
+ copy_these = new_string[:i+2]
+ bytes.fromstring(copy_these)
+ new_string = new_string[i+2:]
+
+ i = new_string.find("\\")
+
+
+ bytes.fromstring(new_string)
+
+ return bytes
+
+ def CreateHTML(self, default_user):
+ if not self.messages:
+ return
+
+ print >> out_fh, """
+<HR><BR><H3 Align=Center> ---- New Conversation @ %s ----</H3><BR>""" \
+ % (self.messages[0].timestamp)
+
+ user_color_assignments = {}
+
+ for msg in self.messages:
+ # Calculate 'user' and 'user_color' and 'user_text_color'
+ if msg.user == self.DEFAULT_USER:
+ user = default_user
+ user_color = self.DEFAULT_USER_COLOR
+ user_text_color = self.DEFAULT_USER_TEXT_COLOR
+ else:
+ user = msg.user
+ user_text_color = self.USER_TEXT_COLOR
+ if user_color_assignments.has_key(user):
+ user_color = user_color_assignments[user]
+ else:
+ num_assigned = len(user_color_assignments.keys())
+ user_color = self.USER_COLORS[num_assigned]
+ user_color_assignments[user] = user_color
+
+ # "Oct 6, 2003 21:45:25" --> "21:45:25"
+ timestamp = msg.timestamp.split()[-1]
+
+ htmlmsg = self.MsgToHTML(msg.message)
+
+ print >> out_fh, """
+<FONT COLOR="%s"><FONT SIZE="2">(%s) </FONT><B>%s:</B></FONT> <FONT COLOR="%s">""" \
+ % (user_color, timestamp, user, user_text_color)
+
+ htmlmsg.tofile(out_fh)
+
+ print >> out_fh, "</FONT><BR>"
+
+
+class CaptureFile:
+ """Parses a single a capture file and keeps track of
+ all chat sessions in the file."""
+
+ def __init__(self, capture_filename, tshark):
+ """Run tshark on the capture file and parse
+ the data."""
+ self.conversations = []
+ self.conversations_map = {}
+
+ pipe = os.popen(tshark + " -Tpdml -n -R "
+ "'msnms contains \"X-MMS-IM-Format\"' "
+ "-r " + capture_filename, "r")
+
+ WiresharkXML.parse_fh(pipe, self.collect_packets)
+
+ for conv in self.conversations:
+ conv.Summarize()
+
+ def collect_packets(self, packet):
+ """Collect the packets passed back from WiresharkXML.
+ Sort them by TCP/IP conversation, as there could be multiple
+ clients per machine."""
+ # Just in case we're looking at tunnelling protocols where
+ # more than one IP or TCP header exists, look at the last one,
+ # which would be the one inside the tunnel.
+ src_ip = packet.get_items("ip.src")[-1].get_show()
+ dst_ip = packet.get_items("ip.dst")[-1].get_show()
+ src_tcp = packet.get_items("tcp.srcport")[-1].get_show()
+ dst_tcp = packet.get_items("tcp.dstport")[-1].get_show()
+
+ key_params = [src_ip, dst_ip, src_tcp, dst_tcp]
+ key_params.sort()
+ key = '|'.join(key_params)
+
+ if not self.conversations_map.has_key(key):
+ conv = self.conversations_map[key] = Conversation()
+ self.conversations.append(conv)
+ else:
+ conv = self.conversations_map[key]
+
+ conv.AddPacket(packet)
+
+
+ def CreateHTML(self, default_user):
+ if not self.conversations:
+ return
+
+ for conv in self.conversations:
+ conv.CreateHTML(default_user)
+
+
+def run_filename(filename, default_user, tshark):
+ """Process one capture file."""
+
+ capture = CaptureFile(filename, tshark)
+ capture.CreateHTML(default_user)
+
+
+def run(filenames, default_user, tshark):
+ # HTML Header
+ print >> out_fh, """
+<HTML><TITLE>MSN Conversation</TITLE>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+<BODY>
+"""
+ for filename in filenames:
+ run_filename(filename, default_user, tshark)
+
+ # HTML Footer
+ print >> out_fh, """
+<HR>
+</BODY>
+</HTML>
+"""
+
+
+def usage():
+ print >> sys.stderr, "msnchat [OPTIONS] CAPTURE_FILE [...]"
+ print >> sys.stderr, " -o FILE name of output file"
+ print >> sys.stderr, " -t TSHARK location of tshark binary"
+ print >> sys.stderr, " -u USER name for unknown user"
+ sys.exit(1)
+
+def main():
+ default_user = "Unknown"
+ tshark = "tshark"
+
+ optstring = "ho:t:u:"
+ longopts = ["help"]
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], optstring, longopts)
+ except getopt.GetoptError:
+ usage()
+
+ for opt, arg in opts:
+ if opt == "-h" or opt == "--help":
+ usage()
+
+ elif opt == "-o":
+ filename = arg
+ global out_fh
+ try:
+ out_fh = open(filename, "w")
+ except IOError:
+ sys.exit("Could not open %s for writing." % (filename,))
+
+ elif opt == "-u":
+ default_user = arg
+
+ elif opt == "-t":
+ tshark = arg
+
+ else:
+ sys.exit("Unhandled command-line option: " + opt)
+
+ run(args, default_user, tshark)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/msys2-setup.sh b/tools/msys2-setup.sh
new file mode 100644
index 0000000..0ca6329
--- /dev/null
+++ b/tools/msys2-setup.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+# Setup development environment on MSYS2
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+
+function print_usage() {
+ printf "\\nUtility to setup an MSYS2 MinGW-w64 system for Wireshark development.\\n"
+ printf "The basic usage installs the needed software\\n\\n"
+ printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
+ printf "\\t--install-optional: install optional software as well\\n"
+ printf "\\t--install-test-deps: install packages required to run all tests\\n"
+ printf "\\t--install-all: install everything\\n"
+ printf "\\t[other]: other options are passed as-is to pacman\\n"
+ printf "\\tPass --noconfirm to bypass any \"are you sure?\" messages.\\n"
+}
+
+ADDITIONAL=0
+TESTDEPS=0
+OPTIONS=
+for arg; do
+ case $arg in
+ --help)
+ print_usage
+ exit 0
+ ;;
+ --install-optional)
+ ADDITIONAL=1
+ ;;
+ --install-test-deps)
+ TESTDEPS=1
+ ;;
+ --install-all)
+ ADDITIONAL=1
+ TESTDEPS=1
+ ;;
+ *)
+ OPTIONS="$OPTIONS $arg"
+ ;;
+ esac
+done
+
+PACKAGE_PREFIX="${MINGW_PACKAGE_PREFIX:-mingw-w64-x86_64}"
+
+#
+# Lua packaging is kind of a mess. Lua 5.2 is not available. Some packages have
+# a hard dependency on LuaJIT and it conflicts with Lua 5.1 and vice-versa.
+# This will probably have to be fixed by the MSYS2 maintainers.
+# XXX Is this still true?
+#
+BASIC_LIST="base-devel \
+ git \
+ ${PACKAGE_PREFIX}-bcg729 \
+ ${PACKAGE_PREFIX}-brotli \
+ ${PACKAGE_PREFIX}-c-ares \
+ ${PACKAGE_PREFIX}-cmake \
+ ${PACKAGE_PREFIX}-glib2 \
+ ${PACKAGE_PREFIX}-gnutls \
+ ${PACKAGE_PREFIX}-libgcrypt \
+ ${PACKAGE_PREFIX}-libilbc \
+ ${PACKAGE_PREFIX}-libmaxminddb \
+ ${PACKAGE_PREFIX}-nghttp2 \
+ ${PACKAGE_PREFIX}-libpcap \
+ ${PACKAGE_PREFIX}-libsmi \
+ ${PACKAGE_PREFIX}-libssh \
+ ${PACKAGE_PREFIX}-libxml2 \
+ ${PACKAGE_PREFIX}-lz4 \
+ ${PACKAGE_PREFIX}-minizip \
+ ${PACKAGE_PREFIX}-ninja \
+ ${PACKAGE_PREFIX}-opencore-amr \
+ ${PACKAGE_PREFIX}-opus \
+ ${PACKAGE_PREFIX}-pcre2 \
+ ${PACKAGE_PREFIX}-python \
+ ${PACKAGE_PREFIX}-qt6-base \
+ ${PACKAGE_PREFIX}-qt6-multimedia \
+ ${PACKAGE_PREFIX}-qt6-tools \
+ ${PACKAGE_PREFIX}-qt6-translations \
+ ${PACKAGE_PREFIX}-qt6-5compat \
+ ${PACKAGE_PREFIX}-sbc \
+ ${PACKAGE_PREFIX}-snappy \
+ ${PACKAGE_PREFIX}-spandsp \
+ ${PACKAGE_PREFIX}-speexdsp \
+ ${PACKAGE_PREFIX}-toolchain \
+ ${PACKAGE_PREFIX}-winsparkle \
+ ${PACKAGE_PREFIX}-zlib \
+ ${PACKAGE_PREFIX}-zstd"
+
+ADDITIONAL_LIST="${PACKAGE_PREFIX}-asciidoctor \
+ ${PACKAGE_PREFIX}-ccache \
+ ${PACKAGE_PREFIX}-docbook-xsl \
+ ${PACKAGE_PREFIX}-doxygen \
+ ${PACKAGE_PREFIX}-libxslt \
+ ${PACKAGE_PREFIX}-perl \
+ ${PACKAGE_PREFIX}-ntldd"
+
+TESTDEPS_LIST="${PACKAGE_PREFIX}-python-pytest \
+ ${PACKAGE_PREFIX}-python-pytest-xdist"
+
+ACTUAL_LIST=$BASIC_LIST
+
+if [ $ADDITIONAL -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
+fi
+
+if [ $TESTDEPS -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST"
+fi
+
+# Partial upgrades are unsupported.
+pacman --sync --refresh --sysupgrade --needed $ACTUAL_LIST $OPTIONS || exit 2
+
+if [ $ADDITIONAL -eq 0 ]
+then
+ printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
+fi
+
+if [ $TESTDEPS -eq 0 ]
+then
+ printf "\n*** Test deps not installed. Rerun with --install-test-deps to have them.\n"
+fi
diff --git a/tools/msys2checkdeps.py b/tools/msys2checkdeps.py
new file mode 100644
index 0000000..f46eb50
--- /dev/null
+++ b/tools/msys2checkdeps.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# ------------------------------------------------------------------------------------------------------------------
+# list or check dependencies for binary distributions based on MSYS2 (requires the package mingw-w64-ntldd)
+#
+# run './msys2checkdeps.py --help' for usage information
+# ------------------------------------------------------------------------------------------------------------------
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+from __future__ import print_function
+
+
+import argparse
+import os
+import subprocess
+import sys
+
+
+SYSTEMROOT = os.environ['SYSTEMROOT']
+
+
+class Dependency:
+ def __init__(self):
+ self.location = None
+ self.dependents = set()
+
+
+def warning(msg):
+ print("Warning: " + msg, file=sys.stderr)
+
+
+def error(msg):
+ print("Error: " + msg, file=sys.stderr)
+ exit(1)
+
+
+def call_ntldd(filename):
+ try:
+ output = subprocess.check_output(['ntldd', '-R', filename], stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ error("'ntldd' failed with '" + str(e) + "'")
+ except WindowsError as e:
+ error("Calling 'ntldd' failed with '" + str(e) + "' (have you installed 'mingw-w64-ntldd-git'?)")
+ except Exception as e:
+ error("Calling 'ntldd' failed with '" + str(e) + "'")
+ return output.decode('utf-8')
+
+
+def get_dependencies(filename, deps):
+ raw_list = call_ntldd(filename)
+
+ skip_indent = float('Inf')
+ parents = {}
+ parents[0] = os.path.basename(filename)
+ for line in raw_list.splitlines():
+ line = line[1:]
+ indent = len(line) - len(line.lstrip())
+ if indent > skip_indent:
+ continue
+ else:
+ skip_indent = float('Inf')
+
+ # if the dependency is not found in the working directory ntldd tries to find it on the search path
+ # which is indicated by the string '=>' followed by the determined location or 'not found'
+ if ('=>' in line):
+ (lib, location) = line.lstrip().split(' => ')
+ if location == 'not found':
+ location = None
+ else:
+ location = location.rsplit('(', 1)[0].strip()
+ else:
+ lib = line.rsplit('(', 1)[0].strip()
+ location = os.getcwd()
+
+ parents[indent+1] = lib
+
+ # we don't care about Microsoft libraries and their dependencies
+ if location and SYSTEMROOT in location:
+ skip_indent = indent
+ continue
+
+ if lib not in deps:
+ deps[lib] = Dependency()
+ deps[lib].location = location
+ deps[lib].dependents.add(parents[indent])
+ return deps
+
+
+def collect_dependencies(path):
+ # collect dependencies
+ # - each key in 'deps' will be the filename of a dependency
+ # - the corresponding value is an instance of class Dependency (containing full path and dependents)
+ deps = {}
+ if os.path.isfile(path):
+ deps = get_dependencies(path, deps)
+ elif os.path.isdir(path):
+ extensions = ['.exe', '.pyd', '.dll']
+ exclusions = ['distutils/command/wininst'] # python
+ for base, dirs, files in os.walk(path):
+ for f in files:
+ filepath = os.path.join(base, f)
+ (_, ext) = os.path.splitext(f)
+ if (ext.lower() not in extensions) or any(exclusion in filepath for exclusion in exclusions):
+ continue
+ deps = get_dependencies(filepath, deps)
+ return deps
+
+
+if __name__ == '__main__':
+ modes = ['list', 'list-compact', 'check', 'check-missing', 'check-unused']
+
+ # parse arguments from command line
+ parser = argparse.ArgumentParser(description="List or check dependencies for binary distributions based on MSYS2.\n"
+ "(requires the package 'mingw-w64-ntldd')",
+ formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('mode', metavar="MODE", choices=modes,
+ help="One of the following:\n"
+ " list - list dependencies in human-readable form\n"
+ " with full path and list of dependents\n"
+ " list-compact - list dependencies in compact form (as a plain list of filenames)\n"
+ " check - check for missing or unused dependencies (see below for details)\n"
+ " check-missing - check if all required dependencies are present in PATH\n"
+ " exits with error code 2 if missing dependencies are found\n"
+ " and prints the list to stderr\n"
+ " check-unused - check if any of the libraries in the root of PATH are unused\n"
+ " and prints the list to stderr")
+ parser.add_argument('path', metavar='PATH',
+ help="full or relative path to a single file or a directory to work on\n"
+ "(directories will be checked recursively)")
+ parser.add_argument('-w', '--working-directory', metavar="DIR",
+ help="Use custom working directory (instead of 'dirname PATH')")
+ args = parser.parse_args()
+
+ # check if path exists
+ args.path = os.path.abspath(args.path)
+ if not os.path.exists(args.path):
+ error("Can't find file/folder '" + args.path + "'")
+
+ # get root and set it as working directory (unless one is explicitly specified)
+ if args.working_directory:
+ root = os.path.abspath(args.working_directory)
+ elif os.path.isdir(args.path):
+ root = args.path
+ elif os.path.isfile(args.path):
+ root = os.path.dirname(args.path)
+ os.chdir(root)
+
+ # get dependencies for path recursively
+ deps = collect_dependencies(args.path)
+
+ # print output / prepare exit code
+ exit_code = 0
+ for dep in sorted(deps):
+ location = deps[dep].location
+ dependents = deps[dep].dependents
+
+ if args.mode == 'list':
+ if (location is None):
+ location = '---MISSING---'
+ print(dep + " - " + location + " (" + ", ".join(dependents) + ")")
+ elif args.mode == 'list-compact':
+ print(dep)
+ elif args.mode in ['check', 'check-missing']:
+ if ((location is None) or (root not in os.path.abspath(location))):
+ warning("Missing dependency " + dep + " (" + ", ".join(dependents) + ")")
+ exit_code = 2
+
+ # check for unused libraries
+ if args.mode in ['check', 'check-unused']:
+ installed_libs = [file for file in os.listdir(root) if file.endswith(".dll")]
+ deps_lower = [dep.lower() for dep in deps]
+ top_level_libs = [lib for lib in installed_libs if lib.lower() not in deps_lower]
+ for top_level_lib in top_level_libs:
+ warning("Unused dependency " + top_level_lib)
+
+ exit(exit_code)
diff --git a/tools/ncp2222.py b/tools/ncp2222.py
new file mode 100755
index 0000000..f14d0c5
--- /dev/null
+++ b/tools/ncp2222.py
@@ -0,0 +1,16921 @@
+#!/usr/bin/env python3
+
+"""
+Creates C code from a table of NCP type 0x2222 packet types.
+(And 0x3333, which are the replies, but the packets are more commonly
+referred to as type 0x2222; the 0x3333 replies are understood to be
+part of the 0x2222 "family")
+
+The data-munging code was written by Gilbert Ramirez.
+The NCP data comes from Greg Morris <GMORRIS@novell.com>.
+Many thanks to Novell for letting him work on this.
+
+Additional data sources:
+"Programmer's Guide to the NetWare Core Protocol" by Steve Conner and Dianne Conner.
+
+At one time, Novell provided a list of NCPs by number at:
+
+http://developer.novell.com/ndk/ncp.htm (where you could download an
+*.exe file which installs a PDF, although you may have to create a login
+to do this)
+
+or
+
+http://developer.novell.com/ndk/doc/ncp/
+for a badly-formatted HTML version of the same PDF.
+
+Currently, NCP documentation can be found at:
+
+https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/
+
+with a list of NCPs by number at
+
+https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/ncpdocs/main.htm
+
+and some additional NCPs to support volumes > 16TB at
+
+https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/ncpdocs/16tb+.htm
+
+NDS information can be found at:
+
+https://www.microfocus.com/documentation/edirectory-developer-documentation/edirectory-libraries-for-c/
+
+and PDFs linked from there, and from
+
+https://www.novell.com/documentation/developer/ndslib/
+
+and HTML versions linked from there.
+
+The Novell eDirectory Schema Reference gives a "Transfer Format" for
+some types, which may be the way they're sent over the wire.
+
+Portions Copyright (c) 2000-2002 by Gilbert Ramirez <gram@alumni.rice.edu>.
+Portions Copyright (c) Novell, Inc. 2000-2003.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""
+
+import os
+import sys
+import string
+import getopt
+import traceback
+
+errors = {}
+groups = {}
+packets = []
+compcode_lists = None
+ptvc_lists = None
+msg = None
+reply_var = None
+#ensure unique expert function declarations
+expert_hash = {}
+
+REC_START = 0
+REC_LENGTH = 1
+REC_FIELD = 2
+REC_ENDIANNESS = 3
+REC_VAR = 4
+REC_REPEAT = 5
+REC_REQ_COND = 6
+REC_INFO_STR = 7
+
+NO_VAR = -1
+NO_REPEAT = -1
+NO_REQ_COND = -1
+NO_LENGTH_CHECK = -2
+
+
+PROTO_LENGTH_UNKNOWN = -1
+
+global_highest_var = -1
+global_req_cond = {}
+
+
+REQ_COND_SIZE_VARIABLE = "REQ_COND_SIZE_VARIABLE"
+REQ_COND_SIZE_CONSTANT = "REQ_COND_SIZE_CONSTANT"
+
+##############################################################################
+# Global containers
+##############################################################################
+
+class UniqueCollection:
+ """The UniqueCollection class stores objects which can be compared to other
+ objects of the same class. If two objects in the collection are equivalent,
+ only one is stored."""
+
+ def __init__(self, name):
+ "Constructor"
+ self.name = name
+ self.members = []
+ self.member_reprs = {}
+
+ def Add(self, object):
+ """Add an object to the members lists, if a comparable object
+ doesn't already exist. The object that is in the member list, that is
+ either the object that was added or the comparable object that was
+ already in the member list, is returned."""
+
+ r = repr(object)
+ # Is 'object' a duplicate of some other member?
+ if r in self.member_reprs:
+ return self.member_reprs[r]
+ else:
+ self.member_reprs[r] = object
+ self.members.append(object)
+ return object
+
+ def Members(self):
+ "Returns the list of members."
+ return self.members
+
+ def HasMember(self, object):
+ "Does the list of members contain the object?"
+ if repr(object) in self.member_reprs:
+ return 1
+ else:
+ return 0
+
+# This list needs to be defined before the NCP types are defined,
+# because the NCP types are defined in the global scope, not inside
+# a function's scope.
+ptvc_lists = UniqueCollection('PTVC Lists')
+
+##############################################################################
+
+class NamedList:
+ "NamedList's keep track of PTVC's and Completion Codes"
+ def __init__(self, name, list):
+ "Constructor"
+ self.name = name
+ self.list = list
+
+ def __cmp__(self, other):
+ "Compare this NamedList to another"
+
+ if isinstance(other, NamedList):
+ return cmp(self.list, other.list)
+ else:
+ return 0
+
+
+ def Name(self, new_name = None):
+ "Get/Set name of list"
+ if new_name is not None:
+ self.name = new_name
+ return self.name
+
+ def Records(self):
+ "Returns record lists"
+ return self.list
+
+ def Null(self):
+ "Is there no list (different from an empty list)?"
+ return self.list is None
+
+ def Empty(self):
+        "Is the list empty (different from a null list)?"
+ assert(not self.Null())
+
+ if self.list:
+ return 0
+ else:
+ return 1
+
+ def __repr__(self):
+ return repr(self.list)
+
+class PTVC(NamedList):
+ """ProtoTree TVBuff Cursor List ("PTVC List") Class"""
+
+ def __init__(self, name, records, code):
+ "Constructor"
+ NamedList.__init__(self, name, [])
+
+ global global_highest_var
+
+ expected_offset = None
+ highest_var = -1
+
+ named_vars = {}
+
+ # Make a PTVCRecord object for each list in 'records'
+ for record in records:
+ offset = record[REC_START]
+ length = record[REC_LENGTH]
+ field = record[REC_FIELD]
+ endianness = record[REC_ENDIANNESS]
+ info_str = record[REC_INFO_STR]
+
+ # Variable
+ var_name = record[REC_VAR]
+ if var_name:
+ # Did we already define this var?
+ if var_name in named_vars:
+ sys.exit("%s has multiple %s vars." % \
+ (name, var_name))
+
+ highest_var = highest_var + 1
+ var = highest_var
+ if highest_var > global_highest_var:
+ global_highest_var = highest_var
+ named_vars[var_name] = var
+ else:
+ var = NO_VAR
+
+ # Repeat
+ repeat_name = record[REC_REPEAT]
+ if repeat_name:
+ # Do we have this var?
+ if repeat_name not in named_vars:
+ sys.exit("%s does not have %s var defined." % \
+ (name, repeat_name))
+ repeat = named_vars[repeat_name]
+ else:
+ repeat = NO_REPEAT
+
+ # Request Condition
+ req_cond = record[REC_REQ_COND]
+ if req_cond != NO_REQ_COND:
+ global_req_cond[req_cond] = None
+
+ ptvc_rec = PTVCRecord(field, length, endianness, var, repeat, req_cond, info_str, code)
+
+ if expected_offset is None:
+ expected_offset = offset
+
+ elif expected_offset == -1:
+ pass
+
+ elif expected_offset != offset and offset != -1:
+ msg.write("Expected offset in %s for %s to be %d\n" % \
+ (name, field.HFName(), expected_offset))
+ sys.exit(1)
+
+ # We can't make a PTVC list from a variable-length
+ # packet, unless the fields can tell us at run time
+ # how long the packet is. That is, nstring8 is fine, since
+ # the field has an integer telling us how long the string is.
+ # Fields that don't have a length determinable at run-time
+ # cannot be variable-length.
+ if type(ptvc_rec.Length()) == type(()):
+ if isinstance(ptvc_rec.Field(), nstring):
+ expected_offset = -1
+ pass
+ elif isinstance(ptvc_rec.Field(), nbytes):
+ expected_offset = -1
+ pass
+ elif isinstance(ptvc_rec.Field(), struct):
+ expected_offset = -1
+ pass
+ else:
+ field = ptvc_rec.Field()
+ assert 0, "Cannot make PTVC from %s, type %s" % \
+ (field.HFName(), field)
+
+ elif expected_offset > -1:
+ if ptvc_rec.Length() < 0:
+ expected_offset = -1
+ else:
+ expected_offset = expected_offset + ptvc_rec.Length()
+
+
+ self.list.append(ptvc_rec)
+
+ def ETTName(self):
+ return "ett_%s" % (self.Name(),)
+
+
+ def Code(self):
+ x = "static const ptvc_record %s[] = {\n" % (self.Name())
+ for ptvc_rec in self.list:
+ x = x + " %s,\n" % (ptvc_rec.Code())
+ x = x + " { NULL, 0, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n"
+ x = x + "};\n"
+ return x
+
+ def __repr__(self):
+ x = ""
+ for ptvc_rec in self.list:
+ x = x + repr(ptvc_rec)
+ return x
+
+
+class PTVCBitfield(PTVC):
+ def __init__(self, name, vars):
+ NamedList.__init__(self, name, [])
+
+ for var in vars:
+ ptvc_rec = PTVCRecord(var, var.Length(), var.Endianness(),
+ NO_VAR, NO_REPEAT, NO_REQ_COND, None, 0)
+ self.list.append(ptvc_rec)
+
+ def Code(self):
+ ett_name = self.ETTName()
+ x = "static int %s = -1;\n" % (ett_name,)
+
+ x = x + "static const ptvc_record ptvc_%s[] = {\n" % (self.Name())
+ for ptvc_rec in self.list:
+ x = x + " %s,\n" % (ptvc_rec.Code())
+ x = x + " { NULL, 0, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n"
+ x = x + "};\n"
+
+ x = x + "static const sub_ptvc_record %s = {\n" % (self.Name(),)
+ x = x + " &%s,\n" % (ett_name,)
+ x = x + " NULL,\n"
+ x = x + " ptvc_%s,\n" % (self.Name(),)
+ x = x + "};\n"
+ return x
+
+
+class PTVCRecord:
+ def __init__(self, field, length, endianness, var, repeat, req_cond, info_str, code):
+ "Constructor"
+ self.field = field
+ self.length = length
+ self.endianness = endianness
+ self.var = var
+ self.repeat = repeat
+ self.req_cond = req_cond
+ self.req_info_str = info_str
+ self.__code__ = code
+
+ def __cmp__(self, other):
+ "Comparison operator"
+ if self.field != other.field:
+ return 1
+ elif self.length < other.length:
+ return -1
+ elif self.length > other.length:
+ return 1
+ elif self.endianness != other.endianness:
+ return 1
+ else:
+ return 0
+
+ def Code(self):
+ # Nice textual representations
+ if self.var == NO_VAR:
+ var = "NO_VAR"
+ else:
+ var = self.var
+
+ if self.repeat == NO_REPEAT:
+ repeat = "NO_REPEAT"
+ else:
+ repeat = self.repeat
+
+ if self.req_cond == NO_REQ_COND:
+ req_cond = "NO_REQ_COND"
+ else:
+ req_cond = global_req_cond[self.req_cond]
+ assert req_cond is not None
+
+ if isinstance(self.field, struct):
+ return self.field.ReferenceString(var, repeat, req_cond)
+ else:
+ return self.RegularCode(var, repeat, req_cond)
+
+ def InfoStrName(self):
+ "Returns a C symbol based on the NCP function code, for the info_str"
+ return "info_str_0x%x" % (self.__code__)
+
+ def RegularCode(self, var, repeat, req_cond):
+ "String representation"
+ endianness = 'ENC_BIG_ENDIAN'
+ if self.endianness == ENC_LITTLE_ENDIAN:
+ endianness = 'ENC_LITTLE_ENDIAN'
+
+ length = None
+
+ if type(self.length) == type(0):
+ length = self.length
+ else:
+ # This is for cases where a length is needed
+ # in order to determine a following variable-length,
+ # like nstring8, where 1 byte is needed in order
+ # to determine the variable length.
+ var_length = self.field.Length()
+ if var_length > 0:
+ length = var_length
+
+ if length == PROTO_LENGTH_UNKNOWN:
+ # XXX length = "PROTO_LENGTH_UNKNOWN"
+ pass
+
+ assert length, "Length not handled for %s" % (self.field.HFName(),)
+
+ sub_ptvc_name = self.field.PTVCName()
+ if sub_ptvc_name != "NULL":
+ sub_ptvc_name = "&%s" % (sub_ptvc_name,)
+
+ if self.req_info_str:
+ req_info_str = "&" + self.InfoStrName() + "_req"
+ else:
+ req_info_str = "NULL"
+
+ return "{ &%s, %s, %s, %s, %s, %s, %s, %s }" % \
+ (self.field.HFName(), length, sub_ptvc_name,
+ req_info_str, endianness, var, repeat, req_cond)
+
+ def Offset(self):
+ return self.offset
+
+ def Length(self):
+ return self.length
+
+ def Field(self):
+ return self.field
+
+ def __repr__(self):
+ if self.req_info_str:
+ return "{%s len=%s end=%s var=%s rpt=%s rqc=%s info=%s}" % \
+ (self.field.HFName(), self.length,
+ self.endianness, self.var, self.repeat, self.req_cond, self.req_info_str[1])
+ else:
+ return "{%s len=%s end=%s var=%s rpt=%s rqc=%s}" % \
+ (self.field.HFName(), self.length,
+ self.endianness, self.var, self.repeat, self.req_cond)
+
+##############################################################################
+
+class NCP:
+ "NCP Packet class"
+ def __init__(self, func_code, description, group, has_length=1):
+ "Constructor"
+ self.__code__ = func_code
+ self.description = description
+ self.group = group
+ self.codes = None
+ self.request_records = None
+ self.reply_records = None
+ self.has_length = has_length
+ self.req_cond_size = None
+ self.req_info_str = None
+ self.expert_func = None
+
+ if group not in groups:
+ msg.write("NCP 0x%x has invalid group '%s'\n" % \
+ (self.__code__, group))
+ sys.exit(1)
+
+ if self.HasSubFunction():
+ # NCP Function with SubFunction
+ self.start_offset = 10
+ else:
+ # Simple NCP Function
+ self.start_offset = 7
+
+ def ReqCondSize(self):
+ return self.req_cond_size
+
+ def ReqCondSizeVariable(self):
+ self.req_cond_size = REQ_COND_SIZE_VARIABLE
+
+ def ReqCondSizeConstant(self):
+ self.req_cond_size = REQ_COND_SIZE_CONSTANT
+
+ def FunctionCode(self, part=None):
+ "Returns the function code for this NCP packet."
+ if part is None:
+ return self.__code__
+ elif part == 'high':
+ if self.HasSubFunction():
+ return (self.__code__ & 0xff00) >> 8
+ else:
+ return self.__code__
+ elif part == 'low':
+ if self.HasSubFunction():
+ return self.__code__ & 0x00ff
+ else:
+ return 0x00
+ else:
+ msg.write("Unknown directive '%s' for function_code()\n" % (part))
+ sys.exit(1)
+
+ def HasSubFunction(self):
+        "Does this NCP packet require a subfunction field?"
+ if self.__code__ <= 0xff:
+ return 0
+ else:
+ return 1
+
+ def HasLength(self):
+ return self.has_length
+
+ def Description(self):
+ return self.description
+
+ def Group(self):
+ return self.group
+
+ def PTVCRequest(self):
+ return self.ptvc_request
+
+ def PTVCReply(self):
+ return self.ptvc_reply
+
+ def Request(self, size, records=[], **kwargs):
+ self.request_size = size
+ self.request_records = records
+ if self.HasSubFunction():
+ if self.HasLength():
+ self.CheckRecords(size, records, "Request", 10)
+ else:
+ self.CheckRecords(size, records, "Request", 8)
+ else:
+ self.CheckRecords(size, records, "Request", 7)
+ self.ptvc_request = self.MakePTVC(records, "request", self.__code__)
+
+ if "info_str" in kwargs:
+ self.req_info_str = kwargs["info_str"]
+
+ def Reply(self, size, records=[]):
+ self.reply_size = size
+ self.reply_records = records
+ self.CheckRecords(size, records, "Reply", 8)
+ self.ptvc_reply = self.MakePTVC(records, "reply", self.__code__)
+
+ def CheckRecords(self, size, records, descr, min_hdr_length):
+ "Simple sanity check"
+ if size == NO_LENGTH_CHECK:
+ return
+ min = size
+ max = size
+ if type(size) == type(()):
+ min = size[0]
+ max = size[1]
+
+ lower = min_hdr_length
+ upper = min_hdr_length
+
+ for record in records:
+ rec_size = record[REC_LENGTH]
+ rec_lower = rec_size
+ rec_upper = rec_size
+ if type(rec_size) == type(()):
+ rec_lower = rec_size[0]
+ rec_upper = rec_size[1]
+
+ lower = lower + rec_lower
+ upper = upper + rec_upper
+
+ error = 0
+ if min != lower:
+ msg.write("%s records for 2222/0x%x sum to %d bytes minimum, but param1 shows %d\n" \
+ % (descr, self.FunctionCode(), lower, min))
+ error = 1
+ if max != upper:
+ msg.write("%s records for 2222/0x%x sum to %d bytes maximum, but param1 shows %d\n" \
+ % (descr, self.FunctionCode(), upper, max))
+ error = 1
+
+ if error == 1:
+ sys.exit(1)
+
+
    def MakePTVC(self, records, name_suffix, code):
        """Makes a PTVC out of a request or reply record list. Possibly adds
        it to the global list of PTVCs (the global list is a UniqueCollection,
        so an equivalent PTVC may already be in the global list)."""

        name = "%s_%s" % (self.CName(), name_suffix)
        # If any individual record has an info_str, bubble it up to the top
        # so an info_string_t can be created for it.  Note: when several
        # records carry one, the last record wins.
        for record in records:
            if record[REC_INFO_STR]:
                self.req_info_str = record[REC_INFO_STR]

        ptvc = PTVC(name, records, code)

        # If the record is a duplicate, remove the req_info_str so
        # that an unused info_string isn't generated.
        # NOTE(review): membership is decided by scanning repr(ptvc) for the
        # substring 'info' -- presumably matching how PTVC reprs its
        # info-string slot; confirm before changing PTVC.__repr__.
        remove_info = 0
        if ptvc_lists.HasMember(ptvc):
            if 'info' in repr(ptvc):
                remove_info = 1

        # Add() returns the canonical (possibly pre-existing) instance.
        ptvc_test = ptvc_lists.Add(ptvc)

        if remove_info:
            self.req_info_str = None

        return ptvc_test
+
    def CName(self):
        "Returns a C symbol based on the NCP function code"
        return "ncp_0x%x" % (self.__code__)
+
    def InfoStrName(self):
        "Returns a C symbol based on the NCP function code, for the info_str"
        return "info_str_0x%x" % (self.__code__)
+
    def MakeExpert(self, func):
        "Attach an expert-info C callback name and register it globally."
        self.expert_func = func
        # expert_hash is keyed and valued by the same name; it is used as a
        # de-duplicating set of expert functions.
        expert_hash[func] = func
+
+ def Variables(self):
+ """Returns a list of variables used in the request and reply records.
+ A variable is listed only once, even if it is used twice (once in
+ the request, once in the reply)."""
+
+ variables = {}
+ if self.request_records:
+ for record in self.request_records:
+ var = record[REC_FIELD]
+ variables[var.HFName()] = var
+
+ sub_vars = var.SubVariables()
+ for sv in sub_vars:
+ variables[sv.HFName()] = sv
+
+ if self.reply_records:
+ for record in self.reply_records:
+ var = record[REC_FIELD]
+ variables[var.HFName()] = var
+
+ sub_vars = var.SubVariables()
+ for sv in sub_vars:
+ variables[sv.HFName()] = sv
+
+ return list(variables.values())
+
    def CalculateReqConds(self):
        """Returns a list of request conditions (dfilter text) used
        in the reply records. A request condition is listed only once,
        even if several records share it.  Returns a NamedList of the
        sorted dfilter texts, or None when there are none.  Note that only
        the None case is stored on self; callers store the NamedList via
        SetReqConds()."""
        texts = {}
        if self.reply_records:
            for record in self.reply_records:
                text = record[REC_REQ_COND]
                if text != NO_REQ_COND:
                    # Dict used as an ordered set of condition texts.
                    texts[text] = None

        if len(texts) == 0:
            self.req_conds = None
            return None

        dfilter_texts = list(texts.keys())
        dfilter_texts.sort()
        name = "%s_req_cond_indexes" % (self.CName(),)
        return NamedList(name, dfilter_texts)
+
    def GetReqConds(self):
        "Request-condition NamedList, or None (see CalculateReqConds)."
        return self.req_conds
+
    def SetReqConds(self, new_val):
        "Store the request-condition NamedList computed by CalculateReqConds."
        self.req_conds = new_val
+
+
+ def CompletionCodes(self, codes=None):
+ """Sets or returns the list of completion
+ codes. Internally, a NamedList is used to store the
+ completion codes, but the caller of this function never
+ realizes that because Python lists are the input and
+ output."""
+
+ if codes is None:
+ return self.codes
+
+ # Sanity check
+ okay = 1
+ for code in codes:
+ if code not in errors:
+ msg.write("Errors table does not have key 0x%04x for NCP=0x%x\n" % (code,
+ self.__code__))
+ okay = 0
+
+ # Delay the exit until here so that the programmer can get
+ # the complete list of missing error codes
+ if not okay:
+ sys.exit(1)
+
+ # Create CompletionCode (NamedList) object and possible
+ # add it to the global list of completion code lists.
+ name = "%s_errors" % (self.CName(),)
+ codes.sort()
+ codes_list = NamedList(name, codes)
+ self.codes = compcode_lists.Add(codes_list)
+
+ self.Finalize()
+
    def Finalize(self):
        """Adds the NCP object to the global collection of NCP
        objects. This is done automatically after setting the
        CompletionCode list. Yes, this is a shortcut, but it makes
        our list of NCP packet definitions look neater, since an
        explicit "add to global list of packets" is not needed."""

        # Add packet to global collection of packets
        packets.append(self)
+
def rec(start, length, field, endianness=None, **kw):
    "Build a fixed-position field record (see _rec for the layout)."
    return _rec(start, length, field, endianness, kw)
+
def srec(field, endianness=None, **kw):
    "Build a streaming field record: position and length are implicit (-1)."
    return _rec(-1, -1, field, endianness, kw)
+
def _rec(start, length, field, endianness, kw):
    """Common builder behind rec()/srec().

    Returns the 8-slot record list indexed by the REC_* constants:
    [start, length, field, endianness, var, repeat, req_cond, info_str].
    Recognized keywords: var, repeat, req_cond, info_str.
    """
    # Fall back to the field's own default endianness when none was given.
    if endianness is None:
        endianness = field.Endianness()

    # 'var' marks this field as a count variable; it must be an integer type.
    if "var" in kw:
        if not isinstance(field, CountingNumber):
            sys.exit("Field %s used as count variable, but not integer." \
                    % (field.HFName()))
        var = kw["var"]
    else:
        var = None

    # 'repeat' is honored only when no 'var' was supplied.
    repeat = kw.get("repeat") if not var else None

    # Optional request condition and info-string format.
    req_cond = kw.get("req_cond", NO_REQ_COND)
    req_info_str = kw.get("info_str")

    return [start, length, field, endianness, var, repeat, req_cond, req_info_str]
+
+
+
+##############################################################################
+
+ENC_LITTLE_ENDIAN = 1 # Little-Endian
+ENC_BIG_ENDIAN = 0 # Big-Endian
+NA = -1 # Not Applicable
+
class Type:
    """Virtual base class for NCP field types.

    Subclasses override the class attributes below to describe how the
    field is registered with Wireshark (FT_* type, display base, etc.).
    """
    type = "Type"           # human-readable type name
    ftype = None            # Wireshark FT_* type string
    disp = "BASE_DEC"       # default display base
    custom_func = None      # C custom-format function name, if any
    endianness = NA
    values = []             # value_string table, overridden by subclasses

    def __init__(self, abbrev, descr, bytes, endianness = NA):
        """abbrev -- filter-name fragment; descr -- display description;
        bytes -- field width in bytes; endianness -- ENC_* constant or NA."""
        self.abbrev = abbrev
        self.descr = descr
        self.bytes = bytes
        self.endianness = endianness
        self.hfname = "hf_ncp_" + self.abbrev

    def Length(self):
        "Field width in bytes."
        return self.bytes

    def Abbreviation(self):
        "Filter-name fragment for this field."
        return self.abbrev

    def Description(self):
        "Display description for this field."
        return self.descr

    def HFName(self):
        "C header-field variable name (hf_ncp_<abbrev>)."
        return self.hfname

    def DFilter(self):
        "Full display-filter name (ncp.<abbrev>)."
        return "ncp." + self.abbrev

    def WiresharkFType(self):
        "Wireshark FT_* type string."
        return self.ftype

    def Display(self, newval=None):
        "Get the display base, optionally setting it first."
        if newval is not None:
            self.disp = newval
        return self.disp

    def ValuesName(self):
        "C expression for the field's strings/custom-format slot."
        if self.custom_func:
            return "CF_FUNC(" + self.custom_func + ")"
        else:
            return "NULL"

    def Mask(self):
        "Bitmask for this field (0 = whole field)."
        return 0

    def Endianness(self):
        "ENC_* constant, or NA when byte order does not apply."
        return self.endianness

    def SubVariables(self):
        "Nested sub-fields (bitfields override this)."
        return []

    def PTVCName(self):
        "Name of the field's sub-PTVC table, or NULL when none."
        return "NULL"

    def NWDate(self):
        "Format this field as a NetWare date via the padd_date C helper."
        self.disp = "BASE_CUSTOM"
        self.custom_func = "padd_date"

    def NWTime(self):
        "Format this field as a NetWare time via the padd_time C helper."
        self.disp = "BASE_CUSTOM"
        self.custom_func = "padd_time"

    #def __cmp__(self, other):
    # return cmp(self.hfname, other.hfname)

    def __lt__(self, other):
        # Order fields by their hf name (used when sorting variable lists).
        return (self.hfname < other.hfname)
+
class struct(PTVC, Type):
    """A composite NCP type: a named sequence of fields that is emitted as
    its own sub-PTVC table in the generated C code.

    items may mix bare Type instances (defaults for var/repeat/req_cond)
    with full record lists produced by rec()/endian().
    """
    def __init__(self, name, items, descr=None):
        name = "struct_%s" % (name,)
        NamedList.__init__(self, name, [])

        self.bytes = 0
        self.descr = descr
        for item in items:
            if isinstance(item, Type):
                # A bare field: take its own length/endianness, no extras.
                field = item
                length = field.Length()
                endianness = field.Endianness()
                var = NO_VAR
                repeat = NO_REPEAT
                req_cond = NO_REQ_COND
            elif isinstance(item, list):
                # A full record list (e.g. from endian()).
                field = item[REC_FIELD]
                length = item[REC_LENGTH]
                endianness = item[REC_ENDIANNESS]
                var = item[REC_VAR]
                repeat = item[REC_REPEAT]
                req_cond = item[REC_REQ_COND]
            else:
                assert 0, "Item %s item not handled." % (item,)

            ptvc_rec = PTVCRecord(field, length, endianness, var,
                    repeat, req_cond, None, 0)
            self.list.append(ptvc_rec)
            self.bytes = self.bytes + field.Length()

        self.hfname = self.name

    def Variables(self):
        "All fields contained in this struct, in declaration order."
        fields = []
        for ptvc_rec in self.list:
            fields.append(ptvc_rec.Field())
        return fields

    def ReferenceString(self, var, repeat, req_cond):
        "C initializer referencing this struct from an enclosing PTVC table."
        return "{ PTVC_STRUCT, NO_LENGTH, &%s, NULL, NO_ENDIANNESS, %s, %s, %s }" % \
                (self.name, var, repeat, req_cond)

    def Code(self):
        "Emit the C ett variable, ptvc_record table and sub_ptvc_record."
        ett_name = self.ETTName()
        x = "static int %s = -1;\n" % (ett_name,)
        x = x + "static const ptvc_record ptvc_%s[] = {\n" % (self.name,)
        for ptvc_rec in self.list:
            x = x + "    %s,\n" % (ptvc_rec.Code())
        x = x + "    { NULL, NO_LENGTH, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n"
        x = x + "};\n"

        x = x + "static const sub_ptvc_record %s = {\n" % (self.name,)
        x = x + "    &%s,\n" % (ett_name,)
        if self.descr:
            x = x + '    "%s",\n' % (self.descr,)
        else:
            x = x + "    NULL,\n"
        x = x + "    ptvc_%s,\n" % (self.Name(),)
        x = x + "};\n"
        return x

    def __lt__(self, other):
        # Python 3 replacement for the former __cmp__/cmp() hook, which was
        # dead code on py3 (cmp() no longer exists); same hfname ordering.
        return self.HFName() < other.HFName()
+
+
class byte(Type):
    # Single unsigned byte.
    type = "byte"
    ftype = "FT_UINT8"
    def __init__(self, abbrev, descr):
        Type.__init__(self, abbrev, descr, 1)

class CountingNumber:
    # Marker mixin: fields of this kind may serve as 'var' count variables
    # in _rec() records.
    pass

# Same as above. Both are provided for convenience
class uint8(Type, CountingNumber):
    type = "uint8"
    ftype = "FT_UINT8"
    bytes = 1
    def __init__(self, abbrev, descr):
        Type.__init__(self, abbrev, descr, 1)
+
# Fixed-width little-endian-by-default unsigned integer fields.
class uint16(Type, CountingNumber):
    type = "uint16"
    ftype = "FT_UINT16"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 2, endianness)

class uint24(Type, CountingNumber):
    type = "uint24"
    ftype = "FT_UINT24"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 3, endianness)

class uint32(Type, CountingNumber):
    type = "uint32"
    ftype = "FT_UINT32"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 4, endianness)

class uint64(Type, CountingNumber):
    type = "uint64"
    ftype = "FT_UINT64"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 8, endianness)

class eptime(Type, CountingNumber):
    # 4-byte epoch timestamp, shown in local time.
    type = "eptime"
    ftype = "FT_ABSOLUTE_TIME"
    disp = "ABSOLUTE_TIME_LOCAL"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 4, endianness)
+
# Boolean views of the fixed-width integers (width kept, FT type changed).
class boolean8(uint8):
    type = "boolean8"
    ftype = "FT_BOOLEAN"
    disp = "BASE_NONE"

class boolean16(uint16):
    type = "boolean16"
    ftype = "FT_BOOLEAN"
    disp = "BASE_NONE"

class boolean24(uint24):
    type = "boolean24"
    ftype = "FT_BOOLEAN"
    disp = "BASE_NONE"

class boolean32(uint32):
    type = "boolean32"
    ftype = "FT_BOOLEAN"
    disp = "BASE_NONE"
+
class nstring:
    # Marker mixin: length-prefixed (counted) strings.
    pass

class nstring8(Type, nstring):
    """A string of up to (2^8)-1 characters. The first byte
    gives the string length."""

    type = "nstring8"
    ftype = "FT_UINT_STRING"
    disp = "BASE_NONE"
    def __init__(self, abbrev, descr):
        Type.__init__(self, abbrev, descr, 1)

class nstring16(Type, nstring):
    """A string of up to (2^16)-2 characters. The first 2 bytes
    gives the string length."""

    type = "nstring16"
    ftype = "FT_UINT_STRING"
    disp = "BASE_NONE"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 2, endianness)

class nstring32(Type, nstring):
    """A string of up to (2^32)-4 characters. The first 4 bytes
    gives the string length."""

    type = "nstring32"
    ftype = "FT_UINT_STRING"
    disp = "BASE_NONE"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 4, endianness)
+
class fw_string(Type):
    """A fixed-width string of n bytes."""

    type = "fw_string"
    disp = "BASE_NONE"
    ftype = "FT_STRING"

    def __init__(self, abbrev, descr, bytes):
        # bytes -- the fixed width of the string field.
        Type.__init__(self, abbrev, descr, bytes)
+
+
class stringz(Type):
    "NUL-terminated string, with a maximum length"

    type = "stringz"
    disp = "BASE_NONE"
    ftype = "FT_STRINGZ"
    def __init__(self, abbrev, descr):
        # Length is determined at dissection time by the terminating NUL.
        Type.__init__(self, abbrev, descr, PROTO_LENGTH_UNKNOWN)
+
class val_string(Type):
    """Abstract class for val_stringN, where N is number
    of bits that key takes up.  Subclasses supply 'bytes' and
    'value_format'."""

    type = "val_string"
    disp = 'BASE_HEX'

    def __init__(self, abbrev, descr, val_string_array, endianness = ENC_LITTLE_ENDIAN):
        """val_string_array -- list of [numeric value, label] pairs."""
        Type.__init__(self, abbrev, descr, self.bytes, endianness)
        self.values = val_string_array

    def Code(self):
        "Emit the C value_string array, terminated by a { 0, NULL } entry."
        result = "static const value_string %s[] = {\n" \
                % (self.ValuesCName())
        for val_record in self.values:
            value = val_record[0]
            text = val_record[1]
            value_repr = self.value_format % value
            result = result + '    { %s, "%s" },\n' \
                    % (value_repr, text)

        # Terminator entry.  (The old dead assignment to the unused local
        # REC_VAL_STRING_RES has been removed; it also raised NameError
        # whenever self.values was empty.)
        value_repr = self.value_format % 0
        result = result + "    { %s, NULL },\n" % (value_repr)
        result = result + "};\n"
        return result

    def ValuesCName(self):
        "C variable name of the value_string array."
        return "ncp_%s_vals" % (self.abbrev)

    def ValuesName(self):
        "C expression handing the array to the field registration."
        return "VALS(%s)" % (self.ValuesCName())
+
# Concrete val_string widths: each fixes the byte width and the hex format
# used when emitting the C value_string table.
class val_string8(val_string):
    type = "val_string8"
    ftype = "FT_UINT8"
    bytes = 1
    value_format = "0x%02x"

class val_string16(val_string):
    type = "val_string16"
    ftype = "FT_UINT16"
    bytes = 2
    value_format = "0x%04x"

class val_string32(val_string):
    type = "val_string32"
    ftype = "FT_UINT32"
    bytes = 4
    value_format = "0x%08x"
+
class bytes(Type):
    # Fixed-length opaque byte field.
    # NOTE: this class shadows the builtin 'bytes' for the rest of the
    # module; kept as-is because the many field definitions below call it
    # by this name.
    type = 'bytes'
    disp = "BASE_NONE"
    ftype = 'FT_BYTES'

    def __init__(self, abbrev, descr, bytes):
        Type.__init__(self, abbrev, descr, bytes, NA)
+
class nbytes:
    # Marker mixin: length-prefixed (counted) byte strings.
    pass

class nbytes8(Type, nbytes):
    """A series of up to (2^8)-1 bytes. The first byte
    gives the byte-string length."""

    type = "nbytes8"
    ftype = "FT_UINT_BYTES"
    disp = "BASE_NONE"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 1, endianness)

class nbytes16(Type, nbytes):
    """A series of up to (2^16)-2 bytes. The first 2 bytes
    gives the byte-string length."""

    type = "nbytes16"
    ftype = "FT_UINT_BYTES"
    disp = "BASE_NONE"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 2, endianness)

class nbytes32(Type, nbytes):
    """A series of up to (2^32)-4 bytes. The first 4 bytes
    gives the byte-string length."""

    type = "nbytes32"
    ftype = "FT_UINT_BYTES"
    disp = "BASE_NONE"
    def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, 4, endianness)
+
class bf_uint(Type):
    # An unsigned sub-field of a bitfield, selected by a bitmask.
    type = "bf_uint"
    disp = None

    def __init__(self, bitmask, abbrev, descr, endianness=ENC_LITTLE_ENDIAN):
        Type.__init__(self, abbrev, descr, self.bytes, endianness)
        self.bitmask = bitmask

    def Mask(self):
        "Bitmask selecting this sub-field within its parent bitfield."
        return self.bitmask
+
class bf_val_str(bf_uint):
    # A bitmasked sub-field whose values come from a value_string table.
    type = "bf_uint"
    disp = None

    # NOTE(review): 'endiannes' is misspelled but is part of the keyword
    # interface of this constructor; renaming it could break keyword callers.
    def __init__(self, bitmask, abbrev, descr, val_string_array, endiannes=ENC_LITTLE_ENDIAN):
        bf_uint.__init__(self, bitmask, abbrev, descr, endiannes)
        self.values = val_string_array

    def ValuesName(self):
        "C expression handing the value_string array to field registration."
        return "VALS(%s)" % (self.ValuesCName())
+
# Concrete widths of bf_val_str; val_stringN supplies value_format/ValuesCName.
class bf_val_str8(bf_val_str, val_string8):
    type = "bf_val_str8"
    ftype = "FT_UINT8"
    disp = "BASE_HEX"
    bytes = 1

class bf_val_str16(bf_val_str, val_string16):
    type = "bf_val_str16"
    ftype = "FT_UINT16"
    disp = "BASE_HEX"
    bytes = 2

class bf_val_str32(bf_val_str, val_string32):
    type = "bf_val_str32"
    ftype = "FT_UINT32"
    disp = "BASE_HEX"
    bytes = 4
+
class bf_boolean:
    # Marker mixin for boolean bitfield members; parent bitfields use it to
    # type-check their sub-fields against bf_type.
    disp = "BASE_NONE"

# Boolean bitfield members; 'disp' holds the parent field width in bits,
# as Wireshark expects for FT_BOOLEAN bitfield registration.
class bf_boolean8(bf_uint, boolean8, bf_boolean):
    type = "bf_boolean8"
    ftype = "FT_BOOLEAN"
    disp = "8"
    bytes = 1

class bf_boolean16(bf_uint, boolean16, bf_boolean):
    type = "bf_boolean16"
    ftype = "FT_BOOLEAN"
    disp = "16"
    bytes = 2

class bf_boolean24(bf_uint, boolean24, bf_boolean):
    type = "bf_boolean24"
    ftype = "FT_BOOLEAN"
    disp = "24"
    bytes = 3

class bf_boolean32(bf_uint, boolean32, bf_boolean):
    type = "bf_boolean32"
    ftype = "FT_BOOLEAN"
    disp = "32"
    bytes = 4
+
class bitfield(Type):
    """A field whose bits are broken out into named sub-fields.

    Sub-fields are validated (booleans must match the width-specific
    bf_type of the concrete subclass) and presented highest-bitmask first.
    """
    type = "bitfield"
    disp = 'BASE_HEX'

    def __init__(self, vars):
        # Index every sub-field by its bitmask, type-checking booleans.
        by_mask = {}
        for var in vars:
            if isinstance(var, bf_boolean) and not isinstance(var, self.bf_type):
                print("%s must be of type %s" % \
                        (var.Abbreviation(),
                        self.bf_type))
                sys.exit(1)
            by_mask[var.bitmask] = var

        # Present sub-fields in descending bitmask order.
        self.vars = [by_mask[mask] for mask in sorted(by_mask, reverse=True)]
        self.ptvcname = "ncp_%s_bitfield" % (self.abbrev,)
        self.hfname = "hf_ncp_%s" % (self.abbrev,)
        self.sub_ptvc = PTVCBitfield(self.PTVCName(), self.vars)

    def SubVariables(self):
        "The ordered sub-fields of this bitfield."
        return self.vars

    def SubVariablesPTVC(self):
        "The PTVCBitfield describing the sub-fields."
        return self.sub_ptvc

    def PTVCName(self):
        "Name of the generated C sub-PTVC table."
        return self.ptvcname
+
+
# Concrete bitfield widths: each pins the integer base class and the
# matching boolean sub-field type used for validation.
class bitfield8(bitfield, uint8):
    type = "bitfield8"
    ftype = "FT_UINT8"
    bf_type = bf_boolean8

    def __init__(self, abbrev, descr, vars):
        uint8.__init__(self, abbrev, descr)
        bitfield.__init__(self, vars)

class bitfield16(bitfield, uint16):
    type = "bitfield16"
    ftype = "FT_UINT16"
    bf_type = bf_boolean16

    def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
        uint16.__init__(self, abbrev, descr, endianness)
        bitfield.__init__(self, vars)

class bitfield24(bitfield, uint24):
    type = "bitfield24"
    ftype = "FT_UINT24"
    bf_type = bf_boolean24

    def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
        uint24.__init__(self, abbrev, descr, endianness)
        bitfield.__init__(self, vars)

class bitfield32(bitfield, uint32):
    type = "bitfield32"
    ftype = "FT_UINT32"
    bf_type = bf_boolean32

    def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
        uint32.__init__(self, abbrev, descr, endianness)
        bitfield.__init__(self, vars)
+
+#
+# Force the endianness of a field to a non-default value; used in
+# the list of fields of a structure.
+#
+def endian(field, endianness):
+ return [-1, field.Length(), field, endianness, NO_VAR, NO_REPEAT, NO_REQ_COND]
+
+##############################################################################
+# NCP Field Types. Defined in Appendix A of "Programmer's Guide..."
+##############################################################################
+
# NCP field-type instances, "A" names (see Appendix A of the NCP
# "Programmer's Guide").  Each is a reusable Type describing one wire field.
AbortQueueFlag = val_string8("abort_q_flag", "Abort Queue Flag", [
    [ 0x00, "Place at End of Queue" ],
    [ 0x01, "Do Not Place Spool File, Examine Flags" ],
])
AcceptedMaxSize = uint16("accepted_max_size", "Accepted Max Size")
AcceptedMaxSize64 = uint64("accepted_max_size64", "Accepted Max Size")
AccessControl = val_string8("access_control", "Access Control", [
    [ 0x00, "Open for read by this client" ],
    [ 0x01, "Open for write by this client" ],
    [ 0x02, "Deny read requests from other stations" ],
    [ 0x03, "Deny write requests from other stations" ],
    [ 0x04, "File detached" ],
    [ 0x05, "TTS holding detach" ],
    [ 0x06, "TTS holding open" ],
])
AccessDate = uint16("access_date", "Access Date")
AccessDate.NWDate()
AccessMode = bitfield8("access_mode", "Access Mode", [
    bf_boolean8(0x01, "acc_mode_read", "Read Access"),
    bf_boolean8(0x02, "acc_mode_write", "Write Access"),
    bf_boolean8(0x04, "acc_mode_deny_read", "Deny Read Access"),
    bf_boolean8(0x08, "acc_mode_deny_write", "Deny Write Access"),
    bf_boolean8(0x10, "acc_mode_comp", "Compatibility Mode"),
])
AccessPrivileges = bitfield8("access_privileges", "Access Privileges", [
    bf_boolean8(0x01, "acc_priv_read", "Read Privileges (files only)"),
    bf_boolean8(0x02, "acc_priv_write", "Write Privileges (files only)"),
    bf_boolean8(0x04, "acc_priv_open", "Open Privileges (files only)"),
    bf_boolean8(0x08, "acc_priv_create", "Create Privileges (files only)"),
    bf_boolean8(0x10, "acc_priv_delete", "Delete Privileges (files only)"),
    bf_boolean8(0x20, "acc_priv_parent", "Parental Privileges (directories only for creating, deleting, and renaming)"),
    bf_boolean8(0x40, "acc_priv_search", "Search Privileges (directories only)"),
    bf_boolean8(0x80, "acc_priv_modify", "Modify File Status Flags Privileges (files and directories)"),
])
AccessRightsMask = bitfield8("access_rights_mask", "Access Rights", [
    bf_boolean8(0x0001, "acc_rights_read", "Read Rights"),
    bf_boolean8(0x0002, "acc_rights_write", "Write Rights"),
    bf_boolean8(0x0004, "acc_rights_open", "Open Rights"),
    bf_boolean8(0x0008, "acc_rights_create", "Create Rights"),
    bf_boolean8(0x0010, "acc_rights_delete", "Delete Rights"),
    bf_boolean8(0x0020, "acc_rights_parent", "Parental Rights"),
    bf_boolean8(0x0040, "acc_rights_search", "Search Rights"),
    bf_boolean8(0x0080, "acc_rights_modify", "Modify Rights"),
])
AccessRightsMaskWord = bitfield16("access_rights_mask_word", "Access Rights", [
    bf_boolean16(0x0001, "acc_rights1_read", "Read Rights"),
    bf_boolean16(0x0002, "acc_rights1_write", "Write Rights"),
    bf_boolean16(0x0004, "acc_rights1_open", "Open Rights"),
    bf_boolean16(0x0008, "acc_rights1_create", "Create Rights"),
    bf_boolean16(0x0010, "acc_rights1_delete", "Delete Rights"),
    bf_boolean16(0x0020, "acc_rights1_parent", "Parental Rights"),
    bf_boolean16(0x0040, "acc_rights1_search", "Search Rights"),
    bf_boolean16(0x0080, "acc_rights1_modify", "Modify Rights"),
    bf_boolean16(0x0100, "acc_rights1_supervisor", "Supervisor Access Rights"),
])
AccountBalance = uint32("account_balance", "Account Balance")
AccountVersion = uint8("acct_version", "Acct Version")
ActionFlag = bitfield8("action_flag", "Action Flag", [
    bf_boolean8(0x01, "act_flag_open", "Open"),
    bf_boolean8(0x02, "act_flag_replace", "Replace"),
    bf_boolean8(0x10, "act_flag_create", "Create"),
])
ActiveConnBitList = fw_string("active_conn_bit_list", "Active Connection List", 512)
ActiveIndexedFiles = uint16("active_indexed_files", "Active Indexed Files")
ActualMaxBinderyObjects = uint16("actual_max_bindery_objects", "Actual Max Bindery Objects")
ActualMaxIndexedFiles = uint16("actual_max_indexed_files", "Actual Max Indexed Files")
ActualMaxOpenFiles = uint16("actual_max_open_files", "Actual Max Open Files")
ActualMaxSimultaneousTransactions = uint16("actual_max_sim_trans", "Actual Max Simultaneous Transactions")
ActualMaxUsedDirectoryEntries = uint16("actual_max_used_directory_entries", "Actual Max Used Directory Entries")
ActualMaxUsedRoutingBuffers = uint16("actual_max_used_routing_buffers", "Actual Max Used Routing Buffers")
ActualResponseCount = uint16("actual_response_count", "Actual Response Count")
AddNameSpaceAndVol = stringz("add_nm_spc_and_vol", "Add Name Space and Volume")
AFPEntryID = uint32("afp_entry_id", "AFP Entry ID", ENC_BIG_ENDIAN)
AFPEntryID.Display("BASE_HEX")
AllocAvailByte = uint32("alloc_avail_byte", "Bytes Available for Allocation")
AllocateMode = bitfield16("alloc_mode", "Allocate Mode", [
    bf_val_str16(0x0001, "alloc_dir_hdl", "Dir Handle Type",[
        [0x00, "Permanent"],
        [0x01, "Temporary"],
    ]),
    bf_boolean16(0x0002, "alloc_spec_temp_dir_hdl","Special Temporary Directory Handle"),
    bf_boolean16(0x4000, "alloc_reply_lvl2","Reply Level 2"),
    bf_boolean16(0x8000, "alloc_dst_name_spc","Destination Name Space Input Parameter"),
])
AllocationBlockSize = uint32("allocation_block_size", "Allocation Block Size")
AllocFreeCount = uint32("alloc_free_count", "Reclaimable Free Bytes")
ApplicationNumber = uint16("application_number", "Application Number")
ArchivedTime = uint16("archived_time", "Archived Time")
ArchivedTime.NWTime()
ArchivedDate = uint16("archived_date", "Archived Date")
ArchivedDate.NWDate()
ArchiverID = uint32("archiver_id", "Archiver ID", ENC_BIG_ENDIAN)
ArchiverID.Display("BASE_HEX")
AssociatedNameSpace = uint8("associated_name_space", "Associated Name Space")
AttachDuringProcessing = uint16("attach_during_processing", "Attach During Processing")
AttachedIndexedFiles = uint8("attached_indexed_files", "Attached Indexed Files")
AttachWhileProcessingAttach = uint16("attach_while_processing_attach", "Attach While Processing Attach")
Attributes = uint32("attributes", "Attributes")
AttributesDef = bitfield8("attr_def", "Attributes", [
    bf_boolean8(0x01, "att_def_ro", "Read Only"),
    bf_boolean8(0x02, "att_def_hidden", "Hidden"),
    bf_boolean8(0x04, "att_def_system", "System"),
    bf_boolean8(0x08, "att_def_execute", "Execute"),
    bf_boolean8(0x10, "att_def_sub_only", "Subdirectory"),
    bf_boolean8(0x20, "att_def_archive", "Archive"),
    bf_boolean8(0x80, "att_def_shareable", "Shareable"),
])
AttributesDef16 = bitfield16("attr_def_16", "Attributes", [
    bf_boolean16(0x0001, "att_def16_ro", "Read Only"),
    bf_boolean16(0x0002, "att_def16_hidden", "Hidden"),
    bf_boolean16(0x0004, "att_def16_system", "System"),
    bf_boolean16(0x0008, "att_def16_execute", "Execute"),
    bf_boolean16(0x0010, "att_def16_sub_only", "Subdirectory"),
    bf_boolean16(0x0020, "att_def16_archive", "Archive"),
    bf_boolean16(0x0080, "att_def16_shareable", "Shareable"),
    bf_boolean16(0x1000, "att_def16_transaction", "Transactional"),
    bf_boolean16(0x4000, "att_def16_read_audit", "Read Audit"),
    bf_boolean16(0x8000, "att_def16_write_audit", "Write Audit"),
])
AttributesDef32 = bitfield32("attr_def_32", "Attributes", [
    bf_boolean32(0x00000001, "att_def32_ro", "Read Only"),
    bf_boolean32(0x00000002, "att_def32_hidden", "Hidden"),
    bf_boolean32(0x00000004, "att_def32_system", "System"),
    bf_boolean32(0x00000008, "att_def32_execute", "Execute"),
    bf_boolean32(0x00000010, "att_def32_sub_only", "Subdirectory"),
    bf_boolean32(0x00000020, "att_def32_archive", "Archive"),
    bf_boolean32(0x00000040, "att_def32_execute_confirm", "Execute Confirm"),
    bf_boolean32(0x00000080, "att_def32_shareable", "Shareable"),
    bf_val_str32(0x00000700, "att_def32_search", "Search Mode",[
        [0, "Search on all Read Only Opens"],
        [1, "Search on Read Only Opens with no Path"],
        [2, "Shell Default Search Mode"],
        [3, "Search on all Opens with no Path"],
        [4, "Do not Search"],
        [5, "Reserved - Do not Use"],
        [6, "Search on All Opens"],
        [7, "Reserved - Do not Use"],
    ]),
    bf_boolean32(0x00000800, "att_def32_no_suballoc", "No Suballoc"),
    bf_boolean32(0x00001000, "att_def32_transaction", "Transactional"),
    bf_boolean32(0x00004000, "att_def32_read_audit", "Read Audit"),
    bf_boolean32(0x00008000, "att_def32_write_audit", "Write Audit"),
    bf_boolean32(0x00010000, "att_def32_purge", "Immediate Purge"),
    bf_boolean32(0x00020000, "att_def32_reninhibit", "Rename Inhibit"),
    bf_boolean32(0x00040000, "att_def32_delinhibit", "Delete Inhibit"),
    bf_boolean32(0x00080000, "att_def32_cpyinhibit", "Copy Inhibit"),
    bf_boolean32(0x00100000, "att_def32_file_audit", "File Audit"),
    bf_boolean32(0x00200000, "att_def32_reserved", "Reserved"),
    bf_boolean32(0x00400000, "att_def32_data_migrate", "Data Migrated"),
    bf_boolean32(0x00800000, "att_def32_inhibit_dm", "Inhibit Data Migration"),
    bf_boolean32(0x01000000, "att_def32_dm_save_key", "Data Migration Save Key"),
    bf_boolean32(0x02000000, "att_def32_im_comp", "Immediate Compress"),
    bf_boolean32(0x04000000, "att_def32_comp", "Compressed"),
    bf_boolean32(0x08000000, "att_def32_comp_inhibit", "Inhibit Compression"),
    bf_boolean32(0x10000000, "att_def32_reserved2", "Reserved"),
    bf_boolean32(0x20000000, "att_def32_cant_compress", "Can't Compress"),
    bf_boolean32(0x40000000, "att_def32_attr_archive", "Archive Attributes"),
    bf_boolean32(0x80000000, "att_def32_reserved3", "Reserved"),
])
AttributeValidFlag = uint32("attribute_valid_flag", "Attribute Valid Flag")
AuditFileVersionDate = uint16("audit_file_ver_date", "Audit File Version Date")
AuditFileVersionDate.NWDate()
AuditFlag = val_string8("audit_flag", "Audit Flag", [
    [ 0x00, "Do NOT audit object" ],
    [ 0x01, "Audit object" ],
])
AuditHandle = uint32("audit_handle", "Audit File Handle")
AuditHandle.Display("BASE_HEX")
AuditID = uint32("audit_id", "Audit ID", ENC_BIG_ENDIAN)
AuditID.Display("BASE_HEX")
AuditIDType = val_string16("audit_id_type", "Audit ID Type", [
    [ 0x0000, "Volume" ],
    [ 0x0001, "Container" ],
])
AuditVersionDate = uint16("audit_ver_date", "Auditing Version Date")
AuditVersionDate.NWDate()
AvailableBlocks = uint32("available_blocks", "Available Blocks")
AvailableBlocks64 = uint64("available_blocks64", "Available Blocks")
AvailableClusters = uint16("available_clusters", "Available Clusters")
AvailableDirectorySlots = uint16("available_directory_slots", "Available Directory Slots")
AvailableDirEntries = uint32("available_dir_entries", "Available Directory Entries")
AvailableDirEntries64 = uint64("available_dir_entries64", "Available Directory Entries")
AvailableIndexedFiles = uint16("available_indexed_files", "Available Indexed Files")
+
# NCP field-type instances, "B" names.
BackgroundAgedWrites = uint32("background_aged_writes", "Background Aged Writes")
BackgroundDirtyWrites = uint32("background_dirty_writes", "Background Dirty Writes")
BadLogicalConnectionCount = uint16("bad_logical_connection_count", "Bad Logical Connection Count")
BannerName = fw_string("banner_name", "Banner Name", 14)
BaseDirectoryID = uint32("base_directory_id", "Base Directory ID", ENC_BIG_ENDIAN)
BaseDirectoryID.Display("BASE_HEX")
binderyContext = nstring8("bindery_context", "Bindery Context")
BitMap = bytes("bit_map", "Bit Map", 512)
BlockNumber = uint32("block_number", "Block Number")
BlockSize = uint16("block_size", "Block Size")
BlockSizeInSectors = uint32("block_size_in_sectors", "Block Size in Sectors")
BoardInstalled = uint8("board_installed", "Board Installed")
BoardNumber = uint32("board_number", "Board Number")
BoardNumbers = uint32("board_numbers", "Board Numbers")
BufferSize = uint16("buffer_size", "Buffer Size")
BusString = stringz("bus_string", "Bus String")
BusType = val_string8("bus_type", "Bus Type", [
    [0x00, "ISA"],
    [0x01, "Micro Channel" ],
    [0x02, "EISA"],
    [0x04, "PCI"],
    [0x08, "PCMCIA"],
    [0x10, "ISA"],
    [0x14, "ISA/PCI"],
])
BytesActuallyTransferred = uint32("bytes_actually_transferred", "Bytes Actually Transferred")
BytesActuallyTransferred64bit = uint64("bytes_actually_transferred_64", "Bytes Actually Transferred", ENC_LITTLE_ENDIAN)
BytesActuallyTransferred64bit.Display("BASE_DEC")
BytesRead = fw_string("bytes_read", "Bytes Read", 6)
BytesToCopy = uint32("bytes_to_copy", "Bytes to Copy")
BytesToCopy64bit = uint64("bytes_to_copy_64", "Bytes to Copy")
BytesToCopy64bit.Display("BASE_DEC")
BytesWritten = fw_string("bytes_written", "Bytes Written", 6)
+
+# NCP field declarations, "Ca*"-"Cu*". Each call registers one display-filter
+# field: first argument is the filter abbreviation, second the display label.
+# val_string8/val_string16 attach a value_string table; bitfield8/bitfield16
+# attach per-bit boolean subfields (bf_boolean*).
+CacheAllocations = uint32("cache_allocations", "Cache Allocations")
+CacheBlockScrapped = uint16("cache_block_scrapped", "Cache Block Scrapped")
+CacheBufferCount = uint16("cache_buffer_count", "Cache Buffer Count")
+CacheBufferSize = uint16("cache_buffer_size", "Cache Buffer Size")
+CacheFullWriteRequests = uint32("cache_full_write_requests", "Cache Full Write Requests")
+CacheGetRequests = uint32("cache_get_requests", "Cache Get Requests")
+CacheHitOnUnavailableBlock = uint16("cache_hit_on_unavailable_block", "Cache Hit On Unavailable Block")
+CacheHits = uint32("cache_hits", "Cache Hits")
+CacheMisses = uint32("cache_misses", "Cache Misses")
+CachePartialWriteRequests = uint32("cache_partial_write_requests", "Cache Partial Write Requests")
+CacheReadRequests = uint32("cache_read_requests", "Cache Read Requests")
+CacheWriteRequests = uint32("cache_write_requests", "Cache Write Requests")
+CategoryName = stringz("category_name", "Category Name")
+CCFileHandle = uint32("cc_file_handle", "File Handle")
+CCFileHandle.Display("BASE_HEX")
+CCFunction = val_string8("cc_function", "OP-Lock Flag", [
+	[ 0x01, "Clear OP-Lock" ],
+	[ 0x02, "Acknowledge Callback" ],
+	[ 0x03, "Decline Callback" ],
+	[ 0x04, "Level 2" ],
+])
+ChangeBits = bitfield16("change_bits", "Change Bits", [
+	bf_boolean16(0x0001, "change_bits_modify", "Modify Name"),
+	bf_boolean16(0x0002, "change_bits_fatt", "File Attributes"),
+	bf_boolean16(0x0004, "change_bits_cdate", "Creation Date"),
+	bf_boolean16(0x0008, "change_bits_ctime", "Creation Time"),
+	bf_boolean16(0x0010, "change_bits_owner", "Owner ID"),
+	bf_boolean16(0x0020, "change_bits_adate", "Archive Date"),
+	bf_boolean16(0x0040, "change_bits_atime", "Archive Time"),
+	bf_boolean16(0x0080, "change_bits_aid", "Archiver ID"),
+	bf_boolean16(0x0100, "change_bits_udate", "Update Date"),
+	bf_boolean16(0x0200, "change_bits_utime", "Update Time"),
+	bf_boolean16(0x0400, "change_bits_uid", "Update ID"),
+	bf_boolean16(0x0800, "change_bits_acc_date", "Access Date"),
+	bf_boolean16(0x1000, "change_bits_max_acc_mask", "Maximum Access Mask"),
+	bf_boolean16(0x2000, "change_bits_max_space", "Maximum Space"),
+])
+ChannelState = val_string8("channel_state", "Channel State", [
+	[ 0x00, "Channel is running" ],
+	[ 0x01, "Channel is stopping" ],
+	[ 0x02, "Channel is stopped" ],
+	[ 0x03, "Channel is not functional" ],
+])
+ChannelSynchronizationState = val_string8("channel_synchronization_state", "Channel Synchronization State", [
+	[ 0x00, "Channel is not being used" ],
+	[ 0x02, "NetWare is using the channel; no one else wants it" ],
+	[ 0x04, "NetWare is using the channel; someone else wants it" ],
+	[ 0x06, "Someone else is using the channel; NetWare does not need it" ],
+	[ 0x08, "Someone else is using the channel; NetWare needs it" ],
+	[ 0x0A, "Someone else has released the channel; NetWare should use it" ],
+])
+ChargeAmount = uint32("charge_amount", "Charge Amount")
+ChargeInformation = uint32("charge_information", "Charge Information")
+ClientCompFlag = val_string16("client_comp_flag", "Completion Flag", [
+	[ 0x0000, "Successful" ],
+	[ 0x0001, "Illegal Station Number" ],
+	[ 0x0002, "Client Not Logged In" ],
+	[ 0x0003, "Client Not Accepting Messages" ],
+	[ 0x0004, "Client Already has a Message" ],
+	[ 0x0096, "No Alloc Space for the Message" ],
+	[ 0x00fd, "Bad Station Number" ],
+	[ 0x00ff, "Failure" ],
+])
+# ENC_BIG_ENDIAN here overrides the declaration's default byte order.
+ClientIDNumber = uint32("client_id_number", "Client ID Number", ENC_BIG_ENDIAN)
+ClientIDNumber.Display("BASE_HEX")
+ClientList = uint32("client_list", "Client List")
+ClientListCount = uint16("client_list_cnt", "Client List Count")
+ClientListLen = uint8("client_list_len", "Client List Length")
+ClientName = nstring8("client_name", "Client Name")
+ClientRecordArea = fw_string("client_record_area", "Client Record Area", 152)
+ClientStation = uint8("client_station", "Client Station")
+ClientStationLong = uint32("client_station_long", "Client Station")
+ClientTaskNumber = uint8("client_task_number", "Client Task Number")
+ClientTaskNumberLong = uint32("client_task_number_long", "Client Task Number")
+ClusterCount = uint16("cluster_count", "Cluster Count")
+ClustersUsedByDirectories = uint32("clusters_used_by_directories", "Clusters Used by Directories")
+ClustersUsedByExtendedDirectories = uint32("clusters_used_by_extended_dirs", "Clusters Used by Extended Directories")
+ClustersUsedByFAT = uint32("clusters_used_by_fat", "Clusters Used by FAT")
+CodePage = uint32("code_page", "Code Page")
+ComCnts = uint16("com_cnts", "Communication Counters")
+Comment = nstring8("comment", "Comment")
+CommentType = uint16("comment_type", "Comment Type")
+# NOTE(review): filter abbreviation is "ncompletion_code" (leading 'n'),
+# presumably to keep it distinct from another completion-code field -- verify.
+CompletionCode = uint32("ncompletion_code", "Completion Code")
+CompressedDataStreamsCount = uint32("compressed_data_streams_count", "Compressed Data Streams Count")
+CompressedLimboDataStreamsCount = uint32("compressed_limbo_data_streams_count", "Compressed Limbo Data Streams Count")
+CompressedSectors = uint32("compressed_sectors", "Compressed Sectors")
+compressionStage = uint32("compression_stage", "Compression Stage")
+compressVolume = uint32("compress_volume", "Volume Compression")
+ConfigMajorVN = uint8("config_major_vn", "Configuration Major Version Number")
+ConfigMinorVN = uint8("config_minor_vn", "Configuration Minor Version Number")
+ConfigurationDescription = fw_string("configuration_description", "Configuration Description", 80)
+ConfigurationText = fw_string("configuration_text", "Configuration Text", 160)
+ConfiguredMaxBinderyObjects = uint16("configured_max_bindery_objects", "Configured Max Bindery Objects")
+ConfiguredMaxOpenFiles = uint16("configured_max_open_files", "Configured Max Open Files")
+ConfiguredMaxRoutingBuffers = uint16("configured_max_routing_buffers", "Configured Max Routing Buffers")
+ConfiguredMaxSimultaneousTransactions = uint16("cfg_max_simultaneous_transactions", "Configured Max Simultaneous Transactions")
+ConnectedLAN = uint32("connected_lan", "LAN Adapter")
+ConnectionControlBits = bitfield8("conn_ctrl_bits", "Connection Control", [
+	bf_boolean8(0x01, "enable_brdcasts", "Enable Broadcasts"),
+	bf_boolean8(0x02, "enable_personal_brdcasts", "Enable Personal Broadcasts"),
+	bf_boolean8(0x04, "enable_wdog_messages", "Enable Watchdog Message"),
+	bf_boolean8(0x10, "disable_brdcasts", "Disable Broadcasts"),
+	bf_boolean8(0x20, "disable_personal_brdcasts", "Disable Personal Broadcasts"),
+	bf_boolean8(0x40, "disable_wdog_messages", "Disable Watchdog Message"),
+])
+ConnectionListCount = uint32("conn_list_count", "Connection List Count")
+ConnectionList = uint32("connection_list", "Connection List")
+ConnectionNumber = uint32("connection_number", "Connection Number", ENC_BIG_ENDIAN)
+ConnectionNumberList = nstring8("connection_number_list", "Connection Number List")
+ConnectionNumberWord = uint16("conn_number_word", "Connection Number")
+ConnectionNumberByte = uint8("conn_number_byte", "Connection Number")
+ConnectionServiceType = val_string8("connection_service_type","Connection Service Type",[
+	[ 0x01, "CLIB backward Compatibility" ],
+	[ 0x02, "NCP Connection" ],
+	[ 0x03, "NLM Connection" ],
+	[ 0x04, "AFP Connection" ],
+	[ 0x05, "FTAM Connection" ],
+	[ 0x06, "ANCP Connection" ],
+	[ 0x07, "ACP Connection" ],
+	[ 0x08, "SMB Connection" ],
+	[ 0x09, "Winsock Connection" ],
+])
+ConnectionsInUse = uint16("connections_in_use", "Connections In Use")
+ConnectionsMaxUsed = uint16("connections_max_used", "Connections Max Used")
+ConnectionsSupportedMax = uint16("connections_supported_max", "Connections Supported Max")
+ConnectionType = val_string8("connection_type", "Connection Type", [
+	[ 0x00, "Not in use" ],
+	[ 0x02, "NCP" ],
+	[ 0x0b, "UDP (for IP)" ],
+])
+ConnListLen = uint8("conn_list_len", "Connection List Length")
+connList = uint32("conn_list", "Connection List")
+ControlFlags = val_string8("control_flags", "Control Flags", [
+	[ 0x00, "Forced Record Locking is Off" ],
+	[ 0x01, "Forced Record Locking is On" ],
+])
+ControllerDriveNumber = uint8("controller_drive_number", "Controller Drive Number")
+ControllerNumber = uint8("controller_number", "Controller Number")
+ControllerType = uint8("controller_type", "Controller Type")
+Cookie1 = uint32("cookie_1", "Cookie 1")
+Cookie2 = uint32("cookie_2", "Cookie 2")
+Copies = uint8( "copies", "Copies" )
+CoprocessorFlag = uint32("co_processor_flag", "CoProcessor Present Flag")
+CoProcessorString = stringz("co_proc_string", "CoProcessor String")
+CounterMask = val_string8("counter_mask", "Counter Mask", [
+	[ 0x00, "Counter is Valid" ],
+	[ 0x01, "Counter is not Valid" ],
+])
+CPUNumber = uint32("cpu_number", "CPU Number")
+CPUString = stringz("cpu_string", "CPU String")
+CPUType = val_string8("cpu_type", "CPU Type", [
+	[ 0x00, "80386" ],
+	[ 0x01, "80486" ],
+	[ 0x02, "Pentium" ],
+	[ 0x03, "Pentium Pro" ],
+])
+# .NWDate()/.NWTime() mark 16-bit fields for NetWare packed date/time decoding.
+CreationDate = uint16("creation_date", "Creation Date")
+CreationDate.NWDate()
+CreationTime = uint16("creation_time", "Creation Time")
+CreationTime.NWTime()
+CreatorID = uint32("creator_id", "Creator ID", ENC_BIG_ENDIAN)
+CreatorID.Display("BASE_HEX")
+CreatorNameSpaceNumber = val_string8("creator_name_space_number", "Creator Name Space Number", [
+	[ 0x00, "DOS Name Space" ],
+	[ 0x01, "MAC Name Space" ],
+	[ 0x02, "NFS Name Space" ],
+	[ 0x04, "Long Name Space" ],
+])
+CreditLimit = uint32("credit_limit", "Credit Limit")
+CtrlFlags = val_string16("ctrl_flags", "Control Flags", [
+	[ 0x0000, "Do Not Return File Name" ],
+	[ 0x0001, "Return File Name" ],
+])
+curCompBlks = uint32("cur_comp_blks", "Current Compression Blocks")
+curInitialBlks = uint32("cur_initial_blks", "Current Initial Blocks")
+curIntermediateBlks = uint32("cur_inter_blks", "Current Intermediate Blocks")
+CurNumOfRTags = uint32("cur_num_of_r_tags", "Current Number of Resource Tags")
+CurrentBlockBeingDecompressed = uint32("cur_blk_being_dcompress", "Current Block Being Decompressed")
+CurrentChangedFATs = uint16("current_changed_fats", "Current Changed FAT Entries")
+CurrentEntries = uint32("current_entries", "Current Entries")
+CurrentFormType = uint8( "current_form_type", "Current Form Type" )
+CurrentLFSCounters = uint32("current_lfs_counters", "Current LFS Counters")
+CurrentlyUsedRoutingBuffers = uint16("currently_used_routing_buffers", "Currently Used Routing Buffers")
+CurrentOpenFiles = uint16("current_open_files", "Current Open Files")
+CurrentReferenceID = uint16("curr_ref_id", "Current Reference ID")
+CurrentServers = uint32("current_servers", "Current Servers")
+CurrentServerTime = uint32("current_server_time", "Time Elapsed Since Server Was Brought Up")
+CurrentSpace = uint32("current_space", "Current Space")
+CurrentTransactionCount = uint32("current_trans_count", "Current Transaction Count")
+CurrentUsedBinderyObjects = uint16("current_used_bindery_objects", "Current Used Bindery Objects")
+CurrentUsedDynamicSpace = uint32("current_used_dynamic_space", "Current Used Dynamic Space")
+CustomCnts = uint32("custom_cnts", "Custom Counters")
+CustomCount = uint32("custom_count", "Custom Count")
+CustomCounters = uint32("custom_counters", "Custom Counters")
+CustomString = nstring8("custom_string", "Custom String")
+CustomVariableValue = uint32("custom_var_value", "Custom Variable Value")
+
+# NCP field declarations, "Da*"-"Dis*". Same pattern as above: filter
+# abbreviation first, display label second; .Display() sets the hf base.
+Data = nstring8("data", "Data")
+Data64 = stringz("data64", "Data")
+DataForkFirstFAT = uint32("data_fork_first_fat", "Data Fork First FAT Entry")
+DataForkLen = uint32("data_fork_len", "Data Fork Len")
+DataForkSize = uint32("data_fork_size", "Data Fork Size")
+DataSize = uint32("data_size", "Data Size")
+DataStream = val_string8("data_stream", "Data Stream", [
+	[ 0x00, "Resource Fork or DOS" ],
+	[ 0x01, "Data Fork" ],
+])
+DataStreamFATBlocks = uint32("data_stream_fat_blks", "Data Stream FAT Blocks")
+DataStreamName = nstring8("data_stream_name", "Data Stream Name")
+DataStreamNumber = uint8("data_stream_number", "Data Stream Number")
+DataStreamNumberLong = uint32("data_stream_num_long", "Data Stream Number")
+DataStreamsCount = uint32("data_streams_count", "Data Streams Count")
+DataStreamSize = uint32("data_stream_size", "Size")
+DataStreamSize64 = uint64("data_stream_size_64", "Size")
+DataStreamSpaceAlloc = uint32( "data_stream_space_alloc", "Space Allocated for Data Stream" )
+DataTypeFlag = val_string8("data_type_flag", "Data Type Flag", [
+	[ 0x00, "ASCII Data" ],
+	[ 0x01, "UTF8 Data" ],
+])
+Day = uint8("s_day", "Day")
+DayOfWeek = val_string8("s_day_of_week", "Day of Week", [
+	[ 0x00, "Sunday" ],
+	[ 0x01, "Monday" ],
+	[ 0x02, "Tuesday" ],
+	[ 0x03, "Wednesday" ],
+	[ 0x04, "Thursday" ],
+	[ 0x05, "Friday" ],
+	[ 0x06, "Saturday" ],
+])
+DeadMirrorTable = bytes("dead_mirror_table", "Dead Mirror Table", 32)
+DefinedDataStreams = uint8("defined_data_streams", "Defined Data Streams")
+DefinedNameSpaces = uint8("defined_name_spaces", "Defined Name Spaces")
+DeletedDate = uint16("deleted_date", "Deleted Date")
+DeletedDate.NWDate()
+DeletedFileTime = uint32( "deleted_file_time", "Deleted File Time")
+DeletedFileTime.Display("BASE_HEX")
+DeletedTime = uint16("deleted_time", "Deleted Time")
+DeletedTime.NWTime()
+DeletedID = uint32( "delete_id", "Deleted ID", ENC_BIG_ENDIAN)
+DeletedID.Display("BASE_HEX")
+DeleteExistingFileFlag = val_string8("delete_existing_file_flag", "Delete Existing File Flag", [
+	[ 0x00, "Do Not Delete Existing File" ],
+	[ 0x01, "Delete Existing File" ],
+])
+DenyReadCount = uint16("deny_read_count", "Deny Read Count")
+DenyWriteCount = uint16("deny_write_count", "Deny Write Count")
+DescriptionStrings = fw_string("description_string", "Description", 100)
+DesiredAccessRights = bitfield16("desired_access_rights", "Desired Access Rights", [
+	bf_boolean16(0x0001, "dsired_acc_rights_read_o", "Read Only"),
+	bf_boolean16(0x0002, "dsired_acc_rights_write_o", "Write Only"),
+	bf_boolean16(0x0004, "dsired_acc_rights_deny_r", "Deny Read"),
+	bf_boolean16(0x0008, "dsired_acc_rights_deny_w", "Deny Write"),
+	bf_boolean16(0x0010, "dsired_acc_rights_compat", "Compatibility"),
+	bf_boolean16(0x0040, "dsired_acc_rights_w_thru", "File Write Through"),
+	bf_boolean16(0x0400, "dsired_acc_rights_del_file_cls", "Delete File Close"),
+])
+DesiredResponseCount = uint16("desired_response_count", "Desired Response Count")
+DestDirHandle = uint8("dest_dir_handle", "Destination Directory Handle")
+DestNameSpace = val_string8("dest_name_space", "Destination Name Space", [
+	[ 0x00, "DOS Name Space" ],
+	[ 0x01, "MAC Name Space" ],
+	[ 0x02, "NFS Name Space" ],
+	[ 0x04, "Long Name Space" ],
+])
+DestPathComponentCount = uint8("dest_component_count", "Destination Path Component Count")
+DestPath = nstring8("dest_path", "Destination Path")
+DestPath16 = nstring16("dest_path_16", "Destination Path")
+DetachDuringProcessing = uint16("detach_during_processing", "Detach During Processing")
+DetachForBadConnectionNumber = uint16("detach_for_bad_connection_number", "Detach For Bad Connection Number")
+DirHandle = uint8("dir_handle", "Directory Handle")
+DirHandleName = uint8("dir_handle_name", "Handle Name")
+DirHandleLong = uint32("dir_handle_long", "Directory Handle")
+DirHandle64 = uint64("dir_handle64", "Directory Handle")
+DirectoryAccessRights = uint8("directory_access_rights", "Directory Access Rights")
+#
+# XXX - what do the bits mean here?
+#
+DirectoryAttributes = uint8("directory_attributes", "Directory Attributes")
+DirectoryBase = uint32("dir_base", "Directory Base")
+DirectoryBase.Display("BASE_HEX")
+DirectoryCount = uint16("dir_count", "Directory Count")
+DirectoryEntryNumber = uint32("directory_entry_number", "Directory Entry Number")
+DirectoryEntryNumber.Display('BASE_HEX')
+DirectoryEntryNumberWord = uint16("directory_entry_number_word", "Directory Entry Number")
+DirectoryID = uint16("directory_id", "Directory ID", ENC_BIG_ENDIAN)
+DirectoryID.Display("BASE_HEX")
+DirectoryName = fw_string("directory_name", "Directory Name",12)
+DirectoryName14 = fw_string("directory_name_14", "Directory Name", 14)
+DirectoryNameLen = uint8("directory_name_len", "Directory Name Length")
+DirectoryNumber = uint32("directory_number", "Directory Number")
+DirectoryNumber.Display("BASE_HEX")
+DirectoryPath = fw_string("directory_path", "Directory Path", 16)
+DirectoryServicesObjectID = uint32("directory_services_object_id", "Directory Services Object ID")
+DirectoryServicesObjectID.Display("BASE_HEX")
+DirectoryStamp = uint16("directory_stamp", "Directory Stamp (0xD1D1)")
+DirtyCacheBuffers = uint16("dirty_cache_buffers", "Dirty Cache Buffers")
+DiskChannelNumber = uint8("disk_channel_number", "Disk Channel Number")
+DiskChannelTable = val_string8("disk_channel_table", "Disk Channel Table", [
+	[ 0x01, "XT" ],
+	[ 0x02, "AT" ],
+	[ 0x03, "SCSI" ],
+	[ 0x04, "Disk Coprocessor" ],
+])
+DiskSpaceLimit = uint32("disk_space_limit", "Disk Space Limit")
+DiskSpaceLimit64 = uint64("data_stream_size_64", "Size")
+# NCP field declarations, "DM*"-"Du*". DstEAFlags encodes three orthogonal
+# option groups (handle type, close-on-error, information level) flattened
+# into one value_string table, hence the long enumeration below.
+DMAChannelsUsed = uint32("dma_channels_used", "DMA Channels Used")
+DMInfoEntries = uint32("dm_info_entries", "DM Info Entries")
+DMInfoLevel = val_string8("dm_info_level", "DM Info Level", [
+	[ 0x00, "Return Detailed DM Support Module Information" ],
+	[ 0x01, "Return Number of DM Support Modules" ],
+	[ 0x02, "Return DM Support Modules Names" ],
+])
+DMFlags = val_string8("dm_flags", "DM Flags", [
+	[ 0x00, "OnLine Media" ],
+	[ 0x01, "OffLine Media" ],
+])
+DMmajorVersion = uint32("dm_major_version", "DM Major Version")
+DMminorVersion = uint32("dm_minor_version", "DM Minor Version")
+DMPresentFlag = val_string8("dm_present_flag", "Data Migration Present Flag", [
+	[ 0x00, "Data Migration NLM is not loaded" ],
+	[ 0x01, "Data Migration NLM has been loaded and is running" ],
+])
+DOSDirectoryBase = uint32("dos_directory_base", "DOS Directory Base")
+DOSDirectoryBase.Display("BASE_HEX")
+DOSDirectoryEntry = uint32("dos_directory_entry", "DOS Directory Entry")
+DOSDirectoryEntry.Display("BASE_HEX")
+DOSDirectoryEntryNumber = uint32("dos_directory_entry_number", "DOS Directory Entry Number")
+DOSDirectoryEntryNumber.Display('BASE_HEX')
+DOSFileAttributes = uint8("dos_file_attributes", "DOS File Attributes")
+DOSParentDirectoryEntry = uint32("dos_parent_directory_entry", "DOS Parent Directory Entry")
+DOSParentDirectoryEntry.Display('BASE_HEX')
+DOSSequence = uint32("dos_sequence", "DOS Sequence")
+DriveCylinders = uint16("drive_cylinders", "Drive Cylinders")
+DriveDefinitionString = fw_string("drive_definition_string", "Drive Definition", 64)
+DriveHeads = uint8("drive_heads", "Drive Heads")
+DriveMappingTable = bytes("drive_mapping_table", "Drive Mapping Table", 32)
+DriveMirrorTable = bytes("drive_mirror_table", "Drive Mirror Table", 32)
+DriverBoardName = stringz("driver_board_name", "Driver Board Name")
+DriveRemovableFlag = val_string8("drive_removable_flag", "Drive Removable Flag", [
+	[ 0x00, "Nonremovable" ],
+	[ 0xff, "Removable" ],
+])
+DriverLogicalName = stringz("driver_log_name", "Driver Logical Name")
+DriverShortName = stringz("driver_short_name", "Driver Short Name")
+DriveSize = uint32("drive_size", "Drive Size")
+DstEAFlags = val_string16("dst_ea_flags", "Destination EA Flags", [
+	[ 0x0000, "Return EAHandle,Information Level 0" ],
+	[ 0x0001, "Return NetWareHandle,Information Level 0" ],
+	[ 0x0002, "Return Volume/Directory Number,Information Level 0" ],
+	[ 0x0004, "Return EAHandle,Close Handle on Error,Information Level 0" ],
+	[ 0x0005, "Return NetWareHandle,Close Handle on Error,Information Level 0" ],
+	[ 0x0006, "Return Volume/Directory Number,Close Handle on Error,Information Level 0" ],
+	[ 0x0010, "Return EAHandle,Information Level 1" ],
+	[ 0x0011, "Return NetWareHandle,Information Level 1" ],
+	[ 0x0012, "Return Volume/Directory Number,Information Level 1" ],
+	[ 0x0014, "Return EAHandle,Close Handle on Error,Information Level 1" ],
+	[ 0x0015, "Return NetWareHandle,Close Handle on Error,Information Level 1" ],
+	[ 0x0016, "Return Volume/Directory Number,Close Handle on Error,Information Level 1" ],
+	[ 0x0020, "Return EAHandle,Information Level 2" ],
+	[ 0x0021, "Return NetWareHandle,Information Level 2" ],
+	[ 0x0022, "Return Volume/Directory Number,Information Level 2" ],
+	[ 0x0024, "Return EAHandle,Close Handle on Error,Information Level 2" ],
+	[ 0x0025, "Return NetWareHandle,Close Handle on Error,Information Level 2" ],
+	[ 0x0026, "Return Volume/Directory Number,Close Handle on Error,Information Level 2" ],
+	[ 0x0030, "Return EAHandle,Information Level 3" ],
+	[ 0x0031, "Return NetWareHandle,Information Level 3" ],
+	[ 0x0032, "Return Volume/Directory Number,Information Level 3" ],
+	[ 0x0034, "Return EAHandle,Close Handle on Error,Information Level 3" ],
+	[ 0x0035, "Return NetWareHandle,Close Handle on Error,Information Level 3" ],
+	[ 0x0036, "Return Volume/Directory Number,Close Handle on Error,Information Level 3" ],
+	[ 0x0040, "Return EAHandle,Information Level 4" ],
+	[ 0x0041, "Return NetWareHandle,Information Level 4" ],
+	[ 0x0042, "Return Volume/Directory Number,Information Level 4" ],
+	[ 0x0044, "Return EAHandle,Close Handle on Error,Information Level 4" ],
+	[ 0x0045, "Return NetWareHandle,Close Handle on Error,Information Level 4" ],
+	[ 0x0046, "Return Volume/Directory Number,Close Handle on Error,Information Level 4" ],
+	[ 0x0050, "Return EAHandle,Information Level 5" ],
+	[ 0x0051, "Return NetWareHandle,Information Level 5" ],
+	[ 0x0052, "Return Volume/Directory Number,Information Level 5" ],
+	[ 0x0054, "Return EAHandle,Close Handle on Error,Information Level 5" ],
+	[ 0x0055, "Return NetWareHandle,Close Handle on Error,Information Level 5" ],
+	[ 0x0056, "Return Volume/Directory Number,Close Handle on Error,Information Level 5" ],
+	[ 0x0060, "Return EAHandle,Information Level 6" ],
+	[ 0x0061, "Return NetWareHandle,Information Level 6" ],
+	[ 0x0062, "Return Volume/Directory Number,Information Level 6" ],
+	[ 0x0064, "Return EAHandle,Close Handle on Error,Information Level 6" ],
+	[ 0x0065, "Return NetWareHandle,Close Handle on Error,Information Level 6" ],
+	[ 0x0066, "Return Volume/Directory Number,Close Handle on Error,Information Level 6" ],
+	[ 0x0070, "Return EAHandle,Information Level 7" ],
+	[ 0x0071, "Return NetWareHandle,Information Level 7" ],
+	[ 0x0072, "Return Volume/Directory Number,Information Level 7" ],
+	[ 0x0074, "Return EAHandle,Close Handle on Error,Information Level 7" ],
+	[ 0x0075, "Return NetWareHandle,Close Handle on Error,Information Level 7" ],
+	[ 0x0076, "Return Volume/Directory Number,Close Handle on Error,Information Level 7" ],
+	[ 0x0080, "Return EAHandle,Information Level 0,Immediate Close Handle" ],
+	[ 0x0081, "Return NetWareHandle,Information Level 0,Immediate Close Handle" ],
+	[ 0x0082, "Return Volume/Directory Number,Information Level 0,Immediate Close Handle" ],
+	[ 0x0084, "Return EAHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
+	[ 0x0085, "Return NetWareHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
+	[ 0x0086, "Return Volume/Directory Number,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
+	[ 0x0090, "Return EAHandle,Information Level 1,Immediate Close Handle" ],
+	[ 0x0091, "Return NetWareHandle,Information Level 1,Immediate Close Handle" ],
+	[ 0x0092, "Return Volume/Directory Number,Information Level 1,Immediate Close Handle" ],
+	[ 0x0094, "Return EAHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
+	[ 0x0095, "Return NetWareHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
+	[ 0x0096, "Return Volume/Directory Number,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
+	[ 0x00a0, "Return EAHandle,Information Level 2,Immediate Close Handle" ],
+	[ 0x00a1, "Return NetWareHandle,Information Level 2,Immediate Close Handle" ],
+	[ 0x00a2, "Return Volume/Directory Number,Information Level 2,Immediate Close Handle" ],
+	[ 0x00a4, "Return EAHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
+	[ 0x00a5, "Return NetWareHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
+	[ 0x00a6, "Return Volume/Directory Number,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
+	[ 0x00b0, "Return EAHandle,Information Level 3,Immediate Close Handle" ],
+	[ 0x00b1, "Return NetWareHandle,Information Level 3,Immediate Close Handle" ],
+	[ 0x00b2, "Return Volume/Directory Number,Information Level 3,Immediate Close Handle" ],
+	[ 0x00b4, "Return EAHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
+	[ 0x00b5, "Return NetWareHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
+	[ 0x00b6, "Return Volume/Directory Number,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
+	[ 0x00c0, "Return EAHandle,Information Level 4,Immediate Close Handle" ],
+	[ 0x00c1, "Return NetWareHandle,Information Level 4,Immediate Close Handle" ],
+	[ 0x00c2, "Return Volume/Directory Number,Information Level 4,Immediate Close Handle" ],
+	[ 0x00c4, "Return EAHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
+	[ 0x00c5, "Return NetWareHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
+	[ 0x00c6, "Return Volume/Directory Number,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
+	[ 0x00d0, "Return EAHandle,Information Level 5,Immediate Close Handle" ],
+	[ 0x00d1, "Return NetWareHandle,Information Level 5,Immediate Close Handle" ],
+	[ 0x00d2, "Return Volume/Directory Number,Information Level 5,Immediate Close Handle" ],
+	[ 0x00d4, "Return EAHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
+	[ 0x00d5, "Return NetWareHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
+	[ 0x00d6, "Return Volume/Directory Number,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
+	[ 0x00e0, "Return EAHandle,Information Level 6,Immediate Close Handle" ],
+	[ 0x00e1, "Return NetWareHandle,Information Level 6,Immediate Close Handle" ],
+	[ 0x00e2, "Return Volume/Directory Number,Information Level 6,Immediate Close Handle" ],
+	[ 0x00e4, "Return EAHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
+	[ 0x00e5, "Return NetWareHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
+	[ 0x00e6, "Return Volume/Directory Number,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
+	[ 0x00f0, "Return EAHandle,Information Level 7,Immediate Close Handle" ],
+	[ 0x00f1, "Return NetWareHandle,Information Level 7,Immediate Close Handle" ],
+	[ 0x00f2, "Return Volume/Directory Number,Information Level 7,Immediate Close Handle" ],
+	[ 0x00f4, "Return EAHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
+	[ 0x00f5, "Return NetWareHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
+	[ 0x00f6, "Return Volume/Directory Number,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
+])
+dstNSIndicator = val_string16("dst_ns_indicator", "Destination Name Space Indicator", [
+	[ 0x0000, "Return Source Name Space Information" ],
+	[ 0x0001, "Return Destination Name Space Information" ],
+])
+DstQueueID = uint32("dst_queue_id", "Destination Queue ID")
+DuplicateRepliesSent = uint16("duplicate_replies_sent", "Duplicate Replies Sent")
+DuplicateRepliesSent = uint16("duplicate_replies_sent", "Duplicate Replies Sent")
+
+# NCP field declarations, extended-attribute (EA) group: access bitfield,
+# counters, and the EA-specific completion/error code table.
+EAAccessFlag = bitfield16("ea_access_flag", "EA Access Flag", [
+	bf_boolean16(0x0001, "ea_permanent_memory", "Permanent Memory"),
+	bf_boolean16(0x0002, "ea_deep_freeze", "Deep Freeze"),
+	bf_boolean16(0x0004, "ea_in_progress", "In Progress"),
+	bf_boolean16(0x0008, "ea_header_being_enlarged", "Header Being Enlarged"),
+	bf_boolean16(0x0010, "ea_new_tally_used", "New Tally Used"),
+	bf_boolean16(0x0020, "ea_tally_need_update", "Tally Need Update"),
+	bf_boolean16(0x0040, "ea_score_card_present", "Score Card Present"),
+	bf_boolean16(0x0080, "ea_need_bit_flag", "EA Need Bit Flag"),
+	bf_boolean16(0x0100, "ea_write_privileges", "Write Privileges"),
+	bf_boolean16(0x0200, "ea_read_privileges", "Read Privileges"),
+	bf_boolean16(0x0400, "ea_delete_privileges", "Delete Privileges"),
+	bf_boolean16(0x0800, "ea_system_ea_only", "System EA Only"),
+	bf_boolean16(0x1000, "ea_write_in_progress", "Write In Progress"),
+])
+EABytesWritten = uint32("ea_bytes_written", "Bytes Written")
+EACount = uint32("ea_count", "Count")
+EADataSize = uint32("ea_data_size", "Data Size")
+EADataSizeDuplicated = uint32("ea_data_size_duplicated", "Data Size Duplicated")
+EADuplicateCount = uint32("ea_duplicate_count", "Duplicate Count")
+EAErrorCodes = val_string16("ea_error_codes", "EA Error Codes", [
+	[ 0x0000, "SUCCESSFUL" ],
+	[ 0x00c8, "ERR_MISSING_EA_KEY" ],
+	[ 0x00c9, "ERR_EA_NOT_FOUND" ],
+	[ 0x00ca, "ERR_INVALID_EA_HANDLE_TYPE" ],
+	[ 0x00cb, "ERR_EA_NO_KEY_NO_DATA" ],
+	[ 0x00cc, "ERR_EA_NUMBER_MISMATCH" ],
+	[ 0x00cd, "ERR_EXTENT_NUMBER_OUT_OF_RANGE" ],
+	[ 0x00ce, "ERR_EA_BAD_DIR_NUM" ],
+	[ 0x00cf, "ERR_INVALID_EA_HANDLE" ],
+	[ 0x00d0, "ERR_EA_POSITION_OUT_OF_RANGE" ],
+	[ 0x00d1, "ERR_EA_ACCESS_DENIED" ],
+	[ 0x00d2, "ERR_DATA_PAGE_ODD_SIZE" ],
+	[ 0x00d3, "ERR_EA_VOLUME_NOT_MOUNTED" ],
+	[ 0x00d4, "ERR_BAD_PAGE_BOUNDARY" ],
+	[ 0x00d5, "ERR_INSPECT_FAILURE" ],
+	[ 0x00d6, "ERR_EA_ALREADY_CLAIMED" ],
+	[ 0x00d7, "ERR_ODD_BUFFER_SIZE" ],
+	[ 0x00d8, "ERR_NO_SCORECARDS" ],
+	[ 0x00d9, "ERR_BAD_EDS_SIGNATURE" ],
+	[ 0x00da, "ERR_EA_SPACE_LIMIT" ],
+	[ 0x00db, "ERR_EA_KEY_CORRUPT" ],
+	[ 0x00dc, "ERR_EA_KEY_LIMIT" ],
+	[ 0x00dd, "ERR_TALLY_CORRUPT" ],
+])
+EAFlags = val_string16("ea_flags", "EA Flags", [
+ [ 0x0000, "Return EAHandle,Information Level 0" ],
+ [ 0x0001, "Return NetWareHandle,Information Level 0" ],
+ [ 0x0002, "Return Volume/Directory Number,Information Level 0" ],
+ [ 0x0004, "Return EAHandle,Close Handle on Error,Information Level 0" ],
+ [ 0x0005, "Return NetWareHandle,Close Handle on Error,Information Level 0" ],
+ [ 0x0006, "Return Volume/Directory Number,Close Handle on Error,Information Level 0" ],
+ [ 0x0010, "Return EAHandle,Information Level 1" ],
+ [ 0x0011, "Return NetWareHandle,Information Level 1" ],
+ [ 0x0012, "Return Volume/Directory Number,Information Level 1" ],
+ [ 0x0014, "Return EAHandle,Close Handle on Error,Information Level 1" ],
+ [ 0x0015, "Return NetWareHandle,Close Handle on Error,Information Level 1" ],
+ [ 0x0016, "Return Volume/Directory Number,Close Handle on Error,Information Level 1" ],
+ [ 0x0020, "Return EAHandle,Information Level 2" ],
+ [ 0x0021, "Return NetWareHandle,Information Level 2" ],
+ [ 0x0022, "Return Volume/Directory Number,Information Level 2" ],
+ [ 0x0024, "Return EAHandle,Close Handle on Error,Information Level 2" ],
+ [ 0x0025, "Return NetWareHandle,Close Handle on Error,Information Level 2" ],
+ [ 0x0026, "Return Volume/Directory Number,Close Handle on Error,Information Level 2" ],
+ [ 0x0030, "Return EAHandle,Information Level 3" ],
+ [ 0x0031, "Return NetWareHandle,Information Level 3" ],
+ [ 0x0032, "Return Volume/Directory Number,Information Level 3" ],
+ [ 0x0034, "Return EAHandle,Close Handle on Error,Information Level 3" ],
+ [ 0x0035, "Return NetWareHandle,Close Handle on Error,Information Level 3" ],
+ [ 0x0036, "Return Volume/Directory Number,Close Handle on Error,Information Level 3" ],
+ [ 0x0040, "Return EAHandle,Information Level 4" ],
+ [ 0x0041, "Return NetWareHandle,Information Level 4" ],
+ [ 0x0042, "Return Volume/Directory Number,Information Level 4" ],
+ [ 0x0044, "Return EAHandle,Close Handle on Error,Information Level 4" ],
+ [ 0x0045, "Return NetWareHandle,Close Handle on Error,Information Level 4" ],
+ [ 0x0046, "Return Volume/Directory Number,Close Handle on Error,Information Level 4" ],
+ [ 0x0050, "Return EAHandle,Information Level 5" ],
+ [ 0x0051, "Return NetWareHandle,Information Level 5" ],
+ [ 0x0052, "Return Volume/Directory Number,Information Level 5" ],
+ [ 0x0054, "Return EAHandle,Close Handle on Error,Information Level 5" ],
+ [ 0x0055, "Return NetWareHandle,Close Handle on Error,Information Level 5" ],
+ [ 0x0056, "Return Volume/Directory Number,Close Handle on Error,Information Level 5" ],
+ [ 0x0060, "Return EAHandle,Information Level 6" ],
+ [ 0x0061, "Return NetWareHandle,Information Level 6" ],
+ [ 0x0062, "Return Volume/Directory Number,Information Level 6" ],
+ [ 0x0064, "Return EAHandle,Close Handle on Error,Information Level 6" ],
+ [ 0x0065, "Return NetWareHandle,Close Handle on Error,Information Level 6" ],
+ [ 0x0066, "Return Volume/Directory Number,Close Handle on Error,Information Level 6" ],
+ [ 0x0070, "Return EAHandle,Information Level 7" ],
+ [ 0x0071, "Return NetWareHandle,Information Level 7" ],
+ [ 0x0072, "Return Volume/Directory Number,Information Level 7" ],
+ [ 0x0074, "Return EAHandle,Close Handle on Error,Information Level 7" ],
+ [ 0x0075, "Return NetWareHandle,Close Handle on Error,Information Level 7" ],
+ [ 0x0076, "Return Volume/Directory Number,Close Handle on Error,Information Level 7" ],
+ [ 0x0080, "Return EAHandle,Information Level 0,Immediate Close Handle" ],
+ [ 0x0081, "Return NetWareHandle,Information Level 0,Immediate Close Handle" ],
+ [ 0x0082, "Return Volume/Directory Number,Information Level 0,Immediate Close Handle" ],
+ [ 0x0084, "Return EAHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
+ [ 0x0085, "Return NetWareHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
+ [ 0x0086, "Return Volume/Directory Number,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
+ [ 0x0090, "Return EAHandle,Information Level 1,Immediate Close Handle" ],
+ [ 0x0091, "Return NetWareHandle,Information Level 1,Immediate Close Handle" ],
+ [ 0x0092, "Return Volume/Directory Number,Information Level 1,Immediate Close Handle" ],
+ [ 0x0094, "Return EAHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
+ [ 0x0095, "Return NetWareHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
+ [ 0x0096, "Return Volume/Directory Number,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
+ [ 0x00a0, "Return EAHandle,Information Level 2,Immediate Close Handle" ],
+ [ 0x00a1, "Return NetWareHandle,Information Level 2,Immediate Close Handle" ],
+ [ 0x00a2, "Return Volume/Directory Number,Information Level 2,Immediate Close Handle" ],
+ [ 0x00a4, "Return EAHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
+ [ 0x00a5, "Return NetWareHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
+ [ 0x00a6, "Return Volume/Directory Number,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
+ [ 0x00b0, "Return EAHandle,Information Level 3,Immediate Close Handle" ],
+ [ 0x00b1, "Return NetWareHandle,Information Level 3,Immediate Close Handle" ],
+ [ 0x00b2, "Return Volume/Directory Number,Information Level 3,Immediate Close Handle" ],
+ [ 0x00b4, "Return EAHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
+ [ 0x00b5, "Return NetWareHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
+ [ 0x00b6, "Return Volume/Directory Number,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
+ [ 0x00c0, "Return EAHandle,Information Level 4,Immediate Close Handle" ],
+ [ 0x00c1, "Return NetWareHandle,Information Level 4,Immediate Close Handle" ],
+ [ 0x00c2, "Return Volume/Directory Number,Information Level 4,Immediate Close Handle" ],
+ [ 0x00c4, "Return EAHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
+ [ 0x00c5, "Return NetWareHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
+ [ 0x00c6, "Return Volume/Directory Number,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
+ [ 0x00d0, "Return EAHandle,Information Level 5,Immediate Close Handle" ],
+ [ 0x00d1, "Return NetWareHandle,Information Level 5,Immediate Close Handle" ],
+ [ 0x00d2, "Return Volume/Directory Number,Information Level 5,Immediate Close Handle" ],
+ [ 0x00d4, "Return EAHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
+ [ 0x00d5, "Return NetWareHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
+ [ 0x00d6, "Return Volume/Directory Number,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
+ [ 0x00e0, "Return EAHandle,Information Level 6,Immediate Close Handle" ],
+ [ 0x00e1, "Return NetWareHandle,Information Level 6,Immediate Close Handle" ],
+ [ 0x00e2, "Return Volume/Directory Number,Information Level 6,Immediate Close Handle" ],
+ [ 0x00e4, "Return EAHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
+ [ 0x00e5, "Return NetWareHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
+ [ 0x00e6, "Return Volume/Directory Number,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
+ [ 0x00f0, "Return EAHandle,Information Level 7,Immediate Close Handle" ],
+ [ 0x00f1, "Return NetWareHandle,Information Level 7,Immediate Close Handle" ],
+ [ 0x00f2, "Return Volume/Directory Number,Information Level 7,Immediate Close Handle" ],
+ [ 0x00f4, "Return EAHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
+ [ 0x00f5, "Return NetWareHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
+ [ 0x00f6, "Return Volume/Directory Number,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
+])
+# --- Extended Attribute (EA) fields ---------------------------------------
+# Handle values are rendered in hex.
+EAHandle = uint32("ea_handle", "EA Handle")
+EAHandle.Display("BASE_HEX")
+# Overloaded field: interpretation (handle vs. NetWare handle vs. volume)
+# is selected by the accompanying EAFlags value.
+EAHandleOrNetWareHandleOrVolume = uint32("ea_handle_or_netware_handle_or_volume", "EAHandle or NetWare Handle or Volume (see EAFlags)")
+EAHandleOrNetWareHandleOrVolume.Display("BASE_HEX")
+EAKey = nstring16("ea_key", "EA Key")
+EAKeySize = uint32("ea_key_size", "Key Size")
+EAKeySizeDuplicated = uint32("ea_key_size_duplicated", "Key Size Duplicated")
+EAValue = nstring16("ea_value", "EA Value")
+EAValueRep = fw_string("ea_value_rep", "EA Value", 1)
+EAValueLength = uint16("ea_value_length", "Value Length")
+EchoSocket = uint16("echo_socket", "Echo Socket")
+EchoSocket.Display('BASE_HEX')
+# Per-bit trustee rights byte (one flag per NetWare right).
+EffectiveRights = bitfield8("effective_rights", "Effective Rights", [
+        bf_boolean8(0x01, "effective_rights_read", "Read Rights"),
+        bf_boolean8(0x02, "effective_rights_write", "Write Rights"),
+        bf_boolean8(0x04, "effective_rights_open", "Open Rights"),
+        bf_boolean8(0x08, "effective_rights_create", "Create Rights"),
+        bf_boolean8(0x10, "effective_rights_delete", "Delete Rights"),
+        bf_boolean8(0x20, "effective_rights_parental", "Parental Rights"),
+        bf_boolean8(0x40, "effective_rights_search", "Search Rights"),
+        bf_boolean8(0x80, "effective_rights_modify", "Modify Rights"),
+])
+# Request mask: which categories of connection information to return.
+EnumInfoMask = bitfield8("enum_info_mask", "Return Information Mask", [
+        bf_boolean8(0x01, "enum_info_transport", "Transport Information"),
+        bf_boolean8(0x02, "enum_info_time", "Time Information"),
+        bf_boolean8(0x04, "enum_info_name", "Name Information"),
+        bf_boolean8(0x08, "enum_info_lock", "Lock Information"),
+        bf_boolean8(0x10, "enum_info_print", "Print Information"),
+        bf_boolean8(0x20, "enum_info_stats", "Statistical Information"),
+        bf_boolean8(0x40, "enum_info_account", "Accounting Information"),
+        bf_boolean8(0x80, "enum_info_auth", "Authentication Information"),
+])
+
+# --- Event / expiration timestamps and EA counters ------------------------
+eventOffset = bytes("event_offset", "Event Offset", 8)
+eventTime = uint32("event_time", "Event Time")
+eventTime.Display("BASE_HEX")
+ExpirationTime = uint32("expiration_time", "Expiration Time")
+ExpirationTime.Display('BASE_HEX')
+ExtAttrDataSize = uint32("ext_attr_data_size", "Extended Attributes Data Size")
+ExtAttrCount = uint32("ext_attr_count", "Extended Attributes Count")
+ExtAttrKeySize = uint32("ext_attr_key_size", "Extended Attributes Key Size")
+ExtendedAttributesDefined = uint32("extended_attributes_defined", "Extended Attributes Defined")
+ExtendedAttributeExtentsUsed = uint32("extended_attribute_extents_used", "Extended Attribute Extents Used")
+# Selector mask for extended return information.  Bits 0x0200 and
+# 0x0800-0x4000 are intentionally absent from the table below.
+ExtendedInfo = bitfield16("ext_info", "Extended Return Information", [
+        bf_boolean16(0x0001, "ext_info_update", "Last Update"),
+        bf_boolean16(0x0002, "ext_info_dos_name", "DOS Name"),
+        bf_boolean16(0x0004, "ext_info_flush", "Flush Time"),
+        bf_boolean16(0x0008, "ext_info_parental", "Parental"),
+        bf_boolean16(0x0010, "ext_info_mac_finder", "MAC Finder"),
+        bf_boolean16(0x0020, "ext_info_sibling", "Sibling"),
+        bf_boolean16(0x0040, "ext_info_effective", "Effective"),
+        bf_boolean16(0x0080, "ext_info_mac_date", "MAC Date"),
+        bf_boolean16(0x0100, "ext_info_access", "Last Access"),
+        bf_boolean16(0x0400, "ext_info_64_bit_fs", "64 Bit File Sizes"),
+        bf_boolean16(0x8000, "ext_info_newstyle", "New Style"),
+])
+
+# --- Extent list fields ---------------------------------------------------
+ExtentListFormat = uint8("ext_lst_format", "Extent List Format")
+RetExtentListCount = uint8("ret_ext_lst_count", "Extent List Count")
+EndingOffset = bytes("end_offset", "Ending Offset", 8)
+#ExtentLength = bytes("extent_length", "Length", 8),
+ExtentList = bytes("ext_lst", "Extent List", 512)
+ExtRouterActiveFlag = boolean8("ext_router_active_flag", "External Router Active Flag")
+
+# --- FAT / file statistics counters ---------------------------------------
+FailedAllocReqCnt = uint32("failed_alloc_req", "Failed Alloc Request Count")
+FatalFATWriteErrors = uint16("fatal_fat_write_errors", "Fatal FAT Write Errors")
+FATScanErrors = uint16("fat_scan_errors", "FAT Scan Errors")
+FATWriteErrors = uint16("fat_write_errors", "FAT Write Errors")
+FieldsLenTable = bytes("fields_len_table", "Fields Len Table", 32)
+FileCount = uint16("file_count", "File Count")
+# NetWare packed-date format (see the NWDate helper).
+FileDate = uint16("file_date", "File Date")
+FileDate.NWDate()
+FileDirWindow = uint16("file_dir_win", "File/Dir Window")
+FileDirWindow.Display("BASE_HEX")
+FileExecuteType = uint8("file_execute_type", "File Execute Type")
+# File extended-attribute byte, pre-expanded to one display string per
+# value.  Structure (as evidenced by the table): bits 0x00-0x07 select the
+# search mode, 0x08 adds "Indexed", 0x10 adds "Transactional", 0x40 adds
+# "Read Audit", 0x80 adds "Write Audit"; bit 0x20 is never set.
+FileExtendedAttributes = val_string8("file_ext_attr", "File Extended Attributes", [
+        [ 0x00, "Search On All Read Only Opens" ],
+        [ 0x01, "Search On Read Only Opens With No Path" ],
+        [ 0x02, "Shell Default Search Mode" ],
+        [ 0x03, "Search On All Opens With No Path" ],
+        [ 0x04, "Do Not Search" ],
+        [ 0x05, "Reserved" ],
+        [ 0x06, "Search On All Opens" ],
+        [ 0x07, "Reserved" ],
+        [ 0x08, "Search On All Read Only Opens/Indexed" ],
+        [ 0x09, "Search On Read Only Opens With No Path/Indexed" ],
+        [ 0x0a, "Shell Default Search Mode/Indexed" ],
+        [ 0x0b, "Search On All Opens With No Path/Indexed" ],
+        [ 0x0c, "Do Not Search/Indexed" ],
+        [ 0x0d, "Indexed" ],
+        [ 0x0e, "Search On All Opens/Indexed" ],
+        [ 0x0f, "Indexed" ],
+        [ 0x10, "Search On All Read Only Opens/Transactional" ],
+        [ 0x11, "Search On Read Only Opens With No Path/Transactional" ],
+        [ 0x12, "Shell Default Search Mode/Transactional" ],
+        [ 0x13, "Search On All Opens With No Path/Transactional" ],
+        [ 0x14, "Do Not Search/Transactional" ],
+        [ 0x15, "Transactional" ],
+        [ 0x16, "Search On All Opens/Transactional" ],
+        [ 0x17, "Transactional" ],
+        [ 0x18, "Search On All Read Only Opens/Indexed/Transactional" ],
+        [ 0x19, "Search On Read Only Opens With No Path/Indexed/Transactional" ],
+        [ 0x1a, "Shell Default Search Mode/Indexed/Transactional" ],
+        [ 0x1b, "Search On All Opens With No Path/Indexed/Transactional" ],
+        [ 0x1c, "Do Not Search/Indexed/Transactional" ],
+        [ 0x1d, "Indexed/Transactional" ],
+        [ 0x1e, "Search On All Opens/Indexed/Transactional" ],
+        [ 0x1f, "Indexed/Transactional" ],
+        [ 0x40, "Search On All Read Only Opens/Read Audit" ],
+        [ 0x41, "Search On Read Only Opens With No Path/Read Audit" ],
+        [ 0x42, "Shell Default Search Mode/Read Audit" ],
+        [ 0x43, "Search On All Opens With No Path/Read Audit" ],
+        [ 0x44, "Do Not Search/Read Audit" ],
+        [ 0x45, "Read Audit" ],
+        [ 0x46, "Search On All Opens/Read Audit" ],
+        [ 0x47, "Read Audit" ],
+        [ 0x48, "Search On All Read Only Opens/Indexed/Read Audit" ],
+        [ 0x49, "Search On Read Only Opens With No Path/Indexed/Read Audit" ],
+        [ 0x4a, "Shell Default Search Mode/Indexed/Read Audit" ],
+        [ 0x4b, "Search On All Opens With No Path/Indexed/Read Audit" ],
+        [ 0x4c, "Do Not Search/Indexed/Read Audit" ],
+        [ 0x4d, "Indexed/Read Audit" ],
+        [ 0x4e, "Search On All Opens/Indexed/Read Audit" ],
+        [ 0x4f, "Indexed/Read Audit" ],
+        [ 0x50, "Search On All Read Only Opens/Transactional/Read Audit" ],
+        [ 0x51, "Search On Read Only Opens With No Path/Transactional/Read Audit" ],
+        [ 0x52, "Shell Default Search Mode/Transactional/Read Audit" ],
+        [ 0x53, "Search On All Opens With No Path/Transactional/Read Audit" ],
+        [ 0x54, "Do Not Search/Transactional/Read Audit" ],
+        [ 0x55, "Transactional/Read Audit" ],
+        [ 0x56, "Search On All Opens/Transactional/Read Audit" ],
+        [ 0x57, "Transactional/Read Audit" ],
+        [ 0x58, "Search On All Read Only Opens/Indexed/Transactional/Read Audit" ],
+        [ 0x59, "Search On Read Only Opens With No Path/Indexed/Transactional/Read Audit" ],
+        [ 0x5a, "Shell Default Search Mode/Indexed/Transactional/Read Audit" ],
+        [ 0x5b, "Search On All Opens With No Path/Indexed/Transactional/Read Audit" ],
+        [ 0x5c, "Do Not Search/Indexed/Transactional/Read Audit" ],
+        [ 0x5d, "Indexed/Transactional/Read Audit" ],
+        [ 0x5e, "Search On All Opens/Indexed/Transactional/Read Audit" ],
+        [ 0x5f, "Indexed/Transactional/Read Audit" ],
+        [ 0x80, "Search On All Read Only Opens/Write Audit" ],
+        [ 0x81, "Search On Read Only Opens With No Path/Write Audit" ],
+        [ 0x82, "Shell Default Search Mode/Write Audit" ],
+        [ 0x83, "Search On All Opens With No Path/Write Audit" ],
+        [ 0x84, "Do Not Search/Write Audit" ],
+        [ 0x85, "Write Audit" ],
+        [ 0x86, "Search On All Opens/Write Audit" ],
+        [ 0x87, "Write Audit" ],
+        [ 0x88, "Search On All Read Only Opens/Indexed/Write Audit" ],
+        [ 0x89, "Search On Read Only Opens With No Path/Indexed/Write Audit" ],
+        [ 0x8a, "Shell Default Search Mode/Indexed/Write Audit" ],
+        [ 0x8b, "Search On All Opens With No Path/Indexed/Write Audit" ],
+        [ 0x8c, "Do Not Search/Indexed/Write Audit" ],
+        [ 0x8d, "Indexed/Write Audit" ],
+        [ 0x8e, "Search On All Opens/Indexed/Write Audit" ],
+        [ 0x8f, "Indexed/Write Audit" ],
+        [ 0x90, "Search On All Read Only Opens/Transactional/Write Audit" ],
+        [ 0x91, "Search On Read Only Opens With No Path/Transactional/Write Audit" ],
+        [ 0x92, "Shell Default Search Mode/Transactional/Write Audit" ],
+        [ 0x93, "Search On All Opens With No Path/Transactional/Write Audit" ],
+        [ 0x94, "Do Not Search/Transactional/Write Audit" ],
+        [ 0x95, "Transactional/Write Audit" ],
+        [ 0x96, "Search On All Opens/Transactional/Write Audit" ],
+        [ 0x97, "Transactional/Write Audit" ],
+        [ 0x98, "Search On All Read Only Opens/Indexed/Transactional/Write Audit" ],
+        [ 0x99, "Search On Read Only Opens With No Path/Indexed/Transactional/Write Audit" ],
+        [ 0x9a, "Shell Default Search Mode/Indexed/Transactional/Write Audit" ],
+        [ 0x9b, "Search On All Opens With No Path/Indexed/Transactional/Write Audit" ],
+        [ 0x9c, "Do Not Search/Indexed/Transactional/Write Audit" ],
+        [ 0x9d, "Indexed/Transactional/Write Audit" ],
+        [ 0x9e, "Search On All Opens/Indexed/Transactional/Write Audit" ],
+        [ 0x9f, "Indexed/Transactional/Write Audit" ],
+        [ 0xa0, "Search On All Read Only Opens/Read Audit/Write Audit" ],
+        [ 0xa1, "Search On Read Only Opens With No Path/Read Audit/Write Audit" ],
+        [ 0xa2, "Shell Default Search Mode/Read Audit/Write Audit" ],
+        [ 0xa3, "Search On All Opens With No Path/Read Audit/Write Audit" ],
+        [ 0xa4, "Do Not Search/Read Audit/Write Audit" ],
+        [ 0xa5, "Read Audit/Write Audit" ],
+        [ 0xa6, "Search On All Opens/Read Audit/Write Audit" ],
+        [ 0xa7, "Read Audit/Write Audit" ],
+        [ 0xa8, "Search On All Read Only Opens/Indexed/Read Audit/Write Audit" ],
+        [ 0xa9, "Search On Read Only Opens With No Path/Indexed/Read Audit/Write Audit" ],
+        [ 0xaa, "Shell Default Search Mode/Indexed/Read Audit/Write Audit" ],
+        [ 0xab, "Search On All Opens With No Path/Indexed/Read Audit/Write Audit" ],
+        [ 0xac, "Do Not Search/Indexed/Read Audit/Write Audit" ],
+        [ 0xad, "Indexed/Read Audit/Write Audit" ],
+        [ 0xae, "Search On All Opens/Indexed/Read Audit/Write Audit" ],
+        [ 0xaf, "Indexed/Read Audit/Write Audit" ],
+        [ 0xb0, "Search On All Read Only Opens/Transactional/Read Audit/Write Audit" ],
+        [ 0xb1, "Search On Read Only Opens With No Path/Transactional/Read Audit/Write Audit" ],
+        [ 0xb2, "Shell Default Search Mode/Transactional/Read Audit/Write Audit" ],
+        [ 0xb3, "Search On All Opens With No Path/Transactional/Read Audit/Write Audit" ],
+        [ 0xb4, "Do Not Search/Transactional/Read Audit/Write Audit" ],
+        [ 0xb5, "Transactional/Read Audit/Write Audit" ],
+        [ 0xb6, "Search On All Opens/Transactional/Read Audit/Write Audit" ],
+        [ 0xb7, "Transactional/Read Audit/Write Audit" ],
+        [ 0xb8, "Search On All Read Only Opens/Indexed/Transactional/Read Audit/Write Audit" ],
+        [ 0xb9, "Search On Read Only Opens With No Path/Indexed/Transactional/Read Audit/Write Audit" ],
+        [ 0xba, "Shell Default Search Mode/Indexed/Transactional/Read Audit/Write Audit" ],
+        [ 0xbb, "Search On All Opens With No Path/Indexed/Transactional/Read Audit/Write Audit" ],
+        [ 0xbc, "Do Not Search/Indexed/Transactional/Read Audit/Write Audit" ],
+        [ 0xbd, "Indexed/Transactional/Read Audit/Write Audit" ],
+        [ 0xbe, "Search On All Opens/Indexed/Transactional/Read Audit/Write Audit" ],
+        [ 0xbf, "Indexed/Transactional/Read Audit/Write Audit" ],
+])
+# --- File handle / name / state fields ------------------------------------
+fileFlags = uint32("file_flags", "File Flags")
+FileHandle = bytes("file_handle", "File Handle", 6)
+FileLimbo = uint32("file_limbo", "File Limbo")
+FileListCount = uint32("file_list_count", "File List Count")
+FileLock = val_string8("file_lock", "File Lock", [
+        [ 0x00, "Not Locked" ],
+        [ 0xfe, "Locked by file lock" ],
+        [ 0xff, "Unknown" ],
+])
+FileLockCount = uint16("file_lock_count", "File Lock Count")
+# Data-migration control values for a single file.
+FileMigrationState = val_string8("file_mig_state", "File Migration State", [
+        [ 0x00, "Mark file ineligible for file migration" ],
+        [ 0x01, "Mark file eligible for file migration" ],
+        [ 0x02, "Mark file as migrated and delete fat chains" ],
+        [ 0x03, "Reset file status back to normal" ],
+        [ 0x04, "Get file data back and reset file status back to normal" ],
+])
+FileMode = uint8("file_mode", "File Mode")
+# Several filename encodings coexist in NCP: length-prefixed (nstring*)
+# and fixed-width (fw_string) variants.
+FileName = nstring8("file_name", "Filename")
+FileName12 = fw_string("file_name_12", "Filename", 12)
+FileName14 = fw_string("file_name_14", "Filename", 14)
+FileName16 = nstring16("file_name_16", "Filename")
+FileNameLen = uint8("file_name_len", "Filename Length")
+FileOffset = uint32("file_offset", "File Offset")
+FilePath = nstring8("file_path", "File Path")
+# Note: big-endian, unlike most uint32 fields in this file.
+FileSize = uint32("file_size", "File Size", ENC_BIG_ENDIAN)
+FileSize64bit = uint64("f_size_64bit", "64bit File Size")
+FileSystemID = uint8("file_system_id", "File System ID")
+# NetWare packed-time format (see the NWTime helper).
+FileTime = uint16("file_time", "File Time")
+FileTime.NWTime()
+FileUseCount = uint16("file_use_count", "File Use Count")
+FileWriteFlags = val_string8("file_write_flags", "File Write Flags", [
+        [ 0x01, "Writing" ],
+        [ 0x02, "Write aborted" ],
+])
+FileWriteState = val_string8("file_write_state", "File Write State", [
+        [ 0x00, "Not Writing" ],
+        [ 0x01, "Write in Progress" ],
+        [ 0x02, "Write Being Stopped" ],
+])
+Filler = uint8("filler", "Filler")
+# Macintosh Finder attribute bits (desktop/invisible/bundle only).
+FinderAttr = bitfield16("finder_attr", "Finder Info Attributes", [
+        bf_boolean16(0x0001, "finder_attr_desktop", "Object on Desktop"),
+        bf_boolean16(0x2000, "finder_attr_invisible", "Object is Invisible"),
+        bf_boolean16(0x4000, "finder_attr_bundle", "Object Has Bundle"),
+])
+FixedBitMask = uint32("fixed_bit_mask", "Fixed Bit Mask")
+FixedBitsDefined = uint16("fixed_bits_defined", "Fixed Bits Defined")
+FlagBits = uint8("flag_bits", "Flag Bits")
+Flags = uint8("flags", "Flags")
+FlagsDef = uint16("flags_def", "Flags")
+FlushTime = uint32("flush_time", "Flush Time")
+FolderFlag = val_string8("folder_flag", "Folder Flag", [
+        [ 0x00, "Not a Folder" ],
+        [ 0x01, "Folder" ],
+])
+ForkCount = uint8("fork_count", "Fork Count")
+ForkIndicator = val_string8("fork_indicator", "Fork Indicator", [
+        [ 0x00, "Data Fork" ],
+        [ 0x01, "Resource Fork" ],
+])
+ForceFlag = val_string8("force_flag", "Force Server Down Flag", [
+        [ 0x00, "Down Server if No Files Are Open" ],
+        [ 0xff, "Down Server Immediately, Auto-Close Open Files" ],
+])
+ForgedDetachedRequests = uint16("forged_detached_requests", "Forged Detached Requests")
+FormType = uint16( "form_type", "Form Type" )
+FormTypeCnt = uint32("form_type_count", "Form Types Count")
+FoundSomeMem = uint32("found_some_mem", "Found Some Memory")
+FractionalSeconds = eptime("fractional_time", "Fractional Time in Seconds")
+FraggerHandle = uint32("fragger_handle", "Fragment Handle")
+FraggerHandle.Display('BASE_HEX')
+FragmentWriteOccurred = uint16("fragment_write_occurred", "Fragment Write Occurred")
+FragSize = uint32("frag_size", "Fragment Size")
+FreeableLimboSectors = uint32("freeable_limbo_sectors", "Freeable Limbo Sectors")
+FreeBlocks = uint32("free_blocks", "Free Blocks")
+FreedClusters = uint32("freed_clusters", "Freed Clusters")
+FreeDirectoryEntries = uint16("free_directory_entries", "Free Directory Entries")
+FSEngineFlag = boolean8("fs_engine_flag", "FS Engine Flag")
+FullName = fw_string("full_name", "Full Name", 39)
+
+# --- G fields -------------------------------------------------------------
+GetSetFlag = val_string8("get_set_flag", "Get Set Flag", [
+        [ 0x00, "Get the default support module ID" ],
+        [ 0x01, "Set the default support module ID" ],
+])
+GUID = bytes("guid", "GUID", 16)
+
+# --- H fields: handles, held statistics, hot fix, huge data ---------------
+HandleFlag = val_string8("handle_flag", "Handle Flag", [
+        [ 0x00, "Short Directory Handle" ],
+        [ 0x01, "Directory Base" ],
+        [ 0xFF, "No Handle Present" ],
+])
+HandleInfoLevel = val_string8("handle_info_level", "Handle Info Level", [
+        [ 0x00, "Get Limited Information from a File Handle" ],
+        [ 0x01, "Get Limited Information from a File Handle Using a Name Space" ],
+        [ 0x02, "Get Information from a File Handle" ],
+        [ 0x03, "Get Information from a Directory Handle" ],
+        [ 0x04, "Get Complete Information from a Directory Handle" ],
+        [ 0x05, "Get Complete Information from a File Handle" ],
+])
+# 48-bit byte counters for held connections.
+HeldBytesRead = bytes("held_bytes_read", "Held Bytes Read", 6)
+HeldBytesWritten = bytes("held_bytes_write", "Held Bytes Written", 6)
+HeldConnectTimeInMinutes = uint32("held_conn_time", "Held Connect Time in Minutes")
+HeldRequests = uint32("user_info_held_req", "Held Requests")
+HoldAmount = uint32("hold_amount", "Hold Amount")
+HoldCancelAmount = uint32("hold_cancel_amount", "Hold Cancel Amount")
+HolderID = uint32("holder_id", "Holder ID")
+HolderID.Display("BASE_HEX")
+HoldTime = uint32("hold_time", "Hold Time")
+HopsToNet = uint16("hops_to_net", "Hop Count")
+HorizLocation = uint16("horiz_location", "Horizontal Location")
+HostAddress = bytes("host_address", "Host Address", 6)
+HotFixBlocksAvailable = uint16("hot_fix_blocks_available", "Hot Fix Blocks Available")
+HotFixDisabled = val_string8("hot_fix_disabled", "Hot Fix Disabled", [
+        [ 0x00, "Enabled" ],
+        [ 0x01, "Disabled" ],
+])
+HotFixTableSize = uint16("hot_fix_table_size", "Hot Fix Table Size")
+HotFixTableStart = uint32("hot_fix_table_start", "Hot Fix Table Start")
+Hour = uint8("s_hour", "Hour")
+HugeBitMask = uint32("huge_bit_mask", "Huge Bit Mask")
+HugeBitsDefined = uint16("huge_bits_defined", "Huge Bits Defined")
+HugeData = nstring8("huge_data", "Huge Data")
+HugeDataUsed = uint32("huge_data_used", "Huge Data Used")
+HugeStateInfo = bytes("huge_state_info", "Huge State Info", 16)
+
+# --- I fields: info masks, inherited rights, IO counters ------------------
+IdentificationNumber = uint32("identification_number", "Identification Number")
+IgnoredRxPkts = uint32("ignored_rx_pkts", "Ignored Receive Packets")
+IncomingPacketDiscardedNoDGroup = uint16("incoming_packet_discarded_no_dgroup", "Incoming Packet Discarded No DGroup")
+IndexNumber = uint8("index_number", "Index Number")
+InfoCount = uint16("info_count", "Info Count")
+# Only the four high-order bits are assigned in this mask.
+InfoFlags = bitfield32("info_flags", "Info Flags", [
+        bf_boolean32(0x10000000, "info_flags_security", "Return Object Security"),
+        bf_boolean32(0x20000000, "info_flags_flags", "Return Object Flags"),
+        bf_boolean32(0x40000000, "info_flags_type", "Return Object Type"),
+        bf_boolean32(0x80000000, "info_flags_name", "Return Object Name"),
+])
+InfoLevelNumber = val_string8("info_level_num", "Information Level Number", [
+        [ 0x0, "Single Directory Quota Information" ],
+        [ 0x1, "Multi-Level Directory Quota Information" ],
+])
+# Request mask: which pieces of file/directory information to return.
+# Low 16 bits plus the three top bits are defined; the middle range is not.
+InfoMask = bitfield32("info_mask", "Information Mask", [
+        bf_boolean32(0x00000001, "info_flags_dos_time", "DOS Time"),
+        bf_boolean32(0x00000002, "info_flags_ref_count", "Reference Count"),
+        bf_boolean32(0x00000004, "info_flags_dos_attr", "DOS Attributes"),
+        bf_boolean32(0x00000008, "info_flags_ids", "ID's"),
+        bf_boolean32(0x00000010, "info_flags_ds_sizes", "Data Stream Sizes"),
+        bf_boolean32(0x00000020, "info_flags_ns_attr", "Name Space Attributes"),
+        bf_boolean32(0x00000040, "info_flags_ea_present", "EA Present Flag"),
+        bf_boolean32(0x00000080, "info_flags_all_attr", "All Attributes"),
+        bf_boolean32(0x00000100, "info_flags_all_dirbase_num", "All Directory Base Numbers"),
+        bf_boolean32(0x00000200, "info_flags_max_access_mask", "Maximum Access Mask"),
+        bf_boolean32(0x00000400, "info_flags_flush_time", "Flush Time"),
+        bf_boolean32(0x00000800, "info_flags_prnt_base_id", "Parent Base ID"),
+        bf_boolean32(0x00001000, "info_flags_mac_finder", "Mac Finder Information"),
+        bf_boolean32(0x00002000, "info_flags_sibling_cnt", "Sibling Count"),
+        bf_boolean32(0x00004000, "info_flags_effect_rights", "Effective Rights"),
+        bf_boolean32(0x00008000, "info_flags_mac_time", "Mac Time"),
+        bf_boolean32(0x20000000, "info_mask_dosname", "DOS Name"),
+        bf_boolean32(0x40000000, "info_mask_c_name_space", "Creator Name Space & Name"),
+        bf_boolean32(0x80000000, "info_mask_name", "Name"),
+])
+# Rights inherited by subordinate objects; parallel bit layout to the
+# revoke mask that follows.
+InheritedRightsMask = bitfield16("inherited_rights_mask", "Inherited Rights Mask", [
+        bf_boolean16(0x0001, "inh_rights_read", "Read Rights"),
+        bf_boolean16(0x0002, "inh_rights_write", "Write Rights"),
+        bf_boolean16(0x0004, "inh_rights_open", "Open Rights"),
+        bf_boolean16(0x0008, "inh_rights_create", "Create Rights"),
+        bf_boolean16(0x0010, "inh_rights_delete", "Delete Rights"),
+        bf_boolean16(0x0020, "inh_rights_parent", "Change Access"),
+        bf_boolean16(0x0040, "inh_rights_search", "See Files Flag"),
+        bf_boolean16(0x0080, "inh_rights_modify", "Modify Rights"),
+        bf_boolean16(0x0100, "inh_rights_supervisor", "Supervisor"),
+])
+InheritanceRevokeMask = bitfield16("inheritance_revoke_mask", "Revoke Rights Mask", [
+        bf_boolean16(0x0001, "inh_revoke_read", "Read Rights"),
+        bf_boolean16(0x0002, "inh_revoke_write", "Write Rights"),
+        bf_boolean16(0x0004, "inh_revoke_open", "Open Rights"),
+        bf_boolean16(0x0008, "inh_revoke_create", "Create Rights"),
+        bf_boolean16(0x0010, "inh_revoke_delete", "Delete Rights"),
+        bf_boolean16(0x0020, "inh_revoke_parent", "Change Access"),
+        bf_boolean16(0x0040, "inh_revoke_search", "See Files Flag"),
+        bf_boolean16(0x0080, "inh_revoke_modify", "Modify Rights"),
+        bf_boolean16(0x0100, "inh_revoke_supervisor", "Supervisor"),
+])
+InitialSemaphoreValue = uint8("initial_semaphore_value", "Initial Semaphore Value")
+InpInfotype = uint32("inp_infotype", "Information Type")
+Inpld = uint32("inp_ld", "Volume Number or Directory Handle")
+InspectSize = uint32("inspect_size", "Inspect Size")
+InternetBridgeVersion = uint8("internet_bridge_version", "Internet Bridge Version")
+InterruptNumbersUsed = uint32("interrupt_numbers_used", "Interrupt Numbers Used")
+InUse = uint32("in_use", "Blocks in Use")
+InUse64 = uint64("in_use64", "Blocks in Use")
+IOAddressesUsed = bytes("io_addresses_used", "IO Addresses Used", 8)
+IOErrorCount = uint16("io_error_count", "IO Error Count")
+IOEngineFlag = boolean8("io_engine_flag", "IO Engine Flag")
+IPXNotMyNetwork = uint16("ipx_not_my_network", "IPX Not My Network")
+ItemsChanged = uint32("items_changed", "Items Changed")
+ItemsChecked = uint32("items_checked", "Items Checked")
+ItemsCount = uint32("items_count", "Items Count")
+itemsInList = uint32("items_in_list", "Items in List")
+ItemsInPacket = uint32("items_in_packet", "Items in Packet")
+
+# --- Print job (queue management) fields ----------------------------------
+# 8-bit and 16-bit encodings of the same job-control flag set.
+JobControlFlags = bitfield8("job_control_flags", "Job Control Flags", [
+        bf_boolean8(0x08, "job_control_job_recovery", "Job Recovery"),
+        bf_boolean8(0x10, "job_control_reservice", "ReService Job"),
+        bf_boolean8(0x20, "job_control_file_open", "File Open"),
+        bf_boolean8(0x40, "job_control_user_hold", "User Hold"),
+        bf_boolean8(0x80, "job_control_operator_hold", "Operator Hold"),
+
+])
+JobControlFlagsWord = bitfield16("job_control_flags_word", "Job Control Flags", [
+        bf_boolean16(0x0008, "job_control1_job_recovery", "Job Recovery"),
+        bf_boolean16(0x0010, "job_control1_reservice", "ReService Job"),
+        bf_boolean16(0x0020, "job_control1_file_open", "File Open"),
+        bf_boolean16(0x0040, "job_control1_user_hold", "User Hold"),
+        bf_boolean16(0x0080, "job_control1_operator_hold", "Operator Hold"),
+
+])
+JobCount = uint32("job_count", "Job Count")
+JobFileHandle = bytes("job_file_handle", "Job File Handle", 6)
+# Queue-management numbers are carried big-endian on the wire.
+JobFileHandleLong = uint32("job_file_handle_long", "Job File Handle", ENC_BIG_ENDIAN)
+JobFileHandleLong.Display("BASE_HEX")
+JobFileName = fw_string("job_file_name", "Job File Name", 14)
+JobPosition = uint8("job_position", "Job Position")
+JobPositionWord = uint16("job_position_word", "Job Position")
+JobNumber = uint16("job_number", "Job Number", ENC_BIG_ENDIAN )
+JobNumberLong = uint32("job_number_long", "Job Number", ENC_BIG_ENDIAN )
+JobNumberLong.Display("BASE_HEX")
+JobType = uint16("job_type", "Job Type", ENC_BIG_ENDIAN )
+
+# --- LAN driver configuration fields --------------------------------------
+LANCustomVariablesCount = uint32("lan_cust_var_count", "LAN Custom Variables Count")
+LANdriverBoardInstance = uint16("lan_drv_bd_inst", "LAN Driver Board Instance")
+LANdriverBoardNumber = uint16("lan_drv_bd_num", "LAN Driver Board Number")
+LANdriverCardID = uint16("lan_drv_card_id", "LAN Driver Card ID")
+LANdriverCardName = fw_string("lan_drv_card_name", "LAN Driver Card Name", 28)
+LANdriverCFG_MajorVersion = uint8("lan_dvr_cfg_major_vrs", "LAN Driver Config - Major Version")
+LANdriverCFG_MinorVersion = uint8("lan_dvr_cfg_minor_vrs", "LAN Driver Config - Minor Version")
+LANdriverDMAUsage1 = uint8("lan_drv_dma_usage1", "Primary DMA Channel")
+LANdriverDMAUsage2 = uint8("lan_drv_dma_usage2", "Secondary DMA Channel")
+LANdriverFlags = uint16("lan_drv_flags", "LAN Driver Flags")
+LANdriverFlags.Display("BASE_HEX")
+LANdriverInterrupt1 = uint8("lan_drv_interrupt1", "Primary Interrupt Vector")
+LANdriverInterrupt2 = uint8("lan_drv_interrupt2", "Secondary Interrupt Vector")
+LANdriverIOPortsAndRanges1 = uint16("lan_drv_io_ports_and_ranges_1", "Primary Base I/O Port")
+LANdriverIOPortsAndRanges2 = uint16("lan_drv_io_ports_and_ranges_2", "Number of I/O Ports")
+LANdriverIOPortsAndRanges3 = uint16("lan_drv_io_ports_and_ranges_3", "Secondary Base I/O Port")
+LANdriverIOPortsAndRanges4 = uint16("lan_drv_io_ports_and_ranges_4", "Number of I/O Ports")
+LANdriverIOReserved = bytes("lan_drv_io_reserved", "LAN Driver IO Reserved", 14)
+LANdriverLineSpeed = uint16("lan_drv_line_speed", "LAN Driver Line Speed")
+LANdriverLink = uint32("lan_drv_link", "LAN Driver Link")
+LANdriverLogicalName = bytes("lan_drv_log_name", "LAN Driver Logical Name", 18)
+LANdriverMajorVersion = uint8("lan_drv_major_ver", "LAN Driver Major Version")
+LANdriverMaximumSize = uint32("lan_drv_max_size", "LAN Driver Maximum Size")
+LANdriverMaxRecvSize = uint32("lan_drv_max_rcv_size", "LAN Driver Maximum Receive Size")
+LANdriverMediaID = uint16("lan_drv_media_id", "LAN Driver Media ID")
+LANdriverMediaType = fw_string("lan_drv_media_type", "LAN Driver Media Type", 40)
+LANdriverMemoryDecode0 = uint32("lan_drv_mem_decode_0", "LAN Driver Memory Decode 0")
+LANdriverMemoryDecode1 = uint32("lan_drv_mem_decode_1", "LAN Driver Memory Decode 1")
+LANdriverMemoryLength0 = uint16("lan_drv_mem_length_0", "LAN Driver Memory Length 0")
+LANdriverMemoryLength1 = uint16("lan_drv_mem_length_1", "LAN Driver Memory Length 1")
+LANdriverMinorVersion = uint8("lan_drv_minor_ver", "LAN Driver Minor Version")
+LANdriverModeFlags = val_string8("lan_dvr_mode_flags", "LAN Driver Mode Flags", [
+ [0x80, "Canonical Address" ],
+ [0x81, "Canonical Address" ],
+ [0x82, "Canonical Address" ],
+ [0x83, "Canonical Address" ],
+ [0x84, "Canonical Address" ],
+ [0x85, "Canonical Address" ],
+ [0x86, "Canonical Address" ],
+ [0x87, "Canonical Address" ],
+ [0x88, "Canonical Address" ],
+ [0x89, "Canonical Address" ],
+ [0x8a, "Canonical Address" ],
+ [0x8b, "Canonical Address" ],
+ [0x8c, "Canonical Address" ],
+ [0x8d, "Canonical Address" ],
+ [0x8e, "Canonical Address" ],
+ [0x8f, "Canonical Address" ],
+ [0x90, "Canonical Address" ],
+ [0x91, "Canonical Address" ],
+ [0x92, "Canonical Address" ],
+ [0x93, "Canonical Address" ],
+ [0x94, "Canonical Address" ],
+ [0x95, "Canonical Address" ],
+ [0x96, "Canonical Address" ],
+ [0x97, "Canonical Address" ],
+ [0x98, "Canonical Address" ],
+ [0x99, "Canonical Address" ],
+ [0x9a, "Canonical Address" ],
+ [0x9b, "Canonical Address" ],
+ [0x9c, "Canonical Address" ],
+ [0x9d, "Canonical Address" ],
+ [0x9e, "Canonical Address" ],
+ [0x9f, "Canonical Address" ],
+ [0xa0, "Canonical Address" ],
+ [0xa1, "Canonical Address" ],
+ [0xa2, "Canonical Address" ],
+ [0xa3, "Canonical Address" ],
+ [0xa4, "Canonical Address" ],
+ [0xa5, "Canonical Address" ],
+ [0xa6, "Canonical Address" ],
+ [0xa7, "Canonical Address" ],
+ [0xa8, "Canonical Address" ],
+ [0xa9, "Canonical Address" ],
+ [0xaa, "Canonical Address" ],
+ [0xab, "Canonical Address" ],
+ [0xac, "Canonical Address" ],
+ [0xad, "Canonical Address" ],
+ [0xae, "Canonical Address" ],
+ [0xaf, "Canonical Address" ],
+ [0xb0, "Canonical Address" ],
+ [0xb1, "Canonical Address" ],
+ [0xb2, "Canonical Address" ],
+ [0xb3, "Canonical Address" ],
+ [0xb4, "Canonical Address" ],
+ [0xb5, "Canonical Address" ],
+ [0xb6, "Canonical Address" ],
+ [0xb7, "Canonical Address" ],
+ [0xb8, "Canonical Address" ],
+ [0xb9, "Canonical Address" ],
+ [0xba, "Canonical Address" ],
+ [0xbb, "Canonical Address" ],
+ [0xbc, "Canonical Address" ],
+ [0xbd, "Canonical Address" ],
+ [0xbe, "Canonical Address" ],
+ [0xbf, "Canonical Address" ],
+ [0xc0, "Non-Canonical Address" ],
+ [0xc1, "Non-Canonical Address" ],
+ [0xc2, "Non-Canonical Address" ],
+ [0xc3, "Non-Canonical Address" ],
+ [0xc4, "Non-Canonical Address" ],
+ [0xc5, "Non-Canonical Address" ],
+ [0xc6, "Non-Canonical Address" ],
+ [0xc7, "Non-Canonical Address" ],
+ [0xc8, "Non-Canonical Address" ],
+ [0xc9, "Non-Canonical Address" ],
+ [0xca, "Non-Canonical Address" ],
+ [0xcb, "Non-Canonical Address" ],
+ [0xcc, "Non-Canonical Address" ],
+ [0xcd, "Non-Canonical Address" ],
+ [0xce, "Non-Canonical Address" ],
+ [0xcf, "Non-Canonical Address" ],
+ [0xd0, "Non-Canonical Address" ],
+ [0xd1, "Non-Canonical Address" ],
+ [0xd2, "Non-Canonical Address" ],
+ [0xd3, "Non-Canonical Address" ],
+ [0xd4, "Non-Canonical Address" ],
+ [0xd5, "Non-Canonical Address" ],
+ [0xd6, "Non-Canonical Address" ],
+ [0xd7, "Non-Canonical Address" ],
+ [0xd8, "Non-Canonical Address" ],
+ [0xd9, "Non-Canonical Address" ],
+ [0xda, "Non-Canonical Address" ],
+ [0xdb, "Non-Canonical Address" ],
+ [0xdc, "Non-Canonical Address" ],
+ [0xdd, "Non-Canonical Address" ],
+ [0xde, "Non-Canonical Address" ],
+ [0xdf, "Non-Canonical Address" ],
+ [0xe0, "Non-Canonical Address" ],
+ [0xe1, "Non-Canonical Address" ],
+ [0xe2, "Non-Canonical Address" ],
+ [0xe3, "Non-Canonical Address" ],
+ [0xe4, "Non-Canonical Address" ],
+ [0xe5, "Non-Canonical Address" ],
+ [0xe6, "Non-Canonical Address" ],
+ [0xe7, "Non-Canonical Address" ],
+ [0xe8, "Non-Canonical Address" ],
+ [0xe9, "Non-Canonical Address" ],
+ [0xea, "Non-Canonical Address" ],
+ [0xeb, "Non-Canonical Address" ],
+ [0xec, "Non-Canonical Address" ],
+ [0xed, "Non-Canonical Address" ],
+ [0xee, "Non-Canonical Address" ],
+ [0xef, "Non-Canonical Address" ],
+ [0xf0, "Non-Canonical Address" ],
+ [0xf1, "Non-Canonical Address" ],
+ [0xf2, "Non-Canonical Address" ],
+ [0xf3, "Non-Canonical Address" ],
+ [0xf4, "Non-Canonical Address" ],
+ [0xf5, "Non-Canonical Address" ],
+ [0xf6, "Non-Canonical Address" ],
+ [0xf7, "Non-Canonical Address" ],
+ [0xf8, "Non-Canonical Address" ],
+ [0xf9, "Non-Canonical Address" ],
+ [0xfa, "Non-Canonical Address" ],
+ [0xfb, "Non-Canonical Address" ],
+ [0xfc, "Non-Canonical Address" ],
+ [0xfd, "Non-Canonical Address" ],
+ [0xfe, "Non-Canonical Address" ],
+ [0xff, "Non-Canonical Address" ],
+])
+# Remaining LAN driver configuration/statistics fields.
+# NOTE(review): "LANDriverNumber" breaks the "LANdriver..." capitalization
+# used by the neighboring variables -- confirm before renaming, since other
+# parts of this file may reference it by this exact spelling.
+LANDriverNumber = uint8("lan_driver_number", "LAN Driver Number")
+LANdriverNodeAddress = bytes("lan_dvr_node_addr", "LAN Driver Node Address", 6)
+LANdriverRecvSize = uint32("lan_drv_rcv_size", "LAN Driver Receive Size")
+LANdriverReserved = uint16("lan_drv_reserved", "LAN Driver Reserved")
+LANdriverSendRetries = uint16("lan_drv_snd_retries", "LAN Driver Send Retries")
+LANdriverSharingFlags = uint16("lan_drv_share", "LAN Driver Sharing Flags")
+LANdriverShortName = fw_string("lan_drv_short_name", "LAN Driver Short Name", 40)
+LANdriverSlot = uint16("lan_drv_slot", "LAN Driver Slot")
+LANdriverSrcRouting = uint32("lan_drv_src_route", "LAN Driver Source Routing")
+LANdriverTransportTime = uint16("lan_drv_trans_time", "LAN Driver Transport Time")
+# 16-bit date/time fields; .NWDate()/.NWTime() (defined elsewhere in this
+# file) presumably tag the field for NetWare packed date/time display --
+# the pattern is applied consistently to every *Date/*Time pair below.
+LastAccessedDate = uint16("last_access_date", "Last Accessed Date")
+LastAccessedDate.NWDate()
+LastAccessedTime = uint16("last_access_time", "Last Accessed Time")
+LastAccessedTime.NWTime()
+LastGarbCollect = uint32("last_garbage_collect", "Last Garbage Collection")
+LastInstance = uint32("last_instance", "Last Instance")
+LastRecordSeen = uint16("last_record_seen", "Last Record Seen")
+# NOTE(review): display strings below omit the leading "Last" ("Search
+# Index", "Sequence Number") -- possibly intentional; confirm upstream
+# before normalizing, as they are user-visible field labels.
+LastSearchIndex = uint16("last_search_index", "Search Index")
+LastSeen = uint32("last_seen", "Last Seen")
+LastSequenceNumber = uint16("last_sequence_number", "Sequence Number")
+# NOTE(review): a 64-bit length would normally occupy 8 bytes, but this
+# declares a 64-byte field -- verify against the NCP structure definition.
+Length64bit = bytes("length_64bit", "64bit Length", 64)
+Level = uint8("level", "Level")
+LFSCounters = uint32("lfs_counters", "LFS Counters")
+LimboDataStreamsCount = uint32("limbo_data_streams_count", "Limbo Data Streams Count")
+limbCount = uint32("limb_count", "Limb Count")
+# NOTE(review): the 0x00000002 bit's filter name ("scan_entire_folder")
+# and its label ("Wild Search") disagree -- confirm which is intended.
+limbFlags = bitfield32("limb_flags", "Limb Flags", [
+ bf_boolean32(0x00000002, "scan_entire_folder", "Wild Search"),
+ bf_boolean32(0x00000004, "scan_files_only", "Scan Files Only"),
+ bf_boolean32(0x00000008, "scan_folders_only", "Scan Folders Only"),
+ bf_boolean32(0x00000010, "allow_system", "Allow System Files and Folders"),
+ bf_boolean32(0x00000020, "allow_hidden", "Allow Hidden Files and Folders"),
+])
+
+limbScanNum = uint32("limb_scan_num", "Limb Scan Number")
+LimboUsed = uint32("limbo_used", "Limbo Used")
+LoadedNameSpaces = uint8("loaded_name_spaces", "Loaded Name Spaces")
+LocalConnectionID = uint32("local_connection_id", "Local Connection ID")
+LocalConnectionID.Display("BASE_HEX")
+LocalMaxPacketSize = uint32("local_max_packet_size", "Local Max Packet Size")
+LocalMaxSendSize = uint32("local_max_send_size", "Local Max Send Size")
+LocalMaxRecvSize = uint32("local_max_recv_size", "Local Max Recv Size")
+LocalLoginInfoCcode = uint8("local_login_info_ccode", "Local Login Info C Code")
+LocalTargetSocket = uint32("local_target_socket", "Local Target Socket")
+LocalTargetSocket.Display("BASE_HEX")
+LockAreaLen = uint32("lock_area_len", "Lock Area Length")
+LockAreasStartOffset = uint32("lock_areas_start_offset", "Lock Areas Start Offset")
+LockTimeout = uint16("lock_timeout", "Lock Timeout")
+Locked = val_string8("locked", "Locked Flag", [
+ [ 0x00, "Not Locked Exclusively" ],
+ [ 0x01, "Locked Exclusively" ],
+])
+# File/record locking fields and their value tables.
+LockFlag = val_string8("lock_flag", "Lock Flag", [
+ [ 0x00, "Not Locked, Log for Future Exclusive Lock" ],
+ [ 0x01, "Exclusive Lock (Read/Write)" ],
+ [ 0x02, "Log for Future Shared Lock"],
+ [ 0x03, "Shareable Lock (Read-Only)" ],
+ [ 0xfe, "Locked by a File Lock" ],
+ [ 0xff, "Locked by Begin Share File Set" ],
+])
+LockName = nstring8("lock_name", "Lock Name")
+LockStatus = val_string8("lock_status", "Lock Status", [
+ [ 0x00, "Locked Exclusive" ],
+ [ 0x01, "Locked Shareable" ],
+ [ 0x02, "Logged" ],
+ [ 0x06, "Lock is Held by TTS"],
+])
+# Per-connection lock-wait state (distinct from the per-file LockStatus
+# above, though both share the "Lock Status" display label).
+ConnLockStatus = val_string8("conn_lock_status", "Lock Status", [
+ [ 0x00, "Normal (connection free to run)" ],
+ [ 0x01, "Waiting on physical record lock" ],
+ [ 0x02, "Waiting on a file lock" ],
+ [ 0x03, "Waiting on a logical record lock"],
+ [ 0x04, "Waiting on a semaphore"],
+])
+LockType = val_string8("lock_type", "Lock Type", [
+ [ 0x00, "Locked" ],
+ [ 0x01, "Open Shareable" ],
+ [ 0x02, "Logged" ],
+ [ 0x03, "Open Normal" ],
+ [ 0x06, "TTS Holding Lock" ],
+ [ 0x07, "Transaction Flag Set on This File" ],
+])
+# High/low bytes of the log-file flag word, modeled as two 8-bit bitfields.
+LogFileFlagHigh = bitfield8("log_file_flag_high", "Log File Flag (byte 2)", [
+ bf_boolean8(0x80, "log_flag_call_back", "Call Back Requested" ),
+])
+LogFileFlagLow = bitfield8("log_file_flag_low", "Log File Flag", [
+ bf_boolean8(0x01, "log_flag_lock_file", "Lock File Immediately" ),
+])
+LoggedObjectID = uint32("logged_object_id", "Logged in Object ID")
+LoggedObjectID.Display("BASE_HEX")
+LoggedCount = uint16("logged_count", "Logged Count")
+LogicalConnectionNumber = uint16("logical_connection_number", "Logical Connection Number", ENC_BIG_ENDIAN)
+LogicalDriveCount = uint8("logical_drive_count", "Logical Drive Count")
+LogicalDriveNumber = uint8("logical_drive_number", "Logical Drive Number")
+# NOTE(review): display label "LogicalLockThreshold" lacks spaces, unlike
+# every neighboring label -- user-visible string, so left untouched here.
+LogicalLockThreshold = uint8("logical_lock_threshold", "LogicalLockThreshold")
+LogicalRecordName = nstring8("logical_record_name", "Logical Record Name")
+LoginKey = bytes("login_key", "Login Key", 8)
+LogLockType = uint8("log_lock_type", "Log Lock Type")
+LogTtlRxPkts = uint32("log_ttl_rx_pkts", "Total Received Packets")
+LogTtlTxPkts = uint32("log_ttl_tx_pkts", "Total Transmitted Packets")
+LongName = fw_string("long_name", "Long Name", 32)
+LRUBlockWasDirty = uint16("lru_block_was_dirty", "LRU Block Was Dirty")
+
+# Macintosh name-space file attributes.
+# NOTE(review): bit 0x0008 has no entry (search-mode bits are 0x0001,
+# 0x0002, 0x0004 then the list jumps to 0x0010), and 0x4000 is also
+# unlisted -- confirm against the NCP Mac attribute definition whether
+# those bits are genuinely reserved.
+MacAttr = bitfield16("mac_attr", "Attributes", [
+ bf_boolean16(0x0001, "mac_attr_smode1", "Search Mode"),
+ bf_boolean16(0x0002, "mac_attr_smode2", "Search Mode"),
+ bf_boolean16(0x0004, "mac_attr_smode3", "Search Mode"),
+ bf_boolean16(0x0010, "mac_attr_transaction", "Transaction"),
+ bf_boolean16(0x0020, "mac_attr_index", "Index"),
+ bf_boolean16(0x0040, "mac_attr_r_audit", "Read Audit"),
+ bf_boolean16(0x0080, "mac_attr_w_audit", "Write Audit"),
+ bf_boolean16(0x0100, "mac_attr_r_only", "Read Only"),
+ bf_boolean16(0x0200, "mac_attr_hidden", "Hidden"),
+ bf_boolean16(0x0400, "mac_attr_system", "System"),
+ bf_boolean16(0x0800, "mac_attr_execute_only", "Execute Only"),
+ bf_boolean16(0x1000, "mac_attr_subdirectory", "Subdirectory"),
+ bf_boolean16(0x2000, "mac_attr_archive", "Archive"),
+ bf_boolean16(0x8000, "mac_attr_share", "Shareable File"),
+])
+MACBackupDate = uint16("mac_backup_date", "Mac Backup Date")
+MACBackupDate.NWDate()
+MACBackupTime = uint16("mac_backup_time", "Mac Backup Time")
+MACBackupTime.NWTime()
+MacBaseDirectoryID = uint32("mac_base_directory_id", "Mac Base Directory ID", ENC_BIG_ENDIAN)
+MacBaseDirectoryID.Display("BASE_HEX")
+MACCreateDate = uint16("mac_create_date", "Mac Create Date")
+MACCreateDate.NWDate()
+MACCreateTime = uint16("mac_create_time", "Mac Create Time")
+MACCreateTime.NWTime()
+MacDestinationBaseID = uint32("mac_destination_base_id", "Mac Destination Base ID")
+MacDestinationBaseID.Display("BASE_HEX")
+MacFinderInfo = bytes("mac_finder_info", "Mac Finder Information", 32)
+MacLastSeenID = uint32("mac_last_seen_id", "Mac Last Seen ID")
+MacLastSeenID.Display("BASE_HEX")
+MacSourceBaseID = uint32("mac_source_base_id", "Mac Source Base ID")
+MacSourceBaseID.Display("BASE_HEX")
+# Server limit/maximum counters.
+MajorVersion = uint32("major_version", "Major Version")
+MaxBytes = uint16("max_bytes", "Maximum Number of Bytes")
+MaxDataStreams = uint32("max_data_streams", "Maximum Data Streams")
+MaxDirDepth = uint32("max_dir_depth", "Maximum Directory Depth")
+MaximumSpace = uint16("max_space", "Maximum Space")
+MaxNumOfConn = uint32("max_num_of_conn", "Maximum Number of Connections")
+MaxNumOfLANS = uint32("max_num_of_lans", "Maximum Number Of LAN's")
+MaxNumOfMedias = uint32("max_num_of_medias", "Maximum Number Of Media's")
+MaxNumOfNmeSps = uint32("max_num_of_nme_sps", "Maximum Number Of Name Spaces")
+MaxNumOfSpoolPr = uint32("max_num_of_spool_pr", "Maximum Number Of Spool Printers")
+MaxNumOfStacks = uint32("max_num_of_stacks", "Maximum Number Of Stacks")
+MaxNumOfUsers = uint32("max_num_of_users", "Maximum Number Of Users")
+MaxNumOfVol = uint32("max_num_of_vol", "Maximum Number of Volumes")
+MaxReadDataReplySize = uint16("max_read_data_reply_size", "Max Read Data Reply Size")
+MaxSpace = uint32("maxspace", "Maximum Space")
+MaxSpace64 = uint64("maxspace64", "Maximum Space")
+MaxUsedDynamicSpace = uint32("max_used_dynamic_space", "Max Used Dynamic Space")
+MediaList = uint32("media_list", "Media List")
+MediaListCount = uint32("media_list_count", "Media List Count")
+MediaName = nstring8("media_name", "Media Name")
+MediaNumber = uint32("media_number", "Media Number")
+MaxReplyObjectIDCount = uint8("max_reply_obj_id_count", "Max Reply Object ID Count")
+MediaObjectType = val_string8("media_object_type", "Object Type", [
+ [ 0x00, "Adapter" ],
+ [ 0x01, "Changer" ],
+ [ 0x02, "Removable Device" ],
+ [ 0x03, "Device" ],
+ [ 0x04, "Removable Media" ],
+ [ 0x05, "Partition" ],
+ [ 0x06, "Slot" ],
+ [ 0x07, "Hotfix" ],
+ [ 0x08, "Mirror" ],
+ [ 0x09, "Parity" ],
+ [ 0x0a, "Volume Segment" ],
+ [ 0x0b, "Volume" ],
+ [ 0x0c, "Clone" ],
+ [ 0x0d, "Fixed Media" ],
+ [ 0x0e, "Unknown" ],
+])
+MemberName = nstring8("member_name", "Member Name")
+# Bindery member types; this table repeats the first entries of the larger
+# ObjectType table further down in this file.
+MemberType = val_string16("member_type", "Member Type", [
+ [ 0x0000, "Unknown" ],
+ [ 0x0001, "User" ],
+ [ 0x0002, "User group" ],
+ [ 0x0003, "Print queue" ],
+ [ 0x0004, "NetWare file server" ],
+ [ 0x0005, "Job server" ],
+ [ 0x0006, "Gateway" ],
+ [ 0x0007, "Print server" ],
+ [ 0x0008, "Archive queue" ],
+ [ 0x0009, "Archive server" ],
+ [ 0x000a, "Job queue" ],
+ [ 0x000b, "Administration" ],
+ [ 0x0021, "NAS SNA gateway" ],
+ [ 0x0026, "Remote bridge server" ],
+ [ 0x0027, "TCP/IP gateway" ],
+])
+MessageLanguage = uint32("message_language", "NLM Language")
+MigratedFiles = uint32("migrated_files", "Migrated Files")
+MigratedSectors = uint32("migrated_sectors", "Migrated Sectors")
+MinorVersion = uint32("minor_version", "Minor Version")
+MinSpaceLeft64 = uint64("min_space_left64", "Minimum Space Left")
+Minute = uint8("s_minute", "Minutes")
+MixedModePathFlag = val_string8("mixed_mode_path_flag", "Mixed Mode Path Flag", [
+ [ 0x00, "Mixed mode path handling is not available"],
+ [ 0x01, "Mixed mode path handling is available"],
+])
+ModifiedDate = uint16("modified_date", "Modified Date")
+ModifiedDate.NWDate()
+ModifiedTime = uint16("modified_time", "Modified Time")
+ModifiedTime.NWTime()
+ModifierID = uint32("modifier_id", "Modifier ID", ENC_BIG_ENDIAN)
+ModifierID.Display("BASE_HEX")
+# NOTE(review): the filter names below (modify_dos_read/write/open/...)
+# do not correspond to their display labels (Attributes, Creation Date,
+# ...) -- they look copied from an access-rights mask.  Renaming would
+# change user-visible display filters, so they are only flagged here.
+ModifyDOSInfoMask = bitfield16("modify_dos_info_mask", "Modify DOS Info Mask", [
+ bf_boolean16(0x0002, "modify_dos_read", "Attributes"),
+ bf_boolean16(0x0004, "modify_dos_write", "Creation Date"),
+ bf_boolean16(0x0008, "modify_dos_open", "Creation Time"),
+ bf_boolean16(0x0010, "modify_dos_create", "Creator ID"),
+ bf_boolean16(0x0020, "modify_dos_delete", "Archive Date"),
+ bf_boolean16(0x0040, "modify_dos_parent", "Archive Time"),
+ bf_boolean16(0x0080, "modify_dos_search", "Archiver ID"),
+ bf_boolean16(0x0100, "modify_dos_mdate", "Modify Date"),
+ bf_boolean16(0x0200, "modify_dos_mtime", "Modify Time"),
+ bf_boolean16(0x0400, "modify_dos_mid", "Modifier ID"),
+ bf_boolean16(0x0800, "modify_dos_laccess", "Last Access"),
+ bf_boolean16(0x1000, "modify_dos_inheritance", "Inheritance"),
+ bf_boolean16(0x2000, "modify_dos_max_space", "Maximum Space"),
+])
+Month = val_string8("s_month", "Month", [
+ [ 0x01, "January"],
+ [ 0x02, "February"],
+ [ 0x03, "March"],
+ [ 0x04, "April"],
+ [ 0x05, "May"],
+ [ 0x06, "June"],
+ [ 0x07, "July"],
+ [ 0x08, "August"],
+ [ 0x09, "September"],
+ [ 0x0a, "October"],
+ [ 0x0b, "November"],
+ [ 0x0c, "December"],
+])
+
+MoreFlag = val_string8("more_flag", "More Flag", [
+ [ 0x00, "No More Segments/Entries Available" ],
+ [ 0x01, "More Segments/Entries Available" ],
+ [ 0xff, "More Segments/Entries Available" ],
+])
+# NOTE(review): 0x01 mapping to "No More Properties Available" (same as
+# 0x00, with 0xff meaning "more") is suspicious next to MoreFlag above,
+# where 0x01 means "more" -- confirm against the NCP bindery scan reply
+# definition before changing.
+MoreProperties = val_string8("more_properties", "More Properties", [
+ [ 0x00, "No More Properties Available" ],
+ [ 0x01, "No More Properties Available" ],
+ [ 0xff, "More Properties Available" ],
+])
+
+# Generic name fields and name-space related declarations.
+Name = nstring8("name", "Name")
+Name12 = fw_string("name12", "Name", 12)
+NameLen = uint8("name_len", "Name Space Length")
+NameLength = uint8("name_length", "Name Length")
+NameList = uint32("name_list", "Name List")
+#
+# XXX - should this value be used to interpret the characters in names,
+# search patterns, and the like?
+#
+# We need to handle character sets better, e.g. translating strings
+# from whatever character set they are in the packet (DOS/Windows code
+# pages, ISO character sets, UNIX EUC character sets, UTF-8, UCS-2/Unicode,
+# Mac character sets, etc.) into UCS-4 or UTF-8 and storing them as such
+# in the protocol tree, and displaying them as best we can.
+#
+NameSpace = val_string8("name_space", "Name Space", [
+ [ 0x00, "DOS" ],
+ [ 0x01, "MAC" ],
+ [ 0x02, "NFS" ],
+ [ 0x03, "FTAM" ],
+ [ 0x04, "OS/2, Long" ],
+])
+NamesSpaceInfoMask = bitfield16("ns_info_mask", "Names Space Info Mask", [
+ bf_boolean16(0x0001, "ns_info_mask_modify", "Modify Name"),
+ bf_boolean16(0x0002, "ns_info_mask_fatt", "File Attributes"),
+ bf_boolean16(0x0004, "ns_info_mask_cdate", "Creation Date"),
+ bf_boolean16(0x0008, "ns_info_mask_ctime", "Creation Time"),
+ bf_boolean16(0x0010, "ns_info_mask_owner", "Owner ID"),
+ bf_boolean16(0x0020, "ns_info_mask_adate", "Archive Date"),
+ bf_boolean16(0x0040, "ns_info_mask_atime", "Archive Time"),
+ bf_boolean16(0x0080, "ns_info_mask_aid", "Archiver ID"),
+ bf_boolean16(0x0100, "ns_info_mask_udate", "Update Date"),
+ bf_boolean16(0x0200, "ns_info_mask_utime", "Update Time"),
+ bf_boolean16(0x0400, "ns_info_mask_uid", "Update ID"),
+ bf_boolean16(0x0800, "ns_info_mask_acc_date", "Access Date"),
+ bf_boolean16(0x1000, "ns_info_mask_max_acc_mask", "Inheritance"),
+ bf_boolean16(0x2000, "ns_info_mask_max_space", "Maximum Space"),
+])
+NameSpaceName = nstring8("name_space_name", "Name Space Name")
+nameType = uint32("name_type", "nameType")
+# NCP extension and connection-statistics fields.
+NCPdataSize = uint32("ncp_data_size", "NCP Data Size")
+NCPEncodedStringsBits = uint32("ncp_encoded_strings_bits", "NCP Encoded Strings Bits")
+NCPextensionMajorVersion = uint8("ncp_extension_major_version", "NCP Extension Major Version")
+NCPextensionMinorVersion = uint8("ncp_extension_minor_version", "NCP Extension Minor Version")
+NCPextensionName = nstring8("ncp_extension_name", "NCP Extension Name")
+NCPextensionNumber = uint32("ncp_extension_number", "NCP Extension Number")
+NCPextensionNumber.Display("BASE_HEX")
+NCPExtensionNumbers = uint32("ncp_extension_numbers", "NCP Extension Numbers")
+NCPextensionRevisionNumber = uint8("ncp_extension_revision_number", "NCP Extension Revision Number")
+NCPPeakStaInUse = uint32("ncp_peak_sta_in_use", "Peak Number of Connections since Server was brought up")
+NCPStaInUseCnt = uint32("ncp_sta_in_use", "Number of Workstations Connected to Server")
+NDSRequestFlags = bitfield16("nds_request_flags", "NDS Request Flags", [
+ bf_boolean16(0x0001, "nds_request_flags_output", "Output Fields"),
+ bf_boolean16(0x0002, "nds_request_flags_no_such_entry", "No Such Entry"),
+ bf_boolean16(0x0004, "nds_request_flags_local_entry", "Local Entry"),
+ bf_boolean16(0x0008, "nds_request_flags_type_ref", "Type Referral"),
+ bf_boolean16(0x0010, "nds_request_flags_alias_ref", "Alias Referral"),
+ bf_boolean16(0x0020, "nds_request_flags_req_cnt", "Request Count"),
+ bf_boolean16(0x0040, "nds_request_flags_req_data_size", "Request Data Size"),
+ bf_boolean16(0x0080, "nds_request_flags_reply_data_size", "Reply Data Size"),
+ bf_boolean16(0x0100, "nds_request_flags_trans_ref", "Transport Referral"),
+ bf_boolean16(0x0200, "nds_request_flags_trans_ref2", "Transport Referral"),
+ bf_boolean16(0x0400, "nds_request_flags_up_ref", "Up Referral"),
+ bf_boolean16(0x0800, "nds_request_flags_dn_ref", "Down Referral"),
+])
+NDSStatus = uint32("nds_status", "NDS Status")
+NetBIOSBroadcastWasPropagated = uint32("netbios_broadcast_was_propagated", "NetBIOS Broadcast Was Propagated")
+NetIDNumber = uint32("net_id_number", "Net ID Number")
+NetIDNumber.Display("BASE_HEX")
+NetAddress = nbytes32("address", "Address")
+NetStatus = uint16("net_status", "Network Status")
+NetWareAccessHandle = bytes("netware_access_handle", "NetWare Access Handle", 6)
+NetworkAddress = uint32("network_address", "Network Address")
+NetworkAddress.Display("BASE_HEX")
+NetworkNodeAddress = bytes("network_node_address", "Network Node Address", 6)
+NetworkNumber = uint32("network_number", "Network Number")
+NetworkNumber.Display("BASE_HEX")
+#
+# XXX - this should have the "ipx_socket_vals" value_string table
+# from "packet-ipx.c".
+#
+NetworkSocket = uint16("network_socket", "Network Socket")
+NetworkSocket.Display("BASE_HEX")
+NewAccessRights = bitfield16("new_access_rights_mask", "New Access Rights", [
+ bf_boolean16(0x0001, "new_access_rights_read", "Read"),
+ bf_boolean16(0x0002, "new_access_rights_write", "Write"),
+ bf_boolean16(0x0004, "new_access_rights_open", "Open"),
+ bf_boolean16(0x0008, "new_access_rights_create", "Create"),
+ bf_boolean16(0x0010, "new_access_rights_delete", "Delete"),
+ bf_boolean16(0x0020, "new_access_rights_parental", "Parental"),
+ bf_boolean16(0x0040, "new_access_rights_search", "Search"),
+ bf_boolean16(0x0080, "new_access_rights_modify", "Modify"),
+ bf_boolean16(0x0100, "new_access_rights_supervisor", "Supervisor"),
+])
+# "New <x>" request fields and iteration ("Next <x>") cursor fields.
+NewDirectoryID = uint32("new_directory_id", "New Directory ID", ENC_BIG_ENDIAN)
+NewDirectoryID.Display("BASE_HEX")
+NewEAHandle = uint32("new_ea_handle", "New EA Handle")
+NewEAHandle.Display("BASE_HEX")
+NewFileName = fw_string("new_file_name", "New File Name", 14)
+# NOTE(review): despite the "_len" suffix this is a length-prefixed string
+# (nstring8), not a length field -- the suffix appears to denote "with
+# length prefix"; confirm against the sibling *NameLen declarations.
+NewFileNameLen = nstring8("new_file_name_len", "New File Name")
+NewFileSize = uint32("new_file_size", "New File Size")
+NewPassword = nstring8("new_password", "New Password")
+NewPath = nstring8("new_path", "New Path")
+NewPosition = uint8("new_position", "New Position")
+NewObjectName = nstring8("new_object_name", "New Object Name")
+NextCntBlock = uint32("next_cnt_block", "Next Count Block")
+NextHugeStateInfo = bytes("next_huge_state_info", "Next Huge State Info", 16)
+nextLimbScanNum = uint32("next_limb_scan_num", "Next Limb Scan Number")
+NextObjectID = uint32("next_object_id", "Next Object ID", ENC_BIG_ENDIAN)
+NextObjectID.Display("BASE_HEX")
+NextRecord = uint32("next_record", "Next Record")
+NextRequestRecord = uint16("next_request_record", "Next Request Record")
+NextSearchIndex = uint16("next_search_index", "Next Search Index")
+# Two widths of "Next Search Number" exist for different NCP verbs.
+NextSearchNumber = uint16("next_search_number", "Next Search Number")
+NextSearchNum = uint32("nxt_search_num", "Next Search Number")
+nextStartingNumber = uint32("next_starting_number", "Next Starting Number")
+NextTrusteeEntry = uint32("next_trustee_entry", "Next Trustee Entry")
+NextVolumeNumber = uint32("next_volume_number", "Next Volume Number")
+# NetWare Loadable Module (NLM) enumeration fields.
+NLMBuffer = nstring8("nlm_buffer", "Buffer")
+NLMcount = uint32("nlm_count", "NLM Count")
+NLMFlags = bitfield8("nlm_flags", "Flags", [
+ bf_boolean8(0x01, "nlm_flags_reentrant", "ReEntrant"),
+ bf_boolean8(0x02, "nlm_flags_multiple", "Can Load Multiple Times"),
+ bf_boolean8(0x04, "nlm_flags_synchronize", "Synchronize Start"),
+ bf_boolean8(0x08, "nlm_flags_pseudo", "PseudoPreemption"),
+])
+NLMLoadOptions = uint32("nlm_load_options", "NLM Load Options")
+NLMName = stringz("nlm_name_stringz", "NLM Name")
+NLMNumber = uint32("nlm_number", "NLM Number")
+NLMNumbers = uint32("nlm_numbers", "NLM Numbers")
+NLMsInList = uint32("nlms_in_list", "NLM's in List")
+NLMStartNumber = uint32("nlm_start_num", "NLM Start Number")
+NLMType = val_string8("nlm_type", "NLM Type", [
+ [ 0x00, "Generic NLM (.NLM)" ],
+ [ 0x01, "LAN Driver (.LAN)" ],
+ [ 0x02, "Disk Driver (.DSK)" ],
+ [ 0x03, "Name Space Support Module (.NAM)" ],
+ [ 0x04, "Utility or Support Program (.NLM)" ],
+ [ 0x05, "Mirrored Server Link (.MSL)" ],
+ [ 0x06, "OS NLM (.NLM)" ],
+ [ 0x07, "Paged High OS NLM (.NLM)" ],
+ [ 0x08, "Host Adapter Module (.HAM)" ],
+ [ 0x09, "Custom Device Module (.CDM)" ],
+ [ 0x0a, "File System Engine (.NLM)" ],
+ [ 0x0b, "Real Mode NLM (.NLM)" ],
+ [ 0x0c, "Hidden NLM (.NLM)" ],
+ [ 0x15, "NICI Support (.NLM)" ],
+ [ 0x16, "NICI Support (.NLM)" ],
+ [ 0x17, "Cryptography (.NLM)" ],
+ [ 0x18, "Encryption (.NLM)" ],
+ [ 0x19, "NICI Support (.NLM)" ],
+ [ 0x1c, "NICI Support (.NLM)" ],
+])
+nodeFlags = uint32("node_flags", "Node Flags")
+nodeFlags.Display("BASE_HEX")
+NoMoreMemAvlCnt = uint32("no_more_mem_avail", "No More Memory Available Count")
+NonDedFlag = boolean8("non_ded_flag", "Non Dedicated Flag")
+NonFreeableAvailableSubAllocSectors = uint32("non_freeable_avail_sub_alloc_sectors", "Non Freeable Available Sub Alloc Sectors")
+NonFreeableLimboSectors = uint32("non_freeable_limbo_sectors", "Non Freeable Limbo Sectors")
+NotUsableSubAllocSectors = uint32("not_usable_sub_alloc_sectors", "Not Usable Sub Alloc Sectors")
+NotYetPurgeableBlocks = uint32("not_yet_purgeable_blocks", "Not Yet Purgeable Blocks")
+NSInfoBitMask = uint32("ns_info_bit_mask", "Name Space Info Bit Mask")
+# Novell SecretStore (NSSO) request/reply flag words.  Separate bitfields
+# are declared per verb direction (In = request, Out = reply).
+NSSOAllInFlags = bitfield32("nsso_all_in_flags", "SecretStore All Input Flags",[
+ bf_boolean32(0x00000010, "nsso_all_unicode", "Unicode Data"),
+ bf_boolean32(0x00000080, "nsso_set_tree", "Set Tree"),
+ bf_boolean32(0x00000200, "nsso_destroy_ctx", "Destroy Context"),
+])
+NSSOGetServiceInFlags = bitfield32("nsso_get_svc_in_flags", "SecretStore Get Service Flags",[
+ bf_boolean32(0x00000100, "nsso_get_ctx", "Get Context"),
+])
+NSSOReadInFlags = bitfield32("nsso_read_in_flags", "SecretStore Read Flags",[
+ bf_boolean32(0x00000001, "nsso_rw_enh_prot", "Read/Write Enhanced Protection"),
+ bf_boolean32(0x00000008, "nsso_repair", "Repair SecretStore"),
+])
+NSSOReadOrUnlockInFlags = bitfield32("nsso_read_or_unlock_in_flags", "SecretStore Read or Unlock Flags",[
+ bf_boolean32(0x00000004, "nsso_ep_master_pwd", "Master Password used instead of ENH Password"),
+])
+NSSOUnlockInFlags = bitfield32("nsso_unlock_in_flags", "SecretStore Unlock Flags",[
+ bf_boolean32(0x00000004, "nsso_rmv_lock", "Remove Lock from Store"),
+])
+NSSOWriteInFlags = bitfield32("nsso_write_in_flags", "SecretStore Write Flags",[
+ bf_boolean32(0x00000001, "nsso_enh_prot", "Enhanced Protection"),
+ bf_boolean32(0x00000002, "nsso_create_id", "Create ID"),
+ bf_boolean32(0x00000040, "nsso_ep_pwd_used", "Enhanced Protection Password Used"),
+])
+NSSOContextOutFlags = bitfield32("nsso_cts_out_flags", "Type of Context",[
+ bf_boolean32(0x00000001, "nsso_ds_ctx", "DSAPI Context"),
+ bf_boolean32(0x00000080, "nsso_ldap_ctx", "LDAP Context"),
+ bf_boolean32(0x00000200, "nsso_dc_ctx", "Reserved"),
+])
+NSSOGetServiceOutFlags = bitfield32("nsso_get_svc_out_flags", "SecretStore Status Flags",[
+ bf_boolean32(0x00400000, "nsso_mstr_pwd", "Master Password Present"),
+])
+NSSOGetServiceReadOutFlags = bitfield32("nsso_get_svc_read_out_flags", "SecretStore Status Flags",[
+ bf_boolean32(0x00800000, "nsso_mp_disabled", "Master Password Disabled"),
+])
+NSSOReadOutFlags = bitfield32("nsso_read_out_flags", "SecretStore Read Flags",[
+ bf_boolean32(0x00010000, "nsso_secret_locked", "Enhanced Protection Lock on Secret"),
+ bf_boolean32(0x00020000, "nsso_secret_not_init", "Secret Not Yet Initialized"),
+ bf_boolean32(0x00040000, "nsso_secret_marked", "Secret Marked for Enhanced Protection"),
+ bf_boolean32(0x00080000, "nsso_secret_not_sync", "Secret Not Yet Synchronized in NDS"),
+ bf_boolean32(0x00200000, "nsso_secret_enh_pwd", "Enhanced Protection Password on Secret"),
+])
+NSSOReadOutStatFlags = bitfield32("nsso_read_out_stat_flags", "SecretStore Read Status Flags",[
+ bf_boolean32(0x00100000, "nsso_admin_mod", "Admin Modified Secret Last"),
+])
+NSSOVerb = val_string8("nsso_verb", "SecretStore Verb", [
+ [ 0x00, "Query Server" ],
+ [ 0x01, "Read App Secrets" ],
+ [ 0x02, "Write App Secrets" ],
+ [ 0x03, "Add Secret ID" ],
+ [ 0x04, "Remove Secret ID" ],
+ [ 0x05, "Remove SecretStore" ],
+ [ 0x06, "Enumerate SecretID's" ],
+ [ 0x07, "Unlock Store" ],
+ [ 0x08, "Set Master Password" ],
+ [ 0x09, "Get Service Information" ],
+])
+NSSpecificInfo = fw_string("ns_specific_info", "Name Space Specific Info", 512)
+# Assorted server/object counters ("Number of ...").
+NumberOfActiveTasks = uint8("num_of_active_tasks", "Number of Active Tasks")
+NumberOfAllocs = uint32("num_of_allocs", "Number of Allocations")
+NumberOfCPUs = uint32("number_of_cpus", "Number of CPU's")
+NumberOfDataStreams = uint16("number_of_data_streams", "Number of Data Streams")
+NumberOfDataStreamsLong = uint32("number_of_data_streams_long", "Number of Data Streams")
+NumberOfDynamicMemoryAreas = uint16("number_of_dynamic_memory_areas", "Number Of Dynamic Memory Areas")
+NumberOfEntries = uint8("number_of_entries", "Number of Entries")
+NumberOfEntriesLong = uint32("number_of_entries_long", "Number of Entries")
+NumberOfLocks = uint8("number_of_locks", "Number of Locks")
+NumberOfMinutesToDelay = uint32("number_of_minutes_to_delay", "Number of Minutes to Delay")
+NumberOfNCPExtensions = uint32("number_of_ncp_extensions", "Number Of NCP Extensions")
+NumberOfNSLoaded = uint16("number_of_ns_loaded", "Number Of Name Spaces Loaded")
+NumberOfProtocols = uint8("number_of_protocols", "Number of Protocols")
+NumberOfRecords = uint16("number_of_records", "Number of Records")
+NumberOfReferencedPublics = uint32("num_of_ref_publics", "Number of Referenced Public Symbols")
+NumberOfSemaphores = uint16("number_of_semaphores", "Number Of Semaphores")
+NumberOfServiceProcesses = uint8("number_of_service_processes", "Number Of Service Processes")
+NumberOfSetCategories = uint32("number_of_set_categories", "Number Of Set Categories")
+NumberOfSMs = uint32("number_of_sms", "Number Of Storage Medias")
+NumberOfStations = uint8("number_of_stations", "Number of Stations")
+NumBytes = uint16("num_bytes", "Number of Bytes")
+NumBytesLong = uint32("num_bytes_long", "Number of Bytes")
+NumOfCCinPkt = uint32("num_of_cc_in_pkt", "Number of Custom Counters in Packet")
+NumOfChecks = uint32("num_of_checks", "Number of Checks")
+NumOfEntries = uint32("num_of_entries", "Number of Entries")
+NumOfFilesMigrated = uint32("num_of_files_migrated", "Number Of Files Migrated")
+NumOfGarbageColl = uint32("num_of_garb_coll", "Number of Garbage Collections")
+NumOfNCPReqs = uint32("num_of_ncp_reqs", "Number of NCP Requests since Server was brought up")
+NumOfSegments = uint32("num_of_segments", "Number of Segments")
+
+# Bindery object fields.
+ObjectCount = uint32("object_count", "Object Count")
+ObjectFlags = val_string8("object_flags", "Object Flags", [
+ [ 0x00, "Dynamic object" ],
+ [ 0x01, "Static object" ],
+])
+# NOTE(review): the filter name "object_has_properites" misspells
+# "properties".  It is a user-visible display-filter name, so fixing it
+# would break existing saved filters -- flagged, not changed.
+ObjectHasProperties = val_string8("object_has_properites", "Object Has Properties", [
+ [ 0x00, "No properties" ],
+ [ 0xff, "One or more properties" ],
+])
+ObjectID = uint32("object_id", "Object ID", ENC_BIG_ENDIAN)
+ObjectID.Display('BASE_HEX')
+ObjectIDCount = uint16("object_id_count", "Object ID Count")
+ObjectIDInfo = uint32("object_id_info", "Object Information")
+ObjectInfoReturnCount = uint32("object_info_rtn_count", "Object Information Count")
+ObjectName = nstring8("object_name", "Object Name")
+ObjectNameLen = fw_string("object_name_len", "Object Name", 48)
+ObjectNameStringz = stringz("object_name_stringz", "Object Name")
+ObjectNumber = uint32("object_number", "Object Number")
+# Bindery object security byte: low nibble encodes the read level, high
+# nibble the write level, so the table enumerates each combination.
+ObjectSecurity = val_string8("object_security", "Object Security", [
+ [ 0x00, "Object Read (Anyone) / Object Write (Anyone)" ],
+ [ 0x01, "Object Read (Logged in) / Object Write (Anyone)" ],
+ [ 0x02, "Object Read (Logged in as Object) / Object Write (Anyone)" ],
+ [ 0x03, "Object Read (Supervisor) / Object Write (Anyone)" ],
+ [ 0x04, "Object Read (Operating System Only) / Object Write (Anyone)" ],
+ [ 0x10, "Object Read (Anyone) / Object Write (Logged in)" ],
+ [ 0x11, "Object Read (Logged in) / Object Write (Logged in)" ],
+ [ 0x12, "Object Read (Logged in as Object) / Object Write (Logged in)" ],
+ [ 0x13, "Object Read (Supervisor) / Object Write (Logged in)" ],
+ [ 0x14, "Object Read (Operating System Only) / Object Write (Logged in)" ],
+ [ 0x20, "Object Read (Anyone) / Object Write (Logged in as Object)" ],
+ [ 0x21, "Object Read (Logged in) / Object Write (Logged in as Object)" ],
+ [ 0x22, "Object Read (Logged in as Object) / Object Write (Logged in as Object)" ],
+ [ 0x23, "Object Read (Supervisor) / Object Write (Logged in as Object)" ],
+ [ 0x24, "Object Read (Operating System Only) / Object Write (Logged in as Object)" ],
+ [ 0x30, "Object Read (Anyone) / Object Write (Supervisor)" ],
+ [ 0x31, "Object Read (Logged in) / Object Write (Supervisor)" ],
+ [ 0x32, "Object Read (Logged in as Object) / Object Write (Supervisor)" ],
+ [ 0x33, "Object Read (Supervisor) / Object Write (Supervisor)" ],
+ [ 0x34, "Object Read (Operating System Only) / Object Write (Supervisor)" ],
+ [ 0x40, "Object Read (Anyone) / Object Write (Operating System Only)" ],
+ [ 0x41, "Object Read (Logged in) / Object Write (Operating System Only)" ],
+ [ 0x42, "Object Read (Logged in as Object) / Object Write (Operating System Only)" ],
+ [ 0x43, "Object Read (Supervisor) / Object Write (Operating System Only)" ],
+ [ 0x44, "Object Read (Operating System Only) / Object Write (Operating System Only)" ],
+])
+# ---------------------------------------------------------------------
+# Bindery object / server type codes and open-create result flags.
+# These statements are generator input: each call (val_string8/16,
+# bitfield8, ...) presumably builds a Wireshark field record; the
+# helpers are not visible in this hunk -- defined elsewhere in the
+# script.
+# ---------------------------------------------------------------------
+#
+# XXX - should this use the "server_vals[]" value_string array from
+# "packet-ipx.c"?
+#
+# XXX - should this list be merged with that list? There are some
+# oddities, e.g. this list has 0x03f5 for "Microsoft SQL Server", but
+# the list from "packet-ipx.c" has 0xf503 for that - is that just
+# byte-order confusion?
+#
+ObjectType = val_string16("object_type", "Object Type", [
+ [ 0x0000, "Unknown" ],
+ [ 0x0001, "User" ],
+ [ 0x0002, "User group" ],
+ [ 0x0003, "Print queue" ],
+ [ 0x0004, "NetWare file server" ],
+ [ 0x0005, "Job server" ],
+ [ 0x0006, "Gateway" ],
+ [ 0x0007, "Print server" ],
+ [ 0x0008, "Archive queue" ],
+ [ 0x0009, "Archive server" ],
+ [ 0x000a, "Job queue" ],
+ [ 0x000b, "Administration" ],
+ [ 0x0021, "NAS SNA gateway" ],
+ [ 0x0026, "Remote bridge server" ],
+ [ 0x0027, "TCP/IP gateway" ],
+ [ 0x0047, "Novell Print Server" ],
+ [ 0x004b, "Btrieve Server" ],
+ [ 0x004c, "NetWare SQL Server" ],
+ [ 0x0064, "ARCserve" ],
+ [ 0x0066, "ARCserve 3.0" ],
+ [ 0x0076, "NetWare SQL" ],
+ [ 0x00a0, "Gupta SQL Base Server" ],
+ [ 0x00a1, "Powerchute" ],
+ [ 0x0107, "NetWare Remote Console" ],
+ [ 0x01cb, "Shiva NetModem/E" ],
+ [ 0x01cc, "Shiva LanRover/E" ],
+ [ 0x01cd, "Shiva LanRover/T" ],
+ [ 0x01d8, "Castelle FAXPress Server" ],
+ [ 0x01da, "Castelle Print Server" ],
+ [ 0x01dc, "Castelle Fax Server" ],
+ [ 0x0200, "Novell SQL Server" ],
+ [ 0x023a, "NetWare Lanalyzer Agent" ],
+ [ 0x023c, "DOS Target Service Agent" ],
+ [ 0x023f, "NetWare Server Target Service Agent" ],
+ [ 0x024f, "Appletalk Remote Access Service" ],
+ [ 0x0263, "NetWare Management Agent" ],
+ [ 0x0264, "Global MHS" ],
+ [ 0x0265, "SNMP" ],
+ [ 0x026a, "NetWare Management/NMS Console" ],
+ [ 0x026b, "NetWare Time Synchronization" ],
+ [ 0x0273, "Nest Device" ],
+ [ 0x0274, "GroupWise Message Multiple Servers" ],
+ [ 0x0278, "NDS Replica Server" ],
+ [ 0x0282, "NDPS Service Registry Service" ],
+ [ 0x028a, "MPR/IPX Address Mapping Gateway" ],
+ [ 0x028b, "ManageWise" ],
+ [ 0x0293, "NetWare 6" ],
+ [ 0x030c, "HP JetDirect" ],
+ [ 0x0328, "Watcom SQL Server" ],
+ [ 0x0355, "Backup Exec" ],
+ [ 0x039b, "Lotus Notes" ],
+ [ 0x03e1, "Univel Server" ],
+ [ 0x03f5, "Microsoft SQL Server" ],
+ [ 0x055e, "Lexmark Print Server" ],
+ [ 0x0640, "Microsoft Gateway Services for NetWare" ],
+ [ 0x064e, "Microsoft Internet Information Server" ],
+ [ 0x077b, "Advantage Database Server" ],
+ [ 0x07a7, "Backup Exec Job Queue" ],
+ [ 0x07a8, "Backup Exec Job Manager" ],
+ [ 0x07a9, "Backup Exec Job Service" ],
+ [ 0x5555, "Site Lock" ],
+ [ 0x8202, "NDPS Broker" ],
+])
+# Whether an op-lock callback was registered by an open/create request.
+OCRetFlags = val_string8("o_c_ret_flags", "Open Create Return Flags", [
+ [ 0x00, "No CallBack has been registered (No Op-Lock)" ],
+ [ 0x01, "Request has been registered for CallBack (Op-Lock)" ],
+])
+# Server platform discriminators (classic NetWare vs. Open Enterprise
+# Server variants).
+OESServer = val_string8("oes_server", "Type of Novell Server", [
+ [ 0x00, "NetWare" ],
+ [ 0x01, "OES" ],
+ [ 0x02, "OES 64bit" ],
+])
+
+OESLinuxOrNetWare = val_string8("oeslinux_or_netware", "Kernel Type", [
+ [ 0x00, "NetWare" ],
+ [ 0x01, "Linux" ],
+])
+
+# ---------------------------------------------------------------------
+# File open/create fields: counters, action/mode/rights bitfields, and
+# OS/version scalars. The third argument of bytes()/fw_string() is the
+# fixed field length in octets.
+# ---------------------------------------------------------------------
+OldestDeletedFileAgeInTicks = uint32("oldest_deleted_file_age_in_ticks", "Oldest Deleted File Age in Ticks")
+OldFileName = bytes("old_file_name", "Old File Name", 15)
+OldFileSize = uint32("old_file_size", "Old File Size")
+OpenCount = uint16("open_count", "Open Count")
+# What the server actually did in response to an open/create request.
+OpenCreateAction = bitfield8("open_create_action", "Open Create Action", [
+ bf_boolean8(0x01, "open_create_action_opened", "Opened"),
+ bf_boolean8(0x02, "open_create_action_created", "Created"),
+ bf_boolean8(0x04, "open_create_action_replaced", "Replaced"),
+ bf_boolean8(0x08, "open_create_action_compressed", "Compressed"),
+ bf_boolean8(0x80, "open_create_action_read_only", "Read Only"),
+])
+# What the client asked for. Note bits 0x04 and 0x10 are unassigned
+# here -- presumably reserved in the NCP spec; confirm before reusing.
+OpenCreateMode = bitfield8("open_create_mode", "Open Create Mode", [
+ bf_boolean8(0x01, "open_create_mode_open", "Open existing file (file must exist)"),
+ bf_boolean8(0x02, "open_create_mode_replace", "Replace existing file"),
+ bf_boolean8(0x08, "open_create_mode_create", "Create new file or subdirectory (file or subdirectory cannot exist)"),
+ bf_boolean8(0x20, "open_create_mode_64bit", "Open 64-bit Access"),
+ bf_boolean8(0x40, "open_create_mode_ro", "Open with Read Only Access"),
+ bf_boolean8(0x80, "open_create_mode_oplock", "Open Callback (Op-Lock)"),
+])
+OpenForReadCount = uint16("open_for_read_count", "Open For Read Count")
+OpenForWriteCount = uint16("open_for_write_count", "Open For Write Count")
+OpenRights = bitfield8("open_rights", "Open Rights", [
+ bf_boolean8(0x01, "open_rights_read_only", "Read Only"),
+ bf_boolean8(0x02, "open_rights_write_only", "Write Only"),
+ bf_boolean8(0x04, "open_rights_deny_read", "Deny Read"),
+ bf_boolean8(0x08, "open_rights_deny_write", "Deny Write"),
+ bf_boolean8(0x10, "open_rights_compat", "Compatibility"),
+ bf_boolean8(0x40, "open_rights_write_thru", "File Write Through"),
+])
+OptionNumber = uint8("option_number", "Option Number")
+# NOTE(review): lowercase leading letter breaks the PascalCase naming
+# used by the surrounding definitions -- presumably historical.
+originalSize = uint32("original_size", "Original Size")
+OSLanguageID = uint8("os_language_id", "OS Language ID")
+OSMajorVersion = uint8("os_major_version", "OS Major Version")
+OSMinorVersion = uint8("os_minor_version", "OS Minor Version")
+OSRevision = uint32("os_revision", "OS Revision")
+OtherFileForkSize = uint32("other_file_fork_size", "Other File Fork Size")
+OtherFileForkFAT = uint32("other_file_fork_fat", "Other File Fork FAT Entry")
+OutgoingPacketDiscardedNoTurboBuffer = uint16("outgoing_packet_discarded_no_turbo_buffer", "Outgoing Packet Discarded No Turbo Buffer")
+
+# Server LAN/router statistics counters.
+PacketsDiscardedByHopCount = uint16("packets_discarded_by_hop_count", "Packets Discarded By Hop Count")
+PacketsDiscardedUnknownNet = uint16("packets_discarded_unknown_net", "Packets Discarded Unknown Net")
+PacketsFromInvalidConnection = uint16("packets_from_invalid_connection", "Packets From Invalid Connection")
+PacketsReceivedDuringProcessing = uint16("packets_received_during_processing", "Packets Received During Processing")
+PacketsWithBadRequestType = uint16("packets_with_bad_request_type", "Packets With Bad Request Type")
+PacketsWithBadSequenceNumber = uint16("packets_with_bad_sequence_number", "Packets With Bad Sequence Number")
+PageTableOwnerFlag = uint32("page_table_owner_flag", "Page Table Owner")
+ParentID = uint32("parent_id", "Parent ID")
+ParentID.Display("BASE_HEX")
+ParentBaseID = uint32("parent_base_id", "Parent Base ID")
+ParentBaseID.Display("BASE_HEX")
+ParentDirectoryBase = uint32("parent_directory_base", "Parent Directory Base")
+ParentDOSDirectoryBase = uint32("parent_dos_directory_base", "Parent DOS Directory Base")
+ParentObjectNumber = uint32("parent_object_number", "Parent Object Number")
+ParentObjectNumber.Display("BASE_HEX")
+Password = nstring8("password", "Password")
+PathBase = uint8("path_base", "Path Base")
+PathComponentCount = uint16("path_component_count", "Path Component Count")
+PathComponentSize = uint16("path_component_size", "Path Component Size")
+PathCookieFlags = val_string16("path_cookie_flags", "Path Cookie Flags", [
+ [ 0x0000, "Last component is Not a File Name" ],
+ [ 0x0001, "Last component is a File Name" ],
+])
+# ---------------------------------------------------------------------
+# Path, print, property, purge and physical-drive fields.
+# ---------------------------------------------------------------------
+PathCount = uint8("path_count", "Path Count")
+#
+# XXX - in at least some File Search Continue requests, the string
+# length value is longer than the string, and there's a NUL, followed
+# by other non-zero cruft, in the string. Should this be an
+# "nstringz8", with FT_UINT_STRINGZPAD added to support it? And
+# does that apply to any other values?
+#
+Path = nstring8("path", "Path")
+Path16 = nstring16("path16", "Path")
+PathAndName = stringz("path_and_name", "Path and Name")
+PendingIOCommands = uint16("pending_io_commands", "Pending IO Commands")
+PhysicalDiskNumber = uint8("physical_disk_number", "Physical Disk Number")
+PhysicalDriveCount = uint8("physical_drive_count", "Physical Drive Count")
+PhysicalLockThreshold = uint8("physical_lock_threshold", "Physical Lock Threshold")
+PingVersion = uint16("ping_version", "Ping Version")
+PoolName = stringz("pool_name", "Pool Name")
+PositiveAcknowledgesSent = uint16("positive_acknowledges_sent", "Positive Acknowledges Sent")
+PreCompressedSectors = uint32("pre_compressed_sectors", "Precompressed Sectors")
+PreviousRecord = uint32("previous_record", "Previous Record")
+PrimaryEntry = uint32("primary_entry", "Primary Entry")
+# NOTE(review): the 0x10 label "Create" is oddly terse next to its
+# siblings, and the "cr" abbreviation could also mean carriage-return
+# handling -- verify against the NCP print-services documentation.
+PrintFlags = bitfield8("print_flags", "Print Flags", [
+ bf_boolean8(0x08, "print_flags_ff", "Suppress Form Feeds"),
+ bf_boolean8(0x10, "print_flags_cr", "Create"),
+ bf_boolean8(0x20, "print_flags_del_spool", "Delete Spool File after Printing"),
+ bf_boolean8(0x40, "print_flags_exp_tabs", "Expand Tabs in the File"),
+ bf_boolean8(0x80, "print_flags_banner", "Print Banner Page"),
+])
+PrinterHalted = val_string8("printer_halted", "Printer Halted", [
+ [ 0x00, "Printer is not Halted" ],
+ [ 0xff, "Printer is Halted" ],
+])
+PrinterOffLine = val_string8( "printer_offline", "Printer Off-Line", [
+ [ 0x00, "Printer is On-Line" ],
+ [ 0xff, "Printer is Off-Line" ],
+])
+PrintServerVersion = uint8("print_server_version", "Print Server Version")
+Priority = uint32("priority", "Priority")
+Privileges = uint32("privileges", "Login Privileges")
+ProcessorType = val_string8("processor_type", "Processor Type", [
+ [ 0x00, "Motorola 68000" ],
+ [ 0x01, "Intel 8088 or 8086" ],
+ [ 0x02, "Intel 80286" ],
+])
+ProDOSInfo = bytes("pro_dos_info", "Pro DOS Info", 6)
+ProductMajorVersion = uint16("product_major_version", "Product Major Version")
+ProductMinorVersion = uint16("product_minor_version", "Product Minor Version")
+ProductRevisionVersion = uint8("product_revision_version", "Product Revision Version")
+projectedCompSize = uint32("projected_comp_size", "Projected Compression Size")
+PropertyHasMoreSegments = val_string8("property_has_more_segments",
+ "Property Has More Segments", [
+ [ 0x00, "Is last segment" ],
+ [ 0xff, "More segments are available" ],
+])
+PropertyName = nstring8("property_name", "Property Name")
+PropertyName16 = fw_string("property_name_16", "Property Name", 16)
+PropertyData = bytes("property_data", "Property Data", 128)
+PropertySegment = uint8("property_segment", "Property Segment")
+PropertyType = val_string8("property_type", "Property Type", [
+ [ 0x00, "Display Static property" ],
+ [ 0x01, "Display Dynamic property" ],
+ [ 0x02, "Set Static property" ],
+ [ 0x03, "Set Dynamic property" ],
+])
+PropertyValue = fw_string("property_value", "Property Value", 128)
+ProposedMaxSize = uint16("proposed_max_size", "Proposed Max Size")
+ProposedMaxSize64 = uint64("proposed_max_size64", "Proposed Max Size")
+protocolFlags = uint32("protocol_flags", "Protocol Flags")
+protocolFlags.Display("BASE_HEX")
+PurgeableBlocks = uint32("purgeable_blocks", "Purgeable Blocks")
+PurgeCcode = uint32("purge_c_code", "Purge Completion Code")
+PurgeCount = uint32("purge_count", "Purge Count")
+# NOTE(review): 0x0000 and 0xffff both decode to "Do not Purge All" --
+# presumably both sentinels occur on the wire; confirm this duplication
+# is intentional rather than a copy/paste slip.
+PurgeFlags = val_string16("purge_flags", "Purge Flags", [
+ [ 0x0000, "Do not Purge All" ],
+ [ 0x0001, "Purge All" ],
+ [ 0xffff, "Do not Purge All" ],
+])
+PurgeList = uint32("purge_list", "Purge List")
+PhysicalDiskChannel = uint8("physical_disk_channel", "Physical Disk Channel")
+PhysicalDriveType = val_string8("physical_drive_type", "Physical Drive Type", [
+ [ 0x01, "XT" ],
+ [ 0x02, "AT" ],
+ [ 0x03, "SCSI" ],
+ [ 0x04, "Disk Coprocessor" ],
+ [ 0x05, "PS/2 with MFM Controller" ],
+ [ 0x06, "PS/2 with ESDI Controller" ],
+ [ 0x07, "Convergent Technology SBIC" ],
+])
+PhysicalReadErrors = uint16("physical_read_errors", "Physical Read Errors")
+PhysicalReadRequests = uint32("physical_read_requests", "Physical Read Requests")
+PhysicalWriteErrors = uint16("physical_write_errors", "Physical Write Errors")
+PhysicalWriteRequests = uint32("physical_write_requests", "Physical Write Requests")
+PrintToFileFlag = boolean8("print_to_file_flag", "Print to File Flag")
+
+# Print-queue management fields.
+QueueID = uint32("queue_id", "Queue ID")
+QueueID.Display("BASE_HEX")
+QueueName = nstring8("queue_name", "Queue Name")
+QueueStartPosition = uint32("queue_start_position", "Queue Start Position")
+QueueStatus = bitfield8("queue_status", "Queue Status", [
+ bf_boolean8(0x01, "queue_status_new_jobs", "Operator does not want to add jobs to the queue"),
+ bf_boolean8(0x02, "queue_status_pserver", "Operator does not want additional servers attaching"),
+ bf_boolean8(0x04, "queue_status_svc_jobs", "Operator does not want servers to service jobs"),
+])
+QueueType = uint16("queue_type", "Queue Type")
+QueueingVersion = uint8("qms_version", "QMS Version")
+
+# ---------------------------------------------------------------------
+# Record locking, rename, AFP request bitmaps, reserved padding and
+# rights fields.
+# ---------------------------------------------------------------------
+ReadBeyondWrite = uint16("read_beyond_write", "Read Beyond Write")
+RecordLockCount = uint16("rec_lock_count", "Record Lock Count")
+RecordStart = uint32("record_start", "Record Start")
+RecordEnd = uint32("record_end", "Record End")
+# NOTE(review): 0x0000 = "In Use", 0xffff = "Not In Use" reads
+# inverted at first glance -- presumably 0xffff is the free-slot
+# sentinel; confirm against the NCP record-lock documentation.
+RecordInUseFlag = val_string16("record_in_use", "Record in Use", [
+ [ 0x0000, "Record In Use" ],
+ [ 0xffff, "Record Not In Use" ],
+])
+RedirectedPrinter = uint8( "redirected_printer", "Redirected Printer" )
+ReferenceCount = uint32("reference_count", "Reference Count")
+RelationsCount = uint16("relations_count", "Relations Count")
+ReMirrorCurrentOffset = uint32("re_mirror_current_offset", "ReMirror Current Offset")
+ReMirrorDriveNumber = uint8("re_mirror_drive_number", "ReMirror Drive Number")
+RemoteMaxPacketSize = uint32("remote_max_packet_size", "Remote Max Packet Size")
+RemoteTargetID = uint32("remote_target_id", "Remote Target ID")
+RemoteTargetID.Display("BASE_HEX")
+RemovableFlag = uint16("removable_flag", "Removable Flag")
+RemoveOpenRights = bitfield8("remove_open_rights", "Remove Open Rights", [
+ bf_boolean8(0x01, "remove_open_rights_ro", "Read Only"),
+ bf_boolean8(0x02, "remove_open_rights_wo", "Write Only"),
+ bf_boolean8(0x04, "remove_open_rights_dr", "Deny Read"),
+ bf_boolean8(0x08, "remove_open_rights_dw", "Deny Write"),
+ bf_boolean8(0x10, "remove_open_rights_comp", "Compatibility"),
+ bf_boolean8(0x40, "remove_open_rights_write_thru", "Write Through"),
+])
+RenameFlag = bitfield8("rename_flag", "Rename Flag", [
+ bf_boolean8(0x01, "rename_flag_ren", "Rename to Myself allows file to be renamed to its original name"),
+ bf_boolean8(0x02, "rename_flag_comp", "Compatibility allows files that are marked read only to be opened with read/write access"),
+ bf_boolean8(0x04, "rename_flag_no", "Name Only renames only the specified name space entry name"),
+])
+RepliesCancelled = uint16("replies_cancelled", "Replies Cancelled")
+ReplyBuffer = nstring8("reply_buffer", "Reply Buffer")
+ReplyBufferSize = uint32("reply_buffer_size", "Reply Buffer Size")
+ReplyQueueJobNumbers = uint32("reply_queue_job_numbers", "Reply Queue Job Numbers")
+# Which AFP entry attributes the client wants returned (note 0x0080 is
+# unassigned in this map).
+RequestBitMap = bitfield16("request_bit_map", "Request Bit Map", [
+ bf_boolean16(0x0001, "request_bit_map_ret_afp_ent", "AFP Entry ID"),
+ bf_boolean16(0x0002, "request_bit_map_ret_data_fork", "Data Fork Length"),
+ bf_boolean16(0x0004, "request_bit_map_ret_res_fork", "Resource Fork Length"),
+ bf_boolean16(0x0008, "request_bit_map_ret_num_off", "Number of Offspring"),
+ bf_boolean16(0x0010, "request_bit_map_ret_owner", "Owner ID"),
+ bf_boolean16(0x0020, "request_bit_map_ret_short", "Short Name"),
+ bf_boolean16(0x0040, "request_bit_map_ret_acc_priv", "Access Privileges"),
+ bf_boolean16(0x0100, "request_bit_map_ratt", "Return Attributes"),
+ bf_boolean16(0x0200, "request_bit_map_ret_afp_parent", "AFP Parent Entry ID"),
+ bf_boolean16(0x0400, "request_bit_map_ret_cr_date", "Creation Date"),
+ bf_boolean16(0x0800, "request_bit_map_ret_acc_date", "Access Date"),
+ bf_boolean16(0x1000, "request_bit_map_ret_mod_date", "Modify Date&Time"),
+ bf_boolean16(0x2000, "request_bit_map_ret_bak_date", "Backup Date&Time"),
+ bf_boolean16(0x4000, "request_bit_map_ret_finder", "Finder Info"),
+ bf_boolean16(0x8000, "request_bit_map_ret_long_nm", "Long Name"),
+])
+ResourceForkLen = uint32("resource_fork_len", "Resource Fork Len")
+RequestCode = val_string8("request_code", "Request Code", [
+ [ 0x00, "Change Logged in to Temporary Authenticated" ],
+ [ 0x01, "Change Temporary Authenticated to Logged in" ],
+])
+RequestData = nstring8("request_data", "Request Data")
+RequestsReprocessed = uint16("requests_reprocessed", "Requests Reprocessed")
+# Fixed-size reserved/padding fields of various lengths (octets).
+Reserved = uint8( "reserved", "Reserved" )
+Reserved2 = bytes("reserved2", "Reserved", 2)
+Reserved3 = bytes("reserved3", "Reserved", 3)
+Reserved4 = bytes("reserved4", "Reserved", 4)
+Reserved5 = bytes("reserved5", "Reserved", 5)
+Reserved6 = bytes("reserved6", "Reserved", 6)
+Reserved8 = bytes("reserved8", "Reserved", 8)
+Reserved10 = bytes("reserved10", "Reserved", 10)
+Reserved12 = bytes("reserved12", "Reserved", 12)
+Reserved16 = bytes("reserved16", "Reserved", 16)
+Reserved20 = bytes("reserved20", "Reserved", 20)
+Reserved28 = bytes("reserved28", "Reserved", 28)
+Reserved36 = bytes("reserved36", "Reserved", 36)
+Reserved44 = bytes("reserved44", "Reserved", 44)
+Reserved48 = bytes("reserved48", "Reserved", 48)
+Reserved50 = bytes("reserved50", "Reserved", 50)
+Reserved56 = bytes("reserved56", "Reserved", 56)
+Reserved64 = bytes("reserved64", "Reserved", 64)
+Reserved120 = bytes("reserved120", "Reserved", 120)
+ReservedOrDirectoryNumber = uint32("reserved_or_directory_number", "Reserved or Directory Number (see EAFlags)")
+ReservedOrDirectoryNumber.Display("BASE_HEX")
+ResourceCount = uint32("resource_count", "Resource Count")
+ResourceForkSize = uint32("resource_fork_size", "Resource Fork Size")
+ResourceName = stringz("resource_name", "Resource Name")
+ResourceSignature = fw_string("resource_sig", "Resource Signature", 4)
+RestoreTime = eptime("restore_time", "Restore Time")
+Restriction = uint32("restriction", "Disk Space Restriction")
+RestrictionQuad = uint64("restriction_quad", "Restriction")
+RestrictionsEnforced = val_string8("restrictions_enforced", "Disk Restrictions Enforce Flag", [
+ [ 0x00, "Enforced" ],
+ [ 0xff, "Not Enforced" ],
+])
+# ---------------------------------------------------------------------
+# Return-information selection mask and rights grant/revoke bitfields.
+# ---------------------------------------------------------------------
+ReturnInfoCount = uint32("return_info_count", "Return Information Count")
+ReturnInfoMask = bitfield16("ret_info_mask", "Return Information", [
+ bf_boolean16(0x0001, "ret_info_mask_fname", "Return File Name Information"),
+ bf_boolean16(0x0002, "ret_info_mask_alloc", "Return Allocation Space Information"),
+ bf_boolean16(0x0004, "ret_info_mask_attr", "Return Attribute Information"),
+ bf_boolean16(0x0008, "ret_info_mask_size", "Return Size Information"),
+ bf_boolean16(0x0010, "ret_info_mask_tspace", "Return Total Space Information"),
+ bf_boolean16(0x0020, "ret_info_mask_eattr", "Return Extended Attributes Information"),
+ bf_boolean16(0x0040, "ret_info_mask_arch", "Return Archive Information"),
+ bf_boolean16(0x0080, "ret_info_mask_mod", "Return Modify Information"),
+ bf_boolean16(0x0100, "ret_info_mask_create", "Return Creation Information"),
+ bf_boolean16(0x0200, "ret_info_mask_ns", "Return Name Space Information"),
+ bf_boolean16(0x0400, "ret_info_mask_dir", "Return Directory Information"),
+ bf_boolean16(0x0800, "ret_info_mask_rights", "Return Rights Information"),
+ bf_boolean16(0x1000, "ret_info_mask_id", "Return ID Information"),
+ bf_boolean16(0x2000, "ret_info_mask_ns_attr", "Return Name Space Attributes Information"),
+ bf_boolean16(0x4000, "ret_info_mask_actual", "Return Actual Information"),
+ bf_boolean16(0x8000, "ret_info_mask_logical", "Return Logical Information"),
+])
+ReturnedListCount = uint32("returned_list_count", "Returned List Count")
+Revision = uint32("revision", "Revision")
+RevisionNumber = uint8("revision_number", "Revision")
+RevQueryFlag = val_string8("rev_query_flag", "Revoke Rights Query Flag", [
+ [ 0x00, "Do not query the locks engine for access rights" ],
+ [ 0x01, "Query the locks engine and return the access rights" ],
+])
+# Grant and revoke masks use identical bit layouts (classic NetWare
+# trustee rights).
+RightsGrantMask = bitfield8("rights_grant_mask", "Grant Rights", [
+ bf_boolean8(0x01, "rights_grant_mask_read", "Read"),
+ bf_boolean8(0x02, "rights_grant_mask_write", "Write"),
+ bf_boolean8(0x04, "rights_grant_mask_open", "Open"),
+ bf_boolean8(0x08, "rights_grant_mask_create", "Create"),
+ bf_boolean8(0x10, "rights_grant_mask_del", "Delete"),
+ bf_boolean8(0x20, "rights_grant_mask_parent", "Parental"),
+ bf_boolean8(0x40, "rights_grant_mask_search", "Search"),
+ bf_boolean8(0x80, "rights_grant_mask_mod", "Modify"),
+])
+RightsRevokeMask = bitfield8("rights_revoke_mask", "Revoke Rights", [
+ bf_boolean8(0x01, "rights_revoke_mask_read", "Read"),
+ bf_boolean8(0x02, "rights_revoke_mask_write", "Write"),
+ bf_boolean8(0x04, "rights_revoke_mask_open", "Open"),
+ bf_boolean8(0x08, "rights_revoke_mask_create", "Create"),
+ bf_boolean8(0x10, "rights_revoke_mask_del", "Delete"),
+ bf_boolean8(0x20, "rights_revoke_mask_parent", "Parental"),
+ bf_boolean8(0x40, "rights_revoke_mask_search", "Search"),
+ bf_boolean8(0x80, "rights_revoke_mask_mod", "Modify"),
+])
+RIPSocketNumber = uint16("rip_socket_num", "RIP Socket Number")
+RIPSocketNumber.Display("BASE_HEX")
+RouterDownFlag = boolean8("router_dn_flag", "Router Down Flag")
+# NOTE(review): only the success code is mapped; non-zero completion
+# codes will fall through to the value_string default -- presumably
+# handled generically elsewhere in the generator.
+RPCccode = val_string16("rpc_c_code", "RPC Completion Code", [
+ [ 0x0000, "Successful" ],
+])
+RTagNumber = uint32("r_tag_num", "Resource Tag Number")
+RTagNumber.Display("BASE_HEX")
+RpyNearestSrvFlag = boolean8("rpy_nearest_srv_flag", "Reply to Nearest Server Flag")
+
+
+# ---------------------------------------------------------------------
+# Search attribute bitmaps, secret-store verbs, security and semaphore
+# fields.
+# ---------------------------------------------------------------------
+SalvageableFileEntryNumber = uint32("salvageable_file_entry_number", "Salvageable File Entry Number")
+SalvageableFileEntryNumber.Display("BASE_HEX")
+SAPSocketNumber = uint16("sap_socket_number", "SAP Socket Number")
+SAPSocketNumber.Display("BASE_HEX")
+ScanItems = uint32("scan_items", "Number of Items returned from Scan")
+SearchAttributes = bitfield8("sattr", "Search Attributes", [
+ bf_boolean8(0x01, "sattr_ronly", "Read-Only Files Allowed"),
+ bf_boolean8(0x02, "sattr_hid", "Hidden Files Allowed"),
+ bf_boolean8(0x04, "sattr_sys", "System Files Allowed"),
+ bf_boolean8(0x08, "sattr_exonly", "Execute-Only Files Allowed"),
+ bf_boolean8(0x10, "sattr_sub", "Subdirectories Only"),
+ bf_boolean8(0x20, "sattr_archive", "Archive"),
+ bf_boolean8(0x40, "sattr_execute_confirm", "Execute Confirm"),
+ bf_boolean8(0x80, "sattr_shareable", "Shareable"),
+])
+# 16-bit variant of the search-attribute map; bits 0x0100-0x4000 are
+# unassigned here.
+SearchAttributesLow = bitfield16("search_att_low", "Search Attributes", [
+ bf_boolean16(0x0001, "search_att_read_only", "Read-Only"),
+ bf_boolean16(0x0002, "search_att_hidden", "Hidden Files Allowed"),
+ bf_boolean16(0x0004, "search_att_system", "System"),
+ bf_boolean16(0x0008, "search_att_execute_only", "Execute-Only"),
+ bf_boolean16(0x0010, "search_att_sub", "Subdirectories Only"),
+ bf_boolean16(0x0020, "search_att_archive", "Archive"),
+ bf_boolean16(0x0040, "search_att_execute_confirm", "Execute Confirm"),
+ bf_boolean16(0x0080, "search_att_shareable", "Shareable"),
+ bf_boolean16(0x8000, "search_attr_all_files", "All Files and Directories"),
+])
+SearchBitMap = bitfield8("search_bit_map", "Search Bit Map", [
+ bf_boolean8(0x01, "search_bit_map_hidden", "Hidden"),
+ bf_boolean8(0x02, "search_bit_map_sys", "System"),
+ bf_boolean8(0x04, "search_bit_map_sub", "Subdirectory"),
+ bf_boolean8(0x08, "search_bit_map_files", "Files"),
+])
+SearchConnNumber = uint32("search_conn_number", "Search Connection Number")
+SearchInstance = uint32("search_instance", "Search Instance")
+SearchNumber = uint32("search_number", "Search Number")
+SearchPattern = nstring8("search_pattern", "Search Pattern")
+SearchPattern16 = nstring16("search_pattern_16", "Search Pattern")
+SearchSequence = bytes("search_sequence", "Search Sequence", 9)
+SearchSequenceWord = uint16("search_sequence_word", "Search Sequence", ENC_BIG_ENDIAN)
+Second = uint8("s_second", "Seconds")
+SecondsRelativeToTheYear2000 = uint32("sec_rel_to_y2k", "Seconds Relative to the Year 2000")
+SecretStoreVerb = val_string8("ss_verb", "Secret Store Verb",[
+ [ 0x00, "Query Server" ],
+ [ 0x01, "Read App Secrets" ],
+ [ 0x02, "Write App Secrets" ],
+ [ 0x03, "Add Secret ID" ],
+ [ 0x04, "Remove Secret ID" ],
+ [ 0x05, "Remove SecretStore" ],
+ [ 0x06, "Enumerate Secret IDs" ],
+ [ 0x07, "Unlock Store" ],
+ [ 0x08, "Set Master Password" ],
+ [ 0x09, "Get Service Information" ],
+])
+SecurityEquivalentList = fw_string("security_equiv_list", "Security Equivalent List", 128)
+SecurityFlag = bitfield8("security_flag", "Security Flag", [
+ bf_boolean8(0x01, "checksumming", "Checksumming"),
+ bf_boolean8(0x02, "signature", "Signature"),
+ bf_boolean8(0x04, "complete_signatures", "Complete Signatures"),
+ bf_boolean8(0x08, "encryption", "Encryption"),
+ bf_boolean8(0x80, "large_internet_packets", "Large Internet Packets (LIP) Disabled"),
+])
+SecurityRestrictionVersion = uint8("security_restriction_version", "Security Restriction Version")
+SectorsPerBlock = uint8("sectors_per_block", "Sectors Per Block")
+SectorsPerBlockLong = uint32("sectors_per_block_long", "Sectors Per Block")
+SectorsPerCluster = uint16("sectors_per_cluster", "Sectors Per Cluster" )
+SectorsPerClusterLong = uint32("sectors_per_cluster_long", "Sectors Per Cluster" )
+SectorsPerTrack = uint8("sectors_per_track", "Sectors Per Track")
+SectorSize = uint32("sector_size", "Sector Size")
+SemaphoreHandle = uint32("semaphore_handle", "Semaphore Handle")
+SemaphoreName = nstring8("semaphore_name", "Semaphore Name")
+SemaphoreOpenCount = uint8("semaphore_open_count", "Semaphore Open Count")
+SemaphoreShareCount = uint8("semaphore_share_count", "Semaphore Share Count")
+SemaphoreTimeOut = uint16("semaphore_time_out", "Semaphore Time Out")
+SemaphoreValue = uint16("semaphore_value", "Semaphore Value")
+SendStatus = val_string8("send_status", "Send Status", [
+ [ 0x00, "Successful" ],
+ [ 0x01, "Illegal Station Number" ],
+ [ 0x02, "Client Not Logged In" ],
+ [ 0x03, "Client Not Accepting Messages" ],
+ [ 0x04, "Client Already has a Message" ],
+ [ 0x96, "No Alloc Space for the Message" ],
+ [ 0xfd, "Bad Station Number" ],
+ [ 0xff, "Failure" ],
+])
+# ---------------------------------------------------------------------
+# Server identity/statistics fields and SET console command metadata.
+# ---------------------------------------------------------------------
+SequenceByte = uint8("sequence_byte", "Sequence")
+SequenceNumber = uint32("sequence_number", "Sequence Number")
+SequenceNumber.Display("BASE_HEX")
+SequenceNumberLong = uint64("sequence_number64", "Sequence Number")
+SequenceNumberLong.Display("BASE_HEX")
+ServerAddress = bytes("server_address", "Server Address", 12)
+ServerAppNumber = uint16("server_app_num", "Server App Number")
+ServerID = uint32("server_id_number", "Server ID", ENC_BIG_ENDIAN )
+ServerID.Display("BASE_HEX")
+ServerInfoFlags = val_string16("server_info_flags", "Server Information Flags", [
+ [ 0x0000, "This server is not a member of a Cluster" ],
+ [ 0x0001, "This server is a member of a Cluster" ],
+])
+serverListFlags = uint32("server_list_flags", "Server List Flags")
+ServerName = fw_string("server_name", "Server Name", 48)
+serverName50 = fw_string("server_name50", "Server Name", 50)
+ServerNameLen = nstring8("server_name_len", "Server Name")
+ServerNameStringz = stringz("server_name_stringz", "Server Name")
+ServerNetworkAddress = bytes("server_network_address", "Server Network Address", 10)
+ServerNode = bytes("server_node", "Server Node", 6)
+ServerSerialNumber = uint32("server_serial_number", "Server Serial Number")
+ServerStation = uint8("server_station", "Server Station")
+ServerStationLong = uint32("server_station_long", "Server Station")
+ServerStationList = uint8("server_station_list", "Server Station List")
+ServerStatusRecord = fw_string("server_status_record", "Server Status Record", 64)
+ServerTaskNumber = uint8("server_task_number", "Server Task Number")
+ServerTaskNumberLong = uint32("server_task_number_long", "Server Task Number")
+ServerType = uint16("server_type", "Server Type")
+ServerType.Display("BASE_HEX")
+ServerUtilization = uint32("server_utilization", "Server Utilization")
+ServerUtilizationPercentage = uint8("server_utilization_percentage", "Server Utilization Percentage")
+# NOTE(review): filter name "Service_type" is capitalized, unlike every
+# other all-lowercase filter string here -- likely historical; renaming
+# would break saved display filters, so it is left as-is.
+# Values 0x0000-0x0027 mirror the head of the ObjectType table above.
+ServiceType = val_string16("Service_type", "Service Type", [
+ [ 0x0000, "Unknown" ],
+ [ 0x0001, "User" ],
+ [ 0x0002, "User group" ],
+ [ 0x0003, "Print queue" ],
+ [ 0x0004, "NetWare file server" ],
+ [ 0x0005, "Job server" ],
+ [ 0x0006, "Gateway" ],
+ [ 0x0007, "Print server" ],
+ [ 0x0008, "Archive queue" ],
+ [ 0x0009, "Archive server" ],
+ [ 0x000a, "Job queue" ],
+ [ 0x000b, "Administration" ],
+ [ 0x0021, "NAS SNA gateway" ],
+ [ 0x0026, "Remote bridge server" ],
+ [ 0x0027, "TCP/IP gateway" ],
+ [ 0xffff, "All Types" ],
+])
+SetCmdCategory = val_string8("set_cmd_category", "Set Command Category", [
+ [ 0x00, "Communications" ],
+ [ 0x01, "Memory" ],
+ [ 0x02, "File Cache" ],
+ [ 0x03, "Directory Cache" ],
+ [ 0x04, "File System" ],
+ [ 0x05, "Locks" ],
+ [ 0x06, "Transaction Tracking" ],
+ [ 0x07, "Disk" ],
+ [ 0x08, "Time" ],
+ [ 0x09, "NCP" ],
+ [ 0x0a, "Miscellaneous" ],
+ [ 0x0b, "Error Handling" ],
+ [ 0x0c, "Directory Services" ],
+ [ 0x0d, "MultiProcessor" ],
+ [ 0x0e, "Service Location Protocol" ],
+ [ 0x0f, "Licensing Services" ],
+])
+SetCmdFlags = bitfield8("set_cmd_flags", "Set Command Flags", [
+ bf_boolean8(0x01, "cmd_flags_startup_only", "Startup.ncf Only"),
+ bf_boolean8(0x02, "cmd_flags_hidden", "Hidden"),
+ bf_boolean8(0x04, "cmd_flags_advanced", "Advanced"),
+ bf_boolean8(0x08, "cmd_flags_later", "Restart Server Required to Take Effect"),
+ bf_boolean8(0x80, "cmd_flags_secure", "Console Secured"),
+])
+SetCmdName = stringz("set_cmd_name", "Set Command Name")
+# NOTE(review): 0x00 and 0x07 both map to "Numeric Value", and 0x03 is
+# absent -- verify against the SET command documentation whether 0x07
+# should be a distinct label.
+SetCmdType = val_string8("set_cmd_type", "Set Command Type", [
+ [ 0x00, "Numeric Value" ],
+ [ 0x01, "Boolean Value" ],
+ [ 0x02, "Ticks Value" ],
+ [ 0x04, "Time Value" ],
+ [ 0x05, "String Value" ],
+ [ 0x06, "Trigger Value" ],
+ [ 0x07, "Numeric Value" ],
+])
+SetCmdValueNum = uint32("set_cmd_value_num", "Set Command Value")
+SetCmdValueString = stringz("set_cmd_value_string", "Set Command Value")
+SetMask = bitfield32("set_mask", "Set Mask", [
+ bf_boolean32(0x00000001, "ncp_encoded_strings", "NCP Encoded Strings"),
+ bf_boolean32(0x00000002, "connection_code_page", "Connection Code Page"),
+])
+# ---------------------------------------------------------------------
+# SFT, software-version, source-path and connection-status fields.
+# ---------------------------------------------------------------------
+SetParmName = stringz("set_parm_name", "Set Parameter Name")
+SFTErrorTable = bytes("sft_error_table", "SFT Error Table", 60)
+SFTSupportLevel = val_string8("sft_support_level", "SFT Support Level", [
+ [ 0x01, "Server Offers Hot Disk Error Fixing" ],
+ [ 0x02, "Server Offers Disk Mirroring and Transaction Tracking" ],
+ [ 0x03, "Server Offers Physical Server Mirroring" ],
+])
+ShareableLockCount = uint16("shareable_lock_count", "Shareable Lock Count")
+SharedMemoryAddresses = bytes("shared_memory_addresses", "Shared Memory Addresses", 10)
+ShortName = fw_string("short_name", "Short Name", 12)
+ShortStkName = fw_string("short_stack_name", "Short Stack Name", 16)
+SiblingCount = uint32("sibling_count", "Sibling Count")
+# NOTE(review): filter name "64_bit_flag" starts with a digit, unlike
+# the other identifiers here -- confirm the display-filter grammar
+# accepts it before reusing the pattern.
+SixtyFourBitOffsetsSupportedFlag = val_string8("64_bit_flag", "64 Bit Support", [
+ [ 0x00, "No support for 64 bit offsets" ],
+ [ 0x01, "64 bit offsets supported" ],
+ [ 0x02, "Use 64 bit file transfer NCP's" ],
+])
+SMIDs = uint32("smids", "Storage Media ID's")
+SoftwareDescription = fw_string("software_description", "Software Description", 65)
+SoftwareDriverType = uint8("software_driver_type", "Software Driver Type")
+SoftwareMajorVersionNumber = uint8("software_major_version_number", "Software Major Version Number")
+SoftwareMinorVersionNumber = uint8("software_minor_version_number", "Software Minor Version Number")
+SourceDirHandle = uint8("source_dir_handle", "Source Directory Handle")
+SourceFileHandle = bytes("s_fhandle_64bit", "Source File Handle", 6)
+SourceFileOffset = bytes("s_foffset", "Source File Offset", 8)
+sourceOriginateTime = bytes("source_originate_time", "Source Originate Time", 8)
+SourcePath = nstring8("source_path", "Source Path")
+SourcePathComponentCount = uint8("source_component_count", "Source Path Component Count")
+sourceReturnTime = bytes("source_return_time", "Source Return Time", 8)
+SpaceUsed = uint32("space_used", "Space Used")
+SpaceMigrated = uint32("space_migrated", "Space Migrated")
+SrcNameSpace = val_string8("src_name_space", "Source Name Space", [
+ [ 0x00, "DOS Name Space" ],
+ [ 0x01, "MAC Name Space" ],
+ [ 0x02, "NFS Name Space" ],
+ [ 0x04, "Long Name Space" ],
+])
+SubFuncStrucLen = uint16("sub_func_struc_len", "Structure Length")
+SupModID = uint32("sup_mod_id", "Sup Mod ID")
+StackCount = uint32("stack_count", "Stack Count")
+StackFullNameStr = nstring8("stack_full_name_str", "Stack Full Name")
+StackMajorVN = uint8("stack_major_vn", "Stack Major Version Number")
+StackMinorVN = uint8("stack_minor_vn", "Stack Minor Version Number")
+StackNumber = uint32("stack_number", "Stack Number")
+StartConnNumber = uint32("start_conn_num", "Starting Connection Number")
+StartingBlock = uint16("starting_block", "Starting Block")
+StartingNumber = uint32("starting_number", "Starting Number")
+StartingSearchNumber = uint16("start_search_number", "Start Search Number")
+StartNumber = uint32("start_number", "Start Number")
+startNumberFlag = uint16("start_number_flag", "Start Number Flag")
+# NOTE(review): length 64 (bytes) for a "64bit Starting Offset" looks
+# suspicious -- an 8-octet field would be expected; verify against the
+# NCP requests that reference this field before changing it.
+StartOffset64bit = bytes("s_offset_64bit", "64bit Starting Offset", 64)
+StartVolumeNumber = uint32("start_volume_number", "Starting Volume Number")
+StationList = uint32("station_list", "Station List")
+StationNumber = bytes("station_number", "Station Number", 3)
+StatMajorVersion = uint8("stat_major_version", "Statistics Table Major Version")
+StatMinorVersion = uint8("stat_minor_version", "Statistics Table Minor Version")
+# Per-connection status bits reported for a logged-in user.
+Status = bitfield16("status", "Status", [
+ bf_boolean16(0x0001, "user_info_logged_in", "Logged In"),
+ bf_boolean16(0x0002, "user_info_being_abort", "Being Aborted"),
+ bf_boolean16(0x0004, "user_info_audited", "Audited"),
+ bf_boolean16(0x0008, "user_info_need_sec", "Needs Security Change"),
+ bf_boolean16(0x0010, "user_info_mac_station", "MAC Station"),
+ bf_boolean16(0x0020, "user_info_temp_authen", "Temporary Authenticated"),
+ bf_boolean16(0x0040, "user_info_audit_conn", "Audit Connection Recorded"),
+ bf_boolean16(0x0080, "user_info_dsaudit_conn", "DS Audit Connection Recorded"),
+ bf_boolean16(0x0100, "user_info_logout", "Logout in Progress"),
+ bf_boolean16(0x0200, "user_info_int_login", "Internal Login"),
+ bf_boolean16(0x0400, "user_info_bindery", "Bindery Connection"),
+])
+StatusFlagBits = bitfield32("status_flag_bits", "Status Flag", [
+ bf_boolean32(0x00000001, "status_flag_bits_suballoc", "Sub Allocation"),
+ bf_boolean32(0x00000002, "status_flag_bits_comp", "Compression"),
+ bf_boolean32(0x00000004, "status_flag_bits_migrate", "Migration"),
+ bf_boolean32(0x00000008, "status_flag_bits_audit", "Audit"),
+ bf_boolean32(0x00000010, "status_flag_bits_ro", "Read Only"),
+ bf_boolean32(0x00000020, "status_flag_bits_im_purge", "Immediate Purge"),
+ bf_boolean32(0x00000040, "status_flag_bits_64bit", "64Bit File Offsets"),
+ bf_boolean32(0x00000080, "status_flag_bits_utf8", "UTF8 NCP Strings"),
+ bf_boolean32(0x80000000, "status_flag_bits_nss", "NSS Volume"),
+])
+SubAllocClusters = uint32("sub_alloc_clusters", "Sub Alloc Clusters")
+SubAllocFreeableClusters = uint32("sub_alloc_freeable_clusters", "Sub Alloc Freeable Clusters")
+Subdirectory = uint32("sub_directory", "Subdirectory")
+Subdirectory.Display("BASE_HEX")
+SuggestedFileSize = uint32("suggested_file_size", "Suggested File Size")
+SupportModuleID = uint32("support_module_id", "Support Module ID")
+SynchName = nstring8("synch_name", "Synch Name")
+SystemIntervalMarker = uint32("system_interval_marker", "System Interval Marker")
+
+TabSize = uint8( "tab_size", "Tab Size" )
+TargetClientList = uint8("target_client_list", "Target Client List")
+TargetConnectionNumber = uint16("target_connection_number", "Target Connection Number")
+TargetDirectoryBase = uint32("target_directory_base", "Target Directory Base")
+TargetDirHandle = uint8("target_dir_handle", "Target Directory Handle")
+TargetEntryID = uint32("target_entry_id", "Target Entry ID")
+TargetEntryID.Display("BASE_HEX")
+TargetExecutionTime = bytes("target_execution_time", "Target Execution Time", 6)
+TargetFileHandle = bytes("target_file_handle", "Target File Handle", 6)
+TargetFileOffset = uint32("target_file_offset", "Target File Offset")
+TargetFileOffset64bit = bytes("t_foffset", "Target File Offset", 8)
+TargetMessage = nstring8("target_message", "Message")
+TargetPrinter = uint8( "target_ptr", "Target Printer" )
+targetReceiveTime = bytes("target_receive_time", "Target Receive Time", 8)
+TargetServerIDNumber = uint32("target_server_id_number", "Target Server ID Number", ENC_BIG_ENDIAN )
+TargetServerIDNumber.Display("BASE_HEX")
+targetTransmitTime = bytes("target_transmit_time", "Target Transmit Time", 8)
+TaskNumByte = uint8("task_num_byte", "Task Number")
+TaskNumber = uint32("task_number", "Task Number")
+TaskNumberWord = uint16("task_number_word", "Task Number")
+TaskState = val_string8("task_state", "Task State", [
+ [ 0x00, "Normal" ],
+ [ 0x01, "TTS explicit transaction in progress" ],
+ [ 0x02, "TTS implicit transaction in progress" ],
+ [ 0x04, "Shared file set lock in progress" ],
+])
+TextJobDescription = fw_string("text_job_description", "Text Job Description", 50)
+ThrashingCount = uint16("thrashing_count", "Thrashing Count")
+TimeoutLimit = uint16("timeout_limit", "Timeout Limit")
+TimesyncStatus = bitfield32("timesync_status_flags", "Timesync Status", [
+ bf_boolean32(0x00000001, "timesync_status_sync", "Time is Synchronized"),
+ bf_boolean32(0x00000002, "timesync_status_net_sync", "Time is Synchronized to the Network"),
+ bf_boolean32(0x00000004, "timesync_status_active", "Time Synchronization is Active"),
+ bf_boolean32(0x00000008, "timesync_status_external", "External Time Synchronization Active"),
+ bf_val_str32(0x00000700, "timesync_status_server_type", "Time Server Type", [
+ [ 0x01, "Client Time Server" ],
+ [ 0x02, "Secondary Time Server" ],
+ [ 0x03, "Primary Time Server" ],
+ [ 0x04, "Reference Time Server" ],
+ [ 0x05, "Single Reference Time Server" ],
+ ]),
+ bf_boolean32(0x000f0000, "timesync_status_ext_sync", "External Clock Status"),
+])
+TimeToNet = uint16("time_to_net", "Time To Net")
+TotalBlocks = uint32("total_blocks", "Total Blocks")
+TotalBlocks64 = uint64("total_blocks64", "Total Blocks")
+TotalBlocksToDecompress = uint32("total_blks_to_dcompress", "Total Blocks To Decompress")
+TotalBytesRead = bytes("user_info_ttl_bytes_rd", "Total Bytes Read", 6)
+TotalBytesWritten = bytes("user_info_ttl_bytes_wrt", "Total Bytes Written", 6)
+TotalCacheWrites = uint32("total_cache_writes", "Total Cache Writes")
+TotalChangedFATs = uint32("total_changed_fats", "Total Changed FAT Entries")
+TotalCommonCnts = uint32("total_common_cnts", "Total Common Counts")
+TotalCntBlocks = uint32("total_cnt_blocks", "Total Count Blocks")
+TotalDataStreamDiskSpaceAlloc = uint32("ttl_data_str_size_space_alloc", "Total Data Stream Disk Space Alloc")
+TotalDirectorySlots = uint16("total_directory_slots", "Total Directory Slots")
+TotalDirectoryEntries = uint32("total_dir_entries", "Total Directory Entries")
+TotalDirEntries64 = uint64("total_dir_entries64", "Total Directory Entries")
+TotalDynamicSpace = uint32("total_dynamic_space", "Total Dynamic Space")
+TotalExtendedDirectoryExtents = uint32("total_extended_directory_extents", "Total Extended Directory Extents")
+TotalFileServicePackets = uint32("total_file_service_packets", "Total File Service Packets")
+TotalFilesOpened = uint32("total_files_opened", "Total Files Opened")
+TotalLFSCounters = uint32("total_lfs_counters", "Total LFS Counters")
+TotalOffspring = uint16("total_offspring", "Total Offspring")
+TotalOtherPackets = uint32("total_other_packets", "Total Other Packets")
+TotalQueueJobs = uint32("total_queue_jobs", "Total Queue Jobs")
+TotalReadRequests = uint32("total_read_requests", "Total Read Requests")
+TotalRequest = uint32("total_request", "Total Requests")
+TotalRequestPackets = uint32("total_request_packets", "Total Request Packets")
+TotalRoutedPackets = uint32("total_routed_packets", "Total Routed Packets")
+TotalRxPkts = uint32("total_rx_pkts", "Total Receive Packets")
+TotalServerMemory = uint16("total_server_memory", "Total Server Memory", ENC_BIG_ENDIAN)
+TotalTransactionsBackedOut = uint32("total_trans_backed_out", "Total Transactions Backed Out")
+TotalTransactionsPerformed = uint32("total_trans_performed", "Total Transactions Performed")
+TotalTxPkts = uint32("total_tx_pkts", "Total Transmit Packets")
+TotalUnfilledBackoutRequests = uint16("total_unfilled_backout_requests", "Total Unfilled Backout Requests")
+TotalVolumeClusters = uint16("total_volume_clusters", "Total Volume Clusters")
+TotalWriteRequests = uint32("total_write_requests", "Total Write Requests")
+TotalWriteTransactionsPerformed = uint32("total_write_trans_performed", "Total Write Transactions Performed")
+TrackOnFlag = boolean8("track_on_flag", "Track On Flag")
+TransactionDiskSpace = uint16("transaction_disk_space", "Transaction Disk Space")
+TransactionFATAllocations = uint32("transaction_fat_allocations", "Transaction FAT Allocations")
+TransactionFileSizeChanges = uint32("transaction_file_size_changes", "Transaction File Size Changes")
+TransactionFilesTruncated = uint32("transaction_files_truncated", "Transaction Files Truncated")
+TransactionNumber = uint32("transaction_number", "Transaction Number")
+TransactionTrackingEnabled = uint8("transaction_tracking_enabled", "Transaction Tracking Enabled")
+TransactionTrackingFlag = uint16("tts_flag", "Transaction Tracking Flag")
+TransactionTrackingSupported = uint8("transaction_tracking_supported", "Transaction Tracking Supported")
+TransactionVolumeNumber = uint16("transaction_volume_number", "Transaction Volume Number")
+TransportType = val_string8("transport_type", "Communications Type", [
+ [ 0x01, "Internet Packet Exchange (IPX)" ],
+ [ 0x05, "User Datagram Protocol (UDP)" ],
+ [ 0x06, "Transmission Control Protocol (TCP)" ],
+])
+TreeLength = uint32("tree_length", "Tree Length")
+TreeName = nstring32("tree_name", "Tree Name")
+TrusteeAccessMask = uint8("trustee_acc_mask", "Trustee Access Mask")
+TrusteeRights = bitfield16("trustee_rights_low", "Trustee Rights", [
+ bf_boolean16(0x0001, "trustee_rights_read", "Read"),
+ bf_boolean16(0x0002, "trustee_rights_write", "Write"),
+ bf_boolean16(0x0004, "trustee_rights_open", "Open"),
+ bf_boolean16(0x0008, "trustee_rights_create", "Create"),
+ bf_boolean16(0x0010, "trustee_rights_del", "Delete"),
+ bf_boolean16(0x0020, "trustee_rights_parent", "Parental"),
+ bf_boolean16(0x0040, "trustee_rights_search", "Search"),
+ bf_boolean16(0x0080, "trustee_rights_modify", "Modify"),
+ bf_boolean16(0x0100, "trustee_rights_super", "Supervisor"),
+])
+TTSLevel = uint8("tts_level", "TTS Level")
+TrusteeSetNumber = uint8("trustee_set_number", "Trustee Set Number")
+TrusteeID = uint32("trustee_id_set", "Trustee ID")
+TrusteeID.Display("BASE_HEX")
+ttlCompBlks = uint32("ttl_comp_blks", "Total Compression Blocks")
+TtlDSDskSpaceAlloc = uint32("ttl_ds_disk_space_alloc", "Total Streams Space Allocated")
+TtlEAs = uint32("ttl_eas", "Total EA's")
+TtlEAsDataSize = uint32("ttl_eas_data_size", "Total EA's Data Size")
+TtlEAsKeySize = uint32("ttl_eas_key_size", "Total EA's Key Size")
+ttlIntermediateBlks = uint32("ttl_inter_blks", "Total Intermediate Blocks")
+TtlMigratedSize = uint32("ttl_migrated_size", "Total Migrated Size")
+TtlNumOfRTags = uint32("ttl_num_of_r_tags", "Total Number of Resource Tags")
+TtlNumOfSetCmds = uint32("ttl_num_of_set_cmds", "Total Number of Set Commands")
+TtlValuesLength = uint32("ttl_values_length", "Total Values Length")
+TtlWriteDataSize = uint32("ttl_write_data_size", "Total Write Data Size")
+TurboUsedForFileService = uint16("turbo_used_for_file_service", "Turbo Used For File Service")
+
+# NCP protocol field declarations (U..W).  Same registration pattern as the
+# preceding sections: (filter-abbrev, display label[, length / value table /
+# bit layout]).  The .Display("BASE_HEX") / .NWDate() / .NWTime() calls set
+# the display format on the field object just created.
+UnclaimedPkts = uint32("un_claimed_packets", "Unclaimed Packets")
+UnCompressableDataStreamsCount = uint32("un_compressable_data_streams_count", "Uncompressable Data Streams Count")
+Undefined8 = bytes("undefined_8", "Undefined", 8)
+Undefined28 = bytes("undefined_28", "Undefined", 28)
+UndefinedWord = uint16("undefined_word", "Undefined")
+UniqueID = uint8("unique_id", "Unique ID")
+UnknownByte = uint8("unknown_byte", "Unknown Byte")
+Unused = uint8("un_used", "Unused")
+UnusedBlocks = uint32("unused_blocks", "Unused Blocks")
+UnUsedDirectoryEntries = uint32("un_used_directory_entries", "Unused Directory Entries")
+UnusedDiskBlocks = uint32("unused_disk_blocks", "Unused Disk Blocks")
+UnUsedExtendedDirectoryExtents = uint32("un_used_extended_directory_extents", "Unused Extended Directory Extents")
+UpdateDate = uint16("update_date", "Update Date")
+UpdateDate.NWDate()
+UpdateID = uint32("update_id", "Update ID", ENC_BIG_ENDIAN)
+UpdateID.Display("BASE_HEX")
+UpdateTime = uint16("update_time", "Update Time")
+UpdateTime.NWTime()
+UseCount = val_string16("user_info_use_count", "Use Count", [
+ [ 0x0000, "Connection is not in use" ],
+ [ 0x0001, "Connection is in use" ],
+])
+UsedBlocks = uint32("used_blocks", "Used Blocks")
+UserID = uint32("user_id", "User ID", ENC_BIG_ENDIAN)
+UserID.Display("BASE_HEX")
+UserLoginAllowed = val_string8("user_login_allowed", "Login Status", [
+ [ 0x00, "Client Login Disabled" ],
+ [ 0x01, "Client Login Enabled" ],
+])
+
+UserName = nstring8("user_name", "User Name")
+UserName16 = fw_string("user_name_16", "User Name", 16)
+UserName48 = fw_string("user_name_48", "User Name", 48)
+UserType = uint16("user_type", "User Type")
+UTCTimeInSeconds = eptime("uts_time_in_seconds", "UTC Time in Seconds")
+
+ValueAvailable = val_string8("value_available", "Value Available", [
+ [ 0x00, "Has No Value" ],
+ [ 0xff, "Has Value" ],
+])
+VAPVersion = uint8("vap_version", "VAP Version")
+VariableBitMask = uint32("variable_bit_mask", "Variable Bit Mask")
+VariableBitsDefined = uint16("variable_bits_defined", "Variable Bits Defined")
+VConsoleRevision = uint8("vconsole_rev", "Console Revision")
+VConsoleVersion = uint8("vconsole_ver", "Console Version")
+Verb = uint32("verb", "Verb")
+VerbData = uint8("verb_data", "Verb Data")
+version = uint32("version", "Version")
+VersionNumber = uint8("version_number", "Version")
+VersionNumberLong = uint32("version_num_long", "Version")
+VertLocation = uint16("vert_location", "Vertical Location")
+VirtualConsoleVersion = uint8("virtual_console_version", "Virtual Console Version")
+VolumeID = uint32("volume_id", "Volume ID")
+VolumeID.Display("BASE_HEX")
+VolInfoReplyLen = uint16("vol_info_reply_len", "Volume Information Reply Length")
+VolInfoReturnInfoMask = bitfield32("vol_info_ret_info_mask", "Return Information Mask", [
+ bf_boolean32(0x00000001, "vinfo_info64", "Return 64 bit Volume Information"),
+ bf_boolean32(0x00000002, "vinfo_volname", "Return Volume Name Details"),
+])
+VolumeCapabilities = bitfield32("volume_capabilities", "Volume Capabilities", [
+ bf_boolean32(0x00000001, "vol_cap_user_space", "NetWare User Space Restrictions Supported"),
+ bf_boolean32(0x00000002, "vol_cap_dir_quota", "NetWare Directory Quotas Supported"),
+ bf_boolean32(0x00000004, "vol_cap_dfs", "DFS is Active on Volume"),
+ bf_boolean32(0x00000008, "vol_cap_sal_purge", "NetWare Salvage and Purge Operations Supported"),
+ bf_boolean32(0x00000010, "vol_cap_comp", "NetWare Compression Supported"),
+ bf_boolean32(0x00000020, "vol_cap_cluster", "Volume is a Cluster Resource"),
+ bf_boolean32(0x00000040, "vol_cap_nss_admin", "Volume is the NSS Admin Volume"),
+ bf_boolean32(0x00000080, "vol_cap_nss", "Volume is Mounted by NSS"),
+ bf_boolean32(0x00000100, "vol_cap_ea", "OS2 style EA's Supported"),
+ bf_boolean32(0x00000200, "vol_cap_archive", "NetWare Archive bit Supported"),
+ bf_boolean32(0x00000400, "vol_cap_file_attr", "Full NetWare file Attributes Supported"),
+])
+VolumeCachedFlag = val_string8("volume_cached_flag", "Volume Cached Flag", [
+ [ 0x00, "Volume is Not Cached" ],
+ [ 0xff, "Volume is Cached" ],
+])
+VolumeDataStreams = uint8("volume_data_streams", "Volume Data Streams")
+# NOTE(review): abbrev "epoch_time" is unusually generic compared to the
+# volume_* naming of its neighbors -- presumably historical; confirm no
+# filter-name collision before renaming.
+VolumeEpochTime = eptime("epoch_time", "Last Modified Timestamp")
+VolumeGUID = stringz("volume_guid", "Volume GUID")
+VolumeHashedFlag = val_string8("volume_hashed_flag", "Volume Hashed Flag", [
+ [ 0x00, "Volume is Not Hashed" ],
+ [ 0xff, "Volume is Hashed" ],
+])
+VolumeMountedFlag = val_string8("volume_mounted_flag", "Volume Mounted Flag", [
+ [ 0x00, "Volume is Not Mounted" ],
+ [ 0xff, "Volume is Mounted" ],
+])
+VolumeMountPoint = stringz("volume_mnt_point", "Volume Mount Point")
+VolumeName = fw_string("volume_name", "Volume Name", 16)
+VolumeNameLen = nstring8("volume_name_len", "Volume Name")
+VolumeNameSpaces = uint8("volume_name_spaces", "Volume Name Spaces")
+VolumeNameStringz = stringz("vol_name_stringz", "Volume Name")
+VolumeNumber = uint8("volume_number", "Volume Number")
+VolumeNumberLong = uint32("volume_number_long", "Volume Number")
+VolumeRemovableFlag = val_string8("volume_removable_flag", "Volume Removable Flag", [
+ [ 0x00, "Disk Cannot be Removed from Server" ],
+ [ 0xff, "Disk Can be Removed from Server" ],
+])
+VolumeRequestFlags = val_string16("volume_request_flags", "Volume Request Flags", [
+ [ 0x0000, "Do not return name with volume number" ],
+ [ 0x0001, "Return name with volume number" ],
+])
+VolumeSizeInClusters = uint32("volume_size_in_clusters", "Volume Size in Clusters")
+VolumesSupportedMax = uint16("volumes_supported_max", "Volumes Supported Max")
+VolumeType = val_string16("volume_type", "Volume Type", [
+ [ 0x0000, "NetWare 386" ],
+ [ 0x0001, "NetWare 286" ],
+ [ 0x0002, "NetWare 386 Version 30" ],
+ [ 0x0003, "NetWare 386 Version 31" ],
+])
+VolumeTypeLong = val_string32("volume_type_long", "Volume Type", [
+ [ 0x00000000, "NetWare 386" ],
+ [ 0x00000001, "NetWare 286" ],
+ [ 0x00000002, "NetWare 386 Version 30" ],
+ [ 0x00000003, "NetWare 386 Version 31" ],
+])
+WastedServerMemory = uint16("wasted_server_memory", "Wasted Server Memory", ENC_BIG_ENDIAN)
+WaitTime = uint32("wait_time", "Wait Time")
+
+# Year byte -> calendar-year display table.  Two encodings coexist in the
+# table below: 0x50..0x7f maps to (value - 0x50) + 1980 (i.e. 1980..2027),
+# and 0xc0..0xdf maps to (value - 0xc0) + 1984 (i.e. 1984..2015).  Values
+# outside these ranges have no entry and fall through to the dissector's
+# default unknown-value display.
+Year = val_string8("year", "Year",[
+ [ 0x50, "1980" ],
+ [ 0x51, "1981" ],
+ [ 0x52, "1982" ],
+ [ 0x53, "1983" ],
+ [ 0x54, "1984" ],
+ [ 0x55, "1985" ],
+ [ 0x56, "1986" ],
+ [ 0x57, "1987" ],
+ [ 0x58, "1988" ],
+ [ 0x59, "1989" ],
+ [ 0x5a, "1990" ],
+ [ 0x5b, "1991" ],
+ [ 0x5c, "1992" ],
+ [ 0x5d, "1993" ],
+ [ 0x5e, "1994" ],
+ [ 0x5f, "1995" ],
+ [ 0x60, "1996" ],
+ [ 0x61, "1997" ],
+ [ 0x62, "1998" ],
+ [ 0x63, "1999" ],
+ [ 0x64, "2000" ],
+ [ 0x65, "2001" ],
+ [ 0x66, "2002" ],
+ [ 0x67, "2003" ],
+ [ 0x68, "2004" ],
+ [ 0x69, "2005" ],
+ [ 0x6a, "2006" ],
+ [ 0x6b, "2007" ],
+ [ 0x6c, "2008" ],
+ [ 0x6d, "2009" ],
+ [ 0x6e, "2010" ],
+ [ 0x6f, "2011" ],
+ [ 0x70, "2012" ],
+ [ 0x71, "2013" ],
+ [ 0x72, "2014" ],
+ [ 0x73, "2015" ],
+ [ 0x74, "2016" ],
+ [ 0x75, "2017" ],
+ [ 0x76, "2018" ],
+ [ 0x77, "2019" ],
+ [ 0x78, "2020" ],
+ [ 0x79, "2021" ],
+ [ 0x7a, "2022" ],
+ [ 0x7b, "2023" ],
+ [ 0x7c, "2024" ],
+ [ 0x7d, "2025" ],
+ [ 0x7e, "2026" ],
+ [ 0x7f, "2027" ],
+ [ 0xc0, "1984" ],
+ [ 0xc1, "1985" ],
+ [ 0xc2, "1986" ],
+ [ 0xc3, "1987" ],
+ [ 0xc4, "1988" ],
+ [ 0xc5, "1989" ],
+ [ 0xc6, "1990" ],
+ [ 0xc7, "1991" ],
+ [ 0xc8, "1992" ],
+ [ 0xc9, "1993" ],
+ [ 0xca, "1994" ],
+ [ 0xcb, "1995" ],
+ [ 0xcc, "1996" ],
+ [ 0xcd, "1997" ],
+ [ 0xce, "1998" ],
+ [ 0xcf, "1999" ],
+ [ 0xd0, "2000" ],
+ [ 0xd1, "2001" ],
+ [ 0xd2, "2002" ],
+ [ 0xd3, "2003" ],
+ [ 0xd4, "2004" ],
+ [ 0xd5, "2005" ],
+ [ 0xd6, "2006" ],
+ [ 0xd7, "2007" ],
+ [ 0xd8, "2008" ],
+ [ 0xd9, "2009" ],
+ [ 0xda, "2010" ],
+ [ 0xdb, "2011" ],
+ [ 0xdc, "2012" ],
+ [ 0xdd, "2013" ],
+ [ 0xde, "2014" ],
+ [ 0xdf, "2015" ],
+])
+##############################################################################
+# Structs
+##############################################################################
+
+
+# Composite record (struct) declarations.  struct(abbrev, [fields...][,
+# label]) groups previously declared field objects -- or inline field
+# constructors -- into a named record that NCP request/reply definitions
+# reference.  Field order here is the on-the-wire order of the record,
+# so reordering entries changes dissection.  endian(field, ENC_*) wraps a
+# field to override its byte order within this record only.
+acctngInfo = struct("acctng_info_struct", [
+ HoldTime,
+ HoldAmount,
+ ChargeAmount,
+ HeldConnectTimeInMinutes,
+ HeldRequests,
+ HeldBytesRead,
+ HeldBytesWritten,
+],"Accounting Information")
+AFP10Struct = struct("afp_10_struct", [
+ AFPEntryID,
+ ParentID,
+ AttributesDef16,
+ DataForkLen,
+ ResourceForkLen,
+ TotalOffspring,
+ CreationDate,
+ LastAccessedDate,
+ ModifiedDate,
+ ModifiedTime,
+ ArchivedDate,
+ ArchivedTime,
+ CreatorID,
+ Reserved4,
+ FinderAttr,
+ HorizLocation,
+ VertLocation,
+ FileDirWindow,
+ Reserved16,
+ LongName,
+ # NOTE(review): CreatorID appears twice in this record (also above,
+ # before Reserved4); AFP20Struct repeats the same pattern, so it is
+ # presumably the actual AFP record layout -- confirm against the spec.
+ CreatorID,
+ ShortName,
+ AccessPrivileges,
+], "AFP Information" )
+AFP20Struct = struct("afp_20_struct", [
+ AFPEntryID,
+ ParentID,
+ AttributesDef16,
+ DataForkLen,
+ ResourceForkLen,
+ TotalOffspring,
+ CreationDate,
+ LastAccessedDate,
+ ModifiedDate,
+ ModifiedTime,
+ ArchivedDate,
+ ArchivedTime,
+ CreatorID,
+ Reserved4,
+ FinderAttr,
+ HorizLocation,
+ VertLocation,
+ FileDirWindow,
+ Reserved16,
+ LongName,
+ CreatorID,
+ ShortName,
+ AccessPrivileges,
+ Reserved,
+ ProDOSInfo,
+], "AFP Information" )
+ArchiveDateStruct = struct("archive_date_struct", [
+ ArchivedDate,
+])
+ArchiveIdStruct = struct("archive_id_struct", [
+ ArchiverID,
+])
+ArchiveInfoStruct = struct("archive_info_struct", [
+ ArchivedTime,
+ ArchivedDate,
+ ArchiverID,
+], "Archive Information")
+ArchiveTimeStruct = struct("archive_time_struct", [
+ ArchivedTime,
+])
+AttributesStruct = struct("attributes_struct", [
+ AttributesDef32,
+ FlagsDef,
+], "Attributes")
+authInfo = struct("auth_info_struct", [
+ Status,
+ Reserved2,
+ Privileges,
+])
+BoardNameStruct = struct("board_name_struct", [
+ DriverBoardName,
+ DriverShortName,
+ DriverLogicalName,
+], "Board Name")
+CacheInfo = struct("cache_info", [
+ uint32("max_byte_cnt", "Maximum Byte Count"),
+ uint32("min_num_of_cache_buff", "Minimum Number Of Cache Buffers"),
+ uint32("min_cache_report_thresh", "Minimum Cache Report Threshold"),
+ uint32("alloc_waiting", "Allocate Waiting Count"),
+ uint32("ndirty_blocks", "Number of Dirty Blocks"),
+ uint32("cache_dirty_wait_time", "Cache Dirty Wait Time"),
+ uint32("cache_max_concur_writes", "Cache Maximum Concurrent Writes"),
+ uint32("max_dirty_time", "Maximum Dirty Time"),
+ uint32("num_dir_cache_buff", "Number Of Directory Cache Buffers"),
+ uint32("cache_byte_to_block", "Cache Byte To Block Shift Factor"),
+], "Cache Information")
+CommonLanStruc = struct("common_lan_struct", [
+ boolean8("not_supported_mask", "Bit Counter Supported"),
+ Reserved3,
+ uint32("total_tx_packet_count", "Total Transmit Packet Count"),
+ uint32("total_rx_packet_count", "Total Receive Packet Count"),
+ uint32("no_ecb_available_count", "No ECB Available Count"),
+ uint32("packet_tx_too_big_count", "Transmit Packet Too Big Count"),
+ uint32("packet_tx_too_small_count", "Transmit Packet Too Small Count"),
+ uint32("packet_rx_overflow_count", "Receive Packet Overflow Count"),
+ uint32("packet_rx_too_big_count", "Receive Packet Too Big Count"),
+ uint32("packet_rs_too_small_count", "Receive Packet Too Small Count"),
+ uint32("packet_tx_misc_error_count", "Transmit Packet Misc Error Count"),
+ uint32("packet_rx_misc_error_count", "Receive Packet Misc Error Count"),
+ uint32("retry_tx_count", "Transmit Retry Count"),
+ uint32("checksum_error_count", "Checksum Error Count"),
+ uint32("hardware_rx_mismatch_count", "Hardware Receive Mismatch Count"),
+], "Common LAN Information")
+CompDeCompStat = struct("comp_d_comp_stat", [
+ uint32("cmphitickhigh", "Compress High Tick"),
+ uint32("cmphitickcnt", "Compress High Tick Count"),
+ uint32("cmpbyteincount", "Compress Byte In Count"),
+ uint32("cmpbyteoutcnt", "Compress Byte Out Count"),
+ uint32("cmphibyteincnt", "Compress High Byte In Count"),
+ uint32("cmphibyteoutcnt", "Compress High Byte Out Count"),
+ uint32("decphitickhigh", "DeCompress High Tick"),
+ uint32("decphitickcnt", "DeCompress High Tick Count"),
+ uint32("decpbyteincount", "DeCompress Byte In Count"),
+ uint32("decpbyteoutcnt", "DeCompress Byte Out Count"),
+ uint32("decphibyteincnt", "DeCompress High Byte In Count"),
+ uint32("decphibyteoutcnt", "DeCompress High Byte Out Count"),
+], "Compression/Decompression Information")
+ConnFileStruct = struct("conn_file_struct", [
+ ConnectionNumberWord,
+ TaskNumberWord,
+ LockType,
+ AccessControl,
+ LockFlag,
+], "File Connection Information")
+ConnStruct = struct("conn_struct", [
+ TaskNumByte,
+ LockType,
+ AccessControl,
+ LockFlag,
+ VolumeNumber,
+ DirectoryEntryNumberWord,
+ FileName14,
+], "Connection Information")
+ConnTaskStruct = struct("conn_task_struct", [
+ ConnectionNumberByte,
+ TaskNumByte,
+], "Task Information")
+Counters = struct("counters_struct", [
+ uint32("read_exist_blck", "Read Existing Block Count"),
+ uint32("read_exist_write_wait", "Read Existing Write Wait Count"),
+ uint32("read_exist_part_read", "Read Existing Partial Read Count"),
+ uint32("read_exist_read_err", "Read Existing Read Error Count"),
+ uint32("wrt_blck_cnt", "Write Block Count"),
+ uint32("wrt_entire_blck", "Write Entire Block Count"),
+ uint32("internl_dsk_get", "Internal Disk Get Count"),
+ uint32("internl_dsk_get_need_to_alloc", "Internal Disk Get Need To Allocate Count"),
+ uint32("internl_dsk_get_someone_beat", "Internal Disk Get Someone Beat My Count"),
+ uint32("internl_dsk_get_part_read", "Internal Disk Get Partial Read Count"),
+ uint32("internl_dsk_get_read_err", "Internal Disk Get Read Error Count"),
+ uint32("async_internl_dsk_get", "Async Internal Disk Get Count"),
+ uint32("async_internl_dsk_get_need_to_alloc", "Async Internal Disk Get Need To Alloc"),
+ uint32("async_internl_dsk_get_someone_beat", "Async Internal Disk Get Someone Beat Me"),
+ uint32("err_doing_async_read", "Error Doing Async Read Count"),
+ uint32("internl_dsk_get_no_read", "Internal Disk Get No Read Count"),
+ uint32("internl_dsk_get_no_read_alloc", "Internal Disk Get No Read Allocate Count"),
+ uint32("internl_dsk_get_no_read_someone_beat", "Internal Disk Get No Read Someone Beat Me Count"),
+ uint32("internl_dsk_write", "Internal Disk Write Count"),
+ uint32("internl_dsk_write_alloc", "Internal Disk Write Allocate Count"),
+ uint32("internl_dsk_write_someone_beat", "Internal Disk Write Someone Beat Me Count"),
+ uint32("write_err", "Write Error Count"),
+ uint32("wait_on_sema", "Wait On Semaphore Count"),
+ uint32("alloc_blck_i_had_to_wait_for", "Allocate Block I Had To Wait For Someone Count"),
+ uint32("alloc_blck", "Allocate Block Count"),
+ uint32("alloc_blck_i_had_to_wait", "Allocate Block I Had To Wait Count"),
+], "Disk Counter Information")
+CPUInformation = struct("cpu_information", [
+ PageTableOwnerFlag,
+ CPUType,
+ Reserved3,
+ CoprocessorFlag,
+ BusType,
+ Reserved3,
+ IOEngineFlag,
+ Reserved3,
+ FSEngineFlag,
+ Reserved3,
+ NonDedFlag,
+ Reserved3,
+ CPUString,
+ CoProcessorString,
+ BusString,
+], "CPU Information")
+CreationDateStruct = struct("creation_date_struct", [
+ CreationDate,
+])
+CreationInfoStruct = struct("creation_info_struct", [
+ CreationTime,
+ CreationDate,
+ # CreatorID is forced little-endian in this record only.
+ endian(CreatorID, ENC_LITTLE_ENDIAN),
+], "Creation Information")
+CreationTimeStruct = struct("creation_time_struct", [
+ CreationTime,
+])
+CustomCntsInfo = struct("custom_cnts_info", [
+ CustomVariableValue,
+ CustomString,
+], "Custom Counters" )
+DataStreamInfo = struct("data_stream_info", [
+ AssociatedNameSpace,
+ DataStreamName
+])
+DataStreamSizeStruct = struct("data_stream_size_struct", [
+ DataStreamSize,
+])
+DirCacheInfo = struct("dir_cache_info", [
+ uint32("min_time_since_file_delete", "Minimum Time Since File Delete"),
+ uint32("abs_min_time_since_file_delete", "Absolute Minimum Time Since File Delete"),
+ uint32("min_num_of_dir_cache_buff", "Minimum Number Of Directory Cache Buffers"),
+ uint32("max_num_of_dir_cache_buff", "Maximum Number Of Directory Cache Buffers"),
+ uint32("num_of_dir_cache_buff", "Number Of Directory Cache Buffers"),
+ uint32("dc_min_non_ref_time", "DC Minimum Non-Referenced Time"),
+ uint32("dc_wait_time_before_new_buff", "DC Wait Time Before New Buffer"),
+ uint32("dc_max_concurrent_writes", "DC Maximum Concurrent Writes"),
+ uint32("dc_dirty_wait_time", "DC Dirty Wait Time"),
+ uint32("dc_double_read_flag", "DC Double Read Flag"),
+ uint32("map_hash_node_count", "Map Hash Node Count"),
+ uint32("space_restriction_node_count", "Space Restriction Node Count"),
+ uint32("trustee_list_node_count", "Trustee List Node Count"),
+ uint32("percent_of_vol_used_by_dirs", "Percent Of Volume Used By Directories"),
+], "Directory Cache Information")
+DirDiskSpaceRest64bit = struct("dir_disk_space_rest_64bit", [
+ Level,
+ MaxSpace64,
+ MinSpaceLeft64
+], "Directory Disk Space Restriction 64 bit")
+DirEntryStruct = struct("dir_entry_struct", [
+ DirectoryEntryNumber,
+ DOSDirectoryEntryNumber,
+ VolumeNumberLong,
+], "Directory Entry Information")
+DirectoryInstance = struct("directory_instance", [
+ SearchSequenceWord,
+ DirectoryID,
+ DirectoryName14,
+ DirectoryAttributes,
+ DirectoryAccessRights,
+ # Dates are big-endian in this record, overriding the field default.
+ endian(CreationDate, ENC_BIG_ENDIAN),
+ endian(AccessDate, ENC_BIG_ENDIAN),
+ CreatorID,
+ Reserved2,
+ DirectoryStamp,
+], "Directory Information")
+DMInfoLevel0 = struct("dm_info_level_0", [
+ uint32("io_flag", "IO Flag"),
+ uint32("sm_info_size", "Storage Module Information Size"),
+ uint32("avail_space", "Available Space"),
+ uint32("used_space", "Used Space"),
+ stringz("s_module_name", "Storage Module Name"),
+ uint8("s_m_info", "Storage Media Information"),
+])
+DMInfoLevel1 = struct("dm_info_level_1", [
+ NumberOfSMs,
+ SMIDs,
+])
+DMInfoLevel2 = struct("dm_info_level_2", [
+ Name,
+])
+DOSDirectoryEntryStruct = struct("dos_directory_entry_struct", [
+ AttributesDef32,
+ UniqueID,
+ PurgeFlags,
+ DestNameSpace,
+ DirectoryNameLen,
+ DirectoryName,
+ CreationTime,
+ CreationDate,
+ CreatorID,
+ ArchivedTime,
+ ArchivedDate,
+ ArchiverID,
+ UpdateTime,
+ UpdateDate,
+ NextTrusteeEntry,
+ Reserved48,
+ InheritedRightsMask,
+], "DOS Directory Information")
+DOSFileEntryStruct = struct("dos_file_entry_struct", [
+ AttributesDef32,
+ UniqueID,
+ PurgeFlags,
+ DestNameSpace,
+ NameLen,
+ Name12,
+ CreationTime,
+ CreationDate,
+ CreatorID,
+ ArchivedTime,
+ ArchivedDate,
+ ArchiverID,
+ UpdateTime,
+ UpdateDate,
+ UpdateID,
+ FileSize,
+ DataForkFirstFAT,
+ NextTrusteeEntry,
+ Reserved36,
+ InheritedRightsMask,
+ LastAccessedDate,
+ Reserved20,
+ PrimaryEntry,
+ NameList,
+], "DOS File Information")
+DSSpaceAllocateStruct = struct("ds_space_alloc_struct", [
+ DataStreamSpaceAlloc,
+])
+DynMemStruct = struct("dyn_mem_struct", [
+ uint32("dyn_mem_struct_total", "Total Dynamic Space" ),
+ uint32("dyn_mem_struct_max", "Max Used Dynamic Space" ),
+ uint32("dyn_mem_struct_cur", "Current Used Dynamic Space" ),
+], "Dynamic Memory Information")
+EAInfoStruct = struct("ea_info_struct", [
+ EADataSize,
+ EACount,
+ EAKeySize,
+], "Extended Attribute Information")
+ExtraCacheCntrs = struct("extra_cache_cntrs", [
+ uint32("internl_dsk_get_no_wait", "Internal Disk Get No Wait Count"),
+ uint32("internl_dsk_get_no_wait_need", "Internal Disk Get No Wait Need To Allocate Count"),
+ uint32("internl_dsk_get_no_wait_no_blk", "Internal Disk Get No Wait No Block Count"),
+ uint32("id_get_no_read_no_wait", "ID Get No Read No Wait Count"),
+ uint32("id_get_no_read_no_wait_sema", "ID Get No Read No Wait Semaphored Count"),
+ uint32("id_get_no_read_no_wait_buffer", "ID Get No Read No Wait No Buffer Count"),
+ uint32("id_get_no_read_no_wait_alloc", "ID Get No Read No Wait Allocate Count"),
+ uint32("id_get_no_read_no_wait_no_alloc", "ID Get No Read No Wait No Alloc Count"),
+ uint32("id_get_no_read_no_wait_no_alloc_sema", "ID Get No Read No Wait No Alloc Semaphored Count"),
+ uint32("id_get_no_read_no_wait_no_alloc_alloc", "ID Get No Read No Wait No Alloc Allocate Count"),
+], "Extra Cache Counters Information")
+
+FileSize64bitStruct = struct("file_sz_64bit_struct", [
+ FileSize64bit,
+])
+
+ReferenceIDStruct = struct("ref_id_struct", [
+ CurrentReferenceID,
+])
+NSAttributeStruct = struct("ns_attrib_struct", [
+ AttributesDef32,
+])
+DStreamActual = struct("d_stream_actual", [
+ DataStreamNumberLong,
+ DataStreamFATBlocks,
+], "Actual Stream")
+# NOTE(review): abbrev "d_string_logical" looks like a typo for
+# "d_stream_logical", but it is part of the generated display-filter
+# namespace -- renaming it would break existing user filters.
+DStreamLogical = struct("d_string_logical", [
+ DataStreamNumberLong,
+ DataStreamSize,
+], "Logical Stream")
+LastUpdatedInSecondsStruct = struct("last_update_in_seconds_struct", [
+ SecondsRelativeToTheYear2000,
+])
+DOSNameStruct = struct("dos_name_struct", [
+ FileName,
+], "DOS File Name")
+DOSName16Struct = struct("dos_name_16_struct", [
+ FileName16,
+], "DOS File Name")
+FlushTimeStruct = struct("flush_time_struct", [
+ FlushTime,
+])
+ParentBaseIDStruct = struct("parent_base_id_struct", [
+ ParentBaseID,
+])
+MacFinderInfoStruct = struct("mac_finder_info_struct", [
+ MacFinderInfo,
+])
+SiblingCountStruct = struct("sibling_count_struct", [
+ SiblingCount,
+])
+EffectiveRightsStruct = struct("eff_rights_struct", [
+ EffectiveRights,
+ Reserved3,
+])
+MacTimeStruct = struct("mac_time_struct", [
+ MACCreateDate,
+ MACCreateTime,
+ MACBackupDate,
+ MACBackupTime,
+])
+LastAccessedTimeStruct = struct("last_access_time_struct", [
+ LastAccessedTime,
+])
+FileAttributesStruct = struct("file_attributes_struct", [
+ AttributesDef32,
+])
+FileInfoStruct = struct("file_info_struct", [
+ ParentID,
+ DirectoryEntryNumber,
+ TotalBlocksToDecompress,
+ #CurrentBlockBeingDecompressed,
+], "File Information")
+FileInstance = struct("file_instance", [
+ SearchSequenceWord,
+ DirectoryID,
+ FileName14,
+ AttributesDef,
+ FileMode,
+ FileSize,
+ endian(CreationDate, ENC_BIG_ENDIAN),
+ endian(AccessDate, ENC_BIG_ENDIAN),
+ endian(UpdateDate, ENC_BIG_ENDIAN),
+ endian(UpdateTime, ENC_BIG_ENDIAN),
+], "File Instance")
+FileNameStruct = struct("file_name_struct", [
+ FileName,
+], "File Name")
+FileName16Struct = struct("file_name16_struct", [
+ FileName16,
+], "File Name")
+FileServerCounters = struct("file_server_counters", [
+ uint16("too_many_hops", "Too Many Hops"),
+ uint16("unknown_network", "Unknown Network"),
+ uint16("no_space_for_service", "No Space For Service"),
+ uint16("no_receive_buff", "No Receive Buffers"),
+ uint16("not_my_network", "Not My Network"),
+ uint32("netbios_progated", "NetBIOS Propagated Count"),
+ uint32("ttl_pckts_srvcd", "Total Packets Serviced"),
+ uint32("ttl_pckts_routed", "Total Packets Routed"),
+], "File Server Counters")
+FileSystemInfo = struct("file_system_info", [
+ uint32("fat_moved", "Number of times the OS has move the location of FAT"),
+ uint32("fat_write_err", "Number of write errors in both original and mirrored copies of FAT"),
+ uint32("someone_else_did_it_0", "Someone Else Did It Count 0"),
+ uint32("someone_else_did_it_1", "Someone Else Did It Count 1"),
+ uint32("someone_else_did_it_2", "Someone Else Did It Count 2"),
+ uint32("i_ran_out_someone_else_did_it_0", "I Ran Out Someone Else Did It Count 0"),
+ uint32("i_ran_out_someone_else_did_it_1", "I Ran Out Someone Else Did It Count 1"),
+ uint32("i_ran_out_someone_else_did_it_2", "I Ran Out Someone Else Did It Count 2"),
+ uint32("turbo_fat_build_failed", "Turbo FAT Build Failed Count"),
+ uint32("extra_use_count_node_count", "Errors allocating a use count node for TTS"),
+ uint32("extra_extra_use_count_node_count", "Errors allocating an additional use count node for TTS"),
+ uint32("error_read_last_fat", "Error Reading Last FAT Count"),
+ uint32("someone_else_using_this_file", "Someone Else Using This File Count"),
+], "File System Information")
# --- Generic media, handle, IPX and queue-job structures -------------------
GenericInfoDef = struct("generic_info_def", [
    fw_string("generic_label", "Label", 64),
    uint32("generic_ident_type", "Identification Type"),
    uint32("generic_ident_time", "Identification Time"),
    uint32("generic_media_type", "Media Type"),
    uint32("generic_cartridge_type", "Cartridge Type"),
    uint32("generic_unit_size", "Unit Size"),
    uint32("generic_block_size", "Block Size"),
    uint32("generic_capacity", "Capacity"),
    uint32("generic_pref_unit_size", "Preferred Unit Size"),
    fw_string("generic_name", "Name",64),
    uint32("generic_type", "Type"),
    uint32("generic_status", "Status"),
    uint32("generic_func_mask", "Function Mask"),
    uint32("generic_ctl_mask", "Control Mask"),
    uint32("generic_parent_count", "Parent Count"),
    uint32("generic_sib_count", "Sibling Count"),
    uint32("generic_child_count", "Child Count"),
    uint32("generic_spec_info_sz", "Specific Information Size"),
    uint32("generic_object_uniq_id", "Unique Object ID"),
    uint32("generic_media_slot", "Media Slot"),
], "Generic Information")
# Handle info levels 0-5: progressively larger subsets of handle state.
HandleInfoLevel0 = struct("handle_info_level_0", [
# DataStream,
])
HandleInfoLevel1 = struct("handle_info_level_1", [
    DataStream,
])
HandleInfoLevel2 = struct("handle_info_level_2", [
    DOSDirectoryBase,
    NameSpace,
    DataStream,
])
HandleInfoLevel3 = struct("handle_info_level_3", [
    DOSDirectoryBase,
    NameSpace,
])
HandleInfoLevel4 = struct("handle_info_level_4", [
    DOSDirectoryBase,
    NameSpace,
    ParentDirectoryBase,
    ParentDOSDirectoryBase,
])
HandleInfoLevel5 = struct("handle_info_level_5", [
    DOSDirectoryBase,
    NameSpace,
    DataStream,
    ParentDirectoryBase,
    ParentDOSDirectoryBase,
])
IPXInformation = struct("ipx_information", [
    uint32("ipx_send_pkt", "IPX Send Packet Count"),
    uint16("ipx_malform_pkt", "IPX Malformed Packet Count"),
    uint32("ipx_get_ecb_req", "IPX Get ECB Request Count"),
    uint32("ipx_get_ecb_fail", "IPX Get ECB Fail Count"),
    uint32("ipx_aes_event", "IPX AES Event Count"),
    uint16("ipx_postponed_aes", "IPX Postponed AES Count"),
    uint16("ipx_max_conf_sock", "IPX Max Configured Socket Count"),
    uint16("ipx_max_open_sock", "IPX Max Open Socket Count"),
    uint16("ipx_open_sock_fail", "IPX Open Socket Fail Count"),
    uint32("ipx_listen_ecb", "IPX Listen ECB Count"),
    uint16("ipx_ecb_cancel_fail", "IPX ECB Cancel Fail Count"),
    uint16("ipx_get_lcl_targ_fail", "IPX Get Local Target Fail Count"),
], "IPX Information")
JobEntryTime = struct("job_entry_time", [
    Year,
    Month,
    Day,
    Hour,
    Minute,
    Second,
], "Job Entry Time")
# Three queue-job layouts: 3.x, classic, and "new" (no trailing client area).
JobStruct3x = struct("job_struct_3x", [
    RecordInUseFlag,
    PreviousRecord,
    NextRecord,
    ClientStationLong,
    ClientTaskNumberLong,
    ClientIDNumber,
    TargetServerIDNumber,
    TargetExecutionTime,
    JobEntryTime,
    JobNumberLong,
    JobType,
    JobPositionWord,
    JobControlFlagsWord,
    JobFileName,
    JobFileHandleLong,
    ServerStationLong,
    ServerTaskNumberLong,
    ServerID,
    TextJobDescription,
    ClientRecordArea,
], "Job Information")
JobStruct = struct("job_struct", [
    ClientStation,
    ClientTaskNumber,
    ClientIDNumber,
    TargetServerIDNumber,
    TargetExecutionTime,
    JobEntryTime,
    JobNumber,
    JobType,
    JobPosition,
    JobControlFlags,
    JobFileName,
    JobFileHandle,
    ServerStation,
    ServerTaskNumber,
    ServerID,
    TextJobDescription,
    ClientRecordArea,
], "Job Information")
JobStructNew = struct("job_struct_new", [
    RecordInUseFlag,
    PreviousRecord,
    NextRecord,
    ClientStationLong,
    ClientTaskNumberLong,
    ClientIDNumber,
    TargetServerIDNumber,
    TargetExecutionTime,
    JobEntryTime,
    JobNumberLong,
    JobType,
    JobPositionWord,
    JobControlFlagsWord,
    JobFileName,
    JobFileHandleLong,
    ServerStationLong,
    ServerTaskNumberLong,
    ServerID,
], "Job Information")
KnownRoutes = struct("known_routes", [
    NetIDNumber,
    HopsToNet,
    NetStatus,
    TimeToNet,
], "Known Routes")
SrcEnhNWHandlePathS1 = struct("source_nwhandle", [
    DirectoryBase,
    VolumeNumber,
    HandleFlag,
    DataTypeFlag,
    Reserved5,
], "Source Information")
DstEnhNWHandlePathS1 = struct("destination_nwhandle", [
    DirectoryBase,
    VolumeNumber,
    HandleFlag,
    DataTypeFlag,
    Reserved5,
], "Destination Information")
KnownServStruc = struct("known_server_struct", [
    ServerAddress,
    HopsToNet,
    ServerNameStringz,
], "Known Servers")
LANConfigInfo = struct("lan_cfg_info", [
    LANdriverCFG_MajorVersion,
    LANdriverCFG_MinorVersion,
    LANdriverNodeAddress,
    Reserved,
    LANdriverModeFlags,
    LANdriverBoardNumber,
    LANdriverBoardInstance,
    LANdriverMaximumSize,
    LANdriverMaxRecvSize,
    LANdriverRecvSize,
    LANdriverCardID,
    LANdriverMediaID,
    LANdriverTransportTime,
    LANdriverSrcRouting,
    LANdriverLineSpeed,
    LANdriverReserved,
    LANdriverMajorVersion,
    LANdriverMinorVersion,
    LANdriverFlags,
    LANdriverSendRetries,
    LANdriverLink,
    LANdriverSharingFlags,
    LANdriverSlot,
    LANdriverIOPortsAndRanges1,
    LANdriverIOPortsAndRanges2,
    LANdriverIOPortsAndRanges3,
    LANdriverIOPortsAndRanges4,
    LANdriverMemoryDecode0,
    LANdriverMemoryLength0,
    LANdriverMemoryDecode1,
    LANdriverMemoryLength1,
    LANdriverInterrupt1,
    LANdriverInterrupt2,
    LANdriverDMAUsage1,
    LANdriverDMAUsage2,
    LANdriverLogicalName,
    LANdriverIOReserved,
    LANdriverCardName,
], "LAN Configuration Information")
# --- Lock, login, LSL and name-space structures ----------------------------
LastAccessStruct = struct("last_access_struct", [
    LastAccessedDate,
])
lockInfo = struct("lock_info_struct", [
    LogicalLockThreshold,
    PhysicalLockThreshold,
    FileLockCount,
    RecordLockCount,
], "Lock Information")
LockStruct = struct("lock_struct", [
    TaskNumByte,
    LockType,
    RecordStart,
    RecordEnd,
], "Locks")
LoginTime = struct("login_time", [
    Year,
    Month,
    Day,
    Hour,
    Minute,
    Second,
    DayOfWeek,
], "Login Time")
LogLockStruct = struct("log_lock_struct", [
    TaskNumberWord,
    LockStatus,
    LockName,
], "Logical Locks")
LogRecStruct = struct("log_rec_struct", [
    ConnectionNumberWord,
    TaskNumByte,
    LockStatus,
], "Logical Record Locks")
LSLInformation = struct("lsl_information", [
    uint32("rx_buffers", "Receive Buffers"),
    uint32("rx_buffers_75", "Receive Buffers Warning Level"),
    uint32("rx_buffers_checked_out", "Receive Buffers Checked Out Count"),
    uint32("rx_buffer_size", "Receive Buffer Size"),
    uint32("max_phy_packet_size", "Maximum Physical Packet Size"),
    uint32("last_time_rx_buff_was_alloc", "Last Time a Receive Buffer was Allocated"),
    uint32("max_num_of_protocols", "Maximum Number of Protocols"),
    uint32("max_num_of_media_types", "Maximum Number of Media Types"),
    uint32("total_tx_packets", "Total Transmit Packets"),
    uint32("get_ecb_buf", "Get ECB Buffers"),
    uint32("get_ecb_fails", "Get ECB Failures"),
    uint32("aes_event_count", "AES Event Count"),
    uint32("post_poned_events", "Postponed Events"),
    uint32("ecb_cxl_fails", "ECB Cancel Failures"),
    uint32("valid_bfrs_reused", "Valid Buffers Reused"),
    uint32("enqueued_send_cnt", "Enqueued Send Count"),
    uint32("total_rx_packets", "Total Receive Packets"),
    uint32("unclaimed_packets", "Unclaimed Packets"),
    uint8("stat_table_major_version", "Statistics Table Major Version"),
    uint8("stat_table_minor_version", "Statistics Table Minor Version"),
], "LSL Information")
MaximumSpaceStruct = struct("max_space_struct", [
    MaxSpace,
])
MemoryCounters = struct("memory_counters", [
    uint32("orig_num_cache_buff", "Original Number Of Cache Buffers"),
    uint32("curr_num_cache_buff", "Current Number Of Cache Buffers"),
    uint32("cache_dirty_block_thresh", "Cache Dirty Block Threshold"),
    uint32("wait_node", "Wait Node Count"),
    uint32("wait_node_alloc_fail", "Wait Node Alloc Failure Count"),
    uint32("move_cache_node", "Move Cache Node Count"),
    uint32("move_cache_node_from_avai", "Move Cache Node From Avail Count"),
    uint32("accel_cache_node_write", "Accelerate Cache Node Write Count"),
    uint32("rem_cache_node", "Remove Cache Node Count"),
    uint32("rem_cache_node_from_avail", "Remove Cache Node From Avail Count"),
], "Memory Counters")
MLIDBoardInfo = struct("mlid_board_info", [
    uint32("protocol_board_num", "Protocol Board Number"),
    uint16("protocol_number", "Protocol Number"),
    bytes("protocol_id", "Protocol ID", 6),
    nstring8("protocol_name", "Protocol Name"),
], "MLID Board Information")
ModifyInfoStruct = struct("modify_info_struct", [
    ModifiedTime,
    ModifiedDate,
    endian(ModifierID, ENC_LITTLE_ENDIAN),
    LastAccessedDate,
], "Modification Information")
nameInfo = struct("name_info_struct", [
    ObjectType,
    nstring8("login_name", "Login Name"),
], "Name Information")
NCPNetworkAddress = struct("ncp_network_address_struct", [
    TransportType,
    Reserved3,
    NetAddress,
], "Network Address")

netAddr = struct("net_addr_struct", [
    TransportType,
    nbytes32("transport_addr", "Transport Address"),
], "Network Address")

NetWareInformationStruct = struct("netware_information_struct", [
    DataStreamSpaceAlloc,           # (Data Stream Alloc Bit)
    AttributesDef32,                # (Attributes Bit)
    FlagsDef,
    DataStreamSize,                 # (Data Stream Size Bit)
    TotalDataStreamDiskSpaceAlloc,  # (Total Stream Size Bit)
    NumberOfDataStreams,
    CreationTime,                   # (Creation Bit)
    CreationDate,
    CreatorID,
    ModifiedTime,                   # (Modify Bit)
    ModifiedDate,
    ModifierID,
    LastAccessedDate,
    ArchivedTime,                   # (Archive Bit)
    ArchivedDate,
    ArchiverID,
    InheritedRightsMask,            # (Rights Bit)
    DirectoryEntryNumber,           # (Directory Entry Bit)
    DOSDirectoryEntryNumber,
    VolumeNumberLong,
    EADataSize,                     # (Extended Attribute Bit)
    EACount,
    EAKeySize,
    CreatorNameSpaceNumber,         # (Name Space Bit)
    Reserved3,
], "NetWare Information")
# --- NLM, audit, object and packet-burst structures ------------------------
NLMInformation = struct("nlm_information", [
    IdentificationNumber,
    NLMFlags,
    Reserved3,
    NLMType,
    Reserved3,
    ParentID,
    MajorVersion,
    MinorVersion,
    Revision,
    Year,
    Reserved3,
    Month,
    Reserved3,
    Day,
    Reserved3,
    AllocAvailByte,
    AllocFreeCount,
    LastGarbCollect,
    MessageLanguage,
    NumberOfReferencedPublics,
], "NLM Information")
NSInfoStruct = struct("ns_info_struct", [
    CreatorNameSpaceNumber,
    Reserved3,
])
NWAuditStatus = struct("nw_audit_status", [
    AuditVersionDate,
    AuditFileVersionDate,
    val_string16("audit_enable_flag", "Auditing Enabled Flag", [
        [ 0x0000, "Auditing Disabled" ],
        [ 0x0001, "Auditing Enabled" ],
    ]),
    Reserved2,
    uint32("audit_file_size", "Audit File Size"),
    uint32("modified_counter", "Modified Counter"),
    uint32("audit_file_max_size", "Audit File Maximum Size"),
    uint32("audit_file_size_threshold", "Audit File Size Threshold"),
    uint32("audit_record_count", "Audit Record Count"),
    uint32("auditing_flags", "Auditing Flags"),
], "NetWare Audit Status")
ObjectSecurityStruct = struct("object_security_struct", [
    ObjectSecurity,
])
ObjectFlagsStruct = struct("object_flags_struct", [
    ObjectFlags,
])
ObjectTypeStruct = struct("object_type_struct", [
    endian(ObjectType, ENC_BIG_ENDIAN),
    Reserved2,
])
ObjectNameStruct = struct("object_name_struct", [
    ObjectNameStringz,
])
ObjectIDStruct = struct("object_id_struct", [
    ObjectID,
    Restriction,
])
ObjectIDStruct64 = struct("object_id_struct64", [
    endian(ObjectID, ENC_LITTLE_ENDIAN),
    endian(RestrictionQuad, ENC_LITTLE_ENDIAN),
])
OpnFilesStruct = struct("opn_files_struct", [
    TaskNumberWord,
    LockType,
    AccessControl,
    LockFlag,
    VolumeNumber,
    DOSParentDirectoryEntry,
    DOSDirectoryEntry,
    ForkCount,
    NameSpace,
    FileName,
], "Open Files Information")
OwnerIDStruct = struct("owner_id_struct", [
    CreatorID,
])
PacketBurstInformation = struct("packet_burst_information", [
    uint32("big_invalid_slot", "Big Invalid Slot Count"),
    uint32("big_forged_packet", "Big Forged Packet Count"),
    uint32("big_invalid_packet", "Big Invalid Packet Count"),
    uint32("big_still_transmitting", "Big Still Transmitting Count"),
    uint32("still_doing_the_last_req", "Still Doing The Last Request Count"),
    uint32("invalid_control_req", "Invalid Control Request Count"),
    uint32("control_invalid_message_number", "Control Invalid Message Number Count"),
    uint32("control_being_torn_down", "Control Being Torn Down Count"),
    uint32("big_repeat_the_file_read", "Big Repeat the File Read Count"),
    uint32("big_send_extra_cc_count", "Big Send Extra CC Count"),
    uint32("big_return_abort_mess", "Big Return Abort Message Count"),
    uint32("big_read_invalid_mess", "Big Read Invalid Message Number Count"),
    uint32("big_read_do_it_over", "Big Read Do It Over Count"),
    uint32("big_read_being_torn_down", "Big Read Being Torn Down Count"),
    uint32("previous_control_packet", "Previous Control Packet Count"),
    uint32("send_hold_off_message", "Send Hold Off Message Count"),
    uint32("big_read_no_data_avail", "Big Read No Data Available Count"),
    uint32("big_read_trying_to_read", "Big Read Trying To Read Too Much Count"),
    uint32("async_read_error", "Async Read Error Count"),
    uint32("big_read_phy_read_err", "Big Read Physical Read Error Count"),
    uint32("ctl_bad_ack_frag_list", "Control Bad ACK Fragment List Count"),
    uint32("ctl_no_data_read", "Control No Data Read Count"),
    uint32("write_dup_req", "Write Duplicate Request Count"),
    uint32("shouldnt_be_ack_here", "Shouldn't Be ACKing Here Count"),
    uint32("write_incon_packet_len", "Write Inconsistent Packet Lengths Count"),
    uint32("first_packet_isnt_a_write", "First Packet Isn't A Write Count"),
    uint32("write_trash_dup_req", "Write Trashed Duplicate Request Count"),
    uint32("big_write_inv_message_num", "Big Write Invalid Message Number Count"),
    uint32("big_write_being_torn_down", "Big Write Being Torn Down Count"),
    uint32("big_write_being_abort", "Big Write Being Aborted Count"),
    uint32("zero_ack_frag", "Zero ACK Fragment Count"),
    uint32("write_curr_trans", "Write Currently Transmitting Count"),
    uint32("try_to_write_too_much", "Trying To Write Too Much Count"),
    uint32("write_out_of_mem_for_ctl_nodes", "Write Out Of Memory For Control Nodes Count"),
    uint32("write_didnt_need_this_frag", "Write Didn't Need This Fragment Count"),
    uint32("write_too_many_buf_check", "Write Too Many Buffers Checked Out Count"),
    uint32("write_timeout", "Write Time Out Count"),
    uint32("write_got_an_ack0", "Write Got An ACK Count 0"),
    uint32("write_got_an_ack1", "Write Got An ACK Count 1"),
    uint32("poll_abort_conn", "Poller Aborted The Connection Count"),
    uint32("may_had_out_of_order", "Maybe Had Out Of Order Writes Count"),
    uint32("had_an_out_of_order", "Had An Out Of Order Write Count"),
    uint32("moved_the_ack_bit_dn", "Moved The ACK Bit Down Count"),
    uint32("bumped_out_of_order", "Bumped Out Of Order Write Count"),
    uint32("poll_rem_old_out_of_order", "Poller Removed Old Out Of Order Count"),
    uint32("write_didnt_need_but_req_ack", "Write Didn't Need But Requested ACK Count"),
    uint32("write_trash_packet", "Write Trashed Packet Count"),
    uint32("too_many_ack_frag", "Too Many ACK Fragments Count"),
    uint32("saved_an_out_of_order_packet", "Saved An Out Of Order Packet Count"),
    uint32("conn_being_aborted", "Connection Being Aborted Count"),
], "Packet Burst Information")
+
# --- Padding placeholders and reply/scan structures ------------------------
# The Pad* structs reserve wire space for information bits that were
# requested but are not decoded individually.
PadDSSpaceAllocate = struct("pad_ds_space_alloc", [
    Reserved4,
])
PadAttributes = struct("pad_attributes", [
    Reserved6,
])
PadDataStreamSize = struct("pad_data_stream_size", [
    Reserved4,
])
PadTotalStreamSize = struct("pad_total_stream_size", [
    Reserved6,
])
PadCreationInfo = struct("pad_creation_info", [
    Reserved8,
])
PadModifyInfo = struct("pad_modify_info", [
    Reserved10,
])
PadArchiveInfo = struct("pad_archive_info", [
    Reserved8,
])
PadRightsInfo = struct("pad_rights_info", [
    Reserved2,
])
PadDirEntry = struct("pad_dir_entry", [
    Reserved12,
])
PadEAInfo = struct("pad_ea_info", [
    Reserved12,
])
PadNSInfo = struct("pad_ns_info", [
    Reserved4,
])
PhyLockStruct = struct("phy_lock_struct", [
    LoggedCount,
    ShareableLockCount,
    RecordStart,
    RecordEnd,
    LogicalConnectionNumber,
    TaskNumByte,
    LockType,
], "Physical Locks")
printInfo = struct("print_info_struct", [
    PrintFlags,
    TabSize,
    Copies,
    PrintToFileFlag,
    BannerName,
    TargetPrinter,
    FormType,
], "Print Information")
ReplyLevel1Struct = struct("reply_lvl_1_struct", [
    DirHandle,
    VolumeNumber,
    Reserved4,
], "Reply Level 1")
ReplyLevel2Struct = struct("reply_lvl_2_struct", [
    VolumeNumberLong,
    DirectoryBase,
    DOSDirectoryBase,
    NameSpace,
    DirHandle,
], "Reply Level 2")
RightsInfoStruct = struct("rights_info_struct", [
    InheritedRightsMask,
])
RoutersInfo = struct("routers_info", [
    bytes("node", "Node", 6),
    ConnectedLAN,
    uint16("route_hops", "Hop Count"),
    uint16("route_time", "Route Time"),
], "Router Information")
RTagStructure = struct("r_tag_struct", [
    RTagNumber,
    ResourceSignature,
    ResourceCount,
    ResourceName,
], "Resource Tag")
ScanInfoFileName = struct("scan_info_file_name", [
    SalvageableFileEntryNumber,
    FileName,
])
ScanInfoFileNoName = struct("scan_info_file_no_name", [
    SalvageableFileEntryNumber,
])
# NOTE(review): "SeachSequenceStruct" is a long-standing misspelling of
# "SearchSequenceStruct"; the name is referenced elsewhere in this file,
# so it is kept unchanged.
SeachSequenceStruct = struct("search_seq", [
    VolumeNumber,
    DirectoryEntryNumber,
    SequenceNumber,
], "Search Sequence")
# --- Server statistics, semaphore, SPX and time structures -----------------
Segments = struct("segments", [
    uint32("volume_segment_dev_num", "Volume Segment Device Number"),
    uint32("volume_segment_offset", "Volume Segment Offset"),
    uint32("volume_segment_size", "Volume Segment Size"),
], "Volume Segment Information")
SemaInfoStruct = struct("sema_info_struct", [
    LogicalConnectionNumber,
    TaskNumByte,
])
SemaStruct = struct("sema_struct", [
    OpenCount,
    SemaphoreValue,
    TaskNumberWord,
    SemaphoreName,
], "Semaphore Information")
ServerInfo = struct("server_info", [
    uint32("reply_canceled", "Reply Canceled Count"),
    uint32("write_held_off", "Write Held Off Count"),
    uint32("write_held_off_with_dup", "Write Held Off With Duplicate Request"),
    uint32("invalid_req_type", "Invalid Request Type Count"),
    uint32("being_aborted", "Being Aborted Count"),
    uint32("already_doing_realloc", "Already Doing Re-Allocate Count"),
    uint32("dealloc_invalid_slot", "De-Allocate Invalid Slot Count"),
    uint32("dealloc_being_proc", "De-Allocate Being Processed Count"),
    uint32("dealloc_forged_packet", "De-Allocate Forged Packet Count"),
    uint32("dealloc_still_transmit", "De-Allocate Still Transmitting Count"),
    uint32("start_station_error", "Start Station Error Count"),
    uint32("invalid_slot", "Invalid Slot Count"),
    uint32("being_processed", "Being Processed Count"),
    uint32("forged_packet", "Forged Packet Count"),
    uint32("still_transmitting", "Still Transmitting Count"),
    uint32("reexecute_request", "Re-Execute Request Count"),
    uint32("invalid_sequence_number", "Invalid Sequence Number Count"),
    uint32("dup_is_being_sent", "Duplicate Is Being Sent Already Count"),
    uint32("sent_pos_ack", "Sent Positive Acknowledge Count"),
    uint32("sent_a_dup_reply", "Sent A Duplicate Reply Count"),
    uint32("no_mem_for_station", "No Memory For Station Control Count"),
    uint32("no_avail_conns", "No Available Connections Count"),
    uint32("realloc_slot", "Re-Allocate Slot Count"),
    uint32("realloc_slot_came_too_soon", "Re-Allocate Slot Came Too Soon Count"),
], "Server Information")
ServersSrcInfo = struct("servers_src_info", [
    ServerNode,
    ConnectedLAN,
    HopsToNet,
], "Source Server Information")
SpaceStruct = struct("space_struct", [
    Level,
    MaxSpace,
    CurrentSpace,
], "Space Information")
SPXInformation = struct("spx_information", [
    uint16("spx_max_conn", "SPX Max Connections Count"),
    uint16("spx_max_used_conn", "SPX Max Used Connections"),
    uint16("spx_est_conn_req", "SPX Establish Connection Requests"),
    uint16("spx_est_conn_fail", "SPX Establish Connection Fail"),
    uint16("spx_listen_con_req", "SPX Listen Connect Request"),
    uint16("spx_listen_con_fail", "SPX Listen Connect Fail"),
    uint32("spx_send", "SPX Send Count"),
    uint32("spx_window_choke", "SPX Window Choke Count"),
    uint16("spx_bad_send", "SPX Bad Send Count"),
    uint16("spx_send_fail", "SPX Send Fail Count"),
    uint16("spx_abort_conn", "SPX Aborted Connection"),
    uint32("spx_listen_pkt", "SPX Listen Packet Count"),
    uint16("spx_bad_listen", "SPX Bad Listen Count"),
    uint32("spx_incoming_pkt", "SPX Incoming Packet Count"),
    uint16("spx_bad_in_pkt", "SPX Bad In Packet Count"),
    uint16("spx_supp_pkt", "SPX Suppressed Packet Count"),
    uint16("spx_no_ses_listen", "SPX No Session Listen ECB Count"),
    uint16("spx_watch_dog", "SPX Watch Dog Destination Session Count"),
], "SPX Information")
StackInfo = struct("stack_info", [
    StackNumber,
    fw_string("stack_short_name", "Stack Short Name", 16),
], "Stack Information")
statsInfo = struct("stats_info_struct", [
    TotalBytesRead,
    TotalBytesWritten,
    TotalRequest,
], "Statistics")
TaskStruct = struct("task_struct", [
    TaskNumberWord,
    TaskState,
], "Task Information")
theTimeStruct = struct("the_time_struct", [
    UTCTimeInSeconds,
    FractionalSeconds,
    TimesyncStatus,
])
timeInfo = struct("time_info", [
    Year,
    Month,
    Day,
    Hour,
    Minute,
    Second,
    DayOfWeek,
    uint32("login_expiration_time", "Login Expiration Time"),
])
TotalStreamSizeStruct = struct("total_stream_size_struct", [
    TtlDSDskSpaceAlloc,
    NumberOfDataStreams,
])
TrendCounters = struct("trend_counters", [
    uint32("num_of_cache_checks", "Number Of Cache Checks"),
    uint32("num_of_cache_hits", "Number Of Cache Hits"),
    uint32("num_of_dirty_cache_checks", "Number Of Dirty Cache Checks"),
    uint32("num_of_cache_dirty_checks", "Number Of Cache Dirty Checks"),
    uint32("cache_used_while_check", "Cache Used While Checking"),
    uint32("wait_till_dirty_blcks_dec", "Wait Till Dirty Blocks Decrease Count"),
    uint32("alloc_blck_frm_avail", "Allocate Block From Available Count"),
    uint32("alloc_blck_frm_lru", "Allocate Block From LRU Count"),
    uint32("alloc_blck_already_wait", "Allocate Block Already Waiting"),
    uint32("lru_sit_time", "LRU Sitting Time"),
    uint32("num_of_cache_check_no_wait", "Number Of Cache Check No Wait"),
    uint32("num_of_cache_hits_no_wait", "Number Of Cache Hits No Wait"),
], "Trend Counters")
# --- Trustee, user, volume and file-map structures -------------------------
TrusteeStruct = struct("trustee_struct", [
    endian(ObjectID, ENC_LITTLE_ENDIAN),
    AccessRightsMaskWord,
])
UpdateDateStruct = struct("update_date_struct", [
    UpdateDate,
])
UpdateIDStruct = struct("update_id_struct", [
    UpdateID,
])
UpdateTimeStruct = struct("update_time_struct", [
    UpdateTime,
])
UserInformation = struct("user_info", [
    endian(ConnectionNumber, ENC_LITTLE_ENDIAN),
    UseCount,
    Reserved2,
    ConnectionServiceType,
    Year,
    Month,
    Day,
    Hour,
    Minute,
    Second,
    DayOfWeek,
    Status,
    Reserved2,
    ExpirationTime,
    ObjectType,
    Reserved2,
    TransactionTrackingFlag,
    LogicalLockThreshold,
    FileWriteFlags,
    FileWriteState,
    Reserved,
    FileLockCount,
    RecordLockCount,
    TotalBytesRead,
    TotalBytesWritten,
    TotalRequest,
    HeldRequests,
    HeldBytesRead,
    HeldBytesWritten,
], "User Information")
VolInfoStructure = struct("vol_info_struct", [
    VolumeType,
    Reserved2,
    StatusFlagBits,
    SectorSize,
    SectorsPerClusterLong,
    VolumeSizeInClusters,
    FreedClusters,
    SubAllocFreeableClusters,
    FreeableLimboSectors,
    NonFreeableLimboSectors,
    NonFreeableAvailableSubAllocSectors,
    NotUsableSubAllocSectors,
    SubAllocClusters,
    DataStreamsCount,
    LimboDataStreamsCount,
    OldestDeletedFileAgeInTicks,
    CompressedDataStreamsCount,
    CompressedLimboDataStreamsCount,
    UnCompressableDataStreamsCount,
    PreCompressedSectors,
    CompressedSectors,
    MigratedFiles,
    MigratedSectors,
    ClustersUsedByFAT,
    ClustersUsedByDirectories,
    ClustersUsedByExtendedDirectories,
    TotalDirectoryEntries,
    UnUsedDirectoryEntries,
    TotalExtendedDirectoryExtents,
    UnUsedExtendedDirectoryExtents,
    ExtendedAttributesDefined,
    ExtendedAttributeExtentsUsed,
    DirectoryServicesObjectID,
    VolumeEpochTime,

], "Volume Information")
# 64-bit variant of the volume information reply (NetWare 6.x+).
VolInfoStructure64 = struct("vol_info_struct64", [
    VolumeTypeLong,
    StatusFlagBits,
    uint64("sectoresize64", "Sector Size"),
    uint64("sectorspercluster64", "Sectors Per Cluster"),
    uint64("volumesizeinclusters64", "Volume Size in Clusters"),
    uint64("freedclusters64", "Freed Clusters"),
    uint64("suballocfreeableclusters64", "Sub Alloc Freeable Clusters"),
    uint64("freeablelimbosectors64", "Freeable Limbo Sectors"),
    uint64("nonfreeablelimbosectors64", "Non-Freeable Limbo Sectors"),
    uint64("nonfreeableavailalesuballocsectors64", "Non-Freeable Available Sub Alloc Sectors"),
    uint64("notusablesuballocsectors64", "Not Usable Sub Alloc Sectors"),
    uint64("suballocclusters64", "Sub Alloc Clusters"),
    uint64("datastreamscount64", "Data Streams Count"),
    uint64("limbodatastreamscount64", "Limbo Data Streams Count"),
    uint64("oldestdeletedfileageinticks64", "Oldest Deleted File Age in Ticks"),
    uint64("compressdatastreamscount64", "Compressed Data Streams Count"),
    uint64("compressedlimbodatastreamscount64", "Compressed Limbo Data Streams Count"),
    uint64("uncompressabledatastreamscount64", "Uncompressable Data Streams Count"),
    uint64("precompressedsectors64", "Precompressed Sectors"),
    uint64("compressedsectors64", "Compressed Sectors"),
    uint64("migratedfiles64", "Migrated Files"),
    uint64("migratedsectors64", "Migrated Sectors"),
    uint64("clustersusedbyfat64", "Clusters Used by FAT"),
    uint64("clustersusedbydirectories64", "Clusters Used by Directories"),
    uint64("clustersusedbyextendeddirectories64", "Clusters Used by Extended Directories"),
    uint64("totaldirectoryentries64", "Total Directory Entries"),
    uint64("unuseddirectoryentries64", "Unused Directory Entries"),
    uint64("totalextendeddirectoryextents64", "Total Extended Directory Extents"),
    uint64("unusedextendeddirectoryextents64", "Unused Total Extended Directory Extents"),
    uint64("extendedattributesdefined64", "Extended Attributes Defined"),
    uint64("extendedattributeextentsused64", "Extended Attribute Extents Used"),
    uint64("directoryservicesobjectid64", "Directory Services Object ID"),
    VolumeEpochTime,

], "Volume Information")
VolInfo2Struct = struct("vol_info_struct_2", [
    uint32("volume_active_count", "Volume Active Count"),
    uint32("volume_use_count", "Volume Use Count"),
    uint32("mac_root_ids", "MAC Root IDs"),
    VolumeEpochTime,
    uint32("volume_reference_count", "Volume Reference Count"),
    uint32("compression_lower_limit", "Compression Lower Limit"),
    uint32("outstanding_ios", "Outstanding IOs"),
    uint32("outstanding_compression_ios", "Outstanding Compression IOs"),
    uint32("compression_ios_limit", "Compression IOs Limit"),
], "Extended Volume Information")
VolumeWithNameStruct = struct("volume_with_name_struct", [
    VolumeNumberLong,
    VolumeNameLen,
])
VolumeStruct = struct("volume_struct", [
    VolumeNumberLong,
])

zFileMap_Allocation = struct("zfilemap_allocation_struct", [
    uint64("extent_byte_offset", "Byte Offset"),
    endian(uint64("extent_length_alloc", "Length"), ENC_LITTLE_ENDIAN),
    #ExtentLength,
], "File Map Allocation")
zFileMap_Logical = struct("zfilemap_logical_struct", [
    uint64("extent_block_number", "Block Number"),
    uint64("extent_number_of_blocks", "Number of Blocks"),
], "File Map Logical")
zFileMap_Physical = struct("zfilemap_physical_struct", [
    uint64("extent_length_physical", "Length"),
    uint64("extent_logical_offset", "Logical Offset"),
    uint64("extent_pool_offset", "Pool Offset"),
    uint64("extent_physical_offset", "Physical Offset"),
    fw_string("extent_device_id", "Device ID", 8),
], "File Map Physical")
+
+##############################################################################
+# NCP Groups
+##############################################################################
def define_groups():
    """Register the NCP function-group names in the global ``groups`` table.

    Keys are the short group identifiers referenced by individual packet
    definitions; values are the human-readable group names emitted into
    the generated dissector.  Identifiers and names are part of the
    dissector's public output and are kept verbatim.
    """
    groups.update({
        'accounting': "Accounting",
        'afp': "AFP",
        'auditing': "Auditing",
        'bindery': "Bindery",
        'connection': "Connection",
        'enhanced': "Enhanced File System",
        'extended': "Extended Attribute",
        'extension': "NCP Extension",
        'file': "File System",
        'fileserver': "File Server Environment",
        'message': "Message",
        'migration': "Data Migration",
        'nds': "Novell Directory Services",
        'pburst': "Packet Burst",
        'print': "Print",
        'remote': "Remote",
        'sync': "Synchronization",
        'tsync': "Time Synchronization",
        'tts': "Transaction Tracking",
        'qms': "Queue Management System (QMS)",
        'stats': "Server Statistics",
        'nmas': "Novell Modular Authentication Service",
        'sss': "SecretStore Services",
    })
+
+##############################################################################
+# NCP Errors
+##############################################################################
+def define_errors():
+ errors[0x0000] = "Ok"
+ errors[0x0001] = "Transaction tracking is available"
+ errors[0x0002] = "Ok. The data has been written"
+ errors[0x0003] = "Calling Station is a Manager"
+
+ errors[0x0100] = "One or more of the Connection Numbers in the send list are invalid"
+ errors[0x0101] = "Invalid space limit"
+ errors[0x0102] = "Insufficient disk space"
+ errors[0x0103] = "Queue server cannot add jobs"
+ errors[0x0104] = "Out of disk space"
+ errors[0x0105] = "Semaphore overflow"
+ errors[0x0106] = "Invalid Parameter"
+ errors[0x0107] = "Invalid Number of Minutes to Delay"
+ errors[0x0108] = "Invalid Start or Network Number"
+ errors[0x0109] = "Cannot Obtain License"
+ errors[0x010a] = "No Purgeable Files Available"
+
+ errors[0x0200] = "One or more clients in the send list are not logged in"
+ errors[0x0201] = "Queue server cannot attach"
+
+ errors[0x0300] = "One or more clients in the send list are not accepting messages"
+
+ errors[0x0400] = "Client already has message"
+ errors[0x0401] = "Queue server cannot service job"
+
+ errors[0x7300] = "Revoke Handle Rights Not Found"
+ errors[0x7700] = "Buffer Too Small"
+ errors[0x7900] = "Invalid Parameter in Request Packet"
+ errors[0x7901] = "Nothing being Compressed"
+ errors[0x7902] = "No Items Found"
+ errors[0x7a00] = "Connection Already Temporary"
+ errors[0x7b00] = "Connection Already Logged in"
+ errors[0x7c00] = "Connection Not Authenticated"
+ errors[0x7d00] = "Connection Not Logged In"
+
+ errors[0x7e00] = "NCP failed boundary check"
+ errors[0x7e01] = "Invalid Length"
+
+ errors[0x7f00] = "Lock Waiting"
+ errors[0x8000] = "Lock fail"
+ errors[0x8001] = "File in Use"
+
+ errors[0x8100] = "A file handle could not be allocated by the file server"
+ errors[0x8101] = "Out of File Handles"
+
+ errors[0x8200] = "Unauthorized to open the file"
+ errors[0x8300] = "Unable to read/write the volume. Possible bad sector on the file server"
+ errors[0x8301] = "Hard I/O Error"
+
+ errors[0x8400] = "Unauthorized to create the directory"
+ errors[0x8401] = "Unauthorized to create the file"
+
+ errors[0x8500] = "Unauthorized to delete the specified file"
+ errors[0x8501] = "Unauthorized to overwrite an existing file in this directory"
+
+ errors[0x8700] = "An unexpected character was encountered in the filename"
+ errors[0x8701] = "Create Filename Error"
+
+ errors[0x8800] = "Invalid file handle"
+ errors[0x8900] = "Unauthorized to search this file/directory"
+ errors[0x8a00] = "Unauthorized to delete this file/directory"
+ errors[0x8b00] = "Unauthorized to rename a file in this directory"
+
+ errors[0x8c00] = "No set privileges"
+ errors[0x8c01] = "Unauthorized to modify a file in this directory"
+ errors[0x8c02] = "Unauthorized to change the restriction on this volume"
+
+ errors[0x8d00] = "Some of the affected files are in use by another client"
+ errors[0x8d01] = "The affected file is in use"
+
+ errors[0x8e00] = "All of the affected files are in use by another client"
+ errors[0x8f00] = "Some of the affected files are read-only"
+
+ errors[0x9000] = "An attempt to modify a read-only volume occurred"
+ errors[0x9001] = "All of the affected files are read-only"
+ errors[0x9002] = "Read Only Access to Volume"
+
+ errors[0x9100] = "Some of the affected files already exist"
+ errors[0x9101] = "Some Names Exist"
+
+ errors[0x9200] = "Directory with the new name already exists"
+ errors[0x9201] = "All of the affected files already exist"
+
+ errors[0x9300] = "Unauthorized to read from this file"
+ errors[0x9400] = "Unauthorized to write to this file"
+ errors[0x9500] = "The affected file is detached"
+
+ errors[0x9600] = "The file server has run out of memory to service this request"
+ errors[0x9601] = "No alloc space for message"
+ errors[0x9602] = "Server Out of Space"
+
+ errors[0x9800] = "The affected volume is not mounted"
+ errors[0x9801] = "The volume associated with Volume Number is not mounted"
+ errors[0x9802] = "The resulting volume does not exist"
+ errors[0x9803] = "The destination volume is not mounted"
+ errors[0x9804] = "Disk Map Error"
+
+ errors[0x9900] = "The file server has run out of directory space on the affected volume"
+ errors[0x9a00] = "Invalid request to rename the affected file to another volume"
+
+ errors[0x9b00] = "DirHandle is not associated with a valid directory path"
+ errors[0x9b01] = "A resulting directory handle is not associated with a valid directory path"
+ errors[0x9b02] = "The directory associated with DirHandle does not exist"
+ errors[0x9b03] = "Bad directory handle"
+
+ errors[0x9c00] = "The resulting path is not valid"
+ errors[0x9c01] = "The resulting file path is not valid"
+ errors[0x9c02] = "The resulting directory path is not valid"
+ errors[0x9c03] = "Invalid path"
+ errors[0x9c04] = "No more trustees found, based on requested search sequence number"
+
+ errors[0x9d00] = "A directory handle was not available for allocation"
+
+ errors[0x9e00] = "The name of the directory does not conform to a legal name for this name space"
+ errors[0x9e01] = "The new directory name does not conform to a legal name for this name space"
+ errors[0x9e02] = "Bad File Name"
+
+ errors[0x9f00] = "The request attempted to delete a directory that is in use by another client"
+
+ errors[0xa000] = "The request attempted to delete a directory that is not empty"
+ errors[0xa100] = "An unrecoverable error occurred on the affected directory"
+
+ errors[0xa200] = "The request attempted to read from a file region that is physically locked"
+ errors[0xa201] = "I/O Lock Error"
+
+ errors[0xa400] = "Invalid directory rename attempted"
+ errors[0xa500] = "Invalid open create mode"
+ errors[0xa600] = "Auditor Access has been Removed"
+ errors[0xa700] = "Error Auditing Version"
+
+ errors[0xa800] = "Invalid Support Module ID"
+ errors[0xa801] = "No Auditing Access Rights"
+ errors[0xa802] = "No Access Rights"
+
+ errors[0xa900] = "Error Link in Path"
+ errors[0xa901] = "Invalid Path With Junction Present"
+
+ errors[0xaa00] = "Invalid Data Type Flag"
+
+ errors[0xac00] = "Packet Signature Required"
+
+ errors[0xbe00] = "Invalid Data Stream"
+ errors[0xbf00] = "Requests for this name space are not valid on this volume"
+
+ errors[0xc000] = "Unauthorized to retrieve accounting data"
+
+ errors[0xc100] = "The ACCOUNT_BALANCE property does not exist"
+ errors[0xc101] = "No Account Balance"
+
+ errors[0xc200] = "The object has exceeded its credit limit"
+ errors[0xc300] = "Too many holds have been placed against this account"
+ errors[0xc400] = "The client account has been disabled"
+
+ errors[0xc500] = "Access to the account has been denied because of intruder detection"
+ errors[0xc501] = "Login lockout"
+ errors[0xc502] = "Server Login Locked"
+
+ errors[0xc600] = "The caller does not have operator privileges"
+ errors[0xc601] = "The client does not have operator privileges"
+
+ errors[0xc800] = "Missing EA Key"
+ errors[0xc900] = "EA Not Found"
+ errors[0xca00] = "Invalid EA Handle Type"
+ errors[0xcb00] = "EA No Key No Data"
+ errors[0xcc00] = "EA Number Mismatch"
+ errors[0xcd00] = "Extent Number Out of Range"
+ errors[0xce00] = "EA Bad Directory Number"
+ errors[0xcf00] = "Invalid EA Handle"
+
+ errors[0xd000] = "Queue error"
+ errors[0xd001] = "EA Position Out of Range"
+
+ errors[0xd100] = "The queue does not exist"
+ errors[0xd101] = "EA Access Denied"
+
+ errors[0xd200] = "A queue server is not associated with this queue"
+ errors[0xd201] = "A queue server is not associated with the selected queue"
+ errors[0xd202] = "No queue server"
+ errors[0xd203] = "Data Page Odd Size"
+
+ errors[0xd300] = "No queue rights"
+ errors[0xd301] = "EA Volume Not Mounted"
+
+ errors[0xd400] = "The queue is full and cannot accept another request"
+ errors[0xd401] = "The queue associated with ObjectId is full and cannot accept another request"
+ errors[0xd402] = "Bad Page Boundary"
+
+ errors[0xd500] = "A job does not exist in this queue"
+ errors[0xd501] = "No queue job"
+ errors[0xd502] = "The job associated with JobNumber does not exist in this queue"
+ errors[0xd503] = "Inspect Failure"
+ errors[0xd504] = "Unknown NCP Extension Number"
+
+ errors[0xd600] = "The file server does not allow unencrypted passwords"
+ errors[0xd601] = "No job right"
+ errors[0xd602] = "EA Already Claimed"
+
+ errors[0xd700] = "Bad account"
+ errors[0xd701] = "The old and new password strings are identical"
+ errors[0xd702] = "The job is currently being serviced"
+ errors[0xd703] = "The queue is currently servicing a job"
+ errors[0xd704] = "Queue servicing"
+ errors[0xd705] = "Odd Buffer Size"
+
+ errors[0xd800] = "Queue not active"
+ errors[0xd801] = "No Scorecards"
+
+ errors[0xd900] = "The file server cannot accept another connection as it has reached its limit"
+ errors[0xd901] = "The client is not security equivalent to one of the objects in the Q_SERVERS group property of the target queue"
+ errors[0xd902] = "Queue Station is not a server"
+ errors[0xd903] = "Bad EDS Signature"
+ errors[0xd904] = "Attempt to log in using an account which has limits on the number of concurrent connections and that number has been reached."
+
+ errors[0xda00] = "Attempted to login to the file server during a restricted time period"
+ errors[0xda01] = "Queue halted"
+ errors[0xda02] = "EA Space Limit"
+
+ errors[0xdb00] = "Attempted to login to the file server from an unauthorized workstation or network"
+ errors[0xdb01] = "The queue cannot attach another queue server"
+ errors[0xdb02] = "Maximum queue servers"
+ errors[0xdb03] = "EA Key Corrupt"
+
+ errors[0xdc00] = "Account Expired"
+ errors[0xdc01] = "EA Key Limit"
+
+ errors[0xdd00] = "Tally Corrupt"
+ errors[0xde00] = "Attempted to login to the file server with an incorrect password"
+ errors[0xdf00] = "Attempted to login to the file server with a password that has expired"
+
+ errors[0xe000] = "No Login Connections Available"
+ errors[0xe700] = "No disk track"
+ errors[0xe800] = "Write to group"
+ errors[0xe900] = "The object is already a member of the group property"
+
+ errors[0xea00] = "No such member"
+ errors[0xea01] = "The bindery object is not a member of the set"
+ errors[0xea02] = "Non-existent member"
+
+ errors[0xeb00] = "The property is not a set property"
+
+ errors[0xec00] = "No such set"
+ errors[0xec01] = "The set property does not exist"
+
+ errors[0xed00] = "Property exists"
+ errors[0xed01] = "The property already exists"
+ errors[0xed02] = "An attempt was made to create a bindery object property that already exists"
+
+ errors[0xee00] = "The object already exists"
+ errors[0xee01] = "The bindery object already exists"
+
+ errors[0xef00] = "Illegal name"
+ errors[0xef01] = "Illegal characters in ObjectName field"
+ errors[0xef02] = "Invalid name"
+
+ errors[0xf000] = "A wildcard was detected in a field that does not support wildcards"
+ errors[0xf001] = "An illegal wildcard was detected in ObjectName"
+
+ errors[0xf100] = "The client does not have the rights to access this bindery object"
+ errors[0xf101] = "Bindery security"
+ errors[0xf102] = "Invalid bindery security"
+
+ errors[0xf200] = "Unauthorized to read from this object"
+ errors[0xf300] = "Unauthorized to rename this object"
+
+ errors[0xf400] = "Unauthorized to delete this object"
+ errors[0xf401] = "No object delete privileges"
+ errors[0xf402] = "Unauthorized to delete this queue"
+
+ errors[0xf500] = "Unauthorized to create this object"
+ errors[0xf501] = "No object create"
+
+ errors[0xf600] = "No property delete"
+ errors[0xf601] = "Unauthorized to delete the property of this object"
+ errors[0xf602] = "Unauthorized to delete this property"
+
+ errors[0xf700] = "Unauthorized to create this property"
+ errors[0xf701] = "No property create privilege"
+
+ errors[0xf800] = "Unauthorized to write to this property"
+ errors[0xf900] = "Unauthorized to read this property"
+ errors[0xfa00] = "Temporary remap error"
+
+ errors[0xfb00] = "No such property"
+ errors[0xfb01] = "The file server does not support this request"
+ errors[0xfb02] = "The specified property does not exist"
+ errors[0xfb03] = "The PASSWORD property does not exist for this bindery object"
+ errors[0xfb04] = "NDS NCP not available"
+ errors[0xfb05] = "Bad Directory Handle"
+ errors[0xfb06] = "Unknown Request"
+ errors[0xfb07] = "Invalid Subfunction Request"
+ errors[0xfb08] = "Attempt to use an invalid parameter (drive number, path, or flag value) during a set drive path call"
+ errors[0xfb09] = "NMAS not running on this server, NCP NOT Supported"
+ errors[0xfb0a] = "Station Not Logged In"
+ errors[0xfb0b] = "Secret Store not running on this server, NCP Not supported"
+
+ errors[0xfc00] = "The message queue cannot accept another message"
+ errors[0xfc01] = "The trustee associated with ObjectId does not exist"
+ errors[0xfc02] = "The specified bindery object does not exist"
+ errors[0xfc03] = "The bindery object associated with ObjectID does not exist"
+ errors[0xfc04] = "A bindery object does not exist that matches"
+ errors[0xfc05] = "The specified queue does not exist"
+ errors[0xfc06] = "No such object"
+ errors[0xfc07] = "The queue associated with ObjectID does not exist"
+
+ errors[0xfd00] = "Bad station number"
+ errors[0xfd01] = "The connection associated with ConnectionNumber is not active"
+ errors[0xfd02] = "Lock collision"
+ errors[0xfd03] = "Transaction tracking is disabled"
+
+ errors[0xfe00] = "I/O failure"
+ errors[0xfe01] = "The files containing the bindery on the file server are locked"
+ errors[0xfe02] = "A file with the specified name already exists in this directory"
+ errors[0xfe03] = "No more restrictions were found"
+ errors[0xfe04] = "The file server was unable to lock the file within the specified time limit"
+ errors[0xfe05] = "The file server was unable to lock all files within the specified time limit"
+ errors[0xfe06] = "The bindery object associated with ObjectID is not a valid trustee"
+ errors[0xfe07] = "Directory locked"
+ errors[0xfe08] = "Bindery locked"
+ errors[0xfe09] = "Invalid semaphore name length"
+ errors[0xfe0a] = "The file server was unable to complete the operation within the specified time limit"
+ errors[0xfe0b] = "Transaction restart"
+ errors[0xfe0c] = "Bad packet"
+ errors[0xfe0d] = "Timeout"
+ errors[0xfe0e] = "User Not Found"
+ errors[0xfe0f] = "Trustee Not Found"
+
+ errors[0xff00] = "Failure"
+ errors[0xff01] = "Lock error"
+ errors[0xff02] = "File not found"
+ errors[0xff03] = "The file not found or cannot be unlocked"
+ errors[0xff04] = "Record not found"
+ errors[0xff05] = "The logical record was not found"
+ errors[0xff06] = "The printer associated with Printer Number does not exist"
+ errors[0xff07] = "No such printer"
+ errors[0xff08] = "Unable to complete the request"
+ errors[0xff09] = "Unauthorized to change privileges of this trustee"
+ errors[0xff0a] = "No files matching the search criteria were found"
+ errors[0xff0b] = "A file matching the search criteria was not found"
+ errors[0xff0c] = "Verification failed"
+ errors[0xff0d] = "Object associated with ObjectID is not a manager"
+ errors[0xff0e] = "Invalid initial semaphore value"
+ errors[0xff0f] = "The semaphore handle is not valid"
+ errors[0xff10] = "SemaphoreHandle is not associated with a valid sempahore"
+ errors[0xff11] = "Invalid semaphore handle"
+ errors[0xff12] = "Transaction tracking is not available"
+ errors[0xff13] = "The transaction has not yet been written to disk"
+ errors[0xff14] = "Directory already exists"
+ errors[0xff15] = "The file already exists and the deletion flag was not set"
+ errors[0xff16] = "No matching files or directories were found"
+ errors[0xff17] = "A file or directory matching the search criteria was not found"
+ errors[0xff18] = "The file already exists"
+ errors[0xff19] = "Failure, No files found"
+ errors[0xff1a] = "Unlock Error"
+ errors[0xff1b] = "I/O Bound Error"
+ errors[0xff1c] = "Not Accepting Messages"
+ errors[0xff1d] = "No More Salvageable Files in Directory"
+ errors[0xff1e] = "Calling Station is Not a Manager"
+ errors[0xff1f] = "Bindery Failure"
+ errors[0xff20] = "NCP Extension Not Found"
+ errors[0xff21] = "Audit Property Not Found"
+ errors[0xff22] = "Server Set Parameter Not Found"
+
+##############################################################################
+# Produce C code
+##############################################################################
+def ExamineVars(vars, structs_hash, vars_hash):
+ for var in vars:
+ if isinstance(var, struct):
+ structs_hash[var.HFName()] = var
+ struct_vars = var.Variables()
+ ExamineVars(struct_vars, structs_hash, vars_hash)
+ else:
+ vars_hash[repr(var)] = var
+ if isinstance(var, bitfield):
+ sub_vars = var.SubVariables()
+ ExamineVars(sub_vars, structs_hash, vars_hash)
+
+def produce_code():
+
+ global errors
+
+ print("/*")
+ print(" * Do not modify this file. Changes will be overwritten.")
+ print(" * Generated automatically from %s" % (sys.argv[0]))
+ print(" */\n")
+
+ print("""
+/*
+ * Portions Copyright (c) Gilbert Ramirez 2000-2002
+ * Portions Copyright (c) Novell, Inc. 2000-2005
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "config.h"
+
+#include <string.h>
+#include <glib.h>
+#include <epan/packet.h>
+#include <epan/dfilter/dfilter.h>
+#include <epan/exceptions.h>
+#include <ftypes/ftypes.h>
+#include <epan/to_str.h>
+#include <epan/conversation.h>
+#include <epan/ptvcursor.h>
+#include <epan/strutil.h>
+#include <epan/reassemble.h>
+#include <epan/tap.h>
+#include <epan/proto_data.h>
+#include "packet-ncp-int.h"
+#include "packet-ncp-nmas.h"
+#include "packet-ncp-sss.h"
+
+/* Function declarations for functions used in proto_register_ncp2222() */
+void proto_register_ncp2222(void);
+
+/* Endianness macros */
+#define NO_ENDIANNESS 0
+
+#define NO_LENGTH -1
+
+/* We use this int-pointer as a special flag in ptvc_record's */
+static int ptvc_struct_int_storage;
+#define PTVC_STRUCT (&ptvc_struct_int_storage)
+
+/* Values used in the count-variable ("var"/"repeat") logic. */""")
+
+
+ if global_highest_var > -1:
+ print("#define NUM_REPEAT_VARS %d" % (global_highest_var + 1))
+ print("static unsigned repeat_vars[NUM_REPEAT_VARS];")
+ else:
+ print("#define NUM_REPEAT_VARS 0")
+ print("static unsigned *repeat_vars = NULL;")
+
+ print("""
+#define NO_VAR NUM_REPEAT_VARS
+#define NO_REPEAT NUM_REPEAT_VARS
+
+#define REQ_COND_SIZE_CONSTANT 0
+#define REQ_COND_SIZE_VARIABLE 1
+#define NO_REQ_COND_SIZE 0
+
+
+#define NTREE 0x00020000
+#define NDEPTH 0x00000002
+#define NREV 0x00000004
+#define NFLAGS 0x00000008
+
+static int hf_ncp_number_of_data_streams_long = -1;
+static int hf_ncp_func = -1;
+static int hf_ncp_length = -1;
+static int hf_ncp_subfunc = -1;
+static int hf_ncp_group = -1;
+static int hf_ncp_fragment_handle = -1;
+static int hf_ncp_completion_code = -1;
+static int hf_ncp_connection_status = -1;
+static int hf_ncp_req_frame_num = -1;
+static int hf_ncp_req_frame_time = -1;
+static int hf_ncp_fragment_size = -1;
+static int hf_ncp_message_size = -1;
+static int hf_ncp_nds_flag = -1;
+static int hf_ncp_nds_verb = -1;
+static int hf_ping_version = -1;
+/* static int hf_nds_version = -1; */
+/* static int hf_nds_flags = -1; */
+static int hf_nds_reply_depth = -1;
+static int hf_nds_reply_rev = -1;
+static int hf_nds_reply_flags = -1;
+static int hf_nds_p1type = -1;
+static int hf_nds_uint32value = -1;
+static int hf_nds_bit1 = -1;
+static int hf_nds_bit2 = -1;
+static int hf_nds_bit3 = -1;
+static int hf_nds_bit4 = -1;
+static int hf_nds_bit5 = -1;
+static int hf_nds_bit6 = -1;
+static int hf_nds_bit7 = -1;
+static int hf_nds_bit8 = -1;
+static int hf_nds_bit9 = -1;
+static int hf_nds_bit10 = -1;
+static int hf_nds_bit11 = -1;
+static int hf_nds_bit12 = -1;
+static int hf_nds_bit13 = -1;
+static int hf_nds_bit14 = -1;
+static int hf_nds_bit15 = -1;
+static int hf_nds_bit16 = -1;
+static int hf_outflags = -1;
+static int hf_bit1outflags = -1;
+static int hf_bit2outflags = -1;
+static int hf_bit3outflags = -1;
+static int hf_bit4outflags = -1;
+static int hf_bit5outflags = -1;
+static int hf_bit6outflags = -1;
+static int hf_bit7outflags = -1;
+static int hf_bit8outflags = -1;
+static int hf_bit9outflags = -1;
+static int hf_bit10outflags = -1;
+static int hf_bit11outflags = -1;
+static int hf_bit12outflags = -1;
+static int hf_bit13outflags = -1;
+static int hf_bit14outflags = -1;
+static int hf_bit15outflags = -1;
+static int hf_bit16outflags = -1;
+static int hf_bit1nflags = -1;
+static int hf_bit2nflags = -1;
+static int hf_bit3nflags = -1;
+static int hf_bit4nflags = -1;
+static int hf_bit5nflags = -1;
+static int hf_bit6nflags = -1;
+static int hf_bit7nflags = -1;
+static int hf_bit8nflags = -1;
+static int hf_bit9nflags = -1;
+static int hf_bit10nflags = -1;
+static int hf_bit11nflags = -1;
+static int hf_bit12nflags = -1;
+static int hf_bit13nflags = -1;
+static int hf_bit14nflags = -1;
+static int hf_bit15nflags = -1;
+static int hf_bit16nflags = -1;
+static int hf_bit1rflags = -1;
+static int hf_bit2rflags = -1;
+static int hf_bit3rflags = -1;
+static int hf_bit4rflags = -1;
+static int hf_bit5rflags = -1;
+static int hf_bit6rflags = -1;
+static int hf_bit7rflags = -1;
+static int hf_bit8rflags = -1;
+static int hf_bit9rflags = -1;
+static int hf_bit10rflags = -1;
+static int hf_bit11rflags = -1;
+static int hf_bit12rflags = -1;
+static int hf_bit13rflags = -1;
+static int hf_bit14rflags = -1;
+static int hf_bit15rflags = -1;
+static int hf_bit16rflags = -1;
+static int hf_cflags = -1;
+static int hf_bit1cflags = -1;
+static int hf_bit2cflags = -1;
+static int hf_bit3cflags = -1;
+static int hf_bit4cflags = -1;
+static int hf_bit5cflags = -1;
+static int hf_bit6cflags = -1;
+static int hf_bit7cflags = -1;
+static int hf_bit8cflags = -1;
+static int hf_bit9cflags = -1;
+static int hf_bit10cflags = -1;
+static int hf_bit11cflags = -1;
+static int hf_bit12cflags = -1;
+static int hf_bit13cflags = -1;
+static int hf_bit14cflags = -1;
+static int hf_bit15cflags = -1;
+static int hf_bit16cflags = -1;
+static int hf_bit1acflags = -1;
+static int hf_bit2acflags = -1;
+static int hf_bit3acflags = -1;
+static int hf_bit4acflags = -1;
+static int hf_bit5acflags = -1;
+static int hf_bit6acflags = -1;
+static int hf_bit7acflags = -1;
+static int hf_bit8acflags = -1;
+static int hf_bit9acflags = -1;
+static int hf_bit10acflags = -1;
+static int hf_bit11acflags = -1;
+static int hf_bit12acflags = -1;
+static int hf_bit13acflags = -1;
+static int hf_bit14acflags = -1;
+static int hf_bit15acflags = -1;
+static int hf_bit16acflags = -1;
+static int hf_vflags = -1;
+static int hf_bit1vflags = -1;
+static int hf_bit2vflags = -1;
+static int hf_bit3vflags = -1;
+static int hf_bit4vflags = -1;
+static int hf_bit5vflags = -1;
+static int hf_bit6vflags = -1;
+static int hf_bit7vflags = -1;
+static int hf_bit8vflags = -1;
+static int hf_bit9vflags = -1;
+static int hf_bit10vflags = -1;
+static int hf_bit11vflags = -1;
+static int hf_bit12vflags = -1;
+static int hf_bit13vflags = -1;
+static int hf_bit14vflags = -1;
+static int hf_bit15vflags = -1;
+static int hf_bit16vflags = -1;
+static int hf_eflags = -1;
+static int hf_bit1eflags = -1;
+static int hf_bit2eflags = -1;
+static int hf_bit3eflags = -1;
+static int hf_bit4eflags = -1;
+static int hf_bit5eflags = -1;
+static int hf_bit6eflags = -1;
+static int hf_bit7eflags = -1;
+static int hf_bit8eflags = -1;
+static int hf_bit9eflags = -1;
+static int hf_bit10eflags = -1;
+static int hf_bit11eflags = -1;
+static int hf_bit12eflags = -1;
+static int hf_bit13eflags = -1;
+static int hf_bit14eflags = -1;
+static int hf_bit15eflags = -1;
+static int hf_bit16eflags = -1;
+static int hf_infoflagsl = -1;
+static int hf_retinfoflagsl = -1;
+static int hf_bit1infoflagsl = -1;
+static int hf_bit2infoflagsl = -1;
+static int hf_bit3infoflagsl = -1;
+static int hf_bit4infoflagsl = -1;
+static int hf_bit5infoflagsl = -1;
+static int hf_bit6infoflagsl = -1;
+static int hf_bit7infoflagsl = -1;
+static int hf_bit8infoflagsl = -1;
+static int hf_bit9infoflagsl = -1;
+static int hf_bit10infoflagsl = -1;
+static int hf_bit11infoflagsl = -1;
+static int hf_bit12infoflagsl = -1;
+static int hf_bit13infoflagsl = -1;
+static int hf_bit14infoflagsl = -1;
+static int hf_bit15infoflagsl = -1;
+static int hf_bit16infoflagsl = -1;
+static int hf_infoflagsh = -1;
+static int hf_bit1infoflagsh = -1;
+static int hf_bit2infoflagsh = -1;
+static int hf_bit3infoflagsh = -1;
+static int hf_bit4infoflagsh = -1;
+static int hf_bit5infoflagsh = -1;
+static int hf_bit6infoflagsh = -1;
+static int hf_bit7infoflagsh = -1;
+static int hf_bit8infoflagsh = -1;
+static int hf_bit9infoflagsh = -1;
+static int hf_bit10infoflagsh = -1;
+static int hf_bit11infoflagsh = -1;
+static int hf_bit12infoflagsh = -1;
+static int hf_bit13infoflagsh = -1;
+static int hf_bit14infoflagsh = -1;
+static int hf_bit15infoflagsh = -1;
+static int hf_bit16infoflagsh = -1;
+static int hf_retinfoflagsh = -1;
+static int hf_bit1retinfoflagsh = -1;
+static int hf_bit2retinfoflagsh = -1;
+static int hf_bit3retinfoflagsh = -1;
+static int hf_bit4retinfoflagsh = -1;
+static int hf_bit5retinfoflagsh = -1;
+static int hf_bit6retinfoflagsh = -1;
+static int hf_bit7retinfoflagsh = -1;
+static int hf_bit8retinfoflagsh = -1;
+static int hf_bit9retinfoflagsh = -1;
+static int hf_bit10retinfoflagsh = -1;
+static int hf_bit11retinfoflagsh = -1;
+static int hf_bit12retinfoflagsh = -1;
+static int hf_bit13retinfoflagsh = -1;
+static int hf_bit14retinfoflagsh = -1;
+static int hf_bit15retinfoflagsh = -1;
+static int hf_bit16retinfoflagsh = -1;
+static int hf_bit1lflags = -1;
+static int hf_bit2lflags = -1;
+static int hf_bit3lflags = -1;
+static int hf_bit4lflags = -1;
+static int hf_bit5lflags = -1;
+static int hf_bit6lflags = -1;
+static int hf_bit7lflags = -1;
+static int hf_bit8lflags = -1;
+static int hf_bit9lflags = -1;
+static int hf_bit10lflags = -1;
+static int hf_bit11lflags = -1;
+static int hf_bit12lflags = -1;
+static int hf_bit13lflags = -1;
+static int hf_bit14lflags = -1;
+static int hf_bit15lflags = -1;
+static int hf_bit16lflags = -1;
+static int hf_l1flagsl = -1;
+static int hf_l1flagsh = -1;
+static int hf_bit1l1flagsl = -1;
+static int hf_bit2l1flagsl = -1;
+static int hf_bit3l1flagsl = -1;
+static int hf_bit4l1flagsl = -1;
+static int hf_bit5l1flagsl = -1;
+static int hf_bit6l1flagsl = -1;
+static int hf_bit7l1flagsl = -1;
+static int hf_bit8l1flagsl = -1;
+static int hf_bit9l1flagsl = -1;
+static int hf_bit10l1flagsl = -1;
+static int hf_bit11l1flagsl = -1;
+static int hf_bit12l1flagsl = -1;
+static int hf_bit13l1flagsl = -1;
+static int hf_bit14l1flagsl = -1;
+static int hf_bit15l1flagsl = -1;
+static int hf_bit16l1flagsl = -1;
+static int hf_bit1l1flagsh = -1;
+static int hf_bit2l1flagsh = -1;
+static int hf_bit3l1flagsh = -1;
+static int hf_bit4l1flagsh = -1;
+static int hf_bit5l1flagsh = -1;
+static int hf_bit6l1flagsh = -1;
+static int hf_bit7l1flagsh = -1;
+static int hf_bit8l1flagsh = -1;
+static int hf_bit9l1flagsh = -1;
+static int hf_bit10l1flagsh = -1;
+static int hf_bit11l1flagsh = -1;
+static int hf_bit12l1flagsh = -1;
+static int hf_bit13l1flagsh = -1;
+static int hf_bit14l1flagsh = -1;
+static int hf_bit15l1flagsh = -1;
+static int hf_bit16l1flagsh = -1;
+static int hf_nds_tree_name = -1;
+static int hf_nds_reply_error = -1;
+static int hf_nds_net = -1;
+static int hf_nds_node = -1;
+static int hf_nds_socket = -1;
+static int hf_add_ref_ip = -1;
+static int hf_add_ref_udp = -1;
+static int hf_add_ref_tcp = -1;
+static int hf_referral_record = -1;
+static int hf_referral_addcount = -1;
+static int hf_nds_port = -1;
+static int hf_mv_string = -1;
+static int hf_nds_syntax = -1;
+static int hf_value_string = -1;
+static int hf_nds_buffer_size = -1;
+static int hf_nds_ver = -1;
+static int hf_nds_nflags = -1;
+static int hf_nds_scope = -1;
+static int hf_nds_name = -1;
+static int hf_nds_comm_trans = -1;
+static int hf_nds_tree_trans = -1;
+static int hf_nds_iteration = -1;
+static int hf_nds_eid = -1;
+static int hf_nds_info_type = -1;
+static int hf_nds_all_attr = -1;
+static int hf_nds_req_flags = -1;
+static int hf_nds_attr = -1;
+static int hf_nds_crc = -1;
+static int hf_nds_referrals = -1;
+static int hf_nds_result_flags = -1;
+static int hf_nds_tag_string = -1;
+static int hf_value_bytes = -1;
+static int hf_replica_type = -1;
+static int hf_replica_state = -1;
+static int hf_replica_number = -1;
+static int hf_min_nds_ver = -1;
+static int hf_nds_ver_include = -1;
+static int hf_nds_ver_exclude = -1;
+/* static int hf_nds_es = -1; */
+static int hf_es_type = -1;
+/* static int hf_delim_string = -1; */
+static int hf_rdn_string = -1;
+static int hf_nds_revent = -1;
+static int hf_nds_rnum = -1;
+static int hf_nds_name_type = -1;
+static int hf_nds_rflags = -1;
+static int hf_nds_eflags = -1;
+static int hf_nds_depth = -1;
+static int hf_nds_class_def_type = -1;
+static int hf_nds_classes = -1;
+static int hf_nds_return_all_classes = -1;
+static int hf_nds_stream_flags = -1;
+static int hf_nds_stream_name = -1;
+static int hf_nds_file_handle = -1;
+static int hf_nds_file_size = -1;
+static int hf_nds_dn_output_type = -1;
+static int hf_nds_nested_output_type = -1;
+static int hf_nds_output_delimiter = -1;
+static int hf_nds_output_entry_specifier = -1;
+static int hf_es_value = -1;
+static int hf_es_rdn_count = -1;
+static int hf_nds_replica_num = -1;
+static int hf_nds_event_num = -1;
+static int hf_es_seconds = -1;
+static int hf_nds_compare_results = -1;
+static int hf_nds_parent = -1;
+static int hf_nds_name_filter = -1;
+static int hf_nds_class_filter = -1;
+static int hf_nds_time_filter = -1;
+static int hf_nds_partition_root_id = -1;
+static int hf_nds_replicas = -1;
+static int hf_nds_purge = -1;
+static int hf_nds_local_partition = -1;
+static int hf_partition_busy = -1;
+static int hf_nds_number_of_changes = -1;
+static int hf_sub_count = -1;
+static int hf_nds_revision = -1;
+static int hf_nds_base_class = -1;
+static int hf_nds_relative_dn = -1;
+/* static int hf_nds_root_dn = -1; */
+/* static int hf_nds_parent_dn = -1; */
+static int hf_deref_base = -1;
+/* static int hf_nds_entry_info = -1; */
+static int hf_nds_base = -1;
+static int hf_nds_privileges = -1;
+static int hf_nds_vflags = -1;
+static int hf_nds_value_len = -1;
+static int hf_nds_cflags = -1;
+static int hf_nds_acflags = -1;
+static int hf_nds_asn1 = -1;
+static int hf_nds_upper = -1;
+static int hf_nds_lower = -1;
+static int hf_nds_trustee_dn = -1;
+static int hf_nds_attribute_dn = -1;
+static int hf_nds_acl_add = -1;
+static int hf_nds_acl_del = -1;
+static int hf_nds_att_add = -1;
+static int hf_nds_att_del = -1;
+static int hf_nds_keep = -1;
+static int hf_nds_new_rdn = -1;
+static int hf_nds_time_delay = -1;
+static int hf_nds_root_name = -1;
+static int hf_nds_new_part_id = -1;
+static int hf_nds_child_part_id = -1;
+static int hf_nds_master_part_id = -1;
+static int hf_nds_target_name = -1;
+static int hf_nds_super = -1;
+static int hf_pingflags2 = -1;
+static int hf_bit1pingflags2 = -1;
+static int hf_bit2pingflags2 = -1;
+static int hf_bit3pingflags2 = -1;
+static int hf_bit4pingflags2 = -1;
+static int hf_bit5pingflags2 = -1;
+static int hf_bit6pingflags2 = -1;
+static int hf_bit7pingflags2 = -1;
+static int hf_bit8pingflags2 = -1;
+static int hf_bit9pingflags2 = -1;
+static int hf_bit10pingflags2 = -1;
+static int hf_bit11pingflags2 = -1;
+static int hf_bit12pingflags2 = -1;
+static int hf_bit13pingflags2 = -1;
+static int hf_bit14pingflags2 = -1;
+static int hf_bit15pingflags2 = -1;
+static int hf_bit16pingflags2 = -1;
+static int hf_pingflags1 = -1;
+static int hf_bit1pingflags1 = -1;
+static int hf_bit2pingflags1 = -1;
+static int hf_bit3pingflags1 = -1;
+static int hf_bit4pingflags1 = -1;
+static int hf_bit5pingflags1 = -1;
+static int hf_bit6pingflags1 = -1;
+static int hf_bit7pingflags1 = -1;
+static int hf_bit8pingflags1 = -1;
+static int hf_bit9pingflags1 = -1;
+static int hf_bit10pingflags1 = -1;
+static int hf_bit11pingflags1 = -1;
+static int hf_bit12pingflags1 = -1;
+static int hf_bit13pingflags1 = -1;
+static int hf_bit14pingflags1 = -1;
+static int hf_bit15pingflags1 = -1;
+static int hf_bit16pingflags1 = -1;
+static int hf_pingpflags1 = -1;
+static int hf_bit1pingpflags1 = -1;
+static int hf_bit2pingpflags1 = -1;
+static int hf_bit3pingpflags1 = -1;
+static int hf_bit4pingpflags1 = -1;
+static int hf_bit5pingpflags1 = -1;
+static int hf_bit6pingpflags1 = -1;
+static int hf_bit7pingpflags1 = -1;
+static int hf_bit8pingpflags1 = -1;
+static int hf_bit9pingpflags1 = -1;
+static int hf_bit10pingpflags1 = -1;
+static int hf_bit11pingpflags1 = -1;
+static int hf_bit12pingpflags1 = -1;
+static int hf_bit13pingpflags1 = -1;
+static int hf_bit14pingpflags1 = -1;
+static int hf_bit15pingpflags1 = -1;
+static int hf_bit16pingpflags1 = -1;
+static int hf_pingvflags1 = -1;
+static int hf_bit1pingvflags1 = -1;
+static int hf_bit2pingvflags1 = -1;
+static int hf_bit3pingvflags1 = -1;
+static int hf_bit4pingvflags1 = -1;
+static int hf_bit5pingvflags1 = -1;
+static int hf_bit6pingvflags1 = -1;
+static int hf_bit7pingvflags1 = -1;
+static int hf_bit8pingvflags1 = -1;
+static int hf_bit9pingvflags1 = -1;
+static int hf_bit10pingvflags1 = -1;
+static int hf_bit11pingvflags1 = -1;
+static int hf_bit12pingvflags1 = -1;
+static int hf_bit13pingvflags1 = -1;
+static int hf_bit14pingvflags1 = -1;
+static int hf_bit15pingvflags1 = -1;
+static int hf_bit16pingvflags1 = -1;
+static int hf_nds_letter_ver = -1;
+static int hf_nds_os_majver = -1;
+static int hf_nds_os_minver = -1;
+static int hf_nds_lic_flags = -1;
+static int hf_nds_ds_time = -1;
+static int hf_nds_ping_version = -1;
+static int hf_nds_search_scope = -1;
+static int hf_nds_num_objects = -1;
+static int hf_siflags = -1;
+static int hf_bit1siflags = -1;
+static int hf_bit2siflags = -1;
+static int hf_bit3siflags = -1;
+static int hf_bit4siflags = -1;
+static int hf_bit5siflags = -1;
+static int hf_bit6siflags = -1;
+static int hf_bit7siflags = -1;
+static int hf_bit8siflags = -1;
+static int hf_bit9siflags = -1;
+static int hf_bit10siflags = -1;
+static int hf_bit11siflags = -1;
+static int hf_bit12siflags = -1;
+static int hf_bit13siflags = -1;
+static int hf_bit14siflags = -1;
+static int hf_bit15siflags = -1;
+static int hf_bit16siflags = -1;
+static int hf_nds_segments = -1;
+static int hf_nds_segment = -1;
+static int hf_nds_segment_overlap = -1;
+static int hf_nds_segment_overlap_conflict = -1;
+static int hf_nds_segment_multiple_tails = -1;
+static int hf_nds_segment_too_long_segment = -1;
+static int hf_nds_segment_error = -1;
+static int hf_nds_segment_count = -1;
+static int hf_nds_reassembled_length = -1;
+static int hf_nds_verb2b_req_flags = -1;
+static int hf_ncp_ip_address = -1;
+static int hf_ncp_copyright = -1;
+static int hf_ndsprot1flag = -1;
+static int hf_ndsprot2flag = -1;
+static int hf_ndsprot3flag = -1;
+static int hf_ndsprot4flag = -1;
+static int hf_ndsprot5flag = -1;
+static int hf_ndsprot6flag = -1;
+static int hf_ndsprot7flag = -1;
+static int hf_ndsprot8flag = -1;
+static int hf_ndsprot9flag = -1;
+static int hf_ndsprot10flag = -1;
+static int hf_ndsprot11flag = -1;
+static int hf_ndsprot12flag = -1;
+static int hf_ndsprot13flag = -1;
+static int hf_ndsprot14flag = -1;
+static int hf_ndsprot15flag = -1;
+static int hf_ndsprot16flag = -1;
+static int hf_nds_svr_dst_name = -1;
+static int hf_nds_tune_mark = -1;
+/* static int hf_nds_create_time = -1; */
+static int hf_srvr_param_number = -1;
+static int hf_srvr_param_boolean = -1;
+static int hf_srvr_param_string = -1;
+static int hf_nds_svr_time = -1;
+static int hf_nds_crt_time = -1;
+static int hf_nds_number_of_items = -1;
+static int hf_nds_compare_attributes = -1;
+static int hf_nds_read_attribute = -1;
+static int hf_nds_write_add_delete_attribute = -1;
+static int hf_nds_add_delete_self = -1;
+static int hf_nds_privilege_not_defined = -1;
+static int hf_nds_supervisor = -1;
+static int hf_nds_inheritance_control = -1;
+static int hf_nds_browse_entry = -1;
+static int hf_nds_add_entry = -1;
+static int hf_nds_delete_entry = -1;
+static int hf_nds_rename_entry = -1;
+static int hf_nds_supervisor_entry = -1;
+static int hf_nds_entry_privilege_not_defined = -1;
+static int hf_nds_iterator = -1;
+static int hf_ncp_nds_iterverb = -1;
+static int hf_iter_completion_code = -1;
+/* static int hf_nds_iterobj = -1; */
+static int hf_iter_verb_completion_code = -1;
+static int hf_iter_ans = -1;
+static int hf_positionable = -1;
+static int hf_num_skipped = -1;
+static int hf_num_to_skip = -1;
+static int hf_timelimit = -1;
+static int hf_iter_index = -1;
+static int hf_num_to_get = -1;
+/* static int hf_ret_info_type = -1; */
+static int hf_data_size = -1;
+static int hf_this_count = -1;
+static int hf_max_entries = -1;
+static int hf_move_position = -1;
+static int hf_iter_copy = -1;
+static int hf_iter_position = -1;
+static int hf_iter_search = -1;
+static int hf_iter_other = -1;
+static int hf_nds_oid = -1;
+static int hf_ncp_bytes_actually_trans_64 = -1;
+static int hf_sap_name = -1;
+static int hf_os_name = -1;
+static int hf_vendor_name = -1;
+static int hf_hardware_name = -1;
+static int hf_no_request_record_found = -1;
+static int hf_search_modifier = -1;
+static int hf_search_pattern = -1;
+static int hf_nds_acl_protected_attribute = -1;
+static int hf_nds_acl_subject = -1;
+static int hf_nds_acl_privileges = -1;
+
+static expert_field ei_ncp_file_rights_change = EI_INIT;
+static expert_field ei_ncp_completion_code = EI_INIT;
+static expert_field ei_nds_reply_error = EI_INIT;
+static expert_field ei_ncp_destroy_connection = EI_INIT;
+static expert_field ei_nds_iteration = EI_INIT;
+static expert_field ei_ncp_eid = EI_INIT;
+static expert_field ei_ncp_file_handle = EI_INIT;
+static expert_field ei_ncp_connection_destroyed = EI_INIT;
+static expert_field ei_ncp_no_request_record_found = EI_INIT;
+static expert_field ei_ncp_file_rights = EI_INIT;
+static expert_field ei_iter_verb_completion_code = EI_INIT;
+static expert_field ei_ncp_connection_request = EI_INIT;
+static expert_field ei_ncp_connection_status = EI_INIT;
+static expert_field ei_ncp_op_lock_handle = EI_INIT;
+static expert_field ei_ncp_effective_rights = EI_INIT;
+static expert_field ei_ncp_server = EI_INIT;
+static expert_field ei_ncp_invalid_offset = EI_INIT;
+static expert_field ei_ncp_address_type = EI_INIT;
+static expert_field ei_ncp_value_too_large = EI_INIT;
+""")
+
+ # Look at all packet types in the packets collection, and cull information
+ # from them.
+ errors_used_list = []
+ errors_used_hash = {}
+ groups_used_list = []
+ groups_used_hash = {}
+ variables_used_hash = {}
+ structs_used_hash = {}
+
+ for pkt in packets:
+ # Determine which error codes are used.
+ codes = pkt.CompletionCodes()
+ for code in codes.Records():
+ if code not in errors_used_hash:
+ errors_used_hash[code] = len(errors_used_list)
+ errors_used_list.append(code)
+
+ # Determine which groups are used.
+ group = pkt.Group()
+ if group not in groups_used_hash:
+ groups_used_hash[group] = len(groups_used_list)
+ groups_used_list.append(group)
+
+
+
+
+ # Determine which variables are used.
+ vars = pkt.Variables()
+ ExamineVars(vars, structs_used_hash, variables_used_hash)
+
+
+ # Print the hf variable declarations
+ sorted_vars = list(variables_used_hash.values())
+ sorted_vars.sort()
+ for var in sorted_vars:
+ print("static int " + var.HFName() + " = -1;")
+
+
+ # Print the value_string's
+ for var in sorted_vars:
+ if isinstance(var, val_string):
+ print("")
+ print(var.Code())
+
+ # Determine which error codes are not used
+ errors_not_used = {}
+ # Copy the keys from the error list...
+ for code in list(errors.keys()):
+ errors_not_used[code] = 1
+ # ... and remove the ones that *were* used.
+ for code in errors_used_list:
+ del errors_not_used[code]
+
+ # Print a remark showing errors not used
+ list_errors_not_used = list(errors_not_used.keys())
+ list_errors_not_used.sort()
+ for code in list_errors_not_used:
+ print("/* Error 0x%04x not used: %s */" % (code, errors[code]))
+ print("\n")
+
+ # Print the errors table
+ print("/* Error strings. */")
+ print("static const char *ncp_errors[] = {")
+ for code in errors_used_list:
+ print(' /* %02d (0x%04x) */ "%s",' % (errors_used_hash[code], code, errors[code]))
+ print("};\n")
+
+
+
+
+ # Determine which groups are not used
+ groups_not_used = {}
+ # Copy the keys from the group list...
+ for group in list(groups.keys()):
+ groups_not_used[group] = 1
+ # ... and remove the ones that *were* used.
+ for group in groups_used_list:
+ del groups_not_used[group]
+
+ # Print a remark showing groups not used
+ list_groups_not_used = list(groups_not_used.keys())
+ list_groups_not_used.sort()
+ for group in list_groups_not_used:
+ print("/* Group not used: %s = %s */" % (group, groups[group]))
+ print("\n")
+
+ # Print the groups table
+ print("/* Group strings. */")
+ print("static const char *ncp_groups[] = {")
+ for group in groups_used_list:
+ print(' /* %02d (%s) */ "%s",' % (groups_used_hash[group], group, groups[group]))
+ print("};\n")
+
+ # Print the group macros
+ for group in groups_used_list:
+ name = str.upper(group)
+ print("#define NCP_GROUP_%s %d" % (name, groups_used_hash[group]))
+ print("\n")
+
+
+ # Print the conditional_records for all Request Conditions.
+ num = 0
+ print("/* Request-Condition dfilter records. The NULL pointer")
+ print(" is replaced by a pointer to the created dfilter_t. */")
+ if len(global_req_cond) == 0:
+ print("static conditional_record req_conds = NULL;")
+ else:
+ print("static conditional_record req_conds[] = {")
+ req_cond_l = list(global_req_cond.keys())
+ req_cond_l.sort()
+ for req_cond in req_cond_l:
+ print(" { \"%s\", NULL }," % (req_cond,))
+ global_req_cond[req_cond] = num
+ num = num + 1
+ print("};")
+ print("#define NUM_REQ_CONDS %d" % (num,))
+ print("#define NO_REQ_COND NUM_REQ_CONDS\n\n")
+
+
+
+ # Print PTVC's for bitfields
+ ett_list = []
+ print("/* PTVC records for bit-fields. */")
+ for var in sorted_vars:
+ if isinstance(var, bitfield):
+ sub_vars_ptvc = var.SubVariablesPTVC()
+ print("/* %s */" % (sub_vars_ptvc.Name()))
+ print(sub_vars_ptvc.Code())
+ ett_list.append(sub_vars_ptvc.ETTName())
+
+
+ # Print the PTVC's for structures
+ print("/* PTVC records for structs. */")
+ # Sort them
+ svhash = {}
+ for svar in list(structs_used_hash.values()):
+ svhash[svar.HFName()] = svar
+ if svar.descr:
+ ett_list.append(svar.ETTName())
+
+ struct_vars = list(svhash.keys())
+ struct_vars.sort()
+ for varname in struct_vars:
+ var = svhash[varname]
+ print(var.Code())
+
+ ett_list.sort()
+
+ # Print info string structures
+ print("/* Info Strings */")
+ for pkt in packets:
+ if pkt.req_info_str:
+ name = pkt.InfoStrName() + "_req"
+ var = pkt.req_info_str[0]
+ print("static const info_string_t %s = {" % (name,))
+ print(" &%s," % (var.HFName(),))
+ print(' "%s",' % (pkt.req_info_str[1],))
+ print(' "%s"' % (pkt.req_info_str[2],))
+ print("};\n")
+
+ # Print regular PTVC's
+ print("/* PTVC records. These are re-used to save space. */")
+ for ptvc in ptvc_lists.Members():
+ if not ptvc.Null() and not ptvc.Empty():
+ print(ptvc.Code())
+
+ # Print error_equivalency tables
+ print("/* Error-Equivalency Tables. These are re-used to save space. */")
+ for compcodes in compcode_lists.Members():
+ errors = compcodes.Records()
+ # Make sure the record for error = 0x00 comes last.
+ print("static const error_equivalency %s[] = {" % (compcodes.Name()))
+ for error in errors:
+ error_in_packet = error >> 8;
+ ncp_error_index = errors_used_hash[error]
+ print(" { 0x%02x, %d }, /* 0x%04x */" % (error_in_packet,
+ ncp_error_index, error))
+ print(" { 0x00, -1 }\n};\n")
+
+
+
+ # Print integer arrays for all ncp_records that need
+ # a list of req_cond_indexes. Do it "uniquely" to save space;
+ # if multiple packets share the same set of req_cond's,
+ # then they'll share the same integer array
+ print("/* Request Condition Indexes */")
+ # First, make them unique
+ req_cond_collection = UniqueCollection("req_cond_collection")
+ for pkt in packets:
+ req_conds = pkt.CalculateReqConds()
+ if req_conds:
+ unique_list = req_cond_collection.Add(req_conds)
+ pkt.SetReqConds(unique_list)
+ else:
+ pkt.SetReqConds(None)
+
+ # Print them
+ for req_cond in req_cond_collection.Members():
+ sys.stdout.write("static const int %s[] = {" % (req_cond.Name()))
+ sys.stdout.write(" ")
+ vals = []
+ for text in req_cond.Records():
+ vals.append(global_req_cond[text])
+ vals.sort()
+ for val in vals:
+ sys.stdout.write("%s, " % (val,))
+
+ print("-1 };")
+ print("")
+
+
+
+ # Functions without length parameter
+ funcs_without_length = {}
+
+ print("/* Forward declaration of expert info functions defined in ncp2222.inc */")
+ for expert in expert_hash:
+ print("static void %s_expert_func(ptvcursor_t *ptvc, packet_info *pinfo, const ncp_record *ncp_rec, bool request);" % expert)
+
+ # Print ncp_record packet records
+ print("#define SUBFUNC_WITH_LENGTH 0x02")
+ print("#define SUBFUNC_NO_LENGTH 0x01")
+ print("#define NO_SUBFUNC 0x00")
+
+ print("/* ncp_record structs for packets */")
+ print("static const ncp_record ncp_packets[] = {")
+ for pkt in packets:
+ if pkt.HasSubFunction():
+ func = pkt.FunctionCode('high')
+ if pkt.HasLength():
+ subfunc_string = "SUBFUNC_WITH_LENGTH"
+ # Ensure that the function either has a length param or not
+ if func in funcs_without_length:
+ sys.exit("Function 0x%04x sometimes has length param, sometimes not." \
+ % (pkt.FunctionCode(),))
+ else:
+ subfunc_string = "SUBFUNC_NO_LENGTH"
+ funcs_without_length[func] = 1
+ else:
+ subfunc_string = "NO_SUBFUNC"
+ sys.stdout.write(' { 0x%02x, 0x%02x, %s, "%s",' % (pkt.FunctionCode('high'),
+ pkt.FunctionCode('low'), subfunc_string, pkt.Description()))
+
+ print(' %d /* %s */,' % (groups_used_hash[pkt.Group()], pkt.Group()))
+
+ ptvc = pkt.PTVCRequest()
+ if not ptvc.Null() and not ptvc.Empty():
+ ptvc_request = ptvc.Name()
+ else:
+ ptvc_request = 'NULL'
+
+ ptvc = pkt.PTVCReply()
+ if not ptvc.Null() and not ptvc.Empty():
+ ptvc_reply = ptvc.Name()
+ else:
+ ptvc_reply = 'NULL'
+
+ errors = pkt.CompletionCodes()
+
+ req_conds_obj = pkt.GetReqConds()
+ if req_conds_obj:
+ req_conds = req_conds_obj.Name()
+ else:
+ req_conds = "NULL"
+
+ if not req_conds_obj:
+ req_cond_size = "NO_REQ_COND_SIZE"
+ else:
+ req_cond_size = pkt.ReqCondSize()
+ if req_cond_size is None:
+ msg.write("NCP packet %s needs a ReqCondSize*() call\n" \
+ % (pkt.CName(),))
+ sys.exit(1)
+
+ if pkt.expert_func:
+ expert_func = "&" + pkt.expert_func + "_expert_func"
+ else:
+ expert_func = "NULL"
+
+ print(' %s, %s, %s, %s, %s, %s },\n' % \
+ (ptvc_request, ptvc_reply, errors.Name(), req_conds,
+ req_cond_size, expert_func))
+
+ print(' { 0, 0, 0, NULL, 0, NULL, NULL, NULL, NULL, NO_REQ_COND_SIZE, NULL }')
+ print("};\n")
+
+ print("/* ncp funcs that require a subfunc */")
+ print("static const uint8_t ncp_func_requires_subfunc[] = {")
+ hi_seen = {}
+ for pkt in packets:
+ if pkt.HasSubFunction():
+ hi_func = pkt.FunctionCode('high')
+ if hi_func not in hi_seen:
+ print(" 0x%02x," % (hi_func))
+ hi_seen[hi_func] = 1
+ print(" 0")
+ print("};\n")
+
+
+ print("/* ncp funcs that have no length parameter */")
+ print("static const uint8_t ncp_func_has_no_length_parameter[] = {")
+ funcs = list(funcs_without_length.keys())
+ funcs.sort()
+ for func in funcs:
+ print(" 0x%02x," % (func,))
+ print(" 0")
+ print("};\n")
+
+ print("")
+
+ # proto_register_ncp2222()
+ print("""
+static const value_string connection_status_vals[] = {
+ { 0x00, "Ok" },
+ { 0x01, "Bad Service Connection" },
+ { 0x10, "File Server is Down" },
+ { 0x40, "Broadcast Message Pending" },
+ { 0, NULL }
+};
+
+#include "packet-ncp2222.inc"
+
+void
+proto_register_ncp2222(void)
+{
+
+ static hf_register_info hf[] = {
+ { &hf_ncp_number_of_data_streams_long,
+ { "Number of Data Streams", "ncp.number_of_data_streams_long", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_func,
+ { "Function", "ncp.func", FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_length,
+ { "Packet Length", "ncp.length", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_subfunc,
+ { "SubFunction", "ncp.subfunc", FT_UINT8, BASE_DEC_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_completion_code,
+ { "Completion Code", "ncp.completion_code", FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_group,
+ { "NCP Group Type", "ncp.group", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_fragment_handle,
+ { "NDS Fragment Handle", "ncp.ndsfrag", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_fragment_size,
+ { "NDS Fragment Size", "ncp.ndsfragsize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_message_size,
+ { "Message Size", "ncp.ndsmessagesize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_nds_flag,
+ { "NDS Protocol Flags", "ncp.ndsflag", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_nds_verb,
+ { "NDS Verb", "ncp.ndsverb", FT_UINT8, BASE_HEX, VALS(ncp_nds_verb_vals), 0x0, NULL, HFILL }},
+
+ { &hf_ping_version,
+ { "NDS Version", "ncp.ping_version", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_nds_version,
+ { "NDS Version", "ncp.nds_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_nds_tree_name,
+ { "NDS Tree Name", "ncp.nds_tree_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ /*
+ * XXX - the page at
+ *
+ * https://web.archive.org/web/20030629082113/http://www.odyssea.com/whats_new/tcpipnet/tcpipnet.html
+ *
+ * says of the connection status "The Connection Code field may
+ * contain values that indicate the status of the client host to
+ * server connection. A value of 1 in the fourth bit of this data
+ * byte indicates that the server is unavailable (server was
+ * downed).
+ *
+ * The page at
+ *
+ * https://web.archive.org/web/20090809191415/http://www.unm.edu/~network/presentations/course/appendix/appendix_f/tsld088.htm
+ *
+ * says that bit 0 is "bad service", bit 2 is "no connection
+ * available", bit 4 is "service down", and bit 6 is "server
+ * has a broadcast message waiting for the client".
+ *
+ * Should it be displayed in hex, and should those bits (and any
+ * other bits with significance) be displayed as bitfields
+ * underneath it?
+ */
+ { &hf_ncp_connection_status,
+ { "Connection Status", "ncp.connection_status", FT_UINT8, BASE_DEC, VALS(connection_status_vals), 0x0, NULL, HFILL }},
+
+ { &hf_ncp_req_frame_num,
+ { "Response to Request in Frame Number", "ncp.req_frame_num", FT_FRAMENUM, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_req_frame_time,
+ { "Time from Request", "ncp.time", FT_RELATIVE_TIME, BASE_NONE, NULL, 0x0, "Time between request and response in seconds", HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_nds_flags,
+ { "NDS Return Flags", "ncp.nds_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_nds_reply_depth,
+ { "Distance from Root", "ncp.ndsdepth", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_reply_rev,
+ { "NDS Revision", "ncp.ndsrev", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_reply_flags,
+ { "Flags", "ncp.ndsflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_p1type,
+ { "NDS Parameter Type", "ncp.p1type", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_uint32value,
+ { "NDS Value", "ncp.uint32value", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_bit1,
+ { "Typeless", "ncp.nds_bit1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_nds_bit2,
+ { "All Containers", "ncp.nds_bit2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_nds_bit3,
+ { "Slashed", "ncp.nds_bit3", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_nds_bit4,
+ { "Dotted", "ncp.nds_bit4", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_nds_bit5,
+ { "Tuned", "ncp.nds_bit5", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_nds_bit6,
+ { "Not Defined", "ncp.nds_bit6", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_nds_bit7,
+ { "Not Defined", "ncp.nds_bit7", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_nds_bit8,
+ { "Not Defined", "ncp.nds_bit8", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_nds_bit9,
+ { "Not Defined", "ncp.nds_bit9", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_nds_bit10,
+ { "Not Defined", "ncp.nds_bit10", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_nds_bit11,
+ { "Not Defined", "ncp.nds_bit11", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_nds_bit12,
+ { "Not Defined", "ncp.nds_bit12", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_nds_bit13,
+ { "Not Defined", "ncp.nds_bit13", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_nds_bit14,
+ { "Not Defined", "ncp.nds_bit14", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_nds_bit15,
+ { "Not Defined", "ncp.nds_bit15", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_nds_bit16,
+ { "Not Defined", "ncp.nds_bit16", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_outflags,
+ { "Output Flags", "ncp.outflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1outflags,
+ { "Output Flags", "ncp.bit1outflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2outflags,
+ { "Entry ID", "ncp.bit2outflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3outflags,
+ { "Replica State", "ncp.bit3outflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4outflags,
+ { "Modification Timestamp", "ncp.bit4outflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5outflags,
+ { "Purge Time", "ncp.bit5outflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6outflags,
+ { "Local Partition ID", "ncp.bit6outflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7outflags,
+ { "Distinguished Name", "ncp.bit7outflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8outflags,
+ { "Replica Type", "ncp.bit8outflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9outflags,
+ { "Partition Busy", "ncp.bit9outflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10outflags,
+ { "Not Defined", "ncp.bit10outflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11outflags,
+ { "Not Defined", "ncp.bit11outflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12outflags,
+ { "Not Defined", "ncp.bit12outflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13outflags,
+ { "Not Defined", "ncp.bit13outflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14outflags,
+ { "Not Defined", "ncp.bit14outflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15outflags,
+ { "Not Defined", "ncp.bit15outflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16outflags,
+ { "Not Defined", "ncp.bit16outflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_bit1nflags,
+ { "Entry ID", "ncp.bit1nflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2nflags,
+ { "Readable", "ncp.bit2nflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3nflags,
+ { "Writeable", "ncp.bit3nflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4nflags,
+ { "Master", "ncp.bit4nflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5nflags,
+ { "Create ID", "ncp.bit5nflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6nflags,
+ { "Walk Tree", "ncp.bit6nflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7nflags,
+ { "Dereference Alias", "ncp.bit7nflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8nflags,
+ { "Not Defined", "ncp.bit8nflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9nflags,
+ { "Not Defined", "ncp.bit9nflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10nflags,
+ { "Not Defined", "ncp.bit10nflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11nflags,
+ { "Not Defined", "ncp.bit11nflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12nflags,
+ { "Not Defined", "ncp.bit12nflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13nflags,
+ { "Not Defined", "ncp.bit13nflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14nflags,
+ { "Prefer Referrals", "ncp.bit14nflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15nflags,
+ { "Prefer Only Referrals", "ncp.bit15nflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16nflags,
+ { "Not Defined", "ncp.bit16nflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_bit1rflags,
+ { "Typeless", "ncp.bit1rflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2rflags,
+ { "Slashed", "ncp.bit2rflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3rflags,
+ { "Dotted", "ncp.bit3rflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4rflags,
+ { "Tuned", "ncp.bit4rflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5rflags,
+ { "Not Defined", "ncp.bit5rflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6rflags,
+ { "Not Defined", "ncp.bit6rflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7rflags,
+ { "Not Defined", "ncp.bit7rflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8rflags,
+ { "Not Defined", "ncp.bit8rflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9rflags,
+ { "Not Defined", "ncp.bit9rflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10rflags,
+ { "Not Defined", "ncp.bit10rflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11rflags,
+ { "Not Defined", "ncp.bit11rflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12rflags,
+ { "Not Defined", "ncp.bit12rflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13rflags,
+ { "Not Defined", "ncp.bit13rflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14rflags,
+ { "Not Defined", "ncp.bit14rflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15rflags,
+ { "Not Defined", "ncp.bit15rflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16rflags,
+ { "Not Defined", "ncp.bit16rflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_eflags,
+ { "Entry Flags", "ncp.eflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1eflags,
+ { "Alias Entry", "ncp.bit1eflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2eflags,
+ { "Partition Root", "ncp.bit2eflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3eflags,
+ { "Container Entry", "ncp.bit3eflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4eflags,
+ { "Container Alias", "ncp.bit4eflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5eflags,
+ { "Matches List Filter", "ncp.bit5eflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6eflags,
+ { "Reference Entry", "ncp.bit6eflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7eflags,
+ { "40x Reference Entry", "ncp.bit7eflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8eflags,
+ { "Back Linked", "ncp.bit8eflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9eflags,
+ { "New Entry", "ncp.bit9eflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10eflags,
+ { "Temporary Reference", "ncp.bit10eflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11eflags,
+ { "Audited", "ncp.bit11eflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12eflags,
+ { "Entry Not Present", "ncp.bit12eflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13eflags,
+ { "Entry Verify CTS", "ncp.bit13eflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14eflags,
+ { "Entry Damaged", "ncp.bit14eflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15eflags,
+ { "Not Defined", "ncp.bit15rflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16eflags,
+ { "Not Defined", "ncp.bit16rflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_infoflagsl,
+ { "Information Flags (low) Byte", "ncp.infoflagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_retinfoflagsl,
+ { "Return Information Flags (low) Byte", "ncp.retinfoflagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1infoflagsl,
+ { "Output Flags", "ncp.bit1infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2infoflagsl,
+ { "Entry ID", "ncp.bit2infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3infoflagsl,
+ { "Entry Flags", "ncp.bit3infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4infoflagsl,
+ { "Subordinate Count", "ncp.bit4infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5infoflagsl,
+ { "Modification Time", "ncp.bit5infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6infoflagsl,
+ { "Modification Timestamp", "ncp.bit6infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7infoflagsl,
+ { "Creation Timestamp", "ncp.bit7infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8infoflagsl,
+ { "Partition Root ID", "ncp.bit8infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9infoflagsl,
+ { "Parent ID", "ncp.bit9infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10infoflagsl,
+ { "Revision Count", "ncp.bit10infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11infoflagsl,
+ { "Replica Type", "ncp.bit11infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12infoflagsl,
+ { "Base Class", "ncp.bit12infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13infoflagsl,
+ { "Relative Distinguished Name", "ncp.bit13infoflagsl", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14infoflagsl,
+ { "Distinguished Name", "ncp.bit14infoflagsl", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15infoflagsl,
+ { "Root Distinguished Name", "ncp.bit15infoflagsl", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16infoflagsl,
+ { "Parent Distinguished Name", "ncp.bit16infoflagsl", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_infoflagsh,
+ { "Information Flags (high) Byte", "ncp.infoflagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1infoflagsh,
+ { "Purge Time", "ncp.bit1infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2infoflagsh,
+ { "Dereference Base Class", "ncp.bit2infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3infoflagsh,
+ { "Not Defined", "ncp.bit3infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4infoflagsh,
+ { "Not Defined", "ncp.bit4infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5infoflagsh,
+ { "Not Defined", "ncp.bit5infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6infoflagsh,
+ { "Not Defined", "ncp.bit6infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7infoflagsh,
+ { "Not Defined", "ncp.bit7infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8infoflagsh,
+ { "Not Defined", "ncp.bit8infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9infoflagsh,
+ { "Not Defined", "ncp.bit9infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10infoflagsh,
+ { "Not Defined", "ncp.bit10infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11infoflagsh,
+ { "Not Defined", "ncp.bit11infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12infoflagsh,
+ { "Not Defined", "ncp.bit12infoflagshs", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13infoflagsh,
+ { "Not Defined", "ncp.bit13infoflagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14infoflagsh,
+ { "Not Defined", "ncp.bit14infoflagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15infoflagsh,
+ { "Not Defined", "ncp.bit15infoflagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16infoflagsh,
+ { "Not Defined", "ncp.bit16infoflagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_retinfoflagsh,
+ { "Return Information Flags (high) Byte", "ncp.retinfoflagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1retinfoflagsh,
+ { "Purge Time", "ncp.bit1retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2retinfoflagsh,
+ { "Dereference Base Class", "ncp.bit2retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3retinfoflagsh,
+ { "Replica Number", "ncp.bit3retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4retinfoflagsh,
+ { "Replica State", "ncp.bit4retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5retinfoflagsh,
+ { "Federation Boundary", "ncp.bit5retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6retinfoflagsh,
+ { "Schema Boundary", "ncp.bit6retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7retinfoflagsh,
+ { "Federation Boundary ID", "ncp.bit7retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8retinfoflagsh,
+ { "Schema Boundary ID", "ncp.bit8retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9retinfoflagsh,
+ { "Current Subcount", "ncp.bit9retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10retinfoflagsh,
+ { "Local Entry Flags", "ncp.bit10retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11retinfoflagsh,
+ { "Not Defined", "ncp.bit11retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12retinfoflagsh,
+ { "Not Defined", "ncp.bit12retinfoflagshs", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13retinfoflagsh,
+ { "Not Defined", "ncp.bit13retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14retinfoflagsh,
+ { "Not Defined", "ncp.bit14retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15retinfoflagsh,
+ { "Not Defined", "ncp.bit15retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16retinfoflagsh,
+ { "Not Defined", "ncp.bit16retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_bit1lflags,
+ { "List Typeless", "ncp.bit1lflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2lflags,
+ { "List Containers", "ncp.bit2lflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3lflags,
+ { "List Slashed", "ncp.bit3lflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4lflags,
+ { "List Dotted", "ncp.bit4lflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5lflags,
+ { "Dereference Alias", "ncp.bit5lflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6lflags,
+ { "List All Containers", "ncp.bit6lflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7lflags,
+ { "List Obsolete", "ncp.bit7lflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8lflags,
+ { "List Tuned Output", "ncp.bit8lflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9lflags,
+ { "List External Reference", "ncp.bit9lflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10lflags,
+ { "Not Defined", "ncp.bit10lflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11lflags,
+ { "Not Defined", "ncp.bit11lflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12lflags,
+ { "Not Defined", "ncp.bit12lflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13lflags,
+ { "Not Defined", "ncp.bit13lflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14lflags,
+ { "Not Defined", "ncp.bit14lflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15lflags,
+ { "Not Defined", "ncp.bit15lflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16lflags,
+ { "Not Defined", "ncp.bit16lflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_l1flagsl,
+ { "Information Flags (low) Byte", "ncp.l1flagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_l1flagsh,
+ { "Information Flags (high) Byte", "ncp.l1flagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1l1flagsl,
+ { "Output Flags", "ncp.bit1l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2l1flagsl,
+ { "Entry ID", "ncp.bit2l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3l1flagsl,
+ { "Replica State", "ncp.bit3l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4l1flagsl,
+ { "Modification Timestamp", "ncp.bit4l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5l1flagsl,
+ { "Purge Time", "ncp.bit5l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6l1flagsl,
+ { "Local Partition ID", "ncp.bit6l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7l1flagsl,
+ { "Distinguished Name", "ncp.bit7l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8l1flagsl,
+ { "Replica Type", "ncp.bit8l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9l1flagsl,
+ { "Partition Busy", "ncp.bit9l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10l1flagsl,
+ { "Not Defined", "ncp.bit10l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11l1flagsl,
+ { "Not Defined", "ncp.bit11l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12l1flagsl,
+ { "Not Defined", "ncp.bit12l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13l1flagsl,
+ { "Not Defined", "ncp.bit13l1flagsl", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14l1flagsl,
+ { "Not Defined", "ncp.bit14l1flagsl", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15l1flagsl,
+ { "Not Defined", "ncp.bit15l1flagsl", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16l1flagsl,
+ { "Not Defined", "ncp.bit16l1flagsl", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_bit1l1flagsh,
+ { "Not Defined", "ncp.bit1l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2l1flagsh,
+ { "Not Defined", "ncp.bit2l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3l1flagsh,
+ { "Not Defined", "ncp.bit3l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4l1flagsh,
+ { "Not Defined", "ncp.bit4l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5l1flagsh,
+ { "Not Defined", "ncp.bit5l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6l1flagsh,
+ { "Not Defined", "ncp.bit6l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7l1flagsh,
+ { "Not Defined", "ncp.bit7l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8l1flagsh,
+ { "Not Defined", "ncp.bit8l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9l1flagsh,
+ { "Not Defined", "ncp.bit9l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10l1flagsh,
+ { "Not Defined", "ncp.bit10l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11l1flagsh,
+ { "Not Defined", "ncp.bit11l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12l1flagsh,
+ { "Not Defined", "ncp.bit12l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13l1flagsh,
+ { "Not Defined", "ncp.bit13l1flagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14l1flagsh,
+ { "Not Defined", "ncp.bit14l1flagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15l1flagsh,
+ { "Not Defined", "ncp.bit15l1flagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16l1flagsh,
+ { "Not Defined", "ncp.bit16l1flagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_vflags,
+ { "Value Flags", "ncp.vflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1vflags,
+ { "Naming", "ncp.bit1vflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2vflags,
+ { "Base Class", "ncp.bit2vflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3vflags,
+ { "Present", "ncp.bit3vflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4vflags,
+ { "Value Damaged", "ncp.bit4vflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5vflags,
+ { "Not Defined", "ncp.bit5vflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6vflags,
+ { "Not Defined", "ncp.bit6vflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7vflags,
+ { "Not Defined", "ncp.bit7vflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8vflags,
+ { "Not Defined", "ncp.bit8vflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9vflags,
+ { "Not Defined", "ncp.bit9vflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10vflags,
+ { "Not Defined", "ncp.bit10vflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11vflags,
+ { "Not Defined", "ncp.bit11vflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12vflags,
+ { "Not Defined", "ncp.bit12vflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13vflags,
+ { "Not Defined", "ncp.bit13vflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14vflags,
+ { "Not Defined", "ncp.bit14vflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15vflags,
+ { "Not Defined", "ncp.bit15vflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16vflags,
+ { "Not Defined", "ncp.bit16vflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_cflags,
+ { "Class Flags", "ncp.cflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1cflags,
+ { "Container", "ncp.bit1cflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2cflags,
+ { "Effective", "ncp.bit2cflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3cflags,
+ { "Class Definition Cannot be Removed", "ncp.bit3cflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4cflags,
+ { "Ambiguous Naming", "ncp.bit4cflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5cflags,
+ { "Ambiguous Containment", "ncp.bit5cflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6cflags,
+ { "Auxiliary", "ncp.bit6cflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7cflags,
+ { "Operational", "ncp.bit7cflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8cflags,
+ { "Sparse Required", "ncp.bit8cflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9cflags,
+ { "Sparse Operational", "ncp.bit9cflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10cflags,
+ { "Not Defined", "ncp.bit10cflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11cflags,
+ { "Not Defined", "ncp.bit11cflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12cflags,
+ { "Not Defined", "ncp.bit12cflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13cflags,
+ { "Not Defined", "ncp.bit13cflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14cflags,
+ { "Not Defined", "ncp.bit14cflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15cflags,
+ { "Not Defined", "ncp.bit15cflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16cflags,
+ { "Not Defined", "ncp.bit16cflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_bit1acflags,
+ { "Single Valued", "ncp.bit1acflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2acflags,
+ { "Sized", "ncp.bit2acflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3acflags,
+ { "Non-Removable", "ncp.bit3acflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4acflags,
+ { "Read Only", "ncp.bit4acflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5acflags,
+ { "Hidden", "ncp.bit5acflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6acflags,
+ { "String", "ncp.bit6acflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7acflags,
+ { "Synchronize Immediate", "ncp.bit7acflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8acflags,
+ { "Public Read", "ncp.bit8acflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9acflags,
+ { "Server Read", "ncp.bit9acflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10acflags,
+ { "Write Managed", "ncp.bit10acflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11acflags,
+ { "Per Replica", "ncp.bit11acflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12acflags,
+ { "Never Schedule Synchronization", "ncp.bit12acflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13acflags,
+ { "Operational", "ncp.bit13acflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14acflags,
+ { "Not Defined", "ncp.bit14acflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15acflags,
+ { "Not Defined", "ncp.bit15acflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16acflags,
+ { "Not Defined", "ncp.bit16acflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+
+ { &hf_nds_reply_error,
+ { "NDS Error", "ncp.ndsreplyerror", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_net,
+ { "Network","ncp.ndsnet", FT_IPXNET, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_node,
+ { "Node", "ncp.ndsnode", FT_ETHER, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_socket,
+ { "Socket", "ncp.ndssocket", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_add_ref_ip,
+ { "Address Referral", "ncp.ipref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_add_ref_udp,
+ { "Address Referral", "ncp.udpref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_add_ref_tcp,
+ { "Address Referral", "ncp.tcpref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_referral_record,
+ { "Referral Record", "ncp.ref_rec", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_referral_addcount,
+ { "Number of Addresses in Referral", "ncp.ref_addcount", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_port,
+ { "Port", "ncp.ndsport", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_mv_string,
+ { "Attribute Name", "ncp.mv_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_syntax,
+ { "Attribute Syntax", "ncp.nds_syntax", FT_UINT32, BASE_DEC, VALS(nds_syntax), 0x0, NULL, HFILL }},
+
+ { &hf_value_string,
+ { "Value", "ncp.value_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_stream_name,
+ { "Stream Name", "ncp.nds_stream_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_buffer_size,
+ { "NDS Reply Buffer Size", "ncp.nds_reply_buf", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_ver,
+ { "NDS Version", "ncp.nds_ver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_nflags,
+ { "Flags", "ncp.nds_nflags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_rflags,
+ { "Request Flags", "ncp.nds_rflags", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_eflags,
+ { "Entry Flags", "ncp.nds_eflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_scope,
+ { "Scope", "ncp.nds_scope", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_name,
+ { "Name", "ncp.nds_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_name_type,
+ { "Name Type", "ncp.nds_name_type", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_comm_trans,
+ { "Communications Transport", "ncp.nds_comm_trans", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_tree_trans,
+ { "Tree Walker Transport", "ncp.nds_tree_trans", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_iteration,
+ { "Iteration Handle", "ncp.nds_iteration", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_iterator,
+ { "Iterator", "ncp.nds_iterator", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_file_handle,
+ { "File Handle", "ncp.nds_file_handle", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_file_size,
+ { "File Size", "ncp.nds_file_size", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_eid,
+ { "NDS EID", "ncp.nds_eid", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_depth,
+ { "Distance object is from Root", "ncp.nds_depth", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_info_type,
+ { "Info Type", "ncp.nds_info_type", FT_UINT32, BASE_RANGE_STRING|BASE_DEC, RVALS(nds_info_type), 0x0, NULL, HFILL }},
+
+ { &hf_nds_class_def_type,
+ { "Class Definition Type", "ncp.nds_class_def_type", FT_UINT32, BASE_DEC, VALS(class_def_type), 0x0, NULL, HFILL }},
+
+ { &hf_nds_all_attr,
+ { "All Attributes", "ncp.nds_all_attr", FT_UINT32, BASE_DEC, NULL, 0x0, "Return all Attributes?", HFILL }},
+
+ { &hf_nds_return_all_classes,
+ { "All Classes", "ncp.nds_return_all_classes", FT_UINT32, BASE_DEC, NULL, 0x0, "Return all Classes?", HFILL }},
+
+ { &hf_nds_req_flags,
+ { "Request Flags", "ncp.nds_req_flags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_attr,
+ { "Attributes", "ncp.nds_attributes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_classes,
+ { "Classes", "ncp.nds_classes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_crc,
+ { "CRC", "ncp.nds_crc", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_referrals,
+ { "Referrals", "ncp.nds_referrals", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_result_flags,
+ { "Result Flags", "ncp.nds_result_flags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_stream_flags,
+ { "Streams Flags", "ncp.nds_stream_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_tag_string,
+ { "Tags", "ncp.nds_tags", FT_UINT32, BASE_DEC, VALS(nds_tags), 0x0, NULL, HFILL }},
+
+ { &hf_value_bytes,
+ { "Bytes", "ncp.value_bytes", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_replica_type,
+ { "Replica Type", "ncp.rtype", FT_UINT32, BASE_DEC, VALS(nds_replica_type), 0x0, NULL, HFILL }},
+
+ { &hf_replica_state,
+ { "Replica State", "ncp.rstate", FT_UINT16, BASE_DEC, VALS(nds_replica_state), 0x0, NULL, HFILL }},
+
+ { &hf_nds_rnum,
+ { "Replica Number", "ncp.rnum", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_revent,
+ { "Event", "ncp.revent", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_replica_number,
+ { "Replica Number", "ncp.rnum", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_min_nds_ver,
+ { "Minimum NDS Version", "ncp.min_nds_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_ver_include,
+ { "Include NDS Version", "ncp.inc_nds_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_ver_exclude,
+ { "Exclude NDS Version", "ncp.exc_nds_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_nds_es,
+ { "Input Entry Specifier", "ncp.nds_es", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_es_type,
+ { "Entry Specifier Type", "ncp.nds_es_type", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_rdn_string,
+ { "RDN", "ncp.nds_rdn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_delim_string,
+ { "Delimiter", "ncp.nds_delim", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_nds_dn_output_type,
+ { "Output Entry Specifier Type", "ncp.nds_out_es_type", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_nested_output_type,
+ { "Nested Output Entry Specifier Type", "ncp.nds_nested_out_es", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_output_delimiter,
+ { "Output Delimiter", "ncp.nds_out_delimiter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_output_entry_specifier,
+ { "Output Entry Specifier", "ncp.nds_out_es", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_es_value,
+ { "Entry Specifier Value", "ncp.nds_es_value", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_es_rdn_count,
+ { "RDN Count", "ncp.nds_es_rdn_count", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_replica_num,
+ { "Replica Number", "ncp.nds_replica_num", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_es_seconds,
+ { "Seconds", "ncp.nds_es_seconds", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_event_num,
+ { "Event Number", "ncp.nds_event_num", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_compare_results,
+ { "Compare Values Returned", "ncp.nds_compare_results", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_parent,
+ { "Parent ID", "ncp.nds_parent", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_name_filter,
+ { "Name Filter", "ncp.nds_name_filter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_class_filter,
+ { "Class Filter", "ncp.nds_class_filter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_time_filter,
+ { "Time Filter", "ncp.nds_time_filter", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_partition_root_id,
+ { "Partition Root ID", "ncp.nds_partition_root_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_replicas,
+ { "Replicas", "ncp.nds_replicas", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_purge,
+ { "Purge Time", "ncp.nds_purge", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_local_partition,
+ { "Local Partition ID", "ncp.nds_local_partition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_partition_busy,
+ { "Partition Busy", "ncp.nds_partition_busy", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_number_of_changes,
+ { "Number of Attribute Changes", "ncp.nds_number_of_changes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_sub_count,
+ { "Subordinate Count", "ncp.sub_count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_revision,
+ { "Revision Count", "ncp.nds_rev_count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_base_class,
+ { "Base Class", "ncp.nds_base_class", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_relative_dn,
+ { "Relative Distinguished Name", "ncp.nds_relative_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_nds_root_dn,
+ { "Root Distinguished Name", "ncp.nds_root_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+#endif
+
+#if 0 /* Unused ? */
+ { &hf_nds_parent_dn,
+ { "Parent Distinguished Name", "ncp.nds_parent_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_deref_base,
+ { "Dereference Base Class", "ncp.nds_deref_base", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_base,
+ { "Base Class", "ncp.nds_base", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_super,
+ { "Super Class", "ncp.nds_super", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_nds_entry_info,
+ { "Entry Information", "ncp.nds_entry_info", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_nds_privileges,
+ { "Privileges", "ncp.nds_privileges", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_compare_attributes,
+ { "Compare Attributes?", "ncp.nds_compare_attributes", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_nds_read_attribute,
+ { "Read Attribute?", "ncp.nds_read_attribute", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_nds_write_add_delete_attribute,
+ { "Write, Add, Delete Attribute?", "ncp.nds_write_add_delete_attribute", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_nds_add_delete_self,
+ { "Add/Delete Self?", "ncp.nds_add_delete_self", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_nds_privilege_not_defined,
+ { "Privilege Not defined", "ncp.nds_privilege_not_defined", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_nds_supervisor,
+ { "Supervisor?", "ncp.nds_supervisor", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_nds_inheritance_control,
+ { "Inheritance?", "ncp.nds_inheritance_control", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_nds_browse_entry,
+ { "Browse Entry?", "ncp.nds_browse_entry", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_nds_add_entry,
+ { "Add Entry?", "ncp.nds_add_entry", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_nds_delete_entry,
+ { "Delete Entry?", "ncp.nds_delete_entry", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_nds_rename_entry,
+ { "Rename Entry?", "ncp.nds_rename_entry", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_nds_supervisor_entry,
+ { "Supervisor?", "ncp.nds_supervisor_entry", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_nds_entry_privilege_not_defined,
+ { "Privilege Not Defined", "ncp.nds_entry_privilege_not_defined", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_nds_vflags,
+ { "Value Flags", "ncp.nds_vflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_value_len,
+ { "Value Length", "ncp.nds_vlength", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_cflags,
+ { "Class Flags", "ncp.nds_cflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_asn1,
+ { "ASN.1 ID", "ncp.nds_asn1", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_acflags,
+ { "Attribute Constraint Flags", "ncp.nds_acflags", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_upper,
+ { "Upper Limit Value", "ncp.nds_upper", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_lower,
+ { "Lower Limit Value", "ncp.nds_lower", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_trustee_dn,
+ { "Trustee Distinguished Name", "ncp.nds_trustee_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_attribute_dn,
+ { "Attribute Name", "ncp.nds_attribute_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_acl_add,
+ { "ACL Templates to Add", "ncp.nds_acl_add", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_acl_del,
+ { "Access Control Lists to Delete", "ncp.nds_acl_del", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_att_add,
+ { "Attribute to Add", "ncp.nds_att_add", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_att_del,
+ { "Attribute Names to Delete", "ncp.nds_att_del", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_keep,
+ { "Delete Original RDN", "ncp.nds_keep", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_new_rdn,
+ { "New Relative Distinguished Name", "ncp.nds_new_rdn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_time_delay,
+ { "Time Delay", "ncp.nds_time_delay", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_root_name,
+ { "Root Most Object Name", "ncp.nds_root_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_new_part_id,
+ { "New Partition Root ID", "ncp.nds_new_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_child_part_id,
+ { "Child Partition Root ID", "ncp.nds_child_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_master_part_id,
+ { "Master Partition Root ID", "ncp.nds_master_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_target_name,
+ { "Target Server Name", "ncp.nds_target_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_pingflags1,
+ { "Ping (low) Request Flags", "ncp.pingflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1pingflags1,
+ { "Supported Fields", "ncp.bit1pingflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2pingflags1,
+ { "Depth", "ncp.bit2pingflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3pingflags1,
+ { "Build Number", "ncp.bit3pingflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4pingflags1,
+ { "Flags", "ncp.bit4pingflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5pingflags1,
+ { "Verification Flags", "ncp.bit5pingflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6pingflags1,
+ { "Letter Version", "ncp.bit6pingflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7pingflags1,
+ { "OS Version", "ncp.bit7pingflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8pingflags1,
+ { "Not Defined", "ncp.bit8pingflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9pingflags1,
+ { "License Flags", "ncp.bit9pingflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10pingflags1,
+ { "DS Time", "ncp.bit10pingflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11pingflags1,
+ { "Server Time", "ncp.bit11pingflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12pingflags1,
+ { "Create Time", "ncp.bit12pingflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13pingflags1,
+ { "Not Defined", "ncp.bit13pingflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14pingflags1,
+ { "Not Defined", "ncp.bit14pingflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15pingflags1,
+ { "Not Defined", "ncp.bit15pingflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16pingflags1,
+ { "Not Defined", "ncp.bit16pingflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_pingflags2,
+ { "Ping (high) Request Flags", "ncp.pingflags2", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1pingflags2,
+ { "Sap Name", "ncp.bit1pingflags2", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2pingflags2,
+ { "Tree Name", "ncp.bit2pingflags2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3pingflags2,
+ { "OS Name", "ncp.bit3pingflags2", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4pingflags2,
+ { "Hardware Name", "ncp.bit4pingflags2", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5pingflags2,
+ { "Vendor Name", "ncp.bit5pingflags2", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6pingflags2,
+ { "Not Defined", "ncp.bit6pingflags2", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7pingflags2,
+ { "Not Defined", "ncp.bit7pingflags2", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8pingflags2,
+ { "Not Defined", "ncp.bit8pingflags2", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9pingflags2,
+ { "Not Defined", "ncp.bit9pingflags2", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10pingflags2,
+ { "Not Defined", "ncp.bit10pingflags2", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11pingflags2,
+ { "Not Defined", "ncp.bit11pingflags2", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12pingflags2,
+ { "Not Defined", "ncp.bit12pingflags2", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13pingflags2,
+ { "Not Defined", "ncp.bit13pingflags2", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14pingflags2,
+ { "Not Defined", "ncp.bit14pingflags2", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15pingflags2,
+ { "Not Defined", "ncp.bit15pingflags2", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16pingflags2,
+ { "Not Defined", "ncp.bit16pingflags2", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_pingpflags1,
+ { "Ping Data Flags", "ncp.pingpflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1pingpflags1,
+ { "Root Most Master Replica", "ncp.bit1pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2pingpflags1,
+ { "Is Time Synchronized?", "ncp.bit2pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3pingpflags1,
+ { "Is Time Valid?", "ncp.bit3pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4pingpflags1,
+ { "Is DS Time Synchronized?", "ncp.bit4pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5pingpflags1,
+ { "Does Agent Have All Replicas?", "ncp.bit5pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6pingpflags1,
+ { "Not Defined", "ncp.bit6pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7pingpflags1,
+ { "Not Defined", "ncp.bit7pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8pingpflags1,
+ { "Not Defined", "ncp.bit8pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9pingpflags1,
+ { "Not Defined", "ncp.bit9pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10pingpflags1,
+ { "Not Defined", "ncp.bit10pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11pingpflags1,
+ { "Not Defined", "ncp.bit11pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12pingpflags1,
+ { "Not Defined", "ncp.bit12pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13pingpflags1,
+ { "Not Defined", "ncp.bit13pingpflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14pingpflags1,
+ { "Not Defined", "ncp.bit14pingpflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15pingpflags1,
+ { "Not Defined", "ncp.bit15pingpflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16pingpflags1,
+ { "Not Defined", "ncp.bit16pingpflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_pingvflags1,
+ { "Verification Flags", "ncp.pingvflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1pingvflags1,
+ { "Checksum", "ncp.bit1pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2pingvflags1,
+ { "CRC32", "ncp.bit2pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3pingvflags1,
+ { "Not Defined", "ncp.bit3pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4pingvflags1,
+ { "Not Defined", "ncp.bit4pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5pingvflags1,
+ { "Not Defined", "ncp.bit5pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6pingvflags1,
+ { "Not Defined", "ncp.bit6pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7pingvflags1,
+ { "Not Defined", "ncp.bit7pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8pingvflags1,
+ { "Not Defined", "ncp.bit8pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9pingvflags1,
+ { "Not Defined", "ncp.bit9pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10pingvflags1,
+ { "Not Defined", "ncp.bit10pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11pingvflags1,
+ { "Not Defined", "ncp.bit11pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12pingvflags1,
+ { "Not Defined", "ncp.bit12pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13pingvflags1,
+ { "Not Defined", "ncp.bit13pingvflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14pingvflags1,
+ { "Not Defined", "ncp.bit14pingvflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15pingvflags1,
+ { "Not Defined", "ncp.bit15pingvflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16pingvflags1,
+ { "Not Defined", "ncp.bit16pingvflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_nds_letter_ver,
+ { "Letter Version", "ncp.nds_letter_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_os_majver,
+ { "OS Major Version", "ncp.nds_os_majver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_os_minver,
+ { "OS Minor Version", "ncp.nds_os_minver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_lic_flags,
+ { "License Flags", "ncp.nds_lic_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_ds_time,
+ { "DS Time", "ncp.nds_ds_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_svr_time,
+ { "Server Time", "ncp.nds_svr_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_crt_time,
+ { "Agent Create Time", "ncp.nds_crt_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_ping_version,
+ { "Ping Version", "ncp.nds_ping_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_search_scope,
+ { "Search Scope", "ncp.nds_search_scope", FT_UINT32, BASE_DEC|BASE_RANGE_STRING, RVALS(nds_search_scope), 0x0, NULL, HFILL }},
+
+ { &hf_nds_num_objects,
+ { "Number of Objects to Search", "ncp.nds_num_objects", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_siflags,
+ { "Information Types", "ncp.siflags", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_bit1siflags,
+ { "Names", "ncp.bit1siflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_bit2siflags,
+ { "Names and Values", "ncp.bit2siflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_bit3siflags,
+ { "Effective Privileges", "ncp.bit3siflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_bit4siflags,
+ { "Value Info", "ncp.bit4siflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_bit5siflags,
+ { "Abbreviated Value", "ncp.bit5siflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_bit6siflags,
+ { "Not Defined", "ncp.bit6siflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_bit7siflags,
+ { "Not Defined", "ncp.bit7siflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_bit8siflags,
+ { "Not Defined", "ncp.bit8siflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_bit9siflags,
+ { "Expanded Class", "ncp.bit9siflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_bit10siflags,
+ { "Not Defined", "ncp.bit10siflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_bit11siflags,
+ { "Not Defined", "ncp.bit11siflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_bit12siflags,
+ { "Not Defined", "ncp.bit12siflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_bit13siflags,
+ { "Not Defined", "ncp.bit13siflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_bit14siflags,
+ { "Not Defined", "ncp.bit14siflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_bit15siflags,
+ { "Not Defined", "ncp.bit15siflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_bit16siflags,
+ { "Not Defined", "ncp.bit16siflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_nds_segment_overlap,
+ { "Segment overlap", "nds.segment.overlap", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Segment overlaps with other segments", HFILL }},
+
+ { &hf_nds_segment_overlap_conflict,
+ { "Conflicting data in segment overlap", "nds.segment.overlap.conflict", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Overlapping segments contained conflicting data", HFILL }},
+
+ { &hf_nds_segment_multiple_tails,
+ { "Multiple tail segments found", "nds.segment.multipletails", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Several tails were found when desegmenting the packet", HFILL }},
+
+ { &hf_nds_segment_too_long_segment,
+ { "Segment too long", "nds.segment.toolongsegment", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Segment contained data past end of packet", HFILL }},
+
+ { &hf_nds_segment_error,
+ { "Desegmentation error", "nds.segment.error", FT_FRAMENUM, BASE_NONE, NULL, 0x0, "Desegmentation error due to illegal segments", HFILL }},
+
+ { &hf_nds_segment_count,
+ { "Segment count", "nds.segment.count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_reassembled_length,
+ { "Reassembled NDS length", "nds.reassembled.length", FT_UINT32, BASE_DEC, NULL, 0x0, "The total length of the reassembled payload", HFILL }},
+
+ { &hf_nds_segment,
+ { "NDS Fragment", "nds.fragment", FT_FRAMENUM, BASE_NONE, NULL, 0x0, "NDPS Fragment", HFILL }},
+
+ { &hf_nds_segments,
+ { "NDS Fragments", "nds.fragments", FT_NONE, BASE_NONE, NULL, 0x0, "NDPS Fragments", HFILL }},
+
+ { &hf_nds_verb2b_req_flags,
+ { "Flags", "ncp.nds_verb2b_flags", FT_UINT32, BASE_HEX, VALS(nds_verb2b_flag_vals), 0x0, NULL, HFILL }},
+
+ { &hf_ncp_ip_address,
+ { "IP Address", "ncp.ip_addr", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_copyright,
+ { "Copyright", "ncp.copyright", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ndsprot1flag,
+ { "Not Defined", "ncp.nds_prot_bit1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
+
+ { &hf_ndsprot2flag,
+ { "Not Defined", "ncp.nds_prot_bit2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
+
+ { &hf_ndsprot3flag,
+ { "Not Defined", "ncp.nds_prot_bit3", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
+
+ { &hf_ndsprot4flag,
+ { "Not Defined", "ncp.nds_prot_bit4", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
+
+ { &hf_ndsprot5flag,
+ { "Not Defined", "ncp.nds_prot_bit5", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
+
+ { &hf_ndsprot6flag,
+ { "Not Defined", "ncp.nds_prot_bit6", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
+
+ { &hf_ndsprot7flag,
+ { "Not Defined", "ncp.nds_prot_bit7", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
+
+ { &hf_ndsprot8flag,
+ { "Not Defined", "ncp.nds_prot_bit8", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
+
+ { &hf_ndsprot9flag,
+ { "Not Defined", "ncp.nds_prot_bit9", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
+
+ { &hf_ndsprot10flag,
+ { "Not Defined", "ncp.nds_prot_bit10", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
+
+ { &hf_ndsprot11flag,
+ { "Not Defined", "ncp.nds_prot_bit11", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
+
+ { &hf_ndsprot12flag,
+ { "Not Defined", "ncp.nds_prot_bit12", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
+
+ { &hf_ndsprot13flag,
+ { "Not Defined", "ncp.nds_prot_bit13", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
+
+ { &hf_ndsprot14flag,
+ { "Not Defined", "ncp.nds_prot_bit14", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
+
+ { &hf_ndsprot15flag,
+ { "Include CRC in NDS Header", "ncp.nds_prot_bit15", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
+
+ { &hf_ndsprot16flag,
+ { "Client is a Server", "ncp.nds_prot_bit16", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
+
+ { &hf_nds_svr_dst_name,
+ { "Server Distinguished Name", "ncp.nds_svr_dist_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_tune_mark,
+ { "Tune Mark", "ncp.ndstunemark", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_nds_create_time,
+ { "NDS Creation Time", "ncp.ndscreatetime", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_srvr_param_string,
+ { "Set Parameter Value", "ncp.srvr_param_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_srvr_param_number,
+ { "Set Parameter Value", "ncp.srvr_param_number", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_srvr_param_boolean,
+ { "Set Parameter Value", "ncp.srvr_param_boolean", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_number_of_items,
+ { "Number of Items", "ncp.ndsitems", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_nds_iterverb,
+ { "NDS Iteration Verb", "ncp.ndsiterverb", FT_UINT32, BASE_DEC_HEX, VALS(iterator_subverbs), 0x0, NULL, HFILL }},
+
+ { &hf_iter_completion_code,
+ { "Iteration Completion Code", "ncp.iter_completion_code", FT_UINT32, BASE_HEX, VALS(nds_reply_errors), 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_nds_iterobj,
+ { "Iterator Object", "ncp.ndsiterobj", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_iter_verb_completion_code,
+ { "Completion Code", "ncp.iter_verb_completion_code", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_iter_ans,
+ { "Iterator Answer", "ncp.iter_answer", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_positionable,
+ { "Positionable", "ncp.iterpositionable", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_num_skipped,
+ { "Number Skipped", "ncp.iternumskipped", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_num_to_skip,
+ { "Number to Skip", "ncp.iternumtoskip", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_timelimit,
+ { "Time Limit", "ncp.itertimelimit", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_iter_index,
+ { "Iterator Index", "ncp.iterindex", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_num_to_get,
+ { "Number to Get", "ncp.iternumtoget", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+#if 0 /* Unused ? */
+ { &hf_ret_info_type,
+ { "Return Information Type", "ncp.iterretinfotype", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+#endif
+
+ { &hf_data_size,
+ { "Data Size", "ncp.iterdatasize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_this_count,
+ { "Number of Items", "ncp.itercount", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_max_entries,
+ { "Maximum Entries", "ncp.itermaxentries", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_move_position,
+ { "Move Position", "ncp.itermoveposition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_iter_copy,
+ { "Iterator Copy", "ncp.itercopy", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_iter_position,
+ { "Iteration Position", "ncp.iterposition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_iter_search,
+ { "Search Filter", "ncp.iter_search", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_iter_other,
+ { "Other Iteration", "ncp.iterother", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_oid,
+ { "Object ID", "ncp.nds_oid", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_ncp_bytes_actually_trans_64,
+ { "Bytes Actually Transferred", "ncp.bytes_actually_trans_64", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_sap_name,
+ { "SAP Name", "ncp.sap_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_os_name,
+ { "OS Name", "ncp.os_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_vendor_name,
+ { "Vendor Name", "ncp.vendor_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_hardware_name,
+ { "Hardware Name", "ncp.harware_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_no_request_record_found,
+ { "No request record found. Parsing is impossible.", "ncp.no_request_record_found", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_search_modifier,
+ { "Search Modifier", "ncp.search_modifier", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_search_pattern,
+ { "Search Pattern", "ncp.search_pattern", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_acl_protected_attribute,
+ { "Protected Attribute", "ncp.nds_acl_protected_attribute", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_acl_subject,
+ { "Subject", "ncp.nds_acl_subject", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
+
+ { &hf_nds_acl_privileges,
+ { "Subject", "ncp.nds_acl_privileges", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
+
+""")
+ # Print the registration code for the hf variables
+ for var in sorted_vars:
+ print(" { &%s," % (var.HFName()))
+ print(" { \"%s\", \"%s\", %s, %s, %s, 0x%x, NULL, HFILL }},\n" % \
+ (var.Description(), var.DFilter(),
+ var.WiresharkFType(), var.Display(), var.ValuesName(),
+ var.Mask()))
+
+ print(" };\n")
+
+ if ett_list:
+ print(" static int *ett[] = {")
+
+ for ett in ett_list:
+ print(" &%s," % (ett,))
+
+ print(" };\n")
+
+ print("""
+ static ei_register_info ei[] = {
+ { &ei_ncp_file_handle, { "ncp.file_handle.expert", PI_REQUEST_CODE, PI_CHAT, "Close file handle", EXPFILL }},
+ { &ei_ncp_file_rights, { "ncp.file_rights", PI_REQUEST_CODE, PI_CHAT, "File rights", EXPFILL }},
+ { &ei_ncp_op_lock_handle, { "ncp.op_lock_handle", PI_REQUEST_CODE, PI_CHAT, "Op-lock on handle", EXPFILL }},
+ { &ei_ncp_file_rights_change, { "ncp.file_rights.change", PI_REQUEST_CODE, PI_CHAT, "Change handle rights", EXPFILL }},
+ { &ei_ncp_effective_rights, { "ncp.effective_rights.expert", PI_RESPONSE_CODE, PI_CHAT, "Handle effective rights", EXPFILL }},
+ { &ei_ncp_server, { "ncp.server", PI_RESPONSE_CODE, PI_CHAT, "Server info", EXPFILL }},
+ { &ei_iter_verb_completion_code, { "ncp.iter_verb_completion_code.expert", PI_RESPONSE_CODE, PI_ERROR, "Iteration Verb Error", EXPFILL }},
+ { &ei_ncp_connection_request, { "ncp.connection_request", PI_RESPONSE_CODE, PI_CHAT, "Connection Request", EXPFILL }},
+ { &ei_ncp_destroy_connection, { "ncp.destroy_connection", PI_RESPONSE_CODE, PI_CHAT, "Destroy Connection Request", EXPFILL }},
+ { &ei_nds_reply_error, { "ncp.ndsreplyerror.expert", PI_RESPONSE_CODE, PI_ERROR, "NDS Error", EXPFILL }},
+ { &ei_nds_iteration, { "ncp.nds_iteration.error", PI_RESPONSE_CODE, PI_ERROR, "NDS Iteration Error", EXPFILL }},
+ { &ei_ncp_eid, { "ncp.eid", PI_RESPONSE_CODE, PI_CHAT, "EID", EXPFILL }},
+ { &ei_ncp_completion_code, { "ncp.completion_code.expert", PI_RESPONSE_CODE, PI_ERROR, "Code Completion Error", EXPFILL }},
+ { &ei_ncp_connection_status, { "ncp.connection_status.bad", PI_RESPONSE_CODE, PI_ERROR, "Error: Bad Connection Status", EXPFILL }},
+ { &ei_ncp_connection_destroyed, { "ncp.connection_destroyed", PI_RESPONSE_CODE, PI_CHAT, "Connection Destroyed", EXPFILL }},
+ { &ei_ncp_no_request_record_found, { "ncp.no_request_record_found", PI_SEQUENCE, PI_NOTE, "No request record found.", EXPFILL }},
+ { &ei_ncp_invalid_offset, { "ncp.invalid_offset", PI_MALFORMED, PI_ERROR, "Invalid offset", EXPFILL }},
+ { &ei_ncp_address_type, { "ncp.address_type.unknown", PI_PROTOCOL, PI_WARN, "Unknown Address Type", EXPFILL }},
+ { &ei_ncp_value_too_large, { "ncp.value_too_large", PI_MALFORMED, PI_ERROR, "Length value goes past the end of the packet", EXPFILL }},
+ };
+
+ expert_module_t* expert_ncp;
+
+ proto_register_field_array(proto_ncp, hf, array_length(hf));""")
+
+ if ett_list:
+ print("""
+ proto_register_subtree_array(ett, array_length(ett));""")
+
+ print("""
+ expert_ncp = expert_register_protocol(proto_ncp);
+ expert_register_field_array(expert_ncp, ei, array_length(ei));
+ register_init_routine(&ncp_init_protocol);
+ /* fragment */
+ reassembly_table_register(&nds_reassembly_table,
+ &addresses_reassembly_table_functions);
+
+ ncp_req_hash = wmem_map_new_autoreset(wmem_epan_scope(), wmem_file_scope(), ncp_hash, ncp_equal);
+ ncp_req_eid_hash = wmem_map_new_autoreset(wmem_epan_scope(), wmem_file_scope(), ncp_eid_hash, ncp_eid_equal);
+
+ """)
+
+ # End of proto_register_ncp2222()
+ print("}")
+
def usage():
    """Show how to invoke the script, then abort with exit status 1."""
    message = "Usage: ncp2222.py -o output_file"
    print(message)
    sys.exit(1)
+
def main():
    """Entry point: parse options, redirect stdout into the output file,
    and run the NCP dissector code generator.

    Accepts a single option, ``-o output_file``.  Progress messages go to
    the original stdout (saved in the global ``msg``) while the generated
    C code is written to the output file via the redirected ``sys.stdout``.
    If generation raises, the partially written output file is closed and
    removed, and the process exits with status 1.
    """
    global compcode_lists
    global ptvc_lists
    global msg

    optstring = "o:"
    out_filename = None

    try:
        opts, args = getopt.getopt(sys.argv[1:], optstring)
    except getopt.error:
        usage()

    for opt, arg in opts:
        if opt == "-o":
            out_filename = arg
        else:
            usage()

    # No positional arguments are accepted, and -o is mandatory.
    if len(args) != 0:
        usage()

    if not out_filename:
        usage()

    # Create the output file
    try:
        out_file = open(out_filename, "w")
    except IOError as err:
        # Bind the caught exception so the real error text is reported,
        # not the exception class object itself.
        sys.exit("Could not open %s for writing: %s" % (out_filename, err))

    # Set msg to current stdout
    msg = sys.stdout

    # Set stdout to the output file so every plain print() in the
    # generator lands in the generated C source.
    sys.stdout = out_file

    msg.write("Processing NCP definitions...\n")
    # Run the code, and if we catch any exception,
    # erase the output file.
    try:
        compcode_lists = UniqueCollection('Completion Code Lists')
        ptvc_lists = UniqueCollection('PTVC Lists')

        define_errors()
        define_groups()

        define_ncp2222()

        msg.write("Defined %d NCP types.\n" % (len(packets),))
        produce_code()
    except Exception:
        traceback.print_exc(20, msg)
        try:
            out_file.close()
        except IOError as err:
            msg.write("Could not close %s: %s\n" % (out_filename, err))

        # Remove the half-written output so a failed run cannot leave a
        # truncated file that a later build might consume.
        try:
            if os.path.exists(out_filename):
                os.remove(out_filename)
        except OSError as err:
            msg.write("Could not remove %s: %s\n" % (out_filename, err))

        sys.exit(1)
+
+
+
+def define_ncp2222():
+ ##############################################################################
+ # NCP Packets. Here I list functions and subfunctions in hexadecimal like the
+ # NCP book (and I believe LanAlyzer does this too).
+ # However, Novell lists these in decimal in their on-line documentation.
+ ##############################################################################
+ # 2222/01
+ pkt = NCP(0x01, "File Set Lock", 'sync')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/02
+ pkt = NCP(0x02, "File Release Lock", 'sync')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff00])
+ # 2222/03
+ pkt = NCP(0x03, "Log File Exclusive", 'sync')
+ pkt.Request( (12, 267), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, LockFlag ),
+ rec( 9, 2, TimeoutLimit, ENC_BIG_ENDIAN ),
+ rec( 11, (1, 256), FilePath ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8200, 0x9600, 0xfe0d, 0xff01])
+ # 2222/04
+ pkt = NCP(0x04, "Lock File Set", 'sync')
+ pkt.Request( 9, [
+ rec( 7, 2, TimeoutLimit ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xfe0d, 0xff01])
+ ## 2222/05
+ pkt = NCP(0x05, "Release File", 'sync')
+ pkt.Request( (9, 264), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, (1, 256), FilePath ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9b00, 0x9c03, 0xff1a])
+ # 2222/06
+ pkt = NCP(0x06, "Release File Set", 'sync')
+ pkt.Request( 8, [
+ rec( 7, 1, LockFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/07
+ pkt = NCP(0x07, "Clear File", 'sync')
+ pkt.Request( (9, 264), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, (1, 256), FilePath ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03,
+ 0xa100, 0xfd00, 0xff1a])
+ # 2222/08
+ pkt = NCP(0x08, "Clear File Set", 'sync')
+ pkt.Request( 8, [
+ rec( 7, 1, LockFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/09
+ pkt = NCP(0x09, "Log Logical Record", 'sync')
+ pkt.Request( (11, 138), [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 2, TimeoutLimit, ENC_BIG_ENDIAN ),
+ rec( 10, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Log Logical Record: %s", ", %s")),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xfe0d, 0xff1a])
+ # 2222/0A, 10
+ pkt = NCP(0x0A, "Lock Logical Record Set", 'sync')
+ pkt.Request( 10, [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 2, TimeoutLimit ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xfe0d, 0xff1a])
+ # 2222/0B, 11
+ pkt = NCP(0x0B, "Clear Logical Record", 'sync')
+ pkt.Request( (8, 135), [
+ rec( 7, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Clear Logical Record: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff1a])
+ # 2222/0C, 12
+ pkt = NCP(0x0C, "Release Logical Record", 'sync')
+ pkt.Request( (8, 135), [
+ rec( 7, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Release Logical Record: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff1a])
+ # 2222/0D, 13
+ pkt = NCP(0x0D, "Release Logical Record Set", 'sync')
+ pkt.Request( 8, [
+ rec( 7, 1, LockFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/0E, 14
+ pkt = NCP(0x0E, "Clear Logical Record Set", 'sync')
+ pkt.Request( 8, [
+ rec( 7, 1, LockFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/1100, 17/00
+ pkt = NCP(0x1100, "Write to Spool File", 'print')
+ pkt.Request( (11, 16), [
+ rec( 10, ( 1, 6 ), Data, info_str=(Data, "Write to Spool File: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0104, 0x8000, 0x8101, 0x8701, 0x8800,
+ 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9400, 0x9500,
+ 0x9600, 0x9804, 0x9900, 0xa100, 0xa201, 0xff19])
+ # 2222/1101, 17/01
+ pkt = NCP(0x1101, "Close Spool File", 'print')
+ pkt.Request( 11, [
+ rec( 10, 1, AbortQueueFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8701, 0x8800, 0x8d00,
+ 0x8e00, 0x8f00, 0x9001, 0x9300, 0x9400, 0x9500,
+ 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, 0x9d00,
+ 0xa100, 0xd000, 0xd100, 0xd202, 0xd300, 0xd400,
+ 0xda01, 0xe800, 0xea00, 0xeb00, 0xec00, 0xfc06,
+ 0xfd00, 0xfe07, 0xff06])
+ # 2222/1102, 17/02
+ pkt = NCP(0x1102, "Set Spool File Flags", 'print')
+ pkt.Request( 30, [
+ rec( 10, 1, PrintFlags ),
+ rec( 11, 1, TabSize ),
+ rec( 12, 1, TargetPrinter ),
+ rec( 13, 1, Copies ),
+ rec( 14, 1, FormType ),
+ rec( 15, 1, Reserved ),
+ rec( 16, 14, BannerName ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xd202, 0xd300, 0xe800, 0xea00,
+ 0xeb00, 0xec00, 0xfc06, 0xfe07, 0xff06])
+
+ # 2222/1103, 17/03
+ pkt = NCP(0x1103, "Spool A Disk File", 'print')
+ pkt.Request( (12, 23), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, (1, 12), Data, info_str=(Data, "Spool a Disk File: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8701, 0x8800, 0x8d00,
+ 0x8e00, 0x8f00, 0x9001, 0x9300, 0x9400, 0x9500,
+ 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, 0x9d00,
+ 0xa100, 0xd000, 0xd100, 0xd202, 0xd300, 0xd400,
+ 0xda01, 0xe800, 0xea00, 0xeb00, 0xec00, 0xfc06,
+ 0xfd00, 0xfe07, 0xff06])
+
+ # 2222/1106, 17/06
+ pkt = NCP(0x1106, "Get Printer Status", 'print')
+ pkt.Request( 11, [
+ rec( 10, 1, TargetPrinter ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 1, PrinterHalted ),
+ rec( 9, 1, PrinterOffLine ),
+ rec( 10, 1, CurrentFormType ),
+ rec( 11, 1, RedirectedPrinter ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xfb05, 0xfd00, 0xff06])
+
+ # 2222/1109, 17/09
+ pkt = NCP(0x1109, "Create Spool File", 'print')
+ pkt.Request( (12, 23), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, (1, 12), Data, info_str=(Data, "Create Spool File: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8400, 0x8701, 0x8d00,
+ 0x8f00, 0x9001, 0x9400, 0x9600, 0x9804, 0x9900,
+ 0x9b03, 0x9c03, 0xa100, 0xd000, 0xd100, 0xd202,
+ 0xd300, 0xd400, 0xda01, 0xe800, 0xea00, 0xeb00,
+ 0xec00, 0xfc06, 0xfd00, 0xfe07, 0xff06])
+
+ # 2222/110A, 17/10
+ pkt = NCP(0x110A, "Get Printer's Queue", 'print')
+ pkt.Request( 11, [
+ rec( 10, 1, TargetPrinter ),
+ ])
+ pkt.Reply( 12, [
+ rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff06])
+
+ # 2222/12, 18
+ pkt = NCP(0x12, "Get Volume Info with Number", 'file')
+ pkt.Request( 8, [
+ rec( 7, 1, VolumeNumber,info_str=(VolumeNumber, "Get Volume Information for Volume %d", ", %d") )
+ ])
+ pkt.Reply( 36, [
+ rec( 8, 2, SectorsPerCluster, ENC_BIG_ENDIAN ),
+ rec( 10, 2, TotalVolumeClusters, ENC_BIG_ENDIAN ),
+ rec( 12, 2, AvailableClusters, ENC_BIG_ENDIAN ),
+ rec( 14, 2, TotalDirectorySlots, ENC_BIG_ENDIAN ),
+ rec( 16, 2, AvailableDirectorySlots, ENC_BIG_ENDIAN ),
+ rec( 18, 16, VolumeName ),
+ rec( 34, 2, RemovableFlag, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9804])
+
+ # 2222/13, 19
+ pkt = NCP(0x13, "Get Station Number", 'connection')
+ pkt.Request(7)
+ pkt.Reply(11, [
+ rec( 8, 3, StationNumber )
+ ])
+ pkt.CompletionCodes([0x0000, 0xff00])
+
+ # 2222/14, 20
+ pkt = NCP(0x14, "Get File Server Date And Time", 'fileserver')
+ pkt.Request(7)
+ pkt.Reply(15, [
+ rec( 8, 1, Year ),
+ rec( 9, 1, Month ),
+ rec( 10, 1, Day ),
+ rec( 11, 1, Hour ),
+ rec( 12, 1, Minute ),
+ rec( 13, 1, Second ),
+ rec( 14, 1, DayOfWeek ),
+ ])
+ pkt.CompletionCodes([0x0000])
+
+ # 2222/1500, 21/00
+ pkt = NCP(0x1500, "Send Broadcast Message", 'message')
+ pkt.Request((13, 70), [
+ rec( 10, 1, ClientListLen, var="x" ),
+ rec( 11, 1, TargetClientList, repeat="x" ),
+ rec( 12, (1, 58), TargetMessage, info_str=(TargetMessage, "Send Broadcast Message: %s", ", %s") ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, ClientListLen, var="x" ),
+ rec( 9, 1, SendStatus, repeat="x" )
+ ])
+ pkt.CompletionCodes([0x0000, 0xfd00])
+
+ # 2222/1501, 21/01
+ pkt = NCP(0x1501, "Get Broadcast Message", 'message')
+ pkt.Request(10)
+ pkt.Reply((9,66), [
+ rec( 8, (1, 58), TargetMessage )
+ ])
+ pkt.CompletionCodes([0x0000, 0xfd00])
+
+ # 2222/1502, 21/02
+ pkt = NCP(0x1502, "Disable Broadcasts", 'message')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xfb0a])
+
+ # 2222/1503, 21/03
+ pkt = NCP(0x1503, "Enable Broadcasts", 'message')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+
+ # 2222/1509, 21/09
+ pkt = NCP(0x1509, "Broadcast To Console", 'message')
+ pkt.Request((11, 68), [
+ rec( 10, (1, 58), TargetMessage, info_str=(TargetMessage, "Broadcast to Console: %s", ", %s") )
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+
+ # 2222/150A, 21/10
+ pkt = NCP(0x150A, "Send Broadcast Message", 'message')
+ pkt.Request((17, 74), [
+ rec( 10, 2, ClientListCount, ENC_LITTLE_ENDIAN, var="x" ),
+ rec( 12, 4, ClientList, ENC_LITTLE_ENDIAN, repeat="x" ),
+ rec( 16, (1, 58), TargetMessage, info_str=(TargetMessage, "Send Broadcast Message: %s", ", %s") ),
+ ])
+ pkt.Reply(14, [
+ rec( 8, 2, ClientListCount, ENC_LITTLE_ENDIAN, var="x" ),
+ rec( 10, 4, ClientCompFlag, ENC_LITTLE_ENDIAN, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xfd00])
+
+ # 2222/150B, 21/11
+ pkt = NCP(0x150B, "Get Broadcast Message", 'message')
+ pkt.Request(10)
+ pkt.Reply((9,66), [
+ rec( 8, (1, 58), TargetMessage )
+ ])
+ pkt.CompletionCodes([0x0000, 0xfd00])
+
+ # 2222/150C, 21/12
+ pkt = NCP(0x150C, "Connection Message Control", 'message')
+ pkt.Request(22, [
+ rec( 10, 1, ConnectionControlBits ),
+ rec( 11, 3, Reserved3 ),
+ rec( 14, 4, ConnectionListCount, ENC_LITTLE_ENDIAN, var="x" ),
+ rec( 18, 4, ConnectionList, ENC_LITTLE_ENDIAN, repeat="x" ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff00])
+
+ # 2222/1600, 22/0
+ pkt = NCP(0x1600, "Set Directory Handle", 'file')
+ pkt.Request((13,267), [
+ rec( 10, 1, TargetDirHandle ),
+ rec( 11, 1, DirHandle ),
+ rec( 12, (1, 255), Path, info_str=(Path, "Set Directory Handle to: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
+ 0xfd00, 0xff00])
+
+
+ # 2222/1601, 22/1
+ pkt = NCP(0x1601, "Get Directory Path", 'file')
+ pkt.Request(11, [
+ rec( 10, 1, DirHandle,info_str=(DirHandle, "Get Directory Path for Directory Handle %d", ", %d") ),
+ ])
+ pkt.Reply((9,263), [
+ rec( 8, (1,255), Path ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9b00, 0x9c00, 0xa100])
+
+ # 2222/1602, 22/2
+ pkt = NCP(0x1602, "Scan Directory Information", 'file')
+ pkt.Request((14,268), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 2, StartingSearchNumber, ENC_BIG_ENDIAN ),
+ rec( 13, (1, 255), Path, info_str=(Path, "Scan Directory Information: %s", ", %s") ),
+ ])
+ pkt.Reply(36, [
+ rec( 8, 16, DirectoryPath ),
+ rec( 24, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 26, 2, CreationTime, ENC_BIG_ENDIAN ),
+ rec( 28, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 32, 1, AccessRightsMask ),
+ rec( 33, 1, Reserved ),
+ rec( 34, 2, NextSearchNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
+ 0xfd00, 0xff00])
+
+ # 2222/1603, 22/3
+ pkt = NCP(0x1603, "Get Effective Directory Rights", 'file')
+ pkt.Request((12,266), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, (1, 255), Path, info_str=(Path, "Get Effective Directory Rights: %s", ", %s") ),
+ ])
+ pkt.Reply(9, [
+ rec( 8, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
+ 0xfd00, 0xff00])
+
+ # 2222/1604, 22/4
+ pkt = NCP(0x1604, "Modify Maximum Rights Mask", 'file')
+ pkt.Request((14,268), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, RightsGrantMask ),
+ rec( 12, 1, RightsRevokeMask ),
+ rec( 13, (1, 255), Path, info_str=(Path, "Modify Maximum Rights Mask: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
+ 0xfd00, 0xff00])
+
+ # 2222/1605, 22/5
+ pkt = NCP(0x1605, "Get Volume Number", 'file')
+ pkt.Request((11, 265), [
+ rec( 10, (1,255), VolumeNameLen, info_str=(VolumeNameLen, "Get Volume Number for: %s", ", %s") ),
+ ])
+ pkt.Reply(9, [
+ rec( 8, 1, VolumeNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804])
+
+ # 2222/1606, 22/6
+ pkt = NCP(0x1606, "Get Volume Name", 'file')
+ pkt.Request(11, [
+ rec( 10, 1, VolumeNumber,info_str=(VolumeNumber, "Get Name for Volume %d", ", %d") ),
+ ])
+ pkt.Reply((9, 263), [
+ rec( 8, (1,255), VolumeNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0xff00])
+
+ # 2222/160A, 22/10
+ pkt = NCP(0x160A, "Create Directory", 'file')
+ pkt.Request((13,267), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, AccessRightsMask ),
+ rec( 12, (1, 255), Path, info_str=(Path, "Create Directory: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8400, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03,
+ 0x9e00, 0xa100, 0xfd00, 0xff00])
+
+ # 2222/160B, 22/11
+ pkt = NCP(0x160B, "Delete Directory", 'file')
+ pkt.Request((13,267), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, Reserved ),
+ rec( 12, (1, 255), Path, info_str=(Path, "Delete Directory: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8a00, 0x9600, 0x9804, 0x9b03, 0x9c03,
+ 0x9f00, 0xa000, 0xa100, 0xfd00, 0xff00])
+
+ # 2222/160C, 22/12
+ pkt = NCP(0x160C, "Scan Directory for Trustees", 'file')
+ pkt.Request((13,267), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, TrusteeSetNumber ),
+ rec( 12, (1, 255), Path, info_str=(Path, "Scan Directory for Trustees: %s", ", %s") ),
+ ])
+ pkt.Reply(57, [
+ rec( 8, 16, DirectoryPath ),
+ rec( 24, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 26, 2, CreationTime, ENC_BIG_ENDIAN ),
+ rec( 28, 4, CreatorID ),
+ rec( 32, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 36, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 40, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 44, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 48, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 52, 1, AccessRightsMask ),
+ rec( 53, 1, AccessRightsMask ),
+ rec( 54, 1, AccessRightsMask ),
+ rec( 55, 1, AccessRightsMask ),
+ rec( 56, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c03,
+ 0xa100, 0xfd00, 0xff00])
+
+ # 2222/160D, 22/13
+ pkt = NCP(0x160D, "Add Trustee to Directory", 'file')
+ pkt.Request((17,271), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 15, 1, AccessRightsMask ),
+ rec( 16, (1, 255), Path, info_str=(Path, "Add Trustee to Directory: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03,
+ 0xa100, 0xfc06, 0xfd00, 0xff00])
+
+ # 2222/160E, 22/14
+ pkt = NCP(0x160E, "Delete Trustee from Directory", 'file')
+ pkt.Request((17,271), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 15, 1, Reserved ),
+ rec( 16, (1, 255), Path, info_str=(Path, "Delete Trustee from Directory: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03,
+ 0xa100, 0xfc06, 0xfd00, 0xfe07, 0xff00])
+
+ # 2222/160F, 22/15
+ pkt = NCP(0x160F, "Rename Directory", 'file')
+ pkt.Request((13, 521), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, (1, 255), Path, info_str=(Path, "Rename Directory: %s", ", %s") ),
+ rec( -1, (1, 255), NewPath ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8b00, 0x9200, 0x9600, 0x9804, 0x9b03, 0x9c03,
+ 0x9e00, 0xa100, 0xef00, 0xfd00, 0xff00])
+
+ # 2222/1610, 22/16
+ pkt = NCP(0x1610, "Purge Erased Files", 'file')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8100, 0x9600, 0x9804, 0xa100, 0xff00])
+
+ # 2222/1611, 22/17
+ pkt = NCP(0x1611, "Recover Erased File", 'file')
+ pkt.Request(11, [
+ rec( 10, 1, DirHandle,info_str=(DirHandle, "Recover Erased File from Directory Handle %d", ", %d") ),
+ ])
+ pkt.Reply(38, [
+ rec( 8, 15, OldFileName ),
+ rec( 23, 15, NewFileName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03,
+ 0xa100, 0xfd00, 0xff00])
+ # 2222/1612, 22/18
+ pkt = NCP(0x1612, "Alloc Permanent Directory Handle", 'file')
+ pkt.Request((13, 267), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, DirHandleName ),
+ rec( 12, (1,255), Path, info_str=(Path, "Allocate Permanent Directory Handle: %s", ", %s") ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, DirHandle ),
+ rec( 9, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9b00, 0x9c03, 0x9d00,
+ 0xa100, 0xfd00, 0xff00])
+ # 2222/1613, 22/19
+ pkt = NCP(0x1613, "Alloc Temporary Directory Handle", 'file')
+ pkt.Request((13, 267), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, DirHandleName ),
+ rec( 12, (1,255), Path, info_str=(Path, "Allocate Temporary Directory Handle: %s", ", %s") ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, DirHandle ),
+ rec( 9, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9c03, 0x9d00,
+ 0xa100, 0xfd00, 0xff00])
+ # 2222/1614, 22/20
+ pkt = NCP(0x1614, "Deallocate Directory Handle", 'file')
+ pkt.Request(11, [
+ rec( 10, 1, DirHandle,info_str=(DirHandle, "Deallocate Directory Handle %d", ", %d") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9b03])
+ # 2222/1615, 22/21
+ pkt = NCP(0x1615, "Get Volume Info with Handle", 'file')
+ pkt.Request( 11, [
+ rec( 10, 1, DirHandle,info_str=(DirHandle, "Get Volume Information with Handle %d", ", %d") )
+ ])
+ pkt.Reply( 36, [
+ rec( 8, 2, SectorsPerCluster, ENC_BIG_ENDIAN ),
+ rec( 10, 2, TotalVolumeClusters, ENC_BIG_ENDIAN ),
+ rec( 12, 2, AvailableClusters, ENC_BIG_ENDIAN ),
+ rec( 14, 2, TotalDirectorySlots, ENC_BIG_ENDIAN ),
+ rec( 16, 2, AvailableDirectorySlots, ENC_BIG_ENDIAN ),
+ rec( 18, 16, VolumeName ),
+ rec( 34, 2, RemovableFlag, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xff00])
+ # 2222/1616, 22/22
+ pkt = NCP(0x1616, "Alloc Special Temporary Directory Handle", 'file')
+ pkt.Request((13, 267), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, DirHandleName ),
+ rec( 12, (1,255), Path, info_str=(Path, "Allocate Special Temporary Directory Handle: %s", ", %s") ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, DirHandle ),
+ rec( 9, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9b00, 0x9c03, 0x9d00,
+ 0xa100, 0xfd00, 0xff00])
+ # 2222/1617, 22/23
+ pkt = NCP(0x1617, "Extract a Base Handle", 'file')
+ pkt.Request(11, [
+ rec( 10, 1, DirHandle, info_str=(DirHandle, "Extract a Base Handle from Directory Handle %d", ", %d") ),
+ ])
+ pkt.Reply(22, [
+ rec( 8, 10, ServerNetworkAddress ),
+ rec( 18, 4, DirHandleLong ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9b03])
+ # 2222/1618, 22/24
+ pkt = NCP(0x1618, "Restore an Extracted Base Handle", 'file')
+ pkt.Request(24, [
+ rec( 10, 10, ServerNetworkAddress ),
+ rec( 20, 4, DirHandleLong ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, DirHandle ),
+ rec( 9, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c00, 0x9d00, 0xa100,
+ 0xfd00, 0xff00])
+ # 2222/1619, 22/25
+ pkt = NCP(0x1619, "Set Directory Information", 'file')
+ pkt.Request((21, 275), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 2, CreationDate ),
+ rec( 13, 2, CreationTime ),
+ rec( 15, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 19, 1, AccessRightsMask ),
+ rec( 20, (1,255), Path, info_str=(Path, "Set Directory Information: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c00, 0xa100,
+ 0xff16])
+ # 2222/161A, 22/26
+ pkt = NCP(0x161A, "Get Path Name of a Volume-Directory Number Pair", 'file')
+ pkt.Request(13, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 2, DirectoryEntryNumberWord ),
+ ])
+ pkt.Reply((9,263), [
+ rec( 8, (1,255), Path ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9804, 0x9c00, 0xa100])
+ # 2222/161B, 22/27
+ pkt = NCP(0x161B, "Scan Salvageable Files", 'file')
+ pkt.Request(15, [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, SequenceNumber ),
+ ])
+ pkt.Reply(140, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 2, Subdirectory ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 1, UniqueID ),
+ rec( 21, 1, FlagsDef ),
+ rec( 22, 1, DestNameSpace ),
+ rec( 23, 1, FileNameLen ),
+ rec( 24, 12, FileName12 ),
+ rec( 36, 2, CreationTime ),
+ rec( 38, 2, CreationDate ),
+ rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 44, 2, ArchivedTime ),
+ rec( 46, 2, ArchivedDate ),
+ rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ),
+ rec( 52, 2, UpdateTime ),
+ rec( 54, 2, UpdateDate ),
+ rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ),
+ rec( 60, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 64, 44, Reserved44 ),
+ rec( 108, 2, InheritedRightsMask ),
+ rec( 110, 2, LastAccessedDate ),
+ rec( 112, 4, DeletedFileTime ),
+ rec( 116, 2, DeletedTime ),
+ rec( 118, 2, DeletedDate ),
+ rec( 120, 4, DeletedID, ENC_BIG_ENDIAN ),
+ rec( 124, 16, Reserved16 ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xfb01, 0x9801, 0xff1d])
+ # 2222/161C, 22/28
+ pkt = NCP(0x161C, "Recover Salvageable File", 'file')
+ pkt.Request((17,525), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, SequenceNumber ),
+ rec( 15, (1, 255), FileName, info_str=(FileName, "Recover File: %s", ", %s") ),
+ rec( -1, (1, 255), NewFileNameLen ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8401, 0x9c03, 0xfe02])
+ # 2222/161D, 22/29
+ pkt = NCP(0x161D, "Purge Salvageable File", 'file')
+ pkt.Request(15, [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, SequenceNumber ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8500, 0x9c03])
+ # 2222/161E, 22/30
+ pkt = NCP(0x161E, "Scan a Directory", 'file')
+ pkt.Request((17, 271), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, DOSFileAttributes ),
+ rec( 12, 4, SequenceNumber ),
+ rec( 16, (1, 255), SearchPattern, info_str=(SearchPattern, "Scan a Directory: %s", ", %s") ),
+ ])
+ pkt.Reply(140, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 4, Subdirectory ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 1, UniqueID, ENC_LITTLE_ENDIAN ),
+ rec( 21, 1, PurgeFlags ),
+ rec( 22, 1, DestNameSpace ),
+ rec( 23, 1, NameLen ),
+ rec( 24, 12, Name12 ),
+ rec( 36, 2, CreationTime ),
+ rec( 38, 2, CreationDate ),
+ rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 44, 2, ArchivedTime ),
+ rec( 46, 2, ArchivedDate ),
+ rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ),
+ rec( 52, 2, UpdateTime ),
+ rec( 54, 2, UpdateDate ),
+ rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ),
+ rec( 60, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 64, 44, Reserved44 ),
+ rec( 108, 2, InheritedRightsMask ),
+ rec( 110, 2, LastAccessedDate ),
+ rec( 112, 28, Reserved28 ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8500, 0x9c03])
+ # 2222/161F, 22/31
+ pkt = NCP(0x161F, "Get Directory Entry", 'file')
+ pkt.Request(11, [
+ rec( 10, 1, DirHandle ),
+ ])
+ pkt.Reply(136, [
+ rec( 8, 4, Subdirectory ),
+ rec( 12, 4, AttributesDef32 ),
+ rec( 16, 1, UniqueID, ENC_LITTLE_ENDIAN ),
+ rec( 17, 1, PurgeFlags ),
+ rec( 18, 1, DestNameSpace ),
+ rec( 19, 1, NameLen ),
+ rec( 20, 12, Name12 ),
+ rec( 32, 2, CreationTime ),
+ rec( 34, 2, CreationDate ),
+ rec( 36, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 40, 2, ArchivedTime ),
+ rec( 42, 2, ArchivedDate ),
+ rec( 44, 4, ArchiverID, ENC_BIG_ENDIAN ),
+ rec( 48, 2, UpdateTime ),
+ rec( 50, 2, UpdateDate ),
+ rec( 52, 4, NextTrusteeEntry, ENC_BIG_ENDIAN ),
+ rec( 56, 48, Reserved48 ),
+ rec( 104, 2, MaximumSpace ),
+ rec( 106, 2, InheritedRightsMask ),
+ rec( 108, 28, Undefined28 ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8900, 0xbf00, 0xfb00])
+ # 2222/1620, 22/32
+ pkt = NCP(0x1620, "Scan Volume's User Disk Restrictions", 'file')
+ pkt.Request(15, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, SequenceNumber ),
+ ])
+ pkt.Reply(17, [
+ rec( 8, 1, NumberOfEntries, var="x" ),
+ rec( 9, 8, ObjectIDStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9800])
+ # 2222/1621, 22/33
+ pkt = NCP(0x1621, "Add User Disk Space Restriction", 'file')
+ pkt.Request(19, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, ObjectID ),
+ rec( 15, 4, DiskSpaceLimit ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9800])
+ # 2222/1622, 22/34
+ pkt = NCP(0x1622, "Remove User Disk Space Restrictions", 'file')
+ pkt.Request(15, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, ObjectID ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0xfe0e])
+ # 2222/1623, 22/35
+ pkt = NCP(0x1623, "Get Directory Disk Space Restriction", 'file')
+ pkt.Request(11, [
+ rec( 10, 1, DirHandle ),
+ ])
+ pkt.Reply(18, [
+ rec( 8, 1, NumberOfEntries ),
+ rec( 9, 1, Level ),
+ rec( 10, 4, MaxSpace ),
+ rec( 14, 4, CurrentSpace ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/1624, 22/36
+ pkt = NCP(0x1624, "Set Directory Disk Space Restriction", 'file')
+ pkt.Request(15, [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, DiskSpaceLimit ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0101, 0x8c00, 0xbf00])
+ # 2222/1625, 22/37
+ pkt = NCP(0x1625, "Set Directory Entry Information", 'file')
+ pkt.Request(NO_LENGTH_CHECK, [
+ #
+ # XXX - this didn't match what was in the spec for 22/37
+ # on the Novell Web site.
+ #
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, SearchAttributes ),
+ rec( 12, 4, SequenceNumber ),
+ rec( 16, 2, ChangeBits ),
+ rec( 18, 2, Reserved2 ),
+ rec( 20, 4, Subdirectory ),
+ #srec(DOSDirectoryEntryStruct, req_cond="ncp.search_att_sub == TRUE"),
+ srec(DOSFileEntryStruct, req_cond="ncp.search_att_sub == FALSE"),
+ ])
+ pkt.Reply(8)
+ pkt.ReqCondSizeConstant()
+ pkt.CompletionCodes([0x0000, 0x0106, 0x8c00, 0xbf00])
+ # 2222/1626, 22/38
+ pkt = NCP(0x1626, "Scan File or Directory for Extended Trustees", 'file')
+ pkt.Request((13,267), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, SequenceByte ),
+ rec( 12, (1, 255), Path, info_str=(Path, "Scan for Extended Trustees: %s", ", %s") ),
+ ])
+ pkt.Reply(91, [
+ rec( 8, 1, NumberOfEntries, var="x" ),
+ rec( 9, 4, ObjectID ),
+ rec( 13, 4, ObjectID ),
+ rec( 17, 4, ObjectID ),
+ rec( 21, 4, ObjectID ),
+ rec( 25, 4, ObjectID ),
+ rec( 29, 4, ObjectID ),
+ rec( 33, 4, ObjectID ),
+ rec( 37, 4, ObjectID ),
+ rec( 41, 4, ObjectID ),
+ rec( 45, 4, ObjectID ),
+ rec( 49, 4, ObjectID ),
+ rec( 53, 4, ObjectID ),
+ rec( 57, 4, ObjectID ),
+ rec( 61, 4, ObjectID ),
+ rec( 65, 4, ObjectID ),
+ rec( 69, 4, ObjectID ),
+ rec( 73, 4, ObjectID ),
+ rec( 77, 4, ObjectID ),
+ rec( 81, 4, ObjectID ),
+ rec( 85, 4, ObjectID ),
+ rec( 89, 2, AccessRightsMaskWord, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9800, 0x9b00, 0x9c00])
+ # 2222/1627, 22/39
+ pkt = NCP(0x1627, "Add Extended Trustee to Directory or File", 'file')
+ pkt.Request((18,272), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 15, 2, TrusteeRights ),
+ rec( 17, (1, 255), Path, info_str=(Path, "Add Extended Trustee: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9000])
+ # 2222/1628, 22/40
+ pkt = NCP(0x1628, "Scan Directory Disk Space", 'file')
+ pkt.Request((17,271), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 1, SearchAttributes ),
+ rec( 12, 4, SequenceNumber ),
+ rec( 16, (1, 255), SearchPattern, info_str=(SearchPattern, "Scan Directory Disk Space: %s", ", %s") ),
+ ])
+ pkt.Reply((148), [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 4, Subdirectory ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 1, UniqueID ),
+ rec( 21, 1, PurgeFlags ),
+ rec( 22, 1, DestNameSpace ),
+ rec( 23, 1, NameLen ),
+ rec( 24, 12, Name12 ),
+ rec( 36, 2, CreationTime ),
+ rec( 38, 2, CreationDate ),
+ rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 44, 2, ArchivedTime ),
+ rec( 46, 2, ArchivedDate ),
+ rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ),
+ rec( 52, 2, UpdateTime ),
+ rec( 54, 2, UpdateDate ),
+ rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ),
+ rec( 60, 4, DataForkSize, ENC_BIG_ENDIAN ),
+ rec( 64, 4, DataForkFirstFAT, ENC_BIG_ENDIAN ),
+ rec( 68, 4, NextTrusteeEntry, ENC_BIG_ENDIAN ),
+ rec( 72, 36, Reserved36 ),
+ rec( 108, 2, InheritedRightsMask ),
+ rec( 110, 2, LastAccessedDate ),
+ rec( 112, 4, DeletedFileTime ),
+ rec( 116, 2, DeletedTime ),
+ rec( 118, 2, DeletedDate ),
+ rec( 120, 4, DeletedID, ENC_BIG_ENDIAN ),
+ rec( 124, 8, Undefined8 ),
+ rec( 132, 4, PrimaryEntry, ENC_LITTLE_ENDIAN ),
+ rec( 136, 4, NameList, ENC_LITTLE_ENDIAN ),
+ rec( 140, 4, OtherFileForkSize, ENC_BIG_ENDIAN ),
+ rec( 144, 4, OtherFileForkFAT, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8900, 0x9c03, 0xfb01, 0xff00])
+ # 2222/1629, 22/41
+ pkt = NCP(0x1629, "Get Object Disk Usage and Restrictions", 'file')
+ pkt.Request(15, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, ObjectID, ENC_LITTLE_ENDIAN ),
+ ])
+ pkt.Reply(16, [
+ rec( 8, 4, Restriction ),
+ rec( 12, 4, InUse ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9802])
+ # 2222/162A, 22/42
+ pkt = NCP(0x162A, "Get Effective Rights for Directory Entry", 'file')
+ pkt.Request((12,266), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, (1, 255), Path, info_str=(Path, "Get Effective Rights: %s", ", %s") ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 2, AccessRightsMaskWord ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9804, 0x9c03])
+ # 2222/162B, 22/43
+ pkt = NCP(0x162B, "Remove Extended Trustee from Dir or File", 'file')
+ pkt.Request((17,271), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 15, 1, Unused ),
+ rec( 16, (1, 255), Path, info_str=(Path, "Remove Extended Trustee from %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9002, 0x9c03, 0xfe0f, 0xff09])
+ # 2222/162C, 22/44
+ pkt = NCP(0x162C, "Get Volume and Purge Information", 'file')
+ pkt.Request( 11, [
+ rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Volume and Purge Information for Volume %d", ", %d") )
+ ])
+ pkt.Reply( (38,53), [
+ rec( 8, 4, TotalBlocks ),
+ rec( 12, 4, FreeBlocks ),
+ rec( 16, 4, PurgeableBlocks ),
+ rec( 20, 4, NotYetPurgeableBlocks ),
+ rec( 24, 4, TotalDirectoryEntries ),
+ rec( 28, 4, AvailableDirEntries ),
+ rec( 32, 4, Reserved4 ),
+ rec( 36, 1, SectorsPerBlock ),
+ rec( 37, (1,16), VolumeNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/162D, 22/45
+ pkt = NCP(0x162D, "Get Directory Information", 'file')
+ pkt.Request( 11, [
+ rec( 10, 1, DirHandle )
+ ])
+ pkt.Reply( (30, 45), [
+ rec( 8, 4, TotalBlocks ),
+ rec( 12, 4, AvailableBlocks ),
+ rec( 16, 4, TotalDirectoryEntries ),
+ rec( 20, 4, AvailableDirEntries ),
+ rec( 24, 4, Reserved4 ),
+ rec( 28, 1, SectorsPerBlock ),
+ rec( 29, (1,16), VolumeNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9b03])
+ # 2222/162E, 22/46
+ pkt = NCP(0x162E, "Rename Or Move", 'file')
+ pkt.Request( (17,525), [
+ rec( 10, 1, SourceDirHandle ),
+ rec( 11, 1, SearchAttributes ),
+ rec( 12, 1, SourcePathComponentCount ),
+ rec( 13, (1,255), SourcePath, info_str=(SourcePath, "Rename or Move: %s", ", %s") ),
+ rec( -1, 1, DestDirHandle ),
+ rec( -1, 1, DestPathComponentCount ),
+ rec( -1, (1,255), DestPath ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8701, 0x8b00, 0x8d00, 0x8e00,
+ 0x8f00, 0x9001, 0x9101, 0x9201, 0x9a00, 0x9b03,
+ 0x9c03, 0xa400, 0xff17])
+ # 2222/162F, 22/47
+ pkt = NCP(0x162F, "Get Name Space Information", 'file')
+ pkt.Request( 11, [
+ rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Name Space Information for Volume %d", ", %d") )
+ ])
+ pkt.Reply( (15,523), [
+ #
+ # XXX - why does this not display anything at all
+ # if the stuff after the first IndexNumber is
+ # un-commented? That stuff really is there....
+ #
+ rec( 8, 1, DefinedNameSpaces, var="v" ),
+ rec( 9, (1,255), NameSpaceName, repeat="v" ),
+ rec( -1, 1, DefinedDataStreams, var="w" ),
+ rec( -1, (2,256), DataStreamInfo, repeat="w" ),
+ rec( -1, 1, LoadedNameSpaces, var="x" ),
+ rec( -1, 1, IndexNumber, repeat="x" ),
+# rec( -1, 1, VolumeNameSpaces, var="y" ),
+# rec( -1, 1, IndexNumber, repeat="y" ),
+# rec( -1, 1, VolumeDataStreams, var="z" ),
+# rec( -1, 1, IndexNumber, repeat="z" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9802, 0xff00])
+ # 2222/1630, 22/48
+ pkt = NCP(0x1630, "Get Name Space Directory Entry", 'file')
+ pkt.Request( 16, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, DOSSequence ),
+ rec( 15, 1, SrcNameSpace ),
+ ])
+ pkt.Reply( 112, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 4, Subdirectory ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 1, UniqueID ),
+ rec( 21, 1, Flags ),
+ rec( 22, 1, SrcNameSpace ),
+ rec( 23, 1, NameLength ),
+ rec( 24, 12, Name12 ),
+ rec( 36, 2, CreationTime ),
+ rec( 38, 2, CreationDate ),
+ rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 44, 2, ArchivedTime ),
+ rec( 46, 2, ArchivedDate ),
+ rec( 48, 4, ArchiverID ),
+ rec( 52, 2, UpdateTime ),
+ rec( 54, 2, UpdateDate ),
+ rec( 56, 4, UpdateID ),
+ rec( 60, 4, FileSize ),
+ rec( 64, 44, Reserved44 ),
+ rec( 108, 2, InheritedRightsMask ),
+ rec( 110, 2, LastAccessedDate ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8900, 0x9802, 0xbf00])
+ # 2222/1631, 22/49
+ pkt = NCP(0x1631, "Open Data Stream", 'file')
+ pkt.Request( (15,269), [
+ rec( 10, 1, DataStream ),
+ rec( 11, 1, DirHandle ),
+ rec( 12, 1, AttributesDef ),
+ rec( 13, 1, OpenRights ),
+ rec( 14, (1, 255), FileName, info_str=(FileName, "Open Data Stream: %s", ", %s") ),
+ ])
+ pkt.Reply( 12, [
+ rec( 8, 4, CCFileHandle, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8200, 0x9002, 0xbe00, 0xff00])
+ # 2222/1632, 22/50
+ pkt = NCP(0x1632, "Get Object Effective Rights for Directory Entry", 'file')
+ pkt.Request( (16,270), [
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 14, 1, DirHandle ),
+ rec( 15, (1, 255), Path, info_str=(Path, "Get Object Effective Rights: %s", ", %s") ),
+ ])
+ pkt.Reply( 10, [
+ rec( 8, 2, TrusteeRights ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0x9b00, 0x9c03, 0xfc06])
+ # 2222/1633, 22/51
+ pkt = NCP(0x1633, "Get Extended Volume Information", 'file')
+ pkt.Request( 11, [
+ rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Extended Volume Information for Volume %d", ", %d") ),
+ ])
+ pkt.Reply( (139,266), [
+ rec( 8, 2, VolInfoReplyLen ),
+ rec( 10, 128, VolInfoStructure),
+ rec( 138, (1,128), VolumeNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0x9804, 0xfb08, 0xff00])
+ pkt.MakeExpert("ncp1633_reply")
+ # 2222/1634, 22/52
+ pkt = NCP(0x1634, "Get Mount Volume List", 'file')
+ pkt.Request( 22, [
+ rec( 10, 4, StartVolumeNumber ),
+ rec( 14, 4, VolumeRequestFlags, ENC_LITTLE_ENDIAN ),
+ rec( 18, 4, SrcNameSpace ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 4, ItemsInPacket, var="x" ),
+ rec( 12, 4, NextVolumeNumber ),
+ srec( VolumeStruct, req_cond="ncp.volume_request_flags==0x0000", repeat="x" ),
+ srec( VolumeWithNameStruct, req_cond="ncp.volume_request_flags==0x0001", repeat="x" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x9802])
+ # 2222/1635, 22/53
+ pkt = NCP(0x1635, "Get Volume Capabilities", 'file')
+ pkt.Request( 18, [
+ rec( 10, 4, VolumeNumberLong ),
+ rec( 14, 4, VersionNumberLong ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 4, VolumeCapabilities ),
+ rec( 12, 28, Reserved28 ),
+ rec( 40, 64, VolumeNameStringz ),
+ rec( 104, 128, VolumeGUID ),
+ rec( 232, 256, PoolName ),
+ rec( 488, PROTO_LENGTH_UNKNOWN, VolumeMountPoint ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7700, 0x9802, 0xfb01])
+ # 2222/1636, 22/54
+ pkt = NCP(0x1636, "Add User Disk Space Restriction 64 Bit Aware", 'file')
+ pkt.Request(26, [
+ rec( 10, 4, VolumeNumberLong ),
+ rec( 14, 4, ObjectID, ENC_LITTLE_ENDIAN ),
+ rec( 18, 8, DiskSpaceLimit64 ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9800])
+ # 2222/1637, 22/55
+ pkt = NCP(0x1637, "Get Object Disk Usage and Restrictions 64 Bit Aware", 'file')
+ pkt.Request(18, [
+ rec( 10, 4, VolumeNumberLong ),
+ rec( 14, 4, ObjectID, ENC_LITTLE_ENDIAN ),
+ ])
+ pkt.Reply(24, [
+ rec( 8, 8, RestrictionQuad ),
+ rec( 16, 8, InUse64 ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9802])
+ # 2222/1638, 22/56
+ pkt = NCP(0x1638, "Scan Volume's User Disk Restrictions 64 Bit Aware", 'file')
+ pkt.Request(18, [
+ rec( 10, 4, VolumeNumberLong ),
+ rec( 14, 4, SequenceNumber ),
+ ])
+ pkt.Reply(24, [
+ rec( 8, 4, NumberOfEntriesLong, var="x" ),
+ rec( 12, 12, ObjectIDStruct64, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9800])
+ # 2222/1639, 22/57
+ pkt = NCP(0x1639, "Set Directory Disk Space Restriction 64 Bit Aware", 'file')
+ pkt.Request(26, [
+ rec( 10, 8, DirHandle64 ),
+ rec( 18, 8, DiskSpaceLimit64 ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0101, 0x8c00, 0xbf00])
+ # 2222/163A, 22/58
+ pkt = NCP(0x163A, "Get Directory Information 64 Bit Aware", 'file')
+ pkt.Request( 18, [
+ rec( 10, 8, DirHandle64 )
+ ])
+ pkt.Reply( (49, 64), [
+ rec( 8, 8, TotalBlocks64 ),
+ rec( 16, 8, AvailableBlocks64 ),
+ rec( 24, 8, TotalDirEntries64 ),
+ rec( 32, 8, AvailableDirEntries64 ),
+ rec( 40, 4, Reserved4 ),
+ rec( 44, 4, SectorsPerBlockLong ),
+ rec( 48, (1,16), VolumeNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9b03])
 +        # 2222/1641, 22/65
+# pkt = NCP(0x1641, "Scan Volume's User Disk Restrictions 64-bit Aware", 'file')
+# pkt.Request(18, [
+# rec( 10, 4, VolumeNumberLong ),
+# rec( 14, 4, SequenceNumber ),
+# ])
+# pkt.Reply(24, [
+# rec( 8, 4, NumberOfEntriesLong, var="x" ),
+# rec( 12, 12, ObjectIDStruct64, repeat="x" ),
+# ])
+# pkt.CompletionCodes([0x0000, 0x9800])
+ # 2222/1700, 23/00
+ pkt = NCP(0x1700, "Login User", 'connection')
+ pkt.Request( (12, 58), [
+ rec( 10, (1,16), UserName, info_str=(UserName, "Login User: %s", ", %s") ),
+ rec( -1, (1,32), Password ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc501, 0xd700,
+ 0xd900, 0xda00, 0xdb00, 0xde00, 0xdf00, 0xe800,
+ 0xec00, 0xed00, 0xef00, 0xf001, 0xf100, 0xf200,
+ 0xf600, 0xfb00, 0xfc06, 0xfe07, 0xff00])
+ # 2222/1701, 23/01
+ pkt = NCP(0x1701, "Change User Password", 'bindery')
+ pkt.Request( (13, 90), [
+ rec( 10, (1,16), UserName, info_str=(UserName, "Change Password for User: %s", ", %s") ),
+ rec( -1, (1,32), Password ),
+ rec( -1, (1,32), NewPassword ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xd600, 0xf001, 0xf101, 0xf501,
+ 0xfc06, 0xfe07, 0xff00])
+ # 2222/1702, 23/02
+ pkt = NCP(0x1702, "Get User Connection List", 'connection')
+ pkt.Request( (11, 26), [
+ rec( 10, (1,16), UserName, info_str=(UserName, "Get User Connection: %s", ", %s") ),
+ ])
+ pkt.Reply( (9, 136), [
+ rec( 8, (1, 128), ConnectionNumberList ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
+ # 2222/1703, 23/03
+ pkt = NCP(0x1703, "Get User Number", 'bindery')
+ pkt.Request( (11, 26), [
+ rec( 10, (1,16), UserName, info_str=(UserName, "Get User Number: %s", ", %s") ),
+ ])
+ pkt.Reply( 12, [
+ rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
+ # 2222/1705, 23/05
+ pkt = NCP(0x1705, "Get Station's Logged Info", 'connection')
+ pkt.Request( 11, [
+ rec( 10, 1, TargetConnectionNumber, info_str=(TargetConnectionNumber, "Get Station's Logged Information on Connection %d", ", %d") ),
+ ])
+ pkt.Reply( 266, [
+ rec( 8, 16, UserName16 ),
+ rec( 24, 7, LoginTime ),
+ rec( 31, 39, FullName ),
+ rec( 70, 4, UserID, ENC_BIG_ENDIAN ),
+ rec( 74, 128, SecurityEquivalentList ),
+ rec( 202, 64, Reserved64 ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9602, 0xfc06, 0xfd00, 0xfe07, 0xff00])
+ # 2222/1707, 23/07
+ pkt = NCP(0x1707, "Get Group Number", 'bindery')
+ pkt.Request( 14, [
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply( 62, [
+ rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, 48, ObjectNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9602, 0xf101, 0xfc06, 0xfe07, 0xff00])
+ # 2222/170C, 23/12
+ pkt = NCP(0x170C, "Verify Serialization", 'fileserver')
+ pkt.Request( 14, [
+ rec( 10, 4, ServerSerialNumber ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff00])
+ # 2222/170D, 23/13
+ pkt = NCP(0x170D, "Log Network Message", 'file')
+ pkt.Request( (11, 68), [
+ rec( 10, (1, 58), TargetMessage, info_str=(TargetMessage, "Log Network Message: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8100, 0x8800, 0x8d00, 0x8e00, 0x8f00,
+ 0x9001, 0x9400, 0x9600, 0x9804, 0x9900, 0x9b00, 0xa100,
+ 0xa201, 0xff00])
+ # 2222/170E, 23/14
+ pkt = NCP(0x170E, "Get Disk Utilization", 'fileserver')
+ pkt.Request( 15, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply( 19, [
+ rec( 8, 1, VolumeNumber ),
+ rec( 9, 4, TrusteeID, ENC_BIG_ENDIAN ),
+ rec( 13, 2, DirectoryCount, ENC_BIG_ENDIAN ),
+ rec( 15, 2, FileCount, ENC_BIG_ENDIAN ),
+ rec( 17, 2, ClusterCount, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0xa100, 0xf200])
+ # 2222/170F, 23/15
+ pkt = NCP(0x170F, "Scan File Information", 'file')
+ pkt.Request((15,269), [
+ rec( 10, 2, LastSearchIndex ),
+ rec( 12, 1, DirHandle ),
+ rec( 13, 1, SearchAttributes ),
+ rec( 14, (1, 255), FileName, info_str=(FileName, "Scan File Information: %s", ", %s") ),
+ ])
+ pkt.Reply( 102, [
+ rec( 8, 2, NextSearchIndex ),
+ rec( 10, 14, FileName14 ),
+ rec( 24, 2, AttributesDef16 ),
+ rec( 26, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 30, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 32, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 34, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 36, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ rec( 38, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 42, 2, ArchivedDate, ENC_BIG_ENDIAN ),
+ rec( 44, 2, ArchivedTime, ENC_BIG_ENDIAN ),
+ rec( 46, 56, Reserved56 ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8800, 0x8900, 0x9300, 0x9400, 0x9804, 0x9b00, 0x9c00,
+ 0xa100, 0xfd00, 0xff17])
+ # 2222/1710, 23/16
+ pkt = NCP(0x1710, "Set File Information", 'file')
+ pkt.Request((91,345), [
+ rec( 10, 2, AttributesDef16 ),
+ rec( 12, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 16, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 18, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 20, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 22, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 28, 2, ArchivedDate, ENC_BIG_ENDIAN ),
+ rec( 30, 2, ArchivedTime, ENC_BIG_ENDIAN ),
+ rec( 32, 56, Reserved56 ),
+ rec( 88, 1, DirHandle ),
+ rec( 89, 1, SearchAttributes ),
+ rec( 90, (1, 255), FileName, info_str=(FileName, "Set Information for File: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x8c00, 0x8e00, 0x9400, 0x9600, 0x9804,
+ 0x9b03, 0x9c00, 0xa100, 0xa201, 0xfc06, 0xfd00, 0xfe07,
+ 0xff17])
+ # 2222/1711, 23/17
+ pkt = NCP(0x1711, "Get File Server Information", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(136, [
+ rec( 8, 48, ServerName ),
+ rec( 56, 1, OSMajorVersion ),
+ rec( 57, 1, OSMinorVersion ),
+ rec( 58, 2, ConnectionsSupportedMax, ENC_BIG_ENDIAN ),
+ rec( 60, 2, ConnectionsInUse, ENC_BIG_ENDIAN ),
+ rec( 62, 2, VolumesSupportedMax, ENC_BIG_ENDIAN ),
+ rec( 64, 1, OSRevision ),
+ rec( 65, 1, SFTSupportLevel ),
+ rec( 66, 1, TTSLevel ),
+ rec( 67, 2, ConnectionsMaxUsed, ENC_BIG_ENDIAN ),
+ rec( 69, 1, AccountVersion ),
+ rec( 70, 1, VAPVersion ),
+ rec( 71, 1, QueueingVersion ),
+ rec( 72, 1, PrintServerVersion ),
+ rec( 73, 1, VirtualConsoleVersion ),
+ rec( 74, 1, SecurityRestrictionVersion ),
+ rec( 75, 1, InternetBridgeVersion ),
+ rec( 76, 1, MixedModePathFlag ),
+ rec( 77, 1, LocalLoginInfoCcode ),
+ rec( 78, 2, ProductMajorVersion, ENC_BIG_ENDIAN ),
+ rec( 80, 2, ProductMinorVersion, ENC_BIG_ENDIAN ),
+ rec( 82, 2, ProductRevisionVersion, ENC_BIG_ENDIAN ),
+ rec( 84, 1, OSLanguageID, ENC_LITTLE_ENDIAN ),
+ rec( 85, 1, SixtyFourBitOffsetsSupportedFlag ),
+ rec( 86, 1, OESServer ),
+ rec( 87, 1, OESLinuxOrNetWare ),
+ rec( 88, 48, Reserved48 ),
+ ])
+ pkt.MakeExpert("ncp1711_reply")
+ pkt.CompletionCodes([0x0000, 0x9600])
+ # 2222/1712, 23/18
+ pkt = NCP(0x1712, "Get Network Serial Number", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(14, [
+ rec( 8, 4, ServerSerialNumber ),
+ rec( 12, 2, ApplicationNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600])
+ # 2222/1713, 23/19
+ pkt = NCP(0x1713, "Get Internet Address", 'connection')
+ pkt.Request(11, [
+ rec( 10, 1, TargetConnectionNumber, info_str=(TargetConnectionNumber, "Get Internet Address for Connection %d", ", %d") ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, NetworkAddress, ENC_BIG_ENDIAN ),
+ rec( 12, 6, NetworkNodeAddress ),
+ rec( 18, 2, NetworkSocket, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xff00])
+ # 2222/1714, 23/20
+ pkt = NCP(0x1714, "Login Object", 'connection')
+ pkt.Request( (14, 60), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,16), ClientName, info_str=(ClientName, "Login Object: %s", ", %s") ),
+ rec( -1, (1,32), Password ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc501, 0xd600, 0xd700,
+ 0xd900, 0xda00, 0xdb00, 0xde00, 0xdf00, 0xe800, 0xec00,
+ 0xed00, 0xef00, 0xf001, 0xf100, 0xf200, 0xf600, 0xfb00,
+ 0xfc06, 0xfe07, 0xff00])
+ # 2222/1715, 23/21
+ pkt = NCP(0x1715, "Get Object Connection List", 'connection')
+ pkt.Request( (13, 28), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,16), ObjectName, info_str=(ObjectName, "Get Object Connection List: %s", ", %s") ),
+ ])
+ pkt.Reply( (9, 136), [
+ rec( 8, (1, 128), ConnectionNumberList ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
+ # 2222/1716, 23/22
+ pkt = NCP(0x1716, "Get Station's Logged Info", 'connection')
+ pkt.Request( 11, [
+ rec( 10, 1, TargetConnectionNumber ),
+ ])
+ pkt.Reply( 70, [
+ rec( 8, 4, UserID, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, 48, ObjectNameLen ),
+ rec( 62, 7, LoginTime ),
+ rec( 69, 1, Reserved ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9602, 0xfb0a, 0xfc06, 0xfd00, 0xfe07, 0xff00])
+ # 2222/1717, 23/23
+ pkt = NCP(0x1717, "Get Login Key", 'connection')
+ pkt.Request(10)
+ pkt.Reply( 16, [
+ rec( 8, 8, LoginKey ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9602])
+ # 2222/1718, 23/24
+ pkt = NCP(0x1718, "Keyed Object Login", 'connection')
+ pkt.Request( (21, 68), [
+ rec( 10, 8, LoginKey ),
+ rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Object Login: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc500, 0xd904, 0xda00,
+ 0xdb00, 0xdc00, 0xde00, 0xff00])
+ # 2222/171A, 23/26
+ pkt = NCP(0x171A, "Get Internet Address", 'connection')
+ pkt.Request(12, [
+ rec( 10, 2, TargetConnectionNumber ),
+ ])
+# Dissect reply in packet-ncp2222.inc
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/171B, 23/27
+ pkt = NCP(0x171B, "Get Object Connection List", 'connection')
+ pkt.Request( (17,64), [
+ rec( 10, 4, SearchConnNumber ),
+ rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Get Object Connection List: %s", ", %s") ),
+ ])
+ pkt.Reply( (13), [
+ rec( 8, 1, ConnListLen, var="x" ),
+ rec( 9, 4, ConnectionNumber, ENC_LITTLE_ENDIAN, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
+ # 2222/171C, 23/28
+ pkt = NCP(0x171C, "Get Station's Logged Info", 'connection')
+ pkt.Request( 14, [
+ rec( 10, 4, TargetConnectionNumber ),
+ ])
+ pkt.Reply( 70, [
+ rec( 8, 4, UserID, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, 48, ObjectNameLen ),
+ rec( 62, 7, LoginTime ),
+ rec( 69, 1, Reserved ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7d00, 0x9602, 0xfb02, 0xfc06, 0xfd00, 0xfe07, 0xff00])
+ # 2222/171D, 23/29
+ pkt = NCP(0x171D, "Change Connection State", 'connection')
+ pkt.Request( 11, [
+ rec( 10, 1, RequestCode ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0109, 0x7a00, 0x7b00, 0x7c00, 0xe000, 0xfb06, 0xfd00])
+ # 2222/171E, 23/30
+ pkt = NCP(0x171E, "Set Watchdog Delay Interval", 'connection')
+ pkt.Request( 14, [
+ rec( 10, 4, NumberOfMinutesToDelay ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0107])
+ # 2222/171F, 23/31
+ pkt = NCP(0x171F, "Get Connection List From Object", 'connection')
+ pkt.Request( 18, [
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 14, 4, ConnectionNumber ),
+ ])
+ pkt.Reply( (9, 136), [
+ rec( 8, (1, 128), ConnectionNumberList ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
+ # 2222/1720, 23/32
+ pkt = NCP(0x1720, "Scan Bindery Object (List)", 'bindery')
+ pkt.Request((23,70), [
+ rec( 10, 4, NextObjectID, ENC_BIG_ENDIAN ),
+ rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 16, 2, Reserved2 ),
+ rec( 18, 4, InfoFlags ),
+ rec( 22, (1,48), ObjectName, info_str=(ObjectName, "Scan Bindery Object: %s", ", %s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, ObjectInfoReturnCount ),
+ rec( 12, 4, NextObjectID, ENC_BIG_ENDIAN ),
+ rec( 16, 4, ObjectID ),
+ srec(ObjectTypeStruct, req_cond="ncp.info_flags_type == TRUE"),
+ srec(ObjectSecurityStruct, req_cond="ncp.info_flags_security == TRUE"),
+ srec(ObjectFlagsStruct, req_cond="ncp.info_flags_flags == TRUE"),
+ srec(ObjectNameStruct, req_cond="ncp.info_flags_name == TRUE"),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xfc02, 0xfe01, 0xff00])
+ # 2222/1721, 23/33
+ pkt = NCP(0x1721, "Generate GUIDs", 'connection')
+ pkt.Request( 14, [
+ rec( 10, 4, ReturnInfoCount ),
+ ])
+ pkt.Reply(28, [
+ rec( 8, 4, ReturnInfoCount, var="x" ),
+ rec( 12, 16, GUID, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01])
+# 2222/1722, 23/34
+ pkt = NCP(0x1722, "Set Connection Language Encoding", 'connection')
+ pkt.Request( 22, [
+ rec( 10, 4, SetMask ),
+ rec( 14, 4, NCPEncodedStringsBits ),
+ rec( 18, 4, CodePage ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/1732, 23/50
+ pkt = NCP(0x1732, "Create Bindery Object", 'bindery')
+ pkt.Request( (15,62), [
+ rec( 10, 1, ObjectFlags ),
+ rec( 11, 1, ObjectSecurity ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, (1,48), ObjectName, info_str=(ObjectName, "Create Bindery Object: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xe700, 0xee00, 0xef00, 0xf101, 0xf501,
+ 0xfc06, 0xfe07, 0xff00])
+ # 2222/1733, 23/51
+ pkt = NCP(0x1733, "Delete Bindery Object", 'bindery')
+ pkt.Request( (13,60), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Delete Bindery Object: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf200, 0xf400, 0xf600, 0xfb00,
+ 0xfc06, 0xfe07, 0xff00])
+ # 2222/1734, 23/52
+ pkt = NCP(0x1734, "Rename Bindery Object", 'bindery')
+ pkt.Request( (14,108), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Rename Bindery Object: %s", ", %s") ),
+ rec( -1, (1,48), NewObjectName ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xee00, 0xf000, 0xf300, 0xfc06, 0xfe07, 0xff00])
+ # 2222/1735, 23/53
+ pkt = NCP(0x1735, "Get Bindery Object ID", 'bindery')
+ pkt.Request((13,60), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Get Bindery Object: %s", ", %s") ),
+ ])
+ pkt.Reply(62, [
+ rec( 8, 4, ObjectID, ENC_LITTLE_ENDIAN ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, 48, ObjectNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xf000, 0xfc02, 0xfe01, 0xff00])
+ # 2222/1736, 23/54
+ pkt = NCP(0x1736, "Get Bindery Object Name", 'bindery')
+ pkt.Request( 14, [
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply( 62, [
+ rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, 48, ObjectNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf101, 0xfc02, 0xfe01, 0xff00])
+ # 2222/1737, 23/55
+ pkt = NCP(0x1737, "Scan Bindery Object", 'bindery')
+ pkt.Request((17,64), [
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Scan Bindery Object: %s", ", %s") ),
+ ])
+ pkt.Reply(65, [
+ rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, 48, ObjectNameLen ),
+ rec( 62, 1, ObjectFlags ),
+ rec( 63, 1, ObjectSecurity ),
+ rec( 64, 1, ObjectHasProperties ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xfc02,
+ 0xfe01, 0xff00])
+ # 2222/1738, 23/56
+ pkt = NCP(0x1738, "Change Bindery Object Security", 'bindery')
+ pkt.Request((14,61), [
+ rec( 10, 1, ObjectSecurity ),
+ rec( 11, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 13, (1,48), ObjectName, info_str=(ObjectName, "Change Bindery Object Security: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf501, 0xfc02, 0xfe01, 0xff00])
+ # 2222/1739, 23/57
+ pkt = NCP(0x1739, "Create Property", 'bindery')
+ pkt.Request((16,78), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, 1, PropertyType ),
+ rec( -1, 1, ObjectSecurity ),
+ rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Create Property: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xed00, 0xef00, 0xf000, 0xf101,
+ 0xf200, 0xf600, 0xf700, 0xfb00, 0xfc02, 0xfe01,
+ 0xff00])
+ # 2222/173A, 23/58
+ pkt = NCP(0x173A, "Delete Property", 'bindery')
+ pkt.Request((14,76), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Delete Property: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf600, 0xfb00, 0xfc02,
+ 0xfe01, 0xff00])
+ # 2222/173B, 23/59
+ pkt = NCP(0x173B, "Change Property Security", 'bindery')
+ pkt.Request((15,77), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, 1, ObjectSecurity ),
+ rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Change Property Security: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf200, 0xf600, 0xfb00,
+ 0xfc02, 0xfe01, 0xff00])
+ # 2222/173C, 23/60
+ pkt = NCP(0x173C, "Scan Property", 'bindery')
+ pkt.Request((18,80), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, 4, LastInstance, ENC_BIG_ENDIAN ),
+ rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Scan Property: %s", ", %s") ),
+ ])
+ pkt.Reply( 32, [
+ rec( 8, 16, PropertyName16 ),
+ rec( 24, 1, ObjectFlags ),
+ rec( 25, 1, ObjectSecurity ),
+ rec( 26, 4, SearchInstance, ENC_BIG_ENDIAN ),
+ rec( 30, 1, ValueAvailable ),
+ rec( 31, 1, MoreProperties ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf200, 0xf600, 0xfb00,
+ 0xfc02, 0xfe01, 0xff00])
+ # 2222/173D, 23/61
+ pkt = NCP(0x173D, "Read Property Value", 'bindery')
+ pkt.Request((15,77), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, 1, PropertySegment ),
+ rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Read Property Value: %s", ", %s") ),
+ ])
+ pkt.Reply(138, [
+ rec( 8, 128, PropertyData ),
+ rec( 136, 1, PropertyHasMoreSegments ),
+ rec( 137, 1, PropertyType ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9300, 0x9600, 0xec01,
+ 0xf000, 0xf100, 0xf900, 0xfb02, 0xfc02,
+ 0xfe01, 0xff00])
+ # 2222/173E, 23/62
+ pkt = NCP(0x173E, "Write Property Value", 'bindery')
+ pkt.Request((144,206), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, 1, PropertySegment ),
+ rec( -1, 1, MoreFlag ),
+ rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Write Property Value: %s", ", %s") ),
+ #
+ # XXX - don't show this if MoreFlag isn't set?
+ # In at least some packets where it's not set,
+ # PropertyValue appears to be garbage.
+ #
+ rec( -1, 128, PropertyValue ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xec01, 0xf000, 0xf800,
+ 0xfb02, 0xfc03, 0xfe01, 0xff00 ])
+ # 2222/173F, 23/63
+ pkt = NCP(0x173F, "Verify Bindery Object Password", 'bindery')
+ pkt.Request((14,92), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Verify Bindery Object Password: %s", ", %s") ),
+ rec( -1, (1,32), Password ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xec01, 0xf000, 0xf101,
+ 0xfb02, 0xfc03, 0xfe01, 0xff00 ])
+ # 2222/1740, 23/64
+ pkt = NCP(0x1740, "Change Bindery Object Password", 'bindery')
+ pkt.Request((15,124), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Change Bindery Object Password: %s", ", %s") ),
+ rec( -1, (1,32), Password ),
+ rec( -1, (1,32), NewPassword ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc501, 0xd701, 0xe800, 0xec01, 0xf001,
+ 0xf100, 0xf800, 0xfb02, 0xfc03, 0xfe01, 0xff00])
+ # 2222/1741, 23/65
+ pkt = NCP(0x1741, "Add Bindery Object To Set", 'bindery')
+ pkt.Request((17,126), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, (1,16), PropertyName ),
+ rec( -1, 2, MemberType, ENC_BIG_ENDIAN ),
+ rec( -1, (1,48), MemberName, info_str=(MemberName, "Add Bindery Object to Set: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xe900, 0xea00, 0xeb00,
+ 0xec01, 0xf000, 0xf800, 0xfb02, 0xfc03, 0xfe01,
+ 0xff00])
+ # 2222/1742, 23/66
+ pkt = NCP(0x1742, "Delete Bindery Object From Set", 'bindery')
+ pkt.Request((17,126), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, (1,16), PropertyName ),
+ rec( -1, 2, MemberType, ENC_BIG_ENDIAN ),
+ rec( -1, (1,48), MemberName, info_str=(MemberName, "Delete Bindery Object from Set: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xeb00, 0xf000, 0xf800, 0xfb02,
+ 0xfc03, 0xfe01, 0xff00])
+ # 2222/1743, 23/67
+ pkt = NCP(0x1743, "Is Bindery Object In Set", 'bindery')
+ pkt.Request((17,126), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName ),
+ rec( -1, (1,16), PropertyName ),
+ rec( -1, 2, MemberType, ENC_BIG_ENDIAN ),
+ rec( -1, (1,48), MemberName, info_str=(MemberName, "Is Bindery Object in Set: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xea00, 0xeb00, 0xec01, 0xf000,
+ 0xfb02, 0xfc03, 0xfe01, 0xff00])
+ # 2222/1744, 23/68
+ pkt = NCP(0x1744, "Close Bindery", 'bindery')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff00])
+ # 2222/1745, 23/69
+ pkt = NCP(0x1745, "Open Bindery", 'bindery')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff00])
+ # 2222/1746, 23/70
+ pkt = NCP(0x1746, "Get Bindery Access Level", 'bindery')
+ pkt.Request(10)
+ pkt.Reply(13, [
+ rec( 8, 1, ObjectSecurity ),
+ rec( 9, 4, LoggedObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600])
+ # 2222/1747, 23/71
+ pkt = NCP(0x1747, "Scan Bindery Object Trustee Paths", 'bindery')
+ pkt.Request(17, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 2, LastSequenceNumber, ENC_BIG_ENDIAN ),
+ rec( 13, 4, ObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply((16,270), [
+ rec( 8, 2, LastSequenceNumber, ENC_BIG_ENDIAN),
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ rec( 14, 1, ObjectSecurity ),
+ rec( 15, (1,255), Path ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9300, 0x9600, 0xa100, 0xf000, 0xf100,
+ 0xf200, 0xfc02, 0xfe01, 0xff00])
+ # 2222/1748, 23/72
+ pkt = NCP(0x1748, "Get Bindery Object Access Level", 'bindery')
+ pkt.Request(14, [
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(9, [
+ rec( 8, 1, ObjectSecurity ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600])
+ # 2222/1749, 23/73
+ pkt = NCP(0x1749, "Is Calling Station a Manager", 'bindery')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0003, 0xff1e])
+ # 2222/174A, 23/74
+ pkt = NCP(0x174A, "Keyed Verify Password", 'bindery')
+ pkt.Request((21,68), [
+ rec( 10, 8, LoginKey ),
+ rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Verify Password: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc500, 0xfe01, 0xff0c])
+ # 2222/174B, 23/75
+ pkt = NCP(0x174B, "Keyed Change Password", 'bindery')
+ pkt.Request((22,100), [
+ rec( 10, 8, LoginKey ),
+ rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Change Password: %s", ", %s") ),
+ rec( -1, (1,32), Password ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc500, 0xfe01, 0xff0c])
+ # 2222/174C, 23/76
+ pkt = NCP(0x174C, "List Relations Of an Object", 'bindery')
+ pkt.Request((18,80), [
+ rec( 10, 4, LastSeen, ENC_BIG_ENDIAN ),
+ rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 16, (1,48), ObjectName, info_str=(ObjectName, "List Relations of an Object: %s", ", %s") ),
+ rec( -1, (1,16), PropertyName ),
+ ])
+ pkt.Reply(14, [
+ rec( 8, 2, RelationsCount, ENC_BIG_ENDIAN, var="x" ),
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xf000, 0xf200, 0xfe01, 0xff00])
+ # 2222/1764, 23/100
+ pkt = NCP(0x1764, "Create Queue", 'qms')
+ pkt.Request((15,316), [
+ rec( 10, 2, QueueType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), QueueName, info_str=(QueueName, "Create Queue: %s", ", %s") ),
+ rec( -1, 1, PathBase ),
+ rec( -1, (1,255), Path ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, QueueID ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9900, 0xd000, 0xd100,
+ 0xd200, 0xd300, 0xd400, 0xd500, 0xd601,
+ 0xd703, 0xd800, 0xd902, 0xda01, 0xdb02,
+ 0xee00, 0xff00])
+ # 2222/1765, 23/101
+ pkt = NCP(0x1765, "Destroy Queue", 'qms')
+ pkt.Request(14, [
+ rec( 10, 4, QueueID ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1766, 23/102
+ pkt = NCP(0x1766, "Read Queue Current Status", 'qms')
+ pkt.Request(14, [
+ rec( 10, 4, QueueID ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, QueueID ),
+ rec( 12, 1, QueueStatus ),
+ rec( 13, 1, CurrentEntries ),
+ rec( 14, 1, CurrentServers, var="x" ),
+ rec( 15, 4, ServerID, repeat="x" ),
+ rec( 19, 1, ServerStationList, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1767, 23/103
+ pkt = NCP(0x1767, "Set Queue Current Status", 'qms')
+ pkt.Request(15, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 1, QueueStatus ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07,
+ 0xff00])
+ # 2222/1768, 23/104
+ pkt = NCP(0x1768, "Create Queue Job And File", 'qms')
+ pkt.Request(264, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 250, JobStruct ),
+ ])
+ pkt.Reply(62, [
+ rec( 8, 1, ClientStation ),
+ rec( 9, 1, ClientTaskNumber ),
+ rec( 10, 4, ClientIDNumber, ENC_BIG_ENDIAN ),
+ rec( 14, 4, TargetServerIDNumber, ENC_BIG_ENDIAN ),
+ rec( 18, 6, TargetExecutionTime ),
+ rec( 24, 6, JobEntryTime ),
+ rec( 30, 2, JobNumber, ENC_BIG_ENDIAN ),
+ rec( 32, 2, JobType, ENC_BIG_ENDIAN ),
+ rec( 34, 1, JobPosition ),
+ rec( 35, 1, JobControlFlags ),
+ rec( 36, 14, JobFileName ),
+ rec( 50, 6, JobFileHandle ),
+ rec( 56, 1, ServerStation ),
+ rec( 57, 1, ServerTaskNumber ),
+ rec( 58, 4, ServerID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07,
+ 0xff00])
+ # 2222/1769, 23/105
+ pkt = NCP(0x1769, "Close File And Start Queue Job", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/176A, 23/106
+ pkt = NCP(0x176A, "Remove Job From Queue", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/176B, 23/107
+ pkt = NCP(0x176B, "Get Queue Job List", 'qms')
+ pkt.Request(14, [
+ rec( 10, 4, QueueID ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 2, JobCount, ENC_BIG_ENDIAN, var="x" ),
+ rec( 10, 2, JobNumber, ENC_BIG_ENDIAN, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/176C, 23/108
+ pkt = NCP(0x176C, "Read Queue Job Entry", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(258, [
+ rec( 8, 250, JobStruct ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/176D, 23/109
+ pkt = NCP(0x176D, "Change Queue Job Entry", 'qms')
+ pkt.Request(260, [
+ rec( 14, 250, JobStruct ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff18])
+ # 2222/176E, 23/110
+ pkt = NCP(0x176E, "Change Queue Job Position", 'qms')
+ pkt.Request(17, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ rec( 16, 1, NewPosition ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xd000, 0xd100, 0xd300, 0xd500,
+ 0xd601, 0xfe07, 0xff1f])
+ # 2222/176F, 23/111
+ pkt = NCP(0x176F, "Attach Queue Server To Queue", 'qms')
+ pkt.Request(14, [
+ rec( 10, 4, QueueID ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xea00,
+ 0xfc06, 0xff00])
+ # 2222/1770, 23/112
+ pkt = NCP(0x1770, "Detach Queue Server From Queue", 'qms')
+ pkt.Request(14, [
+ rec( 10, 4, QueueID ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1771, 23/113
+ pkt = NCP(0x1771, "Service Queue Job", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, ServiceType, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(62, [
+ rec( 8, 1, ClientStation ),
+ rec( 9, 1, ClientTaskNumber ),
+ rec( 10, 4, ClientIDNumber, ENC_BIG_ENDIAN ),
+ rec( 14, 4, TargetServerIDNumber, ENC_BIG_ENDIAN ),
+ rec( 18, 6, TargetExecutionTime ),
+ rec( 24, 6, JobEntryTime ),
+ rec( 30, 2, JobNumber, ENC_BIG_ENDIAN ),
+ rec( 32, 2, JobType, ENC_BIG_ENDIAN ),
+ rec( 34, 1, JobPosition ),
+ rec( 35, 1, JobControlFlags ),
+ rec( 36, 14, JobFileName ),
+ rec( 50, 6, JobFileHandle ),
+ rec( 56, 1, ServerStation ),
+ rec( 57, 1, ServerTaskNumber ),
+ rec( 58, 4, ServerID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1772, 23/114
+ pkt = NCP(0x1772, "Finish Servicing Queue Job", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ rec( 16, 2, ChargeInformation, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
+ # 2222/1773, 23/115
+ pkt = NCP(0x1773, "Abort Servicing Queue Job", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff18])
+ # 2222/1774, 23/116
+ pkt = NCP(0x1774, "Change To Client Rights", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff18])
+ # 2222/1775, 23/117
+ pkt = NCP(0x1775, "Restore Queue Server Rights", 'qms')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1776, 23/118
+ pkt = NCP(0x1776, "Read Queue Server Current Status", 'qms')
+ pkt.Request(19, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, ServerID, ENC_BIG_ENDIAN ),
+ rec( 18, 1, ServerStation ),
+ ])
+ pkt.Reply(72, [
+ rec( 8, 64, ServerStatusRecord ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1777, 23/119
+ pkt = NCP(0x1777, "Set Queue Server Current Status", 'qms')
+ pkt.Request(78, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 64, ServerStatusRecord ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1778, 23/120
+ pkt = NCP(0x1778, "Get Queue Job File Size", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(18, [
+ rec( 8, 4, QueueID ),
+ rec( 12, 2, JobNumber, ENC_BIG_ENDIAN ),
+ rec( 14, 4, FileSize, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
+ # 2222/1779, 23/121
+ pkt = NCP(0x1779, "Create Queue Job And File", 'qms')
+ pkt.Request(264, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 250, JobStruct3x ),
+ ])
+ pkt.Reply(94, [
+ rec( 8, 86, JobStructNew ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
+ # 2222/177A, 23/122
+ pkt = NCP(0x177A, "Read Queue Job Entry", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ ])
+ pkt.Reply(258, [
+ rec( 8, 250, JobStruct3x ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/177B, 23/123
+ pkt = NCP(0x177B, "Change Queue Job Entry", 'qms')
+ pkt.Request(264, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 250, JobStruct ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xea02, 0xfc07, 0xff00])
+ # 2222/177C, 23/124
+ pkt = NCP(0x177C, "Service Queue Job", 'qms')
+ pkt.Request(16, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 2, ServiceType ),
+ ])
+ pkt.Reply(94, [
+ rec( 8, 86, JobStructNew ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00])
+ # 2222/177D, 23/125
+ pkt = NCP(0x177D, "Read Queue Current Status", 'qms')
+ pkt.Request(14, [
+ rec( 10, 4, QueueID ),
+ ])
+ pkt.Reply(32, [
+ rec( 8, 4, QueueID ),
+ rec( 12, 1, QueueStatus ),
+ rec( 13, 3, Reserved3 ),
+ rec( 16, 4, CurrentEntries ),
+ rec( 20, 4, CurrentServers, var="x" ),
+ rec( 24, 4, ServerID, repeat="x" ),
+ rec( 28, 4, ServerStationLong, ENC_LITTLE_ENDIAN, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/177E, 23/126
+ pkt = NCP(0x177E, "Set Queue Current Status", 'qms')
+ pkt.Request(15, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 1, QueueStatus ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/177F, 23/127
+ pkt = NCP(0x177F, "Close File And Start Queue Job", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
+ # 2222/1780, 23/128
+ pkt = NCP(0x1780, "Remove Job From Queue", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1781, 23/129
+ pkt = NCP(0x1781, "Get Queue Job List", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, TotalQueueJobs ),
+ rec( 12, 4, ReplyQueueJobNumbers, var="x" ),
+ rec( 16, 4, JobNumberLong, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1782, 23/130
+ pkt = NCP(0x1782, "Change Job Priority", 'qms')
+ pkt.Request(22, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ rec( 18, 4, Priority ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1783, 23/131
+ pkt = NCP(0x1783, "Finish Servicing Queue Job", 'qms')
+ pkt.Request(22, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ rec( 18, 4, ChargeInformation ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00])
+ # 2222/1784, 23/132
+ pkt = NCP(0x1784, "Abort Servicing Queue Job", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff18])
+ # 2222/1785, 23/133
+ pkt = NCP(0x1785, "Change To Client Rights", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff18])
+ # 2222/1786, 23/134
+ pkt = NCP(0x1786, "Read Queue Server Current Status", 'qms')
+ pkt.Request(22, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, ServerID, ENC_BIG_ENDIAN ),
+ rec( 18, 4, ServerStation ),
+ ])
+ pkt.Reply(72, [
+ rec( 8, 64, ServerStatusRecord ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
+ # 2222/1787, 23/135
+ pkt = NCP(0x1787, "Get Queue Job File Size", 'qms')
+ pkt.Request(18, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, QueueID ),
+ rec( 12, 4, JobNumberLong ),
+ rec( 16, 4, FileSize, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
+ 0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
+ 0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00])
+ # 2222/1788, 23/136
+ pkt = NCP(0x1788, "Move Queue Job From Src Q to Dst Q", 'qms')
+ pkt.Request(22, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, JobNumberLong ),
+ rec( 18, 4, DstQueueID ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, JobNumberLong ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfc06])
+ # 2222/1789, 23/137
+ pkt = NCP(0x1789, "Get Queue Jobs From Form List", 'qms')
+ pkt.Request(24, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, QueueStartPosition ),
+ rec( 18, 4, FormTypeCnt, ENC_LITTLE_ENDIAN, var="x" ),
+ rec( 22, 2, FormType, repeat="x" ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, TotalQueueJobs ),
+ rec( 12, 4, JobCount, var="x" ),
+ rec( 16, 4, JobNumberLong, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xd300, 0xfc06])
+ # 2222/178A, 23/138
+ pkt = NCP(0x178A, "Service Queue Job By Form List", 'qms')
+ pkt.Request(24, [
+ rec( 10, 4, QueueID ),
+ rec( 14, 4, QueueStartPosition ),
+ rec( 18, 4, FormTypeCnt, ENC_LITTLE_ENDIAN, var= "x" ),
+ rec( 22, 2, FormType, repeat="x" ),
+ ])
+ pkt.Reply(94, [
+ rec( 8, 86, JobStructNew ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xd902, 0xfc06, 0xff00])
+ # 2222/1796, 23/150
+ pkt = NCP(0x1796, "Get Current Account Status", 'accounting')
+ pkt.Request((13,60), [
+ rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Get Current Account Status: %s", ", %s") ),
+ ])
+ pkt.Reply(264, [
+ rec( 8, 4, AccountBalance, ENC_BIG_ENDIAN ),
+ rec( 12, 4, CreditLimit, ENC_BIG_ENDIAN ),
+ rec( 16, 120, Reserved120 ),
+ rec( 136, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 140, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 144, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 148, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 152, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 156, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 160, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 164, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 168, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 172, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 176, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 180, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 184, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 188, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 192, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 196, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 200, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 204, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 208, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 212, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 216, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 220, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 224, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 228, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 232, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 236, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 240, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 244, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 248, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 252, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ rec( 256, 4, HolderID, ENC_BIG_ENDIAN ),
+ rec( 260, 4, HoldAmount, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc000, 0xc101, 0xc400, 0xe800,
+ 0xea00, 0xeb00, 0xec00, 0xfc06, 0xfe07, 0xff00])
+ # 2222/1797, 23/151
+ pkt = NCP(0x1797, "Submit Account Charge", 'accounting')
+ pkt.Request((26,327), [
+ rec( 10, 2, ServiceType, ENC_BIG_ENDIAN ),
+ rec( 12, 4, ChargeAmount, ENC_BIG_ENDIAN ),
+ rec( 16, 4, HoldCancelAmount, ENC_BIG_ENDIAN ),
+ rec( 20, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 22, 2, CommentType, ENC_BIG_ENDIAN ),
+ rec( 24, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Charge: %s", ", %s") ),
+ rec( -1, (1,255), Comment ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8800, 0x9400, 0x9600, 0xa201,
+ 0xc000, 0xc101, 0xc200, 0xc400, 0xe800, 0xea00,
+ 0xeb00, 0xec00, 0xfe07, 0xff00])
+ # 2222/1798, 23/152
+ pkt = NCP(0x1798, "Submit Account Hold", 'accounting')
+ pkt.Request((17,64), [
+ rec( 10, 4, HoldCancelAmount, ENC_BIG_ENDIAN ),
+ rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Hold: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8800, 0x9400, 0x9600, 0xa201,
+ 0xc000, 0xc101, 0xc200, 0xc400, 0xe800, 0xea00,
+ 0xeb00, 0xec00, 0xfe07, 0xff00])
+ # 2222/1799, 23/153
+ pkt = NCP(0x1799, "Submit Account Note", 'accounting')
+ pkt.Request((18,319), [
+ rec( 10, 2, ServiceType, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
+ rec( 14, 2, CommentType, ENC_BIG_ENDIAN ),
+ rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Note: %s", ", %s") ),
+ rec( -1, (1,255), Comment ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x9600, 0xc000, 0xc101, 0xc400,
+ 0xe800, 0xea00, 0xeb00, 0xec00, 0xf000, 0xfc06,
+ 0xff00])
+ # 2222/17c8, 23/200
+ pkt = NCP(0x17c8, "Check Console Privileges", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601])
+ # 2222/17c9, 23/201
+ pkt = NCP(0x17c9, "Get File Server Description Strings", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(108, [
+ rec( 8, 100, DescriptionStrings ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600])
+ # 2222/17CA, 23/202
+ pkt = NCP(0x17CA, "Set File Server Date And Time", 'fileserver')
+ pkt.Request(16, [
+ rec( 10, 1, Year ),
+ rec( 11, 1, Month ),
+ rec( 12, 1, Day ),
+ rec( 13, 1, Hour ),
+ rec( 14, 1, Minute ),
+ rec( 15, 1, Second ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601])
+ # 2222/17CB, 23/203
+ pkt = NCP(0x17CB, "Disable File Server Login", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601])
+ # 2222/17CC, 23/204
+ pkt = NCP(0x17CC, "Enable File Server Login", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601])
+ # 2222/17CD, 23/205
+ pkt = NCP(0x17CD, "Get File Server Login Status", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(9, [
+ rec( 8, 1, UserLoginAllowed ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xfb01])
+ # 2222/17CF, 23/207
+ pkt = NCP(0x17CF, "Disable Transaction Tracking", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601])
+ # 2222/17D0, 23/208
+ pkt = NCP(0x17D0, "Enable Transaction Tracking", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601])
+ # 2222/17D1, 23/209
+ pkt = NCP(0x17D1, "Send Console Broadcast", 'fileserver')
+ pkt.Request((13,267), [
+ rec( 10, 1, NumberOfStations, var="x" ),
+ rec( 11, 1, StationList, repeat="x" ),
+ rec( 12, (1, 255), TargetMessage, info_str=(TargetMessage, "Send Console Broadcast: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
+ # 2222/17D2, 23/210
+ pkt = NCP(0x17D2, "Clear Connection Number", 'fileserver')
+ pkt.Request(11, [
+ rec( 10, 1, ConnectionNumber, info_str=(ConnectionNumber, "Clear Connection Number %d", ", %d") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
+ # 2222/17D3, 23/211
+ pkt = NCP(0x17D3, "Down File Server", 'fileserver')
+ pkt.Request(11, [
+ rec( 10, 1, ForceFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601, 0xff00])
+ # 2222/17D4, 23/212
+ pkt = NCP(0x17D4, "Get File System Statistics", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(50, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ConfiguredMaxOpenFiles ),
+ rec( 14, 2, ActualMaxOpenFiles ),
+ rec( 16, 2, CurrentOpenFiles ),
+ rec( 18, 4, TotalFilesOpened ),
+ rec( 22, 4, TotalReadRequests ),
+ rec( 26, 4, TotalWriteRequests ),
+ rec( 30, 2, CurrentChangedFATs ),
+ rec( 32, 4, TotalChangedFATs ),
+ rec( 36, 2, FATWriteErrors ),
+ rec( 38, 2, FatalFATWriteErrors ),
+ rec( 40, 2, FATScanErrors ),
+ rec( 42, 2, ActualMaxIndexedFiles ),
+ rec( 44, 2, ActiveIndexedFiles ),
+ rec( 46, 2, AttachedIndexedFiles ),
+ rec( 48, 2, AvailableIndexedFiles ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17D5, 23/213
+ pkt = NCP(0x17D5, "Get Transaction Tracking Statistics", 'fileserver')
+ pkt.Request((13,267), [
+ rec( 10, 2, LastRecordSeen ),
+ rec( 12, (1,255), SemaphoreName ),
+ ])
+ pkt.Reply(53, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 1, TransactionTrackingSupported ),
+ rec( 13, 1, TransactionTrackingEnabled ),
+ rec( 14, 2, TransactionVolumeNumber ),
+ rec( 16, 2, ConfiguredMaxSimultaneousTransactions ),
+ rec( 18, 2, ActualMaxSimultaneousTransactions ),
+ rec( 20, 2, CurrentTransactionCount ),
+ rec( 22, 4, TotalTransactionsPerformed ),
+ rec( 26, 4, TotalWriteTransactionsPerformed ),
+ rec( 30, 4, TotalTransactionsBackedOut ),
+ rec( 34, 2, TotalUnfilledBackoutRequests ),
+ rec( 36, 2, TransactionDiskSpace ),
+ rec( 38, 4, TransactionFATAllocations ),
+ rec( 42, 4, TransactionFileSizeChanges ),
+ rec( 46, 4, TransactionFilesTruncated ),
+ rec( 50, 1, NumberOfEntries, var="x" ),
+ rec( 51, 2, ConnTaskStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17D6, 23/214
+ pkt = NCP(0x17D6, "Read Disk Cache Statistics", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(86, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 2, CacheBufferCount ),
+ rec( 14, 2, CacheBufferSize ),
+ rec( 16, 2, DirtyCacheBuffers ),
+ rec( 18, 4, CacheReadRequests ),
+ rec( 22, 4, CacheWriteRequests ),
+ rec( 26, 4, CacheHits ),
+ rec( 30, 4, CacheMisses ),
+ rec( 34, 4, PhysicalReadRequests ),
+ rec( 38, 4, PhysicalWriteRequests ),
+ rec( 42, 2, PhysicalReadErrors ),
+ rec( 44, 2, PhysicalWriteErrors ),
+ rec( 46, 4, CacheGetRequests ),
+ rec( 50, 4, CacheFullWriteRequests ),
+ rec( 54, 4, CachePartialWriteRequests ),
+ rec( 58, 4, BackgroundDirtyWrites ),
+ rec( 62, 4, BackgroundAgedWrites ),
+ rec( 66, 4, TotalCacheWrites ),
+ rec( 70, 4, CacheAllocations ),
+ rec( 74, 2, ThrashingCount ),
+ rec( 76, 2, LRUBlockWasDirty ),
+ rec( 78, 2, ReadBeyondWrite ),
+ rec( 80, 2, FragmentWriteOccurred ),
+ rec( 82, 2, CacheHitOnUnavailableBlock ),
+ rec( 84, 2, CacheBlockScrapped ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17D7, 23/215
+ pkt = NCP(0x17D7, "Get Drive Mapping Table", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(184, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 1, SFTSupportLevel ),
+ rec( 13, 1, LogicalDriveCount ),
+ rec( 14, 1, PhysicalDriveCount ),
+ rec( 15, 1, DiskChannelTable ),
+ rec( 16, 4, Reserved4 ),
+ rec( 20, 2, PendingIOCommands, ENC_BIG_ENDIAN ),
+ rec( 22, 32, DriveMappingTable ),
+ rec( 54, 32, DriveMirrorTable ),
+ rec( 86, 32, DeadMirrorTable ),
+ rec( 118, 1, ReMirrorDriveNumber ),
+ rec( 119, 1, Filler ),
+ rec( 120, 4, ReMirrorCurrentOffset, ENC_BIG_ENDIAN ),
+ rec( 124, 60, SFTErrorTable ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17D8, 23/216
+ pkt = NCP(0x17D8, "Read Physical Disk Statistics", 'fileserver')
+ pkt.Request(11, [
+ rec( 10, 1, PhysicalDiskNumber ),
+ ])
+ pkt.Reply(101, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 1, PhysicalDiskChannel ),
+ rec( 13, 1, DriveRemovableFlag ),
+ rec( 14, 1, PhysicalDriveType ),
+ rec( 15, 1, ControllerDriveNumber ),
+ rec( 16, 1, ControllerNumber ),
+ rec( 17, 1, ControllerType ),
+ rec( 18, 4, DriveSize ),
+ rec( 22, 2, DriveCylinders ),
+ rec( 24, 1, DriveHeads ),
+ rec( 25, 1, SectorsPerTrack ),
+ rec( 26, 64, DriveDefinitionString ),
+ rec( 90, 2, IOErrorCount ),
+ rec( 92, 4, HotFixTableStart ),
+ rec( 96, 2, HotFixTableSize ),
+ rec( 98, 2, HotFixBlocksAvailable ),
+ rec( 100, 1, HotFixDisabled ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17D9, 23/217
+ pkt = NCP(0x17D9, "Get Disk Channel Statistics", 'fileserver')
+ pkt.Request(11, [
+ rec( 10, 1, DiskChannelNumber ),
+ ])
+ pkt.Reply(192, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ChannelState, ENC_BIG_ENDIAN ),
+ rec( 14, 2, ChannelSynchronizationState, ENC_BIG_ENDIAN ),
+ rec( 16, 1, SoftwareDriverType ),
+ rec( 17, 1, SoftwareMajorVersionNumber ),
+ rec( 18, 1, SoftwareMinorVersionNumber ),
+ rec( 19, 65, SoftwareDescription ),
+ rec( 84, 8, IOAddressesUsed ),
+ rec( 92, 10, SharedMemoryAddresses ),
+ rec( 102, 4, InterruptNumbersUsed ),
+ rec( 106, 4, DMAChannelsUsed ),
+ rec( 110, 1, FlagBits ),
+ rec( 111, 1, Reserved ),
+ rec( 112, 80, ConfigurationDescription ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17DB, 23/219
+ pkt = NCP(0x17DB, "Get Connection's Open Files", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 2, ConnectionNumber ),
+ rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(32, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 1, NumberOfRecords, var="x" ),
+ rec( 11, 21, ConnStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17DC, 23/220
+ pkt = NCP(0x17DC, "Get Connection Using A File", 'fileserver')
+ pkt.Request((14,268), [
+ rec( 10, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
+ rec( 12, 1, DirHandle ),
+ rec( 13, (1,255), Path, info_str=(Path, "Get Connection Using File: %s", ", %s") ),
+ ])
+ pkt.Reply(30, [
+ rec( 8, 2, UseCount, ENC_BIG_ENDIAN ),
+ rec( 10, 2, OpenCount, ENC_BIG_ENDIAN ),
+ rec( 12, 2, OpenForReadCount, ENC_BIG_ENDIAN ),
+ rec( 14, 2, OpenForWriteCount, ENC_BIG_ENDIAN ),
+ rec( 16, 2, DenyReadCount, ENC_BIG_ENDIAN ),
+ rec( 18, 2, DenyWriteCount, ENC_BIG_ENDIAN ),
+ rec( 20, 2, NextRequestRecord, ENC_BIG_ENDIAN ),
+ rec( 22, 1, Locked ),
+ rec( 23, 1, NumberOfRecords, var="x" ),
+ rec( 24, 6, ConnFileStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17DD, 23/221
+ pkt = NCP(0x17DD, "Get Physical Record Locks By Connection And File", 'fileserver')
+ pkt.Request(31, [
+ rec( 10, 2, TargetConnectionNumber ),
+ rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
+ rec( 14, 1, VolumeNumber ),
+ rec( 15, 2, DirectoryID ),
+ rec( 17, 14, FileName14, info_str=(FileName14, "Get Physical Record Locks by Connection and File: %s", ", %s") ),
+ ])
+ pkt.Reply(22, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 1, NumberOfLocks, var="x" ),
+ rec( 11, 1, Reserved ),
+ rec( 12, 10, LockStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17DE, 23/222
+ pkt = NCP(0x17DE, "Get Physical Record Locks By File", 'fileserver')
+ pkt.Request((14,268), [
+ rec( 10, 2, TargetConnectionNumber ),
+ rec( 12, 1, DirHandle ),
+ rec( 13, (1,255), Path, info_str=(Path, "Get Physical Record Locks by File: %s", ", %s") ),
+ ])
+ pkt.Reply(28, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 1, NumberOfLocks, var="x" ),
+ rec( 11, 1, Reserved ),
+ rec( 12, 16, PhyLockStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17DF, 23/223
+ pkt = NCP(0x17DF, "Get Logical Records By Connection", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 2, TargetConnectionNumber ),
+ rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply((14,268), [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 1, NumberOfRecords, var="x" ),
+ rec( 11, (3, 257), LogLockStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E0, 23/224
+ pkt = NCP(0x17E0, "Get Logical Record Information", 'fileserver')
+ pkt.Request((13,267), [
+ rec( 10, 2, LastRecordSeen ),
+ rec( 12, (1,255), LogicalRecordName, info_str=(LogicalRecordName, "Get Logical Record Information: %s", ", %s") ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 2, UseCount, ENC_BIG_ENDIAN ),
+ rec( 10, 2, ShareableLockCount, ENC_BIG_ENDIAN ),
+ rec( 12, 2, NextRequestRecord ),
+ rec( 14, 1, Locked ),
+ rec( 15, 1, NumberOfRecords, var="x" ),
+ rec( 16, 4, LogRecStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E1, 23/225
+ pkt = NCP(0x17E1, "Get Connection's Semaphores", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 2, ConnectionNumber ),
+ rec( 12, 2, LastRecordSeen ),
+ ])
+ pkt.Reply((18,272), [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, NumberOfSemaphores, var="x" ),
+ rec( 12, (6,260), SemaStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E2, 23/226
+ pkt = NCP(0x17E2, "Get Semaphore Information", 'fileserver')
+ pkt.Request((13,267), [
+ rec( 10, 2, LastRecordSeen ),
+ rec( 12, (1,255), SemaphoreName, info_str=(SemaphoreName, "Get Semaphore Information: %s", ", %s") ),
+ ])
+ pkt.Reply(17, [
+ rec( 8, 2, NextRequestRecord, ENC_BIG_ENDIAN ),
+ rec( 10, 2, OpenCount, ENC_BIG_ENDIAN ),
+ rec( 12, 1, SemaphoreValue ),
+ rec( 13, 1, NumberOfRecords, var="x" ),
+ rec( 14, 3, SemaInfoStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E3, 23/227
+ pkt = NCP(0x17E3, "Get LAN Driver Configuration Information", 'fileserver')
+ pkt.Request(11, [
+ rec( 10, 1, LANDriverNumber ),
+ ])
+ pkt.Reply(180, [
+ rec( 8, 4, NetworkAddress, ENC_BIG_ENDIAN ),
+ rec( 12, 6, HostAddress ),
+ rec( 18, 1, BoardInstalled ),
+ rec( 19, 1, OptionNumber ),
+ rec( 20, 160, ConfigurationText ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E5, 23/229
+ pkt = NCP(0x17E5, "Get Connection Usage Statistics", 'fileserver')
+ pkt.Request(12, [
+ rec( 10, 2, ConnectionNumber ),
+ ])
+ pkt.Reply(26, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 6, BytesRead ),
+ rec( 16, 6, BytesWritten ),
+ rec( 22, 4, TotalRequestPackets ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E6, 23/230
+ pkt = NCP(0x17E6, "Get Object's Remaining Disk Space", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(21, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 4, ObjectID ),
+ rec( 16, 4, UnusedDiskBlocks, ENC_BIG_ENDIAN ),
+ rec( 20, 1, RestrictionsEnforced ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E7, 23/231
+ pkt = NCP(0x17E7, "Get File Server LAN I/O Statistics", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(74, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 2, ConfiguredMaxRoutingBuffers ),
+ rec( 14, 2, ActualMaxUsedRoutingBuffers ),
+ rec( 16, 2, CurrentlyUsedRoutingBuffers ),
+ rec( 18, 4, TotalFileServicePackets ),
+ rec( 22, 2, TurboUsedForFileService ),
+ rec( 24, 2, PacketsFromInvalidConnection ),
+ rec( 26, 2, BadLogicalConnectionCount ),
+ rec( 28, 2, PacketsReceivedDuringProcessing ),
+ rec( 30, 2, RequestsReprocessed ),
+ rec( 32, 2, PacketsWithBadSequenceNumber ),
+ rec( 34, 2, DuplicateRepliesSent ),
+ rec( 36, 2, PositiveAcknowledgesSent ),
+ rec( 38, 2, PacketsWithBadRequestType ),
+ rec( 40, 2, AttachDuringProcessing ),
+ rec( 42, 2, AttachWhileProcessingAttach ),
+ rec( 44, 2, ForgedDetachedRequests ),
+ rec( 46, 2, DetachForBadConnectionNumber ),
+ rec( 48, 2, DetachDuringProcessing ),
+ rec( 50, 2, RepliesCancelled ),
+ rec( 52, 2, PacketsDiscardedByHopCount ),
+ rec( 54, 2, PacketsDiscardedUnknownNet ),
+ rec( 56, 2, IncomingPacketDiscardedNoDGroup ),
+ rec( 58, 2, OutgoingPacketDiscardedNoTurboBuffer ),
+ rec( 60, 2, IPXNotMyNetwork ),
+ rec( 62, 4, NetBIOSBroadcastWasPropagated ),
+ rec( 66, 4, TotalOtherPackets ),
+ rec( 70, 4, TotalRoutedPackets ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E8, 23/232
+ pkt = NCP(0x17E8, "Get File Server Misc Information", 'fileserver')
+ pkt.Request(10)
+ pkt.Reply(40, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 1, ProcessorType ),
+ rec( 13, 1, Reserved ),
+ rec( 14, 1, NumberOfServiceProcesses ),
+ rec( 15, 1, ServerUtilizationPercentage ),
+ rec( 16, 2, ConfiguredMaxBinderyObjects ),
+ rec( 18, 2, ActualMaxBinderyObjects ),
+ rec( 20, 2, CurrentUsedBinderyObjects ),
+ rec( 22, 2, TotalServerMemory ),
+ rec( 24, 2, WastedServerMemory ),
+ rec( 26, 2, NumberOfDynamicMemoryAreas, var="x" ),
+ rec( 28, 12, DynMemStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17E9, 23/233
+ pkt = NCP(0x17E9, "Get Volume Information", 'fileserver')
+ pkt.Request(11, [
+ rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Information on Volume %d", ", %d") ),
+ ])
+ pkt.Reply(48, [
+ rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 1, LogicalDriveNumber ),
+ rec( 14, 2, BlockSize ),
+ rec( 16, 2, StartingBlock ),
+ rec( 18, 2, TotalBlocks ),
+ rec( 20, 2, FreeBlocks ),
+ rec( 22, 2, TotalDirectoryEntries ),
+ rec( 24, 2, FreeDirectoryEntries ),
+ rec( 26, 2, ActualMaxUsedDirectoryEntries ),
+ rec( 28, 1, VolumeHashedFlag ),
+ rec( 29, 1, VolumeCachedFlag ),
+ rec( 30, 1, VolumeRemovableFlag ),
+ rec( 31, 1, VolumeMountedFlag ),
+ rec( 32, 16, VolumeName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17EA, 23/234
+ pkt = NCP(0x17EA, "Get Connection's Task Information", 'fileserver')
+ pkt.Request(12, [
+ rec( 10, 2, ConnectionNumber ),
+ ])
+ pkt.Reply(13, [
+ rec( 8, 1, ConnLockStatus ),
+ rec( 9, 1, NumberOfActiveTasks, var="x" ),
+ rec( 10, 3, TaskStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17EB, 23/235
+ pkt = NCP(0x17EB, "Get Connection's Open Files", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 2, ConnectionNumber ),
+ rec( 12, 2, LastRecordSeen ),
+ ])
+ pkt.Reply((29,283), [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, NumberOfRecords, var="x" ),
+ rec( 12, (17, 271), OpnFilesStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17EC, 23/236
+ pkt = NCP(0x17EC, "Get Connection Using A File", 'fileserver')
+ pkt.Request(18, [
+ rec( 10, 1, DataStreamNumber ),
+ rec( 11, 1, VolumeNumber ),
+ rec( 12, 4, DirectoryBase, ENC_LITTLE_ENDIAN ),
+ rec( 16, 2, LastRecordSeen ),
+ ])
+ pkt.Reply(33, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, FileUseCount ),
+ rec( 12, 2, OpenCount ),
+ rec( 14, 2, OpenForReadCount ),
+ rec( 16, 2, OpenForWriteCount ),
+ rec( 18, 2, DenyReadCount ),
+ rec( 20, 2, DenyWriteCount ),
+ rec( 22, 1, Locked ),
+ rec( 23, 1, ForkCount ),
+ rec( 24, 2, NumberOfRecords, var="x" ),
+ rec( 26, 7, ConnFileStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
+ # 2222/17ED, 23/237
+ pkt = NCP(0x17ED, "Get Physical Record Locks By Connection And File", 'fileserver')
+ pkt.Request(20, [
+ rec( 10, 2, TargetConnectionNumber ),
+ rec( 12, 1, DataStreamNumber ),
+ rec( 13, 1, VolumeNumber ),
+ rec( 14, 4, DirectoryBase, ENC_LITTLE_ENDIAN ),
+ rec( 18, 2, LastRecordSeen ),
+ ])
+ pkt.Reply(23, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, NumberOfLocks, ENC_LITTLE_ENDIAN, var="x" ),
+ rec( 12, 11, LockStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17EE, 23/238
+ pkt = NCP(0x17EE, "Get Physical Record Locks By File", 'fileserver')
+ pkt.Request(18, [
+ rec( 10, 1, DataStreamNumber ),
+ rec( 11, 1, VolumeNumber ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 2, LastRecordSeen ),
+ ])
+ pkt.Reply(30, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, NumberOfLocks, ENC_LITTLE_ENDIAN, var="x" ),
+ rec( 12, 18, PhyLockStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17EF, 23/239
+ pkt = NCP(0x17EF, "Get Logical Records By Connection", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 2, TargetConnectionNumber ),
+ rec( 12, 2, LastRecordSeen ),
+ ])
+ pkt.Reply((16,270), [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, NumberOfRecords, var="x" ),
+ rec( 12, (4, 258), LogLockStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17F0, 23/240
+ pkt = NCP(0x17F0, "Get Logical Record Information (old)", 'fileserver')
+ pkt.Request((13,267), [
+ rec( 10, 2, LastRecordSeen ),
+ rec( 12, (1,255), LogicalRecordName ),
+ ])
+ pkt.Reply(22, [
+ rec( 8, 2, ShareableLockCount ),
+ rec( 10, 2, UseCount ),
+ rec( 12, 1, Locked ),
+ rec( 13, 2, NextRequestRecord ),
+ rec( 15, 2, NumberOfRecords, var="x" ),
+ rec( 17, 5, LogRecStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17F1, 23/241
+ pkt = NCP(0x17F1, "Get Connection's Semaphores", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 2, ConnectionNumber ),
+ rec( 12, 2, LastRecordSeen ),
+ ])
+ pkt.Reply((19,273), [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, NumberOfSemaphores, var="x" ),
+ rec( 12, (7, 261), SemaStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17F2, 23/242
+ pkt = NCP(0x17F2, "Get Semaphore Information", 'fileserver')
+ pkt.Request((13,267), [
+ rec( 10, 2, LastRecordSeen ),
+ rec( 12, (1,255), SemaphoreName, info_str=(SemaphoreName, "Get Semaphore Information: %s", ", %s") ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 2, NextRequestRecord ),
+ rec( 10, 2, OpenCount ),
+ rec( 12, 2, SemaphoreValue ),
+ rec( 14, 2, NumberOfRecords, var="x" ),
+ rec( 16, 4, SemaInfoStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17F3, 23/243
+ pkt = NCP(0x17F3, "Map Directory Number to Path", 'file')
+ pkt.Request(16, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, DirectoryNumber ),
+ rec( 15, 1, NameSpace ),
+ ])
+ pkt.Reply((9,263), [
+ rec( 8, (1,255), Path ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9c00, 0xc601, 0xfd00, 0xff00])
+ # 2222/17F4, 23/244
+ pkt = NCP(0x17F4, "Convert Path to Dir Entry", 'file')
+ pkt.Request((12,266), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, (1,255), Path, info_str=(Path, "Convert Path to Directory Entry: %s", ", %s") ),
+ ])
+ pkt.Reply(13, [
+ rec( 8, 1, VolumeNumber ),
+ rec( 9, 4, DirectoryNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
+ # 2222/17FD, 23/253
+ pkt = NCP(0x17FD, "Send Console Broadcast", 'fileserver')
+ pkt.Request((16, 270), [
+ rec( 10, 1, NumberOfStations, var="x" ),
+ rec( 11, 4, StationList, repeat="x" ),
+ rec( 15, (1, 255), TargetMessage, info_str=(TargetMessage, "Send Console Broadcast: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
+ # 2222/17FE, 23/254
+ pkt = NCP(0x17FE, "Clear Connection Number", 'fileserver')
+ pkt.Request(14, [
+ rec( 10, 4, ConnectionNumber ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
+ # 2222/18, 24
+ pkt = NCP(0x18, "End of Job", 'connection')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/19, 25
+ pkt = NCP(0x19, "Logout", 'connection')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/1A, 26
+ pkt = NCP(0x1A, "Log Physical Record", 'sync')
+ pkt.Request(24, [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 6, FileHandle ),
+ rec( 14, 4, LockAreasStartOffset, ENC_BIG_ENDIAN ),
+ rec( 18, 4, LockAreaLen, ENC_BIG_ENDIAN, info_str=(LockAreaLen, "Lock Record - Length of %d", "%d") ),
+ rec( 22, 2, LockTimeout ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
+ # 2222/1B, 27
+ pkt = NCP(0x1B, "Lock Physical Record Set", 'sync')
+ pkt.Request(10, [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 2, LockTimeout ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
+ # 2222/1C, 28
+ pkt = NCP(0x1C, "Release Physical Record", 'sync')
+ pkt.Request(22, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle ),
+ rec( 14, 4, LockAreasStartOffset ),
+ rec( 18, 4, LockAreaLen, info_str=(LockAreaLen, "Release Lock Record - Length of %d", "%d") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
+ # 2222/1D, 29
+ pkt = NCP(0x1D, "Release Physical Record Set", 'sync')
+ pkt.Request(8, [
+ rec( 7, 1, LockFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
+ # 2222/1E, 30 #Tested and fixed 6-14-02 GM
+ pkt = NCP(0x1E, "Clear Physical Record", 'sync')
+ pkt.Request(22, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle ),
+ rec( 14, 4, LockAreasStartOffset, ENC_BIG_ENDIAN ),
+ rec( 18, 4, LockAreaLen, ENC_BIG_ENDIAN, info_str=(LockAreaLen, "Clear Lock Record - Length of %d", "%d") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
+ # 2222/1F, 31
+ pkt = NCP(0x1F, "Clear Physical Record Set", 'sync')
+ pkt.Request(8, [
+ rec( 7, 1, LockFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
+ # 2222/2000, 32/00
+ pkt = NCP(0x2000, "Open Semaphore", 'sync', has_length=0)
+ pkt.Request((10,264), [
+ rec( 8, 1, InitialSemaphoreValue ),
+ rec( 9, (1,255), SemaphoreName, info_str=(SemaphoreName, "Open Semaphore: %s", ", %s") ),
+ ])
+ pkt.Reply(13, [
+ rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 1, SemaphoreOpenCount ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
+ # 2222/2001, 32/01
+ pkt = NCP(0x2001, "Examine Semaphore", 'sync', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, SemaphoreValue ),
+ rec( 9, 1, SemaphoreOpenCount ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
+ # 2222/2002, 32/02
+ pkt = NCP(0x2002, "Wait On Semaphore", 'sync', has_length=0)
+ pkt.Request(14, [
+ rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 2, SemaphoreTimeOut, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
+ # 2222/2003, 32/03
+ pkt = NCP(0x2003, "Signal Semaphore", 'sync', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
+ # 2222/2004, 32/04
+ pkt = NCP(0x2004, "Close Semaphore", 'sync', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
+ # 2222/21, 33
+ pkt = NCP(0x21, "Negotiate Buffer Size", 'connection')
+ pkt.Request(9, [
+ rec( 7, 2, BufferSize, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 2, BufferSize, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/2200, 34/00
+ pkt = NCP(0x2200, "TTS Is Available", 'tts', has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0001, 0xfd03, 0xff12])
+ # 2222/2201, 34/01
+ pkt = NCP(0x2201, "TTS Begin Transaction", 'tts', has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/2202, 34/02
+ pkt = NCP(0x2202, "TTS End Transaction", 'tts', has_length=0)
+ pkt.Request(8)
+ pkt.Reply(12, [
+ rec( 8, 4, TransactionNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xff01])
+ # 2222/2203, 34/03
+ pkt = NCP(0x2203, "TTS Abort Transaction", 'tts', has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xfd03, 0xfe0b, 0xff01])
+ # 2222/2204, 34/04
+ pkt = NCP(0x2204, "TTS Transaction Status", 'tts', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, TransactionNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/2205, 34/05
+ pkt = NCP(0x2205, "TTS Get Application Thresholds", 'tts', has_length=0)
+ pkt.Request(8)
+ pkt.Reply(10, [
+ rec( 8, 1, LogicalLockThreshold ),
+ rec( 9, 1, PhysicalLockThreshold ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/2206, 34/06
+ pkt = NCP(0x2206, "TTS Set Application Thresholds", 'tts', has_length=0)
+ pkt.Request(10, [
+ rec( 8, 1, LogicalLockThreshold ),
+ rec( 9, 1, PhysicalLockThreshold ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600])
+ # 2222/2207, 34/07
+ pkt = NCP(0x2207, "TTS Get Workstation Thresholds", 'tts', has_length=0)
+ pkt.Request(8)
+ pkt.Reply(10, [
+ rec( 8, 1, LogicalLockThreshold ),
+ rec( 9, 1, PhysicalLockThreshold ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/2208, 34/08
+ pkt = NCP(0x2208, "TTS Set Workstation Thresholds", 'tts', has_length=0)
+ pkt.Request(10, [
+ rec( 8, 1, LogicalLockThreshold ),
+ rec( 9, 1, PhysicalLockThreshold ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/2209, 34/09
+ pkt = NCP(0x2209, "TTS Get Transaction Bits", 'tts', has_length=0)
+ pkt.Request(8)
+ pkt.Reply(9, [
+ rec( 8, 1, ControlFlags ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/220A, 34/10
+ pkt = NCP(0x220A, "TTS Set Transaction Bits", 'tts', has_length=0)
+ pkt.Request(9, [
+ rec( 8, 1, ControlFlags ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/2301, 35/01
+ pkt = NCP(0x2301, "AFP Create Directory", 'afp')
+ pkt.Request((49, 303), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, 1, Reserved ),
+ rec( 16, 4, CreatorID ),
+ rec( 20, 4, Reserved4 ),
+ rec( 24, 2, FinderAttr ),
+ rec( 26, 2, HorizLocation ),
+ rec( 28, 2, VertLocation ),
+ rec( 30, 2, FileDirWindow ),
+ rec( 32, 16, Reserved16 ),
+ rec( 48, (1,255), Path, info_str=(Path, "AFP Create Directory: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, NewDirectoryID ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8400, 0x8800, 0x9300, 0x9600, 0x9804,
+ 0x9900, 0x9c03, 0x9e02, 0xa100, 0xa201, 0xfd00, 0xff18])
+ # 2222/2302, 35/02
+ pkt = NCP(0x2302, "AFP Create File", 'afp')
+ pkt.Request((49, 303), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, 1, DeleteExistingFileFlag ),
+ rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 20, 4, Reserved4 ),
+ rec( 24, 2, FinderAttr ),
+ rec( 26, 2, HorizLocation, ENC_BIG_ENDIAN ),
+ rec( 28, 2, VertLocation, ENC_BIG_ENDIAN ),
+ rec( 30, 2, FileDirWindow, ENC_BIG_ENDIAN ),
+ rec( 32, 16, Reserved16 ),
+ rec( 48, (1,255), Path, info_str=(Path, "AFP Create File: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, NewDirectoryID ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8400, 0x8701, 0x8800,
+ 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9300, 0x9600, 0x9804,
+ 0x9900, 0x9b03, 0x9c03, 0x9e02, 0xa100, 0xa201, 0xfd00,
+ 0xff18])
+ # 2222/2303, 35/03
+ pkt = NCP(0x2303, "AFP Delete", 'afp')
+ pkt.Request((16,270), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, (1,255), Path, info_str=(Path, "AFP Delete: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x8a00, 0x8d00, 0x8e00, 0x8f00,
+ 0x9000, 0x9300, 0x9600, 0x9804, 0x9b03, 0x9c03, 0x9e02,
+ 0xa000, 0xa100, 0xa201, 0xfd00, 0xff19])
+ # 2222/2304, 35/04
+ pkt = NCP(0x2304, "AFP Get Entry ID From Name", 'afp')
+ pkt.Request((16,270), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, (1,255), Path, info_str=(Path, "AFP Get Entry from Name: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, TargetEntryID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03,
+ 0xa100, 0xa201, 0xfd00, 0xff19])
+ # 2222/2305, 35/05
+ pkt = NCP(0x2305, "AFP Get File Information", 'afp')
+ pkt.Request((18,272), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
+ rec( 17, (1,255), Path, info_str=(Path, "AFP Get File Information: %s", ", %s") ),
+ ])
+ pkt.Reply(121, [
+ rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
+ rec( 12, 4, ParentID, ENC_BIG_ENDIAN ),
+ rec( 16, 2, AttributesDef16, ENC_LITTLE_ENDIAN ),
+ rec( 18, 4, DataForkLen, ENC_BIG_ENDIAN ),
+ rec( 22, 4, ResourceForkLen, ENC_BIG_ENDIAN ),
+ rec( 26, 2, TotalOffspring, ENC_BIG_ENDIAN ),
+ rec( 28, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 30, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 32, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 34, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ rec( 36, 2, ArchivedDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, ArchivedTime, ENC_BIG_ENDIAN ),
+ rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 44, 4, Reserved4 ),
+ rec( 48, 2, FinderAttr ),
+ rec( 50, 2, HorizLocation ),
+ rec( 52, 2, VertLocation ),
+ rec( 54, 2, FileDirWindow ),
+ rec( 56, 16, Reserved16 ),
+ rec( 72, 32, LongName ),
+ rec( 104, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 108, 12, ShortName ),
+ rec( 120, 1, AccessPrivileges ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03,
+ 0xa100, 0xa201, 0xfd00, 0xff19])
+ # 2222/2306, 35/06
+ pkt = NCP(0x2306, "AFP Get Entry ID From NetWare Handle", 'afp')
+ pkt.Request(16, [
+ rec( 10, 6, FileHandle ),
+ ])
+ pkt.Reply(14, [
+ rec( 8, 1, VolumeID ),
+ rec( 9, 4, TargetEntryID, ENC_BIG_ENDIAN ),
+ rec( 13, 1, ForkIndicator ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0xa201])
+ # 2222/2307, 35/07
+ pkt = NCP(0x2307, "AFP Rename", 'afp')
+ pkt.Request((21, 529), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, MacSourceBaseID, ENC_BIG_ENDIAN ),
+ rec( 15, 4, MacDestinationBaseID, ENC_BIG_ENDIAN ),
+ rec( 19, (1,255), Path, info_str=(Path, "AFP Rename: %s", ", %s") ),
+ rec( -1, (1,255), NewFileNameLen ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8401, 0x8800, 0x8b00, 0x8e00,
+ 0x9001, 0x9201, 0x9300, 0x9600, 0x9804, 0x9900,
+ 0x9c03, 0x9e00, 0xa100, 0xa201, 0xfd00, 0xff0a])
+ # 2222/2308, 35/08
+ pkt = NCP(0x2308, "AFP Open File Fork", 'afp')
+ pkt.Request((18, 272), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, MacBaseDirectoryID ),
+ rec( 15, 1, ForkIndicator ),
+ rec( 16, 1, AccessMode ),
+ rec( 17, (1,255), Path, info_str=(Path, "AFP Open File Fork: %s", ", %s") ),
+ ])
+ pkt.Reply(22, [
+ rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
+ rec( 12, 4, DataForkLen, ENC_BIG_ENDIAN ),
+ rec( 16, 6, NetWareAccessHandle ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8800, 0x9300,
+ 0x9400, 0x9600, 0x9804, 0x9900, 0x9c03, 0xa100,
+ 0xa201, 0xfd00, 0xff16])
+ # 2222/2309, 35/09
+ pkt = NCP(0x2309, "AFP Set File Information", 'afp')
+ pkt.Request((64, 318), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, MacBaseDirectoryID ),
+ rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
+ rec( 17, 2, MacAttr, ENC_BIG_ENDIAN ),
+ rec( 19, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 21, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 23, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 25, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ rec( 27, 2, ArchivedDate, ENC_BIG_ENDIAN ),
+ rec( 29, 2, ArchivedTime, ENC_BIG_ENDIAN ),
+ rec( 31, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 35, 4, Reserved4 ),
+ rec( 39, 2, FinderAttr ),
+ rec( 41, 2, HorizLocation ),
+ rec( 43, 2, VertLocation ),
+ rec( 45, 2, FileDirWindow ),
+ rec( 47, 16, Reserved16 ),
+ rec( 63, (1,255), Path, info_str=(Path, "AFP Set File Information: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400,
+ 0x9500, 0x9600, 0x9804, 0x9c03, 0xa100, 0xa201,
+ 0xfd00, 0xff16])
+ # 2222/230A, 35/10
+ pkt = NCP(0x230A, "AFP Scan File Information", 'afp')
+ pkt.Request((26, 280), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, MacBaseDirectoryID ),
+ rec( 15, 4, MacLastSeenID, ENC_BIG_ENDIAN ),
+ rec( 19, 2, DesiredResponseCount, ENC_BIG_ENDIAN ),
+ rec( 21, 2, SearchBitMap, ENC_BIG_ENDIAN ),
+ rec( 23, 2, RequestBitMap, ENC_BIG_ENDIAN ),
+ rec( 25, (1,255), Path, info_str=(Path, "AFP Scan File Information: %s", ", %s") ),
+ ])
+ pkt.Reply(123, [
+ rec( 8, 2, ActualResponseCount, ENC_BIG_ENDIAN, var="x" ),
+ rec( 10, 113, AFP10Struct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804,
+ 0x9c03, 0xa100, 0xa201, 0xfd00, 0xff16])
+ # 2222/230B, 35/11
+ pkt = NCP(0x230B, "AFP Alloc Temporary Directory Handle", 'afp')
+ pkt.Request((16,270), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, MacBaseDirectoryID ),
+ rec( 15, (1,255), Path, info_str=(Path, "AFP Allocate Temporary Directory Handle: %s", ", %s") ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, DirHandle ),
+ rec( 9, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0x9d00, 0xa100,
+ 0xa201, 0xfd00, 0xff00])
+ # 2222/230C, 35/12
+ pkt = NCP(0x230C, "AFP Get Entry ID From Path Name", 'afp')
+ pkt.Request((12,266), [
+ rec( 10, 1, DirHandle ),
+ rec( 11, (1,255), Path, info_str=(Path, "AFP Get Entry ID from Path Name: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa100, 0xa201,
+ 0xfd00, 0xff00])
+ # 2222/230D, 35/13
+ pkt = NCP(0x230D, "AFP 2.0 Create Directory", 'afp')
+ pkt.Request((55,309), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, 1, Reserved ),
+ rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 20, 4, Reserved4 ),
+ rec( 24, 2, FinderAttr ),
+ rec( 26, 2, HorizLocation ),
+ rec( 28, 2, VertLocation ),
+ rec( 30, 2, FileDirWindow ),
+ rec( 32, 16, Reserved16 ),
+ rec( 48, 6, ProDOSInfo ),
+ rec( 54, (1,255), Path, info_str=(Path, "AFP 2.0 Create Directory: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, NewDirectoryID ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8400, 0x8800, 0x9300,
+ 0x9600, 0x9804, 0x9900, 0x9c03, 0x9e00,
+ 0xa100, 0xa201, 0xfd00, 0xff00])
+ # 2222/230E, 35/14
+ pkt = NCP(0x230E, "AFP 2.0 Create File", 'afp')
+ pkt.Request((55,309), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, 1, DeleteExistingFileFlag ),
+ rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 20, 4, Reserved4 ),
+ rec( 24, 2, FinderAttr ),
+ rec( 26, 2, HorizLocation ),
+ rec( 28, 2, VertLocation ),
+ rec( 30, 2, FileDirWindow ),
+ rec( 32, 16, Reserved16 ),
+ rec( 48, 6, ProDOSInfo ),
+ rec( 54, (1,255), Path, info_str=(Path, "AFP 2.0 Create File: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, NewDirectoryID ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8400,
+ 0x8701, 0x8800, 0x8a00, 0x8d00, 0x8e00,
+ 0x8f00, 0x9001, 0x9300, 0x9600, 0x9804,
+ 0x9900, 0x9b03, 0x9c03, 0x9e00, 0xa100,
+ 0xa201, 0xfd00, 0xff00])
+ # 2222/230F, 35/15
+ pkt = NCP(0x230F, "AFP 2.0 Get File Or Directory Information", 'afp')
+ pkt.Request((18,272), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, BaseDirectoryID ),
+ rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
+ rec( 17, (1,255), Path, info_str=(Path, "AFP 2.0 Get Information: %s", ", %s") ),
+ ])
+ pkt.Reply(128, [
+ rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
+ rec( 12, 4, ParentID, ENC_BIG_ENDIAN ),
+ rec( 16, 2, AttributesDef16 ),
+ rec( 18, 4, DataForkLen, ENC_BIG_ENDIAN ),
+ rec( 22, 4, ResourceForkLen, ENC_BIG_ENDIAN ),
+ rec( 26, 2, TotalOffspring, ENC_BIG_ENDIAN ),
+ rec( 28, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 30, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 32, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 34, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ rec( 36, 2, ArchivedDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, ArchivedTime, ENC_BIG_ENDIAN ),
+ rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 44, 4, Reserved4 ),
+ rec( 48, 2, FinderAttr ),
+ rec( 50, 2, HorizLocation ),
+ rec( 52, 2, VertLocation ),
+ rec( 54, 2, FileDirWindow ),
+ rec( 56, 16, Reserved16 ),
+ rec( 72, 32, LongName ),
+ rec( 104, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 108, 12, ShortName ),
+ rec( 120, 1, AccessPrivileges ),
+ rec( 121, 1, Reserved ),
+ rec( 122, 6, ProDOSInfo ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03,
+ 0xa100, 0xa201, 0xfd00, 0xff19])
+ # 2222/2310, 35/16
+ pkt = NCP(0x2310, "AFP 2.0 Set File Information", 'afp')
+ pkt.Request((70, 324), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, MacBaseDirectoryID ),
+ rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
+ rec( 17, 2, AttributesDef16 ),
+ rec( 19, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 21, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 23, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 25, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ rec( 27, 2, ArchivedDate, ENC_BIG_ENDIAN ),
+ rec( 29, 2, ArchivedTime, ENC_BIG_ENDIAN ),
+ rec( 31, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 35, 4, Reserved4 ),
+ rec( 39, 2, FinderAttr ),
+ rec( 41, 2, HorizLocation ),
+ rec( 43, 2, VertLocation ),
+ rec( 45, 2, FileDirWindow ),
+ rec( 47, 16, Reserved16 ),
+ rec( 63, 6, ProDOSInfo ),
+ rec( 69, (1,255), Path, info_str=(Path, "AFP 2.0 Set File Information: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400,
+ 0x9500, 0x9600, 0x9804, 0x9c03, 0xa100, 0xa201,
+ 0xfd00, 0xff16])
+ # 2222/2311, 35/17
+ pkt = NCP(0x2311, "AFP 2.0 Scan File Information", 'afp')
+ pkt.Request((26, 280), [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, MacBaseDirectoryID ),
+ rec( 15, 4, MacLastSeenID, ENC_BIG_ENDIAN ),
+ rec( 19, 2, DesiredResponseCount, ENC_BIG_ENDIAN ),
+ rec( 21, 2, SearchBitMap, ENC_BIG_ENDIAN ),
+ rec( 23, 2, RequestBitMap, ENC_BIG_ENDIAN ),
+ rec( 25, (1,255), Path, info_str=(Path, "AFP 2.0 Scan File Information: %s", ", %s") ),
+ ])
+ pkt.Reply(14, [
+ rec( 8, 2, ActualResponseCount, var="x" ),
+ rec( 10, 4, AFP20Struct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804,
+ 0x9c03, 0xa100, 0xa201, 0xfd00, 0xff16])
+ # 2222/2312, 35/18
+ pkt = NCP(0x2312, "AFP Get DOS Name From Entry ID", 'afp')
+ pkt.Request(15, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, AFPEntryID, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply((9,263), [
+ rec( 8, (1,255), Path ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8900, 0x9600, 0xbf00])
+ # 2222/2313, 35/19
+ pkt = NCP(0x2313, "AFP Get Macintosh Info On Deleted File", 'afp')
+ pkt.Request(15, [
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, DirectoryNumber, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply((51,305), [
+ rec( 8, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 12, 4, Reserved4 ),
+ rec( 16, 2, FinderAttr ),
+ rec( 18, 2, HorizLocation ),
+ rec( 20, 2, VertLocation ),
+ rec( 22, 2, FileDirWindow ),
+ rec( 24, 16, Reserved16 ),
+ rec( 40, 6, ProDOSInfo ),
+ rec( 46, 4, ResourceForkSize, ENC_BIG_ENDIAN ),
+ rec( 50, (1,255), FileName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9c03, 0xbf00])
+ # 2222/2400, 36/00
+ pkt = NCP(0x2400, "Get NCP Extension Information", 'extension')
+ pkt.Request(14, [
+ rec( 10, 4, NCPextensionNumber, ENC_LITTLE_ENDIAN ),
+ ])
+ pkt.Reply((16,270), [
+ rec( 8, 4, NCPextensionNumber ),
+ rec( 12, 1, NCPextensionMajorVersion ),
+ rec( 13, 1, NCPextensionMinorVersion ),
+ rec( 14, 1, NCPextensionRevisionNumber ),
+ rec( 15, (1, 255), NCPextensionName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
+ # 2222/2401, 36/01
+ pkt = NCP(0x2401, "Get NCP Extension Maximum Data Size", 'extension')
+ pkt.Request(10)
+ pkt.Reply(10, [
+ rec( 8, 2, NCPdataSize ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
+ # 2222/2402, 36/02
+ pkt = NCP(0x2402, "Get NCP Extension Information by Name", 'extension')
+ pkt.Request((11, 265), [
+ rec( 10, (1,255), NCPextensionName, info_str=(NCPextensionName, "Get NCP Extension Information by Name: %s", ", %s") ),
+ ])
+ pkt.Reply((16,270), [
+ rec( 8, 4, NCPextensionNumber ),
+ rec( 12, 1, NCPextensionMajorVersion ),
+ rec( 13, 1, NCPextensionMinorVersion ),
+ rec( 14, 1, NCPextensionRevisionNumber ),
+ rec( 15, (1, 255), NCPextensionName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
+ # 2222/2403, 36/03
+ pkt = NCP(0x2403, "Get Number of Registered NCP Extensions", 'extension')
+ pkt.Request(10)
+ pkt.Reply(12, [
+ rec( 8, 4, NumberOfNCPExtensions ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
+ # 2222/2404, 36/04
+ pkt = NCP(0x2404, "Get NCP Extension Registered Verbs List", 'extension')
+ pkt.Request(14, [
+ rec( 10, 4, StartingNumber ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, ReturnedListCount, var="x" ),
+ rec( 12, 4, nextStartingNumber ),
+ rec( 16, 4, NCPExtensionNumbers, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
+ # 2222/2405, 36/05
+ pkt = NCP(0x2405, "Return NCP Extension Information", 'extension')
+ pkt.Request(14, [
+ rec( 10, 4, NCPextensionNumber ),
+ ])
+ pkt.Reply((16,270), [
+ rec( 8, 4, NCPextensionNumber ),
+ rec( 12, 1, NCPextensionMajorVersion ),
+ rec( 13, 1, NCPextensionMinorVersion ),
+ rec( 14, 1, NCPextensionRevisionNumber ),
+ rec( 15, (1, 255), NCPextensionName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
+ # 2222/2406, 36/06
+ pkt = NCP(0x2406, "Return NCP Extension Maximum Data Size", 'extension')
+ pkt.Request(10)
+ pkt.Reply(12, [
+ rec( 8, 4, NCPdataSize ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
+ # 2222/25, 37
+ pkt = NCP(0x25, "Execute NCP Extension", 'extension')
+ pkt.Request(11, [
+ rec( 7, 4, NCPextensionNumber ),
+ # The following value is Unicode
+ #rec[ 13, (1,255), RequestData ],
+ ])
+ pkt.Reply(8)
+ # The following value is Unicode
+ #[ 8, (1, 255), ReplyBuffer ],
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xf000, 0x9c00, 0xd504, 0xee00, 0xfe00, 0xff20])
+ # 2222/3B, 59
+ pkt = NCP(0x3B, "Commit File", 'file', has_length=0 )
+ pkt.Request(14, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Commit File - 0x%s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9804, 0xff00])
+ # 2222/3D, 61
+ pkt = NCP(0x3D, "Commit File", 'file', has_length=0 )
+ pkt.Request(14, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Commit File - 0x%s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9804, 0xff00])
+ # 2222/3E, 62
+ pkt = NCP(0x3E, "File Search Initialize", 'file', has_length=0 )
+ pkt.Request((9, 263), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, (1,255), Path, info_str=(Path, "Initialize File Search: %s", ", %s") ),
+ ])
+ pkt.Reply(14, [
+ rec( 8, 1, VolumeNumber ),
+ rec( 9, 2, DirectoryID ),
+ rec( 11, 2, SequenceNumber, ENC_BIG_ENDIAN ),
+ rec( 13, 1, AccessRightsMask ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100,
+ 0xfd00, 0xff16])
+ # 2222/3F, 63
+ pkt = NCP(0x3F, "File Search Continue", 'file', has_length=0 )
+ pkt.Request((14, 268), [
+ rec( 7, 1, VolumeNumber ),
+ rec( 8, 2, DirectoryID ),
+ rec( 10, 2, SequenceNumber, ENC_BIG_ENDIAN ),
+ rec( 12, 1, SearchAttributes ),
+ rec( 13, (1,255), Path, info_str=(Path, "File Search Continue: %s", ", %s") ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ #
+ # XXX - don't show this if we got back a non-zero
+ # completion code? For example, 255 means "No
+ # matching files or directories were found", so
+ # presumably it can't show you a matching file or
+ # directory instance - it appears to just leave crap
+ # there.
+ #
+ srec( DirectoryInstance, req_cond="ncp.sattr_sub==TRUE"),
+ srec( FileInstance, req_cond="ncp.sattr_sub!=TRUE"),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0xff16])
+ # 2222/40, 64
+ pkt = NCP(0x40, "Search for a File", 'file')
+ pkt.Request((12, 266), [
+ rec( 7, 2, SequenceNumber, ENC_BIG_ENDIAN ),
+ rec( 9, 1, DirHandle ),
+ rec( 10, 1, SearchAttributes ),
+ rec( 11, (1,255), FileName, info_str=(FileName, "Search for File: %s", ", %s") ),
+ ])
+ pkt.Reply(40, [
+ rec( 8, 2, SequenceNumber, ENC_BIG_ENDIAN ),
+ rec( 10, 2, Reserved2 ),
+ rec( 12, 14, FileName14 ),
+ rec( 26, 1, AttributesDef ),
+ rec( 27, 1, FileExecuteType ),
+ rec( 28, 4, FileSize ),
+ rec( 32, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 34, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 36, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8900, 0x9600, 0x9804, 0x9b03,
+ 0x9c03, 0xa100, 0xfd00, 0xff16])
+ # 2222/41, 65
+ pkt = NCP(0x41, "Open File", 'file')
+ pkt.Request((10, 264), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, SearchAttributes ),
+ rec( 9, (1,255), FileName, info_str=(FileName, "Open File: %s", ", %s") ),
+ ])
+ pkt.Reply(44, [
+ rec( 8, 6, FileHandle ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 14, FileName14 ),
+ rec( 30, 1, AttributesDef ),
+ rec( 31, 1, FileExecuteType ),
+ rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8200, 0x9400,
+ 0x9600, 0x9804, 0x9c03, 0xa100, 0xfd00,
+ 0xff16])
+ # 2222/42, 66
+ pkt = NCP(0x42, "Close File", 'file')
+ pkt.Request(14, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Close File - 0x%s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0xff1a])
+ pkt.MakeExpert("ncp42_request")
+ # 2222/43, 67
+ pkt = NCP(0x43, "Create File", 'file')
+ pkt.Request((10, 264), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, AttributesDef ),
+ rec( 9, (1,255), FileName, info_str=(FileName, "Create File: %s", ", %s") ),
+ ])
+ pkt.Reply(44, [
+ rec( 8, 6, FileHandle ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 14, FileName14 ),
+ rec( 30, 1, AttributesDef ),
+ rec( 31, 1, FileExecuteType ),
+ rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9900, 0x9b03, 0x9c03, 0xfd00,
+ 0xff00])
+ # 2222/44, 68
+ pkt = NCP(0x44, "Erase File", 'file')
+ pkt.Request((10, 264), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, SearchAttributes ),
+ rec( 9, (1,255), FileName, info_str=(FileName, "Erase File: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8a00, 0x8d00, 0x8e00, 0x8f00,
+ 0x9001, 0x9600, 0x9804, 0x9b03, 0x9c03,
+ 0xa100, 0xfd00, 0xff00])
+ # 2222/45, 69
+ pkt = NCP(0x45, "Rename File", 'file')
+ pkt.Request((12, 520), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, SearchAttributes ),
+ rec( 9, (1,255), FileName, info_str=(FileName, "Rename File: %s", ", %s") ),
+ rec( -1, 1, TargetDirHandle ),
+ rec( -1, (1, 255), NewFileNameLen ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8701, 0x8b00, 0x8d00, 0x8e00,
+ 0x8f00, 0x9001, 0x9101, 0x9201, 0x9600,
+ 0x9804, 0x9a00, 0x9b03, 0x9c03, 0xa100,
+ 0xfd00, 0xff16])
+ # 2222/46, 70
+ pkt = NCP(0x46, "Set File Attributes", 'file')
+ pkt.Request((11, 265), [
+ rec( 7, 1, AttributesDef ),
+ rec( 8, 1, DirHandle ),
+ rec( 9, 1, SearchAttributes ),
+ rec( 10, (1,255), FileName, info_str=(FileName, "Set File Attributes: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x8d00, 0x8e00, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfd00,
+ 0xff16])
+ # 2222/47, 71
+ pkt = NCP(0x47, "Get Current Size of File", 'file')
+ pkt.Request(14, [
+ rec(7, 1, Reserved ),
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Get Current Size of File - 0x%s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, FileSize, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8800])
+ # 2222/48, 72
+ pkt = NCP(0x48, "Read From A File", 'file')
+ pkt.Request(20, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Read From File - 0x%s", ", %s") ),
+ rec( 14, 4, FileOffset, ENC_BIG_ENDIAN ),
+ rec( 18, 2, MaxBytes, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 2, NumBytes, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0xff1b])
+ # 2222/49, 73
+ pkt = NCP(0x49, "Write to a File", 'file')
+ pkt.Request(20, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Write to a File - 0x%s", ", %s") ),
+ rec( 14, 4, FileOffset, ENC_BIG_ENDIAN ),
+ rec( 18, 2, MaxBytes, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0104, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xff1b])
+ # 2222/4A, 74
+ pkt = NCP(0x4A, "Copy from One File to Another", 'file')
+ pkt.Request(30, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle ),
+ rec( 14, 6, TargetFileHandle ),
+ rec( 20, 4, FileOffset, ENC_BIG_ENDIAN ),
+ rec( 24, 4, TargetFileOffset, ENC_BIG_ENDIAN ),
+ rec( 28, 2, BytesToCopy, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(12, [
+ rec( 8, 4, BytesActuallyTransferred, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0104, 0x8300, 0x8800, 0x9300, 0x9400,
+ 0x9500, 0x9600, 0xa201, 0xff1b])
+ # 2222/4B, 75
+ pkt = NCP(0x4B, "Set File Time Date Stamp", 'file')
+ pkt.Request(18, [
+ rec( 7, 1, Reserved ),
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Set Time and Date Stamp for File - 0x%s", ", %s") ),
+ rec( 14, 2, FileTime, ENC_BIG_ENDIAN ),
+ rec( 16, 2, FileDate, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9400, 0x9600, 0xfb08])
+ # 2222/4C, 76
+ pkt = NCP(0x4C, "Open File", 'file')
+ pkt.Request((11, 265), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, SearchAttributes ),
+ rec( 9, 1, AccessRightsMask ),
+ rec( 10, (1,255), FileName, info_str=(FileName, "Open File: %s", ", %s") ),
+ ])
+ pkt.Reply(44, [
+ rec( 8, 6, FileHandle ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 14, FileName14 ),
+ rec( 30, 1, AttributesDef ),
+ rec( 31, 1, FileExecuteType ),
+ rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8200, 0x9400,
+ 0x9600, 0x9804, 0x9c03, 0xa100, 0xfd00,
+ 0xff16])
+ # 2222/4D, 77
+ pkt = NCP(0x4D, "Create File", 'file')
+ pkt.Request((10, 264), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, AttributesDef ),
+ rec( 9, (1,255), FileName, info_str=(FileName, "Create File: %s", ", %s") ),
+ ])
+ pkt.Reply(44, [
+ rec( 8, 6, FileHandle ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 14, FileName14 ),
+ rec( 30, 1, AttributesDef ),
+ rec( 31, 1, FileExecuteType ),
+ rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9900, 0x9b03, 0x9c03, 0xfd00,
+ 0xff00])
+ # 2222/4F, 79
+ pkt = NCP(0x4F, "Set File Extended Attributes", 'file')
+ pkt.Request((11, 265), [
+ rec( 7, 1, AttributesDef ),
+ rec( 8, 1, DirHandle ),
+ rec( 9, 1, AccessRightsMask ),
+ rec( 10, (1,255), FileName, info_str=(FileName, "Set File Extended Attributes: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8c00, 0x8d00, 0x8e00, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfd00,
+ 0xff16])
+ # 2222/54, 84
+ pkt = NCP(0x54, "Open/Create File", 'file')
+ pkt.Request((12, 266), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, AttributesDef ),
+ rec( 9, 1, AccessRightsMask ),
+ rec( 10, 1, ActionFlag ),
+ rec( 11, (1,255), FileName, info_str=(FileName, "Open/Create File: %s", ", %s") ),
+ ])
+ pkt.Reply(44, [
+ rec( 8, 6, FileHandle ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 14, FileName14 ),
+ rec( 30, 1, AttributesDef ),
+ rec( 31, 1, FileExecuteType ),
+ rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
+ rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
+ rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
+ rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
+ rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
+ # 2222/55, 85
+ pkt = NCP(0x55, "Get Sparse File Data Block Bit Map", 'file', has_length=1)
+ pkt.Request(19, [
+ rec( 7, 2, SubFuncStrucLen, ENC_BIG_ENDIAN ),
+ rec( 9, 6, FileHandle, info_str=(FileHandle, "Get Sparse File Data Block Bitmap for File - 0x%s", ", %s") ),
+ rec( 15, 4, FileOffset ),
+ ])
+ pkt.Reply(528, [
+ rec( 8, 4, AllocationBlockSize ),
+ rec( 12, 4, Reserved4 ),
+ rec( 16, 512, BitMap ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8800])
+ # 2222/5601, 86/01
+ pkt = NCP(0x5601, "Close Extended Attribute Handle", 'extended', has_length=0 )
+ pkt.Request(14, [
+ rec( 8, 2, Reserved2 ),
+ rec( 10, 4, EAHandle ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xcf00, 0xd301])
+ # 2222/5602, 86/02
+ pkt = NCP(0x5602, "Write Extended Attribute", 'extended', has_length=0 )
+ pkt.Request((35,97), [
+ rec( 8, 2, EAFlags ),
+ rec( 10, 4, EAHandleOrNetWareHandleOrVolume, ENC_BIG_ENDIAN ),
+ rec( 14, 4, ReservedOrDirectoryNumber ),
+ rec( 18, 4, TtlWriteDataSize ),
+ rec( 22, 4, FileOffset ),
+ rec( 26, 4, EAAccessFlag ),
+ rec( 30, 2, EAValueLength, var='x' ),
+ rec( 32, (2,64), EAKey, info_str=(EAKey, "Write Extended Attribute: %s", ", %s") ),
+ rec( -1, 1, EAValueRep, repeat='x' ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, EAErrorCodes ),
+ rec( 12, 4, EABytesWritten ),
+ rec( 16, 4, NewEAHandle ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xc800, 0xc900, 0xcb00, 0xce00, 0xcf00, 0xd101,
+ 0xd203, 0xd301, 0xd402, 0xda02, 0xdc01, 0xef00, 0xff00])
+ # 2222/5603, 86/03
+ pkt = NCP(0x5603, "Read Extended Attribute", 'extended', has_length=0 )
+ pkt.Request((28,538), [
+ rec( 8, 2, EAFlags ),
+ rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
+ rec( 14, 4, ReservedOrDirectoryNumber ),
+ rec( 18, 4, FileOffset ),
+ rec( 22, 4, InspectSize ),
+ rec( 26, (2,512), EAKey, info_str=(EAKey, "Read Extended Attribute: %s", ", %s") ),
+ ])
+ pkt.Reply((26,536), [
+ rec( 8, 4, EAErrorCodes ),
+ rec( 12, 4, TtlValuesLength ),
+ rec( 16, 4, NewEAHandle ),
+ rec( 20, 4, EAAccessFlag ),
+ rec( 24, (2,512), EAValue ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8800, 0x9c03, 0xc900, 0xce00, 0xcf00, 0xd101,
+ 0xd301, 0xd503])
+ # 2222/5604, 86/04
+ pkt = NCP(0x5604, "Enumerate Extended Attribute", 'extended', has_length=0 )
+ pkt.Request((26,536), [
+ rec( 8, 2, EAFlags ),
+ rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
+ rec( 14, 4, ReservedOrDirectoryNumber ),
+ rec( 18, 4, InspectSize ),
+ rec( 22, 2, SequenceNumber ),
+ rec( 24, (2,512), EAKey, info_str=(EAKey, "Enumerate Extended Attribute: %s", ", %s") ),
+ ])
+ pkt.Reply(28, [
+ rec( 8, 4, EAErrorCodes ),
+ rec( 12, 4, TtlEAs ),
+ rec( 16, 4, TtlEAsDataSize ),
+ rec( 20, 4, TtlEAsKeySize ),
+ rec( 24, 4, NewEAHandle ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8800, 0x8c01, 0xc800, 0xc900, 0xce00, 0xcf00, 0xd101,
+ 0xd301, 0xd503, 0xfb08, 0xff00])
+ # 2222/5605, 86/05
+ pkt = NCP(0x5605, "Duplicate Extended Attributes", 'extended', has_length=0 )
+ pkt.Request(28, [
+ rec( 8, 2, EAFlags ),
+ rec( 10, 2, DstEAFlags ),
+ rec( 12, 4, EAHandleOrNetWareHandleOrVolume ),
+ rec( 16, 4, ReservedOrDirectoryNumber ),
+ rec( 20, 4, EAHandleOrNetWareHandleOrVolume ),
+ rec( 24, 4, ReservedOrDirectoryNumber ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, EADuplicateCount ),
+ rec( 12, 4, EADataSizeDuplicated ),
+ rec( 16, 4, EAKeySizeDuplicated ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8800, 0xd101])
+ # 2222/5701, 87/01
+ pkt = NCP(0x5701, "Open/Create File or Subdirectory", 'file', has_length=0)
+ pkt.Request((30, 284), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, OpenCreateMode ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 2, DesiredAccessRights ),
+ rec( 22, 1, VolumeNumber ),
+ rec( 23, 4, DirectoryBase ),
+ rec( 27, 1, HandleFlag ),
+ rec( 28, 1, PathCount, var="x" ),
+ rec( 29, (1,255), Path, repeat="x", info_str=(Path, "Open or Create: %s", "/%s") ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 4, FileHandle ),
+ rec( 12, 1, OpenCreateAction ),
+ rec( 13, 1, Reserved ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8001, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9400, 0x9600,
+ 0x9804, 0x9900, 0x9b03, 0x9c03, 0xa500, 0xa802, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("file_rights")
+ # 2222/5702, 87/02
+ # NCP 0x5702 "Initialize Search": request gives a namespace and a
+ # volume/directory-base/handle-flag path spec ("x" path components follow);
+ # the reply returns the volume, directory and directory-entry numbers that
+ # seed a subsequent search (see 0x5703).
+ pkt = NCP(0x5702, "Initialize Search", 'file', has_length=0)
+ pkt.Request( (18,272), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, DirectoryBase ),
+ rec( 15, 1, HandleFlag ),
+ rec( 16, 1, PathCount, var="x" ),
+ # Variable-length path components; count "x" comes from PathCount above.
+ rec( 17, (1,255), Path, repeat="x", info_str=(Path, "Set Search Pointer to: %s", "/%s") ),
+ ])
+ pkt.Reply(17, [
+ rec( 8, 1, VolumeNumber ),
+ rec( 9, 4, DirectoryNumber ),
+ rec( 13, 4, DirectoryEntryNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5703, 87/03
+ # NCP 0x5703 "Search for File or Subdirectory": continues a search started
+ # by 0x5702.  The reply layout is conditional on flags decoded from the
+ # request: with ext_info_newstyle == 0 each selected struct is emitted and
+ # each unselected one is replaced by its Pad* placeholder; with
+ # ext_info_newstyle == 1 only the selected structs appear.
+ pkt = NCP(0x5703, "Search for File or Subdirectory", 'file', has_length=0)
+ pkt.Request((26, 280), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ # NOTE: "Seach" spelling matches the struct's definition elsewhere in
+ # this file; renaming here alone would break that reference.
+ rec( 16, 9, SeachSequenceStruct ),
+ rec( 25, (1,255), SearchPattern, info_str=(SearchPattern, "Search for: %s", "/%s") ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 9, SeachSequenceStruct ),
+ rec( 17, 1, Reserved ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ # NOTE(review): unlike 0x5701 above, this new-style list has no
+ # TotalStreamSizeStruct entry — confirm against the NCP call reference
+ # before treating that as an omission.
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ srec( DStreamActual, req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamLogical, req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ # Reply size varies with the conditional records above.
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5704, 87/04
+ # NCP 0x5704 "Rename Or Move a File or Subdirectory": two path specs in one
+ # request — source ("x" components) then destination ("y" components); the
+ # DestPath record uses offset -1, i.e. it starts wherever the variable
+ # source path ended.  Reply is status-only (8 bytes).
+ pkt = NCP(0x5704, "Rename Or Move a File or Subdirectory", 'file', has_length=0)
+ pkt.Request((28, 536), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, RenameFlag ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, 1, VolumeNumber ),
+ rec( 20, 4, DirectoryBase ),
+ rec( 24, 1, HandleFlag ),
+ rec( 25, 1, PathCount, var="y" ),
+ rec( 26, (1, 255), Path, repeat="x", info_str=(Path, "Rename or Move: %s", "/%s") ),
+ rec( -1, (1,255), DestPath, repeat="y" ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9100, 0x9200, 0x9600,
+ 0x9804, 0x9a00, 0x9b03, 0x9c03, 0x9e00, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5705, 87/05
+ # NCP 0x5705 "Scan File or Subdirectory for Trustees": sequence-number
+ # driven enumeration; reply returns the next sequence number plus "x"
+ # 6-byte trustee entries.
+ pkt = NCP(0x5705, "Scan File or Subdirectory for Trustees", 'file', has_length=0)
+ pkt.Request((24, 278), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 4, SequenceNumber ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 4, DirectoryBase ),
+ rec( 21, 1, HandleFlag ),
+ rec( 22, 1, PathCount, var="x" ),
+ rec( 23, (1, 255), Path, repeat="x", info_str=(Path, "Scan Trustees for: %s", "/%s") ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 2, ObjectIDCount, var="x" ),
+ rec( 14, 6, TrusteeStruct, repeat="x" ),
+ ])
+ # NOTE(review): 0x9c04 below where sibling packets list 0x9c03 — confirm
+ # against the NCP completion-code list before "fixing".
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c04, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5706, 87/06
+ # NCP 0x5706 "Obtain File or SubDirectory Information": same conditional
+ # old-style/new-style reply scheme as 0x5701/0x5703.  This verb also emits
+ # FileSize64bitStruct when ext_info_64_bit_fs is set.
+ pkt = NCP(0x5706, "Obtain File or SubDirectory Information", 'file', has_length=0)
+ # NOTE(review): first record is at offset 10 — bytes 8-9 of the request
+ # are not described here; confirm against the NCP call reference.
+ pkt.Request((24,278), [
+ rec( 10, 1, SrcNameSpace ),
+ rec( 11, 1, DestNameSpace ),
+ rec( 12, 2, SearchAttributesLow ),
+ # Explicit ENC_LITTLE_ENDIAN here, unlike the other ReturnInfoMask
+ # records in neighboring verbs.
+ rec( 14, 2, ReturnInfoMask, ENC_LITTLE_ENDIAN ),
+ rec( 16, 2, ExtendedInfo ),
+ rec( 18, 1, VolumeNumber ),
+ rec( 19, 4, DirectoryBase ),
+ rec( 23, 1, HandleFlag ),
+ rec( 24, 1, PathCount, var="x" ),
+ rec( 25, (1,255), Path, repeat="x", info_str=(Path, "Obtain Info for: %s", "/%s")),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ # Counted lists at offset -1: each count record is immediately followed
+ # by that many stream structs.
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
+ srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8700, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9802, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5707, 87/07
+ # NCP 0x5707 "Modify File or Subdirectory DOS Information": request carries
+ # a ModifyDOSInfoMask followed by the full set of modifiable DOS fields
+ # (attributes, create/modify/archive stamps and owner IDs, rights masks,
+ # MaxSpace) and then the path spec.  Reply is status-only.
+ pkt = NCP(0x5707, "Modify File or Subdirectory DOS Information", 'file', has_length=0)
+ pkt.Request((62,316), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ModifyDOSInfoMask ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 2, AttributesDef16 ),
+ rec( 18, 1, FileMode ),
+ rec( 19, 1, FileExtendedAttributes ),
+ rec( 20, 2, CreationDate ),
+ rec( 22, 2, CreationTime ),
+ # Object IDs are big-endian on the wire, unlike the date/time words.
+ rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 28, 2, ModifiedDate ),
+ rec( 30, 2, ModifiedTime ),
+ rec( 32, 4, ModifierID, ENC_BIG_ENDIAN ),
+ rec( 36, 2, ArchivedDate ),
+ rec( 38, 2, ArchivedTime ),
+ rec( 40, 4, ArchiverID, ENC_BIG_ENDIAN ),
+ rec( 44, 2, LastAccessedDate ),
+ rec( 46, 2, InheritedRightsMask ),
+ rec( 48, 2, InheritanceRevokeMask ),
+ rec( 50, 4, MaxSpace ),
+ rec( 54, 1, VolumeNumber ),
+ rec( 55, 4, DirectoryBase ),
+ rec( 59, 1, HandleFlag ),
+ rec( 60, 1, PathCount, var="x" ),
+ rec( 61, (1,255), Path, repeat="x", info_str=(Path, "Modify DOS Information for: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8c01, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5708, 87/08
+ # NCP 0x5708 "Delete a File or Subdirectory": path spec only; status reply.
+ pkt = NCP(0x5708, "Delete a File or Subdirectory", 'file', has_length=0)
+ pkt.Request((20,274), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Delete a File or Subdirectory: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8900, 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5709, 87/09
+ # NCP 0x5709 "Set Short Directory Handle": binds DestDirHandle to the
+ # directory named by the path spec; status reply.
+ pkt = NCP(0x5709, "Set Short Directory Handle", 'file', has_length=0)
+ pkt.Request((20,274), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 1, DestDirHandle ),
+ rec( 11, 1, Reserved ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Set Short Directory Handle to: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/570A, 87/10
+ # NCP 0x570A "Add Trustee Set to File or Subdirectory": "y" 7-byte trustee
+ # entries follow the variable path (offset -1 = after the path components).
+ pkt = NCP(0x570A, "Add Trustee Set to File or Subdirectory", 'file', has_length=0)
+ pkt.Request((31,285), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, AccessRightsMaskWord ),
+ rec( 14, 2, ObjectIDCount, var="y" ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 4, DirectoryBase ),
+ rec( 21, 1, HandleFlag ),
+ rec( 22, 1, PathCount, var="x" ),
+ rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Add Trustee Set to: %s", "/%s") ),
+ rec( -1, 7, TrusteeStruct, repeat="y" ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xbf00, 0xfc01, 0xfd00, 0xff16])
+ # 2222/570B, 87/11
+ # NCP 0x570B "Delete Trustee Set from File or SubDirectory": same trailing
+ # trustee-list layout as 0x570A, minus the rights mask.
+ pkt = NCP(0x570B, "Delete Trustee Set from File or SubDirectory", 'file', has_length=0)
+ pkt.Request((27,281), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, ObjectIDCount, var="y" ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Delete Trustee Set from: %s", "/%s") ),
+ rec( -1, 7, TrusteeStruct, repeat="y" ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/570C, 87/12
+ # NCP 0x570C "Allocate Short Directory Handle": reply shape switches on the
+ # decoded alloc_reply_lvl2 flag (level-2 vs level-1 reply struct).
+ pkt = NCP(0x570C, "Allocate Short Directory Handle", 'file', has_length=0)
+ pkt.Request((20,274), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, AllocateMode ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Allocate Short Directory Handle to: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ srec( ReplyLevel2Struct, req_cond="ncp.alloc_reply_lvl2 == TRUE" ),
+ srec( ReplyLevel1Struct, req_cond="ncp.alloc_reply_lvl2 == FALSE" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0x9d00, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5710, 87/16
+ # NCP 0x5710 "Scan Salvageable Files": sequence-driven enumeration of
+ # deleted files.  Reply has a fixed header (deletion time/date/ID, volume,
+ # directory base) followed by the usual old-style/new-style conditional
+ # info structs; note FileNameStruct appears in BOTH styles here, each
+ # guarded by its own newstyle condition.
+ pkt = NCP(0x5710, "Scan Salvageable Files", 'file', has_length=0)
+ pkt.Request((26,280), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 2, ReturnInfoMask ),
+ rec( 12, 2, ExtendedInfo ),
+ rec( 14, 4, SequenceNumber ),
+ rec( 18, 1, VolumeNumber ),
+ rec( 19, 4, DirectoryBase ),
+ rec( 23, 1, HandleFlag ),
+ rec( 24, 1, PathCount, var="x" ),
+ rec( 25, (1,255), Path, repeat="x", info_str=(Path, "Scan for Deleted Files in: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 2, DeletedTime ),
+ rec( 14, 2, DeletedDate ),
+ rec( 16, 4, DeletedID, ENC_BIG_ENDIAN ),
+ rec( 20, 4, VolumeID ),
+ rec( 24, 4, DirectoryBase ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5711, 87/17
+ # NCP 0x5711 "Recover Salvageable File": undeletes the file identified by
+ # the (sequence, volume, directory-base) tuple from a 0x5710 scan.
+ pkt = NCP(0x5711, "Recover Salvageable File", 'file', has_length=0)
+ pkt.Request((23,277), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 4, SequenceNumber ),
+ rec( 14, 4, VolumeID ),
+ rec( 18, 4, DirectoryBase ),
+ rec( 22, (1,255), FileName, info_str=(FileName, "Recover Deleted File: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ # NOTE(review): 0xfe02 is listed before 0xfd00, breaking the ascending
+ # order used elsewhere — harmless if the list is unordered, but confirm.
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa802, 0xbf00, 0xfe02, 0xfd00, 0xff16])
+ # 2222/5712, 87/18
+ # NCP 0x5712 "Purge Salvageable Files": permanently discards the deleted
+ # file identified by the same tuple; fixed-size request, status reply.
+ pkt = NCP(0x5712, "Purge Salvageable Files", 'file', has_length=0)
+ pkt.Request(22, [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 4, SequenceNumber ),
+ rec( 14, 4, VolumeID ),
+ rec( 18, 4, DirectoryBase ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x010a, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5713, 87/19
+ # NCP 0x5713 "Get Name Space Information": reply is a pick-list of structs,
+ # one per bit set in the request's NamesSpaceInfoMask (decoded into the
+ # ncp.ns_info_mask_* fields tested below).
+ pkt = NCP(0x5713, "Get Name Space Information", 'file', has_length=0)
+ pkt.Request(18, [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 1, Reserved ),
+ rec( 11, 1, VolumeNumber ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 2, NamesSpaceInfoMask ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ srec( FileNameStruct, req_cond="ncp.ns_info_mask_modify == TRUE" ),
+ srec( FileAttributesStruct, req_cond="ncp.ns_info_mask_fatt == TRUE" ),
+ srec( CreationDateStruct, req_cond="ncp.ns_info_mask_cdate == TRUE" ),
+ srec( CreationTimeStruct, req_cond="ncp.ns_info_mask_ctime == TRUE" ),
+ srec( OwnerIDStruct, req_cond="ncp.ns_info_mask_owner == TRUE" ),
+ srec( ArchiveDateStruct, req_cond="ncp.ns_info_mask_adate == TRUE" ),
+ srec( ArchiveTimeStruct, req_cond="ncp.ns_info_mask_atime == TRUE" ),
+ srec( ArchiveIdStruct, req_cond="ncp.ns_info_mask_aid == TRUE" ),
+ srec( UpdateDateStruct, req_cond="ncp.ns_info_mask_udate == TRUE" ),
+ srec( UpdateTimeStruct, req_cond="ncp.ns_info_mask_utime == TRUE" ),
+ srec( UpdateIDStruct, req_cond="ncp.ns_info_mask_uid == TRUE" ),
+ srec( LastAccessStruct, req_cond="ncp.ns_info_mask_acc_date == TRUE" ),
+ srec( RightsInfoStruct, req_cond="ncp.ns_info_mask_max_acc_mask == TRUE" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5714, 87/20
+ # NCP 0x5714 "Search for File or Subdirectory Set": the SearchPattern
+ # record is intentionally commented out and the request is declared
+ # fixed-size (27,27); the reply body is handed off to hand-written C code
+ # (see the comment below), so no records are declared here.
+ pkt = NCP(0x5714, "Search for File or Subdirectory Set", 'file', has_length=0)
+ pkt.Request((27, 27), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 2, ReturnInfoCount ),
+ # "Seach" spelling matches the struct's definition elsewhere in this
+ # file; do not rename here alone.
+ rec( 18, 9, SeachSequenceStruct ),
+# rec( 27, (1,255), SearchPattern ),
+ ])
+# The reply packet is dissected in packet-ncp2222.inc
+ pkt.Reply(NO_LENGTH_CHECK, [
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5715, 87/21
+ # NCP 0x5715 "Get Path String from Short Directory Handle": maps a one-byte
+ # directory handle back to its path string.
+ pkt = NCP(0x5715, "Get Path String from Short Directory Handle", 'file', has_length=0)
+ pkt.Request(10, [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DirHandle ),
+ ])
+ pkt.Reply((9,263), [
+ rec( 8, (1,255), Path ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
+ # 2222/5716, 87/22
+ # NCP 0x5716 "Generate Directory Base and Volume Number": resolves a path
+ # (with cross-namespace indicator) to directory bases + volume number.
+ pkt = NCP(0x5716, "Generate Directory Base and Volume Number", 'file', has_length=0)
+ pkt.Request((20,274), [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, dstNSIndicator ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Get Volume and Directory Base from: %s", "/%s") ),
+ ])
+ pkt.Reply(17, [
+ rec( 8, 4, DirectoryBase ),
+ rec( 12, 4, DOSDirectoryBase ),
+ rec( 16, 1, VolumeNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5717, 87/23
+ # NCP 0x5717 "Query Name Space Information Format": returns the fixed /
+ # variable / huge bitmasks and a 32-byte field-length table describing the
+ # name space's info format on the given volume.
+ pkt = NCP(0x5717, "Query Name Space Information Format", 'file', has_length=0)
+ pkt.Request(10, [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, VolumeNumber ),
+ ])
+ pkt.Reply(58, [
+ rec( 8, 4, FixedBitMask ),
+ rec( 12, 4, VariableBitMask ),
+ rec( 16, 4, HugeBitMask ),
+ rec( 20, 2, FixedBitsDefined ),
+ rec( 22, 2, VariableBitsDefined ),
+ rec( 24, 2, HugeBitsDefined ),
+ rec( 26, 32, FieldsLenTable ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5718, 87/24
+ # NCP 0x5718 "Get Name Spaces Loaded List from Volume Number": reply is a
+ # count followed by one NameSpace byte per loaded name space.
+ pkt = NCP(0x5718, "Get Name Spaces Loaded List from Volume Number", 'file', has_length=0)
+ pkt.Request(11, [
+ rec( 8, 2, Reserved2 ),
+ rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Name Spaces Loaded List from Vol: %d", "/%d") ),
+ ])
+ pkt.Reply(11, [
+ rec( 8, 2, NumberOfNSLoaded, var="x" ),
+ rec( 10, 1, NameSpace, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5719, 87/25
+ # Write name-space-specific data (a 512-byte opaque blob, fields selected
+ # by NamesSpaceInfoMask) for a directory entry; reply is the bare 8-byte
+ # NCP header.
+ pkt = NCP(0x5719, "Set Name Space Information", 'file', has_length=0)
+ pkt.Request(531, [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, DirectoryBase ),
+ rec( 15, 2, NamesSpaceInfoMask ),
+ rec( 17, 2, Reserved2 ),
+ rec( 19, 512, NSSpecificInfo ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
+ 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
+ 0xff16])
+ # 2222/571A, 87/26
+ # Read "huge" name-space data in chunks: the 16-byte HugeStateInfo acts as
+ # a resumable cursor; the reply returns the next cursor plus 1-255 bytes
+ # of data, so reply length is the range (25,279).
+ pkt = NCP(0x571A, "Get Huge Name Space Information", 'file', has_length=0)
+ pkt.Request(34, [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, VolumeNumber ),
+ rec( 10, 4, DirectoryBase ),
+ rec( 14, 4, HugeBitMask ),
+ rec( 18, 16, HugeStateInfo ),
+ ])
+ pkt.Reply((25,279), [
+ rec( 8, 16, NextHugeStateInfo ),
+ rec( 24, (1,255), HugeData ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
+ 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
+ 0xff16])
+ # 2222/571B, 87/27
+ # Write companion to 0x571A: the request carries 1-255 bytes of huge
+ # name-space data plus the cursor; the reply returns the next cursor and
+ # how many bytes the server consumed.
+ pkt = NCP(0x571B, "Set Huge Name Space Information", 'file', has_length=0)
+ pkt.Request((35,289), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, VolumeNumber ),
+ rec( 10, 4, DirectoryBase ),
+ rec( 14, 4, HugeBitMask ),
+ rec( 18, 16, HugeStateInfo ),
+ rec( 34, (1,255), HugeData ),
+ ])
+ pkt.Reply(28, [
+ rec( 8, 16, NextHugeStateInfo ),
+ rec( 24, 4, HugeDataUsed ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
+ 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
+ 0xff16])
+ # 2222/571C, 87/28
+ # Resolve a directory base to its full path string. Cookie1/Cookie2 plus
+ # PathCookieFlags form a resumable cursor that the reply echoes back, with
+ # "x" path components of 1-255 bytes each.
+ pkt = NCP(0x571C, "Get Full Path String", 'file', has_length=0)
+ pkt.Request((28,282), [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, PathCookieFlags ),
+ rec( 12, 4, Cookie1 ),
+ rec( 16, 4, Cookie2 ),
+ rec( 20, 1, VolumeNumber ),
+ rec( 21, 4, DirectoryBase ),
+ rec( 25, 1, HandleFlag ),
+ rec( 26, 1, PathCount, var="x" ),
+ rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Get Full Path from: %s", "/%s") ),
+ ])
+ pkt.Reply((23,277), [
+ rec( 8, 2, PathCookieFlags ),
+ rec( 10, 4, Cookie1 ),
+ rec( 14, 4, Cookie2 ),
+ rec( 18, 2, PathComponentSize ),
+ rec( 20, 2, PathComponentCount, var='x' ),
+ rec( 22, (1,255), Path, repeat='x' ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
+ 0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
+ 0xff16])
+ # 2222/571D, 87/29
+ # Get effective rights plus optional info structs. The reply layout is
+ # mask-driven: each srec is decoded only when its req_cond (bits of the
+ # request's ReturnInfoMask / ExtendedInfo, recorded by the dissector as
+ # ncp.* fields) matches. Old-style replies (ext_info_newstyle == 0)
+ # substitute a Pad* placeholder for every de-selected struct; new-style
+ # replies (== 1) simply omit them, hence ReqCondSizeVariable().
+ pkt = NCP(0x571D, "Get Effective Directory Rights", 'file', has_length=0)
+ pkt.Request((24, 278), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 4, DirectoryBase ),
+ rec( 21, 1, HandleFlag ),
+ rec( 22, 1, PathCount, var="x" ),
+ rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Get Effective Rights for: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 2, EffectiveRights ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/571E, 87/30
+ # Open or create a file/subdirectory. The reply starts with a big-endian
+ # file handle and the action the server took, then the same mask-driven
+ # conditional struct list as 0x571D (Pad* placeholders in old-style
+ # replies, omitted in new-style). MakeExpert routes this through the
+ # "file_rights" expert-info handler.
+ pkt = NCP(0x571E, "Open/Create File or Subdirectory", 'file', has_length=0)
+ pkt.Request((34, 288), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 1, OpenCreateMode ),
+ rec( 11, 1, Reserved ),
+ rec( 12, 2, SearchAttributesLow ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 2, ReturnInfoMask ),
+ rec( 18, 2, ExtendedInfo ),
+ rec( 20, 4, AttributesDef32 ),
+ rec( 24, 2, DesiredAccessRights ),
+ rec( 26, 1, VolumeNumber ),
+ rec( 27, 4, DirectoryBase ),
+ rec( 31, 1, HandleFlag ),
+ rec( 32, 1, PathCount, var="x" ),
+ rec( 33, (1,255), Path, repeat="x", info_str=(Path, "Open or Create File: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 1, OpenCreateAction ),
+ rec( 13, 1, Reserved ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbe00, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("file_rights")
+ # 2222/571F, 87/31
+ # Look up info for an already-open 6-byte file handle; HandleInfoLevel in
+ # the request selects which of the level 0-5 reply structs follows the
+ # volume/directory-base header, hence the variable-size reply.
+ pkt = NCP(0x571F, "Get File Information", 'file', has_length=0)
+ pkt.Request(15, [
+ rec( 8, 6, FileHandle, info_str=(FileHandle, "Get File Information - 0x%s", ", %s") ),
+ rec( 14, 1, HandleInfoLevel ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, VolumeNumberLong ),
+ rec( 12, 4, DirectoryBase ),
+ srec(HandleInfoLevel0, req_cond="ncp.handle_info_level==0x00" ),
+ srec(HandleInfoLevel1, req_cond="ncp.handle_info_level==0x01" ),
+ srec(HandleInfoLevel2, req_cond="ncp.handle_info_level==0x02" ),
+ srec(HandleInfoLevel3, req_cond="ncp.handle_info_level==0x03" ),
+ srec(HandleInfoLevel4, req_cond="ncp.handle_info_level==0x04" ),
+ srec(HandleInfoLevel5, req_cond="ncp.handle_info_level==0x05" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5720, 87/32
+ # Op-lock-aware open/create ("with Callback"). Reply adds OCRetFlags and
+ # a longer conditional tail than 0x571E: reference ID, NS attributes,
+ # actual/logical data-stream counts, and ExtendedInfo-selected structs
+ # (DOS name, Mac finder/time, sibling count, etc.). The rec() entries at
+ # offset -1 presumably mean "placed after the preceding variable-size
+ # structs" rather than a fixed offset — TODO confirm against the rec()
+ # implementation.
+ pkt = NCP(0x5720, "Open/Create File or Subdirectory with Callback", 'file', has_length=0)
+ pkt.Request((30, 284), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, OpenCreateMode ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 2, DesiredAccessRights ),
+ rec( 22, 1, VolumeNumber ),
+ rec( 23, 4, DirectoryBase ),
+ rec( 27, 1, HandleFlag ),
+ rec( 28, 1, PathCount, var="x" ),
+ rec( 29, (1,255), Path, repeat="x", info_str=(Path, "Open or Create with Op-Lock: %s", "/%s") ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 1, OpenCreateAction ),
+ rec( 13, 1, OCRetFlags ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("file_rights")
+ # 2222/5721, 87/33
+ # "II" variant of 0x5720: the request additionally selects a DataStream
+ # (offset 9) and carries two reserved fields, otherwise the reply mirrors
+ # 0x5720's conditional layout. NOTE(review): the reply FileHandle here has
+ # no explicit ENC_BIG_ENDIAN, unlike 0x571E/0x5720 — confirm the byte
+ # order difference is intentional.
+ pkt = NCP(0x5721, "Open/Create File or Subdirectory II with Callback", 'file', has_length=0)
+ pkt.Request((34, 288), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 1, OpenCreateMode ),
+ rec( 11, 1, Reserved ),
+ rec( 12, 2, SearchAttributesLow ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 2, ReturnInfoMask ),
+ rec( 18, 2, ExtendedInfo ),
+ rec( 20, 4, AttributesDef32 ),
+ rec( 24, 2, DesiredAccessRights ),
+ rec( 26, 1, VolumeNumber ),
+ rec( 27, 4, DirectoryBase ),
+ rec( 31, 1, HandleFlag ),
+ rec( 32, 1, PathCount, var="x" ),
+ rec( 33, (1,255), Path, repeat="x", info_str=(Path, "Open or Create II with Op-Lock: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, FileHandle ),
+ rec( 12, 1, OpenCreateAction ),
+ rec( 13, 1, OCRetFlags ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbe00, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("file_rights")
+ # 2222/5722, 87/34
+ # Op-lock callback control on an open handle. NOTE(review): the request
+ # fields sit at offsets 10-14 while the declared length is 13, and they
+ # start at 10 rather than the usual 8; dissection is delegated to the
+ # "ncp5722_request" expert routine — confirm the offsets against that
+ # hook before changing anything here.
+ pkt = NCP(0x5722, "Open CallBack Control (Op-Lock)", 'file', has_length=0)
+ pkt.Request(13, [
+ rec( 10, 4, CCFileHandle, ENC_BIG_ENDIAN ),
+ rec( 14, 1, CCFunction ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8800, 0xff16])
+ pkt.MakeExpert("ncp5722_request")
+ # 2222/5723, 87/35
+ # Bulk-modify DOS attributes along a path; the reply reports how many
+ # items were checked/changed and echoes the resulting attribute state.
+ pkt = NCP(0x5723, "Modify DOS Attributes on a File or Subdirectory", 'file', has_length=0)
+ pkt.Request((28, 282), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Flags ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 1, VolumeNumber ),
+ rec( 21, 4, DirectoryBase ),
+ rec( 25, 1, HandleFlag ),
+ rec( 26, 1, PathCount, var="x" ),
+ rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Modify DOS Attributes for: %s", "/%s") ),
+ ])
+ pkt.Reply(24, [
+ rec( 8, 4, ItemsChecked ),
+ rec( 12, 4, ItemsChanged ),
+ rec( 16, 4, AttributeValidFlag ),
+ rec( 20, 4, AttributesDef32 ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5724, 87/36
+ # Log (i.e. lock) a file — note the 'sync' class and the "Lock File"
+ # info string; LogFileFlagLow/High plus WaitTime govern the attempt.
+ # Reply is the bare 8-byte header.
+ pkt = NCP(0x5724, "Log File", 'sync', has_length=0)
+ pkt.Request((28, 282), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, Reserved2 ),
+ rec( 12, 1, LogFileFlagLow ),
+ rec( 13, 1, LogFileFlagHigh ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 4, WaitTime ),
+ rec( 20, 1, VolumeNumber ),
+ rec( 21, 4, DirectoryBase ),
+ rec( 25, 1, HandleFlag ),
+ rec( 26, 1, PathCount, var="x" ),
+ rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Lock File: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5725, 87/37
+ # Release the lock held on a file; same path-addressing request shape as
+ # 0x5724 without the flag/wait fields. Header-only reply.
+ pkt = NCP(0x5725, "Release File", 'sync', has_length=0)
+ pkt.Request((20, 274), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, Reserved2 ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Release Lock on: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5726, 87/38
+ # Clear a logged file; request layout is identical to 0x5725.
+ pkt = NCP(0x5726, "Clear File", 'sync', has_length=0)
+ pkt.Request((20, 274), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, Reserved2 ),
+ rec( 12, 1, VolumeNumber ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, PathCount, var="x" ),
+ rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Clear File: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5727, 87/39
+ # Directory disk-space restrictions: the reply is a 1-byte count followed
+ # by that many 9-byte SpaceStruct entries.
+ pkt = NCP(0x5727, "Get Directory Disk Space Restriction", 'file', has_length=0)
+ pkt.Request((19, 273), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 2, Reserved2 ),
+ rec( 11, 1, VolumeNumber ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 1, HandleFlag ),
+ rec( 17, 1, PathCount, var="x" ),
+ rec( 18, (1,255), Path, repeat="x", info_str=(Path, "Get Disk Space Restriction for: %s", "/%s") ),
+ ])
+ pkt.Reply(18, [
+ rec( 8, 1, NumberOfEntries, var="x" ),
+ rec( 9, 9, SpaceStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
+ 0xff16])
+ # 2222/5728, 87/40
+ # Wildcard search returning extended errors. The 9-byte
+ # SeachSequenceStruct (sic — the identifier is spelled that way where it
+ # is defined) is a resumable cursor echoed back in the reply, followed by
+ # the same mask-driven conditional struct list used by 0x571D.
+ pkt = NCP(0x5728, "Search for File or Subdirectory Set (Extended Errors)", 'file', has_length=0)
+ pkt.Request((28, 282), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 2, ReturnInfoCount ),
+ rec( 18, 9, SeachSequenceStruct ),
+ rec( 27, (1,255), SearchPattern, info_str=(SearchPattern, "Search for: %s", ", %s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 9, SeachSequenceStruct ),
+ rec( 17, 1, MoreFlag ),
+ rec( 18, 2, InfoCount ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5729, 87/41
+ # Scan deleted-but-salvageable files. CtrlFlags selects whether the
+ # repeated reply entries carry file names (0x0001 -> ScanInfoFileName,
+ # 0x0000 -> ScanInfoFileNoName); SequenceNumber is the resume cursor.
+ pkt = NCP(0x5729, "Scan Salvageable Files", 'file', has_length=0)
+ pkt.Request((24,278), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, CtrlFlags, ENC_LITTLE_ENDIAN ),
+ rec( 12, 4, SequenceNumber ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 4, DirectoryBase ),
+ rec( 21, 1, HandleFlag ),
+ rec( 22, 1, PathCount, var="x" ),
+ rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Scan Deleted Files: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 4, ScanItems, var="x" ),
+ srec(ScanInfoFileName, req_cond="ncp.ctrl_flags==0x0001", repeat="x" ),
+ srec(ScanInfoFileNoName, req_cond="ncp.ctrl_flags==0x0000", repeat="x" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/572A, 87/42
+ # Purge previously scanned salvageable files: the request carries "x"
+ # 4-byte purge IDs; the reply returns a per-entry completion code.
+ pkt = NCP(0x572A, "Purge Salvageable File List", 'file', has_length=0)
+ pkt.Request(28, [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, PurgeFlags ),
+ rec( 12, 4, VolumeNumberLong ),
+ rec( 16, 4, DirectoryBase ),
+ rec( 20, 4, PurgeCount, var="x" ),
+ rec( 24, 4, PurgeList, repeat="x" ),
+ ])
+ pkt.Reply(16, [
+ rec( 8, 4, PurgeCount, var="x" ),
+ rec( 12, 4, PurgeCcode, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/572B, 87/43
+ # Revoke (or, depending on RevQueryFlag, query) open rights on a file
+ # handle; the reply echoes the handle and the remaining open rights.
+ pkt = NCP(0x572B, "Revoke File Handle Rights", 'file', has_length=0)
+ pkt.Request(17, [
+ rec( 8, 3, Reserved3 ),
+ rec( 11, 1, RevQueryFlag ),
+ rec( 12, 4, FileHandle ),
+ rec( 16, 1, RemoveOpenRights ),
+ ])
+ pkt.Reply(13, [
+ rec( 8, 4, FileHandle ),
+ rec( 12, 1, OpenRights ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ # 2222/572C, 87/44
+ pkt = NCP(0x572C, "Update File Handle Rights", 'file', has_length=0)
+ pkt.Request(24, [
+ rec( 8, 2, Reserved2 ),
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 1, NameSpace ),
+ rec( 12, 4, DirectoryNumber ),
+ rec( 16, 2, AccessRightsMaskWord ),
+ rec( 18, 2, NewAccessRights ),
+ rec( 20, 4, FileHandle, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(16, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 4, EffectiveRights, ENC_LITTLE_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("ncp572c")
+ # 2222/5740, 87/64
+ pkt = NCP(0x5740, "Read from File", 'file', has_length=0)
+ pkt.Request(22, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
+ rec( 20, 2, NumBytes, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(10, [
+ rec( 8, 2, NumBytes, ENC_BIG_ENDIAN),
+])
+ pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0x9500, 0xa201, 0xfd00, 0xff1b])
+ # 2222/5741, 87/65
+ pkt = NCP(0x5741, "Write to File", 'file', has_length=0)
+ pkt.Request(22, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
+ rec( 20, 2, NumBytes, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xfd00, 0xff1b])
+ # 2222/5742, 87/66
+ pkt = NCP(0x5742, "Get Current Size of File", 'file', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(16, [
+ rec( 8, 8, FileSize64bit),
+])
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x8800, 0x9600, 0xfd02, 0xff01])
+ # 2222/5743, 87/67
+ pkt = NCP(0x5743, "Log Physical Record", 'file', has_length=0)
+ pkt.Request(36, [
+ rec( 8, 4, LockFlag, ENC_BIG_ENDIAN ),
+ rec(12, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec(16, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
+ rec(24, 8, Length64bit, ENC_BIG_ENDIAN ),
+ rec(32, 4, LockTimeout, ENC_BIG_ENDIAN),
+])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x8800, 0x9600, 0xfb08, 0xfd02, 0xff01])
+ # 2222/5744, 87/68
+ pkt = NCP(0x5744, "Release Physical Record", 'file', has_length=0)
+ pkt.Request(28, [
+ rec(8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec(12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
+ rec(20, 8, Length64bit, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff1a])
+ # 2222/5745, 87/69
+ pkt = NCP(0x5745, "Clear Physical Record", 'file', has_length=0)
+ pkt.Request(28, [
+ rec(8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec(12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
+ rec(20, 8, Length64bit, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff1a])
+ # 2222/5746, 87/70
+ pkt = NCP(0x5746, "Copy from One File to Another (64 Bit offset capable)", 'file', has_length=0)
+ pkt.Request(44, [
+ rec(8, 6, SourceFileHandle, ENC_BIG_ENDIAN ),
+ rec(14, 6, TargetFileHandle, ENC_BIG_ENDIAN ),
+ rec(20, 8, SourceFileOffset, ENC_BIG_ENDIAN ),
+ rec(28, 8, TargetFileOffset64bit, ENC_BIG_ENDIAN ),
+ rec(36, 8, BytesToCopy64bit, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(16, [
+ rec( 8, 8, BytesActuallyTransferred64bit, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400,
+ 0x9500, 0x9600, 0xa201])
+ # 2222/5747, 87/71
+ pkt = NCP(0x5747, "Get Sparse File Data Block Bit Map", 'file', has_length=0)
+ pkt.Request(23, [
+ rec(8, 6, SourceFileHandle, ENC_BIG_ENDIAN ),
+ rec(14, 8, SourceFileOffset, ENC_BIG_ENDIAN ),
+ rec(22, 1, ExtentListFormat, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 1, ExtentListFormat ),
+ rec( 9, 1, RetExtentListCount, var="x" ),
+ rec( 10, 8, EndingOffset ),
+ srec(zFileMap_Allocation, req_cond="ncp.ext_lst_format==0", repeat="x" ),
+ srec(zFileMap_Logical, req_cond="ncp.ext_lst_format==1", repeat="x" ),
+ srec(zFileMap_Physical, req_cond="ncp.ext_lst_format==2", repeat="x" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8800, 0xff00])
+ # 2222/5748, 87/72
+ pkt = NCP(0x5748, "Read a File", 'file', has_length=0)
+ pkt.Request(24, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
+ rec( 20, 4, NumBytesLong, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, NumBytesLong, ENC_BIG_ENDIAN),
+ rec( 12, PROTO_LENGTH_UNKNOWN, Data64),
+ #decoded in packet-ncp2222.inc
+ # rec( NumBytesLong, 4, BytesActuallyTransferred64bit, ENC_BIG_ENDIAN),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0x9500, 0xa201, 0xfd00, 0xff1b])
+
+ # 2222/5749, 87/73
+ pkt = NCP(0x5749, "Write to a File", 'file', has_length=0)
+ pkt.Request(24, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
+ rec( 20, 4, NumBytesLong, ENC_BIG_ENDIAN ),
+])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xfd00, 0xff1b])
+
+ # 2222/5801, 8801
+ pkt = NCP(0x5801, "Query Volume Audit Status", "auditing", has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, ConnectionNumber ),
+ ])
+ pkt.Reply(40, [
+ rec(8, 32, NWAuditStatus ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5802, 8802
+ pkt = NCP(0x5802, "Add User Audit Property", "auditing", has_length=0)
+ pkt.Request(25, [
+ rec(8, 4, AuditIDType ),
+ rec(12, 4, AuditID ),
+ rec(16, 4, AuditHandle ),
+ rec(20, 4, ObjectID ),
+ rec(24, 1, AuditFlag ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5803, 8803
+ pkt = NCP(0x5803, "Add Auditor Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16])
+ # 2222/5804, 8804
+ pkt = NCP(0x5804, "Change Auditor Volume Password", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5805, 8805
+ pkt = NCP(0x5805, "Check Auditor Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5806, 8806
+ pkt = NCP(0x5806, "Delete User Audit Property", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff21])
+ # 2222/5807, 8807
+ pkt = NCP(0x5807, "Disable Auditing On A Volume", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5808, 8808
+ pkt = NCP(0x5808, "Enable Auditing On A Volume", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16])
+ # 2222/5809, 8809
+ pkt = NCP(0x5809, "Query User Being Audited", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/580A, 88,10
+ pkt = NCP(0x580A, "Read Audit Bit Map", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/580B, 88,11
+ pkt = NCP(0x580B, "Read Audit File Configuration Header", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/580D, 88,13
+ pkt = NCP(0x580D, "Remove Auditor Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/580E, 88,14
+ pkt = NCP(0x580E, "Reset Audit File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+
+ # 2222/580F, 88,15
+ pkt = NCP(0x580F, "Auditing NCP", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfb00, 0xfd00, 0xff16])
+ # 2222/5810, 88,16
+ pkt = NCP(0x5810, "Write Audit Bit Map", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5811, 88,17
+ pkt = NCP(0x5811, "Write Audit File Configuration Header", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5812, 88,18
+ pkt = NCP(0x5812, "Change Auditor Volume Password2", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5813, 88,19
+ pkt = NCP(0x5813, "Return Audit Flags", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5814, 88,20
+ pkt = NCP(0x5814, "Close Old Audit File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5816, 88,22
+ pkt = NCP(0x5816, "Check Level Two Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16])
+ # 2222/5817, 88,23
+ pkt = NCP(0x5817, "Return Old Audit File List", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5818, 88,24
+ pkt = NCP(0x5818, "Init Audit File Reads", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5819, 88,25
+ pkt = NCP(0x5819, "Read Auditing File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/581A, 88,26
+ pkt = NCP(0x581A, "Delete Old Audit File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/581E, 88,30
+ pkt = NCP(0x581E, "Restart Volume auditing", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/581F, 88,31
+ pkt = NCP(0x581F, "Set Volume Password", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
+ # 2222/5901, 89,01
+ pkt = NCP(0x5901, "Open/Create File or Subdirectory", "enhanced", has_length=0)
+ pkt.Request((37,290), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, OpenCreateMode ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 2, DesiredAccessRights ),
+ rec( 22, 4, DirectoryBase ),
+ rec( 26, 1, VolumeNumber ),
+ rec( 27, 1, HandleFlag ),
+ rec( 28, 1, DataTypeFlag ),
+ rec( 29, 5, Reserved5 ),
+ rec( 34, 1, PathCount, var="x" ),
+ rec( 35, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create File or Subdirectory: %s", "/%s") ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 1, OpenCreateAction ),
+ rec( 13, 1, Reserved ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
+ srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9900, 0x9b03, 0x9c03, 0xa901, 0xa500, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("file_rights")
+ # 2222/5902, 89/02
+ pkt = NCP(0x5902, "Initialize Search", 'enhanced', has_length=0)
+ pkt.Request( (25,278), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 4, DirectoryBase ),
+ rec( 14, 1, VolumeNumber ),
+ rec( 15, 1, HandleFlag ),
+ rec( 16, 1, DataTypeFlag ),
+ rec( 17, 5, Reserved5 ),
+ rec( 22, 1, PathCount, var="x" ),
+ rec( 23, (2,255), Path16, repeat="x", info_str=(Path16, "Set Search Pointer to: %s", "/%s") ),
+ ])
+ pkt.Reply(17, [
+ rec( 8, 1, VolumeNumber ),
+ rec( 9, 4, DirectoryNumber ),
+ rec( 13, 4, DirectoryEntryNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5903, 89/03
+ pkt = NCP(0x5903, "Search for File or Subdirectory", 'enhanced', has_length=0)
+ pkt.Request(26, [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 9, SeachSequenceStruct ),
+ rec( 25, 1, DataTypeFlag ),
+ # next field is dissected in packet-ncp2222.inc
+ #rec( 26, (2,255), SearchPattern16, info_str=(SearchPattern16, "Search for: %s", "/%s") ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 9, SeachSequenceStruct ),
+ rec( 17, 1, Reserved ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
+ srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5904, 89/04
+ pkt = NCP(0x5904, "Rename Or Move a File or Subdirectory", 'enhanced', has_length=0)
+ pkt.Request((42, 548), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, RenameFlag ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 12, SrcEnhNWHandlePathS1 ),
+ rec( 24, 1, PathCount, var="x" ),
+ rec( 25, 12, DstEnhNWHandlePathS1 ),
+ rec( 37, 1, PathCount, var="y" ),
+ rec( 38, (2, 255), Path16, repeat="x", info_str=(Path16, "Rename or Move: %s", "/%s") ),
+ rec( -1, (2,255), DestPath16, repeat="y" ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9200, 0x9600,
+ 0x9804, 0x9a00, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5905, 89/05
+ pkt = NCP(0x5905, "Scan File or Subdirectory for Trustees", 'enhanced', has_length=0)
+ pkt.Request((31, 284), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, MaxReplyObjectIDCount ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 4, SequenceNumber ),
+ rec( 16, 4, DirectoryBase ),
+ rec( 20, 1, VolumeNumber ),
+ rec( 21, 1, HandleFlag ),
+ rec( 22, 1, DataTypeFlag ),
+ rec( 23, 5, Reserved5 ),
+ rec( 28, 1, PathCount, var="x" ),
+ rec( 29, (2, 255), Path16, repeat="x", info_str=(Path16, "Scan Trustees for: %s", "/%s") ),
+ ])
+ pkt.Reply(20, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 2, ObjectIDCount, var="x" ),
+ rec( 14, 6, TrusteeStruct, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5906, 89/06
+ pkt = NCP(0x5906, "Obtain File or SubDirectory Information", 'enhanced', has_length=0)
+ pkt.Request((22), [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask, ENC_LITTLE_ENDIAN ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 4, DirectoryBase ),
+ rec( 20, 1, VolumeNumber ),
+ rec( 21, 1, HandleFlag ),
+ #
+ # Move to packet-ncp2222.inc
+ # The datatype flag indicates if the path is represented as ASCII or UTF8
+ # ASCII has a 1 byte count field whereas UTF8 has a two byte count field.
+ #
+ #rec( 22, 1, DataTypeFlag ),
+ #rec( 23, 5, Reserved5 ),
+ #rec( 28, 1, PathCount, var="x" ),
+ #rec( 29, (2,255), Path16, repeat="x", info_str=(Path16, "Obtain Info for: %s", "/%s")),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
+ srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8700, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5907, 89/07
+ pkt = NCP(0x5907, "Modify File or Subdirectory DOS Information", 'enhanced', has_length=0)
+ pkt.Request((69,322), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ModifyDOSInfoMask ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 2, AttributesDef16 ),
+ rec( 18, 1, FileMode ),
+ rec( 19, 1, FileExtendedAttributes ),
+ rec( 20, 2, CreationDate ),
+ rec( 22, 2, CreationTime ),
+ rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ),
+ rec( 28, 2, ModifiedDate ),
+ rec( 30, 2, ModifiedTime ),
+ rec( 32, 4, ModifierID, ENC_BIG_ENDIAN ),
+ rec( 36, 2, ArchivedDate ),
+ rec( 38, 2, ArchivedTime ),
+ rec( 40, 4, ArchiverID, ENC_BIG_ENDIAN ),
+ rec( 44, 2, LastAccessedDate ),
+ rec( 46, 2, InheritedRightsMask ),
+ rec( 48, 2, InheritanceRevokeMask ),
+ rec( 50, 4, MaxSpace ),
+ rec( 54, 4, DirectoryBase ),
+ rec( 58, 1, VolumeNumber ),
+ rec( 59, 1, HandleFlag ),
+ rec( 60, 1, DataTypeFlag ),
+ rec( 61, 5, Reserved5 ),
+ rec( 66, 1, PathCount, var="x" ),
+ rec( 67, (2,255), Path16, repeat="x", info_str=(Path16, "Modify DOS Information for: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x7902, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8c01, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5908, 89/08
+ pkt = NCP(0x5908, "Delete a File or Subdirectory", 'enhanced', has_length=0)
+ pkt.Request((27,280), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, DataTypeFlag ),
+ rec( 19, 5, Reserved5 ),
+ rec( 24, 1, PathCount, var="x" ),
+ rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Delete a File or Subdirectory: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8900, 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5909, 89/09
+ pkt = NCP(0x5909, "Set Short Directory Handle", 'enhanced', has_length=0)
+ pkt.Request((27,280), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 1, DestDirHandle ),
+ rec( 11, 1, Reserved ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, DataTypeFlag ),
+ rec( 19, 5, Reserved5 ),
+ rec( 24, 1, PathCount, var="x" ),
+ rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Set Short Directory Handle to: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/590A, 89/10
+ pkt = NCP(0x590A, "Add Trustee Set to File or Subdirectory", 'enhanced', has_length=0)
+ pkt.Request((37,290), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, AccessRightsMaskWord ),
+ rec( 14, 2, ObjectIDCount, var="y" ),
+ rec( -1, 6, TrusteeStruct, repeat="y" ),
+ rec( -1, 4, DirectoryBase ),
+ rec( -1, 1, VolumeNumber ),
+ rec( -1, 1, HandleFlag ),
+ rec( -1, 1, DataTypeFlag ),
+ rec( -1, 5, Reserved5 ),
+ rec( -1, 1, PathCount, var="x" ),
+ rec( -1, (2,255), Path16, repeat="x", info_str=(Path16, "Add Trustee Set to: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfc01, 0xfd00, 0xff16])
+ # 2222/590B, 89/11
+ pkt = NCP(0x590B, "Delete Trustee Set from File or SubDirectory", 'enhanced', has_length=0)
+ pkt.Request((34,287), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 2, ObjectIDCount, var="y" ),
+ rec( 12, 7, TrusteeStruct, repeat="y" ),
+ rec( 19, 4, DirectoryBase ),
+ rec( 23, 1, VolumeNumber ),
+ rec( 24, 1, HandleFlag ),
+ rec( 25, 1, DataTypeFlag ),
+ rec( 26, 5, Reserved5 ),
+ rec( 31, 1, PathCount, var="x" ),
+ rec( 32, (2,255), Path16, repeat="x", info_str=(Path16, "Delete Trustee Set from: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/590C, 89/12
+ pkt = NCP(0x590C, "Allocate Short Directory Handle", 'enhanced', has_length=0)
+ pkt.Request((27,280), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, AllocateMode ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, DataTypeFlag ),
+ rec( 19, 5, Reserved5 ),
+ rec( 24, 1, PathCount, var="x" ),
+ rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Allocate Short Directory Handle to: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ srec( ReplyLevel2Struct, req_cond="ncp.alloc_reply_lvl2 == TRUE" ),
+ srec( ReplyLevel1Struct, req_cond="ncp.alloc_reply_lvl2 == FALSE" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+# 2222/5910, 89/16
+ pkt = NCP(0x5910, "Scan Salvageable Files", 'enhanced', has_length=0)
+ pkt.Request((33,286), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 2, ReturnInfoMask ),
+ rec( 12, 2, ExtendedInfo ),
+ rec( 14, 4, SequenceNumber ),
+ rec( 18, 4, DirectoryBase ),
+ rec( 22, 1, VolumeNumber ),
+ rec( 23, 1, HandleFlag ),
+ rec( 24, 1, DataTypeFlag ),
+ rec( 25, 5, Reserved5 ),
+ rec( 30, 1, PathCount, var="x" ),
+ rec( 31, (2,255), Path16, repeat="x", info_str=(Path16, "Scan for Deleted Files in: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, SequenceNumber ),
+ rec( 12, 2, DeletedTime ),
+ rec( 14, 2, DeletedDate ),
+ rec( 16, 4, DeletedID, ENC_BIG_ENDIAN ),
+ rec( 20, 4, VolumeID ),
+ rec( 24, 4, DirectoryBase ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( ReferenceIDStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_id == 1)" ),
+ srec( NSAttributeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns_attr == 1)" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
+ srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5911, 89/17
+ pkt = NCP(0x5911, "Recover Salvageable File", 'enhanced', has_length=0)
+ pkt.Request((24,278), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, Reserved ),
+ rec( 10, 4, SequenceNumber ),
+ rec( 14, 4, VolumeID ),
+ rec( 18, 4, DirectoryBase ),
+ rec( 22, 1, DataTypeFlag ),
+ rec( 23, (1,255), FileName16, info_str=(FileName16, "Recover Deleted File: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5913, 89/19
+ pkt = NCP(0x5913, "Get Name Space Information", 'enhanced', has_length=0)
+ pkt.Request(18, [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 1, DataTypeFlag ),
+ rec( 11, 1, VolumeNumber ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 2, NamesSpaceInfoMask ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ srec( FileName16Struct, req_cond="ncp.ns_info_mask_modify == TRUE" ),
+ srec( FileAttributesStruct, req_cond="ncp.ns_info_mask_fatt == TRUE" ),
+ srec( CreationDateStruct, req_cond="ncp.ns_info_mask_cdate == TRUE" ),
+ srec( CreationTimeStruct, req_cond="ncp.ns_info_mask_ctime == TRUE" ),
+ srec( OwnerIDStruct, req_cond="ncp.ns_info_mask_owner == TRUE" ),
+ srec( ArchiveDateStruct, req_cond="ncp.ns_info_mask_adate == TRUE" ),
+ srec( ArchiveTimeStruct, req_cond="ncp.ns_info_mask_atime == TRUE" ),
+ srec( ArchiveIdStruct, req_cond="ncp.ns_info_mask_aid == TRUE" ),
+ srec( UpdateDateStruct, req_cond="ncp.ns_info_mask_udate == TRUE" ),
+ srec( UpdateTimeStruct, req_cond="ncp.ns_info_mask_utime == TRUE" ),
+ srec( UpdateIDStruct, req_cond="ncp.ns_info_mask_uid == TRUE" ),
+ srec( LastAccessStruct, req_cond="ncp.ns_info_mask_acc_date == TRUE" ),
+ srec( RightsInfoStruct, req_cond="ncp.ns_info_mask_max_acc_mask == TRUE" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5914, 89/20
+ pkt = NCP(0x5914, "Search for File or Subdirectory Set", 'enhanced', has_length=0)
+ pkt.Request((28, 28), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 2, ReturnInfoCount ),
+ rec( 18, 9, SeachSequenceStruct ),
+ rec( 27, 1, DataTypeFlag ),
+ # next field is dissected in packet-ncp2222.inc
+ #rec( 28, (2,255), SearchPattern16 ),
+ ])
+# The reply packet is dissected in packet-ncp2222.inc
+ pkt.Reply(NO_LENGTH_CHECK, [
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5916, 89/22
+ pkt = NCP(0x5916, "Generate Directory Base and Volume Number", 'enhanced', has_length=0)
+ pkt.Request((27,280), [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, dstNSIndicator ),
+ rec( 12, 4, DirectoryBase ),
+ rec( 16, 1, VolumeNumber ),
+ rec( 17, 1, HandleFlag ),
+ rec( 18, 1, DataTypeFlag ),
+ rec( 19, 5, Reserved5 ),
+ rec( 24, 1, PathCount, var="x" ),
+ rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Get Volume and Directory Base from: %s", "/%s") ),
+ ])
+ pkt.Reply(17, [
+ rec( 8, 4, DirectoryBase ),
+ rec( 12, 4, DOSDirectoryBase ),
+ rec( 16, 1, VolumeNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/5919, 89/25
+ pkt = NCP(0x5919, "Set Name Space Information", 'enhanced', has_length=0)
+ pkt.Request(530, [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 1, VolumeNumber ),
+ rec( 11, 4, DirectoryBase ),
+ rec( 15, 2, NamesSpaceInfoMask ),
+ rec( 17, 1, DataTypeFlag ),
+ rec( 18, 512, NSSpecificInfo ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
+ 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
+ 0xff16])
+ # 2222/591C, 89/28
+ pkt = NCP(0x591C, "Get Full Path String", 'enhanced', has_length=0)
+ pkt.Request((35,288), [
+ rec( 8, 1, SrcNameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, PathCookieFlags ),
+ rec( 12, 4, Cookie1 ),
+ rec( 16, 4, Cookie2 ),
+ rec( 20, 4, DirectoryBase ),
+ rec( 24, 1, VolumeNumber ),
+ rec( 25, 1, HandleFlag ),
+ rec( 26, 1, DataTypeFlag ),
+ rec( 27, 5, Reserved5 ),
+ rec( 32, 1, PathCount, var="x" ),
+ rec( 33, (2,255), Path16, repeat="x", info_str=(Path16, "Get Full Path from: %s", "/%s") ),
+ ])
+ pkt.Reply((24,277), [
+ rec( 8, 2, PathCookieFlags ),
+ rec( 10, 4, Cookie1 ),
+ rec( 14, 4, Cookie2 ),
+ rec( 18, 2, PathComponentSize ),
+ rec( 20, 2, PathComponentCount, var='x' ),
+ rec( 22, (2,255), Path16, repeat='x' ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
+ 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
+ 0xff16])
+ # 2222/591D, 89/29
+ pkt = NCP(0x591D, "Get Effective Directory Rights", 'enhanced', has_length=0)
+ pkt.Request((31, 284), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DestNameSpace ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 4, DirectoryBase ),
+ rec( 20, 1, VolumeNumber ),
+ rec( 21, 1, HandleFlag ),
+ rec( 22, 1, DataTypeFlag ),
+ rec( 23, 5, Reserved5 ),
+ rec( 28, 1, PathCount, var="x" ),
+ rec( 29, (2,255), Path16, repeat="x", info_str=(Path16, "Get Effective Rights for: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 2, EffectiveRights, ENC_LITTLE_ENDIAN ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ # 2222/591E, 89/30
+ pkt = NCP(0x591E, "Open/Create File or Subdirectory", 'enhanced', has_length=0)
+ pkt.Request((41, 294), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, DataStream ),
+ rec( 10, 1, OpenCreateMode ),
+ rec( 11, 1, Reserved ),
+ rec( 12, 2, SearchAttributesLow ),
+ rec( 14, 2, Reserved2 ),
+ rec( 16, 2, ReturnInfoMask ),
+ rec( 18, 2, ExtendedInfo ),
+ rec( 20, 4, AttributesDef32 ),
+ rec( 24, 2, DesiredAccessRights ),
+ rec( 26, 4, DirectoryBase ),
+ rec( 30, 1, VolumeNumber ),
+ rec( 31, 1, HandleFlag ),
+ rec( 32, 1, DataTypeFlag ),
+ rec( 33, 5, Reserved5 ),
+ rec( 38, 1, PathCount, var="x" ),
+ rec( 39, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create File: %s", "/%s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 1, OpenCreateAction ),
+ rec( 13, 1, Reserved ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("file_rights")
+ # 2222/5920, 89/32
+ pkt = NCP(0x5920, "Open/Create File or Subdirectory with Callback", 'enhanced', has_length=0)
+ pkt.Request((37, 290), [
+ rec( 8, 1, NameSpace ),
+ rec( 9, 1, OpenCreateMode ),
+ rec( 10, 2, SearchAttributesLow ),
+ rec( 12, 2, ReturnInfoMask ),
+ rec( 14, 2, ExtendedInfo ),
+ rec( 16, 4, AttributesDef32 ),
+ rec( 20, 2, DesiredAccessRights ),
+ rec( 22, 4, DirectoryBase ),
+ rec( 26, 1, VolumeNumber ),
+ rec( 27, 1, HandleFlag ),
+ rec( 28, 1, DataTypeFlag ),
+ rec( 29, 5, Reserved5 ),
+ rec( 34, 1, PathCount, var="x" ),
+ rec( 35, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create with Op-Lock: %s", "/%s") ),
+ ])
+ pkt.Reply( NO_LENGTH_CHECK, [
+ rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
+ rec( 12, 1, OpenCreateAction ),
+ rec( 13, 1, OCRetFlags ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+ srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+ srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+ srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+ srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+ srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+ srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+ srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+ srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+ srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+ srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+ srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+ srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+ srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+ srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+ srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+ srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+ rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+ rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+ srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+ srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
+ srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+ srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+ srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+ srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+ srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+ srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+ srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+ srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
+ srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501,
+ 0x8701, 0x8d00, 0x8f00, 0x9001, 0x9400, 0x9600,
+ 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+ pkt.MakeExpert("file_rights")
+    # 2222/5921, 89/33
+    # Open/Create II with callback: the request's ReturnInfoMask/ExtendedInfo
+    # bits select which conditional structures appear in the reply, hence
+    # NO_LENGTH_CHECK plus ReqCondSizeVariable() below.
+    pkt = NCP(0x5921, "Open/Create File or Subdirectory II with Callback", 'enhanced', has_length=0)
+    pkt.Request((41, 294), [
+            rec( 8, 1, NameSpace ),
+            rec( 9, 1, DataStream ),
+            rec( 10, 1, OpenCreateMode ),
+            rec( 11, 1, Reserved ),
+            rec( 12, 2, SearchAttributesLow ),
+            rec( 14, 2, Reserved2 ),
+            rec( 16, 2, ReturnInfoMask ),
+            rec( 18, 2, ExtendedInfo ),
+            rec( 20, 4, AttributesDef32 ),
+            rec( 24, 2, DesiredAccessRights ),
+            rec( 26, 4, DirectoryBase ),
+            rec( 30, 1, VolumeNumber ),
+            rec( 31, 1, HandleFlag ),
+            rec( 32, 1, DataTypeFlag ),
+            rec( 33, 5, Reserved5 ),
+            rec( 38, 1, PathCount, var="x" ),
+            rec( 39, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create II with Op-Lock: %s", "/%s") ),
+    ])
+    pkt.Reply( NO_LENGTH_CHECK, [
+            # NOTE(review): FileHandle carries no explicit endianness here,
+            # while the 0x5920 reply marks it ENC_BIG_ENDIAN — confirm intended.
+            rec( 8, 4, FileHandle ),
+            rec( 12, 1, OpenCreateAction ),
+            rec( 13, 1, OCRetFlags ),
+            srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+            srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+            srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+            srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+            srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+            srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+            srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+            srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+            srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+            srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+            srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+            srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+            srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+            srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+            srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+            srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+            srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+            srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+            srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+            srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+            srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+            srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+            srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+            srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+            srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+            srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+            srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+            srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+            srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+            srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+            srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+            srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+            srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+            srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
+            srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
+            rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+            srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
+            rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
+            srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
+            srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
+            srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
+            srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
+            srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
+            srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
+            srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
+            srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
+            srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
+            srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
+            srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
+            srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+    pkt.MakeExpert("file_rights")
+    # 2222/5923, 89/35
+    # Modify DOS attributes: fixed-size 24-byte reply echoing how many items
+    # were checked/changed plus the resulting attribute mask.
+    pkt = NCP(0x5923, "Modify DOS Attributes on a File or Subdirectory", 'enhanced', has_length=0)
+    pkt.Request((35, 288), [
+            rec( 8, 1, NameSpace ),
+            rec( 9, 1, Flags ),
+            rec( 10, 2, SearchAttributesLow ),
+            rec( 12, 2, ReturnInfoMask ),
+            rec( 14, 2, ExtendedInfo ),
+            rec( 16, 4, AttributesDef32 ),
+            rec( 20, 4, DirectoryBase ),
+            rec( 24, 1, VolumeNumber ),
+            rec( 25, 1, HandleFlag ),
+            rec( 26, 1, DataTypeFlag ),
+            rec( 27, 5, Reserved5 ),
+            rec( 32, 1, PathCount, var="x" ),
+            rec( 33, (2,255), Path16, repeat="x", info_str=(Path16, "Modify DOS Attributes for: %s", "/%s") ),
+    ])
+    pkt.Reply(24, [
+            rec( 8, 4, ItemsChecked ),
+            rec( 12, 4, ItemsChanged ),
+            rec( 16, 4, AttributeValidFlag ),
+            rec( 20, 4, AttributesDef32 ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+    # 2222/5927, 89/39
+    # Disk-space restrictions for a directory: reply is a counted list of
+    # 9-byte SpaceStructs (count drives the repeat via var/repeat "x").
+    pkt = NCP(0x5927, "Get Directory Disk Space Restriction", 'enhanced', has_length=0)
+    pkt.Request((26, 279), [
+            rec( 8, 1, NameSpace ),
+            rec( 9, 2, Reserved2 ),
+            rec( 11, 4, DirectoryBase ),
+            rec( 15, 1, VolumeNumber ),
+            rec( 16, 1, HandleFlag ),
+            rec( 17, 1, DataTypeFlag ),
+            rec( 18, 5, Reserved5 ),
+            rec( 23, 1, PathCount, var="x" ),
+            rec( 24, (2,255), Path16, repeat="x", info_str=(Path16, "Get Disk Space Restriction for: %s", "/%s") ),
+    ])
+    pkt.Reply(18, [
+            rec( 8, 1, NumberOfEntries, var="x" ),
+            rec( 9, 9, SpaceStruct, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
+                         0xff16])
+    # 2222/5928, 89/40
+    # Search-set with extended errors; reply structures are conditional on the
+    # request's ReturnInfoMask / ExtendedInfo bits (old- vs new-style layout).
+    # NOTE: "SeachSequenceStruct" is the spelling of the struct as declared
+    # elsewhere in this file; do not "fix" it here without renaming the struct.
+    pkt = NCP(0x5928, "Search for File or Subdirectory Set (Extended Errors)", 'enhanced', has_length=0)
+    pkt.Request((30, 283), [
+            rec( 8, 1, NameSpace ),
+            rec( 9, 1, DataStream ),
+            rec( 10, 2, SearchAttributesLow ),
+            rec( 12, 2, ReturnInfoMask ),
+            rec( 14, 2, ExtendedInfo ),
+            rec( 16, 2, ReturnInfoCount ),
+            rec( 18, 9, SeachSequenceStruct ),
+            rec( 27, 1, DataTypeFlag ),
+            rec( 28, (2,255), SearchPattern16, info_str=(SearchPattern16, "Search for: %s", ", %s") ),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+            rec( 8, 9, SeachSequenceStruct ),
+            rec( 17, 1, MoreFlag ),
+            rec( 18, 2, InfoCount ),
+            srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
+            srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
+            srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
+            srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
+            srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
+            srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
+            srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
+            srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
+            srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
+            srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
+            srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
+            srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
+            srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
+            srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
+            srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
+            srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
+            srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
+            srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
+            srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
+            srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
+            srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
+            srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
+            srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
+            srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
+            srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
+            srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
+            srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
+            srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
+            srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
+            srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
+            srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
+            srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
+            srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
+            srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
+            # NOTE(review): this 64-bit size struct is gated on
+            # ext_info_64_bit_fs && ret_info_mask_fname, unlike the sibling
+            # entries gated on ext_info_newstyle — confirm the condition pair.
+            srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ),
+            srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+    # 2222/5929, 89/41
+    # 64-bit-aware variant of 0x5927: reply layout depends on the requested
+    # InfoLevelNumber (level 0 = single max/min pair, level 1 = counted list).
+    pkt = NCP(0x5929, "Get Directory Disk Space Restriction 64 Bit Aware", 'enhanced', has_length=0)
+    pkt.Request((26, 279), [
+            rec( 8, 1, NameSpace ),
+            rec( 9, 1, Reserved ),
+            rec( 10, 1, InfoLevelNumber),
+            rec( 11, 4, DirectoryBase ),
+            rec( 15, 1, VolumeNumber ),
+            rec( 16, 1, HandleFlag ),
+            rec( 17, 1, DataTypeFlag ),
+            rec( 18, 5, Reserved5 ),
+            rec( 23, 1, PathCount, var="x" ),
+            rec( 24, (2,255), Path16, repeat="x", info_str=(Path16, "Get Disk Space Restriction for: %s", "/%s") ),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+            rec( -1, 8, MaxSpace64, req_cond = "(ncp.info_level_num == 0)" ),
+            rec( -1, 8, MinSpaceLeft64, req_cond = "(ncp.info_level_num == 0)" ),
+            rec( -1, 1, NumberOfEntries, var="x", req_cond = "(ncp.info_level_num == 1)" ),
+            srec( DirDiskSpaceRest64bit, repeat="x", req_cond = "(ncp.info_level_num == 1)" ),
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
+                         0xff16])
+ # 2222/5932, 89/50
+ pkt = NCP(0x5932, "Get Object Effective Rights", "enhanced", has_length=0)
+ pkt.Request(25, [
+rec( 8, 1, NameSpace ),
+rec( 9, 4, ObjectID ),
+ rec( 13, 4, DirectoryBase ),
+ rec( 17, 1, VolumeNumber ),
+ rec( 18, 1, HandleFlag ),
+ rec( 19, 1, DataTypeFlag ),
+ rec( 20, 5, Reserved5 ),
+])
+ pkt.Reply( 10, [
+ rec( 8, 2, TrusteeRights ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0x9b00, 0x9c03, 0xa901, 0xaa00])
+    # 2222/5934, 89/52
+    # Write Extended Attribute: EAValueLength (var 'x') drives the repeated
+    # EAValueRep bytes that follow the variable-length EAKey.
+    pkt = NCP(0x5934, "Write Extended Attribute", 'enhanced', has_length=0 )
+    pkt.Request((36,98), [
+            rec( 8, 2, EAFlags ),
+            rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
+            rec( 14, 4, ReservedOrDirectoryNumber ),
+            rec( 18, 4, TtlWriteDataSize ),
+            rec( 22, 4, FileOffset ),
+            rec( 26, 4, EAAccessFlag ),
+            rec( 30, 1, DataTypeFlag ),
+            rec( 31, 2, EAValueLength, var='x' ),
+            rec( 33, (2,64), EAKey, info_str=(EAKey, "Write Extended Attribute: %s", ", %s") ),
+            rec( -1, 1, EAValueRep, repeat='x' ),
+    ])
+    pkt.Reply(20, [
+            rec( 8, 4, EAErrorCodes ),
+            rec( 12, 4, EABytesWritten ),
+            rec( 16, 4, NewEAHandle ),
+    ])
+    pkt.CompletionCodes([0x0000, 0xc800, 0xc900, 0xcb00, 0xce00, 0xcf00, 0xd101,
+                         0xd203, 0xa901, 0xaa00, 0xd301, 0xd402])
+    # 2222/5935, 89/53
+    # Read Extended Attribute: variable-size reply carrying up to 512 bytes
+    # of EA value per fragment (caller caps it via MaxReadDataReplySize).
+    pkt = NCP(0x5935, "Read Extended Attribute", 'enhanced', has_length=0 )
+    pkt.Request((31,541), [
+            rec( 8, 2, EAFlags ),
+            rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
+            rec( 14, 4, ReservedOrDirectoryNumber ),
+            rec( 18, 4, FileOffset ),
+            rec( 22, 4, InspectSize ),
+            rec( 26, 1, DataTypeFlag ),
+            rec( 27, 2, MaxReadDataReplySize ),
+            rec( 29, (2,512), EAKey, info_str=(EAKey, "Read Extended Attribute: %s", ", %s") ),
+    ])
+    pkt.Reply((26,536), [
+            rec( 8, 4, EAErrorCodes ),
+            rec( 12, 4, TtlValuesLength ),
+            rec( 16, 4, NewEAHandle ),
+            rec( 20, 4, EAAccessFlag ),
+            rec( 24, (2,512), EAValue ),
+    ])
+    pkt.CompletionCodes([0x0000, 0xa901, 0xaa00, 0xc900, 0xce00, 0xcf00, 0xd101,
+                         0xd301])
+    # 2222/5936, 89/54
+    # Enumerate Extended Attribute: fixed 28-byte reply with totals for the
+    # enumeration (counts and aggregate data/key sizes).
+    pkt = NCP(0x5936, "Enumerate Extended Attribute", 'enhanced', has_length=0 )
+    pkt.Request((27,537), [
+            rec( 8, 2, EAFlags ),
+            rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
+            rec( 14, 4, ReservedOrDirectoryNumber ),
+            rec( 18, 4, InspectSize ),
+            rec( 22, 2, SequenceNumber ),
+            rec( 24, 1, DataTypeFlag ),
+            rec( 25, (2,512), EAKey, info_str=(EAKey, "Enumerate Extended Attribute: %s", ", %s") ),
+    ])
+    pkt.Reply(28, [
+            rec( 8, 4, EAErrorCodes ),
+            rec( 12, 4, TtlEAs ),
+            rec( 16, 4, TtlEAsDataSize ),
+            rec( 20, 4, TtlEAsKeySize ),
+            rec( 24, 4, NewEAHandle ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x8800, 0xa901, 0xaa00, 0xc900, 0xce00, 0xcf00, 0xd101,
+                         0xd301])
+    # 2222/5947, 89/71
+    # Scan Volume Trustee Object Paths: sequence-number-driven iteration over
+    # the paths on which an object holds trustee rights.
+    pkt = NCP(0x5947, "Scan Volume Trustee Object Paths", 'enhanced', has_length=0)
+    pkt.Request(21, [
+            rec( 8, 4, VolumeID ),
+            rec( 12, 4, ObjectID ),
+            rec( 16, 4, SequenceNumber ),
+            rec( 20, 1, DataTypeFlag ),
+    ])
+    pkt.Reply((20,273), [
+            rec( 8, 4, SequenceNumber ),
+            rec( 12, 4, ObjectID ),
+            rec( 16, 1, TrusteeAccessMask ),
+            rec( 17, 1, PathCount, var="x" ),
+            rec( 18, (2,255), Path16, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
+    # 2222/5A00, 90/00  (comment previously said 5A01; the opcode below is 0x5A00)
+    pkt = NCP(0x5A00, "Parse Tree", 'file')
+    pkt.Request(46, [
+            rec( 10, 4, InfoMask ),
+            rec( 14, 4, Reserved4 ),
+            rec( 18, 4, Reserved4 ),
+            rec( 22, 4, limbCount ),
+            rec( 26, 4, limbFlags ),
+            rec( 30, 4, VolumeNumberLong ),
+            rec( 34, 4, DirectoryBase ),
+            rec( 38, 4, limbScanNum ),
+            rec( 42, 4, NameSpace ),
+    ])
+    pkt.Reply(32, [
+            rec( 8, 4, limbCount ),
+            rec( 12, 4, ItemsCount ),
+            rec( 16, 4, nextLimbScanNum ),
+            rec( 20, 4, CompletionCode ),
+            rec( 24, 1, FolderFlag ),
+            rec( 25, 3, Reserved ),
+            rec( 28, 4, DirectoryBase ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
+    # 2222/5A0A, 90/10
+    # Reference count by (volume, directory-entry, namespace) triple.
+    pkt = NCP(0x5A0A, "Get Reference Count from Dir Entry Number", 'file')
+    pkt.Request(19, [
+            rec( 10, 4, VolumeNumberLong ),
+            rec( 14, 4, DirectoryBase ),
+            rec( 18, 1, NameSpace ),
+    ])
+    pkt.Reply(12, [
+            rec( 8, 4, ReferenceCount ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
+    # 2222/5A0B, 90/11
+    # Same lookup keyed by an already-open directory handle.
+    pkt = NCP(0x5A0B, "Get Reference Count from Dir Handle", 'file')
+    pkt.Request(14, [
+            rec( 10, 4, DirHandle ),
+    ])
+    pkt.Reply(12, [
+            rec( 8, 4, ReferenceCount ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
+    # 2222/5A0C, 90/12
+    # Set Compressed File Size: reply echoes old and new sizes.
+    pkt = NCP(0x5A0C, "Set Compressed File Size", 'file')
+    pkt.Request(20, [
+            rec( 10, 6, FileHandle ),
+            rec( 16, 4, SuggestedFileSize ),
+    ])
+    pkt.Reply(16, [
+            rec( 8, 4, OldFileSize ),
+            rec( 12, 4, NewFileSize ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
+    # 2222/5A80, 90/128
+    # Data-migration: push a file's data out to the named support module.
+    pkt = NCP(0x5A80, "Move File Data To Data Migration", 'migration')
+    pkt.Request(27, [
+            rec( 10, 4, VolumeNumberLong ),
+            rec( 14, 4, DirectoryEntryNumber ),
+            rec( 18, 1, NameSpace ),
+            rec( 19, 3, Reserved ),
+            rec( 22, 4, SupportModuleID ),
+            rec( 26, 1, DMFlags ),
+    ])
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A81, 90/129
+    # Per-file migration info; DMInfoEntries (var 'x') drives repeated DataSize.
+    pkt = NCP(0x5A81, "Data Migration File Information", 'migration')
+    pkt.Request(19, [
+            rec( 10, 4, VolumeNumberLong ),
+            rec( 14, 4, DirectoryEntryNumber ),
+            rec( 18, 1, NameSpace ),
+    ])
+    pkt.Reply(24, [
+            rec( 8, 4, SupportModuleID ),
+            rec( 12, 4, RestoreTime ),
+            rec( 16, 4, DMInfoEntries, var="x" ),
+            rec( 20, 4, DataSize, repeat="x" ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A82, 90/130
+    # Per-volume migration statistics for one support module.
+    pkt = NCP(0x5A82, "Volume Data Migration Status", 'migration')
+    pkt.Request(18, [
+            rec( 10, 4, VolumeNumberLong ),
+            rec( 14, 4, SupportModuleID ),
+    ])
+    pkt.Reply(32, [
+            rec( 8, 4, NumOfFilesMigrated ),
+            rec( 12, 4, TtlMigratedSize ),
+            rec( 16, 4, SpaceUsed ),
+            rec( 20, 4, LimboUsed ),
+            rec( 24, 4, SpaceMigrated ),
+            rec( 28, 4, FileLimbo ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A83, 90/131
+    # Presence/version probe for the migrator (no request payload).
+    pkt = NCP(0x5A83, "Migrator Status Info", 'migration')
+    pkt.Request(10)
+    pkt.Reply(20, [
+            rec( 8, 1, DMPresentFlag ),
+            rec( 9, 3, Reserved3 ),
+            rec( 12, 4, DMmajorVersion ),
+            rec( 16, 4, DMminorVersion ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A84, 90/132
+    # Support-module info; reply structure chosen by the requested DMInfoLevel
+    # (ncp.dm_info_level), hence NO_LENGTH_CHECK + ReqCondSizeVariable().
+    pkt = NCP(0x5A84, "Data Migration Support Module Information", 'migration')
+    pkt.Request(18, [
+            rec( 10, 1, DMInfoLevel ),
+            rec( 11, 3, Reserved3),
+            rec( 14, 4, SupportModuleID ),
+    ])
+    pkt.Reply(NO_LENGTH_CHECK, [
+            srec( DMInfoLevel0, req_cond="ncp.dm_info_level == 0x00" ),
+            srec( DMInfoLevel1, req_cond="ncp.dm_info_level == 0x01" ),
+            srec( DMInfoLevel2, req_cond="ncp.dm_info_level == 0x02" ),
+    ])
+    pkt.ReqCondSizeVariable()
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A85, 90/133
+    # Pull a previously migrated file's data back; status-only reply.
+    pkt = NCP(0x5A85, "Move File Data From Data Migration", 'migration')
+    pkt.Request(19, [
+            rec( 10, 4, VolumeNumberLong ),
+            rec( 14, 4, DirectoryEntryNumber ),
+            rec( 18, 1, NameSpace ),
+    ])
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A86, 90/134
+    # GetSetFlag selects whether SupportModuleID is being read or written.
+    pkt = NCP(0x5A86, "Get/Set Default Read-Write Support Module ID", 'migration')
+    pkt.Request(18, [
+            rec( 10, 1, GetSetFlag ),
+            rec( 11, 3, Reserved3 ),
+            rec( 14, 4, SupportModuleID ),
+    ])
+    pkt.Reply(12, [
+            rec( 8, 4, SupportModuleID ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A87, 90/135
+    # Capacity report (block size + total/used blocks) for a support module.
+    pkt = NCP(0x5A87, "Data Migration Support Module Capacity Request", 'migration')
+    pkt.Request(22, [
+            rec( 10, 4, SupportModuleID ),
+            rec( 14, 4, VolumeNumberLong ),
+            rec( 18, 4, DirectoryBase ),
+    ])
+    pkt.Reply(20, [
+            rec( 8, 4, BlockSizeInSectors ),
+            rec( 12, 4, TotalBlocks ),
+            rec( 16, 4, UsedBlocks ),
+    ])
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A88, 90/136
+    # Real-Time Data Migration verb dispatch; status-only reply.
+    pkt = NCP(0x5A88, "RTDM Request", 'migration')
+    pkt.Request(15, [
+            rec( 10, 4, Verb ),
+            rec( 14, 1, VerbData ),
+    ])
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5A96, 90/150
+    # File-migration state change request; status-only reply.
+    pkt = NCP(0x5A96, "File Migration Request", 'file')
+    pkt.Request(22, [
+            rec( 10, 4, VolumeNumberLong ),
+            rec( 14, 4, DirectoryBase ),
+            rec( 18, 4, FileMigrationState ),
+    ])
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfb00, 0xff16])
+    # 2222/5B, 91  (comment previously said 5C; the opcode below is 0x5B = 91)
+    pkt = NCP(0x5B, "NMAS Graded Authentication", 'nmas')
+    # Need info on this packet structure
+    pkt.Request(7)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # SecretStore data is dissected by packet-ncp-sss.c, so every 0x5C
+    # subfunction below registers only a bare request/reply shell here.
+    # 2222/5C01, 9201
+    pkt = NCP(0x5C01, "SecretStore Services (Ping Server)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C02, 9202
+    pkt = NCP(0x5C02, "SecretStore Services (Fragment)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C03, 9203
+    pkt = NCP(0x5C03, "SecretStore Services (Write App Secrets)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C04, 9204
+    pkt = NCP(0x5C04, "SecretStore Services (Add Secret ID)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C05, 9205
+    pkt = NCP(0x5C05, "SecretStore Services (Remove Secret ID)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C06, 9206
+    pkt = NCP(0x5C06, "SecretStore Services (Remove SecretStore)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C07, 9207
+    pkt = NCP(0x5C07, "SecretStore Services (Enumerate Secret IDs)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C08, 9208
+    pkt = NCP(0x5C08, "SecretStore Services (Unlock Store)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C09, 9209
+    pkt = NCP(0x5C09, "SecretStore Services (Set Master Password)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # 2222/5C0a, 9210
+    pkt = NCP(0x5C0a, "SecretStore Services (Get Service Information)", 'sss', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
+                         0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
+                         0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
+    # NMAS packets are dissected in packet-ncp-nmas.c; shells only here.
+    # 2222/5E, 9401
+    pkt = NCP(0x5E01, "NMAS Communications Packet (Ping)", 'nmas', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0xfb09, 0xff08])
+    # 2222/5E, 9402
+    pkt = NCP(0x5E02, "NMAS Communications Packet (Fragment)", 'nmas', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0xfb09, 0xff08])
+    # 2222/5E, 9403
+    pkt = NCP(0x5E03, "NMAS Communications Packet (Abort)", 'nmas', 0)
+    pkt.Request(8)
+    pkt.Reply(8)
+    pkt.CompletionCodes([0x0000, 0xfb09, 0xff08])
+    # 2222/61, 97
+    # Big-packet size negotiation; fields are explicitly big-endian.
+    pkt = NCP(0x61, "Get Big Packet NCP Max Packet Size", 'connection')
+    pkt.Request(10, [
+            rec( 7, 2, ProposedMaxSize, ENC_BIG_ENDIAN, info_str=(ProposedMaxSize, "Get Big Max Packet Size - %d", ", %d") ),
+            rec( 9, 1, SecurityFlag ),
+    ])
+    pkt.Reply(13, [
+            rec( 8, 2, AcceptedMaxSize, ENC_BIG_ENDIAN ),
+            rec( 10, 2, EchoSocket, ENC_BIG_ENDIAN ),
+            rec( 12, 1, SecurityFlag ),
+    ])
+    pkt.CompletionCodes([0x0000])
+ # 2222/62, 98
+ pkt = NCP(0x62, "Negotiate NDS connection buffer size", 'connection')
+ pkt.Request(15, [
+ rec( 7, 8, ProposedMaxSize64, ENC_BIG_ENDIAN, Info_str=(ProposedMaxSize, "Negotiate NDS connection - %d", ", %d")),
+ ])
+ pkt.Reply(18, [
+ rec( 8, 8, AcceptedMaxSize64, ENC_BIG_ENDIAN ),
+ rec( 16, 2, EchoSocket, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/63, 99
+ pkt = NCP(0x63, "Undocumented Packet Burst", 'pburst')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/64, 100
+ pkt = NCP(0x64, "Undocumented Packet Burst", 'pburst')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/65, 101
+ pkt = NCP(0x65, "Packet Burst Connection Request", 'pburst')
+ pkt.Request(25, [
+ rec( 7, 4, LocalConnectionID, ENC_BIG_ENDIAN ),
+ rec( 11, 4, LocalMaxPacketSize, ENC_BIG_ENDIAN ),
+ rec( 15, 2, LocalTargetSocket, ENC_BIG_ENDIAN ),
+ rec( 17, 4, LocalMaxSendSize, ENC_BIG_ENDIAN ),
+ rec( 21, 4, LocalMaxRecvSize, ENC_BIG_ENDIAN ),
+ ])
+ pkt.Reply(16, [
+ rec( 8, 4, RemoteTargetID, ENC_BIG_ENDIAN ),
+ rec( 12, 4, RemoteMaxPacketSize, ENC_BIG_ENDIAN ),
+ ])
+ pkt.CompletionCodes([0x0000])
+ # 2222/66, 102
+ pkt = NCP(0x66, "Undocumented Packet Burst", 'pburst')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/67, 103
+ pkt = NCP(0x67, "Undocumented Packet Burst", 'pburst')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000])
+ # 2222/6801, 104/01
+ pkt = NCP(0x6801, "Ping for NDS NCP", "nds", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x8100, 0xfb04, 0xfe0c])
+ # 2222/6802, 104/02
+ #
+ # XXX - if FraggerHandle is not 0xffffffff, this is not the
+ # first fragment, so we can only dissect this by reassembling;
+ # the fields after "Fragment Handle" are bogus for non-0xffffffff
+ # fragments, so we shouldn't dissect them. This is all handled in packet-ncp2222.inc.
+ #
+ pkt = NCP(0x6802, "Send NDS Fragmented Request/Reply", "nds", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0xac00, 0xfd01])
+ # 2222/6803, 104/03
+ pkt = NCP(0x6803, "Fragment Close", "nds", has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, FraggerHandle ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xff00])
+ # 2222/6804, 104/04
+ pkt = NCP(0x6804, "Return Bindery Context", "nds", has_length=0)
+ pkt.Request(8)
+ pkt.Reply((9, 263), [
+ rec( 8, (1,255), binderyContext ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xfe0c, 0xff00])
+ # 2222/6805, 104/05
+ pkt = NCP(0x6805, "Monitor NDS Connection", "nds", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/6806, 104/06
+ pkt = NCP(0x6806, "Return NDS Statistics", "nds", has_length=0)
+ pkt.Request(10, [
+ rec( 8, 2, NDSRequestFlags ),
+ ])
+ pkt.Reply(8)
+ #Need to investigate how to decode Statistics Return Value
+ pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/6807, 104/07
+ pkt = NCP(0x6807, "Clear NDS Statistics", "nds", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/6808, 104/08
+ pkt = NCP(0x6808, "Reload NDS Software", "nds", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(12, [
+ rec( 8, 4, NDSStatus ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68C8, 104/200
+ pkt = NCP(0x68C8, "Query Container Audit Status", "auditing", has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, ConnectionNumber ),
+ ])
+ pkt.Reply(40, [
+ rec(8, 32, NWAuditStatus ),
+ ])
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68CA, 104/202
+ pkt = NCP(0x68CA, "Add Auditor Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68CB, 104/203
+ pkt = NCP(0x68CB, "Change Auditor Container Password", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68CC, 104/204
+ pkt = NCP(0x68CC, "Check Auditor Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68CE, 104/206
+ pkt = NCP(0x680CE, "Disable Container Auditing", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68CF, 104/207
+ pkt = NCP(0x68CF, "Enable Container Auditing", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68D1, 104/209
+ pkt = NCP(0x68D1, "Read Audit File Header", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68D3, 104/211
+ pkt = NCP(0x68D3, "Remove Auditor Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68D4, 104/212
+ pkt = NCP(0x68D4, "Reset Audit File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68D6, 104/214
+ pkt = NCP(0x68D6, "Write Audit File Configuration Header", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68D7, 104/215
+ pkt = NCP(0x68D7, "Change Auditor Container Password2", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68D8, 104/216
+ pkt = NCP(0x68D8, "Return Audit Flags", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68D9, 104/217
+ pkt = NCP(0x68D9, "Close Old Audit File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68DB, 104/219
+ pkt = NCP(0x68DB, "Check Level Two Access", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68DC, 104/220
+ pkt = NCP(0x68DC, "Check Object Audited", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68DD, 104/221
+ pkt = NCP(0x68DD, "Change Object Audited", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68DE, 104/222
+ pkt = NCP(0x68DE, "Return Old Audit File List", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68DF, 104/223
+ pkt = NCP(0x68DF, "Init Audit File Reads", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68E0, 104/224
+ pkt = NCP(0x68E0, "Read Auditing File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68E1, 104/225
+ pkt = NCP(0x68E1, "Delete Old Audit File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68E5, 104/229
+ pkt = NCP(0x68E5, "Set Audit Password", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/68E7, 104/231
+ pkt = NCP(0x68E7, "External Audit Append To File", "auditing", has_length=0)
+ pkt.Request(8)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
+ # 2222/69, 105
+ pkt = NCP(0x69, "Log File", 'sync')
+ pkt.Request( (12, 267), [
+ rec( 7, 1, DirHandle ),
+ rec( 8, 1, LockFlag ),
+ rec( 9, 2, TimeoutLimit ),
+ rec( 11, (1, 256), FilePath, info_str=(FilePath, "Log File: %s", "/%s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x9600, 0xfe0d, 0xff01])
+ # 2222/6A, 106
+ pkt = NCP(0x6A, "Lock File Set", 'sync')
+ pkt.Request( 9, [
+ rec( 7, 2, TimeoutLimit ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x9600, 0xfe0d, 0xff01])
+ # 2222/6B, 107
+ pkt = NCP(0x6B, "Log Logical Record", 'sync')
+ pkt.Request( (11, 266), [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 2, TimeoutLimit ),
+ rec( 10, (1, 256), SynchName, info_str=(SynchName, "Log Logical Record: %s", ", %s") ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x9600, 0xfe0d, 0xff01])
+ # 2222/6C, 108
+ pkt = NCP(0x6C, "Log Logical Record", 'sync')
+ pkt.Request( 10, [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 2, TimeoutLimit ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x9600, 0xfe0d, 0xff01])
+ # 2222/6D, 109
+ pkt = NCP(0x6D, "Log Physical Record", 'sync')
+ pkt.Request(24, [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 6, FileHandle ),
+ rec( 14, 4, LockAreasStartOffset ),
+ rec( 18, 4, LockAreaLen ),
+ rec( 22, 2, LockTimeout ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
+ # 2222/6E, 110
+ pkt = NCP(0x6E, "Lock Physical Record Set", 'sync')
+ pkt.Request(10, [
+ rec( 7, 1, LockFlag ),
+ rec( 8, 2, LockTimeout ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
+ # 2222/6F00, 111/00
+ pkt = NCP(0x6F00, "Open/Create a Semaphore", 'sync', has_length=0)
+ pkt.Request((10,521), [
+ rec( 8, 1, InitialSemaphoreValue ),
+ rec( 9, (1, 512), SemaphoreName, info_str=(SemaphoreName, "Open/Create Semaphore: %s", ", %s") ),
+ ])
+ pkt.Reply(13, [
+ rec( 8, 4, SemaphoreHandle ),
+ rec( 12, 1, SemaphoreOpenCount ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
+ # 2222/6F01, 111/01
+ pkt = NCP(0x6F01, "Examine Semaphore", 'sync', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, SemaphoreHandle ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, SemaphoreValue ),
+ rec( 9, 1, SemaphoreOpenCount ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
+ # 2222/6F02, 111/02
+ pkt = NCP(0x6F02, "Wait On (P) Semaphore", 'sync', has_length=0)
+ pkt.Request(14, [
+ rec( 8, 4, SemaphoreHandle ),
+ rec( 12, 2, LockTimeout ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01])
+ # 2222/6F03, 111/03
+ pkt = NCP(0x6F03, "Signal (V) Semaphore", 'sync', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, SemaphoreHandle ),
+ ])
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01])
+ # 2222/6F04, 111/04
+ pkt = NCP(0x6F04, "Close Semaphore", 'sync', has_length=0)
+ pkt.Request(12, [
+ rec( 8, 4, SemaphoreHandle ),
+ ])
+ pkt.Reply(10, [
+ rec( 8, 1, SemaphoreOpenCount ),
+ rec( 9, 1, SemaphoreShareCount ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01])
+        # 2222/70, 112
+ pkt = NCP(0x70, "Clear and Get Waiting Lock Completion", 'sync')
+ pkt.Request(7)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x9b00, 0x9c03, 0xff1a])
+ # 2222/7201, 114/01
+ pkt = NCP(0x7201, "Timesync Get Time", 'tsync')
+ pkt.Request(10)
+ pkt.Reply(32,[
+ rec( 8, 12, theTimeStruct ),
+ rec(20, 8, eventOffset ),
+ rec(28, 4, eventTime ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7202, 114/02
+ pkt = NCP(0x7202, "Timesync Exchange Time", 'tsync')
+ pkt.Request((63,112), [
+ rec( 10, 4, protocolFlags ),
+ rec( 14, 4, nodeFlags ),
+ rec( 18, 8, sourceOriginateTime ),
+ rec( 26, 8, targetReceiveTime ),
+ rec( 34, 8, targetTransmitTime ),
+ rec( 42, 8, sourceReturnTime ),
+ rec( 50, 8, eventOffset ),
+ rec( 58, 4, eventTime ),
+ rec( 62, (1,50), ServerNameLen, info_str=(ServerNameLen, "Timesync Exchange Time: %s", ", %s") ),
+ ])
+ pkt.Reply((64,113), [
+ rec( 8, 3, Reserved3 ),
+ rec( 11, 4, protocolFlags ),
+ rec( 15, 4, nodeFlags ),
+ rec( 19, 8, sourceOriginateTime ),
+ rec( 27, 8, targetReceiveTime ),
+ rec( 35, 8, targetTransmitTime ),
+ rec( 43, 8, sourceReturnTime ),
+ rec( 51, 8, eventOffset ),
+ rec( 59, 4, eventTime ),
+ rec( 63, (1,50), ServerNameLen ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7205, 114/05
+ pkt = NCP(0x7205, "Timesync Get Server List", 'tsync')
+ pkt.Request(14, [
+ rec( 10, 4, StartNumber ),
+ ])
+ pkt.Reply(66, [
+ rec( 8, 4, nameType ),
+ rec( 12, 48, ServerName ),
+ rec( 60, 4, serverListFlags ),
+ rec( 64, 2, startNumberFlag ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7206, 114/06
+ pkt = NCP(0x7206, "Timesync Set Server List", 'tsync')
+ pkt.Request(14, [
+ rec( 10, 4, StartNumber ),
+ ])
+ pkt.Reply(66, [
+ rec( 8, 4, nameType ),
+ rec( 12, 48, ServerName ),
+ rec( 60, 4, serverListFlags ),
+ rec( 64, 2, startNumberFlag ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/720C, 114/12
+ pkt = NCP(0x720C, "Timesync Get Version", 'tsync')
+ pkt.Request(10)
+ pkt.Reply(12, [
+ rec( 8, 4, version ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7B01, 123/01
+ pkt = NCP(0x7B01, "Get Cache Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(288, [
+ rec(8, 4, CurrentServerTime, ENC_LITTLE_ENDIAN),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 104, Counters ),
+ rec(120, 40, ExtraCacheCntrs ),
+ rec(160, 40, MemoryCounters ),
+ rec(200, 48, TrendCounters ),
+ rec(248, 40, CacheInfo ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xff00])
+ # 2222/7B02, 123/02
+ pkt = NCP(0x7B02, "Get File Server Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(150, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NCPStaInUseCnt ),
+ rec(20, 4, NCPPeakStaInUse ),
+ rec(24, 4, NumOfNCPReqs ),
+ rec(28, 4, ServerUtilization ),
+ rec(32, 96, ServerInfo ),
+ rec(128, 22, FileServerCounters ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B03, 123/03
+ pkt = NCP(0x7B03, "NetWare File System Information", 'stats')
+ pkt.Request(11, [
+ rec(10, 1, FileSystemID ),
+ ])
+ pkt.Reply(68, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 52, FileSystemInfo ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B04, 123/04
+ pkt = NCP(0x7B04, "User Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, ConnectionNumber, ENC_LITTLE_ENDIAN ),
+ ])
+ pkt.Reply((85, 132), [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 68, UserInformation ),
+ rec(84, (1, 48), UserName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B05, 123/05
+ pkt = NCP(0x7B05, "Packet Burst Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(216, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 200, PacketBurstInformation ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B06, 123/06
+ pkt = NCP(0x7B06, "IPX SPX Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(94, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 34, IPXInformation ),
+ rec(50, 44, SPXInformation ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B07, 123/07
+ pkt = NCP(0x7B07, "Garbage Collection Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(40, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, FailedAllocReqCnt ),
+ rec(20, 4, NumberOfAllocs ),
+ rec(24, 4, NoMoreMemAvlCnt ),
+ rec(28, 4, NumOfGarbageColl ),
+ rec(32, 4, FoundSomeMem ),
+ rec(36, 4, NumOfChecks ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B08, 123/08
+ pkt = NCP(0x7B08, "CPU Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, CPUNumber ),
+ ])
+ pkt.Reply(51, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumberOfCPUs ),
+ rec(20, 31, CPUInformation ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B09, 123/09
+ pkt = NCP(0x7B09, "Volume Switch Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StartNumber )
+ ])
+ pkt.Reply(28, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, TotalLFSCounters ),
+ rec(20, 4, CurrentLFSCounters, var="x"),
+ rec(24, 4, LFSCounters, repeat="x"),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B0A, 123/10
+ pkt = NCP(0x7B0A, "Get NLM Loaded List", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StartNumber )
+ ])
+ pkt.Reply(28, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NLMcount ),
+ rec(20, 4, NLMsInList, var="x" ),
+ rec(24, 4, NLMNumbers, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B0B, 123/11
+ pkt = NCP(0x7B0B, "NLM Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, NLMNumber ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 60, NLMInformation ),
+ # The remainder of this dissection is manually decoded in packet-ncp2222.inc
+ #rec(-1, (1,255), FileName ),
+ #rec(-1, (1,255), Name ),
+ #rec(-1, (1,255), Copyright ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B0C, 123/12
+ pkt = NCP(0x7B0C, "Get Directory Cache Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(72, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 56, DirCacheInfo ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B0D, 123/13
+ pkt = NCP(0x7B0D, "Get Operating System Version Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(70, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 1, OSMajorVersion ),
+ rec(17, 1, OSMinorVersion ),
+ rec(18, 1, OSRevision ),
+ rec(19, 1, AccountVersion ),
+ rec(20, 1, VAPVersion ),
+ rec(21, 1, QueueingVersion ),
+ rec(22, 1, SecurityRestrictionVersion ),
+ rec(23, 1, InternetBridgeVersion ),
+ rec(24, 4, MaxNumOfVol ),
+ rec(28, 4, MaxNumOfConn ),
+ rec(32, 4, MaxNumOfUsers ),
+ rec(36, 4, MaxNumOfNmeSps ),
+ rec(40, 4, MaxNumOfLANS ),
+ rec(44, 4, MaxNumOfMedias ),
+ rec(48, 4, MaxNumOfStacks ),
+ rec(52, 4, MaxDirDepth ),
+ rec(56, 4, MaxDataStreams ),
+ rec(60, 4, MaxNumOfSpoolPr ),
+ rec(64, 4, ServerSerialNumber ),
+ rec(68, 2, ServerAppNumber ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B0E, 123/14
+ pkt = NCP(0x7B0E, "Get Active Connection List by Type", 'stats')
+ pkt.Request(15, [
+ rec(10, 4, StartConnNumber ),
+ rec(14, 1, ConnectionType ),
+ ])
+ pkt.Reply(528, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 512, ActiveConnBitList ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfd01, 0xff00])
+ # 2222/7B0F, 123/15
+ pkt = NCP(0x7B0F, "Get NLM Resource Tag List", 'stats')
+ pkt.Request(18, [
+ rec(10, 4, NLMNumber ),
+ rec(14, 4, NLMStartNumber ),
+ ])
+ pkt.Reply(37, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, TtlNumOfRTags ),
+ rec(20, 4, CurNumOfRTags ),
+ rec(24, 13, RTagStructure ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B10, 123/16
+ pkt = NCP(0x7B10, "Enumerate Connection Information from Connection List", 'stats')
+ pkt.Request(22, [
+ rec(10, 1, EnumInfoMask),
+ rec(11, 3, Reserved3),
+ rec(14, 4, itemsInList, var="x"),
+ rec(18, 4, connList, repeat="x"),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, ItemsInPacket ),
+ srec(netAddr, req_cond="ncp.enum_info_transport==TRUE"),
+ srec(timeInfo, req_cond="ncp.enum_info_time==TRUE"),
+ srec(nameInfo, req_cond="ncp.enum_info_name==TRUE"),
+ srec(lockInfo, req_cond="ncp.enum_info_lock==TRUE"),
+ srec(printInfo, req_cond="ncp.enum_info_print==TRUE"),
+ srec(statsInfo, req_cond="ncp.enum_info_stats==TRUE"),
+ srec(acctngInfo, req_cond="ncp.enum_info_account==TRUE"),
+ srec(authInfo, req_cond="ncp.enum_info_auth==TRUE"),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B11, 123/17
+ pkt = NCP(0x7B11, "Enumerate NCP Service Network Addresses", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, SearchNumber ),
+ ])
+ pkt.Reply(36, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, ServerInfoFlags ),
+ rec(16, 16, GUID ),
+ rec(32, 4, NextSearchNum ),
+ # The following two items are dissected in packet-ncp2222.inc
+ #rec(36, 4, ItemsInPacket, var="x"),
+ #rec(40, 20, NCPNetworkAddress, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb01, 0xff00])
+ # 2222/7B14, 123/20
+ pkt = NCP(0x7B14, "Active LAN Board List", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StartNumber ),
+ ])
+ pkt.Reply(28, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, MaxNumOfLANS ),
+ rec(20, 4, ItemsInPacket, var="x"),
+ rec(24, 4, BoardNumbers, repeat="x"),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B15, 123/21
+ pkt = NCP(0x7B15, "LAN Configuration Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, BoardNumber ),
+ ])
+ pkt.Reply(152, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16,136, LANConfigInfo ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B16, 123/22
+ pkt = NCP(0x7B16, "LAN Common Counters Information", 'stats')
+ pkt.Request(18, [
+ rec(10, 4, BoardNumber ),
+ rec(14, 4, BlockNumber ),
+ ])
+ pkt.Reply(86, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 1, StatMajorVersion ),
+ rec(15, 1, StatMinorVersion ),
+ rec(16, 4, TotalCommonCnts ),
+ rec(20, 4, TotalCntBlocks ),
+ rec(24, 4, CustomCounters ),
+ rec(28, 4, NextCntBlock ),
+ rec(32, 54, CommonLanStruc ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B17, 123/23
+ pkt = NCP(0x7B17, "LAN Custom Counters Information", 'stats')
+ pkt.Request(18, [
+ rec(10, 4, BoardNumber ),
+ rec(14, 4, StartNumber ),
+ ])
+ pkt.Reply(25, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumOfCCinPkt, var="x"),
+ rec(20, 5, CustomCntsInfo, repeat="x"),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B18, 123/24
+ pkt = NCP(0x7B18, "LAN Name Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, BoardNumber ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, PROTO_LENGTH_UNKNOWN, DriverBoardName ),
+ rec(-1, PROTO_LENGTH_UNKNOWN, DriverShortName ),
+ rec(-1, PROTO_LENGTH_UNKNOWN, DriverLogicalName ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B19, 123/25
+ pkt = NCP(0x7B19, "LSL Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(90, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 74, LSLInformation ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B1A, 123/26
+ pkt = NCP(0x7B1A, "LSL Logical Board Statistics", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, BoardNumber ),
+ ])
+ pkt.Reply(28, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, LogTtlTxPkts ),
+ rec(20, 4, LogTtlRxPkts ),
+ rec(24, 4, UnclaimedPkts ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B1B, 123/27
+ pkt = NCP(0x7B1B, "MLID Board Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, BoardNumber ),
+ ])
+ pkt.Reply(44, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 1, Reserved ),
+ rec(15, 1, NumberOfProtocols ),
+ rec(16, 28, MLIDBoardInfo ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B1E, 123/30
+ pkt = NCP(0x7B1E, "Get Media Manager Object Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, ObjectNumber ),
+ ])
+ pkt.Reply(212, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 196, GenericInfoDef ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B1F, 123/31
+ pkt = NCP(0x7B1F, "Get Media Manager Objects List", 'stats')
+ pkt.Request(15, [
+ rec(10, 4, StartNumber ),
+ rec(14, 1, MediaObjectType ),
+ ])
+ pkt.Reply(28, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, nextStartingNumber ),
+ rec(20, 4, ObjectCount, var="x"),
+ rec(24, 4, ObjectID, repeat="x"),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B20, 123/32
+ pkt = NCP(0x7B20, "Get Media Manager Object Childrens List", 'stats')
+ pkt.Request(22, [
+ rec(10, 4, StartNumber ),
+ rec(14, 1, MediaObjectType ),
+ rec(15, 3, Reserved3 ),
+ rec(18, 4, ParentObjectNumber ),
+ ])
+ pkt.Reply(28, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, nextStartingNumber ),
+ rec(20, 4, ObjectCount, var="x" ),
+ rec(24, 4, ObjectID, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B21, 123/33
+ pkt = NCP(0x7B21, "Get Volume Segment List", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, VolumeNumberLong ),
+ ])
+ pkt.Reply(32, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumOfSegments, var="x" ),
+ rec(20, 12, Segments, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0x9801, 0xfb06, 0xff00])
+ # 2222/7B22, 123/34
+ pkt = NCP(0x7B22, "Get Volume Information by Level", 'stats')
+ pkt.Request(15, [
+ rec(10, 4, VolumeNumberLong ),
+ rec(14, 1, InfoLevelNumber ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 1, InfoLevelNumber ),
+ rec(17, 3, Reserved3 ),
+ srec(VolInfoStructure, req_cond="ncp.info_level_num==0x01"),
+ srec(VolInfo2Struct, req_cond="ncp.info_level_num==0x02"),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B23, 123/35
+ pkt = NCP(0x7B23, "Get Volume Information by Level 64 Bit Aware", 'stats')
+ pkt.Request(22, [
+ rec(10, 4, InpInfotype ),
+ rec(14, 4, Inpld ),
+ rec(18, 4, VolInfoReturnInfoMask),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, VolInfoReturnInfoMask),
+ srec(VolInfoStructure64, req_cond="ncp.vinfo_info64==0x00000001"),
+ rec( -1, (1,255), VolumeNameLen, req_cond="ncp.vinfo_volname==0x00000002" ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B28, 123/40
+ pkt = NCP(0x7B28, "Active Protocol Stacks", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StartNumber ),
+ ])
+ pkt.Reply(48, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, MaxNumOfLANS ),
+ rec(20, 4, StackCount, var="x" ),
+ rec(24, 4, nextStartingNumber ),
+ rec(28, 20, StackInfo, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B29, 123/41
+ pkt = NCP(0x7B29, "Get Protocol Stack Configuration Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StackNumber ),
+ ])
+ pkt.Reply((37,164), [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 1, ConfigMajorVN ),
+ rec(17, 1, ConfigMinorVN ),
+ rec(18, 1, StackMajorVN ),
+ rec(19, 1, StackMinorVN ),
+ rec(20, 16, ShortStkName ),
+ rec(36, (1,128), StackFullNameStr ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B2A, 123/42
+ pkt = NCP(0x7B2A, "Get Protocol Stack Statistics Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StackNumber ),
+ ])
+ pkt.Reply(38, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 1, StatMajorVersion ),
+ rec(17, 1, StatMinorVersion ),
+ rec(18, 2, ComCnts ),
+ rec(20, 4, CounterMask ),
+ rec(24, 4, TotalTxPkts ),
+ rec(28, 4, TotalRxPkts ),
+ rec(32, 4, IgnoredRxPkts ),
+ rec(36, 2, CustomCnts ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B2B, 123/43
+ pkt = NCP(0x7B2B, "Get Protocol Stack Custom Information", 'stats')
+ pkt.Request(18, [
+ rec(10, 4, StackNumber ),
+ rec(14, 4, StartNumber ),
+ ])
+ pkt.Reply(25, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, CustomCount, var="x" ),
+ rec(20, 5, CustomCntsInfo, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B2C, 123/44
+ pkt = NCP(0x7B2C, "Get Protocol Stack Numbers by Media Number", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, MediaNumber ),
+ ])
+ pkt.Reply(24, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, StackCount, var="x" ),
+ rec(20, 4, StackNumber, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B2D, 123/45
+ pkt = NCP(0x7B2D, "Get Protocol Stack Numbers by LAN Board Number", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, BoardNumber ),
+ ])
+ pkt.Reply(24, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, StackCount, var="x" ),
+ rec(20, 4, StackNumber, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B2E, 123/46
+ pkt = NCP(0x7B2E, "Get Media Name by Media Number", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, MediaNumber ),
+ ])
+ pkt.Reply((17,144), [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, (1,128), MediaName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B2F, 123/47
+ pkt = NCP(0x7B2F, "Get Loaded Media Number", 'stats')
+ pkt.Request(10)
+ pkt.Reply(28, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, MaxNumOfMedias ),
+ rec(20, 4, MediaListCount, var="x" ),
+ rec(24, 4, MediaList, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B32, 123/50
+ pkt = NCP(0x7B32, "Get General Router and SAP Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(37, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 2, RIPSocketNumber ),
+ rec(18, 2, Reserved2 ),
+ rec(20, 1, RouterDownFlag ),
+ rec(21, 3, Reserved3 ),
+ rec(24, 1, TrackOnFlag ),
+ rec(25, 3, Reserved3 ),
+ rec(28, 1, ExtRouterActiveFlag ),
+ rec(29, 3, Reserved3 ),
+ rec(32, 2, SAPSocketNumber ),
+ rec(34, 2, Reserved2 ),
+ rec(36, 1, RpyNearestSrvFlag ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B33, 123/51
+ pkt = NCP(0x7B33, "Get Network Router Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, NetworkNumber ),
+ ])
+ pkt.Reply(26, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 10, KnownRoutes ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B34, 123/52
+ pkt = NCP(0x7B34, "Get Network Routers Information", 'stats')
+ pkt.Request(18, [
+ rec(10, 4, NetworkNumber),
+ rec(14, 4, StartNumber ),
+ ])
+ pkt.Reply(34, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumOfEntries, var="x" ),
+ rec(20, 14, RoutersInfo, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B35, 123/53
+ pkt = NCP(0x7B35, "Get Known Networks Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StartNumber ),
+ ])
+ pkt.Reply(30, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumOfEntries, var="x" ),
+ rec(20, 10, KnownRoutes, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B36, 123/54
+ pkt = NCP(0x7B36, "Get Server Information", 'stats')
+ pkt.Request((15,64), [
+ rec(10, 2, ServerType ),
+ rec(12, 2, Reserved2 ),
+ rec(14, (1,50), ServerNameLen, info_str=(ServerNameLen, "Get Server Information: %s", ", %s") ),
+ ])
+ pkt.Reply(30, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 12, ServerAddress ),
+ rec(28, 2, HopsToNet ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B37, 123/55
+ pkt = NCP(0x7B37, "Get Server Sources Information", 'stats')
+ pkt.Request((19,68), [
+ rec(10, 4, StartNumber ),
+ rec(14, 2, ServerType ),
+ rec(16, 2, Reserved2 ),
+ rec(18, (1,50), ServerNameLen, info_str=(ServerNameLen, "Get Server Sources Info: %s", ", %s") ),
+ ])
+ pkt.Reply(32, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumOfEntries, var="x" ),
+ rec(20, 12, ServersSrcInfo, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B38, 123/56
+ pkt = NCP(0x7B38, "Get Known Servers Information", 'stats')
+ pkt.Request(16, [
+ rec(10, 4, StartNumber ),
+ rec(14, 2, ServerType ),
+ ])
+ pkt.Reply(35, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumOfEntries, var="x" ),
+ rec(20, 15, KnownServStruc, repeat="x" ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B3C, 123/60
+ pkt = NCP(0x7B3C, "Get Server Set Commands Information", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StartNumber ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, TtlNumOfSetCmds ),
+ rec(20, 4, nextStartingNumber ),
+ rec(24, 1, SetCmdType ),
+ rec(25, 3, Reserved3 ),
+ rec(28, 1, SetCmdCategory ),
+ rec(29, 3, Reserved3 ),
+ rec(32, 1, SetCmdFlags ),
+ rec(33, 3, Reserved3 ),
+ rec(36, PROTO_LENGTH_UNKNOWN, SetCmdName ),
+ rec(-1, 4, SetCmdValueNum ),
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B3D, 123/61
+ pkt = NCP(0x7B3D, "Get Server Set Categories", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, StartNumber ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, NumberOfSetCategories ),
+ rec(20, 4, nextStartingNumber ),
+ rec(24, PROTO_LENGTH_UNKNOWN, CategoryName ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
+ # 2222/7B3E, 123/62
+ pkt = NCP(0x7B3E, "Get Server Set Commands Information By Name", 'stats')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, PROTO_LENGTH_UNKNOWN, SetParmName, info_str=(SetParmName, "Get Server Set Command Info for: %s", ", %s") ),
+ ])
+ pkt.Reply(NO_LENGTH_CHECK, [
+ rec(8, 4, CurrentServerTime ),
+ rec(12, 1, VConsoleVersion ),
+ rec(13, 1, VConsoleRevision ),
+ rec(14, 2, Reserved2 ),
+ rec(16, 4, TtlNumOfSetCmds ),
+ rec(20, 4, nextStartingNumber ),
+ rec(24, 1, SetCmdType ),
+ rec(25, 3, Reserved3 ),
+ rec(28, 1, SetCmdCategory ),
+ rec(29, 3, Reserved3 ),
+ rec(32, 1, SetCmdFlags ),
+ rec(33, 3, Reserved3 ),
+ rec(36, PROTO_LENGTH_UNKNOWN, SetCmdName ),
+ # The value of the set command is decoded in packet-ncp2222.inc
+ ])
+ pkt.ReqCondSizeVariable()
+ pkt.CompletionCodes([0x0000, 0x7e01, 0xc600, 0xfb06, 0xff22])
+ # 2222/7B46, 123/70
+ pkt = NCP(0x7B46, "Get Current Compressing File", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, VolumeNumberLong ),
+ ])
+ pkt.Reply(56, [
+ rec(8, 4, ParentID ),
+ rec(12, 4, DirectoryEntryNumber ),
+ rec(16, 4, compressionStage ),
+ rec(20, 4, ttlIntermediateBlks ),
+ rec(24, 4, ttlCompBlks ),
+ rec(28, 4, curIntermediateBlks ),
+ rec(32, 4, curCompBlks ),
+ rec(36, 4, curInitialBlks ),
+ rec(40, 4, fileFlags ),
+ rec(44, 4, projectedCompSize ),
+ rec(48, 4, originalSize ),
+ rec(52, 4, compressVolume ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0x7901, 0x9801, 0xfb06, 0xff00])
+ # 2222/7B47, 123/71
+ pkt = NCP(0x7B47, "Get Current DeCompressing File Info List", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, VolumeNumberLong ),
+ ])
+ pkt.Reply(24, [
+ #rec(8, 4, FileListCount ),
+ rec(8, 16, FileInfoStruct ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0x9801, 0xfb06, 0xff00])
+ # 2222/7B48, 123/72
+ pkt = NCP(0x7B48, "Get Compression and Decompression Time and Counts", 'stats')
+ pkt.Request(14, [
+ rec(10, 4, VolumeNumberLong ),
+ ])
+ pkt.Reply(64, [
+ rec(8, 56, CompDeCompStat ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0x9801, 0xfb06, 0xff00])
+ # 2222/7BF9, 123/249
+ pkt = NCP(0x7BF9, "Set Alert Notification", 'stats')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7BFB, 123/251
+ pkt = NCP(0x7BFB, "Get Item Configuration Information", 'stats')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7BFC, 123/252
+ pkt = NCP(0x7BFC, "Get Subject Item ID List", 'stats')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7BFD, 123/253
+ pkt = NCP(0x7BFD, "Get Subject Item List Count", 'stats')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7BFE, 123/254
+ pkt = NCP(0x7BFE, "Get Subject ID List", 'stats')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/7BFF, 123/255
+ pkt = NCP(0x7BFF, "Get Number of NetMan Subjects", 'stats')
+ pkt.Request(10)
+ pkt.Reply(8)
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
+ # 2222/8301, 131/01
+ pkt = NCP(0x8301, "RPC Load an NLM", 'remote')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, 4, NLMLoadOptions ),
+ rec(14, 16, Reserved16 ),
+ rec(30, PROTO_LENGTH_UNKNOWN, PathAndName, info_str=(PathAndName, "RPC Load NLM: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec(8, 4, RPCccode ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7c00, 0x7e00, 0xfb07, 0xff00])
+ # 2222/8302, 131/02
+ pkt = NCP(0x8302, "RPC Unload an NLM", 'remote')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, 20, Reserved20 ),
+ rec(30, PROTO_LENGTH_UNKNOWN, NLMName, info_str=(NLMName, "RPC Unload NLM: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec(8, 4, RPCccode ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7c00, 0x7e00, 0xfb07, 0xff00])
+ # 2222/8303, 131/03
+ pkt = NCP(0x8303, "RPC Mount Volume", 'remote')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, 20, Reserved20 ),
+ rec(30, PROTO_LENGTH_UNKNOWN, VolumeNameStringz, info_str=(VolumeNameStringz, "RPC Mount Volume: %s", ", %s") ),
+ ])
+ pkt.Reply(32, [
+ rec(8, 4, RPCccode),
+ rec(12, 16, Reserved16 ),
+ rec(28, 4, VolumeNumberLong ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+ # 2222/8304, 131/04
+ pkt = NCP(0x8304, "RPC Dismount Volume", 'remote')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, 20, Reserved20 ),
+ rec(30, PROTO_LENGTH_UNKNOWN, VolumeNameStringz, info_str=(VolumeNameStringz, "RPC Dismount Volume: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec(8, 4, RPCccode ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+ # 2222/8305, 131/05
+ pkt = NCP(0x8305, "RPC Add Name Space To Volume", 'remote')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, 20, Reserved20 ),
+ rec(30, PROTO_LENGTH_UNKNOWN, AddNameSpaceAndVol, info_str=(AddNameSpaceAndVol, "RPC Add Name Space to Volume: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec(8, 4, RPCccode ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+ # 2222/8306, 131/06
+ pkt = NCP(0x8306, "RPC Set Command Value", 'remote')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, 1, SetCmdType ),
+ rec(11, 3, Reserved3 ),
+ rec(14, 4, SetCmdValueNum ),
+ rec(18, 12, Reserved12 ),
+ rec(30, PROTO_LENGTH_UNKNOWN, SetCmdName, info_str=(SetCmdName, "RPC Set Command Value: %s", ", %s") ),
+ #
+ # XXX - optional string, if SetCmdType is 0
+ #
+ ])
+ pkt.Reply(12, [
+ rec(8, 4, RPCccode ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
+ # 2222/8307, 131/07
+ pkt = NCP(0x8307, "RPC Execute NCF File", 'remote')
+ pkt.Request(NO_LENGTH_CHECK, [
+ rec(10, 20, Reserved20 ),
+ rec(30, PROTO_LENGTH_UNKNOWN, PathAndName, info_str=(PathAndName, "RPC Execute NCF File: %s", ", %s") ),
+ ])
+ pkt.Reply(12, [
+ rec(8, 4, RPCccode ),
+ ])
+ pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
if __name__ == '__main__':
    # Script entry point: emit the generated NCP dissector tables.
    # The commented-out lines below are a ready-made profile/pstats recipe
    # for timing a generator run; note they still use Python 2 print
    # statements and would need porting before being re-enabled.
#    import profile
#    filename = "ncp.pstats"
#    profile.run("main()", filename)
#
#    import pstats
#    sys.stdout = msg
#    p = pstats.Stats(filename)
#
#    print "Stats sorted by cumulative time"
#    p.strip_dirs().sort_stats('cumulative').print_stats()
#
#    print "Function callees"
#    p.print_callees()
    main()
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/netscreen2dump.py b/tools/netscreen2dump.py
new file mode 100755
index 0000000..7aaac94
--- /dev/null
+++ b/tools/netscreen2dump.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+"""
+Converts netscreen snoop hex-dumps to a hex-dump that text2pcap can read.
+
+Copyright (c) 2004 by Gilbert Ramirez <gram@alumni.rice.edu>
+
+SPDX-License-Identifier: GPL-2.0-or-later
+"""
+
+import sys
+import re
+import os
+import stat
+import time
+
+
class OutputFile:
    """Output file in text2pcap format.

    Each packet is written as a "YYYY-MM-DD HH:MM:SS.d" timestamp line,
    followed by "offset hex-bytes" lines, followed by a blank line.
    """

    # The netscreen relative timer wraps at this many seconds; added to
    # base_time whenever a wrap is detected so timestamps stay monotonic.
    TIMER_MAX = 99999.9

    def __init__(self, name, base_time):
        """Open *name* for writing.

        base_time -- epoch seconds (the capture file's creation time)
            added to every relative packet timestamp.
        """
        try:
            self.fh = open(name, "w")
        except IOError as err:          # py3 fix: "except IOError, err" is a syntax error
            sys.exit(err)

        self.base_time = base_time
        self.prev_timestamp = 0.0

    def PrintPacket(self, timestamp, datalines):
        """Write one packet.

        timestamp -- regex match object whose "time" group is the
            netscreen relative timestamp in seconds.
        datalines -- list of (lineno, match) pairs; each match's "hex"
            group is one line of space-separated hex byte pairs.
        """
        # What to do with the timestamp? I need more data about what
        # the netscreen timestamp is, then I can generate one for the text file.
        try:
            timestamp = float(timestamp.group("time"))
        except ValueError:
            sys.exit("Unable to convert '%s' to floating point." %
                     (timestamp,))

        # Did we wrap around the timer max?
        if timestamp < self.prev_timestamp:
            self.base_time += self.TIMER_MAX

        self.prev_timestamp = timestamp

        packet_timestamp = self.base_time + timestamp

        # Determine the time string to print.
        gmtime = time.gmtime(packet_timestamp)
        subsecs = packet_timestamp - int(packet_timestamp)
        # The fractional part is always in [0, 1). The original asserted
        # "subsecs <= 0", which fired on every non-integral timestamp.
        assert 0 <= subsecs < 1
        subsecs = int(subsecs * 10)     # keep one decimal digit

        self.fh.write("%s.%d\n" % (time.strftime("%Y-%m-%d %H:%M:%S", gmtime),
                                   subsecs))

        # Print the packet data.
        offset = 0
        for lineno, hexgroup in datalines:
            hexline = hexgroup.group("hex")
            hexpairs = hexline.split()
            self.fh.write("%08x %s\n" % (offset, hexline))
            offset += len(hexpairs)

        # Blank line terminates the packet for text2pcap.
        self.fh.write("\n")
+
+
# Find a timestamp line, e.g. "1234.5: ethernet0/1(i): ..." or "... len=60".
# Groups: "time" = relative timer in seconds, "io" = in/out direction flag.
re_timestamp = re.compile(r"^(?P<time>\d+\.\d): [\w/]+\((?P<io>.)\)(:| len=)")

# Find a hex dump line: up to 16 space-separated hex byte pairs ("hex")
# followed by whitespace and the ASCII rendering ("ascii").
re_hex_line = re.compile(r"(?P<hex>([0-9a-f]{2} ){1,16})\s+(?P<ascii>.){1,16}")
+
+
def run(input_filename, output_filename):
    """Convert a netscreen snoop dump into a text2pcap-readable hex dump.

    A line matching re_timestamp starts a packet; following lines matching
    re_hex_line carry its payload; the first non-hex line (or EOF) ends the
    packet and flushes it to *output_filename*.
    """
    try:
        ifh = open(input_filename, "r")
    except IOError as err:          # py3 fix: "except IOError, err" is a syntax error
        sys.exit(err)

    # Get the file's creation time: it seeds the absolute timestamps,
    # since the netscreen dump only records a relative timer.
    try:
        ctime = os.stat(input_filename)[stat.ST_CTIME]
    except OSError as err:
        sys.exit(err)

    output_file = OutputFile(output_filename, ctime)

    timestamp = None
    datalines = []
    lineno = 0

    with ifh:
        # py3 fix: file objects iterate lines directly; xreadlines() is gone.
        for line in ifh:
            lineno += 1
            # If we have no timestamp yet, look for one.
            if not timestamp:
                m = re_timestamp.search(line)
                if m:
                    timestamp = m

            # Otherwise, look for hex dump lines.
            else:
                m = re_hex_line.search(line)
                if m:
                    datalines.append((lineno, m))
                elif datalines:
                    # We were gathering hex dump lines and this line is not
                    # one, so the packet is complete: print it and reset so
                    # we can look for the next packet.
                    output_file.PrintPacket(timestamp, datalines)
                    timestamp = None
                    datalines = []

    # At the end of the file we may still have hex dump data in memory.
    # If so, print the packet.
    if datalines:
        output_file.PrintPacket(timestamp, datalines)
+
+
def usage():
    """Print a usage synopsis to stderr and exit with status 1."""
    # py3 fix: "print >> sys.stderr, ..." is a syntax error under Python 3.
    print("Usage: netscreen2dump.py netscreen-dump-file new-dump-file",
          file=sys.stderr)
    sys.exit(1)
+
+
def main():
    """Command-line entry point: netscreen2dump.py <in-file> <out-file>."""
    args = sys.argv[1:]
    if len(args) == 2:
        run(args[0], args[1])
    else:
        # usage() prints the synopsis and exits with status 1.
        usage()


if __name__ == "__main__":
    main()
diff --git a/tools/oss-fuzzshark/build.sh b/tools/oss-fuzzshark/build.sh
new file mode 100755
index 0000000..bc86cdc
--- /dev/null
+++ b/tools/oss-fuzzshark/build.sh
@@ -0,0 +1,22 @@
#!/bin/bash -eux
# Copyright 2017 Google Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# oss-fuzz build script: builds every fuzzshark_* target and stages the
# binaries, libFuzzer options files and seed corpora into $OUT.
#
# Environment (provided by the oss-fuzz runner):
#   OUT         - destination directory for fuzzers and corpora
#   SAMPLES_DIR - capture-sample tree, one subdirectory per fuzz target

# TODO: support specifying targets in args. Google oss-fuzz specifies 'all'.

# TODO update oss-fuzz configuration to build with OSS_FUZZ=1? This is necessary
# to build the fuzzshark_* targets for oss-fuzz.
cmake -DOSS_FUZZ=1 .

cmake --build . --target all-fuzzers

for file in run/fuzzshark_*; do
	fuzzer_name="${file##*/}"                  # e.g. fuzzshark_ip
	fuzzer_target="${fuzzer_name#fuzzshark_}"  # e.g. ip
	mv "$file" "$OUT/"
	# printf is portable; "echo -en" behavior varies between shells.
	printf '[libfuzzer]\nmax_len = 1024\n' > "$OUT/${fuzzer_name}.options"
	# Seed the corpus from sample captures for this target, if any exist.
	# Quote the expansions (the trailing glob must stay unquoted) so paths
	# containing spaces do not word-split.
	if [ -d "$SAMPLES_DIR/${fuzzer_target}" ]; then
		zip -j "$OUT/${fuzzer_name}_seed_corpus.zip" "$SAMPLES_DIR/${fuzzer_target}"/*/*.bin
	fi
done
diff --git a/tools/parse_xml2skinny_dissector.py b/tools/parse_xml2skinny_dissector.py
new file mode 100755
index 0000000..b13776e
--- /dev/null
+++ b/tools/parse_xml2skinny_dissector.py
@@ -0,0 +1,1073 @@
+#
+# Wireshark Dissector Generator for SkinnyProtocolOptimized.xml
+#
+# Author: Diederik de Groot <ddegroot@user.sf.net>
+# Date: 2014-7-22
+# Skinny Protocol Versions: 0 through 22
+#
+# Heritage:
+# xml2obj based on https://code.activestate.com/recipes/149368-xml2obj/
+#
+# Dependencies:
+# python / xml / sax
+#
+# Called By:
+# cog.py + packet-skinny.c.in for inplace code generation
+# See: https://nedbatchelder.com/code/cog/
+#
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+import re
+import xml.sax.handler
+
# Code-generator state: current indentation depth and its cached string form
# (rebuilt by DataNode.incr_indent/decr_indent).
indentation = 0
indent_str = ''
# Maps field name -> generated hf_skinny_* header-field registration entry.
fieldsArray = {}
# Fields whose dissected value the generated C code also assigns to the
# corresponding si-> member (see Integer.dissect / Enum.dissect).
si_fields = {
    "callReference" : "si->callId",
    "lineInstance": "si->lineId",
    "passThroughPartyId" : "si->passThroughPartyId",
    "callState" : "si->callState",
    "callingParty" : "si->callingParty",
    "calledParty" : "si->calledParty",
    "mediaReceptionStatus" : "si->mediaReceptionStatus",
    "mediaTransmissionStatus" : "si->mediaTransmissionStatus",
    "multimediaReceptionStatus" : "si->multimediaReceptionStatus",
    "multimediaTransmissionStatus" : "si->multimediaTransmissionStatus",
    "multicastReceptionStatus" : "si->multicastReceptionStatus",
}

# Non-zero: emit extra tracing comments into the generated dissector code.
debug = 0
+
+def xml2obj(src):
+ """
+ A function to converts XML data into native Python objects.
+
+ """
    # Any character that may not appear in a C/Python identifier.
    non_id_char = re.compile('[^_0-9a-zA-Z]')

    def _name_mangle(name):
        # Replace every non-identifier character with '_' so XML names can
        # be used as attribute/field names.
        return non_id_char.sub('_',
            name)
+
+ class DataNode(object):
+ def __init__(self):
+ self._attrs = {} # XML attributes and child elements
+ self.data = None # child text data
+ self.parent = None
+ self.basemessage = None
+ self.intsize = 0
+ self._children = []
+ self.declared = []
+
+ def __len__(self):
+ # treat single element as a list of 1
+ return 1
+ def __getitem__(self, key):
+ if isinstance(key, str):
+ return self._attrs.get(key,None)
+ else:
+ return [self][key]
+
+ def __contains__(self, name):
+ return name in self._attrs
+
+ def __bool__(self):
+ return bool(self._attrs or self.data)
+
+ def __getattr__(self, name):
+ if name.startswith('__'):
+ # need to do this for Python special methods???
+ raise AttributeError(name)
+ return self._attrs.get(name,None)
+
+ def _add_xml_attr(self, name, value):
+ if name in self._attrs:
+ # multiple attribute of the same name are represented by a list
+ children = self._attrs[name]
+ if not isinstance(children, list):
+ children = [children]
+ self._attrs[name] = children
+ children.append(value)
+ else:
+ self._attrs[name] = value
+
+ def _add_child(self, name, value):
+ #print "adding : %s / %s to %s" %(name,value, self.__class__)
+ self._children.append(value)
+
+ def __str__(self):
+ return '%s:%s' %(self.__class__,self.name)
+
+ def keys(self):
+ return self._attrs.keys()
+
+ def __repr__(self):
+ items = {}
+ if self.data:
+ items.append(('data', self.data))
+ return '{%s}' % ', '.join(['%s:%s' % (k,repr(v)) for k,v in items])
+
+ def __setitem__(self, key, value):
+ self._attrs[key] = value
+
+ def getfieldnames(self):
+ return ''
+
+ def get_req_resp_keys(self, req_resp_keys):
+ return []
+
+ def get_req_resp_key(self):
+ if self.req_resp_key == "1":
+ return self.name
+ return None
+
+ def declaration(self):
+ global fieldsArray
+ if self.name not in fieldsArray:
+ fieldsArray[self.name] = '/* UNKNOWN { &hf_skinny_%s,\n {\n"%s", "skinny.%s", FT_UINT32, BASE_DEC, NULL, 0x0,\n "%s", HFILL }}, */\n' %(self.name, self.name, self.name, self.comment)
+ return ''
+
+ def dissect(self):
+ return self.name or ''
+
+ def incr_indent(self):
+ global indentation
+ global indent_str
+ indentation += 1
+ indent_str = ''
+ for x in range(0, indentation):
+ indent_str += ' '
+
+ def decr_indent(self):
+ global indentation
+ global indent_str
+ indentation -= 1
+ indent_str = ''
+ for x in range(0, indentation):
+ indent_str += ' '
+
+ def indent_out(self, string):
+ return indent_str + string
+
+
    class Message(DataNode):
        ''' A <message> element: emits one handle_<name>() C dissector function. '''
        def __str__(self):
            return self.name

        def gen_handler(self):
            """Return the C handler name for the message table, or 'NULL'."""
            if self.fields is None:
                # skip whole message and return NULL as handler
                return 'NULL'
            return 'handle_%s' %self.name

        def dissect(self):
            """Generate the complete handle_<name>() function body as a string."""
            ret = ''
            declarations = 0
            fixed = 0

            if (self.fields is not None):
                # Function banner documenting the message's XML metadata.
                ret += self.indent_out("/*\n")
                ret += self.indent_out(" * Message: %s\n" %self.name)
                ret += self.indent_out(" * Opcode: %s\n" %self.opcode)
                ret += self.indent_out(" * Type: %s\n" %self.type)
                ret += self.indent_out(" * Direction: %s\n" %self.direction)
                ret += self.indent_out(" * VarLength: %s\n" %self.dynamic)
                ret += self.indent_out(" * MsgType: %s\n" %self.msgtype)
                if self.comment:
                    ret += self.indent_out(" * Comment: %s\n" %self.comment)
                ret += self.indent_out(" */\n")
                ret += self.indent_out("static void\n")
                ret += self.indent_out("handle_%s(ptvcursor_t *cursor, packet_info * pinfo _U_, skinny_conv_info_t * skinny_conv _U_)\n" %self.name)
                ret += self.indent_out("{\n")
                self.incr_indent()

                # Declare hdr_data_length once if any field group guards on it.
                for fields in self.fields:
                    if fields.size_lt or fields.size_gt:
                        if self.basemessage.declared is None or "hdr_data_length" not in self.basemessage.declared:
                            ret += self.indent_out("uint32_t hdr_data_length = tvb_get_letohl(ptvcursor_tvbuff(cursor), 0);\n")
                            self.basemessage.declared.append("hdr_data_length")
                            declarations += 1
                    if fields.fixed == "yes":
                        fixed = 1

                # Versioned field groups (beyond the first) need hdr_version.
                if not declarations or fixed == 1:
                    for fields in self.fields[1:]:
                        if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
                            ret += self.indent_out("uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n")
                            self.basemessage.declared.append("hdr_version")
                            declarations += 1

                req_resp_keys = []
                for fields in self.fields:
                    fields.get_req_resp_keys(req_resp_keys)
                    ret += '%s' %fields.declaration()
                    declarations += 1

                if declarations > 1:
                    ret += "\n"

            if self.fields is not None:
                for fields in self.fields:
                    ret += '%s' %fields.dissect()

            # setup request/response
            if self.msgtype == "request":
                if req_resp_keys and req_resp_keys[0] != '':
                    ret += self.indent_out('skinny_reqrep_add_request(cursor, pinfo, skinny_conv, %s ^ %s);\n' %(self.opcode, req_resp_keys[0]))
                else:
                    ret += self.indent_out('skinny_reqrep_add_request(cursor, pinfo, skinny_conv, %s);\n' %(self.opcode))

            if self.msgtype == "response":
                if req_resp_keys and req_resp_keys[0] != '':
                    ret += self.indent_out('skinny_reqrep_add_response(cursor, pinfo, skinny_conv, %s ^ %s);\n' %(self.request, req_resp_keys[0]))
                else:
                    ret += self.indent_out('skinny_reqrep_add_response(cursor, pinfo, skinny_conv, %s);\n' %(self.request))

            self.decr_indent()

            ret += "}\n\n"
            return ret
+
    class Fields(DataNode):
        ''' A <fields> group: child fields optionally guarded by protocol
        version (beginversion/endversion) and header length (size_lt/size_gt). '''
        # NOTE(review): class-level mutable list, shared by all instances —
        # appears unused by the visible code; confirm before relying on it.
        size_fieldnames= []

        def get_req_resp_keys(self, req_resp):
            """Collect child request/response key field names into *req_resp*."""
            for field in self._children:
                key = field.get_req_resp_key()
                if not key is None and not key in req_resp:
                    req_resp.append(key)

        def declaration(self):
            """Concatenate child declarations; accumulates their byte sizes."""
            ret = ''

            for field in self._children:
                ret += '%s' %(field.declaration())
                self.intsize += field.intsize
            return ret

        def dissect(self, lookupguide=""):
            """Generate dissection code for all children, wrapped in the
            version and header-length guard ifs this group declares."""
            ret = ''
            ifstarted = 0
            #ret += "/* [PARENT: %s, BASEMESSAGE: %s] */\n" %(self.parent.name,self.basemessage.name)

            # Version guard: only emitted when the range is narrower than
            # the full 0..22 protocol-version span.
            if ((self.beginversion or self.endversion) and (self.beginversion != "0" or self.endversion != "22")):

                ifstarted = 1
                ret += self.indent_out('if (')
                if (self.beginversion and self.beginversion != "0"):
                    if (not self.endversion or self.endversion == "22"):
                        ret += 'hdr_version >= V%s_MSG_TYPE) {\n' %self.beginversion
                    else:
                        ret += 'hdr_version >= V%s_MSG_TYPE && ' %self.beginversion
                if (self.endversion and self.endversion != "22"):
                    ret += 'hdr_version <= V%s_MSG_TYPE) {\n' %self.endversion
                self.incr_indent()

            if self.size_lt:
                ret += self.indent_out('if (hdr_data_length < %s) {\n' %self.size_lt)
                self.incr_indent()

            if self.size_gt:
                ret += self.indent_out('if (hdr_data_length > %s) {\n' %self.size_gt)
                self.incr_indent()

            # generate dissection
            for field in self._children:
                ret += '%s' %(field.dissect())

            # Close the guard braces in reverse order of opening.
            if self.size_lt:
                self.decr_indent()
                ret += self.indent_out('}\n')

            if self.size_gt:
                self.decr_indent()
                ret += self.indent_out('}\n')

            if ifstarted:
                self.decr_indent()
                ret += self.indent_out('}\n')

            return ret;
+
    class Integer(DataNode):
        ''' An integer field: emits a ptvcursor_add() for a (u)int8/16/32,
        optionally repeated as an array with a size-guard. '''
        def __init__(self):
            DataNode.__init__(self)
            self.intsize = 0                    # byte width, set in declaration()
            self.endian = "ENC_LITTLE_ENDIAN"   # default wire order

        def __str__(self):
            return '%s:%s' %(self.__class__,self.name)

        def declaration(self):
            """Emit the C variable declaration (if needed) and register the
            hf_skinny_* header-field entry for this integer."""
            ret = ''

            int_sizes = {'uint32':4,'uint16':2,'uint8':1,'int32':4,'int16':2,'int8':1,'ipport':4}
            if self.endianness == "big":
                self.endian = "ENC_BIG_ENDIAN"
            if self.type in int_sizes:
                self.intsize = int_sizes[self.type]
            else:
                print(("ERROR integer %s with type: %s, could not be found" %(self.name, self.type)))

            # Only fields that are read back (declare/make_additional_info)
            # need a local C variable; declare it once per message.
            if self.declare == "yes" or self.make_additional_info == "yes":
                if self.basemessage.declared is None or self.name not in self.basemessage.declared:
                    ret += self.indent_out(f'uint{self.intsize * 8}_t {self.name} = 0;\n')
                    self.basemessage.declared.append(self.name)

            global fieldsArray
            if self.name not in fieldsArray:
                fieldsArray[self.name] ='{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_UINT%d, BASE_DEC, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), self.intsize * 8, '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
            return ret

        def dissect(self):
            """Generate the C dissection code for this integer (or integer
            array, when a size/size_fieldname is present)."""
            ret = ''

            # size is the repeat count: a C variable name for dynamic
            # messages, otherwise the fixed maximum.
            size = 0
            if self.size_fieldname:
                if self.basemessage.dynamic == "yes":
                    size = self.size_fieldname
                else:
                    size = self.maxsize
            elif self.size:
                size = self.size

            if size:
                # Open the array subtree and for-loop (plus guard when the
                # count comes from another field).
                if self.size_fieldname:
                    ret += self.indent_out('if (%s <= %s) {%s\n' %(self.size_fieldname, size, ' /* tvb integer size guard */' if debug else ''))
                else:
                    ret += self.indent_out('{\n')
                self.incr_indent()
                variable = 'counter_%d' %indentation
                ret += self.indent_out('uint32_t %s = 0;\n' %(variable));
                if self.size_fieldname:
                    ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref:%s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, size, self.size_fieldname))
                else:
                    ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size))
                ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable));
                if self.basemessage.dynamic == "no" and self.size_fieldname:
                    self.incr_indent()
                    ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname))
                    self.incr_indent()

            # Read the value into the local variable with the width- and
            # endianness-appropriate tvb accessor.
            if self.declare == "yes" or self.make_additional_info == "yes":
                if self.endianness == "big":
                    if (self.intsize == 4):
                        ret += self.indent_out('%s = tvb_get_ntohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
                    elif (self.intsize == 2):
                        ret += self.indent_out('%s = tvb_get_ntohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
                    else:
                        ret += self.indent_out('%s = tvb_get_guint8(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
                else:
                    if (self.intsize == 4):
                        ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
                    elif (self.intsize == 2):
                        ret += self.indent_out('%s = tvb_get_letohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
                    else:
                        ret += self.indent_out('%s = tvb_get_guint8(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))

            # Tracked fields also get assigned to the si-> info structure.
            if self.name in si_fields.keys():
                if self.endianness == "big":
                    ret += self.indent_out('%s = tvb_get_ntohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(si_fields[self.name]))
                else:
                    ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(si_fields[self.name]))

            ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %d, %s);\n' %(self.name, self.intsize, self.endian))

            if size:
                # Close the loop/guard braces and skip unused array slots.
                if self.basemessage.dynamic == "no" and self.size_fieldname:
                    self.decr_indent()
                    ret += self.indent_out('} else {\n')
                    ret += self.indent_out(' ptvcursor_advance(cursor, %d);\n' %self.intsize)
                    ret += self.indent_out('}\n')
                self.decr_indent()
                ret += self.indent_out('}\n')
                if debug:
                    ret += self.indent_out('ptvcursor_pop_subtree(cursor); /* end for loop tree: %s */\n' %(self.name))
                else:
                    ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
                self.decr_indent()
                if self.size_fieldname:
                    ret += self.indent_out('} else {\n')
                    self.incr_indent()
                    ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s));%s\n' %(size, self.intsize, ' /* guard kicked in -> skip the rest */;' if debug else ''))
                    self.decr_indent()
                    ret += self.indent_out('}\n')

            # make_additional_info: expose "<addr>:<port>" in si->additionalInfo
            # and register the RTP/SRTP address with the SKINNY tap.
            if self.make_additional_info == "yes":
                ret += self.indent_out('srtp_add_address(pinfo, PT_UDP, &%s, %s, 0, "SKINNY", pinfo->num, false, NULL, NULL, NULL);\n' %(self.use_param, self.name))
                ret += self.indent_out('%s_str = address_to_display(NULL, &%s);\n' % (self.use_param, self.use_param))
                ret += self.indent_out('si->additionalInfo = ws_strdup_printf("%%s:%%d", %s_str, %s);\n' % (self.use_param, self.name))
                ret += self.indent_out('wmem_free(NULL, %s_str);\n' % (self.use_param))

            return ret
+
    class Enum(DataNode):
        ''' An enum field: like Integer but registered with a value_string_ext
        lookup (&<Subtype>_ext) and always dissected little-endian. '''
        def __init__(self):
            DataNode.__init__(self)
            self.intsize = 0    # byte width, set in declaration()
            self.sparse = 0

        def __str__(self):
            return '%s:%s' %(self.__class__,self.name)

        def declaration(self):
            """Emit the C variable declaration (if needed) and register the
            hf_skinny_* entry with the enum's extended value_string."""
            ret = ''
            prevvalue = 0
            enum_sizes = {'uint32':4,'uint16':2,'uint8':1}
            if self.type in enum_sizes:
                self.intsize = enum_sizes[self.type]
            else:
                print(("ERROR enum %s with type: %s, could not be found" %(self.name, self.type)))

            if self.declare == "yes":
                if self.basemessage.declared is None or self.name not in self.basemessage.declared:
                    ret += self.indent_out('g%s %s = 0;\n' %(self.type, self.name))
                    self.basemessage.declared.append(self.name)

            global fieldsArray
            if self.name not in fieldsArray:
                fieldsArray[self.name] ='{&hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_UINT%d, BASE_HEX | BASE_EXT_STRING, &%s_ext, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), self.intsize * 8, self.subtype[0].upper() + self.subtype[1:], '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
            return ret

        def dissect(self):
            """Generate the C dissection code for this enum (or enum array)."""
            ret = ''
            endian = "ENC_LITTLE_ENDIAN"
            # size is the repeat count: a C variable name for dynamic
            # messages, otherwise the fixed maximum.
            size = 0
            if self.size_fieldname:
                if self.basemessage.dynamic == "yes":
                    size = self.size_fieldname
                else:
                    size = self.maxsize
            elif self.size:
                size = self.size

            # Expose the decoded enum label in si->additionalInfo.
            if self.make_additional_info == "yes":
                ret += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%s\\"",\n')
                self.incr_indent()
                ret += self.indent_out('try_val_to_str_ext(\n')
                self.incr_indent()
                ret += self.indent_out('tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor)),\n')
                ret += self.indent_out('&%s_ext\n' %(self.subtype[0].upper() + self.subtype[1:]))
                self.decr_indent()
                ret += self.indent_out(')\n')
                self.decr_indent()
                ret += self.indent_out(');\n')

            # Same, but using the abbreviated (_short) value_string table.
            if self.make_additional_info_short == "yes":
                ret += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%s\\"",\n')
                self.incr_indent()
                ret += self.indent_out('try_val_to_str_ext(\n')
                self.incr_indent()
                ret += self.indent_out('tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor)),\n')
                ret += self.indent_out('&%s_short_ext\n' %(self.subtype[0].upper() + self.subtype[1:]))
                self.decr_indent()
                ret += self.indent_out(')\n')
                self.decr_indent()
                ret += self.indent_out(');\n')

            if size:
                # Open the array subtree and for-loop (plus guard when the
                # count comes from another field).
                if self.size_fieldname:
                    ret += self.indent_out('if (%s <= %s) { /* tvb enum size guard */\n' %(self.size_fieldname, self.maxsize))
                else:
                    ret += self.indent_out('{\n')
                self.incr_indent()
                variable = 'counter_%d' %indentation
                ret += self.indent_out('uint32_t %s = 0;\n' %(variable));
                if self.size_fieldname:
                    ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref: %s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, size, self.size_fieldname))
                else:
                    ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size))
                ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable));
                if self.basemessage.dynamic == "no" and self.size_fieldname:
                    self.incr_indent()
                    ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname))
                    self.incr_indent()

            # Tracked fields also get assigned to the si-> info structure.
            if self.name in si_fields.keys():
                ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(si_fields[self.name]))

            if self.declare == "yes":
                if (self.intsize == 4):
                    ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
                elif (self.intsize == 2):
                    ret += self.indent_out('%s = tvb_get_letohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
                else:
                    ret += self.indent_out('%s = tvb_get_guint8(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))

            ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %d, %s);\n' %(self.name, self.intsize, endian))

            if size:
                # Close the loop/guard braces and skip unused array slots.
                if self.basemessage.dynamic == "no" and self.size_fieldname:
                    self.decr_indent()
                    ret += self.indent_out('} else {\n')
                    ret += self.indent_out(' ptvcursor_advance(cursor, 4);\n')
                    ret += self.indent_out('}\n')
                self.decr_indent()
                ret += self.indent_out('}\n')
                if debug:
                    ret += self.indent_out('ptvcursor_pop_subtree(cursor); /* end for loop tree: %s */\n' %(self.name))
                else:
                    ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
                self.decr_indent()
                if self.size_fieldname:
                    ret += self.indent_out('} else {\n')
                    self.incr_indent()
                    ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s)); /* guard kicked in -> skip the rest */;\n' %(size, self.intsize))
                    self.decr_indent()
                    ret += self.indent_out('}\n')

            return ret
+
class String(DataNode):
    """A string field (zero-terminated, fixed-size, or version-dependent)."""

    def __init__(self):
        DataNode.__init__(self)

    def __str__(self):
        return '%s:%s' %(self.__class__,self.name)

    def get_req_resp_key(self):
        # Strings used to correlate request/response pairs are keyed by a
        # hash of their runtime value.
        if self.req_resp_key == "1":
            return 'wmem_str_hash(%s)' %self.name
        return None

    def declaration(self):
        """Determine the wire size, emit helper C locals, register hf_."""
        ret = ''
        self.intsize = 0
        if self.size:
            if self.size=="VariableDirnumSize":
                # Directory numbers are 24 bytes before V18 and 25 after;
                # the exact size is decided at dissection time.
                self.intsize = 24
            else:
                self.intsize = int(self.size)
        elif self.maxsize and self.basemessage.dynamic == "no":
            self.intsize = int(self.maxsize)

        if self.declare == "yes":
            if self.size=="VariableDirnumSize":
                # Emit (once per message) the header-version lookup and the
                # version-dependent VariableDirnumSize local.
                if self.basemessage.declared is None or "VariableDirnumSize" not in self.basemessage.declared:
                    if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
                        #if (self.basemessage.fields is not None and len(self.basemessage.fields) == 1):
                        ret += self.indent_out('uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n')
                        self.basemessage.declared.append("hdr_version")
                    ret += self.indent_out('uint32_t VariableDirnumSize = (hdr_version >= V18_MSG_TYPE) ? 25 : 24;\n')
                    self.basemessage.declared.append("VariableDirnumSize")
            #else:
            #    if self.basemessage.declared is None or self.name not in self.basemessage.declared:
            #        ret += self.indent_out('char *%s = NULL;\n' %self.name)
            #        self.basemessage.declared.append(self.name)

        if self.basemessage.dynamic == "yes" and not self.subtype == "DisplayLabel":
            # Dynamic strings need a length local for the runtime strnlen.
            if self.basemessage.declared is None or self.name + '_len' not in self.basemessage.declared:
                ret += self.indent_out('uint32_t %s_len = 0;\n' %self.name)
                self.basemessage.declared.append(self.name + '_len')

        global fieldsArray
        if self.name not in fieldsArray:
            fieldsArray[self.name] = '{&hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_STRING, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
        return ret

    def dissect(self):
        """Emit the C code that reads/adds this string field."""
        ret = ''

        if self.declare == "yes" and self.size != "VariableDirnumSize":
            ret += self.indent_out('const char * %s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s));\n' %(self.name, self.size))

        if self.subtype == "DisplayLabel":
            # Display labels may contain control codes; a dedicated helper
            # renders them (0 => zero-terminated / dynamic length).
            if self.basemessage.dynamic == "yes":
                ret += self.indent_out('dissect_skinny_displayLabel(cursor, pinfo, hf_skinny_%s, 0);\n' %(self.name))
            elif self.size_fieldname:
                ret += self.indent_out('dissect_skinny_displayLabel(cursor, pinfo, hf_skinny_%s, %s);\n' %(self.name, self.size_fieldname))
            else:
                ret += self.indent_out('dissect_skinny_displayLabel(cursor, pinfo, hf_skinny_%s, %s);\n' %(self.name, self.size))

        elif self.basemessage.dynamic == "yes":
            # Zero-terminated string: measure first; a lone terminator byte
            # is simply skipped.
            ret += self.indent_out('%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), -1)+1;\n' %self.name)
            ret += self.indent_out('if (%s_len > 1) {\n' %self.name)
            if self.name in si_fields.keys():
                ret += self.indent_out(' %s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s_len));\n' %(si_fields[self.name], self.name))
            ret += self.indent_out(' ptvcursor_add(cursor, hf_skinny_%s, %s_len, ENC_ASCII);\n' %(self.name, self.name))
            ret += self.indent_out('} else {\n')
            ret += self.indent_out(' ptvcursor_advance(cursor, 1);\n')
            ret += self.indent_out('}\n')
        elif self.size_fieldname:
            if self.name in si_fields.keys():
                ret += self.indent_out('%s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s));\n' %(si_fields[self.name], self.size_fieldname))
            ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %s, ENC_ASCII);\n' %(self.name, self.size_fieldname))
        else:
            if self.name in si_fields.keys():
                ret += self.indent_out('%s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s));\n' %(si_fields[self.name], self.size))
            if self.make_additional_info == "yes":
                # Export the (non-empty) value into si->additionalInfo.
                ret += self.indent_out('uint32_t %s_len;\n' %(self.name))
                if self.size=="VariableDirnumSize":
                    ret += self.indent_out('%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), VariableDirnumSize)+1;\n' %(self.name))
                else:
                    ret += self.indent_out('%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), 24)+1;\n' %(self.name))
                ret += self.indent_out('if (%s_len > 1) {\n' %(self.name))
                self.incr_indent()
                ret += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%%s\\"", tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s_len));\n' %(self.name))
                self.decr_indent()
                ret += self.indent_out('}\n')

            ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %s, ENC_ASCII);\n' %(self.name, self.size))

        return ret
+
class Ether(DataNode):
    """An Ethernet (MAC) address field: 6 bytes rendered as FT_ETHER,
    with any remaining declared bytes skipped as padding."""

    def __init__(self):
        DataNode.__init__(self)

    def __str__(self):
        return '%s:%s' %(self.__class__, self.name)

    def declaration(self):
        """Register the hf_ entry and emit any C locals this field needs."""
        global fieldsArray
        out = []

        # Wire size: an explicit size wins, then maxsize for non-dynamic
        # messages, otherwise the natural 6 bytes of a MAC address.
        if self.size:
            self.intsize = int(self.size)
        elif self.maxsize and self.basemessage.dynamic == "no":
            self.intsize = int(self.maxsize)
        else:
            self.intsize = 6

        declared = self.basemessage.declared
        if self.declare == "yes" and (declared is None or self.name not in declared):
            out.append(self.indent_out('uint32_t %s = 0;\n' % self.name))
            declared.append(self.name)

        if self.basemessage.dynamic == "yes" and (declared is None or self.name + '_len' not in declared):
            out.append(self.indent_out('uint32_t %s_len = 0;\n' % self.name))
            declared.append(self.name + '_len')

        # hf_ registration happens once per field name, shared globally.
        if self.name not in fieldsArray:
            if self.longcomment:
                blurb = '"' + self.longcomment + '"'
            elif self.comment:
                blurb = '"' + self.comment + '"'
            else:
                blurb = 'NULL'
            label = self.comment if (self.comment and self.longcomment) else self.name
            fieldsArray[self.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_ETHER, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' % (
                self.name, label, self.name.replace("_", "."), blurb)
        return ''.join(out)

    def dissect(self):
        """Emit C adding 6 bytes as FT_ETHER and skipping trailing padding."""
        if self.basemessage.dynamic == "yes":
            lines = [
                '%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), -1)+1;\n' % self.name,
                'if (%s_len > 1) {\n' % self.name,
                ' ptvcursor_add(cursor, hf_skinny_%s, 6, ENC_NA);\n' % self.name,
                ' ptvcursor_advance(cursor, %s_len - 6);\n' % self.name,
                '} else {\n',
                ' ptvcursor_advance(cursor, 1);\n',
                '}\n',
            ]
        else:
            # Fixed layout: the declared size (or the size field) includes
            # padding beyond the 6 address bytes, so advance past the rest.
            pad = self.size_fieldname if self.size_fieldname else self.size
            lines = [
                'ptvcursor_add(cursor, hf_skinny_%s, 6, ENC_NA);\n' % self.name,
                'ptvcursor_advance(cursor, %s - 6);\n' % pad,
            ]
        return ''.join(self.indent_out(line) for line in lines)
+
class BitField(DataNode):
    """A bitmask field: every <entry> becomes its own FT_BOOLEAN hf entry
    overlaying the same underlying integer."""

    def __init__(self):
        DataNode.__init__(self)

    def __str__(self):
        return '%s:%s' %(self.__class__,self.name)

    def declaration(self):
        """Register one FT_BOOLEAN hf_ entry per bit; emits no C locals."""
        global fieldsArray
        ret = ''
        # Map the declared base type name to its byte width.
        int_sizes = {'uint32':4,'uint16':2,'uint8':1,'int32':4,'int16':2,'int8':1}
        self.intsize = 0
        if self.size in int_sizes:
            self.intsize = int_sizes[self.size]

        for entries in self.entries:
            for entry in entries.entry:
                if entry.name not in fieldsArray:
                    # entry.value is the bitmask within the parent integer.
                    fieldsArray[entry.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_BOOLEAN, %d, TFS(&tfs_yes_no), %s,\n %s, HFILL }},\n' %(entry.name, entry.text, entry.name.replace("_","."), self.intsize * 8, entry.value, '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')

        return ret

    def dissect(self):
        """Emit a subtree with one no-advance add per bit, then one advance
        over the shared integer."""
        ret = ''
        ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s");\n' %(self.name))
        for entries in self.entries:
            for entry in entries.entry:
                # All bits overlay the same bytes; the cursor only moves once
                # after the loop.
                ret += self.indent_out('ptvcursor_add_no_advance(cursor, hf_skinny_%s, %d, ENC_LITTLE_ENDIAN);\n' %(entry.name, self.intsize))
        ret += self.indent_out('ptvcursor_advance(cursor, %d);\n' %(self.intsize))
        ret += self.indent_out('ptvcursor_pop_subtree(cursor); /* end bitfield: %s */\n' %(self.name))


        return ret
+
class Ip(DataNode):
    """An IPv4 (4 byte) or IPv6 (16 byte) address field."""

    def __init__(self):
        DataNode.__init__(self)
        # NOTE(review): XML attributes (including 'type') are attached by
        # the tree builder only after construction, so self.type is normally
        # still unset here and intsize stays 4 -- confirm ipv6 sizing where
        # intsize matters (e.g. union size computation).
        self.intsize = 16 if self.type == "ipv6" else 4

    def __str__(self):
        return '%s:%s' %(self.__class__, self.name)

    def declaration(self):
        """Register the hf_ entry (FT_IPv4 or FT_IPv6); emits no C locals."""
        global fieldsArray
        if self.name not in fieldsArray:
            fieldtype = 'FT_IPv4' if self.type == "ipv4" else 'FT_IPv6'
            if self.longcomment:
                blurb = '"' + self.longcomment + '"'
            elif self.comment:
                blurb = '"' + self.comment + '"'
            else:
                blurb = 'NULL'
            label = self.comment if (self.comment and self.longcomment) else self.name
            fieldsArray[self.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", %s, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' % (
                self.name, label, self.name.replace("_", "."), fieldtype, blurb)
        return ''

    def dissect(self):
        """Emit the ptvcursor_add() for the address bytes."""
        if self.type == "ipv4":
            line = 'ptvcursor_add(cursor, hf_skinny_%s, 4, ENC_BIG_ENDIAN);\n' % self.name
        else:
            line = 'ptvcursor_add(cursor, hf_skinny_%s, 16, ENC_NA);\n' % self.name
        return self.indent_out(line)
+
class Ipv4or6(DataNode):
    """A combined address field carrying both an IPv4 and an IPv6 part."""

    def __init__(self):
        DataNode.__init__(self)
        # 4 bytes of IPv4; without an explicit endianness attribute the
        # field also carries a 16 byte IPv6 part (total 20).
        # NOTE(review): XML attributes are attached by the tree builder
        # after construction, so self.endianness is normally still unset
        # (None) here -- confirm which messages actually set it.
        self.intsize = 4
        if self.endianness is None:
            self.intsize += 16

    def __str__(self):
        return '%s:%s' %(self.__class__,self.name)

    def declaration(self):
        """Register the _ipv4/_ipv6 hf entries and optional C locals."""
        global fieldsArray

        ret = ''
        name = self.name + '_ipv4'
        if name not in fieldsArray:
            fieldsArray[name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_IPv4, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(name, self.name + ' IPv4 Address', name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
        name = self.name + '_ipv6'
        if name not in fieldsArray:
            fieldsArray[name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_IPv6, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(name, self.name + ' IPv6 Address', name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
        if self.make_additional_info == "yes":
            # C locals consumed by read_skinny_ipv4or6() when the address is
            # exported into si->additionalInfo.
            if self.basemessage.declared is None or self.name not in self.basemessage.declared:
                ret += self.indent_out('address %s;\n' %(self.name))
                ret += self.indent_out('char *%s_str = NULL;\n' %(self.name))
                self.basemessage.declared.append(self.name)

        return ret

    def dissect(self):
        """Emit the optional address read plus the ipv4/ipv6 dissect call."""
        ret = ''
        if self.make_additional_info == "yes":
            ret += self.indent_out('read_skinny_ipv4or6(cursor, &%s);\n' %(self.name));
        # The dissect helper is emitted unconditionally.
        ret += self.indent_out('dissect_skinny_ipv4or6(cursor, hf_skinny_%s_ipv4, hf_skinny_%s_ipv6);\n' %(self.name, self.name));
        return ret;
+
class XML(DataNode):
    """An embedded XML payload field, rendered via dissect_skinny_xml()."""

    def __init__(self):
        DataNode.__init__(self)
        self.intsize = 0

    def __str__(self):
        return '%s:%s' %(self.__class__, self.name)

    def declaration(self):
        """Record the payload size and register the FT_STRING hf_ entry."""
        global fieldsArray

        if self.size:
            self.intsize = int(self.size)
        elif self.maxsize:
            self.intsize = int(self.maxsize)

        if self.name not in fieldsArray:
            if self.longcomment:
                blurb = '"' + self.longcomment + '"'
            elif self.comment:
                blurb = '"' + self.comment + '"'
            else:
                blurb = 'NULL'
            label = self.comment if (self.comment and self.longcomment) else self.name
            fieldsArray[self.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_STRING, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' % (
                self.name, label, self.name.replace("_", "."), blurb)
        return ''

    def dissect(self):
        """Emit the dissect_skinny_xml() call (size field or fixed size)."""
        length_expr = self.size_fieldname if self.size_fieldname else '0'
        return self.indent_out('dissect_skinny_xml(cursor, hf_skinny_%s, pinfo, %s, %d);\n' % (
            self.name, length_expr, self.intsize))
+
class Code(DataNode):
    """Pseudo-field that emits a hand-written C snippet instead of wire data."""

    def __init__(self):
        DataNode.__init__(self)

    def __str__(self):
        return '%s:%s' %(self.__class__, self.name)

    def declaration(self):
        # Nothing to declare; this node only generates inline C in dissect().
        return ''

    def dissect(self):
        """For 'calling_and_called_party': set si->additionalInfo to
        "caller -> callee" when both si members are populated."""
        if self.type != "calling_and_called_party":
            return ''
        # use_param names the two si-> members, comma separated.
        params = self.use_param.split(',')
        caller, callee = params[0], params[1]
        body = self.indent_out('if (si->%s && si->%s) {\n' % (caller, callee))
        self.incr_indent()
        body += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%%s -> %%s\\"", si->%s, si->%s);\n' % (caller, callee))
        self.decr_indent()
        body += self.indent_out('}\n')
        return body
+
class Struct(DataNode):
    """A (possibly repeated) sub-structure made up of nested fields."""

    def __str__(self):
        return '// Struct : %s / %s / %s / %s\n' %(self.name, self.size, self.field_sizename, self.maxsize)

    def declaration(self):
        """Declare all contained fields; may emit the hdr_version local."""
        ret = ''

        if (self.fields is not None and len(self.fields)):
            if (len(self.fields) > 1):
                # Multiple <fields> variants are selected by the protocol
                # version found in the message header.
                if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
                    ret += self.indent_out("uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n")
                    self.basemessage.declared.append("hdr_version")
            for fields in self.fields:
                ret += '%s' %fields.declaration()
                #self.intsize += fields.intsize
                self.intsize = fields.intsize
        return ret

    def dissect(self):
        """Emit the C dissection of the struct, wrapped in subtrees and
        guarded loops when the struct is repeated."""
        ret = ''
        # 'indentation' is a module-level counter used to keep generated C
        # loop variables unique per nesting depth.
        variable = 'counter_%d' %indentation
        size = 0

        if self.size_fieldname:
            #if self.basemessage.dynamic == "yes":
            #    size = self.size_fieldname
            #else:
            #    size = self.maxsize
            size = self.maxsize
        elif self.size:
            size = self.size

        if size:
            # Repeated struct: guard against counts beyond maxsize / the end
            # of the tvb, then loop over the entries.
            if self.size_fieldname:
                ret += self.indent_out('if (%s <= %s) {%s\n' %(self.size_fieldname, size, ' /* tvb struct size guard */' if debug else ''))
            else:
                ret += self.indent_out('{\n')
            self.incr_indent()
            if debug:
                ret += self.indent_out('/* start struct : %s / size: %d */\n' %(self.name, self.intsize))
            ret += self.indent_out('uint32_t %s = 0;\n' %(variable));
            if self.size_fieldname:
                ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref:%s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, self.maxsize, self.size_fieldname))
                # Extra run-time guard: the message length at offset 0 (+8
                # for the header) must cover count * struct-size.
                if self.maxsize:
                    ret += self.indent_out('if (%s && tvb_get_letohl(ptvcursor_tvbuff(cursor), 0) + 8 >= ptvcursor_current_offset(cursor) + (%s * %s) && %s <= %s) {%s\n' %(self.size_fieldname, self.size_fieldname, self.intsize, self.size_fieldname, self.maxsize, '/* tvb counter size guard */' if debug else ''))
                else:
                    ret += self.indent_out('if (%s && tvb_get_letohl(ptvcursor_tvbuff(cursor), 0) + 8 >= ptvcursor_current_offset(cursor) + (%s * %s)) {%s\n' %(self.size_fieldname, self.size_fieldname, self.intsize, '/* tvb counter size guard */' if debug else ''))
                self.incr_indent()
            else:
                ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size))

            ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable));
            if self.basemessage.dynamic == "no" and self.size_fieldname:
                # Fixed layout with runtime count: dissect only the first
                # 'size_fieldname' entries; the rest are skipped per-entry.
                self.incr_indent()
                ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname))
                self.incr_indent()
        else:
            if debug:
                ret += self.indent_out('{ /* start struct : %s / size: %d */\n' %(self.name, self.intsize))
            else:
                ret += self.indent_out('{\n')
            self.incr_indent()
            ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s");\n' %(self.name))

        if size:
            # Per-iteration subtree label: "name [i / count]".
            if self.size_fieldname:
                ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [%%d / %%d]", %s + 1, %s);\n' %(self.name, variable, self.size_fieldname))
            else:
                ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [%%d / %%d]", %s + 1, %s);\n' %(self.name, variable, size))

        if (self.fields is not None and len(self.fields)):
            for fields in self.fields:
                ret += '%s' %fields.dissect()

        # Close the per-entry guard, loop, subtrees and size guards in the
        # mirror order of the incr_indent() calls above.
        if self.basemessage.dynamic == "no" and self.size_fieldname:
            self.decr_indent()
            ret += self.indent_out('} else {\n')
            ret += self.indent_out(' ptvcursor_advance(cursor, %d);\n' %(self.intsize))
            ret += self.indent_out('}\n')

        if size:
            ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
            self.decr_indent()
            if debug:
                ret += self.indent_out('} /* end for loop tree: %s */\n' %self.name)
            else:
                ret += self.indent_out('}\n')
            if self.size_fieldname:
                self.decr_indent()
                ret += self.indent_out('} /* end counter tvb size guard */\n' if debug else '}\n')

        ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
        if debug:
            ret += self.indent_out('/* end struct: %s */\n' %self.name)
        self.decr_indent()
        if self.size_fieldname:
            ret += self.indent_out('} else {\n')
            self.incr_indent()
            ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s));%s\n' %(self.size_fieldname, self.intsize, ' /* guard kicked in -> skip the rest */' if debug else ''));
            self.decr_indent()
            ret += self.indent_out('} /* end struct size guard */\n' if debug else '}\n')

        return ret
+
class Union(DataNode):
    """A union of alternative field layouts selected by a lookup guide."""

    def __str__(self):
        return '%s:%s' %(self.__class__,self.name)

    def declaration(self):
        """Declare all member fields and compute the union's wire size.

        The union size (self.intsize / self.maxsize) is the largest running
        total over consecutive member fields that share the same lookup
        constraints (i.e. one union alternative).
        """
        ret = ''
        self.maxsize = 0
        if (self.fields is not None and len(self.fields)):
            if (len(self.fields) > 1):
                # Multiple <fields> variants are selected by the protocol
                # version found in the message header.
                if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
                    ret += self.indent_out("uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n")
                    self.basemessage.declared.append("hdr_version")
            for fields in self.fields:
                ret += '%s' %fields.declaration()
                previous_lookup_eq = fields._children[0].lookup_eq
                previous_lookup_le = fields._children[0].lookup_le
                previous_lookup_ge = fields._children[0].lookup_ge
                self.runningtotal = 0
                for field in fields._children:
                    # Start a new alternative when the lookup constraints
                    # change.
                    # NOTE(review): lookup_ge is compared with '==' while
                    # eq/le use '!='; that resets the running total whenever
                    # the ge values are EQUAL (e.g. both None), which looks
                    # inverted -- verify the intended union sizing.
                    if previous_lookup_eq != field.lookup_eq or previous_lookup_le != field.lookup_le or previous_lookup_ge == field.lookup_ge:
                        previous_lookup_eq = field.lookup_eq
                        previous_lookup_le = field.lookup_le
                        previous_lookup_ge = field.lookup_ge
                        self.runningtotal = 0

                    self.runningtotal += field.intsize
                    if self.runningtotal > self.maxsize:
                        self.maxsize = self.runningtotal

        self.intsize = self.maxsize

        return ret

    def dissect(self):
        """Emit a C if/else-if chain over the lookup guide; the selected
        member dissects its bytes and the remainder up to the union size is
        skipped so every branch consumes the same number of bytes."""
        ret = ''
        ifblock = self.indent_out('if')
        skip = 0
        #ret += self.indent_out('/* Union : %s / maxsize: %s */\n' %(self.name, self.maxsize))

        if (self.fields is not None and len(self.fields)):
            for fields in self.fields:
                for field in fields._children:
                    if self.lookup_guide and (field.lookup_ge or field.lookup_le or field.lookup_eq):
                        lookupguide = self.lookup_guide
                        # start block
                        subtree_text = ''
                        if field.lookup_ge and field.lookup_le:
                            ret += '%s (%s >= %s && %s <= %s)' %(ifblock, lookupguide, field.lookup_ge.upper(), lookupguide, field.lookup_le.upper())
                            subtree_text = "%s <= %s <= %s" %(field.lookup_ge, lookupguide, field.lookup_le)
                        elif field.lookup_ge:
                            ret += '%s (%s >= %s)' %(ifblock, lookupguide, field.lookup_ge.upper())
                            subtree_text = "%s >= %s" %(lookupguide, field.lookup_ge)
                        elif field.lookup_le:
                            ret += '%s (%s <= %s)' %(ifblock, lookupguide, field.lookup_le.upper())
                            subtree_text = "%s <= %s" %(lookupguide, field.lookup_le)
                        elif field.lookup_eq:
                            if field.lookup_eq == "*":
                                # wildcard: final 'else' catch-all branch
                                ret += ' else'
                                subtree_text = "any %s" %(lookupguide)
                            elif field.lookup_eq == "skip":
                                continue
                            else:
                                ret += '%s (%s == %s)' %(ifblock, lookupguide, field.lookup_eq.upper())
                                subtree_text = "%s is %s" %(lookupguide, field.lookup_eq)

                        ret += self.indent_out(' {\n')
                        self.incr_indent()
                        if debug:
                            ret += self.indent_out('/* start union : %s / maxsize: %s */\n' %(self.name, self.maxsize))
                        currsize = 0
                        # dissect field

                        ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s");\n' %subtree_text)
                        ret += '%s' %field.dissect()
                        ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')

                        currsize += field.intsize

                        # compensate length: pad out to the union size
                        if (self.maxsize - currsize) > 0:
                            ret += self.indent_out('ptvcursor_advance(cursor, %d);\n' %(self.maxsize - currsize))

                        self.decr_indent()

                        # close block
                        ret += self.indent_out('}')
                        ifblock = ' else if'
                    else:
                        ret += '/* ERROR %s, missing lookup_guide */' %field.dissect()
            ret += '\n'

        return ret
+
class TreeBuilder(xml.sax.handler.ContentHandler):
    """SAX handler that turns the protocol XML into a DataNode tree.

    Each XML element becomes an instance of the DataNode subclass matching
    its tag name; XML attributes become python attributes via
    _add_xml_attr(), and element text is gathered into .data.
    """
    def __init__(self):
        self.stack = []           # saved (node, text_parts) per open element
        self.root = DataNode()
        self.previous = self.root
        self.current = self.root
        self.basemessage = None   # innermost <message> node, if any
        self.text_parts = []

    def startElement(self, name, attrs):
        # Tag name -> node class.  Only the selected class is instantiated;
        # the original code built a throwaway instance of every type for
        # each element encountered.
        objecttype = {"message": Message, "fields": Fields, "enum": Enum,
                      "bitfield": BitField, "struct": Struct, "union": Union,
                      "integer": Integer, "string": String, "ether": Ether,
                      "ip": Ip, "ipv4or6": Ipv4or6, "xml": XML, "code": Code}
        self.previous = self.current
        self.stack.append((self.current, self.text_parts))
        # Unknown tags fall back to a plain DataNode.
        self.current = objecttype.get(name, DataNode)()
        if name == "message":
            self.basemessage = self.current
        self.text_parts = []
        self.current.parent = self.previous
        self.current.basemessage = self.basemessage
        # xml attributes --> python attributes
        for k, v in list(attrs.items()):
            self.current._add_xml_attr(_name_mangle(k), v)

    def endElement(self, name):
        text = ''.join(self.text_parts).strip()
        if text:
            self.current.data = text
        if self.current._attrs:
            obj = self.current
        else:
            # a text only node is simply represented by the string
            obj = text or ''
        self.current, self.text_parts = self.stack.pop()
        self.current._add_xml_attr(_name_mangle(name), obj)
        self.current._add_child(_name_mangle(name), obj)

    def characters(self, content):
        self.text_parts.append(content)
+
+ builder = TreeBuilder()
+ xml.sax.parse(src, builder)
+ return list(builder.root._attrs.values())[0]
+
+# skinny = xml2obj('SkinnyProtocolOptimized.xml')
+# for message in skinny.message:
+# print '%s' %message.dissect()
+
+#if __name__ == '__main__':
+# import timeit
+# print(timeit.timeit("generateMessageDissectors()", setup="from __main__ import generateMessageDissectors"))
+
+
+#skinny = xml2obj('SkinnyProtocolOptimized.xml')
+#for message in skinny.message:
+# print(message)
+# message.dissect()
+
+#for key,value in fieldsArray.items():
+# print "%s : %s" %(key,value)
+#print '%r\n' %fieldsArray
+
+#skinny = xml2obj('SkinnyProtocolOptimized.xml')
+#for message in skinny.message:
+# print message.declaration()
diff --git a/tools/pidl/MANIFEST b/tools/pidl/MANIFEST
new file mode 100644
index 0000000..8eb4d22
--- /dev/null
+++ b/tools/pidl/MANIFEST
@@ -0,0 +1,41 @@
+MANIFEST
+tests/parse_idl.pl
+tests/Util.pm
+tests/ndr_refptr.pl
+tests/ndr_string.pl
+tests/ndr_simple.pl
+tests/ndr_align.pl
+tests/ndr_alloc.pl
+tests/ndr_array.pl
+tests/ndr.pl
+tests/samba-ndr.pl
+tests/util.pl
+tests/test_util.pl
+tests/ndr_represent.pl
+tests/ndr_compat.pl
+tests/ndr_fullptr.pl
+tests/ndr_tagtype.pl
+tests/header.pl
+lib/Parse/Pidl/Samba3/ClientNDR.pm
+lib/Parse/Pidl/Samba3/ServerNDR.pm
+lib/Parse/Pidl/Samba4/NDR/Server.pm
+lib/Parse/Pidl/Samba4/NDR/Parser.pm
+lib/Parse/Pidl/Samba4/NDR/Client.pm
+lib/Parse/Pidl/Samba4/Header.pm
+lib/Parse/Pidl/Samba4/TDR.pm
+lib/Parse/Pidl/Samba4/Template.pm
+lib/Parse/Pidl/Samba4.pm
+lib/Parse/Pidl/Wireshark/Conformance.pm
+lib/Parse/Pidl/Wireshark/NDR.pm
+lib/Parse/Pidl/Typelist.pm
+lib/Parse/Pidl/Dump.pm
+lib/Parse/Pidl/Compat.pm
+lib/Parse/Pidl/Util.pm
+lib/Parse/Pidl/NDR.pm
+lib/Parse/Pidl.pm
+Makefile.PL
+idl.yp
+TODO
+README
+pidl
+META.yml
diff --git a/tools/pidl/META.yml b/tools/pidl/META.yml
new file mode 100644
index 0000000..4822b50
--- /dev/null
+++ b/tools/pidl/META.yml
@@ -0,0 +1,18 @@
+name: Parse-Pidl
+abstract: Generate parsers / DCE/RPC-clients from IDL
+author:
+ - Andrew Tridgell <tridge@samba.org>
+ - Jelmer Vernooij <jelmer@samba.org>
+ - Stefan Metzmacher <metze@samba.org>
+ - Tim Potter <tpot@samba.org>
+license: gplv3
+installdirs: site
+homepage: http://www.samba.org/
+bugtracker: http://bugzilla.samba.org/
+requires:
+ Parse::Yapp: 0
+recommends:
+ Data::Dumper: 0
+meta-spec:
+ version: 1.3
+ url: http://module-build.sourceforge.net/META-spec-v1.3.html
diff --git a/tools/pidl/Makefile.PL b/tools/pidl/Makefile.PL
new file mode 100755
index 0000000..2a405fc
--- /dev/null
+++ b/tools/pidl/Makefile.PL
@@ -0,0 +1,17 @@
# Standard ExtUtils::MakeMaker driver for installing Parse::Pidl
# stand-alone (outside the Samba build).
use ExtUtils::MakeMaker;
WriteMakefile(
	'NAME' => 'Parse::Pidl',
	'VERSION_FROM' => 'lib/Parse/Pidl.pm',
	'EXE_FILES' => [ 'pidl' ],
	'test' => { 'TESTS' => 'tests/*.pl' }
);

# Extra make rules: regenerate the yapp-built parser modules from their
# grammar sources (idl.yp / expr.yp) when those change.
sub MY::postamble {
<<'EOT';
lib/Parse/Pidl/IDL.pm: idl.yp
	yapp -m 'Parse::Pidl::IDL' -o lib/Parse/Pidl/IDL.pm idl.yp

lib/Parse/Pidl/Expr.pm: expr.yp
	yapp -m 'Parse::Pidl::Expr' -o lib/Parse/Pidl/Expr.pm expr.yp
EOT
}
diff --git a/tools/pidl/README b/tools/pidl/README
new file mode 100644
index 0000000..240a07a
--- /dev/null
+++ b/tools/pidl/README
@@ -0,0 +1,64 @@
+Introduction:
+=============
+This directory contains the source code of the pidl (Perl IDL)
+compiler for Samba 4.
+
+The main sources for pidl are available using Git as part of
+the Samba source tree. Use:
+git clone git://git.samba.org/samba.git
+
+Pidl works by building a parse tree from a .pidl file (a simple
+dump of its internal parse tree) or a .idl file
+(a file format mostly like the IDL file format midl uses).
+The IDL file parser is in idl.yp (a yacc file converted to
+perl code by yapp)
+
+Standalone installation:
+========================
+Run Makefile.PL to generate the Makefile.
+
+Then run "make install" (as root) to install.
+
+Internals overview:
+===================
+
+After a parse tree is present, pidl will call one of its backends
+(which one depends on the options given on the command-line). Here is
+a list of current backends:
+
+-- Generic --
+Parse::Pidl::Dump - Converts the parse tree back to an IDL file
+Parse::Pidl::Samba4::Header - Generates header file with data structures defined in IDL file
+Parse::Pidl::NDR - Generates intermediate data structures for use by NDR parsers/generators
+Parse::Pidl::ODL - Generates IDL structures from ODL structures for use in the NDR parser generator
+Parse::Pidl::Test - Utility functions for use in pidl's testsuite
+
+-- Samba NDR --
+Parse::Pidl::Samba4::NDR::Client - Generates client call functions in C using the NDR parser
+Parse::Pidl::Samba4::NDR::Parser - Generates pull/push functions for parsing NDR
+Parse::Pidl::Samba4::NDR::Server - Generates server side implementation in C
+Parse::Pidl::Samba4::TDR - Parser generator for the "Trivial Data Representation"
+Parse::Pidl::Samba4::Template - Generates stubs in C for server implementation
+Parse::Pidl::Samba4::Python - Generates bindings for Python
+
+-- Samba COM / DCOM --
+Parse::Pidl::Samba4::COM::Proxy - Generates proxy object for DCOM (client-side)
+Parse::Pidl::Samba4::COM::Stub - Generates stub call handler for DCOM (server-side)
+Parse::Pidl::Samba4::COM::Header - Generates headers for COM
+
+-- Wireshark --
+Parse::Pidl::Wireshark::NDR - Generates a parser for the Wireshark network sniffer
+Parse::Pidl::Wireshark::Conformance - Reads conformance files containing additional data for generating Wireshark parsers
+
+-- Utility modules --
+Parse::Pidl::Util - Misc utility functions used by *.pm and pidl.pl
+Parse::Pidl::Typelist - Utility functions for keeping track of known types and their representation in C
+
+Tips for hacking on pidl:
+ - Inspect pidl's parse tree by using the --keep option and looking at the
+ generated .pidl file.
+ - The various backends have a lot in common, if you don't understand how one
+ implements something, look at the others.
+ - See pidl(1) and the documentation on midl
+ - See 'info bison' and yapp(1) for information on the file format of idl.yp
+ - Run the tests (all in tests/)
diff --git a/tools/pidl/TODO b/tools/pidl/TODO
new file mode 100644
index 0000000..36ae5e9
--- /dev/null
+++ b/tools/pidl/TODO
@@ -0,0 +1,44 @@
+- warn when union instances don't have a discriminant
+
+- true multiple dimension array / strings in arrays support
+
+- compatibility mode for generating MIDL-readable data:
+ - strip out pidl-specific properties
+
+- make bitmap an optional attribute on enum
+- support nested elements
+- support typedefs properly (e.g. allow "typedef void **bla;")
+- make typedefs generate real typedefs
+- improve represent_as(): allow it to be used for arrays and other complex
+ types
+
+- --explain-ndr option that dumps out parse tree ?
+
+- separate tables for NDR and DCE/RPC
+ - maybe no tables for NDR at all? we only need them for ndrdump
+ and that can use dlsym()
+
+- allow data structures outside of interfaces
+
+- mem_ctx in the interface rather than as struct ndr member.
+
+- real typelibs
+
+- fix [in,out] handling and allocation for samba3:
+ - add inout
+ - make NULL to mean "allocate me"
+ - remove NDR_AUTO_REF_ALLOC flag
+
+- automatic test generator based on IDL pointer types
+
+- support converting structs to tuples in Python rather than objects
+- convert structs with a single mattering member to that member directly, e.g.:
+ struct bar {
+ int size;
+ [size_is(size)] uint32 *array;
+ };
+
+ should be converted to an array of uint32's
+
+- python: fill in size members automatically in some places if the struct isn't being returned
+ (so we don't have to cope with the array growing)
diff --git a/tools/pidl/expr.yp b/tools/pidl/expr.yp
new file mode 100644
index 0000000..ef8eee3
--- /dev/null
+++ b/tools/pidl/expr.yp
@@ -0,0 +1,202 @@
+# expr.yp
+# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU GPL
+#
+%left '->'
+%right '!' '~'
+%left '*' '/' '%'
+%left '+' '-'
+%left '<<' '>>'
+%left '>' '<'
+%left '==' '!='
+%left '&'
+%left '|'
+%left '&&'
+%left '||'
+%left '?' ':'
+%left NEG DEREF ADDROF INV
+%left '.'
+
+%%
+exp:
+ NUM
+ |
+ TEXT { "\"$_[1]\"" }
+ |
+ func
+ |
+ var
+ |
+ '~' exp %prec INV { "~$_[2]" }
+ |
+ exp '+' exp { "$_[1] + $_[3]" }
+ |
+ exp '-' exp { "$_[1] - $_[3]" }
+ |
+ exp '*' exp { "$_[1] * $_[3]" }
+ |
+ exp '%' exp { "$_[1] % $_[3]" }
+ |
+ exp '<' exp { "$_[1] < $_[3]" }
+ |
+ exp '>' exp { "$_[1] > $_[3]" }
+ |
+ exp '|' exp { "$_[1] | $_[3]" }
+ |
+ exp '==' exp { "$_[1] == $_[3]" }
+ |
+ exp '<=' exp { "$_[1] <= $_[3]" }
+ |
+ exp '=>' exp { "$_[1] => $_[3]" }
+ |
+ exp '<<' exp { "$_[1] << $_[3]" }
+ |
+ exp '>>' exp { "$_[1] >> $_[3]" }
+ |
+ exp '!=' exp { "$_[1] != $_[3]" }
+ |
+ exp '||' exp { "$_[1] || $_[3]" }
+ |
+ exp '&&' exp { "$_[1] && $_[3]" }
+ |
+ exp '&' exp { "$_[1] & $_[3]" }
+ |
+ exp '?' exp ':' exp { "$_[1]?$_[3]:$_[5]" }
+ |
+ '~' exp { "~$_[1]" }
+ |
+ '!' exp { "not $_[1]" }
+ |
+ exp '/' exp { "$_[1] / $_[3]" }
+ |
+ '-' exp %prec NEG { "-$_[2]" }
+ |
+ '&' exp %prec ADDROF { "&$_[2]" }
+ |
+ exp '^' exp { "$_[1]^$_[3]" }
+ |
+ '(' exp ')' { "($_[2])" }
+;
+
+possible_pointer:
+ VAR { $_[0]->_Lookup($_[1]) }
+ |
+ '*' possible_pointer %prec DEREF { $_[0]->_Dereference($_[2]); "*$_[2]" }
+;
+
+var:
+ possible_pointer { $_[0]->_Use($_[1]) }
+ |
+ var '.' VAR { $_[0]->_Use("$_[1].$_[3]") }
+ |
+ '(' var ')' { "($_[2])" }
+ |
+ var '->' VAR { $_[0]->_Use("*$_[1]"); $_[1]."->".$_[3] }
+;
+
+
+func:
+ VAR '(' opt_args ')' { "$_[1]($_[3])" }
+;
+
+opt_args:
+ #empty
+ { "" }
+ |
+ args
+;
+
+exp_or_possible_pointer:
+ exp
+ |
+ possible_pointer
+;
+
+args:
+ exp_or_possible_pointer
+ |
+ exp_or_possible_pointer ',' args { "$_[1], $_[3]" }
+;
+
+%%
+
+package Parse::Pidl::Expr;
+
# Tokenizer for the pidl expression parser.
#
# Consumes the front of YYData->{INPUT} and returns the next
# (token-name, token-value) pair for Parse::Yapp: hex or decimal NUM,
# identifier VAR, double-quoted TEXT, the two-character operators, and
# finally any single character as itself.  The last token is recorded
# in LAST_TOKEN for error reporting.
sub _Lexer {
	my($parser)=shift;

	# Fix: strip *all* leading blanks, not just one.  The previous
	# pattern (s/^[ \t]//) removed a single character, so two or more
	# consecutive blanks made the catch-all rule at the bottom return
	# a bogus ' ' token and the parse failed.  The IDL lexer in
	# idl.yp already uses a multi-character strip.
	$parser->YYData->{INPUT}=~s/^[ \t]+//;

	for ($parser->YYData->{INPUT}) {
		# hexadecimal literal
		if (s/^(0x[0-9A-Fa-f]+)//) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return('NUM',$1);
		}
		# decimal (optionally fractional) literal
		if (s/^([0-9]+(?:\.[0-9]+)?)//) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return('NUM',$1);
		}
		# identifier
		if (s/^([A-Za-z_][A-Za-z0-9_]*)//) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return('VAR',$1);
		}
		# double-quoted string; shortest match, quotes stripped
		if (s/^\"(.*?)\"//) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return('TEXT',$1);
		}
		# two-character operators; must be tried before the
		# single-character catch-all below
		if (s/^(==|!=|<=|>=|->|\|\||<<|>>|&&)//s) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return($1,$1);
		}
		# anything else is a single-character token
		if (s/^(.)//s) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return($1,$1);
		}
	}
}
+
# Report a variable use.  When the caller registered a USE callback
# via Run(), its return value replaces the name; otherwise the name
# is passed through unchanged.
sub _Use($$)
{
	my ($parser, $name) = @_;
	my $hook = $parser->YYData->{USE};
	return $hook->($name) if (defined($hook));
	return $name;
}
+
# Translate a variable name through the mandatory LOOKUP callback
# registered in Run().
sub _Lookup($$)
{
	my ($parser, $name) = @_;
	return $parser->YYData->{LOOKUP}->($name);
}
+
# Notify the optional DEREFERENCE callback that a variable is being
# dereferenced with '*'.  A no-op when no callback was registered.
sub _Dereference($$)
{
	my ($parser, $name) = @_;
	my $hook = $parser->YYData->{DEREFERENCE};
	$hook->($name) if (defined($hook));
}
+
# Report a parse failure through the caller-supplied ERROR callback,
# quoting the full input and, when known, the last token consumed.
sub _Error($)
{
	my ($self) = @_;
	my $input = $self->YYData->{FULL_INPUT};
	my $token = $self->YYData->{LAST_TOKEN};
	my $msg;
	if (defined($token)) {
		$msg = "Parse error in `" . $input . "' near `" . $token . "'";
	} else {
		$msg = "Parse error in `" . $input . "'";
	}
	$self->YYData->{ERROR}->($msg);
}
+
# Parse one expression string.
#
#   $data   - the expression text
#   $error  - callback(message) invoked on parse errors
#   $lookup - callback(name) mapping a variable to its replacement
#   $deref  - optional callback(name), fired when a variable is
#             dereferenced with '*'
#   $use    - optional callback(name), fired for every variable use
#
# Returns the rewritten expression string.
sub Run {
	my($self, $data, $error, $lookup, $deref, $use) = @_;

	my $yy = $self->YYData;
	$yy->{FULL_INPUT}  = $data;
	$yy->{INPUT}       = $data;
	$yy->{LOOKUP}      = $lookup;
	$yy->{DEREFERENCE} = $deref;
	$yy->{ERROR}       = $error;
	$yy->{USE}         = $use;

	return $self->YYParse( yylex => \&_Lexer, yyerror => \&_Error);
}
diff --git a/tools/pidl/idl.yp b/tools/pidl/idl.yp
new file mode 100644
index 0000000..08f982a
--- /dev/null
+++ b/tools/pidl/idl.yp
@@ -0,0 +1,696 @@
+########################
+# IDL Parse::Yapp parser
+# Copyright (C) Andrew Tridgell <tridge@samba.org>
+# released under the GNU GPL version 3 or later
+
+
+
+# the precedence actually doesn't matter at all for this grammar, but
+# by providing a precedence we reduce the number of conflicts
+# enormously
+%left '-' '+' '&' '|' '*' '>' '.' '/' '(' ')' '[' ',' ';'
+
+
+################
+# grammar
+%%
+idl:
+ #empty { {} }
+ |
+ idl interface { push(@{$_[1]}, $_[2]); $_[1] }
+ |
+ idl coclass { push(@{$_[1]}, $_[2]); $_[1] }
+ |
+ idl import { push(@{$_[1]}, $_[2]); $_[1] }
+ |
+ idl include { push(@{$_[1]}, $_[2]); $_[1] }
+ |
+ idl importlib { push(@{$_[1]}, $_[2]); $_[1] }
+ |
+ idl cpp_quote { push(@{$_[1]}, $_[2]); $_[1] }
+;
+
+import:
+ 'import' commalist ';'
+ {{
+ "TYPE" => "IMPORT",
+ "PATHS" => $_[2],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+include:
+ 'include' commalist ';'
+ {{
+ "TYPE" => "INCLUDE",
+ "PATHS" => $_[2],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+importlib:
+ 'importlib' commalist ';'
+ {{
+ "TYPE" => "IMPORTLIB",
+ "PATHS" => $_[2],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+commalist:
+ text { [ $_[1] ] }
+ |
+ commalist ',' text { push(@{$_[1]}, $_[3]); $_[1] }
+;
+
+coclass:
+ property_list 'coclass' identifier '{' interface_names '}' optional_semicolon
+ {{
+ "TYPE" => "COCLASS",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "DATA" => $_[5],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+interface_names:
+ #empty { {} }
+ |
+ interface_names 'interface' identifier ';' { push(@{$_[1]}, $_[2]); $_[1] }
+;
+
+interface:
+ property_list 'interface' identifier base_interface '{' definitions '}' optional_semicolon
+ {{
+ "TYPE" => "INTERFACE",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "BASE" => $_[4],
+ "DATA" => $_[6],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+base_interface:
+ #empty
+ |
+ ':' identifier { $_[2] }
+;
+
+
+cpp_quote:
+ 'cpp_quote' '(' text ')'
+ {{
+ "TYPE" => "CPP_QUOTE",
+ "DATA" => $_[3],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+definitions:
+ definition { [ $_[1] ] }
+ |
+ definitions definition { push(@{$_[1]}, $_[2]); $_[1] }
+;
+
+definition:
+ function
+ |
+ const
+ |
+ typedef
+ |
+ typedecl
+;
+
+const:
+ 'const' identifier pointers identifier '=' anytext ';'
+ {{
+ "TYPE" => "CONST",
+ "DTYPE" => $_[2],
+ "POINTERS" => $_[3],
+ "NAME" => $_[4],
+ "VALUE" => $_[6],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ |
+ 'const' identifier pointers identifier array_len '=' anytext ';'
+ {{
+ "TYPE" => "CONST",
+ "DTYPE" => $_[2],
+ "POINTERS" => $_[3],
+ "NAME" => $_[4],
+ "ARRAY_LEN" => $_[5],
+ "VALUE" => $_[7],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+function:
+ property_list type identifier '(' element_list2 ')' ';'
+ {{
+ "TYPE" => "FUNCTION",
+ "NAME" => $_[3],
+ "RETURN_TYPE" => $_[2],
+ "PROPERTIES" => $_[1],
+ "ELEMENTS" => $_[5],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+typedef:
+ property_list 'typedef' type pointers identifier array_len ';'
+ {{
+ "TYPE" => "TYPEDEF",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[5],
+ "DATA" => $_[3],
+ "POINTERS" => $_[4],
+ "ARRAY_LEN" => $_[6],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+usertype:
+ struct
+ |
+ union
+ |
+ enum
+ |
+ bitmap
+ |
+ pipe
+;
+
+typedecl:
+ usertype ';' { $_[1] }
+;
+
+sign:
+ 'signed'
+ |
+ 'unsigned'
+;
+
+existingtype:
+ sign identifier { ($_[1]?$_[1]:"signed") ." $_[2]" }
+ |
+ identifier
+;
+
+type:
+ usertype
+ |
+ existingtype
+ |
+ void { "void" }
+;
+
+enum_body:
+ '{' enum_elements '}' { $_[2] }
+;
+
+opt_enum_body:
+ #empty
+ |
+ enum_body
+;
+
+enum:
+ property_list 'enum' optional_identifier opt_enum_body
+ {{
+ "TYPE" => "ENUM",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+enum_elements:
+ enum_element { [ $_[1] ] }
+ |
+ enum_elements ',' enum_element { push(@{$_[1]}, $_[3]); $_[1] }
+;
+
+enum_element:
+ identifier
+ |
+ identifier '=' anytext { "$_[1]$_[2]$_[3]" }
+;
+
+bitmap_body:
+ '{' opt_bitmap_elements '}' { $_[2] }
+;
+
+opt_bitmap_body:
+ #empty
+ |
+ bitmap_body
+;
+
+bitmap:
+ property_list 'bitmap' optional_identifier opt_bitmap_body
+ {{
+ "TYPE" => "BITMAP",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+bitmap_elements:
+ bitmap_element { [ $_[1] ] }
+ |
+ bitmap_elements ',' bitmap_element { push(@{$_[1]}, $_[3]); $_[1] }
+;
+
+opt_bitmap_elements:
+ #empty
+ |
+ bitmap_elements
+;
+
+bitmap_element:
+ identifier '=' anytext { "$_[1] ( $_[3] )" }
+;
+
+struct_body:
+ '{' element_list1 '}' { $_[2] }
+;
+
+opt_struct_body:
+ #empty
+ |
+ struct_body
+;
+
+struct:
+ property_list 'struct' optional_identifier opt_struct_body
+ {{
+ "TYPE" => "STRUCT",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+empty_element:
+ property_list ';'
+ {{
+ "NAME" => "",
+ "TYPE" => "EMPTY",
+ "PROPERTIES" => $_[1],
+ "POINTERS" => 0,
+ "ARRAY_LEN" => [],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+base_or_empty:
+ base_element ';'
+ |
+ empty_element;
+
+optional_base_element:
+ property_list base_or_empty { $_[2]->{PROPERTIES} = FlattenHash([$_[1],$_[2]->{PROPERTIES}]); $_[2] }
+;
+
+union_elements:
+ #empty
+ |
+ union_elements optional_base_element { push(@{$_[1]}, $_[2]); $_[1] }
+;
+
+union_body:
+ '{' union_elements '}' { $_[2] }
+;
+
+opt_union_body:
+ #empty
+ |
+ union_body
+;
+
+union:
+ property_list 'union' optional_identifier opt_union_body
+ {{
+ "TYPE" => "UNION",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+base_element:
+ property_list type pointers identifier array_len
+ {{
+ "NAME" => $_[4],
+ "TYPE" => $_[2],
+ "PROPERTIES" => $_[1],
+ "POINTERS" => $_[3],
+ "ARRAY_LEN" => $_[5],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+pointers:
+ #empty
+ { 0 }
+ |
+ pointers '*' { $_[1]+1 }
+;
+
+pipe:
+ property_list 'pipe' type
+ {{
+ "TYPE" => "PIPE",
+ "PROPERTIES" => $_[1],
+ "NAME" => undef,
+ "DATA" => {
+ "TYPE" => "STRUCT",
+ "PROPERTIES" => $_[1],
+ "NAME" => undef,
+ "ELEMENTS" => [{
+ "NAME" => "count",
+ "PROPERTIES" => $_[1],
+ "POINTERS" => 0,
+ "ARRAY_LEN" => [],
+ "TYPE" => "uint3264",
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ },{
+ "NAME" => "array",
+ "PROPERTIES" => $_[1],
+ "POINTERS" => 0,
+ "ARRAY_LEN" => [ "count" ],
+ "TYPE" => $_[3],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ },
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+;
+
+element_list1:
+ #empty
+ { [] }
+ |
+ element_list1 base_element ';' { push(@{$_[1]}, $_[2]); $_[1] }
+;
+
+optional_const:
+ #empty
+ |
+ 'const'
+;
+
+element_list2:
+ #empty
+ |
+ 'void'
+ |
+ optional_const base_element { [ $_[2] ] }
+ |
+ element_list2 ',' optional_const base_element { push(@{$_[1]}, $_[4]); $_[1] }
+;
+
+array_len:
+ #empty { [] }
+ |
+ '[' ']' array_len { push(@{$_[3]}, "*"); $_[3] }
+ |
+ '[' anytext ']' array_len { push(@{$_[4]}, "$_[2]"); $_[4] }
+;
+
+property_list:
+ #empty
+ |
+ property_list '[' properties ']' { FlattenHash([$_[1],$_[3]]); }
+;
+
+properties:
+ property { $_[1] }
+ |
+ properties ',' property { FlattenHash([$_[1], $_[3]]); }
+;
+
+property:
+ identifier {{ "$_[1]" => "1" }}
+ |
+ identifier '(' commalisttext ')' {{ "$_[1]" => "$_[3]" }}
+;
+
+commalisttext:
+ anytext
+ |
+ commalisttext ',' anytext { "$_[1],$_[3]" }
+;
+
+anytext:
+ #empty
+ { "" }
+ |
+ identifier
+ |
+ constant
+ |
+ text
+ |
+ anytext '-' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '.' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '*' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '>' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '<' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '|' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '&' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '/' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '?' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext ':' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '=' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '+' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '~' anytext { "$_[1]$_[2]$_[3]" }
+ |
+ anytext '(' commalisttext ')' anytext { "$_[1]$_[2]$_[3]$_[4]$_[5]" }
+ |
+ anytext '{' commalisttext '}' anytext { "$_[1]$_[2]$_[3]$_[4]$_[5]" }
+;
+
+identifier:
+ IDENTIFIER
+;
+
+optional_identifier:
+ #empty { undef }
+ |
+ IDENTIFIER
+;
+
+constant:
+ CONSTANT
+;
+
+text:
+ TEXT { "\"$_[1]\"" }
+;
+
+optional_semicolon:
+ #empty
+ |
+ ';'
+;
+
+
+#####################################
+# start code
+%%
+
+use Parse::Pidl qw(error);
+
#####################################################################
# merge a listref of hash references into a single hash reference;
# later entries win on duplicate keys
sub FlattenHash($)
{
	my $list = shift;
	my %merged;
	foreach my $h (@{$list}) {
		%merged = (%merged, %{$h});
	}
	return \%merged;
}
+
#####################################################################
# Recursively traverse a perl data structure, dropping undef array
# elements and deleting hash entries whose value is undef.
# NOTE(review): the original banner also claimed empty arrays/hashes
# are removed, but the code below never deletes a container merely
# for being empty -- only undef members.  Returns the value, which
# is modified in place.
sub CleanData($)
{
	sub CleanData($);	# forward declaration so the recursive calls compile with the prototype
	my($v) = shift;

	return undef if (not defined($v));

	if (ref($v) eq "ARRAY") {
		foreach my $i (0 .. $#{$v}) {
			CleanData($v->[$i]);
		}
		# this removes any undefined elements from the array
		@{$v} = grep { defined $_ } @{$v};
	} elsif (ref($v) eq "HASH") {
		foreach my $x (keys %{$v}) {
			CleanData($v->{$x});
			if (!defined $v->{$x}) {
				delete($v->{$x});
				next;
			}
		}
	}

	return $v;
}
+
# yapp error hook: report either an explicit ERRMSG left behind by a
# grammar action, or a generic syntax error naming the last token.
sub _Error {
	my $yy = $_[0]->YYData;

	if (exists $yy->{ERRMSG}) {
		error($yy, $yy->{ERRMSG});
		delete $yy->{ERRMSG};
		return;
	}

	error($yy, "Syntax error near '" . $yy->{LAST_TOKEN} . "'");
}
+
# Tokenizer for the IDL grammar.
#
# Reads from YYData->{INPUT}, updating FILE/LINE from cpp linemarkers
# so diagnostics point at the original source file.  Returns
# (token, value) pairs: TEXT, CONSTANT, IDENTIFIER, a bare keyword,
# or any single character as itself; ('',undef) signals end of input.
sub _Lexer($)
{
	my($parser)=shift;

	# end of input
	$parser->YYData->{INPUT} or return('',undef);

again:
	$parser->YYData->{INPUT} =~ s/^[ \t]*//;

	for ($parser->YYData->{INPUT}) {
		if (/^\#/) {
			# Linemarker format is described at
			# https://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html
			if (s/^\# (\d+) \"(.*?)\"(( \d+){1,4}|)//) {
				# "# LINE "FILE" FLAGS": switch source position
				$parser->YYData->{LINE} = $1-1;
				$parser->YYData->{FILE} = $2;
				goto again;
			}
			if (s/^\#line (\d+) \"(.*?)\"( \d+|)//) {
				# "#line LINE "FILE"" variant
				$parser->YYData->{LINE} = $1-1;
				$parser->YYData->{FILE} = $2;
				goto again;
			}
			if (s/^(\#.*)$//m) {
				# any other preprocessor residue: drop the line
				goto again;
			}
		}
		if (s/^(\n)//) {
			$parser->YYData->{LINE}++;
			goto again;
		}
		# double-quoted string literal (quotes stripped)
		if (s/^\"(.*?)\"//) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return('TEXT',$1);
		}
		# integer constant; the trailing non-word character is put back
		if (s/^(\d+)(\W|$)/$2/) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return('CONSTANT',$1);
		}
		# keyword or identifier
		if (s/^([\w_]+)//) {
			$parser->YYData->{LAST_TOKEN} = $1;
			if ($1 =~
			    /^(coclass|interface|import|importlib
			      |include|cpp_quote|typedef
			      |union|struct|enum|bitmap|pipe
			      |void|const|unsigned|signed)$/x) {
				return $1;
			}
			return('IDENTIFIER',$1);
		}
		# anything else is a single-character token
		if (s/^(.)//s) {
			$parser->YYData->{LAST_TOKEN} = $1;
			return($1,$1);
		}
	}
}
+
# Parse preprocessed IDL text and return the cleaned parse tree (a
# listref of interface/import/... hashes), or undef on error.
# $filename is only used for diagnostics until the first linemarker.
sub parse_string
{
	my ($data,$filename) = @_;

	my $parser = Parse::Pidl::IDL->new();

	my $yy = $parser->YYData;
	$yy->{FILE} = $filename;
	$yy->{INPUT} = $data;
	$yy->{LINE} = 0;
	$yy->{LAST_TOKEN} = "NONE";

	my $tree = $parser->YYParse( yylex => \&_Lexer, yyerror => \&_Error );

	return CleanData($tree);
}
+
# Run the C preprocessor over an IDL file and parse its output.
#   $filename - IDL source path
#   $incdirs  - listref of include directories, passed as -I options
# The preprocessor is $CPP if set, otherwise "$CC -E", otherwise "cpp".
# NOTE(review): the command line is built by string interpolation and
# executed via backticks, so shell metacharacters in $filename or
# @$incdirs reach the shell.  Callers pass build-controlled paths, but
# this is not safe for untrusted input.
sub parse_file($$)
{
	my ($filename,$incdirs) = @_;

	# slurp mode: read the whole preprocessor output in one go
	my $saved_delim = $/;
	undef $/;
	my $cpp = $ENV{CPP};
	my $options = "";
	if (! defined $cpp) {
		if (defined $ENV{CC}) {
			# fall back to the C compiler in preprocess-only mode
			$cpp = "$ENV{CC}";
			$options = "-E";
		} else {
			$cpp = "cpp";
		}
	}
	my $includes = join('',map { " -I$_" } @$incdirs);
	# -D__PIDL__ lets headers detect pidl; -xc forces C language mode
	my $data = `$cpp $options -D__PIDL__$includes -xc "$filename"`;
	$/ = $saved_delim;

	return parse_string($data, $filename);
}
diff --git a/tools/pidl/lib/Parse/Pidl.pm b/tools/pidl/lib/Parse/Pidl.pm
new file mode 100644
index 0000000..40e3673
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl.pm
@@ -0,0 +1,44 @@
+###################################################
+# package to parse IDL files and generate code for
+# rpc functions in Samba
+# Copyright tridge@samba.org 2000-2003
+# Copyright jelmer@samba.org 2005
+# released under the GNU GPL
+
+package Parse::Pidl;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(warning error fatal $VERSION);
+
+use strict;
+
+use vars qw ( $VERSION );
+
+$VERSION = '0.02';
+
# Print a warning to stderr, prefixed with "FILE:LINE: " when a
# location hash (FILE/LINE keys) is supplied.
sub warning
{
	my ($loc,$msg) = @_;
	my $prefix = $loc ? "$loc->{FILE}:$loc->{LINE}: " : "";
	print STDERR $prefix . "warning: $msg\n";
}
+
# Print an error to stderr, prefixed with "FILE:LINE: " when a
# location hash (FILE/LINE keys) is supplied.
sub error
{
	my ($loc,$msg) = @_;
	my $prefix = $loc ? "$loc->{FILE}:$loc->{LINE}: " : "";
	print STDERR $prefix . "error: $msg\n";
}
+
# Abort with "FILE:LINE: message\n"; the location hash is mandatory.
sub fatal($$)
{
	my ($loc,$msg) = @_;
	die($loc->{FILE} . ":" . $loc->{LINE} . ": " . $msg . "\n");
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/CUtil.pm b/tools/pidl/lib/Parse/Pidl/CUtil.pm
new file mode 100644
index 0000000..9deb6ee
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/CUtil.pm
@@ -0,0 +1,52 @@
+###################################################
+# C utility functions for pidl
+# Copyright jelmer@samba.org 2005-2007
+# released under the GNU GPL
+package Parse::Pidl::CUtil;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT = qw(get_pointer_to get_value_of get_array_element);
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use strict;
+
# Return a C expression for the address of $expr:
#   "*x" -> "x"     (address-of a dereference cancels out)
#   "&x" -> "&(&x)" (parenthesize before applying another '&')
#   "x"  -> "&x"
sub get_pointer_to($)
{
	my $expr = shift;

	return $1 if ($expr =~ /^\*(.*)$/);
	return "&($expr)" if ($expr =~ /^\&/);
	return "&$expr";
}
+
# Return a C expression for the value $expr points at:
#   "&x" -> "x"  (dereferencing an address-of cancels out)
#   "x"  -> "*x"
sub get_value_of($)
{
	my $expr = shift;

	return ($expr =~ /^\&(.*)$/) ? $1 : "*$expr";
}
+
# Return a C expression indexing $expr with $idx.  '*'/'&' prefixed
# expressions are parenthesized first so the subscript binds to the
# whole expression rather than just the variable.
sub get_array_element($$)
{
	my ($expr, $idx) = @_;

	$expr = "($expr)" if ($expr =~ /^[\*\&]/);

	return $expr . "[" . $idx . "]";
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Compat.pm b/tools/pidl/lib/Parse/Pidl/Compat.pm
new file mode 100644
index 0000000..b8abcb8
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Compat.pm
@@ -0,0 +1,168 @@
+###################################################
+# IDL Compatibility checker
+# Copyright jelmer@samba.org 2005
+# released under the GNU GPL
+
+package Parse::Pidl::Compat;
+
+use Parse::Pidl qw(warning);
+use Parse::Pidl::Util qw(has_property);
+use strict;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+my %supported_properties = (
+ # interface
+ "helpstring" => ["INTERFACE", "FUNCTION"],
+ "version" => ["INTERFACE"],
+ "uuid" => ["INTERFACE"],
+ "endpoint" => ["INTERFACE"],
+ "pointer_default" => ["INTERFACE"],
+ "no_srv_register" => ["INTERFACE"],
+
+ # dcom
+ "object" => ["INTERFACE"],
+ "local" => ["INTERFACE", "FUNCTION"],
+ "iid_is" => ["ELEMENT"],
+ "call_as" => ["FUNCTION"],
+ "idempotent" => ["FUNCTION"],
+
+ # function
+ "in" => ["ELEMENT"],
+ "out" => ["ELEMENT"],
+
+ # pointer
+ "ref" => ["ELEMENT"],
+ "ptr" => ["ELEMENT"],
+ "unique" => ["ELEMENT"],
+ "ignore" => ["ELEMENT"],
+
+ "value" => ["ELEMENT"],
+
+ # generic
+ "public" => ["FUNCTION", "TYPEDEF"],
+ "nopush" => ["FUNCTION", "TYPEDEF"],
+ "nopull" => ["FUNCTION", "TYPEDEF"],
+ "noprint" => ["FUNCTION", "TYPEDEF"],
+ "nopython" => ["FUNCTION", "TYPEDEF"],
+
+ # union
+ "switch_is" => ["ELEMENT"],
+ "switch_type" => ["ELEMENT", "TYPEDEF"],
+ "case" => ["ELEMENT"],
+ "default" => ["ELEMENT"],
+
+ # subcontext
+ "subcontext" => ["ELEMENT"],
+ "subcontext_size" => ["ELEMENT"],
+
+ # enum
+ "enum16bit" => ["TYPEDEF"],
+ "v1_enum" => ["TYPEDEF"],
+
+ # bitmap
+ "bitmap8bit" => ["TYPEDEF"],
+ "bitmap16bit" => ["TYPEDEF"],
+ "bitmap32bit" => ["TYPEDEF"],
+ "bitmap64bit" => ["TYPEDEF"],
+
+ # array
+ "range" => ["ELEMENT"],
+ "size_is" => ["ELEMENT"],
+ "string" => ["ELEMENT"],
+ "noheader" => ["ELEMENT"],
+ "charset" => ["ELEMENT"],
+ "length_is" => ["ELEMENT"],
+);
+
# Warn about typedef features that cannot be represented in MIDL.
# NOTE(review): the final StripProperties() call is neither defined
# in this module nor imported -- invoking CheckTypedef would die with
# an undefined-subroutine error; confirm before relying on this path
# (Check() below never actually calls it).
sub CheckTypedef($)
{
	my ($td) = @_;

	if (has_property($td, "nodiscriminant")) {
		warning($td, "nodiscriminant property not supported");
	}

	if ($td->{TYPE} eq "BITMAP") {
		warning($td, "converting bitmap to scalar");
		#FIXME
	}

	if (has_property($td, "gensize")) {
		warning($td, "ignoring gensize() property. ");
	}

	if (has_property($td, "enum8bit") and has_property($td, "enum16bit")) {
		warning($td, "8 and 16 bit enums not supported, converting to scalar");
		#FIXME
	}

	StripProperties($td);
}
+
# Warn about element properties that have no MIDL equivalent.
# A "noheader" element cannot be expressed at all, so checking stops
# there; everything else only produces a diagnostic.
sub CheckElement($)
{
	my $e = shift;

	if (has_property($e, "noheader")) {
		warning($e, "noheader property not supported");
		return;
	}

	warning($e, "converting subcontext to byte array")	#FIXME
		if (has_property($e, "subcontext"));
	warning($e, "compression() property not supported")
		if (has_property($e, "compression"));
	warning($e, "sptr() pointer property not supported")
		if (has_property($e, "sptr"));
	warning($e, "relative() pointer property not supported")
		if (has_property($e, "relative"));
	warning($e, "relative_short() pointer property not supported")
		if (has_property($e, "relative_short"));
	warning($e, "ignoring flag() property")
		if (has_property($e, "flag"));
	warning($e, "ignoring value() property")
		if (has_property($e, "value"));
}
+
# Warn when a function carries noopnum, which cannot be translated
# without the generated opcodes going out of sync.
sub CheckFunction($)
{
	my $fn = shift;

	warning($fn, "noopnum not converted. Opcodes will be out of sync.")
		if (has_property($fn, "noopnum"));
}
+
# Placeholder: interface-level compatibility checks are not yet
# implemented.  Accepts the interface hash and does nothing with it.
sub CheckInterface($)
{
	my $if = shift;
}
+
# Run compatibility checks over a parsed IDL tree; only INTERFACE
# nodes are inspected.  (The collected results are currently not
# returned to the caller, mirroring the existing behavior.)
sub Check($)
{
	my $pidl = shift;
	my $nidl = [];

	for my $node (@{$pidl}) {
		next unless ($node->{TYPE} eq "INTERFACE");
		push (@$nidl, CheckInterface($node));
	}
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Dump.pm b/tools/pidl/lib/Parse/Pidl/Dump.pm
new file mode 100644
index 0000000..4e623db
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Dump.pm
@@ -0,0 +1,294 @@
+###################################################
+# dump function for IDL structures
+# Copyright tridge@samba.org 2000
+# Copyright jelmer@samba.org 2005
+# released under the GNU GPL
+
+=pod
+
+=head1 NAME
+
+Parse::Pidl::Dump - Dump support
+
+=head1 DESCRIPTION
+
+This module provides functions that can generate IDL code from
+internal pidl data structures.
+
+=cut
+
+package Parse::Pidl::Dump;
+
+use Exporter;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(DumpType DumpTypedef DumpStruct DumpEnum DumpBitmap DumpUnion DumpFunction);
+
+use strict;
+use Parse::Pidl::Util qw(has_property);
+
+my($res);
+
#####################################################################
# render a property hash as IDL attribute syntax,
# e.g. "[in] [size_is(n)] " -- keys are emitted in sorted order
sub DumpProperties($)
{
	my($props) = shift;
	my $res = "";

	# NB: ($props) is a one-element list, so $d is simply the hashref
	foreach my $d ($props) {
		foreach my $k (sort(keys %{$d})) {
			# in/out/ref are flags and are dumped without a value
			if ($k eq "in" or $k eq "out" or $k eq "ref") {
				$res .= "[$k] ";
				next;
			}
			$res .= "[$k($d->{$k})] ";
		}
	}
	return $res;
}
+
#####################################################################
# render one element: properties, type, one '*' per pointer level,
# the name, then any array dimensions
sub DumpElement($)
{
	my($element) = shift;
	my $res = "";

	if (defined $element->{PROPERTIES}) {
		$res .= DumpProperties($element->{PROPERTIES});
	}
	$res .= DumpType($element->{TYPE}) . " ";
	$res .= "*" x $element->{POINTERS};
	$res .= "$element->{NAME}";
	$res .= "[$_]" for (@{$element->{ARRAY_LEN}});

	return $res;
}
+
#####################################################################
# render a struct definition; the name and the element list are both
# optional
sub DumpStruct($)
{
	my($struct) = shift;
	my($res);

	$res .= "struct ";
	$res .= "$struct->{NAME} " if ($struct->{NAME});

	$res .= "{\n";
	if (defined $struct->{ELEMENTS}) {
		foreach my $el (@{$struct->{ELEMENTS}}) {
			$res .= "\t" . DumpElement($el) . ";\n";
		}
	}
	$res .= "}";

	return $res;
}
+
+
#####################################################################
# render an enum body; elements stored as "NAME (VALUE)" get an
# explicit "= VALUE", anything else is emitted verbatim
sub DumpEnum($)
{
	my($enum) = shift;
	my($res);

	$res .= "enum {\n";

	foreach my $el (@{$enum->{ELEMENTS}}) {
		if ($el =~ /^([A-Za-z0-9_]+)[ \t]*\((.*)\)$/) {
			$res .= "\t$1 = $2,\n";
		} else {
			$res .= "\t$el,\n";
		}
	}

	$res .= "}";

	return $res;
}
+
#####################################################################
# render a bitmap body; every element must have the "NAME (VALUE)"
# form, otherwise we die
sub DumpBitmap($)
{
	my($bitmap) = shift;
	my($res);

	$res .= "bitmap {\n";

	foreach my $el (@{$bitmap->{ELEMENTS}}) {
		die ("Bitmap $bitmap->{NAME} has field $el without proper value")
			unless ($el =~ /^([A-Za-z0-9_]+)[ \t]*\((.*)\)$/);
		$res .= "\t$1 = $2,\n";
	}

	$res .= "}";

	return $res;
}
+
+
#####################################################################
# render one union arm: "[default] ;" for the default arm, otherwise
# "[case(N)] <element>;" using the element's "case" property
sub DumpUnionElement($)
{
	my($element) = shift;
	my($res);

	if (has_property($element, "default")) {
		$res .= "[default] ;\n";
	} else {
		$res .= "[case($element->{PROPERTIES}->{case})] ";
		# NOTE(review): the stray "," before the "if" modifier below
		# is odd Perl; it ships upstream like this and is left
		# untouched here.
		$res .= DumpElement($element), if defined($element);
		$res .= ";\n";
	}

	return $res;
}
+
#####################################################################
# render a union definition: its properties, then each arm
sub DumpUnion($)
{
	my($union) = shift;
	my($res);

	if (defined $union->{PROPERTIES}) {
		$res .= DumpProperties($union->{PROPERTIES});
	}
	$res .= "union {\n";
	$res .= DumpUnionElement($_) for (@{$union->{ELEMENTS}});
	$res .= "}";

	return $res;
}
+
#####################################################################
# render a type: structured types (hash refs) are expanded inline,
# plain type-name strings are returned as-is; an unknown structured
# TYPE falls through returning nothing, as before
sub DumpType($)
{
	my($data) = shift;

	return $data unless (ref($data) eq "HASH");

	my %renderer = (
		"STRUCT" => \&DumpStruct,
		"UNION"  => \&DumpUnion,
		"ENUM"   => \&DumpEnum,
		"BITMAP" => \&DumpBitmap,
	);
	my $fn = $renderer{$data->{TYPE}};
	return $fn->($data) if (defined $fn);
	return;
}
+
#####################################################################
# render "typedef <type> <name>;"
sub DumpTypedef($)
{
	my($typedef) = shift;
	my($res);

	$res .= "typedef " . DumpType($typedef->{DATA}) . " $typedef->{NAME};\n\n";

	return $res;
}
+
#####################################################################
# render a function prototype (the original banner mislabeled this
# as "dump a typedef")
sub DumpFunction($)
{
	my($function) = shift;
	my($res);

	$res .= DumpType($function->{RETURN_TYPE});
	$res .= " $function->{NAME}(\n";
	my @args;
	for my $d (@{$function->{ELEMENTS}}) {
		push(@args, DumpElement($d));
	}
	$res .= join(",\n", @args);
	$res .= "\n);\n\n";

	return $res;
}
+
#####################################################################
# render an interface property block as "[\nkey(val),\n...\n]\n"
# NOTE(review): DumpInterface passes the PROPERTIES hash itself, yet
# this sub reads $header->{DATA}; a property hash has no DATA key, so
# $data ends up undef and the loop emits nothing.  It looks like it
# should iterate $header directly -- confirm against upstream pidl
# before changing behavior.
sub DumpInterfaceProperties($)
{
	my($header) = shift;
	my($data) = $header->{DATA};
	my($first) = 1;
	my($res);

	$res .= "[\n";
	foreach my $k (sort(keys %{$data})) {
		$first || ($res .= ",\n"); $first = 0;
		$res .= "$k($data->{$k})";
	}
	$res .= "\n]\n";

	return $res;
}
+
#####################################################################
# render a complete interface: property block first, then its
# typedefs and functions (all other node kinds are skipped)
sub DumpInterface($)
{
	my($interface) = shift;
	my($data) = $interface->{DATA};
	my($res);

	$res .= DumpInterfaceProperties($interface->{PROPERTIES});

	$res .= "interface $interface->{NAME}\n{\n";
	foreach my $d (@{$data}) {
		if ($d->{TYPE} eq "TYPEDEF") {
			$res .= DumpTypedef($d);
		} elsif ($d->{TYPE} eq "FUNCTION") {
			$res .= DumpFunction($d);
		}
	}
	$res .= "}\n";

	return $res;
}
+
+
#####################################################################
# serialize a parsed IDL tree back into IDL text; only INTERFACE
# nodes are emitted
sub Dump($)
{
	my($idl) = shift;
	my($res) = "/* Dumped by pidl */\n\n";

	foreach my $x (@{$idl}) {
		$res .= DumpInterface($x) if ($x->{TYPE} eq "INTERFACE");
	}
	return $res;
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Expr.pm b/tools/pidl/lib/Parse/Pidl/Expr.pm
new file mode 100644
index 0000000..24581d2
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Expr.pm
@@ -0,0 +1,1444 @@
+####################################################################
+#
+# This file was generated using Parse::Yapp version 1.05.
+#
+# Don't edit this file, use source file instead.
+#
+# ANY CHANGE MADE HERE WILL BE LOST !
+#
+####################################################################
+package Parse::Pidl::Expr;
+use vars qw ( @ISA );
+use strict;
+
+@ISA= qw ( Parse::Yapp::Driver );
+use Parse::Yapp::Driver;
+
+
+
+sub new {
+ my($class)=shift;
+ ref($class)
+ and $class=ref($class);
+
+ my($self)=$class->SUPER::new( yyversion => '1.05',
+ yystates =>
+[
+ {#State 0
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'NUM' => 5,
+ 'TEXT' => 6,
+ "(" => 7,
+ "!" => 8,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 2,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 1
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "(" => 7,
+ "!" => 8,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 14,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 2
+ ACTIONS => {
+ '' => 16,
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "||" => 26,
+ "&&" => 27,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "<<" => 32,
+ "=>" => 31,
+ "<=" => 33,
+ ">" => 34
+ }
+ },
+ {#State 3
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 35,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 4
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 36,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 5
+ DEFAULT => -1
+ },
+ {#State 6
+ DEFAULT => -2
+ },
+ {#State 7
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 38,
+ 'var' => 37,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 8
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 39,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 9
+ ACTIONS => {
+ "*" => 9,
+ 'VAR' => 41
+ },
+ GOTOS => {
+ 'possible_pointer' => 40
+ }
+ },
+ {#State 10
+ ACTIONS => {
+ "(" => 42
+ },
+ DEFAULT => -30
+ },
+ {#State 11
+ ACTIONS => {
+ "->" => 43,
+ "." => 44
+ },
+ DEFAULT => -4
+ },
+ {#State 12
+ DEFAULT => -3
+ },
+ {#State 13
+ DEFAULT => -32
+ },
+ {#State 14
+ ACTIONS => {
+ "^" => 21,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -26
+ },
+ {#State 15
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 45,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 16
+ DEFAULT => 0
+ },
+ {#State 17
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 46,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 18
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 47,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 19
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 48,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 20
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 49,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 21
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 50,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 22
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 51,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 23
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 52,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 24
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 53,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 25
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 54,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 26
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 55,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 27
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 56,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 28
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 57,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 29
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 58,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 30
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 59,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 31
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 60,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 32
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 61,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 33
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 62,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 34
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 63,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 35
+ ACTIONS => {
+ "^" => 21,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -5
+ },
+ {#State 36
+ ACTIONS => {
+ "^" => 21,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -27
+ },
+ {#State 37
+ ACTIONS => {
+ ")" => 64,
+ "->" => 43,
+ "." => 44
+ },
+ DEFAULT => -4
+ },
+ {#State 38
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ")" => 65,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ }
+ },
+ {#State 39
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -24
+ },
+ {#State 40
+ DEFAULT => -31
+ },
+ {#State 41
+ DEFAULT => -30
+ },
+ {#State 42
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ DEFAULT => -37,
+ GOTOS => {
+ 'exp' => 69,
+ 'var' => 11,
+ 'args' => 66,
+ 'func' => 12,
+ 'opt_args' => 70,
+ 'exp_or_possible_pointer' => 67,
+ 'possible_pointer' => 68
+ }
+ },
+ {#State 43
+ ACTIONS => {
+ 'VAR' => 71
+ }
+ },
+ {#State 44
+ ACTIONS => {
+ 'VAR' => 72
+ }
+ },
+ {#State 45
+ ACTIONS => {
+ "<" => 17,
+ "==" => 20,
+ "^" => 21,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -7
+ },
+ {#State 46
+ ACTIONS => {
+ "==" => 20,
+ "^" => 21,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -10
+ },
+ {#State 47
+ ACTIONS => {
+ "<" => 17,
+ "==" => 20,
+ "^" => 21,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -6
+ },
+ {#State 48
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "==" => 20,
+ "^" => 21,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -9
+ },
+ {#State 49
+ ACTIONS => {
+ "^" => 21,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -13
+ },
+ {#State 50
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -28
+ },
+ {#State 51
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "==" => 20,
+ "^" => 21,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -8
+ },
+ {#State 52
+ ACTIONS => {
+ "<" => 17,
+ "==" => 20,
+ "^" => 21,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -17
+ },
+ {#State 53
+ ACTIONS => {
+ "^" => 21,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -18
+ },
+ {#State 54
+ ACTIONS => {
+ ":" => 73,
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ }
+ },
+ {#State 55
+ ACTIONS => {
+ "^" => 21,
+ "?" => 25,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -19
+ },
+ {#State 56
+ ACTIONS => {
+ "^" => 21,
+ "?" => 25,
+ "||" => 26,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -20
+ },
+ {#State 57
+ ACTIONS => {
+ "^" => 21,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "|" => 30,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -21
+ },
+ {#State 58
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "==" => 20,
+ "^" => 21,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -25
+ },
+ {#State 59
+ ACTIONS => {
+ "^" => 21,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -12
+ },
+ {#State 60
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -15
+ },
+ {#State 61
+ ACTIONS => {
+ "<" => 17,
+ "==" => 20,
+ "^" => 21,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -16
+ },
+ {#State 62
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -14
+ },
+ {#State 63
+ ACTIONS => {
+ "==" => 20,
+ "^" => 21,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "|" => 30,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -11
+ },
+ {#State 64
+ DEFAULT => -34
+ },
+ {#State 65
+ DEFAULT => -29
+ },
+ {#State 66
+ DEFAULT => -38
+ },
+ {#State 67
+ ACTIONS => {
+ "," => 74
+ },
+ DEFAULT => -41
+ },
+ {#State 68
+ DEFAULT => -32
+ },
+ {#State 69
+ ACTIONS => {
+ "-" => 15,
+ "<" => 17,
+ "+" => 18,
+ "%" => 19,
+ "==" => 20,
+ "^" => 21,
+ "*" => 22,
+ ">>" => 23,
+ "!=" => 24,
+ "?" => 25,
+ "&&" => 27,
+ "||" => 26,
+ "&" => 28,
+ "/" => 29,
+ "|" => 30,
+ "=>" => 31,
+ "<<" => 32,
+ "<=" => 33,
+ ">" => 34
+ },
+ DEFAULT => -39
+ },
+ {#State 70
+ ACTIONS => {
+ ")" => 75
+ }
+ },
+ {#State 71
+ DEFAULT => -35
+ },
+ {#State 72
+ DEFAULT => -33
+ },
+ {#State 73
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 76,
+ 'var' => 11,
+ 'func' => 12,
+ 'possible_pointer' => 13
+ }
+ },
+ {#State 74
+ ACTIONS => {
+ "-" => 1,
+ "~" => 3,
+ "&" => 4,
+ 'TEXT' => 6,
+ 'NUM' => 5,
+ "!" => 8,
+ "(" => 7,
+ "*" => 9,
+ 'VAR' => 10
+ },
+ GOTOS => {
+ 'exp' => 69,
+ 'var' => 11,
+ 'args' => 77,
+ 'func' => 12,
+ 'exp_or_possible_pointer' => 67,
+ 'possible_pointer' => 68
+ }
+ },
+ {#State 75
+ DEFAULT => -36
+ },
+ {#State 76
+ ACTIONS => {
+ "^" => 21,
+ "=>" => 31,
+ "<=" => 33
+ },
+ DEFAULT => -22
+ },
+ {#State 77
+ DEFAULT => -42
+ }
+],
+ yyrules =>
+[
+ [#Rule 0
+ '$start', 2, undef
+ ],
+ [#Rule 1
+ 'exp', 1, undef
+ ],
+ [#Rule 2
+ 'exp', 1,
+sub
+#line 24 "expr.yp"
+{ "\"$_[1]\"" }
+ ],
+ [#Rule 3
+ 'exp', 1, undef
+ ],
+ [#Rule 4
+ 'exp', 1, undef
+ ],
+ [#Rule 5
+ 'exp', 2,
+sub
+#line 30 "expr.yp"
+{ "~$_[2]" }
+ ],
+ [#Rule 6
+ 'exp', 3,
+sub
+#line 32 "expr.yp"
+{ "$_[1] + $_[3]" }
+ ],
+ [#Rule 7
+ 'exp', 3,
+sub
+#line 34 "expr.yp"
+{ "$_[1] - $_[3]" }
+ ],
+ [#Rule 8
+ 'exp', 3,
+sub
+#line 36 "expr.yp"
+{ "$_[1] * $_[3]" }
+ ],
+ [#Rule 9
+ 'exp', 3,
+sub
+#line 38 "expr.yp"
+{ "$_[1] % $_[3]" }
+ ],
+ [#Rule 10
+ 'exp', 3,
+sub
+#line 40 "expr.yp"
+{ "$_[1] < $_[3]" }
+ ],
+ [#Rule 11
+ 'exp', 3,
+sub
+#line 42 "expr.yp"
+{ "$_[1] > $_[3]" }
+ ],
+ [#Rule 12
+ 'exp', 3,
+sub
+#line 44 "expr.yp"
+{ "$_[1] | $_[3]" }
+ ],
+ [#Rule 13
+ 'exp', 3,
+sub
+#line 46 "expr.yp"
+{ "$_[1] == $_[3]" }
+ ],
+ [#Rule 14
+ 'exp', 3,
+sub
+#line 48 "expr.yp"
+{ "$_[1] <= $_[3]" }
+ ],
+ [#Rule 15
+ 'exp', 3,
+sub
+#line 50 "expr.yp"
+{ "$_[1] => $_[3]" }
+ ],
+ [#Rule 16
+ 'exp', 3,
+sub
+#line 52 "expr.yp"
+{ "$_[1] << $_[3]" }
+ ],
+ [#Rule 17
+ 'exp', 3,
+sub
+#line 54 "expr.yp"
+{ "$_[1] >> $_[3]" }
+ ],
+ [#Rule 18
+ 'exp', 3,
+sub
+#line 56 "expr.yp"
+{ "$_[1] != $_[3]" }
+ ],
+ [#Rule 19
+ 'exp', 3,
+sub
+#line 58 "expr.yp"
+{ "$_[1] || $_[3]" }
+ ],
+ [#Rule 20
+ 'exp', 3,
+sub
+#line 60 "expr.yp"
+{ "$_[1] && $_[3]" }
+ ],
+ [#Rule 21
+ 'exp', 3,
+sub
+#line 62 "expr.yp"
+{ "$_[1] & $_[3]" }
+ ],
+ [#Rule 22
+ 'exp', 5,
+sub
+#line 64 "expr.yp"
+{ "$_[1]?$_[3]:$_[5]" }
+ ],
+ [#Rule 23
+ 'exp', 2,
+sub
+#line 66 "expr.yp"
+{ "~$_[1]" }
+ ],
+ [#Rule 24
+ 'exp', 2,
+sub
+#line 68 "expr.yp"
+{ "not $_[1]" }
+ ],
+ [#Rule 25
+ 'exp', 3,
+sub
+#line 70 "expr.yp"
+{ "$_[1] / $_[3]" }
+ ],
+ [#Rule 26
+ 'exp', 2,
+sub
+#line 72 "expr.yp"
+{ "-$_[2]" }
+ ],
+ [#Rule 27
+ 'exp', 2,
+sub
+#line 74 "expr.yp"
+{ "&$_[2]" }
+ ],
+ [#Rule 28
+ 'exp', 3,
+sub
+#line 76 "expr.yp"
+{ "$_[1]^$_[3]" }
+ ],
+ [#Rule 29
+ 'exp', 3,
+sub
+#line 78 "expr.yp"
+{ "($_[2])" }
+ ],
+ [#Rule 30
+ 'possible_pointer', 1,
+sub
+#line 82 "expr.yp"
+{ $_[0]->_Lookup($_[1]) }
+ ],
+ [#Rule 31
+ 'possible_pointer', 2,
+sub
+#line 84 "expr.yp"
+{ $_[0]->_Dereference($_[2]); "*$_[2]" }
+ ],
+ [#Rule 32
+ 'var', 1,
+sub
+#line 88 "expr.yp"
+{ $_[0]->_Use($_[1]) }
+ ],
+ [#Rule 33
+ 'var', 3,
+sub
+#line 90 "expr.yp"
+{ $_[0]->_Use("$_[1].$_[3]") }
+ ],
+ [#Rule 34
+ 'var', 3,
+sub
+#line 92 "expr.yp"
+{ "($_[2])" }
+ ],
+ [#Rule 35
+ 'var', 3,
+sub
+#line 94 "expr.yp"
+{ $_[0]->_Use("*$_[1]"); $_[1]."->".$_[3] }
+ ],
+ [#Rule 36
+ 'func', 4,
+sub
+#line 99 "expr.yp"
+{ "$_[1]($_[3])" }
+ ],
+ [#Rule 37
+ 'opt_args', 0,
+sub
+#line 104 "expr.yp"
+{ "" }
+ ],
+ [#Rule 38
+ 'opt_args', 1, undef
+ ],
+ [#Rule 39
+ 'exp_or_possible_pointer', 1, undef
+ ],
+ [#Rule 40
+ 'exp_or_possible_pointer', 1, undef
+ ],
+ [#Rule 41
+ 'args', 1, undef
+ ],
+ [#Rule 42
+ 'args', 3,
+sub
+#line 118 "expr.yp"
+{ "$_[1], $_[3]" }
+ ]
+],
+ @_);
+ bless($self,$class);
+}
+
+#line 121 "expr.yp"
+
+
+package Parse::Pidl::Expr;
+
+# Tokenizer callback handed to Parse::Yapp's YYParse (yylex).  Each call
+# consumes one token from the front of YYData->{INPUT} and returns a
+# (TOKEN, value) pair; returns an empty list once the input is exhausted.
+# Every recognized token is also recorded in YYData->{LAST_TOKEN} so
+# _Error below can point at it in diagnostics.
+sub _Lexer {
+	my($parser)=shift;
+
+	# NOTE(review): the substitution is unquantified ([ \t], not [ \t]+),
+	# so only ONE leading blank is stripped per call; a run of consecutive
+	# blanks would fall through to the catch-all single-character rule
+	# below and be returned as a token -- confirm against expr.yp whether
+	# multi-space input is ever expected here.
+	$parser->YYData->{INPUT}=~s/^[ \t]//;
+
+	for ($parser->YYData->{INPUT}) {
+		# Hexadecimal literal, e.g. 0xFF.
+		if (s/^(0x[0-9A-Fa-f]+)//) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return('NUM',$1);
+		}
+		# Decimal integer or simple decimal fraction.
+		if (s/^([0-9]+(?:\.[0-9]+)?)//) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return('NUM',$1);
+		}
+		# Identifier (C-style): letter/underscore then alphanumerics.
+		if (s/^([A-Za-z_][A-Za-z0-9_]*)//) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return('VAR',$1);
+		}
+		# Double-quoted string; non-greedy match, quotes stripped from
+		# the token value.
+		if (s/^\"(.*?)\"//) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return('TEXT',$1);
+		}
+		# Two-character operators must be matched before the one-char
+		# catch-all so e.g. "==" is not split into two "=" tokens.
+		if (s/^(==|!=|<=|>=|->|\|\||<<|>>|&&)//s) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return($1,$1);
+		}
+		# Catch-all: any remaining single character is its own token.
+		if (s/^(.)//s) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return($1,$1);
+		}
+	}
+}
+
+# Map a variable reference $x through the caller-supplied USE callback
+# (installed by Run below); identity when no callback was given.
+# Invoked from the grammar actions for 'var' productions (Rules 32/33/35).
+sub _Use($$)
+{
+	my ($self, $x) = @_;
+	if (defined($self->YYData->{USE})) {
+		return $self->YYData->{USE}->($x);
+	}
+	return $x;
+}
+
+# Resolve identifier $x through the caller-supplied LOOKUP callback
+# (installed by Run below); used by the 'possible_pointer' action (Rule 30).
+# NOTE(review): unlike USE/DEREFERENCE there is no defined() guard here,
+# so Run must always be given a lookup callback -- verify all callers.
+sub _Lookup($$)
+{
+	my ($self, $x) = @_;
+	return $self->YYData->{LOOKUP}->($x);
+}
+
+# Notify the caller-supplied DEREFERENCE callback (if any) that $x is
+# being dereferenced.  Called for its side effect only: the grammar action
+# for Rule 31 discards this sub's return value and builds "*$x" itself.
+sub _Dereference($$)
+{
+	my ($self, $x) = @_;
+	if (defined($self->YYData->{DEREFERENCE})) {
+		$self->YYData->{DEREFERENCE}->($x);
+	}
+}
+
+# Error callback handed to YYParse (yyerror).  Reports a parse error via
+# the caller-supplied ERROR callback, quoting the full original input and,
+# when the lexer has recorded one, the token at which parsing failed.
+sub _Error($)
+{
+	my ($self) = @_;
+	if (defined($self->YYData->{LAST_TOKEN})) {
+		$self->YYData->{ERROR}->("Parse error in `".$self->YYData->{FULL_INPUT}."' near `". $self->YYData->{LAST_TOKEN} . "'");
+	} else {
+		$self->YYData->{ERROR}->("Parse error in `".$self->YYData->{FULL_INPUT}."'");
+	}
+}
+
+# Public entry point: parse the expression string $data.
+#
+# Parameters:
+#   $data   - expression text to parse (kept in FULL_INPUT for error
+#             messages; INPUT is the working copy consumed by _Lexer)
+#   $error  - callback invoked with a message on parse error (required)
+#   $lookup - callback mapping an identifier to its replacement; required
+#             (see the NOTE on _Lookup above)
+#   $deref  - optional callback notified when a variable is dereferenced
+#   $use    - optional callback applied to each variable use
+#
+# Returns whatever YYParse yields: the string built by the grammar
+# actions on success.
+sub Run {
+	my($self, $data, $error, $lookup, $deref, $use) = @_;
+
+	$self->YYData->{FULL_INPUT} = $data;
+	$self->YYData->{INPUT} = $data;
+	$self->YYData->{LOOKUP} = $lookup;
+	$self->YYData->{DEREFERENCE} = $deref;
+	$self->YYData->{ERROR} = $error;
+	$self->YYData->{USE} = $use;
+
+	return $self->YYParse( yylex => \&_Lexer, yyerror => \&_Error);
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/IDL.pm b/tools/pidl/lib/Parse/Pidl/IDL.pm
new file mode 100644
index 0000000..6927c89
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/IDL.pm
@@ -0,0 +1,2664 @@
+####################################################################
+#
+# This file was generated using Parse::Yapp version 1.05.
+#
+# Don't edit this file, use source file instead.
+#
+# ANY CHANGE MADE HERE WILL BE LOST !
+#
+####################################################################
+package Parse::Pidl::IDL;
+use vars qw ( @ISA );
+use strict;
+
+@ISA= qw ( Parse::Yapp::Driver );
+use Parse::Yapp::Driver;
+
+
+
+sub new {
+ my($class)=shift;
+ ref($class)
+ and $class=ref($class);
+
+ my($self)=$class->SUPER::new( yyversion => '1.05',
+ yystates =>
+[
+ {#State 0
+ DEFAULT => -1,
+ GOTOS => {
+ 'idl' => 1
+ }
+ },
+ {#State 1
+ ACTIONS => {
+ '' => 2,
+ "cpp_quote" => 3,
+ "importlib" => 4,
+ "import" => 7,
+ "include" => 13
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'cpp_quote' => 11,
+ 'importlib' => 10,
+ 'interface' => 9,
+ 'include' => 5,
+ 'coclass' => 12,
+ 'import' => 8,
+ 'property_list' => 6
+ }
+ },
+ {#State 2
+ DEFAULT => 0
+ },
+ {#State 3
+ ACTIONS => {
+ "(" => 14
+ }
+ },
+ {#State 4
+ ACTIONS => {
+ 'TEXT' => 16
+ },
+ GOTOS => {
+ 'commalist' => 15,
+ 'text' => 17
+ }
+ },
+ {#State 5
+ DEFAULT => -5
+ },
+ {#State 6
+ ACTIONS => {
+ "coclass" => 18,
+ "[" => 20,
+ "interface" => 19
+ }
+ },
+ {#State 7
+ ACTIONS => {
+ 'TEXT' => 16
+ },
+ GOTOS => {
+ 'commalist' => 21,
+ 'text' => 17
+ }
+ },
+ {#State 8
+ DEFAULT => -4
+ },
+ {#State 9
+ DEFAULT => -2
+ },
+ {#State 10
+ DEFAULT => -6
+ },
+ {#State 11
+ DEFAULT => -7
+ },
+ {#State 12
+ DEFAULT => -3
+ },
+ {#State 13
+ ACTIONS => {
+ 'TEXT' => 16
+ },
+ GOTOS => {
+ 'commalist' => 22,
+ 'text' => 17
+ }
+ },
+ {#State 14
+ ACTIONS => {
+ 'TEXT' => 16
+ },
+ GOTOS => {
+ 'text' => 23
+ }
+ },
+ {#State 15
+ ACTIONS => {
+ ";" => 24,
+ "," => 25
+ }
+ },
+ {#State 16
+ DEFAULT => -120
+ },
+ {#State 17
+ DEFAULT => -11
+ },
+ {#State 18
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 27
+ }
+ },
+ {#State 19
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 28
+ }
+ },
+ {#State 20
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 30,
+ 'property' => 31,
+ 'properties' => 29
+ }
+ },
+ {#State 21
+ ACTIONS => {
+ ";" => 32,
+ "," => 25
+ }
+ },
+ {#State 22
+ ACTIONS => {
+ ";" => 33,
+ "," => 25
+ }
+ },
+ {#State 23
+ ACTIONS => {
+ ")" => 34
+ }
+ },
+ {#State 24
+ DEFAULT => -10
+ },
+ {#State 25
+ ACTIONS => {
+ 'TEXT' => 16
+ },
+ GOTOS => {
+ 'text' => 35
+ }
+ },
+ {#State 26
+ DEFAULT => -116
+ },
+ {#State 27
+ ACTIONS => {
+ "{" => 36
+ }
+ },
+ {#State 28
+ ACTIONS => {
+ ":" => 37
+ },
+ DEFAULT => -17,
+ GOTOS => {
+ 'base_interface' => 38
+ }
+ },
+ {#State 29
+ ACTIONS => {
+ "," => 39,
+ "]" => 40
+ }
+ },
+ {#State 30
+ ACTIONS => {
+ "(" => 41
+ },
+ DEFAULT => -93
+ },
+ {#State 31
+ DEFAULT => -91
+ },
+ {#State 32
+ DEFAULT => -8
+ },
+ {#State 33
+ DEFAULT => -9
+ },
+ {#State 34
+ DEFAULT => -19
+ },
+ {#State 35
+ DEFAULT => -12
+ },
+ {#State 36
+ DEFAULT => -14,
+ GOTOS => {
+ 'interface_names' => 42
+ }
+ },
+ {#State 37
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 43
+ }
+ },
+ {#State 38
+ ACTIONS => {
+ "{" => 44
+ }
+ },
+ {#State 39
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 30,
+ 'property' => 45
+ }
+ },
+ {#State 40
+ DEFAULT => -90
+ },
+ {#State 41
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'text' => 51,
+ 'anytext' => 46,
+ 'constant' => 47,
+ 'commalisttext' => 49
+ }
+ },
+ {#State 42
+ ACTIONS => {
+ "}" => 52,
+ "interface" => 53
+ }
+ },
+ {#State 43
+ DEFAULT => -18
+ },
+ {#State 44
+ ACTIONS => {
+ "const" => 64
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'typedecl' => 54,
+ 'function' => 55,
+ 'pipe' => 56,
+ 'definitions' => 58,
+ 'bitmap' => 57,
+ 'definition' => 61,
+ 'property_list' => 60,
+ 'usertype' => 59,
+ 'const' => 63,
+ 'struct' => 62,
+ 'typedef' => 66,
+ 'enum' => 65,
+ 'union' => 67
+ }
+ },
+ {#State 45
+ DEFAULT => -92
+ },
+ {#State 46
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -95
+ },
+ {#State 47
+ DEFAULT => -99
+ },
+ {#State 48
+ DEFAULT => -119
+ },
+ {#State 49
+ ACTIONS => {
+ "," => 83,
+ ")" => 84
+ }
+ },
+ {#State 50
+ DEFAULT => -98
+ },
+ {#State 51
+ DEFAULT => -100
+ },
+ {#State 52
+ ACTIONS => {
+ ";" => 86
+ },
+ DEFAULT => -121,
+ GOTOS => {
+ 'optional_semicolon' => 85
+ }
+ },
+ {#State 53
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 87
+ }
+ },
+ {#State 54
+ DEFAULT => -25
+ },
+ {#State 55
+ DEFAULT => -22
+ },
+ {#State 56
+ DEFAULT => -34
+ },
+ {#State 57
+ DEFAULT => -33
+ },
+ {#State 58
+ ACTIONS => {
+ "}" => 88,
+ "const" => 64
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'typedecl' => 54,
+ 'function' => 55,
+ 'pipe' => 56,
+ 'bitmap' => 57,
+ 'definition' => 89,
+ 'property_list' => 60,
+ 'usertype' => 59,
+ 'const' => 63,
+ 'struct' => 62,
+ 'typedef' => 66,
+ 'enum' => 65,
+ 'union' => 67
+ }
+ },
+ {#State 59
+ ACTIONS => {
+ ";" => 90
+ }
+ },
+ {#State 60
+ ACTIONS => {
+ "typedef" => 91,
+ 'IDENTIFIER' => 26,
+ "signed" => 100,
+ "union" => 92,
+ "enum" => 101,
+ "bitmap" => 102,
+ 'void' => 93,
+ "pipe" => 103,
+ "unsigned" => 104,
+ "[" => 20,
+ "struct" => 98
+ },
+ GOTOS => {
+ 'existingtype' => 99,
+ 'pipe' => 56,
+ 'bitmap' => 57,
+ 'usertype' => 95,
+ 'property_list' => 94,
+ 'identifier' => 96,
+ 'struct' => 62,
+ 'enum' => 65,
+ 'type' => 105,
+ 'union' => 67,
+ 'sign' => 97
+ }
+ },
+ {#State 61
+ DEFAULT => -20
+ },
+ {#State 62
+ DEFAULT => -30
+ },
+ {#State 63
+ DEFAULT => -23
+ },
+ {#State 64
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 106
+ }
+ },
+ {#State 65
+ DEFAULT => -32
+ },
+ {#State 66
+ DEFAULT => -24
+ },
+ {#State 67
+ DEFAULT => -31
+ },
+ {#State 68
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 107,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 69
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 108,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 70
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 109,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 71
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 110,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 72
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 111,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 73
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 112,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 74
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 46,
+ 'text' => 51,
+ 'constant' => 47,
+ 'commalisttext' => 113
+ }
+ },
+ {#State 75
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 114,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 76
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 115,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 77
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 116,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 78
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 46,
+ 'text' => 51,
+ 'constant' => 47,
+ 'commalisttext' => 117
+ }
+ },
+ {#State 79
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 118,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 80
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 119,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 81
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 120,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 82
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 121,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 83
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 122,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 84
+ DEFAULT => -94
+ },
+ {#State 85
+ DEFAULT => -13
+ },
+ {#State 86
+ DEFAULT => -122
+ },
+ {#State 87
+ ACTIONS => {
+ ";" => 123
+ }
+ },
+ {#State 88
+ ACTIONS => {
+ ";" => 86
+ },
+ DEFAULT => -121,
+ GOTOS => {
+ 'optional_semicolon' => 124
+ }
+ },
+ {#State 89
+ DEFAULT => -21
+ },
+ {#State 90
+ DEFAULT => -35
+ },
+ {#State 91
+ ACTIONS => {
+ 'IDENTIFIER' => 26,
+ "signed" => 100,
+ 'void' => 93,
+ "unsigned" => 104
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'existingtype' => 99,
+ 'pipe' => 56,
+ 'bitmap' => 57,
+ 'usertype' => 95,
+ 'property_list' => 94,
+ 'identifier' => 96,
+ 'struct' => 62,
+ 'enum' => 65,
+ 'type' => 125,
+ 'union' => 67,
+ 'sign' => 97
+ }
+ },
+ {#State 92
+ ACTIONS => {
+ 'IDENTIFIER' => 126
+ },
+ DEFAULT => -117,
+ GOTOS => {
+ 'optional_identifier' => 127
+ }
+ },
+ {#State 93
+ DEFAULT => -42
+ },
+ {#State 94
+ ACTIONS => {
+ "pipe" => 103,
+ "union" => 92,
+ "enum" => 101,
+ "bitmap" => 102,
+ "[" => 20,
+ "struct" => 98
+ }
+ },
+ {#State 95
+ DEFAULT => -40
+ },
+ {#State 96
+ DEFAULT => -39
+ },
+ {#State 97
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 128
+ }
+ },
+ {#State 98
+ ACTIONS => {
+ 'IDENTIFIER' => 126
+ },
+ DEFAULT => -117,
+ GOTOS => {
+ 'optional_identifier' => 129
+ }
+ },
+ {#State 99
+ DEFAULT => -41
+ },
+ {#State 100
+ DEFAULT => -36
+ },
+ {#State 101
+ ACTIONS => {
+ 'IDENTIFIER' => 126
+ },
+ DEFAULT => -117,
+ GOTOS => {
+ 'optional_identifier' => 130
+ }
+ },
+ {#State 102
+ ACTIONS => {
+ 'IDENTIFIER' => 126
+ },
+ DEFAULT => -117,
+ GOTOS => {
+ 'optional_identifier' => 131
+ }
+ },
+ {#State 103
+ ACTIONS => {
+ 'IDENTIFIER' => 26,
+ "signed" => 100,
+ 'void' => 93,
+ "unsigned" => 104
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'existingtype' => 99,
+ 'pipe' => 56,
+ 'bitmap' => 57,
+ 'usertype' => 95,
+ 'property_list' => 94,
+ 'identifier' => 96,
+ 'struct' => 62,
+ 'enum' => 65,
+ 'type' => 132,
+ 'union' => 67,
+ 'sign' => 97
+ }
+ },
+ {#State 104
+ DEFAULT => -37
+ },
+ {#State 105
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 133
+ }
+ },
+ {#State 106
+ DEFAULT => -75,
+ GOTOS => {
+ 'pointers' => 134
+ }
+ },
+ {#State 107
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -110
+ },
+ {#State 108
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -101
+ },
+ {#State 109
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -109
+ },
+ {#State 110
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -105
+ },
+ {#State 111
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -113
+ },
+ {#State 112
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -112
+ },
+ {#State 113
+ ACTIONS => {
+ "}" => 135,
+ "," => 83
+ }
+ },
+ {#State 114
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -107
+ },
+ {#State 115
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -108
+ },
+ {#State 116
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -111
+ },
+ {#State 117
+ ACTIONS => {
+ "," => 83,
+ ")" => 136
+ }
+ },
+ {#State 118
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -106
+ },
+ {#State 119
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -103
+ },
+ {#State 120
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -102
+ },
+ {#State 121
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -104
+ },
+ {#State 122
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -96
+ },
+ {#State 123
+ DEFAULT => -15
+ },
+ {#State 124
+ DEFAULT => -16
+ },
+ {#State 125
+ DEFAULT => -75,
+ GOTOS => {
+ 'pointers' => 137
+ }
+ },
+ {#State 126
+ DEFAULT => -118
+ },
+ {#State 127
+ ACTIONS => {
+ "{" => 139
+ },
+ DEFAULT => -71,
+ GOTOS => {
+ 'union_body' => 140,
+ 'opt_union_body' => 138
+ }
+ },
+ {#State 128
+ DEFAULT => -38
+ },
+ {#State 129
+ ACTIONS => {
+ "{" => 142
+ },
+ DEFAULT => -61,
+ GOTOS => {
+ 'struct_body' => 141,
+ 'opt_struct_body' => 143
+ }
+ },
+ {#State 130
+ ACTIONS => {
+ "{" => 144
+ },
+ DEFAULT => -44,
+ GOTOS => {
+ 'opt_enum_body' => 146,
+ 'enum_body' => 145
+ }
+ },
+ {#State 131
+ ACTIONS => {
+ "{" => 148
+ },
+ DEFAULT => -52,
+ GOTOS => {
+ 'bitmap_body' => 149,
+ 'opt_bitmap_body' => 147
+ }
+ },
+ {#State 132
+ DEFAULT => -77
+ },
+ {#State 133
+ ACTIONS => {
+ "(" => 150
+ }
+ },
+ {#State 134
+ ACTIONS => {
+ 'IDENTIFIER' => 26,
+ "*" => 152
+ },
+ GOTOS => {
+ 'identifier' => 151
+ }
+ },
+ {#State 135
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 153,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 136
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 154,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 137
+ ACTIONS => {
+ 'IDENTIFIER' => 26,
+ "*" => 152
+ },
+ GOTOS => {
+ 'identifier' => 155
+ }
+ },
+ {#State 138
+ DEFAULT => -73
+ },
+ {#State 139
+ DEFAULT => -68,
+ GOTOS => {
+ 'union_elements' => 156
+ }
+ },
+ {#State 140
+ DEFAULT => -72
+ },
+ {#State 141
+ DEFAULT => -62
+ },
+ {#State 142
+ DEFAULT => -78,
+ GOTOS => {
+ 'element_list1' => 157
+ }
+ },
+ {#State 143
+ DEFAULT => -63
+ },
+ {#State 144
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 158,
+ 'enum_element' => 159,
+ 'enum_elements' => 160
+ }
+ },
+ {#State 145
+ DEFAULT => -45
+ },
+ {#State 146
+ DEFAULT => -46
+ },
+ {#State 147
+ DEFAULT => -54
+ },
+ {#State 148
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -57,
+ GOTOS => {
+ 'identifier' => 163,
+ 'bitmap_element' => 162,
+ 'bitmap_elements' => 161,
+ 'opt_bitmap_elements' => 164
+ }
+ },
+ {#State 149
+ DEFAULT => -53
+ },
+ {#State 150
+ ACTIONS => {
+ "," => -82,
+ "void" => 168,
+ "const" => 166,
+ ")" => -82
+ },
+ DEFAULT => -80,
+ GOTOS => {
+ 'optional_const' => 165,
+ 'element_list2' => 167
+ }
+ },
+ {#State 151
+ ACTIONS => {
+ "[" => 169,
+ "=" => 171
+ },
+ GOTOS => {
+ 'array_len' => 170
+ }
+ },
+ {#State 152
+ DEFAULT => -76
+ },
+ {#State 153
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -115
+ },
+ {#State 154
+ ACTIONS => {
+ ":" => 68,
+ "<" => 71,
+ "~" => 72,
+ "?" => 70,
+ "{" => 74,
+ "=" => 77
+ },
+ DEFAULT => -114
+ },
+ {#State 155
+ ACTIONS => {
+ "[" => 169
+ },
+ DEFAULT => -86,
+ GOTOS => {
+ 'array_len' => 172
+ }
+ },
+ {#State 156
+ ACTIONS => {
+ "}" => 173
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'optional_base_element' => 175,
+ 'property_list' => 174
+ }
+ },
+ {#State 157
+ ACTIONS => {
+ "}" => 176
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'base_element' => 177,
+ 'property_list' => 178
+ }
+ },
+ {#State 158
+ ACTIONS => {
+ "=" => 179
+ },
+ DEFAULT => -49
+ },
+ {#State 159
+ DEFAULT => -47
+ },
+ {#State 160
+ ACTIONS => {
+ "}" => 180,
+ "," => 181
+ }
+ },
+ {#State 161
+ ACTIONS => {
+ "," => 182
+ },
+ DEFAULT => -58
+ },
+ {#State 162
+ DEFAULT => -55
+ },
+ {#State 163
+ ACTIONS => {
+ "=" => 183
+ }
+ },
+ {#State 164
+ ACTIONS => {
+ "}" => 184
+ }
+ },
+ {#State 165
+ DEFAULT => -89,
+ GOTOS => {
+ 'base_element' => 185,
+ 'property_list' => 178
+ }
+ },
+ {#State 166
+ DEFAULT => -81
+ },
+ {#State 167
+ ACTIONS => {
+ "," => 186,
+ ")" => 187
+ }
+ },
+ {#State 168
+ DEFAULT => -83
+ },
+ {#State 169
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ "]" => 188,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 189,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 170
+ ACTIONS => {
+ "=" => 190
+ }
+ },
+ {#State 171
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 191,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 172
+ ACTIONS => {
+ ";" => 192
+ }
+ },
+ {#State 173
+ DEFAULT => -70
+ },
+ {#State 174
+ ACTIONS => {
+ "[" => 20
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'base_or_empty' => 193,
+ 'base_element' => 194,
+ 'empty_element' => 195,
+ 'property_list' => 196
+ }
+ },
+ {#State 175
+ DEFAULT => -69
+ },
+ {#State 176
+ DEFAULT => -60
+ },
+ {#State 177
+ ACTIONS => {
+ ";" => 197
+ }
+ },
+ {#State 178
+ ACTIONS => {
+ 'IDENTIFIER' => 26,
+ "signed" => 100,
+ 'void' => 93,
+ "unsigned" => 104,
+ "[" => 20
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'existingtype' => 99,
+ 'pipe' => 56,
+ 'bitmap' => 57,
+ 'usertype' => 95,
+ 'property_list' => 94,
+ 'identifier' => 96,
+ 'struct' => 62,
+ 'enum' => 65,
+ 'type' => 198,
+ 'union' => 67,
+ 'sign' => 97
+ }
+ },
+ {#State 179
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 199,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 180
+ DEFAULT => -43
+ },
+ {#State 181
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 158,
+ 'enum_element' => 200
+ }
+ },
+ {#State 182
+ ACTIONS => {
+ 'IDENTIFIER' => 26
+ },
+ GOTOS => {
+ 'identifier' => 163,
+ 'bitmap_element' => 201
+ }
+ },
+ {#State 183
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 202,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 184
+ DEFAULT => -51
+ },
+ {#State 185
+ DEFAULT => -84
+ },
+ {#State 186
+ ACTIONS => {
+ "const" => 166
+ },
+ DEFAULT => -80,
+ GOTOS => {
+ 'optional_const' => 203
+ }
+ },
+ {#State 187
+ ACTIONS => {
+ ";" => 204
+ }
+ },
+ {#State 188
+ ACTIONS => {
+ "[" => 169
+ },
+ DEFAULT => -86,
+ GOTOS => {
+ 'array_len' => 205
+ }
+ },
+ {#State 189
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "?" => 70,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "&" => 75,
+ "{" => 74,
+ "/" => 76,
+ "=" => 77,
+ "|" => 79,
+ "(" => 78,
+ "*" => 80,
+ "." => 81,
+ "]" => 206,
+ ">" => 82
+ }
+ },
+ {#State 190
+ ACTIONS => {
+ 'CONSTANT' => 48,
+ 'TEXT' => 16,
+ 'IDENTIFIER' => 26
+ },
+ DEFAULT => -97,
+ GOTOS => {
+ 'identifier' => 50,
+ 'anytext' => 207,
+ 'text' => 51,
+ 'constant' => 47
+ }
+ },
+ {#State 191
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "?" => 70,
+ "<" => 71,
+ ";" => 208,
+ "+" => 73,
+ "~" => 72,
+ "&" => 75,
+ "{" => 74,
+ "/" => 76,
+ "=" => 77,
+ "|" => 79,
+ "(" => 78,
+ "*" => 80,
+ "." => 81,
+ ">" => 82
+ }
+ },
+ {#State 192
+ DEFAULT => -29
+ },
+ {#State 193
+ DEFAULT => -67
+ },
+ {#State 194
+ ACTIONS => {
+ ";" => 209
+ }
+ },
+ {#State 195
+ DEFAULT => -66
+ },
+ {#State 196
+ ACTIONS => {
+ 'IDENTIFIER' => 26,
+ "signed" => 100,
+ ";" => 210,
+ 'void' => 93,
+ "unsigned" => 104,
+ "[" => 20
+ },
+ DEFAULT => -89,
+ GOTOS => {
+ 'existingtype' => 99,
+ 'pipe' => 56,
+ 'bitmap' => 57,
+ 'usertype' => 95,
+ 'property_list' => 94,
+ 'identifier' => 96,
+ 'struct' => 62,
+ 'enum' => 65,
+ 'type' => 198,
+ 'union' => 67,
+ 'sign' => 97
+ }
+ },
+ {#State 197
+ DEFAULT => -79
+ },
+ {#State 198
+ DEFAULT => -75,
+ GOTOS => {
+ 'pointers' => 211
+ }
+ },
+ {#State 199
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -50
+ },
+ {#State 200
+ DEFAULT => -48
+ },
+ {#State 201
+ DEFAULT => -56
+ },
+ {#State 202
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "<" => 71,
+ "+" => 73,
+ "~" => 72,
+ "*" => 80,
+ "?" => 70,
+ "{" => 74,
+ "&" => 75,
+ "/" => 76,
+ "=" => 77,
+ "(" => 78,
+ "|" => 79,
+ "." => 81,
+ ">" => 82
+ },
+ DEFAULT => -59
+ },
+ {#State 203
+ DEFAULT => -89,
+ GOTOS => {
+ 'base_element' => 212,
+ 'property_list' => 178
+ }
+ },
+ {#State 204
+ DEFAULT => -28
+ },
+ {#State 205
+ DEFAULT => -87
+ },
+ {#State 206
+ ACTIONS => {
+ "[" => 169
+ },
+ DEFAULT => -86,
+ GOTOS => {
+ 'array_len' => 213
+ }
+ },
+ {#State 207
+ ACTIONS => {
+ "-" => 69,
+ ":" => 68,
+ "?" => 70,
+ "<" => 71,
+ ";" => 214,
+ "+" => 73,
+ "~" => 72,
+ "&" => 75,
+ "{" => 74,
+ "/" => 76,
+ "=" => 77,
+ "|" => 79,
+ "(" => 78,
+ "*" => 80,
+ "." => 81,
+ ">" => 82
+ }
+ },
+ {#State 208
+ DEFAULT => -26
+ },
+ {#State 209
+ DEFAULT => -65
+ },
+ {#State 210
+ DEFAULT => -64
+ },
+ {#State 211
+ ACTIONS => {
+ 'IDENTIFIER' => 26,
+ "*" => 152
+ },
+ GOTOS => {
+ 'identifier' => 215
+ }
+ },
+ {#State 212
+ DEFAULT => -85
+ },
+ {#State 213
+ DEFAULT => -88
+ },
+ {#State 214
+ DEFAULT => -27
+ },
+ {#State 215
+ ACTIONS => {
+ "[" => 169
+ },
+ DEFAULT => -86,
+ GOTOS => {
+ 'array_len' => 216
+ }
+ },
+ {#State 216
+ DEFAULT => -74
+ }
+],
+ yyrules =>
+[
+ [#Rule 0
+ '$start', 2, undef
+ ],
+ [#Rule 1
+ 'idl', 0, undef
+ ],
+ [#Rule 2
+ 'idl', 2,
+sub
+#line 20 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 3
+ 'idl', 2,
+sub
+#line 22 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 4
+ 'idl', 2,
+sub
+#line 24 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 5
+ 'idl', 2,
+sub
+#line 26 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 6
+ 'idl', 2,
+sub
+#line 28 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 7
+ 'idl', 2,
+sub
+#line 30 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 8
+ 'import', 3,
+sub
+#line 35 "idl.yp"
+{{
+ "TYPE" => "IMPORT",
+ "PATHS" => $_[2],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 9
+ 'include', 3,
+sub
+#line 45 "idl.yp"
+{{
+ "TYPE" => "INCLUDE",
+ "PATHS" => $_[2],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 10
+ 'importlib', 3,
+sub
+#line 55 "idl.yp"
+{{
+ "TYPE" => "IMPORTLIB",
+ "PATHS" => $_[2],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 11
+ 'commalist', 1,
+sub
+#line 64 "idl.yp"
+{ [ $_[1] ] }
+ ],
+ [#Rule 12
+ 'commalist', 3,
+sub
+#line 66 "idl.yp"
+{ push(@{$_[1]}, $_[3]); $_[1] }
+ ],
+ [#Rule 13
+ 'coclass', 7,
+sub
+#line 71 "idl.yp"
+{{
+ "TYPE" => "COCLASS",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "DATA" => $_[5],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 14
+ 'interface_names', 0, undef
+ ],
+ [#Rule 15
+ 'interface_names', 4,
+sub
+#line 84 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 16
+ 'interface', 8,
+sub
+#line 89 "idl.yp"
+{{
+ "TYPE" => "INTERFACE",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "BASE" => $_[4],
+ "DATA" => $_[6],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 17
+ 'base_interface', 0, undef
+ ],
+ [#Rule 18
+ 'base_interface', 2,
+sub
+#line 103 "idl.yp"
+{ $_[2] }
+ ],
+ [#Rule 19
+ 'cpp_quote', 4,
+sub
+#line 109 "idl.yp"
+{{
+ "TYPE" => "CPP_QUOTE",
+ "DATA" => $_[3],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 20
+ 'definitions', 1,
+sub
+#line 118 "idl.yp"
+{ [ $_[1] ] }
+ ],
+ [#Rule 21
+ 'definitions', 2,
+sub
+#line 120 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 22
+ 'definition', 1, undef
+ ],
+ [#Rule 23
+ 'definition', 1, undef
+ ],
+ [#Rule 24
+ 'definition', 1, undef
+ ],
+ [#Rule 25
+ 'definition', 1, undef
+ ],
+ [#Rule 26
+ 'const', 7,
+sub
+#line 135 "idl.yp"
+{{
+ "TYPE" => "CONST",
+ "DTYPE" => $_[2],
+ "POINTERS" => $_[3],
+ "NAME" => $_[4],
+ "VALUE" => $_[6],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 27
+ 'const', 8,
+sub
+#line 146 "idl.yp"
+{{
+ "TYPE" => "CONST",
+ "DTYPE" => $_[2],
+ "POINTERS" => $_[3],
+ "NAME" => $_[4],
+ "ARRAY_LEN" => $_[5],
+ "VALUE" => $_[7],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 28
+ 'function', 7,
+sub
+#line 160 "idl.yp"
+{{
+ "TYPE" => "FUNCTION",
+ "NAME" => $_[3],
+ "RETURN_TYPE" => $_[2],
+ "PROPERTIES" => $_[1],
+ "ELEMENTS" => $_[5],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 29
+ 'typedef', 7,
+sub
+#line 173 "idl.yp"
+{{
+ "TYPE" => "TYPEDEF",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[5],
+ "DATA" => $_[3],
+ "POINTERS" => $_[4],
+ "ARRAY_LEN" => $_[6],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 30
+ 'usertype', 1, undef
+ ],
+ [#Rule 31
+ 'usertype', 1, undef
+ ],
+ [#Rule 32
+ 'usertype', 1, undef
+ ],
+ [#Rule 33
+ 'usertype', 1, undef
+ ],
+ [#Rule 34
+ 'usertype', 1, undef
+ ],
+ [#Rule 35
+ 'typedecl', 2,
+sub
+#line 198 "idl.yp"
+{ $_[1] }
+ ],
+ [#Rule 36
+ 'sign', 1, undef
+ ],
+ [#Rule 37
+ 'sign', 1, undef
+ ],
+ [#Rule 38
+ 'existingtype', 2,
+sub
+#line 208 "idl.yp"
+{ ($_[1]?$_[1]:"signed") ." $_[2]" }
+ ],
+ [#Rule 39
+ 'existingtype', 1, undef
+ ],
+ [#Rule 40
+ 'type', 1, undef
+ ],
+ [#Rule 41
+ 'type', 1, undef
+ ],
+ [#Rule 42
+ 'type', 1,
+sub
+#line 218 "idl.yp"
+{ "void" }
+ ],
+ [#Rule 43
+ 'enum_body', 3,
+sub
+#line 222 "idl.yp"
+{ $_[2] }
+ ],
+ [#Rule 44
+ 'opt_enum_body', 0, undef
+ ],
+ [#Rule 45
+ 'opt_enum_body', 1, undef
+ ],
+ [#Rule 46
+ 'enum', 4,
+sub
+#line 233 "idl.yp"
+{{
+ "TYPE" => "ENUM",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 47
+ 'enum_elements', 1,
+sub
+#line 244 "idl.yp"
+{ [ $_[1] ] }
+ ],
+ [#Rule 48
+ 'enum_elements', 3,
+sub
+#line 246 "idl.yp"
+{ push(@{$_[1]}, $_[3]); $_[1] }
+ ],
+ [#Rule 49
+ 'enum_element', 1, undef
+ ],
+ [#Rule 50
+ 'enum_element', 3,
+sub
+#line 252 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 51
+ 'bitmap_body', 3,
+sub
+#line 256 "idl.yp"
+{ $_[2] }
+ ],
+ [#Rule 52
+ 'opt_bitmap_body', 0, undef
+ ],
+ [#Rule 53
+ 'opt_bitmap_body', 1, undef
+ ],
+ [#Rule 54
+ 'bitmap', 4,
+sub
+#line 267 "idl.yp"
+{{
+ "TYPE" => "BITMAP",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 55
+ 'bitmap_elements', 1,
+sub
+#line 278 "idl.yp"
+{ [ $_[1] ] }
+ ],
+ [#Rule 56
+ 'bitmap_elements', 3,
+sub
+#line 280 "idl.yp"
+{ push(@{$_[1]}, $_[3]); $_[1] }
+ ],
+ [#Rule 57
+ 'opt_bitmap_elements', 0, undef
+ ],
+ [#Rule 58
+ 'opt_bitmap_elements', 1, undef
+ ],
+ [#Rule 59
+ 'bitmap_element', 3,
+sub
+#line 290 "idl.yp"
+{ "$_[1] ( $_[3] )" }
+ ],
+ [#Rule 60
+ 'struct_body', 3,
+sub
+#line 294 "idl.yp"
+{ $_[2] }
+ ],
+ [#Rule 61
+ 'opt_struct_body', 0, undef
+ ],
+ [#Rule 62
+ 'opt_struct_body', 1, undef
+ ],
+ [#Rule 63
+ 'struct', 4,
+sub
+#line 305 "idl.yp"
+{{
+ "TYPE" => "STRUCT",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 64
+ 'empty_element', 2,
+sub
+#line 317 "idl.yp"
+{{
+ "NAME" => "",
+ "TYPE" => "EMPTY",
+ "PROPERTIES" => $_[1],
+ "POINTERS" => 0,
+ "ARRAY_LEN" => [],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 65
+ 'base_or_empty', 2, undef
+ ],
+ [#Rule 66
+ 'base_or_empty', 1, undef
+ ],
+ [#Rule 67
+ 'optional_base_element', 2,
+sub
+#line 334 "idl.yp"
+{ $_[2]->{PROPERTIES} = FlattenHash([$_[1],$_[2]->{PROPERTIES}]); $_[2] }
+ ],
+ [#Rule 68
+ 'union_elements', 0, undef
+ ],
+ [#Rule 69
+ 'union_elements', 2,
+sub
+#line 340 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 70
+ 'union_body', 3,
+sub
+#line 344 "idl.yp"
+{ $_[2] }
+ ],
+ [#Rule 71
+ 'opt_union_body', 0, undef
+ ],
+ [#Rule 72
+ 'opt_union_body', 1, undef
+ ],
+ [#Rule 73
+ 'union', 4,
+sub
+#line 355 "idl.yp"
+{{
+ "TYPE" => "UNION",
+ "PROPERTIES" => $_[1],
+ "NAME" => $_[3],
+ "ELEMENTS" => $_[4],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 74
+ 'base_element', 5,
+sub
+#line 367 "idl.yp"
+{{
+ "NAME" => $_[4],
+ "TYPE" => $_[2],
+ "PROPERTIES" => $_[1],
+ "POINTERS" => $_[3],
+ "ARRAY_LEN" => $_[5],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 75
+ 'pointers', 0,
+sub
+#line 380 "idl.yp"
+{ 0 }
+ ],
+ [#Rule 76
+ 'pointers', 2,
+sub
+#line 382 "idl.yp"
+{ $_[1]+1 }
+ ],
+ [#Rule 77
+ 'pipe', 3,
+sub
+#line 387 "idl.yp"
+{{
+ "TYPE" => "PIPE",
+ "PROPERTIES" => $_[1],
+ "NAME" => undef,
+ "DATA" => {
+ "TYPE" => "STRUCT",
+ "PROPERTIES" => $_[1],
+ "NAME" => undef,
+ "ELEMENTS" => [{
+ "NAME" => "count",
+ "PROPERTIES" => $_[1],
+ "POINTERS" => 0,
+ "ARRAY_LEN" => [],
+ "TYPE" => "uint3264",
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ },{
+ "NAME" => "array",
+ "PROPERTIES" => $_[1],
+ "POINTERS" => 0,
+ "ARRAY_LEN" => [ "count" ],
+ "TYPE" => $_[3],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }],
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ },
+ "FILE" => $_[0]->YYData->{FILE},
+ "LINE" => $_[0]->YYData->{LINE},
+ }}
+ ],
+ [#Rule 78
+ 'element_list1', 0,
+sub
+#line 422 "idl.yp"
+{ [] }
+ ],
+ [#Rule 79
+ 'element_list1', 3,
+sub
+#line 424 "idl.yp"
+{ push(@{$_[1]}, $_[2]); $_[1] }
+ ],
+ [#Rule 80
+ 'optional_const', 0, undef
+ ],
+ [#Rule 81
+ 'optional_const', 1, undef
+ ],
+ [#Rule 82
+ 'element_list2', 0, undef
+ ],
+ [#Rule 83
+ 'element_list2', 1, undef
+ ],
+ [#Rule 84
+ 'element_list2', 2,
+sub
+#line 438 "idl.yp"
+{ [ $_[2] ] }
+ ],
+ [#Rule 85
+ 'element_list2', 4,
+sub
+#line 440 "idl.yp"
+{ push(@{$_[1]}, $_[4]); $_[1] }
+ ],
+ [#Rule 86
+ 'array_len', 0, undef
+ ],
+ [#Rule 87
+ 'array_len', 3,
+sub
+#line 446 "idl.yp"
+{ push(@{$_[3]}, "*"); $_[3] }
+ ],
+ [#Rule 88
+ 'array_len', 4,
+sub
+#line 448 "idl.yp"
+{ push(@{$_[4]}, "$_[2]"); $_[4] }
+ ],
+ [#Rule 89
+ 'property_list', 0, undef
+ ],
+ [#Rule 90
+ 'property_list', 4,
+sub
+#line 454 "idl.yp"
+{ FlattenHash([$_[1],$_[3]]); }
+ ],
+ [#Rule 91
+ 'properties', 1,
+sub
+#line 458 "idl.yp"
+{ $_[1] }
+ ],
+ [#Rule 92
+ 'properties', 3,
+sub
+#line 460 "idl.yp"
+{ FlattenHash([$_[1], $_[3]]); }
+ ],
+ [#Rule 93
+ 'property', 1,
+sub
+#line 464 "idl.yp"
+{{ "$_[1]" => "1" }}
+ ],
+ [#Rule 94
+ 'property', 4,
+sub
+#line 466 "idl.yp"
+{{ "$_[1]" => "$_[3]" }}
+ ],
+ [#Rule 95
+ 'commalisttext', 1, undef
+ ],
+ [#Rule 96
+ 'commalisttext', 3,
+sub
+#line 472 "idl.yp"
+{ "$_[1],$_[3]" }
+ ],
+ [#Rule 97
+ 'anytext', 0,
+sub
+#line 477 "idl.yp"
+{ "" }
+ ],
+ [#Rule 98
+ 'anytext', 1, undef
+ ],
+ [#Rule 99
+ 'anytext', 1, undef
+ ],
+ [#Rule 100
+ 'anytext', 1, undef
+ ],
+ [#Rule 101
+ 'anytext', 3,
+sub
+#line 485 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 102
+ 'anytext', 3,
+sub
+#line 487 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 103
+ 'anytext', 3,
+sub
+#line 489 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 104
+ 'anytext', 3,
+sub
+#line 491 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 105
+ 'anytext', 3,
+sub
+#line 493 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 106
+ 'anytext', 3,
+sub
+#line 495 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 107
+ 'anytext', 3,
+sub
+#line 497 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 108
+ 'anytext', 3,
+sub
+#line 499 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 109
+ 'anytext', 3,
+sub
+#line 501 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 110
+ 'anytext', 3,
+sub
+#line 503 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 111
+ 'anytext', 3,
+sub
+#line 505 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 112
+ 'anytext', 3,
+sub
+#line 507 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 113
+ 'anytext', 3,
+sub
+#line 509 "idl.yp"
+{ "$_[1]$_[2]$_[3]" }
+ ],
+ [#Rule 114
+ 'anytext', 5,
+sub
+#line 511 "idl.yp"
+{ "$_[1]$_[2]$_[3]$_[4]$_[5]" }
+ ],
+ [#Rule 115
+ 'anytext', 5,
+sub
+#line 513 "idl.yp"
+{ "$_[1]$_[2]$_[3]$_[4]$_[5]" }
+ ],
+ [#Rule 116
+ 'identifier', 1, undef
+ ],
+ [#Rule 117
+ 'optional_identifier', 0, undef
+ ],
+ [#Rule 118
+ 'optional_identifier', 1, undef
+ ],
+ [#Rule 119
+ 'constant', 1, undef
+ ],
+ [#Rule 120
+ 'text', 1,
+sub
+#line 531 "idl.yp"
+{ "\"$_[1]\"" }
+ ],
+ [#Rule 121
+ 'optional_semicolon', 0, undef
+ ],
+ [#Rule 122
+ 'optional_semicolon', 1, undef
+ ]
+],
+ @_);
+ bless($self,$class);
+}
+
+#line 543 "idl.yp"
+
+
+use Parse::Pidl qw(error);
+
+#####################################################################
+# flatten an array of hashes into a single hash
+sub FlattenHash($)
+{
+	# Merge an array of hashrefs into a single hashref.
+	# On duplicate keys, later hashes in the array win.
+	my $a = shift;
+	my %b;
+	for my $d (@{$a}) {
+		for my $k (keys %{$d}) {
+			$b{$k} = $d->{$k};
+		}
+	}
+	return \%b;
+}
+
+#####################################################################
+# traverse a perl data structure removing any empty arrays or
+# hashes and any hash elements that map to undef
+sub CleanData($)
+{
+	# Forward declaration so the recursive call below checks its prototype.
+	sub CleanData($);
+	my($v) = shift;
+
+	return undef if (not defined($v));
+
+	# Mutates $v in place, depth-first, then returns it.
+	if (ref($v) eq "ARRAY") {
+		foreach my $i (0 .. $#{$v}) {
+			CleanData($v->[$i]);
+		}
+		# this removes any undefined elements from the array
+		@{$v} = grep { defined $_ } @{$v};
+	} elsif (ref($v) eq "HASH") {
+		foreach my $x (keys %{$v}) {
+			CleanData($v->{$x});
+			# drop hash entries whose value is undef
+			if (!defined $v->{$x}) {
+				delete($v->{$x});
+				next;
+			}
+		}
+	}
+
+	return $v;
+}
+
+# Parse::Yapp error callback: $_[0] is the parser object.
+# Prefers an explicit ERRMSG set by the grammar actions; otherwise
+# reports a generic syntax error near the last token the lexer saw.
+sub _Error {
+	if (exists $_[0]->YYData->{ERRMSG}) {
+		error($_[0]->YYData, $_[0]->YYData->{ERRMSG});
+		# Consume the message so it is not reported twice.
+		delete $_[0]->YYData->{ERRMSG};
+		return;
+	}
+
+	my $last_token = $_[0]->YYData->{LAST_TOKEN};
+
+	error($_[0]->YYData, "Syntax error near '$last_token'");
+}
+
+# Parse::Yapp lexer callback. Consumes YYData->{INPUT} destructively and
+# returns one (token-name, value) pair per call; ('', undef) at end of input.
+# Tracks FILE/LINE from cpp linemarkers and LAST_TOKEN for error reporting.
+sub _Lexer($)
+{
+	my($parser)=shift;
+
+	$parser->YYData->{INPUT} or return('',undef);
+
+again:
+	# Skip horizontal whitespace (newlines are handled below for LINE counting).
+	$parser->YYData->{INPUT} =~ s/^[ \t]*//;
+
+	for ($parser->YYData->{INPUT}) {
+		if (/^\#/) {
+			# Linemarker format is described at
+			# http://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html
+			if (s/^\# (\d+) \"(.*?)\"(( \d+){1,4}|)//) {
+				# LINE is stored 0-based; the marker names the NEXT line.
+				$parser->YYData->{LINE} = $1-1;
+				$parser->YYData->{FILE} = $2;
+				goto again;
+			}
+			if (s/^\#line (\d+) \"(.*?)\"( \d+|)//) {
+				$parser->YYData->{LINE} = $1-1;
+				$parser->YYData->{FILE} = $2;
+				goto again;
+			}
+			# Any other preprocessor-style line is discarded.
+			if (s/^(\#.*)$//m) {
+				goto again;
+			}
+		}
+		if (s/^(\n)//) {
+			$parser->YYData->{LINE}++;
+			goto again;
+		}
+		# Double-quoted string literal (non-greedy; no escape handling).
+		if (s/^\"(.*?)\"//) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return('TEXT',$1);
+		}
+		# Integer constant; the trailing non-word char is pushed back via $2.
+		if (s/^(\d+)(\W|$)/$2/) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return('CONSTANT',$1);
+		}
+		# Word: either a reserved IDL keyword (returned as itself)
+		# or a generic IDENTIFIER token.
+		if (s/^([\w_]+)//) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			if ($1 =~
+			    /^(coclass|interface|import|importlib
+			      |include|cpp_quote|typedef
+			      |union|struct|enum|bitmap|pipe
+			      |void|const|unsigned|signed)$/x) {
+				return $1;
+			}
+			return('IDENTIFIER',$1);
+		}
+		# Fallback: any single character becomes its own token type.
+		if (s/^(.)//s) {
+			$parser->YYData->{LAST_TOKEN} = $1;
+			return($1,$1);
+		}
+	}
+}
+
+# Parse IDL source text into the pidl AST.
+# $data is the (already preprocessed) IDL text, $filename is used only
+# for error reporting. Returns the CleanData()-pruned parse tree, or
+# undef on parse failure (errors are reported via _Error).
+sub parse_string
+{
+	my ($data,$filename) = @_;
+
+	my $self = new Parse::Pidl::IDL;
+
+	$self->YYData->{FILE} = $filename;
+	$self->YYData->{INPUT} = $data;
+	$self->YYData->{LINE} = 0;
+	$self->YYData->{LAST_TOKEN} = "NONE";
+
+	my $idl = $self->YYParse( yylex => \&_Lexer, yyerror => \&_Error );
+
+	return CleanData($idl);
+}
+
+# Preprocess an IDL file with cpp (or $CC -E) and parse the result.
+# $filename is the IDL file, $incdirs an arrayref of -I include dirs.
+# The preprocessor is taken from $CPP, then $CC, falling back to "cpp".
+sub parse_file($$)
+{
+	my ($filename,$incdirs) = @_;
+
+	# Temporarily undef $/ so the backtick slurps all output at once.
+	my $saved_delim = $/;
+	undef $/;
+	my $cpp = $ENV{CPP};
+	my $options = "";
+	if (! defined $cpp) {
+		if (defined $ENV{CC}) {
+			$cpp = "$ENV{CC}";
+			$options = "-E";
+		} else {
+			$cpp = "cpp";
+		}
+	}
+	my $includes = join('',map { " -I$_" } @$incdirs);
+	# NOTE(review): $cpp, $includes and $filename are interpolated into a
+	# shell command; only $filename is quoted. Callers must not pass
+	# untrusted filenames or include dirs here.
+	my $data = `$cpp $options -D__PIDL__$includes -xc "$filename"`;
+	$/ = $saved_delim;
+
+	return parse_string($data, $filename);
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/NDR.pm b/tools/pidl/lib/Parse/Pidl/NDR.pm
new file mode 100644
index 0000000..003156e
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/NDR.pm
@@ -0,0 +1,1472 @@
+###################################################
+# Samba4 NDR info tree generator
+# Copyright tridge@samba.org 2000-2003
+# Copyright tpot@samba.org 2001
+# Copyright jelmer@samba.org 2004-2006
+# released under the GNU GPL
+
+=pod
+
+=head1 NAME
+
+Parse::Pidl::NDR - NDR parsing information generator
+
+=head1 DESCRIPTION
+
+Return a table describing the order in which the parts of an element
+should be parsed
+Possible level types:
+ - POINTER
+ - ARRAY
+ - SUBCONTEXT
+ - SWITCH
+ - DATA
+
+=head1 AUTHOR
+
+Jelmer Vernooij <jelmer@samba.org>
+
+=cut
+
+package Parse::Pidl::NDR;
+
+require Exporter;
+use vars qw($VERSION);
+$VERSION = '0.01';
+@ISA = qw(Exporter);
+@EXPORT = qw(GetPrevLevel GetNextLevel ContainsDeferred ContainsPipe ContainsString);
+@EXPORT_OK = qw(GetElementLevelTable ParseElement ReturnTypeElement ValidElement align_type mapToScalar ParseType can_contain_deferred is_charset_array);
+
+use strict;
+use Parse::Pidl qw(warning fatal);
+use Parse::Pidl::Typelist qw(hasType getType typeIs expandAlias mapScalarType is_fixed_size_scalar);
+use Parse::Pidl::Util qw(has_property property_matches);
+
+# Alignment of the built-in scalar types
+# NOTE(review): values 3 and 5 appear to be pseudo-alignments resolved
+# later by the NDR layer (find_largest_alignment comments that 5 maps to
+# 4 for NDR and 8 for NDR64) -- confirm before relying on them as bytes.
+my $scalar_alignment = {
+	'void' => 0,
+	'char' => 1,
+	'int8' => 1,
+	'uint8' => 1,
+	'int16' => 2,
+	'uint16' => 2,
+	'int1632' => 3,
+	'uint1632' => 3,
+	'int32' => 4,
+	'uint32' => 4,
+	'int3264' => 5,
+	'uint3264' => 5,
+	'hyper' => 8,
+	'double' => 8,
+	'pointer' => 8,
+	'dlong' => 4,
+	'udlong' => 4,
+	'udlongr' => 4,
+	'DATA_BLOB' => 4,
+	'string' => 4,
+	'string_array' => 4, #???
+	'time_t' => 4,
+	'uid_t' => 8,
+	'gid_t' => 8,
+	'NTTIME' => 4,
+	'NTTIME_1sec' => 4,
+	'NTTIME_hyper' => 8,
+	'WERROR' => 4,
+	'NTSTATUS' => 4,
+	'COMRESULT' => 4,
+	'dns_string' => 4,
+	'nbt_string' => 4,
+	'wrepl_nbt_name' => 4,
+	'ipv4address' => 4,
+	'ipv6address' => 4, #16?
+	'dnsp_name' => 1,
+	'dnsp_string' => 1
+};
+
+# Build the ordered list of parse "levels" (ARRAY / POINTER / PIPE /
+# SUBCONTEXT / SWITCH / DATA hashes) for one element $e, outermost first.
+# $pointer_default supplies the pointer type for non-leftmost pointers.
+# NOTE: destructively consumes the element's size_is()/length_is() lists
+# and may delete its "string" property while classifying arrays.
+sub GetElementLevelTable($$$)
+{
+	my ($e, $pointer_default, $ms_union) = @_;
+
+	my $order = [];
+	my $is_deferred = 0;
+	my @bracket_array = ();
+	my @length_is = ();
+	my @size_is = ();
+	my $pointer_idx = 0;
+
+	if (has_property($e, "size_is")) {
+		@size_is = split /,/, has_property($e, "size_is");
+	}
+
+	if (has_property($e, "length_is")) {
+		@length_is = split /,/, has_property($e, "length_is");
+	}
+
+	if (defined($e->{ARRAY_LEN})) {
+		@bracket_array = @{$e->{ARRAY_LEN}};
+	}
+
+	# Sanity-check that [out] arguments have enough pointer levels.
+	if (has_property($e, "out")) {
+		my $needptrs = 1;
+
+		if (has_property($e, "string") and not has_property($e, "in")) { $needptrs++; }
+		if ($#bracket_array >= 0) { $needptrs = 0; }
+
+		warning($e, "[out] argument `$e->{NAME}' not a pointer") if ($needptrs > $e->{POINTERS});
+	}
+
+	# Pipes are only legal as function arguments, and with heavy restrictions.
+	my $allow_pipe = ($e->{PARENT}->{TYPE} eq "FUNCTION");
+	my $is_pipe = typeIs($e->{TYPE}, "PIPE");
+
+	if ($is_pipe) {
+		if (not $allow_pipe) {
+			fatal($e, "argument `$e->{NAME}' is a pipe and not allowed on $e->{PARENT}->{TYPE}");
+		}
+
+		if ($e->{POINTERS} > 1) {
+			fatal($e, "$e->{POINTERS} are not allowed on pipe element $e->{NAME}");
+		}
+
+		if ($e->{POINTERS} < 0) {
+			fatal($e, "pipe element $e->{NAME} needs pointer");
+		}
+
+		if ($e->{POINTERS} == 1 and pointer_type($e) ne "ref") {
+			fatal($e, "pointer should be 'ref' on pipe element $e->{NAME}");
+		}
+
+		if (scalar(@size_is) > 0) {
+			fatal($e, "size_is() on pipe element");
+		}
+
+		if (scalar(@length_is) > 0) {
+			fatal($e, "length_is() on pipe element");
+		}
+
+		if (scalar(@bracket_array) > 0) {
+			fatal($e, "brackets on pipe element");
+		}
+
+		if (defined(has_property($e, "subcontext"))) {
+			fatal($e, "subcontext on pipe element");
+		}
+
+		if (has_property($e, "switch_is")) {
+			fatal($e, "switch_is on pipe element");
+		}
+
+		if (can_contain_deferred($e->{TYPE})) {
+			fatal($e, "$e->{TYPE} can_contain_deferred - not allowed on pipe element");
+		}
+	}
+
+	# Parse the [][][][] style array stuff
+	# Iterated outermost-first (bracket dims are stored innermost-first).
+	for my $i (0 .. $#bracket_array) {
+		my $d = $bracket_array[$#bracket_array - $i];
+		my $size = $d;
+		my $length = $d;
+		my $is_surrounding = 0;
+		my $is_varying = 0;
+		my $is_conformant = 0;
+		my $is_string = 0;
+		my $is_fixed = 0;
+		my $is_inline = 0;
+		my $is_to_null = 0;
+
+		# "*" marks a conformant dimension; its size comes from size_is()
+		# (consumed here) or, for strings, from the string property.
+		if ($d eq "*") {
+			$is_conformant = 1;
+			if ($size = shift @size_is) {
+				if ($e->{POINTERS} < 1 and has_property($e, "string")) {
+					$is_string = 1;
+					delete($e->{PROPERTIES}->{string});
+				}
+			} elsif ((scalar(@size_is) == 0) and has_property($e, "string")) {
+				$is_string = 1;
+				delete($e->{PROPERTIES}->{string});
+			} else {
+				fatal($e, "Must specify size_is() for conformant array!")
+			}
+
+			if (($length = shift @length_is) or $is_string) {
+				$is_varying = 1;
+			} else {
+				$length = $size;
+			}
+
+			# A conformant array as the last member of a struct/union
+			# "surrounds" the containing type.
+			if ($e == $e->{PARENT}->{ELEMENTS}[-1]
+			    and $e->{PARENT}->{TYPE} ne "FUNCTION") {
+				$is_surrounding = 1;
+			}
+		}
+
+		# Non-conformant dims are fixed (constant size) or inline (expression).
+		$is_fixed = 1 if (not $is_conformant and Parse::Pidl::Util::is_constant($size));
+		$is_inline = 1 if (not $is_conformant and not Parse::Pidl::Util::is_constant($size));
+
+		# A fixed-size outermost string dimension is treated as varying.
+		if ($i == 0 and $is_fixed and has_property($e, "string")) {
+			$is_fixed = 0;
+			$is_varying = 1;
+			$is_string = 1;
+			delete($e->{PROPERTIES}->{string});
+		}
+
+		if (has_property($e, "to_null")) {
+			$is_to_null = 1;
+		}
+
+		push (@$order, {
+			TYPE => "ARRAY",
+			SIZE_IS => $size,
+			LENGTH_IS => $length,
+			IS_DEFERRED => $is_deferred,
+			IS_SURROUNDING => $is_surrounding,
+			IS_ZERO_TERMINATED => $is_string,
+			IS_VARYING => $is_varying,
+			IS_CONFORMANT => $is_conformant,
+			IS_FIXED => $is_fixed,
+			IS_INLINE => $is_inline,
+			IS_TO_NULL => $is_to_null
+		});
+	}
+
+	# Next, all the pointers
+	foreach my $i (1..$e->{POINTERS}) {
+		my $level = "EMBEDDED";
+		# Top level "ref" pointers do not have a referrent identifier
+		$level = "TOP" if ($i == 1 and $e->{PARENT}->{TYPE} eq "FUNCTION");
+
+		my $pt;
+		#
+		# Only the first level gets the pointer type from the
+		# pointer property, the others get them from
+		# the pointer_default() interface property
+		#
+		# see http://msdn2.microsoft.com/en-us/library/aa378984(VS.85).aspx
+		# (Here they talk about the rightmost pointer, but testing shows
+		# they mean the leftmost pointer.)
+		#
+		# --metze
+		#
+		$pt = pointer_type($e);
+		if ($i > 1) {
+			$is_deferred = 1 if ($pt ne "ref" and $e->{PARENT}->{TYPE} eq "FUNCTION");
+			$pt = $pointer_default;
+		}
+
+		push (@$order, {
+			TYPE => "POINTER",
+			POINTER_TYPE => $pt,
+			POINTER_INDEX => $pointer_idx,
+			IS_DEFERRED => "$is_deferred",
+			LEVEL => $level
+		});
+
+		warning($e, "top-level \[out\] pointer `$e->{NAME}' is not a \[ref\] pointer")
+			if ($i == 1 and $pt ne "ref" and
+				$e->{PARENT}->{TYPE} eq "FUNCTION" and
+				not has_property($e, "in"));
+
+		$pointer_idx++;
+
+		# everything that follows will be deferred
+		$is_deferred = 1 if ($level ne "TOP");
+
+		# A pointer level may itself carry a conformant/varying array
+		# dimension taken from the remaining size_is()/length_is() entries.
+		my $array_size = shift @size_is;
+		my $array_length;
+		my $is_varying;
+		my $is_conformant;
+		my $is_string = 0;
+		if ($array_size) {
+			$is_conformant = 1;
+			if ($array_length = shift @length_is) {
+				$is_varying = 1;
+			} else {
+				$array_length = $array_size;
+				$is_varying =0;
+			}
+		}
+
+		# The innermost pointer of a [string] element becomes the string array.
+		if (scalar(@size_is) == 0 and has_property($e, "string") and
+		    $i == $e->{POINTERS}) {
+			$is_string = 1;
+			$is_varying = $is_conformant = has_property($e, "noheader")?0:1;
+			delete($e->{PROPERTIES}->{string});
+		}
+
+		if ($array_size or $is_string) {
+			push (@$order, {
+				TYPE => "ARRAY",
+				SIZE_IS => $array_size,
+				LENGTH_IS => $array_length,
+				IS_DEFERRED => $is_deferred,
+				IS_SURROUNDING => 0,
+				IS_ZERO_TERMINATED => $is_string,
+				IS_VARYING => $is_varying,
+				IS_CONFORMANT => $is_conformant,
+				IS_FIXED => 0,
+				IS_INLINE => 0
+			});
+
+			$is_deferred = 0;
+		}
+	}
+
+	# Pipes stop here: the PIPE level replaces SUBCONTEXT/SWITCH/DATA.
+	if ($is_pipe) {
+		push (@$order, {
+			TYPE => "PIPE",
+			IS_DEFERRED => 0,
+			CONTAINS_DEFERRED => 0,
+		});
+
+		my $i = 0;
+		foreach (@$order) { $_->{LEVEL_INDEX} = $i; $i+=1; }
+
+		return $order;
+	}
+
+	if (defined(has_property($e, "subcontext"))) {
+		my $hdr_size = has_property($e, "subcontext");
+		my $subsize = has_property($e, "subcontext_size");
+		if (not defined($subsize)) {
+			$subsize = -1;
+		}
+
+		push (@$order, {
+			TYPE => "SUBCONTEXT",
+			HEADER_SIZE => $hdr_size,
+			SUBCONTEXT_SIZE => $subsize,
+			IS_DEFERRED => $is_deferred,
+			COMPRESSION => has_property($e, "compression"),
+		});
+	}
+
+	if (my $switch = has_property($e, "switch_is")) {
+		push (@$order, {
+			TYPE => "SWITCH",
+			SWITCH_IS => $switch,
+			IS_DEFERRED => $is_deferred
+		});
+	}
+
+	# Any size_is/length_is entries left over were not matched by an
+	# array dimension or pointer level above.
+	if (scalar(@size_is) > 0) {
+		fatal($e, "size_is() on non-array element");
+	}
+
+	if (scalar(@length_is) > 0) {
+		fatal($e, "length_is() on non-array element");
+	}
+
+	if (has_property($e, "string")) {
+		fatal($e, "string() attribute on non-array element");
+	}
+
+	push (@$order, {
+		TYPE => "DATA",
+		DATA_TYPE => $e->{TYPE},
+		IS_DEFERRED => $is_deferred,
+		CONTAINS_DEFERRED => can_contain_deferred($e->{TYPE}),
+		IS_SURROUNDING => 0 #FIXME
+	});
+
+	# Number the levels in parse order.
+	my $i = 0;
+	foreach (@$order) { $_->{LEVEL_INDEX} = $i; $i+=1; }
+
+	return $order;
+}
+
+# Level table for a typedef: a single TYPEDEF level.
+# ($data, $pointer_default, $ms_union are accepted for signature symmetry
+# with GetElementLevelTable but unused here.)
+sub GetTypedefLevelTable($$$$)
+{
+	my ($e, $data, $pointer_default, $ms_union) = @_;
+
+	my $order = [];
+
+	push (@$order, {
+		TYPE => "TYPEDEF"
+	});
+
+	my $i = 0;
+	foreach (@$order) { $_->{LEVEL_INDEX} = $i; $i+=1; }
+
+	return $order;
+}
+
+#####################################################################
+# see if a type contains any deferred data
+sub can_contain_deferred($)
+{
+	# Forward declaration for the recursive prototype check.
+	sub can_contain_deferred($);
+	my ($type) = @_;
+
+	return 1 unless (hasType($type)); # assume the worst
+
+	$type = getType($type);
+
+	# Scalars never carry deferred data.
+	return 0 if (Parse::Pidl::Typelist::is_scalar($type));
+
+	# Typedefs delegate to their underlying type.
+	return can_contain_deferred($type->{DATA}) if ($type->{TYPE} eq "TYPEDEF");
+
+	return 0 unless defined($type->{ELEMENTS});
+
+	# Any pointer member, or any member whose type can, makes it possible.
+	foreach (@{$type->{ELEMENTS}}) {
+		return 1 if ($_->{POINTERS});
+		return 1 if (can_contain_deferred ($_->{TYPE}));
+	}
+
+	return 0;
+}
+
+# Return the IDL pointer-type name set on element $e via its properties,
+# or undef if the element has no pointers or no pointer-type property.
+# Checked in fixed precedence order: ref, ptr (full), sptr, unique,
+# relative, relative_short, ignore.
+sub pointer_type($)
+{
+	my $e = shift;
+
+	return undef unless $e->{POINTERS};
+
+	return "ref" if (has_property($e, "ref"));
+	return "full" if (has_property($e, "ptr"));
+	return "sptr" if (has_property($e, "sptr"));
+	return "unique" if (has_property($e, "unique"));
+	return "relative" if (has_property($e, "relative"));
+	return "relative_short" if (has_property($e, "relative_short"));
+	return "ignore" if (has_property($e, "ignore"));
+
+	return undef;
+}
+
+#####################################################################
+# work out the correct alignment for a structure or union:
+# the maximum alignment over all member elements (minimum 1).
+sub find_largest_alignment($)
+{
+ my $s = shift;
+
+ my $align = 1;
+ for my $e (@{$s->{ELEMENTS}}) {
+ my $a = 1;
+
+ if ($e->{POINTERS}) {
+ # this is a hack for NDR64
+ # the NDR layer translates this into
+ # an alignment of 4 for NDR and 8 for NDR64
+ $a = 5;
+ } elsif (has_property($e, "subcontext")) {
+ # subcontext data is treated as an opaque blob: byte alignment
+ $a = 1;
+ } elsif (has_property($e, "transmit_as")) {
+ # align as the on-the-wire (transmitted) type
+ $a = align_type($e->{PROPERTIES}->{transmit_as});
+ } else {
+ $a = align_type($e->{TYPE});
+ }
+
+ $align = $a if ($align < $a);
+ }
+
+ return $align;
+}
+
+#####################################################################
+# align a type: return the NDR alignment for a type name or type hash.
+# The special value 5 is a pseudo-alignment that the NDR layer maps to
+# 4 under NDR and 8 under NDR64 (see find_largest_alignment above).
+sub align_type($)
+{
+ # pre-declared prototype for the recursive calls below
+ sub align_type($);
+ my ($e) = @_;
+
+ if (ref($e) eq "HASH" and $e->{TYPE} eq "SCALAR") {
+ return $scalar_alignment->{$e->{NAME}};
+ }
+
+ # empty union arm: contributes no alignment
+ return 0 if ($e eq "EMPTY");
+
+ unless (hasType($e)) {
+ # it must be an external type - all we can do is guess
+ # warning($e, "assuming alignment of unknown type '$e' is 4");
+ return 4;
+ }
+
+ my $dt = getType($e);
+
+ if ($dt->{TYPE} eq "TYPEDEF") {
+ return align_type($dt->{DATA});
+ } elsif ($dt->{TYPE} eq "CONFORMANCE") {
+ return $dt->{DATA}->{ALIGN};
+ } elsif ($dt->{TYPE} eq "ENUM") {
+ # enums/bitmaps align as their underlying integer type
+ return align_type(Parse::Pidl::Typelist::enum_type_fn($dt));
+ } elsif ($dt->{TYPE} eq "BITMAP") {
+ return align_type(Parse::Pidl::Typelist::bitmap_type_fn($dt));
+ } elsif (($dt->{TYPE} eq "STRUCT") or ($dt->{TYPE} eq "UNION")) {
+ # Struct/union without body: assume 4
+ return 4 unless (defined($dt->{ELEMENTS}));
+ return find_largest_alignment($dt);
+ } elsif (($dt->{TYPE} eq "PIPE")) {
+ return 5;
+ }
+
+ die("Unknown data type type $dt->{TYPE}");
+}
+
+# Parse one element (struct member / function argument) into its NDR
+# representation: expand type aliases, recurse into inline (anonymous)
+# type definitions, and compute the LEVELS chain describing how the
+# element is marshalled.
+sub ParseElement($$$)
+{
+ my ($e, $pointer_default, $ms_union) = @_;
+
+ $e->{TYPE} = expandAlias($e->{TYPE});
+
+ # an inline type definition (hash) must itself be parsed first
+ if (ref($e->{TYPE}) eq "HASH") {
+ $e->{TYPE} = ParseType($e->{TYPE}, $pointer_default, $ms_union);
+ }
+
+ return {
+ NAME => $e->{NAME},
+ TYPE => $e->{TYPE},
+ PROPERTIES => $e->{PROPERTIES},
+ LEVELS => GetElementLevelTable($e, $pointer_default, $ms_union),
+ # represent_as() overrides the in-memory representation type
+ REPRESENTATION_TYPE => ($e->{PROPERTIES}->{represent_as} or $e->{TYPE}),
+ ALIGN => align_type($e->{TYPE}),
+ ORIGINAL => $e
+ };
+}
+
+# Parse a struct into its NDR representation. Detects the "surrounding"
+# element (a conformant array/string that must be the last member, whose
+# size preamble surrounds the whole struct) and computes the alignment.
+sub ParseStruct($$$)
+{
+ my ($struct, $pointer_default, $ms_union) = @_;
+ my @elements = ();
+ my $surrounding = undef;
+
+ # forward declaration (no body): return a skeleton result
+ return {
+ TYPE => "STRUCT",
+ NAME => $struct->{NAME},
+ SURROUNDING_ELEMENT => undef,
+ ELEMENTS => undef,
+ PROPERTIES => $struct->{PROPERTIES},
+ ORIGINAL => $struct,
+ ALIGN => undef
+ } unless defined($struct->{ELEMENTS});
+
+ CheckPointerTypes($struct, $pointer_default);
+
+ foreach my $x (@{$struct->{ELEMENTS}})
+ {
+ my $e = ParseElement($x, $pointer_default, $ms_union);
+ # only the last member may be conformant (surrounding)
+ if ($x != $struct->{ELEMENTS}[-1] and
+ $e->{LEVELS}[0]->{IS_SURROUNDING}) {
+ fatal($x, "conformant member not at end of struct");
+ }
+ push @elements, $e;
+ }
+
+ # NOTE(review): assumes ELEMENTS, when defined, is non-empty;
+ # $e stays undef for an empty list (the guards below tolerate that).
+ my $e = $elements[-1];
+ if (defined($e) and defined($e->{LEVELS}[0]->{IS_SURROUNDING}) and
+ $e->{LEVELS}[0]->{IS_SURROUNDING}) {
+ $surrounding = $e;
+ }
+
+ # conformant strings also surround the struct
+ if (defined $e->{TYPE} && $e->{TYPE} eq "string"
+ && property_matches($e, "flag", ".*LIBNDR_FLAG_STR_CONFORMANT.*")) {
+ $surrounding = $struct->{ELEMENTS}[-1];
+ }
+
+ my $align = undef;
+ if ($struct->{NAME}) {
+ # only named structs can be looked up for alignment
+ $align = align_type($struct->{NAME});
+ }
+
+ return {
+ TYPE => "STRUCT",
+ NAME => $struct->{NAME},
+ SURROUNDING_ELEMENT => $surrounding,
+ ELEMENTS => \@elements,
+ PROPERTIES => $struct->{PROPERTIES},
+ ORIGINAL => $struct,
+ ALIGN => $align
+ };
+}
+
+# Parse a discriminated union into its NDR representation.
+# NOTE(review): the prototype says ($$) but three arguments are unpacked;
+# harmless because ParseType() invokes this through a code ref, where
+# Perl prototypes are not enforced.
+sub ParseUnion($$)
+{
+ my ($e, $pointer_default, $ms_union) = @_;
+ my @elements = ();
+ my $is_ms_union = $ms_union;
+ $is_ms_union = 1 if has_property($e, "ms_union");
+ my $hasdefault = 0;
+ # discriminant defaults to uint32; nodiscriminant removes it entirely
+ my $switch_type = has_property($e, "switch_type");
+ unless (defined($switch_type)) { $switch_type = "uint32"; }
+ if (has_property($e, "nodiscriminant")) { $switch_type = undef; }
+
+ # forward declaration (no body): return a skeleton result
+ return {
+ TYPE => "UNION",
+ NAME => $e->{NAME},
+ SWITCH_TYPE => $switch_type,
+ ELEMENTS => undef,
+ PROPERTIES => $e->{PROPERTIES},
+ HAS_DEFAULT => $hasdefault,
+ IS_MS_UNION => $is_ms_union,
+ ORIGINAL => $e,
+ ALIGN => undef
+ } unless defined($e->{ELEMENTS});
+
+ CheckPointerTypes($e, $pointer_default);
+
+ foreach my $x (@{$e->{ELEMENTS}})
+ {
+ my $t;
+ if ($x->{TYPE} eq "EMPTY") {
+ $t = { TYPE => "EMPTY" };
+ } else {
+ $t = ParseElement($x, $pointer_default, $ms_union);
+ }
+ # every arm must carry either default or a case label
+ if (has_property($x, "default")) {
+ $t->{CASE} = "default";
+ $hasdefault = 1;
+ } elsif (defined($x->{PROPERTIES}->{case})) {
+ $t->{CASE} = "case $x->{PROPERTIES}->{case}";
+ } else {
+ die("Union element $x->{NAME} has neither default nor case property");
+ }
+ push @elements, $t;
+ }
+
+ my $align = undef;
+ if ($e->{NAME}) {
+ $align = align_type($e->{NAME});
+ }
+
+ return {
+ TYPE => "UNION",
+ NAME => $e->{NAME},
+ SWITCH_TYPE => $switch_type,
+ ELEMENTS => \@elements,
+ PROPERTIES => $e->{PROPERTIES},
+ HAS_DEFAULT => $hasdefault,
+ IS_MS_UNION => $is_ms_union,
+ ORIGINAL => $e,
+ ALIGN => $align
+ };
+}
+
+# Parse an enum: records its underlying integer base type alongside the
+# original element list. (Prototype ($$) vs three args: see ParseUnion —
+# calls arrive through a code ref, so the prototype is not enforced.)
+sub ParseEnum($$)
+{
+ my ($e, $pointer_default, $ms_union) = @_;
+
+ return {
+ TYPE => "ENUM",
+ NAME => $e->{NAME},
+ BASE_TYPE => Parse::Pidl::Typelist::enum_type_fn($e),
+ ELEMENTS => $e->{ELEMENTS},
+ PROPERTIES => $e->{PROPERTIES},
+ ORIGINAL => $e
+ };
+}
+
+# Parse a bitmap: like ParseEnum, but the base type comes from the
+# bitmap width properties (bitmap8bit/16bit/32bit/64bit).
+sub ParseBitmap($$$)
+{
+ my ($e, $pointer_default, $ms_union) = @_;
+
+ return {
+ TYPE => "BITMAP",
+ NAME => $e->{NAME},
+ BASE_TYPE => Parse::Pidl::Typelist::bitmap_type_fn($e),
+ ELEMENTS => $e->{ELEMENTS},
+ PROPERTIES => $e->{PROPERTIES},
+ ORIGINAL => $e
+ };
+}
+
+# Parse a pipe type. A pipe's payload is represented as a generated
+# "<name>_chunk" struct (count + array); every level of the chunk's
+# array element must be a fixed-size scalar.
+sub ParsePipe($$$)
+{
+ my ($pipe, $pointer_default, $ms_union) = @_;
+
+ my $pname = $pipe->{NAME};
+ $pname = $pipe->{PARENT}->{NAME} unless defined $pname;
+
+ # inherit properties from the parent typedef when the pipe has none
+ if (not defined($pipe->{PROPERTIES})
+ and defined($pipe->{PARENT}->{PROPERTIES})) {
+ $pipe->{PROPERTIES} = $pipe->{PARENT}->{PROPERTIES};
+ }
+
+ if (ref($pipe->{DATA}) eq "HASH") {
+ if (not defined($pipe->{DATA}->{PROPERTIES})
+ and defined($pipe->{PROPERTIES})) {
+ $pipe->{DATA}->{PROPERTIES} = $pipe->{PROPERTIES};
+ }
+ }
+
+ my $struct = ParseStruct($pipe->{DATA}, $pointer_default, $ms_union);
+ # 5 = pseudo-alignment: 4 under NDR, 8 under NDR64 (see align_type)
+ $struct->{ALIGN} = 5;
+ $struct->{NAME} = "$pname\_chunk";
+
+ # 'count' is element [0] and 'array' [1]
+ my $e = $struct->{ELEMENTS}[1];
+ # level [0] is of type "ARRAY"
+ my $l = $e->{LEVELS}[1];
+
+ # here we check that pipe elements have a fixed size type
+ while (defined($l)) {
+ my $cl = $l;
+ $l = GetNextLevel($e, $cl);
+ if ($cl->{TYPE} ne "DATA") {
+ fatal($pipe, el_name($pipe) . ": pipe contains non DATA level");
+ }
+
+ # for now we only support scalars
+ next if is_fixed_size_scalar($cl->{DATA_TYPE});
+
+ fatal($pipe, el_name($pipe) . ": pipe contains non fixed size type[$cl->{DATA_TYPE}]");
+ }
+
+ return {
+ TYPE => "PIPE",
+ NAME => $pipe->{NAME},
+ DATA => $struct,
+ PROPERTIES => $pipe->{PROPERTIES},
+ ORIGINAL => $pipe,
+ };
+}
+
+# Dispatch a raw type node to the matching Parse* handler by its TYPE tag.
+# Dies implicitly (undef code ref) on an unknown tag.
+sub ParseType($$$)
+{
+ my ($d, $pointer_default, $ms_union) = @_;
+
+ my $data = {
+ STRUCT => \&ParseStruct,
+ UNION => \&ParseUnion,
+ ENUM => \&ParseEnum,
+ BITMAP => \&ParseBitmap,
+ TYPEDEF => \&ParseTypedef,
+ PIPE => \&ParsePipe,
+ }->{$d->{TYPE}}->($d, $pointer_default, $ms_union);
+
+ return $data;
+}
+
+# Parse a typedef. Properties on the underlying definition are hoisted
+# onto the typedef when the typedef itself has none; alignment is
+# recomputed by name after the inner type is parsed.
+sub ParseTypedef($$)
+{
+ my ($d, $pointer_default, $ms_union) = @_;
+
+ my $data;
+
+ if (ref($d->{DATA}) eq "HASH") {
+ if (defined($d->{DATA}->{PROPERTIES})
+ and not defined($d->{PROPERTIES})) {
+ $d->{PROPERTIES} = $d->{DATA}->{PROPERTIES};
+ }
+
+ $data = ParseType($d->{DATA}, $pointer_default, $ms_union);
+ $data->{ALIGN} = align_type($d->{NAME});
+ } else {
+ # alias of a named type: resolve it from the type registry
+ $data = getType($d->{DATA});
+ }
+
+ return {
+ NAME => $d->{NAME},
+ TYPE => $d->{TYPE},
+ PROPERTIES => $d->{PROPERTIES},
+ LEVELS => GetTypedefLevelTable($d, $data, $pointer_default, $ms_union),
+ DATA => $data,
+ ORIGINAL => $d
+ };
+}
+
+# Constants need no NDR transformation: pass the node through unchanged.
+sub ParseConst($$)
+{
+ my ($ndr,$d) = @_;
+
+ return $d;
+}
+
+# Parse one RPC function: allocate its opnum (unless [noopnum]), parse
+# its elements with in/out DIRECTION tags, and record the return type.
+# $opnum is a scalar ref holding the interface's running opnum counter.
+sub ParseFunction($$$$)
+{
+ my ($ndr,$d,$opnum,$ms_union) = @_;
+ my @elements = ();
+ my $rettype = undef;
+ my $thisopnum = undef;
+
+ # function arguments default to "ref" pointers
+ CheckPointerTypes($d, "ref");
+
+ if (not defined($d->{PROPERTIES}{noopnum})) {
+ $thisopnum = ${$opnum};
+ ${$opnum}++;
+ }
+
+ foreach my $x (@{$d->{ELEMENTS}}) {
+ my $e = ParseElement($x, $ndr->{PROPERTIES}->{pointer_default}, $ms_union);
+ push (@{$e->{DIRECTION}}, "in") if (has_property($x, "in"));
+ push (@{$e->{DIRECTION}}, "out") if (has_property($x, "out"));
+
+ push (@elements, $e);
+ }
+
+ # "void" means no return value at all
+ if ($d->{RETURN_TYPE} ne "void") {
+ $rettype = expandAlias($d->{RETURN_TYPE});
+ }
+
+ return {
+ NAME => $d->{NAME},
+ TYPE => "FUNCTION",
+ OPNUM => $thisopnum,
+ RETURN_TYPE => $rettype,
+ PROPERTIES => $d->{PROPERTIES},
+ ELEMENTS => \@elements,
+ ORIGINAL => $d
+ };
+}
+
+# Synthesize a pseudo-element named "result" for a function's return
+# value, so the return type can be marshalled like a regular element.
+# Returns undef for functions with no return type.
+sub ReturnTypeElement($)
+{
+ my ($fn) = @_;
+
+ return undef unless defined($fn->{RETURN_TYPE});
+
+ my $e = {
+ "NAME" => "result",
+ "TYPE" => $fn->{RETURN_TYPE},
+ "PROPERTIES" => undef,
+ "POINTERS" => 0,
+ "ARRAY_LEN" => [],
+ "FILE" => $fn->{FILE},
+ "LINE" => $fn->{LINE},
+ };
+
+ return ParseElement($e, 0, 0);
+}
+
+# Apply the default pointer class ($default, e.g. "unique" or "ref") to
+# every pointer element that carries no explicit pointer property.
+sub CheckPointerTypes($$)
+{
+ my ($s,$default) = @_;
+
+ return unless defined($s->{ELEMENTS});
+
+ foreach my $e (@{$s->{ELEMENTS}}) {
+ if ($e->{POINTERS} and not defined(pointer_type($e))) {
+ $e->{PROPERTIES}->{$default} = '1';
+ }
+ }
+}
+
+# Recursively collect named types defined inline inside $t's elements,
+# appending them to the list ref $l. Enums and bitmaps have no nested
+# type definitions and are skipped.
+sub FindNestedTypes($$)
+{
+ # pre-declared prototype for the recursive call below
+ sub FindNestedTypes($$);
+ my ($l, $t) = @_;
+
+ return unless defined($t->{ELEMENTS});
+ return if ($t->{TYPE} eq "ENUM");
+ return if ($t->{TYPE} eq "BITMAP");
+
+ foreach (@{$t->{ELEMENTS}}) {
+ if (ref($_->{TYPE}) eq "HASH") {
+ # only named inline types are collected
+ push (@$l, $_->{TYPE}) if (defined($_->{TYPE}->{NAME}));
+ FindNestedTypes($l, $_->{TYPE});
+ }
+ }
+}
+
+# Parse one IDL interface into its NDR representation: functions (with
+# opnums), constants, types (including nested ones), the interface
+# version, and its endpoint list.
+sub ParseInterface($)
+{
+ my $idl = shift;
+ my @types = ();
+ my @consts = ();
+ my @functions = ();
+ my @endpoints;
+ my $opnum = 0;
+ my $version;
+ my $ms_union = 0;
+ $ms_union = 1 if has_property($idl, "ms_union");
+
+ if (not has_property($idl, "pointer_default")) {
+ # MIDL defaults to "ptr" in DCE compatible mode (/osf)
+ # and "unique" in Microsoft Extensions mode (default)
+ $idl->{PROPERTIES}->{pointer_default} = "unique";
+ }
+
+ foreach my $d (@{$idl->{DATA}}) {
+ if ($d->{TYPE} eq "FUNCTION") {
+ push (@functions, ParseFunction($idl, $d, \$opnum, $ms_union));
+ } elsif ($d->{TYPE} eq "CONST") {
+ push (@consts, ParseConst($idl, $d));
+ } else {
+ push (@types, ParseType($d, $idl->{PROPERTIES}->{pointer_default}, $ms_union));
+ FindNestedTypes(\@types, $d);
+ }
+ }
+
+ $version = "0.0";
+
+ if(defined $idl->{PROPERTIES}->{version}) {
+ my @if_version = split(/\./, $idl->{PROPERTIES}->{version});
+ if ($if_version[0] == $idl->{PROPERTIES}->{version}) {
+ # no minor part: keep the version string as-is
+ $version = $idl->{PROPERTIES}->{version};
+ } else {
+ # pack major.minor into one integer, minor in the high 16 bits
+ $version = $if_version[1] << 16 | $if_version[0];
+ }
+ }
+
+ # If no endpoint is set, default to the interface name as a named pipe
+ if (!defined $idl->{PROPERTIES}->{endpoint}) {
+ push @endpoints, "\"ncacn_np:[\\\\pipe\\\\" . $idl->{NAME} . "]\"";
+ } else {
+ @endpoints = split /,/, $idl->{PROPERTIES}->{endpoint};
+ }
+
+ return {
+ NAME => $idl->{NAME},
+ UUID => lc(has_property($idl, "uuid")),
+ VERSION => $version,
+ TYPE => "INTERFACE",
+ PROPERTIES => $idl->{PROPERTIES},
+ FUNCTIONS => \@functions,
+ CONSTS => \@consts,
+ TYPES => \@types,
+ ENDPOINTS => \@endpoints,
+ ORIGINAL => $idl
+ };
+}
+
+# Convert a IDL tree to a NDR tree
+# Gives a result tree describing all that's necessary for easily generating
+# NDR parsers / generators.
+# Validates the IDL first (Validate dies via fatal() on errors), then
+# converts each INTERFACE node; CPP_QUOTE and IMPORT pass through as-is.
+sub Parse($)
+{
+ my $idl = shift;
+
+ return undef unless (defined($idl));
+
+ Parse::Pidl::NDR::Validate($idl);
+
+ my @ndr = ();
+
+ foreach (@{$idl}) {
+ ($_->{TYPE} eq "CPP_QUOTE") && push(@ndr, $_);
+ ($_->{TYPE} eq "INTERFACE") && push(@ndr, ParseInterface($_));
+ ($_->{TYPE} eq "IMPORT") && push(@ndr, $_);
+ }
+
+ return \@ndr;
+}
+
+# Return the level immediately after $fl in element $e's LEVELS chain,
+# or undef when $fl is the last level (comparison is by reference identity).
+sub GetNextLevel($$)
+{
+ my $e = shift;
+ my $fl = shift;
+
+ my $seen = 0;
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ return $l if ($seen);
+ ($seen = 1) if ($l == $fl);
+ }
+
+ return undef;
+}
+
+# Return the level immediately before $fl in element $e's LEVELS chain,
+# or undef when $fl is the first level (or not found).
+sub GetPrevLevel($$)
+{
+ my ($e,$fl) = @_;
+ my $prev = undef;
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ (return $prev) if ($l == $fl);
+ $prev = $l;
+ }
+
+ return undef;
+}
+
+# Heuristic: does element $e represent string data? True for
+# null-terminated flags, fixed/inline charset arrays, zero-terminated
+# array levels, and DOS charsets.
+sub ContainsString($)
+{
+ my ($e) = @_;
+
+ if (property_matches($e, "flag", ".*STR_NULLTERM.*")) {
+ return 1;
+ }
+ if (exists($e->{LEVELS}) and $e->{LEVELS}->[0]->{TYPE} eq "ARRAY" and
+ ($e->{LEVELS}->[0]->{IS_FIXED} or $e->{LEVELS}->[0]->{IS_INLINE}) and
+ has_property($e, "charset"))
+ {
+ return 1;
+ }
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ return 1 if ($l->{TYPE} eq "ARRAY" and $l->{IS_ZERO_TERMINATED});
+ }
+ if (property_matches($e, "charset", ".*DOS.*")) {
+ return 1;
+ }
+
+ return 0;
+}
+
+# True if level $l of element $e, or any later level, is deferred or
+# contains deferred data.
+sub ContainsDeferred($$)
+{
+ my ($e,$l) = @_;
+
+ return 1 if ($l->{CONTAINS_DEFERRED});
+
+ while ($l = GetNextLevel($e,$l))
+ {
+ return 1 if ($l->{IS_DEFERRED});
+ return 1 if ($l->{CONTAINS_DEFERRED});
+ }
+
+ return 0;
+}
+
+# True if level $l of element $e, or any later level, is a PIPE level.
+sub ContainsPipe($$)
+{
+ my ($e,$l) = @_;
+
+ return 1 if ($l->{TYPE} eq "PIPE");
+
+ while ($l = GetNextLevel($e,$l))
+ {
+ return 1 if ($l->{TYPE} eq "PIPE");
+ }
+
+ return 0;
+}
+
+# Human-readable, parent-qualified element name for diagnostics
+# ("parent.name", or grandparent when the parent is anonymous;
+# "<ANONYMOUS>" stands in for a missing NAME).
+sub el_name($)
+{
+ my $e = shift;
+ my $name = "<ANONYMOUS>";
+
+ $name = $e->{NAME} if defined($e->{NAME});
+
+ if (defined($e->{PARENT}) and defined($e->{PARENT}->{NAME})) {
+ return "$e->{PARENT}->{NAME}.$name";
+ }
+
+ # parent exists but is anonymous: try the grandparent's name
+ if (defined($e->{PARENT}) and
+ defined($e->{PARENT}->{PARENT}) and
+ defined($e->{PARENT}->{PARENT}->{NAME})) {
+ return "$e->{PARENT}->{PARENT}->{NAME}.$name";
+ }
+
+ return $name;
+}
+
+###################################
+# find a sibling var in a structure
+# ($name may carry a leading '*' dereference, which is stripped);
+# returns the sibling element hash or undef.
+sub find_sibling($$)
+{
+ my($e,$name) = @_;
+ my($fn) = $e->{PARENT};
+
+ if ($name =~ /\*(.*)/) {
+ $name = $1;
+ }
+
+ for my $e2 (@{$fn->{ELEMENTS}}) {
+ return $e2 if ($e2->{NAME} eq $name);
+ }
+
+ return undef;
+}
+
+# Registry of every recognized IDL property, mapped to the declaration
+# kinds it may legally appear on. Consulted by ValidProperties(): an
+# unknown key warns, a key on a disallowed kind is fatal.
+my %property_list = (
+	# interface
+	"helpstring" => ["INTERFACE", "FUNCTION"],
+	"version" => ["INTERFACE"],
+	"uuid" => ["INTERFACE"],
+	"endpoint" => ["INTERFACE"],
+	"pointer_default" => ["INTERFACE"],
+	"helper" => ["INTERFACE"],
+	"pyhelper" => ["INTERFACE"],
+	"authservice" => ["INTERFACE"],
+	"restricted" => ["INTERFACE"],
+	"no_srv_register" => ["INTERFACE"],
+
+	# dcom
+	"object" => ["INTERFACE"],
+	"local" => ["INTERFACE", "FUNCTION"],
+	"iid_is" => ["ELEMENT"],
+	"call_as" => ["FUNCTION"],
+	"idempotent" => ["FUNCTION"],
+
+	# function
+	"noopnum" => ["FUNCTION"],
+	"in" => ["ELEMENT"],
+	"out" => ["ELEMENT"],
+
+	# pointer
+	"ref" => ["ELEMENT", "TYPEDEF"],
+	"ptr" => ["ELEMENT", "TYPEDEF"],
+	"unique" => ["ELEMENT", "TYPEDEF"],
+	"ignore" => ["ELEMENT"],
+	"relative" => ["ELEMENT", "TYPEDEF"],
+	"relative_short" => ["ELEMENT", "TYPEDEF"],
+	"null_is_ffffffff" => ["ELEMENT"],
+	"relative_base" => ["TYPEDEF", "STRUCT", "UNION"],
+
+	"gensize" => ["TYPEDEF", "STRUCT", "UNION"],
+	"value" => ["ELEMENT"],
+	"flag" => ["ELEMENT", "TYPEDEF", "STRUCT", "UNION", "ENUM", "BITMAP", "PIPE"],
+
+	# generic
+	"public" => ["FUNCTION", "TYPEDEF", "STRUCT", "UNION", "ENUM", "BITMAP", "PIPE"],
+	"nopush" => ["FUNCTION", "TYPEDEF", "STRUCT", "UNION", "ENUM", "BITMAP", "PIPE"],
+	"nopull" => ["FUNCTION", "TYPEDEF", "STRUCT", "UNION", "ENUM", "BITMAP", "PIPE"],
+	"nosize" => ["FUNCTION", "TYPEDEF", "STRUCT", "UNION", "ENUM", "BITMAP"],
+	"noprint" => ["FUNCTION", "TYPEDEF", "STRUCT", "UNION", "ENUM", "BITMAP", "ELEMENT", "PIPE"],
+	"nopython" => ["FUNCTION", "TYPEDEF", "STRUCT", "UNION", "ENUM", "BITMAP"],
+	"todo" => ["FUNCTION"],
+	"skip" => ["ELEMENT"],
+	"skip_noinit" => ["ELEMENT"],
+
+	# union
+	"switch_is" => ["ELEMENT"],
+	"switch_type" => ["ELEMENT", "UNION"],
+	"nodiscriminant" => ["UNION"],
+	"ms_union" => ["INTERFACE", "UNION"],
+	"case" => ["ELEMENT"],
+	"default" => ["ELEMENT"],
+
+	"represent_as" => ["ELEMENT"],
+	"transmit_as" => ["ELEMENT"],
+
+	# subcontext
+	"subcontext" => ["ELEMENT"],
+	"subcontext_size" => ["ELEMENT"],
+	"compression" => ["ELEMENT"],
+
+	# enum
+	"enum8bit" => ["ENUM"],
+	"enum16bit" => ["ENUM"],
+	"v1_enum" => ["ENUM"],
+
+	# bitmap
+	"bitmap8bit" => ["BITMAP"],
+	"bitmap16bit" => ["BITMAP"],
+	"bitmap32bit" => ["BITMAP"],
+	"bitmap64bit" => ["BITMAP"],
+
+	# array
+	"range" => ["ELEMENT", "PIPE"],
+	"size_is" => ["ELEMENT"],
+	"string" => ["ELEMENT"],
+	"noheader" => ["ELEMENT"],
+	"charset" => ["ELEMENT"],
+	"length_is" => ["ELEMENT"],
+	"to_null" => ["ELEMENT"],
+);
+
+#####################################################################
+# check for unknown properties
+# $t is the declaration kind ("ELEMENT", "STRUCT", ...). Unknown
+# properties warn; known-but-misplaced ones are fatal.
+sub ValidProperties($$)
+{
+ my ($e,$t) = @_;
+
+ return unless defined $e->{PROPERTIES};
+
+ foreach my $key (keys %{$e->{PROPERTIES}}) {
+ warning($e, el_name($e) . ": unknown property '$key'")
+ unless defined($property_list{$key});
+
+ fatal($e, el_name($e) . ": property '$key' not allowed on '$t'")
+ unless grep(/^$t$/, @{$property_list{$key}});
+ }
+}
+
+# Reduce a type (name or SCALAR hash) to its underlying scalar type name
+# by following typedefs and enum/bitmap base types; undef if it cannot
+# be resolved to a scalar.
+sub mapToScalar($)
+{
+ # pre-declared prototype for the recursive call below
+ sub mapToScalar($);
+ my $t = shift;
+ return $t->{NAME} if (ref($t) eq "HASH" and $t->{TYPE} eq "SCALAR");
+ my $ti = getType($t);
+
+ if (not defined ($ti)) {
+ return undef;
+ } elsif ($ti->{TYPE} eq "TYPEDEF") {
+ return mapToScalar($ti->{DATA});
+ } elsif ($ti->{TYPE} eq "ENUM") {
+ return Parse::Pidl::Typelist::enum_type_fn($ti);
+ } elsif ($ti->{TYPE} eq "BITMAP") {
+ return Parse::Pidl::Typelist::bitmap_type_fn($ti);
+ }
+
+ return undef;
+}
+
+#####################################################################
+# validate an element: property placement, switch_is() discriminator
+# consistency, mutually exclusive properties, and pointer properties
+# on non-pointers. Diagnostics go through warning()/fatal().
+sub ValidElement($)
+{
+ my $e = shift;
+
+ ValidProperties($e,"ELEMENT");
+
+ # Check whether switches are used correctly.
+ if (my $switch = has_property($e, "switch_is")) {
+ my $e2 = find_sibling($e, $switch);
+ my $type = getType($e->{TYPE});
+
+ if (defined($type) and $type->{DATA}->{TYPE} ne "UNION") {
+ fatal($e, el_name($e) . ": switch_is() used on non-union type $e->{TYPE} which is a $type->{DATA}->{TYPE}");
+ }
+
+ # when a discriminant exists, its scalar type must match the
+ # scalar type of the switch_is() sibling variable
+ if (not has_property($type->{DATA}, "nodiscriminant") and defined($e2)) {
+ my $discriminator_type = has_property($type->{DATA}, "switch_type");
+ $discriminator_type = "uint32" unless defined ($discriminator_type);
+
+ my $t1 = mapScalarType(mapToScalar($discriminator_type));
+
+ if (not defined($t1)) {
+ fatal($e, el_name($e) . ": unable to map discriminator type '$discriminator_type' to scalar");
+ }
+
+ my $t2 = mapScalarType(mapToScalar($e2->{TYPE}));
+ if (not defined($t2)) {
+ fatal($e, el_name($e) . ": unable to map variable used for switch_is() to scalar");
+ }
+
+ if ($t1 ne $t2) {
+ warning($e, el_name($e) . ": switch_is() is of type $e2->{TYPE} ($t2), while discriminator type for union $type->{NAME} is $discriminator_type ($t1)");
+ }
+ }
+ }
+
+ # mutually exclusive property combinations
+ if (has_property($e, "subcontext") and has_property($e, "represent_as")) {
+ fatal($e, el_name($e) . " : subcontext() and represent_as() can not be used on the same element");
+ }
+
+ if (has_property($e, "subcontext") and has_property($e, "transmit_as")) {
+ fatal($e, el_name($e) . " : subcontext() and transmit_as() can not be used on the same element");
+ }
+
+ if (has_property($e, "represent_as") and has_property($e, "transmit_as")) {
+ fatal($e, el_name($e) . " : represent_as() and transmit_as() can not be used on the same element");
+ }
+
+ if (has_property($e, "represent_as") and has_property($e, "value")) {
+ fatal($e, el_name($e) . " : represent_as() and value() can not be used on the same element");
+ }
+
+ if (has_property($e, "subcontext")) {
+ warning($e, "subcontext() is deprecated. Use represent_as() or transmit_as() instead");
+ }
+
+ # subcontext modifiers require subcontext() itself
+ if (defined (has_property($e, "subcontext_size")) and not defined(has_property($e, "subcontext"))) {
+ fatal($e, el_name($e) . " : subcontext_size() on non-subcontext element");
+ }
+
+ if (defined (has_property($e, "compression")) and not defined(has_property($e, "subcontext"))) {
+ fatal($e, el_name($e) . " : compression() on non-subcontext element");
+ }
+
+ if (!$e->{POINTERS} && (
+ has_property($e, "ptr") or
+ has_property($e, "unique") or
+ has_property($e, "relative") or
+ has_property($e, "relative_short") or
+ has_property($e, "ref"))) {
+ fatal($e, el_name($e) . " : pointer properties on non-pointer element\n");
+ }
+}
+
+#####################################################################
+# validate an enum (property placement only)
+sub ValidEnum($)
+{
+ my ($enum) = @_;
+
+ ValidProperties($enum, "ENUM");
+}
+
+#####################################################################
+# validate a bitmap (property placement only)
+sub ValidBitmap($)
+{
+ my ($bitmap) = @_;
+
+ ValidProperties($bitmap, "BITMAP");
+}
+
+#####################################################################
+# validate a struct: its own properties, then every element
+# (setting each element's PARENT back-pointer along the way)
+sub ValidStruct($)
+{
+ my($struct) = shift;
+
+ ValidProperties($struct, "STRUCT");
+
+ return unless defined($struct->{ELEMENTS});
+
+ foreach my $e (@{$struct->{ELEMENTS}}) {
+ $e->{PARENT} = $struct;
+ ValidElement($e);
+ }
+}
+
+#####################################################################
+# validate a union: discriminant consistency, and case/default
+# requirements on every arm
+sub ValidUnion($)
+{
+ my($union) = shift;
+
+ ValidProperties($union,"UNION");
+
+ # switch_type() is meaningless without a discriminant
+ if (has_property($union->{PARENT}, "nodiscriminant") and
+ has_property($union->{PARENT}, "switch_type")) {
+ fatal($union->{PARENT}, $union->{PARENT}->{NAME} . ": switch_type(" . $union->{PARENT}->{PROPERTIES}->{switch_type} . ") on union without discriminant");
+ }
+
+ return unless defined($union->{ELEMENTS});
+
+ foreach my $e (@{$union->{ELEMENTS}}) {
+ $e->{PARENT} = $union;
+
+ # each arm needs exactly one of: case, default
+ if (defined($e->{PROPERTIES}->{default}) and
+ defined($e->{PROPERTIES}->{case})) {
+ fatal($e, "Union member $e->{NAME} can not have both default and case properties!");
+ }
+
+ unless (defined ($e->{PROPERTIES}->{default}) or
+ defined ($e->{PROPERTIES}->{case})) {
+ fatal($e, "Union member $e->{NAME} must have default or case property");
+ }
+
+ if (has_property($e, "ref")) {
+ fatal($e, el_name($e) . ": embedded ref pointers are not supported yet\n");
+ }
+
+
+ ValidElement($e);
+ }
+}
+
+#####################################################################
+# validate a pipe: check its properties, then validate its payload
+# struct (inheriting the pipe's FILE/LINE for diagnostics)
+sub ValidPipe($)
+{
+ my ($pipe) = @_;
+ my $struct = $pipe->{DATA};
+
+ ValidProperties($pipe, "PIPE");
+
+ $struct->{PARENT} = $pipe;
+
+ $struct->{FILE} = $pipe->{FILE} unless defined($struct->{FILE});
+ $struct->{LINE} = $pipe->{LINE} unless defined($struct->{LINE});
+
+ ValidType($struct);
+}
+
+#####################################################################
+# validate a typedef: check its properties, then validate the
+# underlying definition (only when it is an inline type hash)
+sub ValidTypedef($)
+{
+ my($typedef) = shift;
+ my $data = $typedef->{DATA};
+
+ ValidProperties($typedef, "TYPEDEF");
+
+ # a plain alias to a named type needs no further validation here
+ return unless (ref($data) eq "HASH");
+
+ $data->{PARENT} = $typedef;
+
+ $data->{FILE} = $typedef->{FILE} unless defined($data->{FILE});
+ $data->{LINE} = $typedef->{LINE} unless defined($data->{LINE});
+
+ ValidType($data);
+}
+
+#####################################################################
+# validate a function: its properties and each argument element
+# ([ref] arguments must actually be pointers)
+sub ValidFunction($)
+{
+ my($fn) = shift;
+
+ ValidProperties($fn,"FUNCTION");
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ $e->{PARENT} = $fn;
+ if (has_property($e, "ref") && !$e->{POINTERS}) {
+ fatal($e, "[ref] variables must be pointers ($fn->{NAME}/$e->{NAME})");
+ }
+ ValidElement($e);
+ }
+}
+
+#####################################################################
+# validate a type: dispatch to the matching Valid* handler by TYPE tag
+# (dies implicitly, via an undef code ref, on an unknown tag)
+sub ValidType($)
+{
+ my ($t) = @_;
+
+ {
+ TYPEDEF => \&ValidTypedef,
+ STRUCT => \&ValidStruct,
+ UNION => \&ValidUnion,
+ ENUM => \&ValidEnum,
+ BITMAP => \&ValidBitmap,
+ PIPE => \&ValidPipe
+ }->{$t->{TYPE}}->($t);
+}
+
+#####################################################################
+# validate an interface: its properties (pointer_default value,
+# DCOM object-interface constraints), then every contained
+# function and type definition
+sub ValidInterface($)
+{
+ my($interface) = shift;
+ my($data) = $interface->{DATA};
+
+ if (has_property($interface, "helper")) {
+ warning($interface, "helper() is pidl-specific and deprecated. Use `include' instead");
+ }
+
+ ValidProperties($interface,"INTERFACE");
+
+ if (has_property($interface, "pointer_default")) {
+ if (not grep (/$interface->{PROPERTIES}->{pointer_default}/,
+ ("ref", "unique", "ptr"))) {
+ fatal($interface, "Unknown default pointer type `$interface->{PROPERTIES}->{pointer_default}'");
+ }
+ }
+
+ # DCOM object interfaces: version must be 0 and the inheritance
+ # chain must root at IUnknown
+ if (has_property($interface, "object")) {
+ if (has_property($interface, "version") &&
+ $interface->{PROPERTIES}->{version} != 0) {
+ fatal($interface, "Object interfaces must have version 0.0 ($interface->{NAME})");
+ }
+
+ if (!defined($interface->{BASE}) &&
+ not ($interface->{NAME} eq "IUnknown")) {
+ fatal($interface, "Object interfaces must all derive from IUnknown ($interface->{NAME})");
+ }
+ }
+
+ foreach my $d (@{$data}) {
+ ($d->{TYPE} eq "FUNCTION") && ValidFunction($d);
+ ($d->{TYPE} eq "TYPEDEF" or
+ $d->{TYPE} eq "STRUCT" or
+ $d->{TYPE} eq "UNION" or
+ $d->{TYPE} eq "ENUM" or
+ $d->{TYPE} eq "BITMAP" or
+ $d->{TYPE} eq "PIPE") && ValidType($d);
+ }
+
+}
+
+#####################################################################
+# Validate an IDL structure: check every interface; importlib() is
+# rejected outright
+sub Validate($)
+{
+ my($idl) = shift;
+
+ foreach my $x (@{$idl}) {
+ ($x->{TYPE} eq "INTERFACE") &&
+ ValidInterface($x);
+ ($x->{TYPE} eq "IMPORTLIB") &&
+ fatal($x, "importlib() not supported");
+ }
+}
+
+# True when level $l of element $e is an array whose next level is DATA
+# and the element carries a charset() property (i.e. a character array).
+sub is_charset_array($$)
+{
+ my ($e,$l) = @_;
+
+ return 0 if ($l->{TYPE} ne "ARRAY");
+
+ my $nl = GetNextLevel($e,$l);
+
+ return 0 unless ($nl->{TYPE} eq "DATA");
+
+ return has_property($e, "charset");
+}
+
+
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/ODL.pm b/tools/pidl/lib/Parse/Pidl/ODL.pm
new file mode 100644
index 0000000..14e77fa
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/ODL.pm
@@ -0,0 +1,130 @@
+##########################################
+# Converts ODL structures to IDL structures
+# (C) 2004-2005, 2008 Jelmer Vernooij <jelmer@samba.org>
+
+package Parse::Pidl::ODL;
+
+use Parse::Pidl qw(error);
+use Parse::Pidl::IDL;
+use Parse::Pidl::Util qw(has_property unmake_str);
+use Parse::Pidl::Typelist qw(hasType getType);
+use File::Basename;
+use strict;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+# Prepend the implicit DCOM ORPC arguments to a function: an [in]
+# ORPCTHIS and an [out,ref] ORPCTHAT pointer. Note the two unshifts:
+# the second lands ORPCthat ahead of ORPCthis in the final element list.
+sub FunctionAddObjArgs($)
+{
+ my $e = shift;
+
+ unshift(@{$e->{ELEMENTS}}, {
+ 'NAME' => 'ORPCthis',
+ 'POINTERS' => 0,
+ 'PROPERTIES' => { 'in' => '1' },
+ 'TYPE' => 'ORPCTHIS',
+ 'FILE' => $e->{FILE},
+ 'LINE' => $e->{LINE}
+ });
+ unshift(@{$e->{ELEMENTS}}, {
+ 'NAME' => 'ORPCthat',
+ 'POINTERS' => 1,
+ 'PROPERTIES' => { 'out' => '1', 'ref' => '1' },
+ 'TYPE' => 'ORPCTHAT',
+ 'FILE' => $e->{FILE},
+ 'LINE' => $e->{LINE}
+ });
+}
+
+# Rewrite any element whose type resolves to an INTERFACE into an
+# MInterfacePointer, as required for DCOM marshalling.
+sub ReplaceInterfacePointers($)
+{
+ my ($e) = @_;
+ foreach my $x (@{$e->{ELEMENTS}}) {
+ next unless (hasType($x->{TYPE}));
+ next unless getType($x->{TYPE})->{DATA}->{TYPE} eq "INTERFACE";
+
+ $x->{TYPE} = "MInterfacePointer";
+ }
+}
+
+# Add ORPC specific bits to an interface.
+# Recursively processes imports (resolved against $basedir and
+# @$opt_incdirs), adds the implicit ORPC arguments and interface-pointer
+# rewrites to [object] interfaces, copies inherited functions from base
+# interfaces, and prepends an import of orpc.idl when ORPC was used.
+sub ODL2IDL
+{
+ my ($odl, $basedir, $opt_incdirs) = (@_);
+ my $addedorpc = 0;
+ my $interfaces = {};
+
+ foreach my $x (@$odl) {
+ if ($x->{TYPE} eq "IMPORT") {
+ foreach my $idl_file (@{$x->{PATHS}}) {
+ $idl_file = unmake_str($idl_file);
+ # search the base dir first, then each include dir
+ my $idl_path = undef;
+ foreach ($basedir, @$opt_incdirs) {
+ if (-f "$_/$idl_file") {
+ $idl_path = "$_/$idl_file";
+ last;
+ }
+ }
+ unless ($idl_path) {
+ error($x, "Unable to open include file `$idl_file'");
+ next;
+ }
+ my $podl = Parse::Pidl::IDL::parse_file($idl_path, $opt_incdirs);
+ if (defined($podl)) {
+ require Parse::Pidl::Typelist;
+ my $basename = basename($idl_path, ".idl");
+
+ Parse::Pidl::Typelist::LoadIdl($podl, $basename);
+ # recurse so imported interfaces are known for BASE lookups
+ my $pidl = ODL2IDL($podl, $basedir, $opt_incdirs);
+
+ foreach my $y (@$pidl) {
+ if ($y->{TYPE} eq "INTERFACE") {
+ $interfaces->{$y->{NAME}} = $y;
+ }
+ }
+ } else {
+ error($x, "Failed to parse $idl_path");
+ }
+ }
+ }
+
+ if ($x->{TYPE} eq "INTERFACE") {
+ $interfaces->{$x->{NAME}} = $x;
+ # Add [in] ORPCTHIS *this, [out] ORPCTHAT *that
+ # and replace interfacepointers with MInterfacePointer
+ # for 'object' interfaces
+ if (has_property($x, "object")) {
+ foreach my $e (@{$x->{DATA}}) {
+ ($e->{TYPE} eq "FUNCTION") && FunctionAddObjArgs($e);
+ ReplaceInterfacePointers($e);
+ }
+ $addedorpc = 1;
+ }
+
+ if ($x->{BASE}) {
+ my $base = $interfaces->{$x->{BASE}};
+
+ unless (defined($base)) {
+ error($x, "Undefined base interface `$x->{BASE}'");
+ } else {
+ # reversed so inherited functions keep their original order
+ foreach my $fn (reverse @{$base->{DATA}}) {
+ next unless ($fn->{TYPE} eq "FUNCTION");
+ push (@{$x->{INHERITED_FUNCTIONS}}, $fn);
+ }
+ }
+ }
+ }
+ }
+
+ # make the ORPC types available to the generated IDL
+ unshift (@$odl, {
+ TYPE => "IMPORT",
+ PATHS => [ "\"orpc.idl\"" ],
+ FILE => undef,
+ LINE => undef
+ }) if ($addedorpc);
+
+
+ return $odl;
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba3/ClientNDR.pm b/tools/pidl/lib/Parse/Pidl/Samba3/ClientNDR.pm
new file mode 100644
index 0000000..6acf1c5
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba3/ClientNDR.pm
@@ -0,0 +1,409 @@
+###################################################
+# Samba3 client generator for IDL structures
+# on top of Samba4 style NDR functions
+# Copyright jelmer@samba.org 2005-2006
+# Copyright gd@samba.org 2008
+# released under the GNU GPL
+
+package Parse::Pidl::Samba3::ClientNDR;
+
+use Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(ParseFunction $res $res_hdr);
+
+use strict;
+use Parse::Pidl qw(fatal warning error);
+use Parse::Pidl::Util qw(has_property ParseExpr genpad);
+use Parse::Pidl::NDR qw(ContainsPipe);
+use Parse::Pidl::Typelist qw(mapTypeName);
+use Parse::Pidl::Samba4 qw(DeclLong);
+use Parse::Pidl::Samba4::Header qw(GenerateFunctionInEnv GenerateFunctionOutEnv);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
# Output helpers: generated C source accumulates in $self->{res},
# matching prototypes in $self->{res_hdr}; $self->{tabs} holds the
# current indentation prefix applied to every emitted source line.
sub indent($) { my ($self) = @_; $self->{tabs}.="\t"; }	# one level deeper
sub deindent($) { my ($self) = @_; $self->{tabs} = substr($self->{tabs}, 1); }	# one level back
sub pidl($$) { my ($self,$txt) = @_; $self->{res} .= $txt ? "$self->{tabs}$txt\n" : "\n"; }	# emit a source line; empty/false arg emits a blank line
sub pidl_hdr($$) { my ($self, $txt) = @_; $self->{res_hdr} .= "$txt\n"; }	# emit a header line (never indented)
sub fn_declare($$) { my ($self,$n) = @_; $self->pidl($n); $self->pidl_hdr("$n;"); }	# emit definition line plus its prototype
+
# Construct a generator with empty source/header buffers and no
# indentation.  bless() returns the object, so it doubles as the
# implicit return value.
sub new($)
{
	my ($class) = shift;
	my $self = { res => "", res_hdr => "", tabs => "" };
	bless($self, $class);
}
+
# Map an element's in/out properties to the comment tag used in the
# generated argument list.  An element with both (or neither)
# direction property is treated as [in,out].
sub ElementDirection($)
{
	my ($elem) = @_;

	my $is_in = has_property($elem, "in");
	my $is_out = has_property($elem, "out");

	if ($is_in and not $is_out) {
		return "[in]";
	}
	if ($is_out and not $is_in) {
		return "[out]";
	}
	# Both directions, or no direction annotation at all.
	return "[in,out]";
}
+
# Render an element's IDL properties as a "[a,b(v),...]" annotation for
# the generated argument comments.
#
# $props   - hash ref of property name -> value ("1" means bare flag)
# $ignores - array ref of property names to omit (e.g. "in"/"out",
#            which are rendered separately by ElementDirection)
#
# Returns the bracketed list, or "" when nothing survives filtering.
# (Previously the no-properties path fell off the end and returned
# undef, causing uninitialized-value interpolation in callers.)
sub HeaderProperties($$)
{
	my($props,$ignores) = @_;
	my $ret = "";

	foreach my $d (sort(keys %{$props})) {
		next if (grep(/^$d$/, @$ignores));
		if($props->{$d} ne "1") {
			$ret.= "$d($props->{$d}),";
		} else {
			$ret.="$d,";
		}
	}

	if ($ret) {
		# Drop the trailing comma before wrapping in brackets.
		return "[" . substr($ret, 0, -1) . "]";
	}
	return "";
}
+
# Emit the "server sent garbage" handling appropriate for the calling
# context: a sync function returns the error, an async completion
# callback reports it via tevent and returns void.  Any other $type is
# a generator bug, hence die().
sub ParseInvalidResponse($$)
{
	my ($self, $type) = @_;

	if ($type eq "sync") {
		$self->pidl("return NT_STATUS_INVALID_NETWORK_RESPONSE;");
	} elsif ($type eq "async") {
		$self->pidl("tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);");
		$self->pidl("return;");
	} else {
		die("ParseInvalidResponse($type)");
	}
}
+
# Emit the per-call state struct for the async variant of $fn
# (out_mem_ctx for [out] allocations, plus the NDR result when the
# function returns a value) and the forward declaration of the
# completion callback.
sub ParseFunctionAsyncState($$$)
{
	my ($self, $if, $fn) = @_;

	my $state_str = "struct rpccli_$fn->{NAME}_state";
	my $done_fn = "rpccli_$fn->{NAME}_done";

	$self->pidl("$state_str {");
	$self->indent;
	$self->pidl("TALLOC_CTX *out_mem_ctx;");
	if (defined($fn->{RETURN_TYPE})) {
		$self->pidl(mapTypeName($fn->{RETURN_TYPE}). " result;");
	}
	$self->deindent;
	$self->pidl("};");
	$self->pidl("");
	$self->pidl("static void $done_fn(struct tevent_req *subreq);");
	$self->pidl("");
}
+
# Emit rpccli_<fn>_send(): creates the tevent_req/state pair, allocates
# a named out_mem_ctx when the call has [out] parameters, fires the
# dcerpc-level _send and registers the _done callback.
#
# Change vs. original: dropped the unused locals $uif and $ufn (they
# were computed but never referenced in this sub).
sub ParseFunctionAsyncSend($$$)
{
	my ($self, $if, $fn) = @_;

	my $fn_args = "";
	my $state_str = "struct rpccli_$fn->{NAME}_state";
	my $done_fn = "rpccli_$fn->{NAME}_done";
	my $out_mem_ctx = "rpccli_$fn->{NAME}_out_memory";
	my $fn_str = "struct tevent_req *rpccli_$fn->{NAME}_send";
	# genpad() aligns continuation lines under the opening parenthesis.
	my $pad = genpad($fn_str);

	$fn_args .= "TALLOC_CTX *mem_ctx";
	$fn_args .= ",\n" . $pad . "struct tevent_context *ev";
	$fn_args .= ",\n" . $pad . "struct rpc_pipe_client *cli";

	foreach (@{$fn->{ELEMENTS}}) {
		my $dir = ElementDirection($_);
		my $prop = HeaderProperties($_->{PROPERTIES}, ["in", "out"]);
		$fn_args .= ",\n" . $pad . DeclLong($_, "_") . " /* $dir $prop */";
	}

	$self->fn_declare("$fn_str($fn_args)");
	$self->pidl("{");
	$self->indent;
	$self->pidl("struct tevent_req *req;");
	$self->pidl("$state_str *state;");
	$self->pidl("struct tevent_req *subreq;");
	$self->pidl("");
	$self->pidl("req = tevent_req_create(mem_ctx, &state,");
	$self->pidl("\t\t\t$state_str);");
	$self->pidl("if (req == NULL) {");
	$self->indent;
	$self->pidl("return NULL;");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("state->out_mem_ctx = NULL;");
	$self->pidl("");

	# Only allocate an out-parameter context when there is something to
	# hold on to after completion.
	my $out_params = 0;
	foreach (@{$fn->{ELEMENTS}}) {
		if (grep(/out/, @{$_->{DIRECTION}})) {
			$out_params++;
		}
	}

	if ($out_params > 0) {
		$self->pidl("state->out_mem_ctx = talloc_named_const(state, 0,");
		$self->pidl("\t\t \"$out_mem_ctx\");");
		$self->pidl("if (tevent_req_nomem(state->out_mem_ctx, req)) {");
		$self->indent;
		$self->pidl("return tevent_req_post(req, ev);");
		$self->deindent;
		$self->pidl("}");
		$self->pidl("");
	}

	$fn_str = "subreq = dcerpc_$fn->{NAME}_send";
	$pad = "\t" . genpad($fn_str);
	$fn_args = "state,\n" . $pad . "ev,\n" . $pad . "cli->binding_handle";
	foreach (@{$fn->{ELEMENTS}}) {
		$fn_args .= ",\n" . $pad . "_". $_->{NAME};
	}

	$self->pidl("$fn_str($fn_args);");
	$self->pidl("if (tevent_req_nomem(subreq, req)) {");
	$self->indent;
	$self->pidl("return tevent_req_post(req, ev);");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("tevent_req_set_callback(subreq, $done_fn, req);");
	$self->pidl("return req;");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("");
}
+
# Emit the static completion callback rpccli_<fn>_done(): recovers the
# request and state, picks the memory context for [out] data, collects
# the dcerpc-level result and finishes the tevent_req.
sub ParseFunctionAsyncDone($$$)
{
	my ($self, $if, $fn) = @_;

	my $state_str = "struct rpccli_$fn->{NAME}_state";
	my $done_fn = "rpccli_$fn->{NAME}_done";

	$self->pidl("static void $done_fn(struct tevent_req *subreq)");
	$self->pidl("{");
	$self->indent;
	$self->pidl("struct tevent_req *req = tevent_req_callback_data(");
	$self->pidl("\tsubreq, struct tevent_req);");
	$self->pidl("$state_str *state = tevent_req_data(");
	$self->pidl("\treq, $state_str);");
	$self->pidl("NTSTATUS status;");
	$self->pidl("TALLOC_CTX *mem_ctx;");
	$self->pidl("");

	# [out] results go on out_mem_ctx (if allocated in _send) so they
	# survive until _recv steals them; otherwise hang them off state.
	$self->pidl("if (state->out_mem_ctx) {");
	$self->indent;
	$self->pidl("mem_ctx = state->out_mem_ctx;");
	$self->deindent;
	$self->pidl("} else {");
	$self->indent;
	$self->pidl("mem_ctx = state;");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("");

	my $fn_str = "status = dcerpc_$fn->{NAME}_recv";
	my $pad = "\t" . genpad($fn_str);
	my $fn_args = "subreq,\n" . $pad . "mem_ctx";
	if (defined($fn->{RETURN_TYPE})) {
		$fn_args .= ",\n" . $pad . "&state->result";
	}

	$self->pidl("$fn_str($fn_args);");
	$self->pidl("TALLOC_FREE(subreq);");
	$self->pidl("if (!NT_STATUS_IS_OK(status)) {");
	$self->indent;
	$self->pidl("tevent_req_nterror(req, status);");
	$self->pidl("return;");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("");

	$self->pidl("tevent_req_done(req);");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("");
}
+
# Emit rpccli_<fn>_recv(): propagates any tevent error, moves the
# [out]-parameter allocations onto the caller's mem_ctx and hands back
# the stored NDR-level result.
sub ParseFunctionAsyncRecv($$$)
{
	my ($self, $if, $fn) = @_;

	my $fn_args = "";
	my $state_str = "struct rpccli_$fn->{NAME}_state";
	my $fn_str = "NTSTATUS rpccli_$fn->{NAME}_recv";
	my $pad = genpad($fn_str);

	$fn_args .= "struct tevent_req *req,\n" . $pad . "TALLOC_CTX *mem_ctx";

	if (defined($fn->{RETURN_TYPE})) {
		# NOTE(review): uses the raw IDL type name here, while the other
		# emitters go through mapTypeName() — presumably equivalent for
		# the NTSTATUS/WERROR cases that occur in practice; confirm.
		$fn_args .= ",\n" . $pad . "$fn->{RETURN_TYPE} *result";
	}

	$self->fn_declare("$fn_str($fn_args)");
	$self->pidl("{");
	$self->indent;
	$self->pidl("$state_str *state = tevent_req_data(");
	$self->pidl("\treq, $state_str);");
	$self->pidl("NTSTATUS status;");
	$self->pidl("");
	$self->pidl("if (tevent_req_is_nterror(req, &status)) {");
	$self->indent;
	$self->pidl("tevent_req_received(req);");
	$self->pidl("return status;");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("");

	$self->pidl("/* Steal possible out parameters to the callers context */");
	$self->pidl("talloc_steal(mem_ctx, state->out_mem_ctx);");
	$self->pidl("");

	if (defined($fn->{RETURN_TYPE})) {
		$self->pidl("/* Return result */");
		$self->pidl("*result = state->result;");
		$self->pidl("");
	}

	$self->pidl("tevent_req_received(req);");
	$self->pidl("return NT_STATUS_OK;");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("");
}
+
# Emit the synchronous wrapper rpccli_<fn>(): calls the dcerpc-level
# function directly and converts the NDR result to NTSTATUS.  WERROR
# returns grow an extra optional "WERROR *werror" out argument.
#
# Change vs. original: dropped the unused locals $uif and $ufn (they
# were computed but never referenced in this sub).
sub ParseFunctionSync($$$)
{
	my ($self, $if, $fn) = @_;

	my $fn_args = "";
	my $fn_str = "NTSTATUS rpccli_$fn->{NAME}";
	my $pad = genpad($fn_str);

	$fn_args .= "struct rpc_pipe_client *cli,\n" . $pad . "TALLOC_CTX *mem_ctx";

	foreach (@{$fn->{ELEMENTS}}) {
		my $dir = ElementDirection($_);
		my $prop = HeaderProperties($_->{PROPERTIES}, ["in", "out"]);
		$fn_args .= ",\n" . $pad . DeclLong($_, "_") . " /* $dir $prop */";
	}

	if (defined($fn->{RETURN_TYPE}) && ($fn->{RETURN_TYPE} eq "WERROR")) {
		$fn_args .= ",\n" . $pad . "WERROR *werror";
	}

	$self->fn_declare("$fn_str($fn_args)");
	$self->pidl("{");
	$self->indent;
	if (defined($fn->{RETURN_TYPE})) {
		$self->pidl(mapTypeName($fn->{RETURN_TYPE})." result;");
	}
	$self->pidl("NTSTATUS status;");
	$self->pidl("");

	$fn_str = "status = dcerpc_$fn->{NAME}";
	$pad = "\t" . genpad($fn_str);
	$fn_args = "cli->binding_handle,\n" . $pad . "mem_ctx";
	foreach (@{$fn->{ELEMENTS}}) {
		$fn_args .= ",\n" . $pad . "_". $_->{NAME};
	}
	if (defined($fn->{RETURN_TYPE})) {
		$fn_args .= ",\n" . $pad . "&result";
	}

	$self->pidl("$fn_str($fn_args);");
	$self->pidl("if (!NT_STATUS_IS_OK(status)) {");
	$self->indent;
	$self->pidl("return status;");
	$self->deindent;
	$self->pidl("}");
	$self->pidl("");

	# Result conversion: void -> NT_STATUS_OK, NTSTATUS passes through,
	# WERROR is reported via *werror and mapped to NTSTATUS; anything
	# else is flagged at generation time.
	$self->pidl("/* Return result */");
	if (not $fn->{RETURN_TYPE}) {
		$self->pidl("return NT_STATUS_OK;");
	} elsif ($fn->{RETURN_TYPE} eq "NTSTATUS") {
		$self->pidl("return result;");
	} elsif ($fn->{RETURN_TYPE} eq "WERROR") {
		$self->pidl("if (werror) {");
		$self->indent;
		$self->pidl("*werror = result;");
		$self->deindent;
		$self->pidl("}");
		$self->pidl("");
		$self->pidl("return werror_to_ntstatus(result);");
	} else {
		warning($fn->{ORIGINAL}, "Unable to convert $fn->{RETURN_TYPE} to NTSTATUS");
		$self->pidl("return NT_STATUS_OK;");
	}

	$self->deindent;
	$self->pidl("}");
	$self->pidl("");
}
+
# Emit the full client-side surface for one IDL function: the async
# state/send/done/recv quartet followed by the synchronous wrapper.
sub ParseFunction($$$)
{
	my ($self, $if, $fn) = @_;

	$self->ParseFunctionAsyncState($if, $fn);
	$self->ParseFunctionAsyncSend($if, $fn);
	$self->ParseFunctionAsyncDone($if, $fn);
	$self->ParseFunctionAsyncRecv($if, $fn);

	$self->ParseFunctionSync($if, $fn);
}
+
# Generate client code for one interface, wrapped in a header guard.
# Functions marked [noopnum] or [todo], and functions with any pipe
# argument (not expressible through this API), are skipped.
sub ParseInterface($$)
{
	my ($self, $if) = @_;

	my $uif = uc($if->{NAME});

	$self->pidl_hdr("#ifndef __CLI_$uif\__");
	$self->pidl_hdr("#define __CLI_$uif\__");
	foreach my $fn (@{$if->{FUNCTIONS}}) {
		next if has_property($fn, "noopnum");
		next if has_property($fn, "todo");

		my $skip = 0;
		foreach my $e (@{$fn->{ELEMENTS}}) {
			if (ContainsPipe($e, $e->{LEVELS}[0])) {
				$skip = 1;
				last;
			}
		}
		next if $skip;

		$self->ParseFunction($if->{NAME}, $fn);
	}
	$self->pidl_hdr("#endif /* __CLI_$uif\__ */");
}
+
# Entry point: generate the Samba3 client source and header for every
# interface in the NDR tree.
#
# $ndr      - parsed NDR tree (list of top-level nodes)
# $header   - filename to #include in the generated .c
# $c_header - filename to #include in the generated .h
#
# Returns ($source_text, $header_text).
sub Parse($$$$)
{
	my($self,$ndr,$header,$c_header) = @_;

	$self->pidl("/*");
	$self->pidl(" * Unix SMB/CIFS implementation.");
	$self->pidl(" * client auto-generated by pidl. DO NOT MODIFY!");
	$self->pidl(" */");
	$self->pidl("");
	$self->pidl("#include \"includes.h\"");
	$self->pidl("#include \"$header\"");
	$self->pidl_hdr("#include \"$c_header\"");
	$self->pidl("");

	foreach (@$ndr) {
		$self->ParseInterface($_) if ($_->{TYPE} eq "INTERFACE");
	}

	return ($self->{res}, $self->{res_hdr});
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba3/ServerNDR.pm b/tools/pidl/lib/Parse/Pidl/Samba3/ServerNDR.pm
new file mode 100644
index 0000000..c87d17a
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba3/ServerNDR.pm
@@ -0,0 +1,322 @@
+###################################################
+# Samba3 server generator for IDL structures
+# on top of Samba4 style NDR functions
+# Copyright jelmer@samba.org 2005-2006
+# released under the GNU GPL
+
+package Parse::Pidl::Samba3::ServerNDR;
+
+use Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(DeclLevel);
+
+use strict;
+use Parse::Pidl qw(warning error fatal);
+use Parse::Pidl::Typelist qw(mapTypeName scalar_is_reference);
+use Parse::Pidl::Util qw(ParseExpr has_property is_constant);
+use Parse::Pidl::NDR qw(GetNextLevel ContainsPipe);
+use Parse::Pidl::Samba4 qw(ElementStars DeclLong);
+use Parse::Pidl::Samba4::Header qw(GenerateFunctionOutEnv);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
# Module-level output state (this generator is procedural, unlike the
# object-based ClientNDR): $res collects the source, $res_hdr the
# header, $tabs the current indentation prefix.
my $res;
my $res_hdr;
my $tabs = "";
# Reset all buffers.  (Original used the comma operator between the
# last two assignments — a typo for ";"; behavior is unchanged.)
sub pidl_reset() { $res=""; $res_hdr=""; $tabs=""; }
# Return the accumulated (source, header) pair and reset for reuse.
sub pidl_return() { my $s = $res; my $h = $res_hdr; pidl_reset(); return ($s, $h) }
sub indent() { $tabs.="\t"; }	# one level deeper
sub deindent() { $tabs = substr($tabs, 1); }	# one level back
sub pidl($) { my ($txt) = @_; $res .= $txt?$tabs.(shift)."\n":"\n"; }	# emit source line; false arg -> blank line
sub pidl_hdr($) { $res_hdr .= (shift)."\n"; }	# emit header line
sub fn_declare($) { my ($n) = @_; pidl $n; pidl_hdr "$n;"; }	# definition + prototype
+
# Build the C type declaration for element $e at pointer level $l:
# base type name ([charset] elements become "const char") plus the
# number of stars appropriate for that level.
sub DeclLevel($$)
{
	my ($e, $l) = @_;
	my $res = "";

	if (has_property($e, "charset")) {
		$res .= "const char";
	} else {
		$res .= mapTypeName($e->{TYPE});
	}

	my $stars = ElementStars($e, $l);

	$res .= " ".$stars unless ($stars eq "");

	return $res;
}
+
# Emit allocation code for one [out] parameter of a server stub.
#
# $e       - the element; $mem_ctx/$name - target context and variable
# $env     - expression environment for resolving size_is()
# $check   - coderef producing the failure condition for the allocation
# $cleanup - optional coderef producing cleanup code on failure
# $return  - optional coderef producing the failure return statement
#
# Only pointers and dynamically-sized arrays need an allocation; plain
# scalars return early.
sub AllocOutVar($$$$$$$)
{
	my ($e, $mem_ctx, $name, $env, $check, $cleanup, $return) = @_;

	my $l = $e->{LEVELS}[0];

	# we skip pointer to arrays
	if ($l->{TYPE} eq "POINTER") {
		my $nl = GetNextLevel($e, $l);
		$l = $nl if ($nl->{TYPE} eq "ARRAY");
	} elsif

	# we don't support multi-dimensional arrays yet
	($l->{TYPE} eq "ARRAY") {
		my $nl = GetNextLevel($e, $l);
		if ($nl->{TYPE} eq "ARRAY") {
			fatal($e->{ORIGINAL},"multi-dimentional [out] arrays are not supported!");
		}
	} else {
		# neither pointer nor array, no need to alloc something.
		return;
	}

	if ($l->{TYPE} eq "ARRAY") {
		unless(defined($l->{SIZE_IS})) {
			# Emit a compile-time error into the generated code so the
			# problem surfaces when the stub is built.
			error($e->{ORIGINAL}, "No size known for array `$e->{NAME}'");
			pidl "#error No size known for array `$e->{NAME}'";
		} else {
			my $size = ParseExpr($l->{SIZE_IS}, $env, $e);
			pidl "$name = talloc_zero_array($mem_ctx, " . DeclLevel($e, 1) . ", $size);";
		}
	} else {
		pidl "$name = talloc_zero($mem_ctx, " . DeclLevel($e, 1) . ");";
	}

	pidl "if (" . $check->($name) . ") {";
	indent;
	pidl $cleanup->($name) if defined($cleanup);
	pidl $return->($name) if defined($return);
	deindent;
	pidl "}";
	pidl "";
}
+
# Emit the call into the server implementation _<fn>() for one decoded
# request struct: zero the out block, alias [in,out] pointers from in
# to out, allocate pure [out] parameters, then invoke the handler and
# (if non-void) store its return value in r->out.result.  Also emits
# the handler prototype into the header.
sub CallWithStruct($$$$$$)
{
	my ($pipes_struct, $mem_ctx, $fn, $check, $cleanup, $return) = @_;
	my $env = GenerateFunctionOutEnv($fn);
	my $hasout = 0;
	foreach (@{$fn->{ELEMENTS}}) {
		if (grep(/out/, @{$_->{DIRECTION}})) { $hasout = 1; }
	}

	pidl "ZERO_STRUCT(r->out);" if ($hasout);

	# [in,out] parameters share storage: the out pointer starts out
	# pointing at the in data.
	foreach (@{$fn->{ELEMENTS}}) {
		my @dir = @{$_->{DIRECTION}};
		if (grep(/in/, @dir) and grep(/out/, @dir)) {
			pidl "r->out.$_->{NAME} = r->in.$_->{NAME};";
		}
	}

	foreach (@{$fn->{ELEMENTS}}) {
		next if ContainsPipe($_, $_->{LEVELS}[0]);
		my @dir = @{$_->{DIRECTION}};
		if (grep(/in/, @dir) and grep(/out/, @dir)) {
			# noop
		} elsif (grep(/out/, @dir) and not
				 has_property($_, "represent_as")) {
			AllocOutVar($_, $mem_ctx, "r->out.$_->{NAME}", $env,
						$check, $cleanup, $return);
		}
	}

	my $proto = "_$fn->{NAME}(struct pipes_struct *p, struct $fn->{NAME} *r)";
	my $ret = "_$fn->{NAME}($pipes_struct, r)";

	if ($fn->{RETURN_TYPE}) {
		$ret = "r->out.result = $ret";
		$proto = mapTypeName($fn->{RETURN_TYPE})." $proto";
	} else {
		$proto = "void $proto";
	}

	pidl_hdr "$proto;";
	pidl "$ret;";
}
+
# Emit the static api_<fn>() dispatcher: pull the request NDR blob,
# call the handler via CallWithStruct(), then push the reply blob into
# p->out_data.  Returns false to the RPC server on any marshalling or
# allocation failure; a handler-set fault_state still returns true so
# srv_pipe_hnd.c can send the fault.
sub ParseFunction($$)
{
	my ($if,$fn) = @_;

	my $op = "NDR_".uc($fn->{NAME});

	pidl "static bool api_$fn->{NAME}(struct pipes_struct *p)";
	pidl "{";
	indent;
	pidl "const struct ndr_interface_call *call;";
	pidl "struct ndr_pull *pull;";
	pidl "struct ndr_push *push;";
	pidl "enum ndr_err_code ndr_err;";
	pidl "struct $fn->{NAME} *r;";
	pidl "";
	pidl "call = &ndr_table_$if->{NAME}.calls[$op];";
	pidl "";
	pidl "r = talloc(talloc_tos(), struct $fn->{NAME});";
	pidl "if (r == NULL) {";
	pidl "\treturn false;";
	pidl "}";
	pidl "";
	pidl "pull = ndr_pull_init_blob(&p->in_data.data, r);";
	pidl "if (pull == NULL) {";
	pidl "\ttalloc_free(r);";
	pidl "\treturn false;";
	pidl "}";
	pidl "";
	pidl "pull->flags |= LIBNDR_FLAG_REF_ALLOC;";
	pidl "if (p->endian) {";
	pidl "\tpull->flags |= LIBNDR_FLAG_BIGENDIAN;";
	pidl "}";
	pidl "ndr_err = call->ndr_pull(pull, NDR_IN, r);";
	pidl "if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {";
	pidl "\ttalloc_free(r);";
	pidl "\treturn false;";
	pidl "}";
	pidl "";
	pidl "if (DEBUGLEVEL >= 10) {";
	pidl "\tNDR_PRINT_FUNCTION_DEBUG($fn->{NAME}, NDR_IN, r);";
	pidl "}";
	pidl "";

	# Failure hooks: free the request struct and bail out of api_<fn>.
	CallWithStruct("p", "r", $fn,
	sub ($) {
		my ($name) = @_;
		return "${name} == NULL";
	},
	sub ($) {
		my ($name) = @_;
		return "talloc_free(r);";
	},
	sub ($) {
		my ($name) = @_;
		return "return false;";
	}
	);

	pidl "";
	pidl "if (p->fault_state) {";
	pidl "\ttalloc_free(r);";
	pidl "\t/* Return true here, srv_pipe_hnd.c will take care */";
	pidl "\treturn true;";
	pidl "}";
	pidl "";
	pidl "if (DEBUGLEVEL >= 10) {";
	pidl "\tNDR_PRINT_FUNCTION_DEBUG($fn->{NAME}, NDR_OUT | NDR_SET_VALUES, r);";
	pidl "}";
	pidl "";
	pidl "push = ndr_push_init_ctx(r);";
	pidl "if (push == NULL) {";
	pidl "\ttalloc_free(r);";
	pidl "\treturn false;";
	pidl "}";
	pidl "";
	pidl "/*";
	pidl " * carry over the pointer count to the reply in case we are";
	pidl " * using full pointer. See NDR specification for full pointers";
	pidl " */";
	pidl "push->ptr_count = pull->ptr_count;";
	pidl "";
	pidl "ndr_err = call->ndr_push(push, NDR_OUT, r);";
	pidl "if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {";
	pidl "\ttalloc_free(r);";
	pidl "\treturn false;";
	pidl "}";
	pidl "";
	pidl "p->out_data.rdata = ndr_push_blob(push);";
	pidl "talloc_steal(p->mem_ctx, p->out_data.rdata.data);";
	pidl "";
	pidl "talloc_free(r);";
	pidl "";
	pidl "return true;";
	deindent;
	pidl "}";
	pidl "";
}
+
# Generate the server side for one interface: api_<fn>() stubs, the
# api_struct dispatch table, the get_pipe_fns() accessor and (unless
# [no_srv_register]) the rpc_<if>_init/_shutdown registration pair.
sub ParseInterface($)
{
	my $if = shift;

	my $uif = uc($if->{NAME});

	pidl_hdr "#ifndef __SRV_$uif\__";
	pidl_hdr "#define __SRV_$uif\__";

	foreach (@{$if->{FUNCTIONS}}) {
		next if ($_->{PROPERTIES}{noopnum});
		ParseFunction($if, $_);
	}

	pidl "";
	pidl "/* Tables */";
	pidl "static const struct api_struct api_$if->{NAME}_cmds[] = ";
	pidl "{";
	indent;

	foreach (@{$if->{FUNCTIONS}}) {
		next if ($_->{PROPERTIES}{noopnum});
		pidl "{\"" . uc($_->{NAME}) . "\", NDR_" . uc($_->{NAME}) . ", api_$_->{NAME}},";
	}

	deindent;
	pidl "};";

	pidl "";

	pidl_hdr "const struct api_struct *$if->{NAME}_get_pipe_fns(int *n_fns);";
	pidl "const struct api_struct *$if->{NAME}_get_pipe_fns(int *n_fns)";
	pidl "{";
	indent;
	pidl "*n_fns = sizeof(api_$if->{NAME}_cmds) / sizeof(struct api_struct);";
	pidl "return api_$if->{NAME}_cmds;";
	deindent;
	pidl "}";
	pidl "";

	if (not has_property($if, "no_srv_register")) {
		pidl_hdr "struct rpc_srv_callbacks;";
		pidl_hdr "NTSTATUS rpc_$if->{NAME}_init(const struct rpc_srv_callbacks *rpc_srv_cb);";
		pidl "NTSTATUS rpc_$if->{NAME}_init(const struct rpc_srv_callbacks *rpc_srv_cb)";
		pidl "{";
		pidl "\treturn rpc_srv_register(SMB_RPC_INTERFACE_VERSION, \"$if->{NAME}\", \"$if->{NAME}\", \&ndr_table_$if->{NAME}, api_$if->{NAME}_cmds, sizeof(api_$if->{NAME}_cmds) / sizeof(struct api_struct), rpc_srv_cb);";
		pidl "}";

		pidl "";

		pidl_hdr "NTSTATUS rpc_$if->{NAME}_shutdown(void);";
		pidl "NTSTATUS rpc_$if->{NAME}_shutdown(void)";
		pidl "{";
		pidl "\treturn rpc_srv_unregister(\&ndr_table_$if->{NAME});";
		pidl "}";
	}
	pidl_hdr "#endif /* __SRV_$uif\__ */";
}
+
# Entry point: generate the Samba3 server source and header for every
# interface in the NDR tree.  Returns ($source_text, $header_text) and
# leaves the module buffers reset for the next invocation.
sub Parse($$$)
{
	my($ndr,$header,$ndr_header) = @_;

	pidl_reset();

	pidl "/*";
	pidl " * Unix SMB/CIFS implementation.";
	pidl " * server auto-generated by pidl. DO NOT MODIFY!";
	pidl " */";
	pidl "";
	pidl "#include \"includes.h\"";
	pidl "#include \"ntdomain.h\"";
	pidl "#include \"$header\"";
	pidl_hdr "#include \"$ndr_header\"";
	pidl "";

	foreach (@$ndr) {
		ParseInterface($_) if ($_->{TYPE} eq "INTERFACE");
	}

	return pidl_return();
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4.pm b/tools/pidl/lib/Parse/Pidl/Samba4.pm
new file mode 100644
index 0000000..b720ab9
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4.pm
@@ -0,0 +1,133 @@
+###################################################
+# Common Samba4 functions
+# Copyright jelmer@samba.org 2006
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT = qw(is_intree choose_header NumStars ElementStars ArrayBrackets DeclLong ArrayDynamicallyAllocated);
+
+use Parse::Pidl::Util qw(has_property is_constant);
+use Parse::Pidl::NDR qw(GetNextLevel);
+use Parse::Pidl::Typelist qw(mapTypeName scalar_is_reference);
+use Parse::Pidl qw(fatal error);
+use strict;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+
# return true if we are using pidl within the samba source tree. This changes
# the names of include files, as some include files (such as ntstatus.h) have
# different paths when installed to the path in the source tree
sub is_intree()
{
	my $srcdir = $ENV{srcdir};
	$srcdir = $srcdir ? "$srcdir/" : "";
	# Heuristics: any one of these landmarks marks a samba checkout.
	return 1 if (-f "${srcdir}kdc/kdc.c");
	return 1 if (-d "${srcdir}source4");
	return 1 if (-f "${srcdir}include/smb.h");
	return 0;
}
+
# Return an #include line depending on whether this build is an in-tree
# build or not: quoted project-relative path in-tree, angle-bracket
# system path otherwise.
sub choose_header($$)
{
	my ($in,$out) = @_;
	return "#include \"$in\"" if (is_intree());
	return "#include <$out>";
}
+
# Whether array level $l of element $e needs heap allocation: only
# fixed-size, non-[charset] arrays can be embedded inline; everything
# else is allocated dynamically.  Dies if $l is not an ARRAY level.
sub ArrayDynamicallyAllocated($$)
{
	my ($e, $l) = @_;
	die("Not an array") unless ($l->{TYPE} eq "ARRAY");
	return 0 if ($l->{IS_FIXED} and not has_property($e, "charset"));
	return 1;
}
+
# Count the pointer stars needed to declare element $e in C, minus the
# optional dereference depth $d (default 0).
#
# Pointers directly in front of an array level collapse into the array;
# reference scalars already carry one implicit star; dynamically
# allocated arrays add one star each.  Errors out (at generation time)
# if fewer than $d stars are available.
sub NumStars($;$)
{
	my ($e, $d) = @_;
	$d = 0 unless defined($d);
	my $n = 0;

	foreach my $l (@{$e->{LEVELS}}) {
		next unless ($l->{TYPE} eq "POINTER");

		my $nl = GetNextLevel($e, $l);
		# A pointer-to-array is declared as the array itself.
		next if (defined($nl) and $nl->{TYPE} eq "ARRAY");

		$n++;
	}

	if ($n >= 1) {
		# Reference scalars (e.g. strings) already imply one level.
		$n-- if (scalar_is_reference($e->{TYPE}));
	}

	foreach my $l (@{$e->{LEVELS}}) {
		next unless ($l->{TYPE} eq "ARRAY");
		next unless (ArrayDynamicallyAllocated($e, $l));
		$n++;
	}

	error($e->{ORIGINAL}, "Too few pointers $n < $d") if ($n < $d);

	$n -= $d;

	return $n;
}
+
# Render the pointer stars for element $elem as a string ("", "*",
# "**", ...), after subtracting the optional dereference depth $deref.
# Thin string-building wrapper around NumStars().
sub ElementStars($;$)
{
	my ($elem, $deref) = @_;

	# String repetition replaces the original append-in-a-loop.
	return "*" x NumStars($elem, $deref);
}
+
# Render the "[N]" suffixes for every inline (fixed, non-dynamic)
# array level of element $e; dynamic arrays are expressed as pointers
# by NumStars()/ElementStars() instead.
sub ArrayBrackets($)
{
	my ($e) = @_;
	my $res = "";

	foreach my $l (@{$e->{LEVELS}}) {
		next unless ($l->{TYPE} eq "ARRAY");
		next if ArrayDynamicallyAllocated($e, $l);
		$res .= "[$l->{SIZE_IS}]";
	}

	return $res;
}
+
# Build the full C declaration for element $e: type, stars, an
# optional name prefix $p (e.g. "_"), the element name and any inline
# array brackets.  [represent_as] overrides the wire type; [charset]
# elements are declared as "const char".
sub DeclLong($;$)
{
	my ($e, $p) = @_;
	my $res = "";
	$p = "" unless defined($p);

	if (has_property($e, "represent_as")) {
		$res .= mapTypeName($e->{PROPERTIES}->{represent_as})." ";
	} else {
		if (has_property($e, "charset")) {
			$res .= "const char ";
		} else {
			$res .= mapTypeName($e->{TYPE})." ";
		}

		$res .= ElementStars($e);
	}
	$res .= $p.$e->{NAME};
	$res .= ArrayBrackets($e);

	return $res;
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/COM/Header.pm b/tools/pidl/lib/Parse/Pidl/Samba4/COM/Header.pm
new file mode 100644
index 0000000..de7d454
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/COM/Header.pm
@@ -0,0 +1,160 @@
+# COM Header generation
+# (C) 2005 Jelmer Vernooij <jelmer@samba.org>
+
+package Parse::Pidl::Samba4::COM::Header;
+
+use Parse::Pidl::Typelist qw(mapTypeName);
+use Parse::Pidl::Util qw(has_property is_constant);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use strict;
+
# Build the parameter list (types and names, each preceded by ", ")
# for a COM method prototype from the function's ELEMENTS.
#
# Star count comes from the raw POINTERS count, minus one for
# reference scalars; a variable-length array with no pointers is
# passed as a pointer, while a constant-length array keeps its "[N]"
# suffix.
sub GetArgumentProtoList($)
{
	my $f = shift;
	my $res = "";

	foreach my $a (@{$f->{ELEMENTS}}) {

		$res .= ", " . mapTypeName($a->{TYPE}) . " ";

		my $l = $a->{POINTERS};
		$l-- if (Parse::Pidl::Typelist::scalar_is_reference($a->{TYPE}));
		foreach my $i (1..$l) {
			$res .= "*";
		}

		if (defined $a->{ARRAY_LEN}[0] && !is_constant($a->{ARRAY_LEN}[0]) &&
			!$a->{POINTERS}) {
			$res .= "*";
		}
		$res .= $a->{NAME};
		if (defined $a->{ARRAY_LEN}[0] && is_constant($a->{ARRAY_LEN}[0])) {
			$res .= "[$a->{ARRAY_LEN}[0]]";
		}
	}

	return $res;
}
+
# Build the bare argument-name list (", a, b, ...") matching
# GetArgumentProtoList(), for use at COM method call sites.
sub GetArgumentList($)
{
	my ($fn) = @_;

	return join("", map { ", $_->{NAME}" } @{$fn->{ELEMENTS}});
}
+
#####################################################################
# generate vtable structure for COM interface
# Emits a <IF>_METHODS macro (chaining the base interface's macro when
# BASE is set) with one function pointer per method, then the
# struct <IF>_vtable { GUID iid; <IF>_METHODS } definition.
sub HeaderVTable($)
{
	my $interface = shift;
	my $res;
	$res .= "#define " . uc($interface->{NAME}) . "_METHODS \\\n";
	if (defined($interface->{BASE})) {
		$res .= "\t" . uc($interface->{BASE} . "_METHODS") . "\\\n";
	}

	my $data = $interface->{DATA};
	foreach my $d (@{$data}) {
		$res .= "\t" . mapTypeName($d->{RETURN_TYPE}) . " (*$d->{NAME}) (struct $interface->{NAME} *d, TALLOC_CTX *mem_ctx" . GetArgumentProtoList($d) . ");\\\n" if ($d->{TYPE} eq "FUNCTION");
	}
	$res .= "\n";
	$res .= "struct $interface->{NAME}_vtable {\n";
	$res .= "\tstruct GUID iid;\n";
	$res .= "\t" . uc($interface->{NAME}) . "_METHODS\n";
	$res .= "};\n\n";

	return $res;
}
+
# Generate the C header fragment for one [object] interface: UUID
# define, the struct <IF> wrapper (OBJREF + context + vtable + object
# data), the vtable via HeaderVTable(), and one call-through macro per
# method.  Wrapped in an _<IF>_ include guard.
sub ParseInterface($)
{
	my $if = shift;
	my $res;

	$res .= "\n#ifndef _$if->{NAME}_\n";
	$res .= "#define _$if->{NAME}_\n";

	$res .="\n\n/* $if->{NAME} */\n";

	$res .="#define COM_" . uc($if->{NAME}) . "_UUID $if->{PROPERTIES}->{uuid}\n\n";

	$res .="struct $if->{NAME}_vtable;\n\n";

	$res .="struct $if->{NAME} {
	struct OBJREF obj;
	struct com_context *ctx;
	struct $if->{NAME}_vtable *vtable;
	void *object_data;
};\n\n";

	$res.=HeaderVTable($if);

	foreach my $d (@{$if->{DATA}}) {
		next if ($d->{TYPE} ne "FUNCTION");

		# Macro dispatches through the object's vtable.
		$res .= "#define $if->{NAME}_$d->{NAME}(interface, mem_ctx" . GetArgumentList($d) . ") ";

		$res .= "((interface)->vtable->$d->{NAME}(interface, mem_ctx" . GetArgumentList($d) . "))";

		$res .="\n";
	}

	$res .= "#endif\n";

	return $res;
}
+
# Generate the CLSID (and, when the coclass carries [progid], the
# PROGID) #defines for one COCLASS node.
sub ParseCoClass($)
{
	my ($c) = @_;
	my $res = "";
	$res .= "#define CLSID_" . uc($c->{NAME}) . " $c->{PROPERTIES}->{uuid}\n";
	if (has_property($c, "progid")) {
		$res .= "#define PROGID_" . uc($c->{NAME}) . " $c->{PROPERTIES}->{progid}\n";
	}
	$res .= "\n";
	return $res;
}
+
# Entry point: generate the COM header for all [object] interfaces and
# coclasses in the IDL tree.  Forward-declares every object interface
# first so mutually-referencing vtables compile.  Returns undef when
# nothing COM-related was found (caller then skips the file).
sub Parse($$)
{
	my ($idl,$ndr_header) = @_;
	my $res = "";
	my $has_obj = 0;

	$res .= "#include \"librpc/gen_ndr/orpc.h\"\n" .
			"#include \"$ndr_header\"\n\n";

	foreach (@{$idl})
	{
		if ($_->{TYPE} eq "INTERFACE" && has_property($_, "object")) {
			$res .="struct $_->{NAME};\n";
			$has_obj = 1;
		}
	}

	foreach (@{$idl})
	{
		if ($_->{TYPE} eq "INTERFACE" && has_property($_, "object")) {
			$res.=ParseInterface($_);
			$has_obj = 1;
		}

		if ($_->{TYPE} eq "COCLASS") {
			$res.=ParseCoClass($_);
			$has_obj = 1;
		}
	}

	return $res if ($has_obj);
	return undef;
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/COM/Proxy.pm b/tools/pidl/lib/Parse/Pidl/Samba4/COM/Proxy.pm
new file mode 100644
index 0000000..35e6e3f
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/COM/Proxy.pm
@@ -0,0 +1,225 @@
+###################################################
+# DCOM parser for Samba
+# Basically the glue between COM and DCE/RPC with NDR
+# Copyright jelmer@samba.org 2003-2005
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::COM::Proxy;
+
+use Parse::Pidl::Samba4::COM::Header;
+use Parse::Pidl::Typelist qw(mapTypeName);
+use Parse::Pidl::Util qw(has_property);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use strict;
+
+my($res);
+
# Emit a statically-initialized vtable named $name for $interface into
# the module-global $res buffer, listing the dcom_proxy_<IF>_<method>
# stubs in declaration order (with an empty initializer slot for the
# base interface's methods when BASE is set).
sub ParseVTable($$)
{
	my ($interface, $name) = @_;

	# Generate the vtable
	$res .="\tstruct $interface->{NAME}_vtable $name = {";

	if (defined($interface->{BASE})) {
		$res .= "\n\t\t{},";
	}

	my $data = $interface->{DATA};

	foreach my $d (@{$data}) {
		if ($d->{TYPE} eq "FUNCTION") {
			$res .= "\n\t\tdcom_proxy_$interface->{NAME}_$d->{NAME}";
			$res .= ",";
		}
	}

	$res .= "\n\t};\n\n";
}
+
# Emit dcom_proxy_<IF>_init(): allocates the proxy vtable, copies the
# base interface's registered vtable in when BASE is set, fills in the
# per-method proxy stubs and registers the result with the DCOM layer.
# Appends the generated C to the module-global $res buffer.
sub ParseRegFunc($)
{
	my $interface = shift;

	$res .= "static NTSTATUS dcom_proxy_$interface->{NAME}_init(TALLOC_CTX *ctx)
{
	struct $interface->{NAME}_vtable *proxy_vtable = talloc(ctx, struct $interface->{NAME}_vtable);
";

	if (defined($interface->{BASE})) {
		# NOTE(review): the generated memcpy below targets
		# &proxy_vtable — the address of the pointer variable itself —
		# rather than the allocated vtable it points to.  That looks
		# like a bug in the emitted C (should likely be proxy_vtable);
		# left untouched here because it is runtime-generated output —
		# confirm against upstream samba pidl before changing.
		$res.= "
	struct GUID base_iid;
	const void *base_vtable;

	base_iid = ndr_table_$interface->{BASE}.syntax_id.uuid;

	base_vtable = dcom_proxy_vtable_by_iid(&base_iid);
	if (base_vtable == NULL) {
		DEBUG(0, (\"No proxy registered for base interface '$interface->{BASE}'\\n\"));
		return NT_STATUS_FOOBAR;
	}

	memcpy(&proxy_vtable, base_vtable, sizeof(struct $interface->{BASE}_vtable));

";
	}
	foreach my $x (@{$interface->{DATA}}) {
		next unless ($x->{TYPE} eq "FUNCTION");

		$res .= "\tproxy_vtable->$x->{NAME} = dcom_proxy_$interface->{NAME}_$x->{NAME};\n";
	}

	$res.= "
	proxy_vtable->iid = ndr_table_$interface->{NAME}.syntax_id.uuid;

	return dcom_register_proxy(ctx, (struct IUnknown_vtable *)proxy_vtable);
}\n\n";
}
+
#####################################################################
# parse a function
# Emit the DCOM proxy stub for one method: marshal [in] arguments into
# the request struct (converting interface pointers to OBJREFs), issue
# the dcerpc request, then copy [out] values back (converting OBJREFs
# to interface pointers) and return r.out.result.
#
# Change vs. original: the NTSTATUS comparison now guards against an
# undefined RETURN_TYPE, avoiding an uninitialized-value warning for
# void-returning methods; behavior for defined return types is
# unchanged.
sub ParseFunction($$)
{
	my ($interface, $fn) = @_;
	my $name = $fn->{NAME};
	my $uname = uc $name;

	my $tn = mapTypeName($fn->{RETURN_TYPE});

	$res.="
static $tn dcom_proxy_$interface->{NAME}_$name(struct $interface->{NAME} *d, TALLOC_CTX *mem_ctx" . Parse::Pidl::Samba4::COM::Header::GetArgumentProtoList($fn) . ")
{
	struct dcerpc_pipe *p;
	NTSTATUS status = dcom_get_pipe(d, &p);
	struct $name r;
	struct rpc_request *req;

	if (NT_STATUS_IS_ERR(status)) {
		return status;
	}

	ZERO_STRUCT(r.in.ORPCthis);
	r.in.ORPCthis.version.MajorVersion = COM_MAJOR_VERSION;
	r.in.ORPCthis.version.MinorVersion = COM_MINOR_VERSION;
";

	# Put arguments into r
	foreach my $a (@{$fn->{ELEMENTS}}) {
		next unless (has_property($a, "in"));
		if (Parse::Pidl::Typelist::typeIs($a->{TYPE}, "INTERFACE")) {
			$res .="\tNDR_CHECK(dcom_OBJREF_from_IUnknown(mem_ctx, &r.in.$a->{NAME}.obj, $a->{NAME}));\n";
		} else {
			$res .= "\tr.in.$a->{NAME} = $a->{NAME};\n";
		}
	}

	$res .="
	if (p->conn->flags & DCERPC_DEBUG_PRINT_IN) {
		NDR_PRINT_IN_DEBUG($name, &r);
	}

	status = dcerpc_ndr_request(p, &d->ipid, &ndr_table_$interface->{NAME}, NDR_$uname, mem_ctx, &r);

	if (NT_STATUS_IS_OK(status) && (p->conn->flags & DCERPC_DEBUG_PRINT_OUT)) {
		NDR_PRINT_OUT_DEBUG($name, r);
	}

";

	# Put r info back into arguments
	foreach my $a (@{$fn->{ELEMENTS}}) {
		next unless (has_property($a, "out"));

		if (Parse::Pidl::Typelist::typeIs($a->{TYPE}, "INTERFACE")) {
			$res .="\tNDR_CHECK(dcom_IUnknown_from_OBJREF(d->ctx, &$a->{NAME}, r.out.$a->{NAME}.obj));\n";
		} else {
			$res .= "\t*$a->{NAME} = r.out.$a->{NAME};\n";
		}

	}

	if (defined($fn->{RETURN_TYPE}) and $fn->{RETURN_TYPE} eq "NTSTATUS") {
		$res .= "\tif (NT_STATUS_IS_OK(status)) status = r.out.result;\n";
	}

	$res .=
	"
	return r.out.result;
}\n\n";
}
+
#####################################################################
# parse the interface definitions
# Resets the module-global $res buffer to a fresh file comment, emits
# one proxy stub per method, then the init/registration function.
# The accumulated text is the implicit return value (via ParseRegFunc's
# final append).
sub ParseInterface($)
{
	my($interface) = shift;
	my($data) = $interface->{DATA};
	$res = "/* DCOM proxy for $interface->{NAME} generated by pidl */\n\n";
	foreach my $d (@{$data}) {
		($d->{TYPE} eq "FUNCTION") &&
		ParseFunction($interface, $d);
	}

	ParseRegFunc($interface);
}
+
# Generate dcom_<basename>_init(): calls each object interface's
# dcom_<IF>_init() in turn (skipping interfaces with no methods),
# bailing out on the first error.  Returns the generated C text.
sub RegistrationFunction($$)
{
	my $idl = shift;
	my $basename = shift;

	my $res = "\n\nNTSTATUS dcom_$basename\_init(void)\n";
	$res .= "{\n";
	$res .="\tNTSTATUS status = NT_STATUS_OK;\n";
	foreach my $interface (@{$idl}) {
		next if $interface->{TYPE} ne "INTERFACE";
		next if not has_property($interface, "object");

		my $data = $interface->{DATA};
		my $count = 0;
		foreach my $d (@{$data}) {
			if ($d->{TYPE} eq "FUNCTION") { $count++; }
		}

		next if ($count == 0);

		$res .= "\tstatus = dcom_$interface->{NAME}_init();\n";
		$res .= "\tif (NT_STATUS_IS_ERR(status)) {\n";
		$res .= "\t\treturn status;\n";
		$res .= "\t}\n\n";
	}
	$res .= "\treturn status;\n";
	$res .= "}\n\n";

	return $res;
}
+
# Entry point: generate DCOM proxy code for every non-[local] [object]
# interface in the tree.  Returns undef when no such interface exists.
#
# NOTE(review): the lexical $res here shadows the module-global $res
# that ParseInterface()/ParseRegFunc() write into; the .= below picks
# up ParseInterface's return value (the fully accumulated global
# buffer), so the output is correct but the data flow is fragile —
# treat with care when modifying.
sub Parse($$)
{
	my ($pidl,$comh_filename) = @_;
	my $res = "";
	my $has_obj = 0;

	$res .= "#include \"includes.h\"\n" .
			"#include \"lib/com/dcom/dcom.h\"\n" .
			"#include \"$comh_filename\"\n" .
			"#include \"librpc/rpc/dcerpc.h\"\n";

	foreach (@{$pidl}) {
		next if ($_->{TYPE} ne "INTERFACE");
		next if has_property($_, "local");
		next unless has_property($_, "object");

		$res .= ParseInterface($_);

		$has_obj = 1;
	}

	return $res if ($has_obj);
	return undef;
}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/COM/Stub.pm b/tools/pidl/lib/Parse/Pidl/Samba4/COM/Stub.pm
new file mode 100644
index 0000000..239f5ba
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/COM/Stub.pm
@@ -0,0 +1,327 @@
+###################################################
+# DCOM stub boilerplate generator
+# Copyright jelmer@samba.org 2004-2005
+# Copyright tridge@samba.org 2003
+# Copyright metze@samba.org 2004
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::COM::Stub;
+
+use Parse::Pidl::Util qw(has_property);
+use strict;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+my($res);
+
+sub pidl($)
+{
+ $res .= shift;
+}
+
+#####################################################
+# generate the switch statement for function dispatch
+sub gen_dispatch_switch($)
+{
+ my $data = shift;
+
+ my $count = 0;
+ foreach my $d (@{$data}) {
+ next if ($d->{TYPE} ne "FUNCTION");
+
+ pidl "\tcase $count: {\n";
+ if ($d->{RETURN_TYPE} && $d->{RETURN_TYPE} ne "void") {
+ pidl "\t\tNTSTATUS result;\n";
+ }
+ pidl "\t\tstruct $d->{NAME} *r2 = r;\n";
+ pidl "\t\tif (DEBUGLEVEL > 10) {\n";
+ pidl "\t\t\tNDR_PRINT_FUNCTION_DEBUG($d->{NAME}, NDR_IN, r2);\n";
+ pidl "\t\t}\n";
+ if ($d->{RETURN_TYPE} && $d->{RETURN_TYPE} ne "void") {
+ pidl "\t\tresult = vtable->$d->{NAME}(iface, mem_ctx, r2);\n";
+ } else {
+ pidl "\t\tvtable->$d->{NAME}(iface, mem_ctx, r2);\n";
+ }
+ pidl "\t\tif (dce_call->state_flags & DCESRV_CALL_STATE_FLAG_ASYNC) {\n";
+ pidl "\t\t\tDEBUG(5,(\"function $d->{NAME} will reply async\\n\"));\n";
+ pidl "\t\t}\n";
+ pidl "\t\tbreak;\n\t}\n";
+ $count++;
+ }
+}
+
+#####################################################
+# generate the switch statement for function reply
+sub gen_reply_switch($)
+{
+ my $data = shift;
+
+ my $count = 0;
+ foreach my $d (@{$data}) {
+ next if ($d->{TYPE} ne "FUNCTION");
+
+ pidl "\tcase $count: {\n";
+ pidl "\t\tstruct $d->{NAME} *r2 = r;\n";
+ pidl "\t\tif (dce_call->state_flags & DCESRV_CALL_STATE_FLAG_ASYNC) {\n";
+ pidl "\t\t\tDEBUG(5,(\"function $d->{NAME} replied async\\n\"));\n";
+ pidl "\t\t}\n";
+ pidl "\t\tif (DEBUGLEVEL > 10 && dce_call->fault_code == 0) {\n";
+ pidl "\t\t\tNDR_PRINT_FUNCTION_DEBUG($d->{NAME}, NDR_OUT | NDR_SET_VALUES, r2);\n";
+ pidl "\t\t}\n";
+ pidl "\t\tif (dce_call->fault_code != 0) {\n";
+ pidl "\t\t\tDEBUG(2,(\"dcerpc_fault %s in $d->{NAME}\\n\", dcerpc_errstr(mem_ctx, dce_call->fault_code)));\n";
+ pidl "\t\t}\n";
+ pidl "\t\tbreak;\n\t}\n";
+ $count++;
+ }
+}
+
+#####################################################################
+# produce boilerplate code for a interface
+sub Boilerplate_Iface($)
+{
+ my($interface) = shift;
+ my($data) = $interface->{DATA};
+ my $name = $interface->{NAME};
+ my $uname = uc $name;
+ my $uuid = Parse::Pidl::Util::make_str($interface->{PROPERTIES}->{uuid});
+ my $if_version = $interface->{PROPERTIES}->{version};
+
+ pidl "
+static NTSTATUS $name\__op_bind(struct dcesrv_call_state *dce_call, const struct dcesrv_interface *iface, uint32_t if_version)
+{
+#ifdef DCESRV_INTERFACE_$uname\_BIND
+ return DCESRV_INTERFACE_$uname\_BIND(dce_call,iface);
+#else
+ return NT_STATUS_OK;
+#endif
+}
+
+static void $name\__op_unbind(struct dcesrv_connection_context *context, const struct dcesrv_interface *iface)
+{
+#ifdef DCESRV_INTERFACE_$uname\_UNBIND
+ DCESRV_INTERFACE_$uname\_UNBIND(context, iface);
+#else
+ return;
+#endif
+}
+
+static NTSTATUS $name\__op_ndr_pull(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct ndr_pull *pull, void **r)
+{
+ NTSTATUS status;
+ uint16_t opnum = dce_call->pkt.u.request.opnum;
+
+ dce_call->fault_code = 0;
+
+ if (opnum >= dcerpc_table_$name.num_calls) {
+ dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR;
+ return NT_STATUS_NET_WRITE_FAULT;
+ }
+
+ *r = talloc_size(mem_ctx, dcerpc_table_$name.calls[opnum].struct_size);
+ NT_STATUS_HAVE_NO_MEMORY(*r);
+
+ /* unravel the NDR for the packet */
+ status = dcerpc_table_$name.calls[opnum].ndr_pull(pull, NDR_IN, *r);
+ if (!NT_STATUS_IS_OK(status)) {
+ dcerpc_log_packet(&dcerpc_table_$name, opnum, NDR_IN,
+ &dce_call->pkt.u.request.stub_and_verifier);
+ dce_call->fault_code = DCERPC_FAULT_NDR;
+ return NT_STATUS_NET_WRITE_FAULT;
+ }
+
+ return NT_STATUS_OK;
+}
+
+static NTSTATUS $name\__op_dispatch(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, void *r)
+{
+ uint16_t opnum = dce_call->pkt.u.request.opnum;
+ struct GUID ipid = dce_call->pkt.u.request.object.object;
+ struct dcom_interface_p *iface = dcom_get_local_iface_p(&ipid);
+ const struct dcom_$name\_vtable *vtable = iface->vtable;
+
+ switch (opnum) {
+";
+ gen_dispatch_switch($data);
+
+pidl "
+ default:
+ dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR;
+ break;
+ }
+
+ if (dce_call->fault_code != 0) {
+ dcerpc_log_packet(&dcerpc_table_$name, opnum, NDR_IN,
+ &dce_call->pkt.u.request.stub_and_verifier);
+ return NT_STATUS_NET_WRITE_FAULT;
+ }
+
+ return NT_STATUS_OK;
+}
+
+static NTSTATUS $name\__op_reply(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, void *r)
+{
+ uint16_t opnum = dce_call->pkt.u.request.opnum;
+
+ switch (opnum) {
+";
+ gen_reply_switch($data);
+
+pidl "
+ default:
+ dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR;
+ break;
+ }
+
+ if (dce_call->fault_code != 0) {
+ dcerpc_log_packet(&dcerpc_table_$name, opnum, NDR_IN,
+ &dce_call->pkt.u.request.stub_and_verifier);
+ return NT_STATUS_NET_WRITE_FAULT;
+ }
+
+ return NT_STATUS_OK;
+}
+
+static NTSTATUS $name\__op_ndr_push(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct ndr_push *push, const void *r)
+{
+ NTSTATUS status;
+ uint16_t opnum = dce_call->pkt.u.request.opnum;
+
+ status = dcerpc_table_$name.calls[opnum].ndr_push(push, NDR_OUT, r);
+ if (!NT_STATUS_IS_OK(status)) {
+ dce_call->fault_code = DCERPC_FAULT_NDR;
+ return NT_STATUS_NET_WRITE_FAULT;
+ }
+
+ return NT_STATUS_OK;
+}
+
+static const struct dcesrv_interface $name\_interface = {
+ .name = \"$name\",
+ .uuid = $uuid,
+ .if_version = $if_version,
+ .bind = $name\__op_bind,
+ .unbind = $name\__op_unbind,
+ .ndr_pull = $name\__op_ndr_pull,
+ .dispatch = $name\__op_dispatch,
+ .reply = $name\__op_reply,
+ .ndr_push = $name\__op_ndr_push
+};
+
+";
+}
+
+#####################################################################
+# produce boilerplate code for an endpoint server
+sub Boilerplate_Ep_Server($)
+{
+ my($interface) = shift;
+ my $name = $interface->{NAME};
+ my $uname = uc $name;
+
+ pidl "
+static NTSTATUS $name\__op_init_server(struct dcesrv_context *dce_ctx, const struct dcesrv_endpoint_server *ep_server)
+{
+ int i;
+
+ for (i=0;i<dcerpc_table_$name.endpoints->count;i++) {
+ NTSTATUS ret;
+ const char *name = dcerpc_table_$name.endpoints->names[i];
+
+ ret = dcesrv_interface_register(dce_ctx, name, &$name\_interface, NULL);
+ if (!NT_STATUS_IS_OK(ret)) {
+ DEBUG(1,(\"$name\_op_init_server: failed to register endpoint \'%s\'\\n\",name));
+ return ret;
+ }
+ }
+
+ return NT_STATUS_OK;
+}
+
+static BOOL $name\__op_interface_by_uuid(struct dcesrv_interface *iface, const char *uuid, uint32_t if_version)
+{
+ if (dcerpc_table_$name.if_version == if_version &&
+ strcmp(dcerpc_table_$name.uuid, uuid)==0) {
+ memcpy(iface,&dcerpc_table_$name, sizeof(*iface));
+ return True;
+ }
+
+ return False;
+}
+
+static BOOL $name\__op_interface_by_name(struct dcesrv_interface *iface, const char *name)
+{
+ if (strcmp(dcerpc_table_$name.name, name)==0) {
+ memcpy(iface,&dcerpc_table_$name, sizeof(*iface));
+ return True;
+ }
+
+ return False;
+}
+
+NTSTATUS dcerpc_server_$name\_init(void)
+{
+ NTSTATUS ret;
+ struct dcesrv_endpoint_server ep_server;
+
+ /* fill in our name */
+ ep_server.name = \"$name\";
+
+ /* fill in all the operations */
+ ep_server.init_server = $name\__op_init_server;
+
+ ep_server.interface_by_uuid = $name\__op_interface_by_uuid;
+ ep_server.interface_by_name = $name\__op_interface_by_name;
+
+ /* register ourselves with the DCERPC subsystem. */
+ ret = dcerpc_register_ep_server(&ep_server);
+
+ if (!NT_STATUS_IS_OK(ret)) {
+ DEBUG(0,(\"Failed to register \'$name\' endpoint server!\\n\"));
+ return ret;
+ }
+
+ return ret;
+}
+
+";
+}
+
+#####################################################################
+# dcom interface stub from a parsed IDL structure
+sub ParseInterface($)
+{
+ my($interface) = shift;
+
+ return "" if has_property($interface, "local");
+
+ my($data) = $interface->{DATA};
+ my $count = 0;
+
+ $res = "";
+
+ if (!defined $interface->{PROPERTIES}->{uuid}) {
+ return $res;
+ }
+
+ if (!defined $interface->{PROPERTIES}->{version}) {
+ $interface->{PROPERTIES}->{version} = "0.0";
+ }
+
+ foreach my $d (@{$data}) {
+ if ($d->{TYPE} eq "FUNCTION") { $count++; }
+ }
+
+ if ($count == 0) {
+ return $res;
+ }
+
+ $res = "/* dcom interface stub generated by pidl */\n\n";
+ Boilerplate_Iface($interface);
+ Boilerplate_Ep_Server($interface);
+
+ return $res;
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/Header.pm b/tools/pidl/lib/Parse/Pidl/Samba4/Header.pm
new file mode 100644
index 0000000..e9b7bee
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/Header.pm
@@ -0,0 +1,537 @@
+###################################################
+# create C header files for an IDL structure
+# Copyright tridge@samba.org 2000
+# Copyright jelmer@samba.org 2005
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::Header;
+require Exporter;
+
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(GenerateFunctionInEnv GenerateFunctionOutEnv EnvSubstituteValue GenerateStructEnv);
+
+use strict;
+use Parse::Pidl qw(fatal);
+use Parse::Pidl::Typelist qw(mapTypeName scalar_is_reference);
+use Parse::Pidl::Util qw(has_property is_constant unmake_str ParseExpr);
+use Parse::Pidl::Samba4 qw(is_intree ElementStars ArrayBrackets choose_header);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+my($res);
+my($tab_depth);
+
+sub pidl($) { $res .= shift; }
+
+sub tabs()
+{
+ my $res = "";
+ $res .="\t" foreach (1..$tab_depth);
+ return $res;
+}
+
+#####################################################################
+# parse a properties list
+sub HeaderProperties($$)
+{
+ my($props,$ignores) = @_;
+ my $ret = "";
+
+ foreach my $d (sort(keys %{$props})) {
+ next if (grep(/^$d$/, @$ignores));
+ if($props->{$d} ne "1") {
+ $ret.= "$d($props->{$d}),";
+ } else {
+ $ret.="$d,";
+ }
+ }
+
+ if ($ret) {
+ pidl "/* [" . substr($ret, 0, -1) . "] */";
+ }
+}
+
+#####################################################################
+# parse a structure element
+sub HeaderElement($)
+{
+ my($element) = shift;
+
+ pidl tabs();
+ if (has_property($element, "represent_as")) {
+ pidl mapTypeName($element->{PROPERTIES}->{represent_as})." ";
+ } else {
+ if (ref($element->{TYPE}) eq "HASH") {
+ HeaderType($element, $element->{TYPE}, $element->{TYPE}->{NAME});
+ } else {
+ HeaderType($element, $element->{TYPE}, "");
+ }
+ pidl " ".ElementStars($element);
+ }
+ pidl $element->{NAME};
+ pidl ArrayBrackets($element);
+
+ pidl ";";
+ if (defined $element->{PROPERTIES}) {
+ HeaderProperties($element->{PROPERTIES}, ["in", "out"]);
+ }
+ pidl "\n";
+}
+
+#####################################################################
+# parse a struct
+sub HeaderStruct($$;$)
+{
+ my($struct,$name,$tail) = @_;
+ pidl "struct $name";
+ pidl $tail if defined($tail) and not defined($struct->{ELEMENTS});
+ return if (not defined($struct->{ELEMENTS}));
+ pidl " {\n";
+ $tab_depth++;
+ my $el_count=0;
+ foreach (@{$struct->{ELEMENTS}}) {
+ HeaderElement($_);
+ $el_count++;
+ }
+ if ($el_count == 0) {
+ # some compilers can't handle empty structures
+ pidl tabs()."char _empty_;\n";
+ }
+ $tab_depth--;
+ pidl tabs()."}";
+ if (defined $struct->{PROPERTIES}) {
+ HeaderProperties($struct->{PROPERTIES}, []);
+ }
+ pidl $tail if defined($tail);
+}
+
+#####################################################################
+# parse a enum
+sub HeaderEnum($$;$)
+{
+ my($enum,$name,$tail) = @_;
+ my $first = 1;
+
+ pidl "enum $name";
+ if (defined($enum->{ELEMENTS})) {
+ pidl "\n#ifndef USE_UINT_ENUMS\n";
+ pidl " {\n";
+ $tab_depth++;
+ foreach my $e (@{$enum->{ELEMENTS}}) {
+ my @enum_els = ();
+ unless ($first) { pidl ",\n"; }
+ $first = 0;
+ pidl tabs();
+ @enum_els = split(/=/, $e);
+ if (@enum_els == 2) {
+ pidl $enum_els[0];
+ pidl "=(int)";
+ pidl "(";
+ pidl $enum_els[1];
+ pidl ")";
+ } else {
+ pidl $e;
+ }
+ }
+ pidl "\n";
+ $tab_depth--;
+ pidl "}";
+ pidl "\n";
+ pidl "#else\n";
+ my $count = 0;
+ my $with_val = 0;
+ my $without_val = 0;
+ pidl " { __do_not_use_enum_$name=0x7FFFFFFF}\n";
+ foreach my $e (@{$enum->{ELEMENTS}}) {
+ my $t = "$e";
+ my $name;
+ my $value;
+ if ($t =~ /(.*)=(.*)/) {
+ $name = $1;
+ $value = $2;
+ $with_val = 1;
+ fatal($e->{ORIGINAL}, "you can't mix enum member with values and without values!")
+ unless ($without_val == 0);
+ } else {
+ $name = $t;
+ $value = $count++;
+ $without_val = 1;
+ fatal($e->{ORIGINAL}, "you can't mix enum member with values and without values!")
+ unless ($with_val == 0);
+ }
+ pidl "#define $name ( $value )\n";
+ }
+ pidl "#endif\n";
+ }
+ pidl $tail if defined($tail);
+}
+
+#####################################################################
+# parse a bitmap
+sub HeaderBitmap($$)
+{
+ my($bitmap,$name) = @_;
+
+ return unless defined($bitmap->{ELEMENTS});
+
+ pidl "/* bitmap $name */\n";
+ pidl "#define $_\n" foreach (@{$bitmap->{ELEMENTS}});
+ pidl "\n";
+}
+
+#####################################################################
+# parse a union
+sub HeaderUnion($$;$)
+{
+ my($union,$name,$tail) = @_;
+ my %done = ();
+
+ pidl "union $name";
+ pidl $tail if defined($tail) and not defined($union->{ELEMENTS});
+ return if (not defined($union->{ELEMENTS}));
+ pidl " {\n";
+ $tab_depth++;
+ my $needed = 0;
+ foreach my $e (@{$union->{ELEMENTS}}) {
+ if ($e->{TYPE} ne "EMPTY") {
+ if (! defined $done{$e->{NAME}}) {
+ HeaderElement($e);
+ }
+ $done{$e->{NAME}} = 1;
+ $needed++;
+ }
+ }
+ if (!$needed) {
+ # sigh - some compilers don't like empty structures
+ pidl tabs()."int _dummy_element;\n";
+ }
+ $tab_depth--;
+ pidl "}";
+
+ if (defined $union->{PROPERTIES}) {
+ HeaderProperties($union->{PROPERTIES}, []);
+ }
+ pidl $tail if defined($tail);
+}
+
+#####################################################################
+# parse a pipe
+sub HeaderPipe($$;$)
+{
+ my($pipe,$name,$tail) = @_;
+
+ my $struct = $pipe->{DATA};
+ my $e = $struct->{ELEMENTS}[1];
+
+ pidl "struct $name;\n";
+ pidl "struct $struct->{NAME} {\n";
+ $tab_depth++;
+ pidl tabs()."uint32_t count;\n";
+ pidl tabs().mapTypeName($e->{TYPE})." *array;\n";
+ $tab_depth--;
+ pidl "}";
+
+ if (defined $struct->{PROPERTIES}) {
+ HeaderProperties($struct->{PROPERTIES}, []);
+ }
+
+ pidl $tail if defined($tail);
+}
+
+#####################################################################
+# parse a type
+sub HeaderType($$$;$)
+{
+ my($e,$data,$name,$tail) = @_;
+ if (ref($data) eq "HASH") {
+ ($data->{TYPE} eq "ENUM") && HeaderEnum($data, $name, $tail);
+ ($data->{TYPE} eq "BITMAP") && HeaderBitmap($data, $name);
+ ($data->{TYPE} eq "STRUCT") && HeaderStruct($data, $name, $tail);
+ ($data->{TYPE} eq "UNION") && HeaderUnion($data, $name, $tail);
+ ($data->{TYPE} eq "PIPE") && HeaderPipe($data, $name, $tail);
+ return;
+ }
+
+ if (has_property($e, "charset")) {
+ pidl "const char";
+ } else {
+ pidl mapTypeName($e->{TYPE});
+ }
+ pidl $tail if defined($tail);
+}
+
+#####################################################################
+# parse a typedef
+sub HeaderTypedef($;$)
+{
+ my($typedef,$tail) = @_;
+ # Don't print empty "enum foo;", since some compilers don't like it.
+ return if ($typedef->{DATA}->{TYPE} eq "ENUM" and not defined($typedef->{DATA}->{ELEMENTS}));
+ HeaderType($typedef, $typedef->{DATA}, $typedef->{NAME}, $tail) if defined ($typedef->{DATA});
+}
+
+#####################################################################
+# parse a const
+sub HeaderConst($)
+{
+ my($const) = shift;
+ if (!defined($const->{ARRAY_LEN}[0])) {
+ pidl "#define $const->{NAME}\t( $const->{VALUE} )\n";
+ } else {
+ pidl "#define $const->{NAME}\t $const->{VALUE}\n";
+ }
+}
+
+sub ElementDirection($)
+{
+ my ($e) = @_;
+
+ return "inout" if (has_property($e, "in") and has_property($e, "out"));
+ return "in" if (has_property($e, "in"));
+ return "out" if (has_property($e, "out"));
+ return "inout";
+}
+
+#####################################################################
+# parse a function
+sub HeaderFunctionInOut($$)
+{
+ my($fn,$prop) = @_;
+
+ return unless defined($fn->{ELEMENTS});
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ HeaderElement($e) if (ElementDirection($e) eq $prop);
+ }
+}
+
+#####################################################################
+# determine if we need an "in" or "out" section
+sub HeaderFunctionInOut_needed($$)
+{
+ my($fn,$prop) = @_;
+
+ return 1 if ($prop eq "out" && defined($fn->{RETURN_TYPE}));
+
+ return undef unless defined($fn->{ELEMENTS});
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ return 1 if (ElementDirection($e) eq $prop);
+ }
+
+ return undef;
+}
+
+my %headerstructs;
+
+#####################################################################
+# parse a function
+sub HeaderFunction($)
+{
+ my($fn) = shift;
+
+ return if ($headerstructs{$fn->{NAME}});
+
+ $headerstructs{$fn->{NAME}} = 1;
+
+ pidl "\nstruct $fn->{NAME} {\n";
+ $tab_depth++;
+ my $needed = 0;
+
+ if (HeaderFunctionInOut_needed($fn, "in") or
+ HeaderFunctionInOut_needed($fn, "inout")) {
+ pidl tabs()."struct {\n";
+ $tab_depth++;
+ HeaderFunctionInOut($fn, "in");
+ HeaderFunctionInOut($fn, "inout");
+ $tab_depth--;
+ pidl tabs()."} in;\n\n";
+ $needed++;
+ }
+
+ if (HeaderFunctionInOut_needed($fn, "out") or
+ HeaderFunctionInOut_needed($fn, "inout")) {
+ pidl tabs()."struct {\n";
+ $tab_depth++;
+ HeaderFunctionInOut($fn, "out");
+ HeaderFunctionInOut($fn, "inout");
+ if (defined($fn->{RETURN_TYPE})) {
+ pidl tabs().mapTypeName($fn->{RETURN_TYPE}) . " result;\n";
+ }
+ $tab_depth--;
+ pidl tabs()."} out;\n\n";
+ $needed++;
+ }
+
+ if (!$needed) {
+ # sigh - some compilers don't like empty structures
+ pidl tabs()."int _dummy_element;\n";
+ }
+
+ $tab_depth--;
+ pidl "};\n\n";
+}
+
+sub HeaderImport
+{
+ my @imports = @_;
+ foreach my $import (@imports) {
+ $import = unmake_str($import);
+ $import =~ s/\.idl$//;
+ pidl choose_header("librpc/gen_ndr/$import\.h", "gen_ndr/$import.h") . "\n";
+ }
+}
+
+sub HeaderInclude
+{
+ my @includes = @_;
+ foreach (@includes) {
+ pidl "#include $_\n";
+ }
+}
+
+#####################################################################
+# parse the interface definitions
+sub HeaderInterface($)
+{
+ my($interface) = shift;
+
+ pidl "#ifndef _HEADER_$interface->{NAME}\n";
+ pidl "#define _HEADER_$interface->{NAME}\n\n";
+
+ foreach my $c (@{$interface->{CONSTS}}) {
+ HeaderConst($c);
+ }
+
+ foreach my $t (@{$interface->{TYPES}}) {
+ HeaderTypedef($t, ";\n\n") if ($t->{TYPE} eq "TYPEDEF");
+ HeaderStruct($t, $t->{NAME}, ";\n\n") if ($t->{TYPE} eq "STRUCT");
+ HeaderUnion($t, $t->{NAME}, ";\n\n") if ($t->{TYPE} eq "UNION");
+ HeaderEnum($t, $t->{NAME}, ";\n\n") if ($t->{TYPE} eq "ENUM");
+ HeaderBitmap($t, $t->{NAME}) if ($t->{TYPE} eq "BITMAP");
+ HeaderPipe($t, $t->{NAME}, "\n\n") if ($t->{TYPE} eq "PIPE");
+ }
+
+ foreach my $fn (@{$interface->{FUNCTIONS}}) {
+ HeaderFunction($fn);
+ }
+
+ pidl "#endif /* _HEADER_$interface->{NAME} */\n";
+}
+
+sub HeaderQuote($)
+{
+ my($quote) = shift;
+
+ pidl unmake_str($quote->{DATA}) . "\n";
+}
+
+#####################################################################
+# parse a parsed IDL into a C header
+sub Parse($)
+{
+ my($ndr) = shift;
+ $tab_depth = 0;
+
+ $res = "";
+ %headerstructs = ();
+ pidl "/* header auto-generated by pidl */\n\n";
+
+ my $ifacename = "";
+
+ # work out a unique interface name
+ foreach (@{$ndr}) {
+ if ($_->{TYPE} eq "INTERFACE") {
+ $ifacename = $_->{NAME};
+ last;
+ }
+ }
+
+ pidl "#ifndef _PIDL_HEADER_$ifacename\n";
+ pidl "#define _PIDL_HEADER_$ifacename\n\n";
+
+ if (!is_intree()) {
+ pidl "#include <util/data_blob.h>\n";
+ }
+ pidl "#include <stdint.h>\n";
+ pidl "\n";
+ # FIXME: Include this only if NTSTATUS was actually used
+ pidl choose_header("libcli/util/ntstatus.h", "core/ntstatus.h") . "\n";
+ pidl "\n";
+
+ foreach (@{$ndr}) {
+ ($_->{TYPE} eq "CPP_QUOTE") && HeaderQuote($_);
+ ($_->{TYPE} eq "INTERFACE") && HeaderInterface($_);
+ ($_->{TYPE} eq "IMPORT") && HeaderImport(@{$_->{PATHS}});
+ ($_->{TYPE} eq "INCLUDE") && HeaderInclude(@{$_->{PATHS}});
+ }
+
+ pidl "#endif /* _PIDL_HEADER_$ifacename */\n";
+
+ return $res;
+}
+
+sub GenerateStructEnv($$)
+{
+ my ($x, $v) = @_;
+ my %env;
+
+ foreach my $e (@{$x->{ELEMENTS}}) {
+ $env{$e->{NAME}} = "$v->$e->{NAME}";
+ }
+
+ $env{"this"} = $v;
+
+ return \%env;
+}
+
+sub EnvSubstituteValue($$)
+{
+ my ($env,$s) = @_;
+
+ # Substitute the value() values in the env
+ foreach my $e (@{$s->{ELEMENTS}}) {
+ next unless (defined(my $v = has_property($e, "value")));
+
+ $env->{$e->{NAME}} = ParseExpr($v, $env, $e);
+ }
+
+ return $env;
+}
+
+sub GenerateFunctionInEnv($;$)
+{
+ my ($fn, $base) = @_;
+ my %env;
+
+ $base = "r->" unless defined($base);
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep (/in/, @{$e->{DIRECTION}})) {
+ $env{$e->{NAME}} = $base."in.$e->{NAME}";
+ }
+ }
+
+ return \%env;
+}
+
+sub GenerateFunctionOutEnv($;$)
+{
+ my ($fn, $base) = @_;
+ my %env;
+
+ $base = "r->" unless defined($base);
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep (/out/, @{$e->{DIRECTION}})) {
+ $env{$e->{NAME}} = $base."out.$e->{NAME}";
+ } elsif (grep (/in/, @{$e->{DIRECTION}})) {
+ $env{$e->{NAME}} = $base."in.$e->{NAME}";
+ }
+ }
+
+ return \%env;
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Client.pm b/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Client.pm
new file mode 100644
index 0000000..040cd5a
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Client.pm
@@ -0,0 +1,884 @@
+###################################################
+# client calls generator
+# Copyright tridge@samba.org 2003
+# Copyright jelmer@samba.org 2005-2006
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::NDR::Client;
+
+use Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(Parse);
+
+use Parse::Pidl qw(fatal warning error);
+use Parse::Pidl::Util qw(has_property ParseExpr genpad);
+use Parse::Pidl::NDR qw(ContainsPipe);
+use Parse::Pidl::Typelist qw(mapTypeName);
+use Parse::Pidl::Samba4 qw(choose_header is_intree DeclLong);
+use Parse::Pidl::Samba4::Header qw(GenerateFunctionInEnv GenerateFunctionOutEnv);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use strict;
+
+sub indent($) { my ($self) = @_; $self->{tabs}.="\t"; }
+sub deindent($) { my ($self) = @_; $self->{tabs} = substr($self->{tabs}, 1); }
+sub pidl($$) { my ($self,$txt) = @_; $self->{res} .= $txt ? "$self->{tabs}$txt\n" : "\n"; }
+sub pidl_hdr($$) { my ($self, $txt) = @_; $self->{res_hdr} .= "$txt\n"; }
+sub pidl_both($$) { my ($self, $txt) = @_; $self->{hdr} .= "$txt\n"; $self->{res_hdr} .= "$txt\n"; }
+sub fn_declare($$) { my ($self,$n) = @_; $self->pidl($n); $self->pidl_hdr("$n;"); }
+
+sub new($)
+{
+ my ($class) = shift;
+ my $self = { res => "", res_hdr => "", tabs => "" };
+ bless($self, $class);
+}
+
+sub ParseFunctionHasPipes($$)
+{
+ my ($self, $fn) = @_;
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ return 1 if ContainsPipe($e, $e->{LEVELS}[0]);
+ }
+
+ return 0;
+}
+
+sub ParseFunction_r_State($$$$)
+{
+ my ($self, $if, $fn, $name) = @_;
+ my $uname = uc $name;
+
+ $self->pidl("struct dcerpc_$name\_r_state {");
+ $self->indent;
+ $self->pidl("TALLOC_CTX *out_mem_ctx;");
+ $self->deindent;
+ $self->pidl("};");
+ $self->pidl("");
+ $self->pidl("static void dcerpc_$name\_r_done(struct tevent_req *subreq);");
+ $self->pidl("");
+}
+
+sub ParseFunction_r_Send($$$$)
+{
+ my ($self, $if, $fn, $name) = @_;
+ my $uname = uc $name;
+
+ my $proto = "struct tevent_req *dcerpc_$name\_r_send(TALLOC_CTX *mem_ctx,\n";
+ $proto .= "\tstruct tevent_context *ev,\n",
+ $proto .= "\tstruct dcerpc_binding_handle *h,\n",
+ $proto .= "\tstruct $name *r)";
+
+ $self->fn_declare($proto);
+
+ $self->pidl("{");
+ $self->indent;
+
+ $self->pidl("struct tevent_req *req;");
+ $self->pidl("struct dcerpc_$name\_r_state *state;");
+ $self->pidl("struct tevent_req *subreq;");
+ $self->pidl("");
+
+ $self->pidl("req = tevent_req_create(mem_ctx, &state,");
+ $self->pidl("\t\t\tstruct dcerpc_$name\_r_state);");
+ $self->pidl("if (req == NULL) {");
+ $self->indent;
+ $self->pidl("return NULL;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+
+ my $out_params = 0;
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ next unless grep(/out/, @{$e->{DIRECTION}});
+ next if ContainsPipe($e, $e->{LEVELS}[0]);
+ $out_params++;
+
+ }
+
+ my $submem;
+ if ($out_params > 0) {
+ $self->pidl("state->out_mem_ctx = talloc_new(state);");
+ $self->pidl("if (tevent_req_nomem(state->out_mem_ctx, req)) {");
+ $self->indent;
+ $self->pidl("return tevent_req_post(req, ev);");
+ $self->deindent;
+ $self->pidl("}");
+ $submem = "state->out_mem_ctx";
+ } else {
+ $self->pidl("state->out_mem_ctx = NULL;");
+ $submem = "state";
+ }
+ $self->pidl("");
+
+ $self->pidl("subreq = dcerpc_binding_handle_call_send(state, ev, h,");
+ $self->pidl("\t\tNULL, &ndr_table_$if->{NAME},");
+ $self->pidl("\t\tNDR_$uname, $submem, r);");
+ $self->pidl("if (tevent_req_nomem(subreq, req)) {");
+ $self->indent;
+ $self->pidl("return tevent_req_post(req, ev);");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("tevent_req_set_callback(subreq, dcerpc_$name\_r_done, req);");
+ $self->pidl("");
+
+ $self->pidl("return req;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub ParseFunction_r_Done($$$$)
+{
+ my ($self, $if, $fn, $name) = @_;
+ my $uname = uc $name;
+
+ my $proto = "static void dcerpc_$name\_r_done(struct tevent_req *subreq)";
+
+ $self->pidl("$proto");
+ $self->pidl("{");
+ $self->indent;
+
+ $self->pidl("struct tevent_req *req =");
+ $self->pidl("\ttevent_req_callback_data(subreq,");
+ $self->pidl("\tstruct tevent_req);");
+ $self->pidl("NTSTATUS status;");
+ $self->pidl("");
+
+ $self->pidl("status = dcerpc_binding_handle_call_recv(subreq);");
+ $self->pidl("TALLOC_FREE(subreq);");
+ $self->pidl("if (tevent_req_nterror(req, status)) {");
+ $self->indent;
+ $self->pidl("return;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+
+ $self->pidl("tevent_req_done(req);");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub ParseFunction_r_Recv($$$$)
+{
+ my ($self, $if, $fn, $name) = @_;
+ my $uname = uc $name;
+
+ my $proto = "NTSTATUS dcerpc_$name\_r_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx)";
+
+ $self->fn_declare($proto);
+
+ $self->pidl("{");
+ $self->indent;
+
+ $self->pidl("struct dcerpc_$name\_r_state *state =");
+ $self->pidl("\ttevent_req_data(req,");
+ $self->pidl("\tstruct dcerpc_$name\_r_state);");
+ $self->pidl("NTSTATUS status;");
+ $self->pidl("");
+
+ $self->pidl("if (tevent_req_is_nterror(req, &status)) {");
+ $self->indent;
+ $self->pidl("tevent_req_received(req);");
+ $self->pidl("return status;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+
+ $self->pidl("talloc_steal(mem_ctx, state->out_mem_ctx);");
+ $self->pidl("");
+
+ $self->pidl("tevent_req_received(req);");
+ $self->pidl("return NT_STATUS_OK;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub ParseFunction_r_Sync($$$$)
+{
+ my ($self, $if, $fn, $name) = @_;
+ my $uname = uc $name;
+
+ if ($self->ParseFunctionHasPipes($fn)) {
+ $self->pidl_both("/*");
+ $self->pidl_both(" * The following function is skipped because");
+ $self->pidl_both(" * it uses pipes:");
+ $self->pidl_both(" *");
+ $self->pidl_both(" * dcerpc_$name\_r()");
+ $self->pidl_both(" */");
+ $self->pidl_both("");
+ return;
+ }
+
+ my $proto = "NTSTATUS dcerpc_$name\_r(struct dcerpc_binding_handle *h, TALLOC_CTX *mem_ctx, struct $name *r)";
+
+ $self->fn_declare($proto);
+
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("NTSTATUS status;");
+ $self->pidl("");
+
+ $self->pidl("status = dcerpc_binding_handle_call(h,");
+ $self->pidl("\t\tNULL, &ndr_table_$if->{NAME},");
+ $self->pidl("\t\tNDR_$uname, mem_ctx, r);");
+ $self->pidl("");
+ $self->pidl("return status;");
+
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub ElementDirection($)
+{
+ my ($e) = @_;
+
+ return "[in,out]" if (has_property($e, "in") and has_property($e, "out"));
+ return "[in]" if (has_property($e, "in"));
+ return "[out]" if (has_property($e, "out"));
+ return "[in,out]";
+}
+
+sub HeaderProperties($$)
+{
+ my($props,$ignores) = @_;
+ my $ret = "";
+
+ foreach my $d (sort(keys %{$props})) {
+ next if (grep(/^$d$/, @$ignores));
+ if($props->{$d} ne "1") {
+ $ret.= "$d($props->{$d}),";
+ } else {
+ $ret.="$d,";
+ }
+ }
+
+ if ($ret) {
+ return "[" . substr($ret, 0, -1) . "]";
+ }
+}
+
+sub ParseCopyArgument($$$$$)
+{
+ my ($self, $fn, $e, $r, $i) = @_;
+ my $l = $e->{LEVELS}[0];
+
+ if ($l->{TYPE} eq "ARRAY" and $l->{IS_FIXED} == 1) {
+ $self->pidl("memcpy(${r}$e->{NAME}, ${i}$e->{NAME}, sizeof(${r}$e->{NAME}));");
+ } else {
+ $self->pidl("${r}$e->{NAME} = ${i}$e->{NAME};");
+ }
+}
+
+sub ParseInvalidResponse($$)
+{
+ my ($self, $type) = @_;
+
+ if ($type eq "sync") {
+ $self->pidl("return NT_STATUS_INVALID_NETWORK_RESPONSE;");
+ } elsif ($type eq "async") {
+ $self->pidl("tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);");
+ $self->pidl("return;");
+ } else {
+ die("ParseInvalidResponse($type)");
+ }
+}
+
+sub ParseOutputArgument($$$$$$)
+{
+ my ($self, $fn, $e, $r, $o, $invalid_response_type) = @_;
+ my $level = 0;
+
+ if ($e->{LEVELS}[0]->{TYPE} ne "POINTER" and $e->{LEVELS}[0]->{TYPE} ne "ARRAY") {
+ fatal($e->{ORIGINAL}, "[out] argument is not a pointer or array");
+ return;
+ }
+
+ if ($e->{LEVELS}[0]->{TYPE} eq "POINTER") {
+ $level = 1;
+ if ($e->{LEVELS}[0]->{POINTER_TYPE} ne "ref") {
+ $self->pidl("if ($o$e->{NAME} && ${r}out.$e->{NAME}) {");
+ $self->indent;
+ }
+ }
+
+ if ($e->{LEVELS}[$level]->{TYPE} eq "ARRAY") {
+ # This is a call to GenerateFunctionInEnv intentionally.
+ # Since the data is being copied into a user-provided data
+ # structure, the user should be able to know the size beforehand
+ # to allocate a structure of the right size.
+ my $in_env = GenerateFunctionInEnv($fn, $r);
+ my $out_env = GenerateFunctionOutEnv($fn, $r);
+ my $l = $e->{LEVELS}[$level];
+
+ my $in_var = undef;
+ if (grep(/in/, @{$e->{DIRECTION}})) {
+ $in_var = ParseExpr($e->{NAME}, $in_env, $e->{ORIGINAL});
+ }
+ my $out_var = ParseExpr($e->{NAME}, $out_env, $e->{ORIGINAL});
+
+ my $in_size_is = undef;
+ my $out_size_is = undef;
+ my $out_length_is = undef;
+
+ my $avail_len = undef;
+ my $needed_len = undef;
+
+ $self->pidl("{");
+ $self->indent;
+ my $copy_len_var = "_copy_len_$e->{NAME}";
+ $self->pidl("size_t $copy_len_var;");
+
+ if (not defined($l->{SIZE_IS})) {
+ if (not $l->{IS_ZERO_TERMINATED}) {
+ fatal($e->{ORIGINAL}, "no size known for [out] array `$e->{NAME}'");
+ }
+ if (has_property($e, "charset")) {
+ $avail_len = "ndr_charset_length($in_var, CH_UNIX)";
+ $needed_len = "ndr_charset_length($out_var, CH_UNIX)";
+ } else {
+ $avail_len = "ndr_string_length($in_var, sizeof(*$in_var))";
+ $needed_len = "ndr_string_length($out_var, sizeof(*$out_var))";
+ }
+ $in_size_is = "";
+ $out_size_is = "";
+ $out_length_is = "";
+ } else {
+ $in_size_is = ParseExpr($l->{SIZE_IS}, $in_env, $e->{ORIGINAL});
+ $out_size_is = ParseExpr($l->{SIZE_IS}, $out_env, $e->{ORIGINAL});
+ $out_length_is = $out_size_is;
+ if (defined($l->{LENGTH_IS})) {
+ $out_length_is = ParseExpr($l->{LENGTH_IS}, $out_env, $e->{ORIGINAL});
+ }
+ if (has_property($e, "charset")) {
+ if (defined($in_var)) {
+ $avail_len = "ndr_charset_length($in_var, CH_UNIX)";
+ } else {
+ $avail_len = $out_length_is;
+ }
+ $needed_len = "ndr_charset_length($out_var, CH_UNIX)";
+ }
+ }
+
+ if ($out_size_is ne $in_size_is) {
+ $self->pidl("if (($out_size_is) > ($in_size_is)) {");
+ $self->indent;
+ $self->ParseInvalidResponse($invalid_response_type);
+ $self->deindent;
+ $self->pidl("}");
+ }
+ if ($out_length_is ne $out_size_is) {
+ $self->pidl("if (($out_length_is) > ($out_size_is)) {");
+ $self->indent;
+ $self->ParseInvalidResponse($invalid_response_type);
+ $self->deindent;
+ $self->pidl("}");
+ }
+ if (defined($needed_len)) {
+ $self->pidl("$copy_len_var = $needed_len;");
+ $self->pidl("if ($copy_len_var > $avail_len) {");
+ $self->indent;
+ $self->ParseInvalidResponse($invalid_response_type);
+ $self->deindent;
+ $self->pidl("}");
+ } else {
+ $self->pidl("$copy_len_var = $out_length_is;");
+ }
+
+ my $dest_ptr = "$o$e->{NAME}";
+ my $elem_size = "sizeof(*$dest_ptr)";
+ $self->pidl("if ($dest_ptr != $out_var) {");
+ $self->indent;
+ if (has_property($e, "charset")) {
+ $dest_ptr = "discard_const_p(uint8_t *, $dest_ptr)";
+ }
+ $self->pidl("memcpy($dest_ptr, $out_var, $copy_len_var * $elem_size);");
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->deindent;
+ $self->pidl("}");
+ } else {
+ $self->pidl("*$o$e->{NAME} = *${r}out.$e->{NAME};");
+ }
+
+ if ($e->{LEVELS}[0]->{TYPE} eq "POINTER") {
+ if ($e->{LEVELS}[0]->{POINTER_TYPE} ne "ref") {
+ $self->deindent;
+ $self->pidl("}");
+ }
+ }
+}
+
+#####################################################################
+# emit the tevent request state struct for the composite
+# dcerpc_<name>_send() call, plus the forward declaration of its
+# done-callback
+sub ParseFunction_State($$$$)
+{
+	my ($self, $if, $fn, $name) = @_;
+
+	my $state_str = "struct dcerpc_$name\_state";
+	my $done_fn = "dcerpc_$name\_done";
+
+	$self->pidl("$state_str {");
+	$self->indent;
+	# orig holds the caller-visible arguments; tmp is the working copy
+	# handed to the raw dispatch so the wire layer never clobbers orig.
+	$self->pidl("struct $name orig;");
+	$self->pidl("struct $name tmp;");
+	$self->pidl("TALLOC_CTX *out_mem_ctx;");
+	$self->deindent;
+	$self->pidl("};");
+	$self->pidl("");
+	$self->pidl("static void $done_fn(struct tevent_req *subreq);");
+	$self->pidl("");
+}
+
+#####################################################################
+# emit dcerpc_<name>_send(): copies the in/out arguments into the
+# request state, allocates a dedicated out-memory context when any
+# non-pipe [out] parameter exists, and dispatches the raw _r_send call
+sub ParseFunction_Send($$$$)
+{
+	my ($self, $if, $fn, $name) = @_;
+
+	my $fn_args = "";
+	my $state_str = "struct dcerpc_$name\_state";
+	my $done_fn = "dcerpc_$name\_done";
+	my $out_mem_ctx = "dcerpc_$name\_out_memory";
+	my $fn_str = "struct tevent_req *dcerpc_$name\_send";
+	my $pad = genpad($fn_str);
+
+	$fn_args .= "TALLOC_CTX *mem_ctx";
+	$fn_args .= ",\n" . $pad . "struct tevent_context *ev";
+	$fn_args .= ",\n" . $pad . "struct dcerpc_binding_handle *h";
+
+	# one C parameter per IDL element, annotated with its direction
+	foreach (@{$fn->{ELEMENTS}}) {
+		my $dir = ElementDirection($_);
+		my $prop = HeaderProperties($_->{PROPERTIES}, ["in", "out"]);
+		$fn_args .= ",\n" . $pad . DeclLong($_, "_") . " /* $dir $prop */";
+	}
+
+	$self->fn_declare("$fn_str($fn_args)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("struct tevent_req *req;");
+	$self->pidl("$state_str *state;");
+	$self->pidl("struct tevent_req *subreq;");
+	$self->pidl("");
+	$self->pidl("req = tevent_req_create(mem_ctx, &state,");
+	$self->pidl("\t\t\t$state_str);");
+	$self->pidl("if (req == NULL) {");
+	$self->indent;
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("state->out_mem_ctx = NULL;");
+	$self->pidl("");
+
+	$self->pidl("/* In parameters */");
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless (grep(/in/, @{$e->{DIRECTION}}));
+
+		$self->ParseCopyArgument($fn, $e, "state->orig.in.", "_");
+	}
+	$self->pidl("");
+
+	# count the [out] parameters that are not pipes; only those need
+	# an out-memory context for the copy-back in the done callback
+	my $out_params = 0;
+	$self->pidl("/* Out parameters */");
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless grep(/out/, @{$e->{DIRECTION}});
+
+		$self->ParseCopyArgument($fn, $e, "state->orig.out.", "_");
+
+		next if ContainsPipe($e, $e->{LEVELS}[0]);
+
+		$out_params++;
+	}
+	$self->pidl("");
+
+	if (defined($fn->{RETURN_TYPE})) {
+		$self->pidl("/* Result */");
+		$self->pidl("ZERO_STRUCT(state->orig.out.result);");
+		$self->pidl("");
+	}
+
+	if ($out_params > 0) {
+		$self->pidl("state->out_mem_ctx = talloc_named_const(state, 0,");
+		$self->pidl("\t\t \"$out_mem_ctx\");");
+		$self->pidl("if (tevent_req_nomem(state->out_mem_ctx, req)) {");
+		$self->indent;
+		$self->pidl("return tevent_req_post(req, ev);");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+	}
+
+	$self->pidl("/* make a temporary copy, that we pass to the dispatch function */");
+	$self->pidl("state->tmp = state->orig;");
+	$self->pidl("");
+
+	$self->pidl("subreq = dcerpc_$name\_r_send(state, ev, h, &state->tmp);");
+	$self->pidl("if (tevent_req_nomem(subreq, req)) {");
+	$self->indent;
+	$self->pidl("return tevent_req_post(req, ev);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("tevent_req_set_callback(subreq, $done_fn, req);");
+	$self->pidl("return req;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+#####################################################################
+# emit the dcerpc_<name>_done() callback: receives the raw reply,
+# copies [out] parameters and the result back into state->orig, then
+# completes the request
+sub ParseFunction_Done($$$$)
+{
+	my ($self, $if, $fn, $name) = @_;
+
+	my $state_str = "struct dcerpc_$name\_state";
+	my $done_fn = "dcerpc_$name\_done";
+
+	$self->pidl("static void $done_fn(struct tevent_req *subreq)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("struct tevent_req *req = tevent_req_callback_data(");
+	$self->pidl("\tsubreq, struct tevent_req);");
+	$self->pidl("$state_str *state = tevent_req_data(");
+	$self->pidl("\treq, $state_str);");
+	$self->pidl("NTSTATUS status;");
+	$self->pidl("TALLOC_CTX *mem_ctx;");
+	$self->pidl("");
+
+	# out_mem_ctx is only allocated when there are [out] parameters
+	# (see ParseFunction_Send); otherwise state itself is the context
+	$self->pidl("if (state->out_mem_ctx) {");
+	$self->indent;
+	$self->pidl("mem_ctx = state->out_mem_ctx;");
+	$self->deindent;
+	$self->pidl("} else {");
+	$self->indent;
+	$self->pidl("mem_ctx = state;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	$self->pidl("status = dcerpc_$name\_r_recv(subreq, mem_ctx);");
+	$self->pidl("TALLOC_FREE(subreq);");
+	$self->pidl("if (tevent_req_nterror(req, status)) {");
+	$self->indent;
+	$self->pidl("return;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	$self->pidl("/* Copy out parameters */");
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next if ContainsPipe($e, $e->{LEVELS}[0]);
+		next unless (grep(/out/, @{$e->{DIRECTION}}));
+
+		$self->ParseOutputArgument($fn, $e,
+					   "state->tmp.",
+					   "state->orig.out.",
+					   "async");
+	}
+	$self->pidl("");
+
+	if (defined($fn->{RETURN_TYPE})) {
+		$self->pidl("/* Copy result */");
+		$self->pidl("state->orig.out.result = state->tmp.out.result;");
+		$self->pidl("");
+	}
+
+	$self->pidl("/* Reset temporary structure */");
+	$self->pidl("ZERO_STRUCT(state->tmp);");
+	$self->pidl("");
+
+	$self->pidl("tevent_req_done(req);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+#####################################################################
+# emit dcerpc_<name>_recv(): reports the request status, steals the
+# out-memory context onto the caller's mem_ctx and returns the result
+sub ParseFunction_Recv($$$$)
+{
+	my ($self, $if, $fn, $name) = @_;
+
+	my $fn_args = "";
+	my $state_str = "struct dcerpc_$name\_state";
+	my $fn_str = "NTSTATUS dcerpc_$name\_recv";
+	my $pad = genpad($fn_str);
+
+	$fn_args .= "struct tevent_req *req,\n" . $pad . "TALLOC_CTX *mem_ctx";
+
+	if (defined($fn->{RETURN_TYPE})) {
+		$fn_args .= ",\n" . $pad . mapTypeName($fn->{RETURN_TYPE}). " *result";
+	}
+
+	$self->fn_declare("$fn_str($fn_args)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("$state_str *state = tevent_req_data(");
+	$self->pidl("\treq, $state_str);");
+	$self->pidl("NTSTATUS status;");
+	$self->pidl("");
+	$self->pidl("if (tevent_req_is_nterror(req, &status)) {");
+	$self->indent;
+	$self->pidl("tevent_req_received(req);");
+	$self->pidl("return status;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	$self->pidl("/* Steal possible out parameters to the callers context */");
+	$self->pidl("talloc_steal(mem_ctx, state->out_mem_ctx);");
+	$self->pidl("");
+
+	if (defined($fn->{RETURN_TYPE})) {
+		$self->pidl("/* Return result */");
+		$self->pidl("*result = state->orig.out.result;");
+		$self->pidl("");
+	}
+
+	$self->pidl("tevent_req_received(req);");
+	$self->pidl("return NT_STATUS_OK;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+#####################################################################
+# emit the synchronous wrapper dcerpc_<name>(): fills a local request
+# struct, calls the raw dcerpc_<name>_r() and copies back the [out]
+# parameters and result.  Functions using pipes are skipped with a
+# comment in both generated files.
+sub ParseFunction_Sync($$$$)
+{
+	my ($self, $if, $fn, $name) = @_;
+
+	if ($self->ParseFunctionHasPipes($fn)) {
+		$self->pidl_both("/*");
+		$self->pidl_both(" * The following function is skipped because");
+		$self->pidl_both(" * it uses pipes:");
+		$self->pidl_both(" *");
+		$self->pidl_both(" * dcerpc_$name()");
+		$self->pidl_both(" */");
+		$self->pidl_both("");
+		return;
+	}
+
+	my $uname = uc $name;
+	my $fn_args = "";
+	my $fn_str = "NTSTATUS dcerpc_$name";
+	my $pad = genpad($fn_str);
+
+	$fn_args .= "struct dcerpc_binding_handle *h,\n" . $pad . "TALLOC_CTX *mem_ctx";
+
+	# one C parameter per IDL element, annotated with its direction
+	foreach (@{$fn->{ELEMENTS}}) {
+		my $dir = ElementDirection($_);
+		my $prop = HeaderProperties($_->{PROPERTIES}, ["in", "out"]);
+		$fn_args .= ",\n" . $pad . DeclLong($_, "_") . " /* $dir $prop */";
+	}
+
+	if (defined($fn->{RETURN_TYPE})) {
+		$fn_args .= ",\n" . $pad . mapTypeName($fn->{RETURN_TYPE}). " *result";
+	}
+
+	$self->fn_declare("$fn_str($fn_args)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("struct $name r;");
+	$self->pidl("NTSTATUS status;");
+	$self->pidl("");
+
+	$self->pidl("/* In parameters */");
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless (grep(/in/, @{$e->{DIRECTION}}));
+
+		$self->ParseCopyArgument($fn, $e, "r.in.", "_");
+	}
+	$self->pidl("");
+
+	$self->pidl("/* Out parameters */");
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless grep(/out/, @{$e->{DIRECTION}});
+
+		$self->ParseCopyArgument($fn, $e, "r.out.", "_");
+	}
+	$self->pidl("");
+
+	if (defined($fn->{RETURN_TYPE})) {
+		$self->pidl("/* Result */");
+		$self->pidl("ZERO_STRUCT(r.out.result);");
+		$self->pidl("");
+	}
+
+	$self->pidl("status = dcerpc_$name\_r(h, mem_ctx, &r);");
+	$self->pidl("if (!NT_STATUS_IS_OK(status)) {");
+	$self->indent;
+	$self->pidl("return status;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	$self->pidl("/* Return variables */");
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next if ContainsPipe($e, $e->{LEVELS}[0]);
+		next unless (grep(/out/, @{$e->{DIRECTION}}));
+
+		$self->ParseOutputArgument($fn, $e, "r.", "_", "sync");
+	}
+	$self->pidl("");
+
+	$self->pidl("/* Return result */");
+	if ($fn->{RETURN_TYPE}) {
+		$self->pidl("*result = r.out.result;");
+	}
+	$self->pidl("");
+
+	$self->pidl("return NT_STATUS_OK;");
+
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+#####################################################################
+# parse a function: emit the raw (_r_*) wrappers, then - unless an
+# [out] argument cannot be represented - the composite async
+# (send/done/recv) and synchronous client wrappers
+sub ParseFunction($$$)
+{
+	my ($self, $if, $fn) = @_;
+
+	# pipe-using functions are skipped entirely; a comment listing the
+	# omitted symbols is written to both generated files instead
+	if ($self->ParseFunctionHasPipes($fn)) {
+		$self->pidl_both("/*");
+		$self->pidl_both(" * The following function is skipped because");
+		$self->pidl_both(" * it uses pipes:");
+		$self->pidl_both(" *");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}_r_send()");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}_r_recv()");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}_r()");
+		$self->pidl_both(" *");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}_send()");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}_recv()");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}()");
+		$self->pidl_both(" */");
+		$self->pidl_both("");
+		warning($fn->{ORIGINAL}, "$fn->{NAME}: dcerpc client does not support pipe yet");
+		return;
+	}
+
+	$self->ParseFunction_r_State($if, $fn, $fn->{NAME});
+	$self->ParseFunction_r_Send($if, $fn, $fn->{NAME});
+	$self->ParseFunction_r_Done($if, $fn, $fn->{NAME});
+	$self->ParseFunction_r_Recv($if, $fn, $fn->{NAME});
+	$self->ParseFunction_r_Sync($if, $fn, $fn->{NAME});
+
+	# reject [out] arguments whose value cannot be copied back to the
+	# caller (non-pointer/non-array, pointer to string, unsized array)
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless (grep(/out/, @{$e->{DIRECTION}}));
+
+		my $reason = "is not a pointer or array";
+
+		# TODO: make this fatal at NDR level
+		if ($e->{LEVELS}[0]->{TYPE} eq "POINTER") {
+			if ($e->{LEVELS}[1]->{TYPE} eq "DATA" and
+			    $e->{LEVELS}[1]->{DATA_TYPE} eq "string") {
+				$reason = "is a pointer to type 'string'";
+			} elsif ($e->{LEVELS}[1]->{TYPE} eq "ARRAY" and
+				 $e->{LEVELS}[1]->{IS_ZERO_TERMINATED}) {
+				next;
+			} elsif ($e->{LEVELS}[1]->{TYPE} eq "ARRAY" and
+				 not defined($e->{LEVELS}[1]->{SIZE_IS})) {
+				$reason = "is a pointer to an unsized array";
+			} else {
+				next;
+			}
+		}
+		if ($e->{LEVELS}[0]->{TYPE} eq "ARRAY") {
+			if (not defined($e->{LEVELS}[0]->{SIZE_IS})) {
+				$reason = "is an unsized array";
+			} else {
+				next;
+			}
+		}
+
+		$self->pidl_both("/*");
+		$self->pidl_both(" * The following functions are skipped because");
+		$self->pidl_both(" * an [out] argument $e->{NAME} $reason:");
+		$self->pidl_both(" *");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}_send()");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}_recv()");
+		$self->pidl_both(" * dcerpc_$fn->{NAME}()");
+		$self->pidl_both(" */");
+		$self->pidl_both("");
+
+		error($e->{ORIGINAL}, "$fn->{NAME}: [out] argument '$e->{NAME}' $reason, skip client functions");
+		return;
+	}
+
+	$self->ParseFunction_State($if, $fn, $fn->{NAME});
+	$self->ParseFunction_Send($if, $fn, $fn->{NAME});
+	$self->ParseFunction_Done($if, $fn, $fn->{NAME});
+	$self->ParseFunction_Recv($if, $fn, $fn->{NAME});
+	$self->ParseFunction_Sync($if, $fn, $fn->{NAME});
+
+	$self->pidl_hdr("");
+}
+
+# Function names already emitted; ParseInterface consults this so a
+# function shared between interfaces is only generated once.
+my %done;
+
+#####################################################################
+# parse the interface definitions: wrap the header output in an
+# include guard, declare the ndr interface table when the interface
+# has a uuid, and generate client functions for each eligible
+# function (skipping duplicates, [noopnum] and [todo] functions).
+# Fix: removed the unused local $ifu (uc'd interface name) that was
+# computed but never referenced.
+sub ParseInterface($$)
+{
+	my ($self, $if) = @_;
+
+	$self->pidl_hdr("#ifndef _HEADER_RPC_$if->{NAME}");
+	$self->pidl_hdr("#define _HEADER_RPC_$if->{NAME}");
+	$self->pidl_hdr("");
+
+	if (defined $if->{PROPERTIES}->{uuid}) {
+		$self->pidl_hdr("extern const struct ndr_interface_table ndr_table_$if->{NAME};");
+		$self->pidl_hdr("");
+	}
+
+	$self->pidl("/* $if->{NAME} - client functions generated by pidl */");
+	$self->pidl("");
+
+	foreach my $fn (@{$if->{FUNCTIONS}}) {
+		next if defined($done{$fn->{NAME}});
+		next if has_property($fn, "noopnum");
+		next if has_property($fn, "todo");
+		$self->ParseFunction($if, $fn);
+		$done{$fn->{NAME}} = 1;
+	}
+
+	$self->pidl_hdr("#endif /* _HEADER_RPC_$if->{NAME} */");
+}
+
+#####################################################################
+# generate the client source and header for every interface in the
+# NDR tree; returns the (source, header) text pair.
+# Fix: prototype corrected from ($$$$$$) to ($$$$$) to match the five
+# arguments actually unpacked below (prototypes are ignored on method
+# calls, but the declaration should agree with the implementation).
+sub Parse($$$$$)
+{
+	my($self,$ndr,$header,$ndr_header,$client_header) = @_;
+
+	$self->pidl("/* client functions auto-generated by pidl */");
+	$self->pidl("");
+	# out-of-tree builds need the portability includes spelled out
+	if (is_intree()) {
+		$self->pidl("#include \"includes.h\"");
+	} else {
+		$self->pidl("#ifndef _GNU_SOURCE");
+		$self->pidl("#define _GNU_SOURCE");
+		$self->pidl("#endif");
+		$self->pidl("#include <stdio.h>");
+		$self->pidl("#include <stdbool.h>");
+		$self->pidl("#include <stdlib.h>");
+		$self->pidl("#include <stdint.h>");
+		$self->pidl("#include <stdarg.h>");
+		$self->pidl("#include <string.h>");
+		$self->pidl("#include <core/ntstatus.h>");
+	}
+	$self->pidl("#include <tevent.h>");
+	$self->pidl(choose_header("lib/util/tevent_ntstatus.h", "util/tevent_ntstatus.h")."");
+	$self->pidl("#include \"$ndr_header\"");
+	$self->pidl("#include \"$client_header\"");
+	$self->pidl("");
+
+	$self->pidl_hdr(choose_header("librpc/rpc/dcerpc.h", "dcerpc.h")."");
+	$self->pidl_hdr("#include \"$header\"");
+
+	foreach my $x (@{$ndr}) {
+		($x->{TYPE} eq "INTERFACE") && $self->ParseInterface($x);
+	}
+
+	return ($self->{res},$self->{res_hdr});
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Parser.pm b/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Parser.pm
new file mode 100644
index 0000000..cfcd29e
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Parser.pm
@@ -0,0 +1,3224 @@
+###################################################
+# Samba4 NDR parser generator for IDL structures
+# Copyright tridge@samba.org 2000-2003
+# Copyright tpot@samba.org 2001
+# Copyright jelmer@samba.org 2004-2006
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::NDR::Parser;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(check_null_pointer NeededFunction NeededElement NeededType $res NeededInterface TypeFunctionName ParseElementPrint);
+
+use strict;
+use Parse::Pidl::Typelist qw(hasType getType mapTypeName typeHasBody);
+use Parse::Pidl::Util qw(has_property ParseExpr ParseExprExt print_uuid unmake_str);
+use Parse::Pidl::CUtil qw(get_pointer_to get_value_of get_array_element);
+use Parse::Pidl::NDR qw(GetPrevLevel GetNextLevel ContainsDeferred ContainsPipe is_charset_array);
+use Parse::Pidl::Samba4 qw(is_intree choose_header ArrayDynamicallyAllocated);
+use Parse::Pidl::Samba4::Header qw(GenerateFunctionInEnv GenerateFunctionOutEnv EnvSubstituteValue GenerateStructEnv);
+use Parse::Pidl qw(warning);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+# list of known type families (struct/union/enum/...); maps a family
+# name to its table of push/pull/print/size handler functions
+my %typefamily;
+
+# Construct a generator instance with empty source/header output
+# buffers, an empty deferred-output queue and zero indentation.
+sub new($$) {
+	my ($class) = @_;
+	my $self = { res => "", res_hdr => "", deferred => [], tabs => "", defer_tabs => "" };
+	bless($self, $class);
+}
+
+# Look up the handler table registered for a type family name.
+sub get_typefamily($)
+{
+	my ($family) = @_;
+	return $typefamily{$family};
+}
+
+# Decide whether $var_name must be dereferenced when used as a value:
+# walks the element's levels and returns get_value_of($var_name) for
+# dynamically allocated arrays (not behind a pointer) and for
+# reference-scalar data types, otherwise returns $var_name unchanged.
+sub append_prefix($$)
+{
+	my ($e, $var_name) = @_;
+	my $pointers = 0;
+	my $arrays = 0;
+
+	foreach my $l (@{$e->{LEVELS}}) {
+		if ($l->{TYPE} eq "POINTER") {
+			$pointers++;
+		} elsif ($l->{TYPE} eq "ARRAY") {
+			$arrays++;
+			# a top-level array that is neither fixed nor inline
+			# is stored as a pointer and must be dereferenced
+			if (($pointers == 0) and
+			    (not $l->{IS_FIXED}) and
+			    (not $l->{IS_INLINE})) {
+				return get_value_of($var_name);
+			}
+		} elsif ($l->{TYPE} eq "DATA") {
+			if (Parse::Pidl::Typelist::scalar_is_reference($l->{DATA_TYPE})) {
+				return get_value_of($var_name) unless ($pointers or $arrays);
+			}
+		}
+	}
+
+	return $var_name;
+}
+
+# Return true when the array level $l of element $e can be pushed or
+# pulled with one of the fast C helpers (ndr_push_array_uint8 etc.)
+# instead of a per-element loop.
+# Fix: the original "return (A) or (B);" parses as "(return A) or B"
+# because "or" binds more loosely than "return", so the string check
+# was dead code and the sub always returned the uint8 comparison.
+# The whole expression is now parenthesised.
+sub has_fast_array($$)
+{
+	my ($e,$l) = @_;
+
+	return 0 if ($l->{TYPE} ne "ARRAY");
+
+	my $nl = GetNextLevel($e,$l);
+	return 0 unless ($nl->{TYPE} eq "DATA");
+	return 0 unless (hasType($nl->{DATA_TYPE}));
+
+	my $t = getType($nl->{DATA_TYPE});
+
+	# Only uint8 and string have fast array functions at the moment
+	return (($t->{NAME} eq "uint8") or ($t->{NAME} eq "string"));
+}
+
+
+####################################
+# pidl() appends one line of generated source: the current indentation
+# plus $line when it is non-empty, always followed by a newline.
+sub pidl($$)
+{
+	my ($self, $line) = @_;
+	$self->{res} .= $self->{tabs} . $line if ($line);
+	$self->{res} .= "\n";
+}
+
+# pidl_hdr() appends one line to the generated header buffer (no indentation).
+sub pidl_hdr($$) { my ($self, $d) = @_; $self->{res_hdr} .= "$d\n"; }
+
+####################################
+# defer() is like pidl(), but adds to
+# a deferred buffer which is then added to the
+# output buffer at the end of the structure/union/function
+# This is needed to cope with code that must be pushed back
+# to the end of a block of elements
+# Grow/shrink by one tab the indentation applied to defer()'d lines.
+sub defer_indent($) { my ($self) = @_; $self->{defer_tabs}.="\t"; }
+sub defer_deindent($) { my ($self) = @_; $self->{defer_tabs}=substr($self->{defer_tabs}, 0, -1); }
+
+sub defer($$)
+{
+	my ($self, $line) = @_;
+	# empty lines are dropped rather than queued
+	return unless ($line);
+	push(@{$self->{deferred}}, $self->{defer_tabs} . $line);
+}
+
+########################################
+# add the deferred content to the current
+# output, then reset the deferred queue and
+# its indentation state
+sub add_deferred($)
+{
+	my ($self) = @_;
+	foreach my $line (@{$self->{deferred}}) {
+		$self->pidl($line);
+	}
+	$self->{deferred} = [];
+	$self->{defer_tabs} = "";
+}
+
+# Push one tab stop onto the current output indentation.
+sub indent($)
+{
+	my ($self) = @_;
+	$self->{tabs} = $self->{tabs} . "\t";
+}
+
+# Drop one tab stop from the current output indentation.
+sub deindent($)
+{
+	my ($self) = @_;
+	$self->{tabs} = substr($self->{tabs}, 0, length($self->{tabs}) - 1);
+}
+
+#####################################################################
+# declare a function public or static, depending on its attributes.
+# Returns 1 when a body should be emitted in the source file, 0 when
+# the [no<type>] property suppresses generation (prototype only).
+sub fn_declare($$$$)
+{
+	my ($self,$type,$fn,$decl) = @_;
+
+	# e.g. [nopush]/[nopull]: only the prototype goes to the header
+	if (has_property($fn, "no$type")) {
+		$self->pidl_hdr("$decl;");
+		return 0;
+	}
+
+	if (has_property($fn, "public")) {
+		$self->pidl_hdr("$decl;");
+		$self->pidl("_PUBLIC_ $decl");
+	} else {
+		$self->pidl("static $decl");
+	}
+
+	return 1;
+}
+
+###################################################################
+# setup any special flags for an element or structure: opens a C
+# block that saves the current ndr flags and applies the [flag()]
+# property; must be paired with end_flags()
+sub start_flags($$$)
+{
+	my ($self, $e, $ndr) = @_;
+	my $flags = has_property($e, "flag");
+	if (defined $flags) {
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl("uint32_t _flags_save_$e->{TYPE} = $ndr->flags;");
+		$self->pidl("ndr_set_flags(&$ndr->flags, $flags);");
+	}
+}
+
+###################################################################
+# end any special flags for an element or structure: restores the
+# saved ndr flags and closes the block opened by start_flags()
+sub end_flags($$$)
+{
+	my ($self, $e, $ndr) = @_;
+	my $flags = has_property($e, "flag");
+	if (defined $flags) {
+		$self->pidl("$ndr->flags = _flags_save_$e->{TYPE};");
+		$self->deindent;
+		$self->pidl("}");
+	}
+}
+
+#####################################################################
+# parse the data of an array - push side: emits the conformant size
+# and (for varying arrays) offset/length headers, and returns the
+# C expression for the number of elements to push
+sub ParseArrayPushHeader($$$$$$)
+{
+	my ($self,$e,$l,$ndr,$var_name,$env) = @_;
+
+	my $size;
+	my $length;
+
+	if ($l->{IS_ZERO_TERMINATED}) {
+		# size/length default to the runtime string length, but an
+		# explicit size_is()/length_is() property overrides them
+		if (has_property($e, "charset")) {
+			$size = $length = "ndr_charset_length($var_name, CH_$e->{PROPERTIES}->{charset})";
+		} else {
+			$size = $length = "ndr_string_length($var_name, sizeof(*$var_name))";
+		}
+		if (defined($l->{SIZE_IS})) {
+			$size = ParseExpr($l->{SIZE_IS}, $env, $e);
+		}
+		if (defined($l->{LENGTH_IS})) {
+			$length = ParseExpr($l->{LENGTH_IS}, $env, $e);
+		}
+	} else {
+		$size = ParseExpr($l->{SIZE_IS}, $env, $e);
+		$length = ParseExpr($l->{LENGTH_IS}, $env, $e);
+	}
+
+	# surrounding conformant arrays have their size pushed by the
+	# enclosing struct, not here
+	if ((!$l->{IS_SURROUNDING}) and $l->{IS_CONFORMANT}) {
+		$self->pidl("NDR_CHECK(ndr_push_uint3264($ndr, NDR_SCALARS, $size));");
+	}
+
+	if ($l->{IS_VARYING}) {
+		$self->pidl("NDR_CHECK(ndr_push_uint3264($ndr, NDR_SCALARS, 0));");	# array offset
+		$self->pidl("NDR_CHECK(ndr_push_uint3264($ndr, NDR_SCALARS, $length));");
+	}
+
+	return $length;
+}
+
+# Build a closure for ParseExprExt that warns when a variable used in
+# a size_is()/length_is() expression still carries pointer levels that
+# should have been dereferenced.  The closure always returns its input
+# unchanged; it only emits a warning.
+sub check_fully_dereferenced($$)
+{
+	my ($element, $env) = @_;
+
+	return sub ($) {
+		my $origvar = shift;
+		my $check = 0;
+
+		# Figure out the number of pointers in $ptr
+		my $expandedvar = $origvar;
+		$expandedvar =~ s/^(\**)//;
+		my $ptr = $1;
+
+		# reverse-map the C expression back to the IDL name
+		my $var = undef;
+		foreach (keys %$env) {
+			if ($env->{$_} eq $expandedvar) {
+				$var = $_;
+				last;
+			}
+		}
+
+		return($origvar) unless (defined($var));
+		my $e;
+		foreach (@{$element->{PARENT}->{ELEMENTS}}) {
+			if ($_->{NAME} eq $var) {
+				$e = $_;
+				last;
+			}
+		}
+
+		$e or die("Environment doesn't match siblings");
+
+		# See if pointer at pointer level $level
+		# needs to be checked.
+		my $nump = 0;
+		foreach (@{$e->{LEVELS}}) {
+			if ($_->{TYPE} eq "POINTER") {
+				$nump = $_->{POINTER_INDEX}+1;
+			}
+		}
+		warning($element->{ORIGINAL}, "Got pointer for `$e->{NAME}', expected fully dereferenced variable") if ($nump > length($ptr));
+		return ($origvar);
+	}
+}
+
+# Build a closure for ParseExprExt that emits (via $print_fn) a NULL
+# check followed by $return for each dereferenced sibling variable
+# whose pointer level is not a ref pointer.  Unknown expressions are
+# checked unconditionally, with a warning.
+sub check_null_pointer($$$$)
+{
+	my ($element, $env, $print_fn, $return) = @_;
+
+	return sub ($) {
+		my $expandedvar = shift;
+		my $check = 0;
+
+		# Figure out the number of pointers in $ptr
+		$expandedvar =~ s/^(\**)//;
+		my $ptr = $1;
+
+		# reverse-map the C expression back to the IDL name
+		my $var = undef;
+		foreach (keys %$env) {
+			if ($env->{$_} eq $expandedvar) {
+				$var = $_;
+				last;
+			}
+		}
+
+		if (defined($var)) {
+			my $e;
+			# lookup ptr in $e
+			foreach (@{$element->{PARENT}->{ELEMENTS}}) {
+				if ($_->{NAME} eq $var) {
+					$e = $_;
+					last;
+				}
+			}
+
+			$e or die("Environment doesn't match siblings");
+
+			# See if pointer at pointer level $level
+			# needs to be checked.
+			foreach my $l (@{$e->{LEVELS}}) {
+				if ($l->{TYPE} eq "POINTER" and
+					$l->{POINTER_INDEX} == length($ptr)) {
+					# No need to check ref pointers
+					$check = ($l->{POINTER_TYPE} ne "ref");
+					last;
+				}
+
+				if ($l->{TYPE} eq "DATA") {
+					warning($element, "too much dereferences for `$var'");
+				}
+			}
+		} else {
+			warning($element, "unknown dereferenced expression `$expandedvar'");
+			$check = 1;
+		}
+
+		$print_fn->("if ($ptr$expandedvar == NULL) $return") if $check;
+	}
+}
+
+sub is_deferred_switch_non_empty($)
+{
+	# 1 if there needs to be a deferred branch in an ndr_pull/push,
+	# 0 otherwise.
+	my ($e) = @_;
+	my $default_seen = 0;
+	foreach my $arm (@{$e->{ELEMENTS}}) {
+		$default_seen = 1 if ($arm->{CASE} eq "default");
+		next if ($arm->{TYPE} eq "EMPTY");
+		return 1 if (ContainsDeferred($arm, $arm->{LEVELS}[0]));
+	}
+	# no deferred data anywhere: a branch is still needed unless a
+	# default case covers the remaining values
+	return ! $default_seen;
+}
+
+# Emit code that stores the wire (or computed) array size in a local
+# size_<name>_<level> variable, emit a [range()] check if requested,
+# and return the name of that variable.
+sub ParseArrayPullGetSize($$$$$$)
+{
+	my ($self,$e,$l,$ndr,$var_name,$env) = @_;
+
+	my $size;
+
+	if ($l->{IS_CONFORMANT}) {
+		$size = "ndr_get_array_size($ndr, " . get_pointer_to($var_name) . ")";
+	} elsif ($l->{IS_ZERO_TERMINATED} and $l->{SIZE_IS} == 0 and $l->{LENGTH_IS} == 0) { # Noheader arrays
+		$size = "ndr_get_string_size($ndr, sizeof(*$var_name))";
+	} else {
+		# size_is() expression; NULL-check any dereferenced vars
+		$size = ParseExprExt($l->{SIZE_IS}, $env, $e->{ORIGINAL},
+			check_null_pointer($e, $env, sub { $self->pidl(shift); },
+					   "return ndr_pull_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL Pointer for size_is()\");"),
+			check_fully_dereferenced($e, $env));
+	}
+
+	$self->pidl("size_$e->{NAME}_$l->{LEVEL_INDEX} = $size;");
+	my $array_size = "size_$e->{NAME}_$l->{LEVEL_INDEX}";
+
+	if (my $range = has_property($e, "range")) {
+		my ($low, $high) = split(/,/, $range, 2);
+		if ($low < 0) {
+			warning(0, "$low is invalid for the range of an array size");
+		}
+		if ($low == 0) {
+			$self->pidl("if ($array_size > $high) {");
+		} else {
+			$self->pidl("if ($array_size < $low || $array_size > $high) {");
+		}
+		$self->pidl("\treturn ndr_pull_error($ndr, NDR_ERR_RANGE, \"value out of range\");");
+		$self->pidl("}");
+	}
+
+	return $array_size;
+}
+
+#####################################################################
+# parse an array - pull side: emit code storing the wire length of a
+# varying array in a local length_<name>_<level> variable (with an
+# optional [range()] check) and return that variable's name; for
+# non-varying arrays the size expression doubles as the length
+sub ParseArrayPullGetLength($$$$$$;$)
+{
+	my ($self,$e,$l,$ndr,$var_name,$env,$array_size) = @_;
+
+	if (not defined($array_size)) {
+		$array_size = $self->ParseArrayPullGetSize($e, $l, $ndr, $var_name, $env);
+	}
+
+	if (not $l->{IS_VARYING}) {
+		return $array_size;
+	}
+
+	my $length = "ndr_get_array_length($ndr, " . get_pointer_to($var_name) .")";
+	$self->pidl("length_$e->{NAME}_$l->{LEVEL_INDEX} = $length;");
+	my $array_length = "length_$e->{NAME}_$l->{LEVEL_INDEX}";
+
+	if (my $range = has_property($e, "range")) {
+		my ($low, $high) = split(/,/, $range, 2);
+		if ($low < 0) {
+			warning(0, "$low is invalid for the range of an array size");
+		}
+		if ($low == 0) {
+			$self->pidl("if ($array_length > $high) {");
+		} else {
+			$self->pidl("if ($array_length < $low || $array_length > $high) {");
+		}
+		$self->pidl("\treturn ndr_pull_error($ndr, NDR_ERR_RANGE, \"value out of range\");");
+		$self->pidl("}");
+	}
+
+	return $array_length;
+}
+
+#####################################################################
+# parse an array - pull side: reads the conformant/varying headers,
+# validates length <= size, defers size_is()/length_is() consistency
+# checks until the referenced fields are available, and allocates the
+# array storage where needed.  Returns the length expression.
+sub ParseArrayPullHeader($$$$$$)
+{
+	my ($self,$e,$l,$ndr,$var_name,$env) = @_;
+
+	if ((!$l->{IS_SURROUNDING}) and $l->{IS_CONFORMANT}) {
+		$self->pidl("NDR_CHECK(ndr_pull_array_size($ndr, " . get_pointer_to($var_name) . "));");
+	}
+
+	if ($l->{IS_VARYING}) {
+		$self->pidl("NDR_CHECK(ndr_pull_array_length($ndr, " . get_pointer_to($var_name) . "));");
+	}
+
+	my $array_size = $self->ParseArrayPullGetSize($e, $l, $ndr, $var_name, $env);
+	my $array_length = $self->ParseArrayPullGetLength($e, $l, $ndr, $var_name, $env, $array_size);
+
+	if ($array_length ne $array_size) {
+		$self->pidl("if ($array_length > $array_size) {");
+		$self->indent;
+		$self->pidl("return ndr_pull_error($ndr, NDR_ERR_ARRAY_SIZE, \"Bad array size %u should exceed array length %u\", $array_size, $array_length);");
+		$self->deindent;
+		$self->pidl("}");
+	}
+
+	# the size_is()/length_is() expressions may reference fields that
+	# are pulled later, so these checks go into the deferred buffer
+	if ($l->{IS_CONFORMANT} and (defined($l->{SIZE_IS}) or not $l->{IS_ZERO_TERMINATED})) {
+		$self->defer("if ($var_name) {");
+		$self->defer_indent;
+		my $size = ParseExprExt($l->{SIZE_IS}, $env, $e->{ORIGINAL},
+			check_null_pointer($e, $env, sub { $self->defer(shift); },
+					   "return ndr_pull_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL Pointer for size_is()\");"),
+			check_fully_dereferenced($e, $env));
+		$self->defer("NDR_CHECK(ndr_check_array_size($ndr, (void*)" . get_pointer_to($var_name) . ", $size));");
+		$self->defer_deindent;
+		$self->defer("}");
+	}
+
+	if ($l->{IS_VARYING} and (defined($l->{LENGTH_IS}) or not $l->{IS_ZERO_TERMINATED})) {
+		$self->defer("if ($var_name) {");
+		$self->defer_indent;
+		my $length = ParseExprExt($l->{LENGTH_IS}, $env, $e->{ORIGINAL},
+			check_null_pointer($e, $env, sub { $self->defer(shift); },
+					   "return ndr_pull_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL Pointer for length_is()\");"),
+			check_fully_dereferenced($e, $env));
+		$self->defer("NDR_CHECK(ndr_check_array_length($ndr, (void*)" . get_pointer_to($var_name) . ", $length));");
+		$self->defer_deindent;
+		$self->defer("}");
+	}
+
+	# charset arrays are pulled in one go and need no element storage
+	if (ArrayDynamicallyAllocated($e,$l) and not is_charset_array($e,$l)) {
+		$self->AllocateArrayLevel($e,$l,$ndr,$var_name,$array_size);
+	}
+
+	return $array_length;
+}
+
+# Extract the algorithm name (first field) from a [compression(alg,clen,dlen)]
+# level property.
+sub compression_alg($$)
+{
+	my ($e, $l) = @_;
+	my @fields = split(/,/, $l->{COMPRESSION});
+	return $fields[0];
+}
+
+# Resolve the compressed-length expression (second field) of a
+# [compression(alg,clen,dlen)] property against the element environment.
+sub compression_clen($$$)
+{
+	my ($e, $l, $env) = @_;
+	my (undef, $clen, undef) = split(/,/, $l->{COMPRESSION});
+	return ParseExpr($clen, $env, $e->{ORIGINAL});
+}
+
+# Resolve the decompressed-length expression (third field) of a
+# [compression(alg,clen,dlen)] property against the element environment.
+sub compression_dlen($$$)
+{
+	my ($e, $l, $env) = @_;
+	my (undef, undef, $dlen) = split(/,/, $l->{COMPRESSION});
+	return ParseExpr($dlen, $env, $e->{ORIGINAL});
+}
+
+# Open a compression push block: declares a nested ndr_push context
+# <ndr>_compressed and starts compression on it.  Returns the nested
+# context name; must be paired with ParseCompressionPushEnd.
+sub ParseCompressionPushStart($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $comndr = "$ndr\_compressed";
+	my $alg = compression_alg($e, $l);
+	my $dlen = compression_dlen($e, $l, $env);
+
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("struct ndr_push *$comndr;");
+	$self->pidl("NDR_CHECK(ndr_push_compression_start($ndr, &$comndr, $alg, $dlen));");
+
+	return $comndr;
+}
+
+# Close the block opened by ParseCompressionPushStart: finishes the
+# compression and merges the nested context back into $ndr.
+sub ParseCompressionPushEnd($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $comndr = "$ndr\_compressed";
+	my $alg = compression_alg($e, $l);
+	my $dlen = compression_dlen($e, $l, $env);
+
+	$self->pidl("NDR_CHECK(ndr_push_compression_end($ndr, $comndr, $alg, $dlen));");
+	$self->deindent;
+	$self->pidl("}");
+}
+
+# Open a compression pull block: declares a nested ndr_pull context
+# <ndr>_compressed and starts decompression on it (needs both the
+# compressed and decompressed lengths).  Returns the nested context
+# name; must be paired with ParseCompressionPullEnd.
+sub ParseCompressionPullStart($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $comndr = "$ndr\_compressed";
+	my $alg = compression_alg($e, $l);
+	my $dlen = compression_dlen($e, $l, $env);
+	my $clen = compression_clen($e, $l, $env);
+
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("struct ndr_pull *$comndr;");
+	$self->pidl("NDR_CHECK(ndr_pull_compression_start($ndr, &$comndr, $alg, $dlen, $clen));");
+
+	return $comndr;
+}
+
+# Close the block opened by ParseCompressionPullStart: finishes the
+# decompression and releases the nested context.
+sub ParseCompressionPullEnd($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $comndr = "$ndr\_compressed";
+	my $alg = compression_alg($e, $l);
+	my $dlen = compression_dlen($e, $l, $env);
+
+	$self->pidl("NDR_CHECK(ndr_pull_compression_end($ndr, $comndr, $alg, $dlen));");
+	$self->deindent;
+	$self->pidl("}");
+}
+
+# Open a [subcontext()] push block: declares a nested ndr_push context
+# _ndr_<name>, starts the subcontext and - when the level also carries
+# [compression()] - chains a compression block inside it.  Returns the
+# context the element body should push into.
+sub ParseSubcontextPushStart($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $subndr = "_ndr_$e->{NAME}";
+	my $subcontext_size = ParseExpr($l->{SUBCONTEXT_SIZE}, $env, $e->{ORIGINAL});
+
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("struct ndr_push *$subndr;");
+	$self->pidl("NDR_CHECK(ndr_push_subcontext_start($ndr, &$subndr, $l->{HEADER_SIZE}, $subcontext_size));");
+
+	if (defined $l->{COMPRESSION}) {
+		$subndr = $self->ParseCompressionPushStart($e, $l, $subndr, $env);
+	}
+
+	return $subndr;
+}
+
+# Close the block opened by ParseSubcontextPushStart, ending any inner
+# compression first (mirror order of the start sub).
+sub ParseSubcontextPushEnd($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $subndr = "_ndr_$e->{NAME}";
+	my $subcontext_size = ParseExpr($l->{SUBCONTEXT_SIZE}, $env, $e->{ORIGINAL});
+
+	if (defined $l->{COMPRESSION}) {
+		$self->ParseCompressionPushEnd($e, $l, $subndr, $env);
+	}
+
+	$self->pidl("NDR_CHECK(ndr_push_subcontext_end($ndr, $subndr, $l->{HEADER_SIZE}, $subcontext_size));");
+	$self->deindent;
+	$self->pidl("}");
+}
+
+# Open a [subcontext()] pull block: declares a nested ndr_pull context
+# _ndr_<name>, starts the subcontext and - when the level also carries
+# [compression()] - chains a decompression block inside it.  Returns
+# the context the element body should pull from.
+sub ParseSubcontextPullStart($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $subndr = "_ndr_$e->{NAME}";
+	my $subcontext_size = ParseExpr($l->{SUBCONTEXT_SIZE}, $env, $e->{ORIGINAL});
+
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("struct ndr_pull *$subndr;");
+	$self->pidl("NDR_CHECK(ndr_pull_subcontext_start($ndr, &$subndr, $l->{HEADER_SIZE}, $subcontext_size));");
+
+	if (defined $l->{COMPRESSION}) {
+		$subndr = $self->ParseCompressionPullStart($e, $l, $subndr, $env);
+	}
+
+	return $subndr;
+}
+
+# Close the block opened by ParseSubcontextPullStart, ending any inner
+# decompression first (mirror order of the start sub).
+sub ParseSubcontextPullEnd($$$$$)
+{
+	my ($self,$e,$l,$ndr,$env) = @_;
+	my $subndr = "_ndr_$e->{NAME}";
+	my $subcontext_size = ParseExpr($l->{SUBCONTEXT_SIZE}, $env, $e->{ORIGINAL});
+
+	if (defined $l->{COMPRESSION}) {
+		$self->ParseCompressionPullEnd($e, $l, $subndr, $env);
+	}
+
+	$self->pidl("NDR_CHECK(ndr_pull_subcontext_end($ndr, $subndr, $l->{HEADER_SIZE}, $subcontext_size));");
+	$self->deindent;
+	$self->pidl("}");
+}
+
+sub ParseElementPushLevel
+{
+ my ($self,$e,$l,$ndr,$var_name,$env,$primitives,$deferred) = @_;
+
+ my $ndr_flags = CalcNdrFlags($l, $primitives, $deferred);
+
+ if ($l->{TYPE} eq "ARRAY" and ($l->{IS_CONFORMANT} or $l->{IS_VARYING})) {
+ $var_name = get_pointer_to($var_name);
+ }
+
+ if (defined($ndr_flags)) {
+ if ($l->{TYPE} eq "SUBCONTEXT") {
+ my $subndr = $self->ParseSubcontextPushStart($e, $l, $ndr, $env);
+ $self->ParseElementPushLevel($e, GetNextLevel($e, $l), $subndr, $var_name, $env, 1, 1);
+ $self->ParseSubcontextPushEnd($e, $l, $ndr, $env);
+ } elsif ($l->{TYPE} eq "POINTER") {
+ $self->ParsePtrPush($e, $l, $ndr, $var_name);
+ } elsif ($l->{TYPE} eq "ARRAY") {
+ my $length = $self->ParseArrayPushHeader($e, $l, $ndr, $var_name, $env);
+
+ my $nl = GetNextLevel($e, $l);
+
+ # Allow speedups for arrays of scalar types
+ if (is_charset_array($e,$l)) {
+ if ($l->{IS_TO_NULL}) {
+ $self->pidl("NDR_CHECK(ndr_push_charset_to_null($ndr, $ndr_flags, $var_name, $length, sizeof(" . mapTypeName($nl->{DATA_TYPE}) . "), CH_$e->{PROPERTIES}->{charset}));");
+ } else {
+ $self->pidl("NDR_CHECK(ndr_push_charset($ndr, $ndr_flags, $var_name, $length, sizeof(" . mapTypeName($nl->{DATA_TYPE}) . "), CH_$e->{PROPERTIES}->{charset}));");
+ }
+ return;
+ } elsif (has_fast_array($e,$l)) {
+ $self->pidl("NDR_CHECK(ndr_push_array_$nl->{DATA_TYPE}($ndr, $ndr_flags, $var_name, $length));");
+ return;
+ }
+ } elsif ($l->{TYPE} eq "SWITCH") {
+ $self->ParseSwitchPush($e, $l, $ndr, $var_name, $env);
+ } elsif ($l->{TYPE} eq "DATA") {
+ $self->ParseDataPush($e, $l, $ndr, $var_name, $primitives, $deferred);
+ } elsif ($l->{TYPE} eq "TYPEDEF") {
+ $typefamily{$e->{DATA}->{TYPE}}->{PUSH_FN_BODY}->($self, $e->{DATA}, $ndr, $var_name);
+ }
+ }
+
+ if ($l->{TYPE} eq "POINTER" and $l->{POINTER_TYPE} eq "ignore") {
+ $self->pidl("/* [ignore] '$e->{NAME}' */");
+ } elsif ($l->{TYPE} eq "POINTER" and $deferred) {
+ my $rel_var_name = $var_name;
+ if ($l->{POINTER_TYPE} ne "ref") {
+ $self->pidl("if ($var_name) {");
+ $self->indent;
+ if ($l->{POINTER_TYPE} eq "relative") {
+ $self->pidl("NDR_CHECK(ndr_push_relative_ptr2_start($ndr, $rel_var_name));");
+ }
+ if ($l->{POINTER_TYPE} eq "relative_short") {
+ $self->pidl("NDR_CHECK(ndr_push_short_relative_ptr2($ndr, $var_name));");
+ }
+ }
+ $var_name = get_value_of($var_name);
+ $self->ParseElementPushLevel($e, GetNextLevel($e, $l), $ndr, $var_name, $env, 1, 1);
+
+ if ($l->{POINTER_TYPE} ne "ref") {
+ if ($l->{POINTER_TYPE} eq "relative") {
+ $self->pidl("NDR_CHECK(ndr_push_relative_ptr2_end($ndr, $rel_var_name));");
+ }
+ $self->deindent;
+ $self->pidl("}");
+ }
+ } elsif ($l->{TYPE} eq "ARRAY" and not has_fast_array($e,$l) and
+ not is_charset_array($e, $l)) {
+ my $length = ParseExpr($l->{LENGTH_IS}, $env, $e->{ORIGINAL});
+ my $counter = "cntr_$e->{NAME}_$l->{LEVEL_INDEX}";
+
+ my $array_pointless = ($length eq "0");
+
+ if ($array_pointless) {
+ warning($e->{ORIGINAL}, "pointless array `$e->{NAME}' will always have size 0");
+ }
+
+ $var_name = get_array_element($var_name, $counter);
+
+ if ((($primitives and not $l->{IS_DEFERRED}) or ($deferred and $l->{IS_DEFERRED})) and not $array_pointless) {
+ $self->pidl("for ($counter = 0; $counter < ($length); $counter++) {");
+ $self->indent;
+ $self->ParseElementPushLevel($e, GetNextLevel($e, $l), $ndr, $var_name, $env, 1, 0);
+ $self->deindent;
+ $self->pidl("}");
+ }
+
+ if ($deferred and ContainsDeferred($e, $l) and not $array_pointless) {
+ $self->pidl("for ($counter = 0; $counter < ($length); $counter++) {");
+ $self->indent;
+ $self->ParseElementPushLevel($e, GetNextLevel($e, $l), $ndr, $var_name, $env, 0, 1);
+ $self->deindent;
+ $self->pidl("}");
+ }
+ } elsif ($l->{TYPE} eq "SWITCH") {
+ $self->ParseElementPushLevel($e, GetNextLevel($e, $l), $ndr, $var_name, $env, $primitives, $deferred);
+ }
+}
+
+#####################################################################
+# parse scalars in a structure element
+sub ParseElementPush($$$$$$)
+{
+ my ($self,$e,$ndr,$env,$primitives,$deferred) = @_;
+ my $subndr = undef;
+
+ my $var_name = $env->{$e->{NAME}};
+
+ if (has_property($e, "skip") or has_property($e, "skip_noinit")) {
+ $self->pidl("/* [skip] '$var_name' */");
+ return;
+ }
+
+ return if ContainsPipe($e, $e->{LEVELS}[0]);
+
+ return unless $primitives or ($deferred and ContainsDeferred($e, $e->{LEVELS}[0]));
+
+ # Representation type is different from transmit_as
+ if ($e->{REPRESENTATION_TYPE} ne $e->{TYPE}) {
+ $self->pidl("{");
+ $self->indent;
+ my $transmit_name = "_transmit_$e->{NAME}";
+ $self->pidl(mapTypeName($e->{TYPE}) ." $transmit_name;");
+ $self->pidl("NDR_CHECK(ndr_$e->{REPRESENTATION_TYPE}_to_$e->{TYPE}($var_name, " . get_pointer_to($transmit_name) . "));");
+ $var_name = $transmit_name;
+ }
+
+ $var_name = append_prefix($e, $var_name);
+
+ $self->start_flags($e, $ndr);
+
+ if (defined(my $value = has_property($e, "value"))) {
+ $var_name = ParseExpr($value, $env, $e->{ORIGINAL});
+ }
+
+ $self->ParseElementPushLevel($e, $e->{LEVELS}[0], $ndr, $var_name, $env, $primitives, $deferred);
+
+ $self->end_flags($e, $ndr);
+
+ if ($e->{REPRESENTATION_TYPE} ne $e->{TYPE}) {
+ $self->deindent;
+ $self->pidl("}");
+ }
+}
+
+#####################################################################
+# parse a pointer in a struct element or function
+sub ParsePtrPush($$$$$)
+{
+ my ($self,$e,$l,$ndr,$var_name) = @_;
+
+ if ($l->{POINTER_TYPE} eq "ref") {
+ if ($l->{LEVEL_INDEX} > 0) {
+ $self->pidl("if ($var_name == NULL) {");
+ $self->indent;
+ $self->pidl("return ndr_push_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL [ref] pointer\");");
+ $self->deindent;
+ $self->pidl("}");
+ }
+ if ($l->{LEVEL} eq "EMBEDDED") {
+ $self->pidl("NDR_CHECK(ndr_push_ref_ptr(ndr)); /* $var_name */");
+ }
+ } elsif ($l->{POINTER_TYPE} eq "relative") {
+ $self->pidl("NDR_CHECK(ndr_push_relative_ptr1($ndr, $var_name));");
+ } elsif ($l->{POINTER_TYPE} eq "relative_short") {
+ $self->pidl("NDR_CHECK(ndr_push_short_relative_ptr1($ndr, $var_name));");
+ } elsif ($l->{POINTER_TYPE} eq "unique") {
+ $self->pidl("NDR_CHECK(ndr_push_unique_ptr($ndr, $var_name));");
+ } elsif ($l->{POINTER_TYPE} eq "full") {
+ $self->pidl("NDR_CHECK(ndr_push_full_ptr($ndr, $var_name));");
+ } elsif ($l->{POINTER_TYPE} eq "ignore") {
+ # We don't want this pointer to appear on the wire at all
+ $self->pidl("NDR_CHECK(ndr_push_uint3264(ndr, NDR_SCALARS, 0));");
+ } else {
+ die("Unhandled pointer type $l->{POINTER_TYPE}");
+ }
+}
+
+sub need_pointer_to($$$)
+{
+ my ($e, $l, $scalar_only) = @_;
+
+ my $t;
+ if (ref($l->{DATA_TYPE})) {
+ $t = "$l->{DATA_TYPE}->{TYPE}_$l->{DATA_TYPE}->{NAME}";
+ } else {
+ $t = $l->{DATA_TYPE};
+ }
+
+ if (not Parse::Pidl::Typelist::is_scalar($t)) {
+ return 1 if $scalar_only;
+ }
+
+ my $arrays = 0;
+
+ foreach my $tl (@{$e->{LEVELS}}) {
+ last if $l == $tl;
+ if ($tl->{TYPE} eq "ARRAY") {
+ $arrays++;
+ }
+ }
+
+ if (Parse::Pidl::Typelist::scalar_is_reference($t)) {
+ return 1 unless $arrays;
+ }
+
+ return 0;
+}
+
+sub ParseDataPrint($$$$$)
+{
+ my ($self, $e, $l, $ndr, $var_name) = @_;
+
+ if (not ref($l->{DATA_TYPE}) or defined($l->{DATA_TYPE}->{NAME})) {
+
+ if (need_pointer_to($e, $l, 1)) {
+ $var_name = get_pointer_to($var_name);
+ }
+
+ $self->pidl(TypeFunctionName("ndr_print", $l->{DATA_TYPE})."($ndr, \"$e->{NAME}\", $var_name);");
+ } else {
+ $self->ParseTypePrint($l->{DATA_TYPE}, $ndr, $var_name);
+ }
+}
+
+#####################################################################
+# print scalars in a structure element
+sub ParseElementPrint($$$$$)
+{
+ my($self, $e, $ndr, $var_name, $env) = @_;
+
+ return if (has_property($e, "noprint"));
+ my $cur_depth = 0;
+ my $ignore_depth = 0xFFFF;
+
+ $self->start_flags($e, $ndr);
+ if ($e->{REPRESENTATION_TYPE} ne $e->{TYPE}) {
+ $self->pidl("ndr_print_$e->{REPRESENTATION_TYPE}($ndr, \"$e->{NAME}\", $var_name);");
+ $self->end_flags($e, $ndr);
+ return;
+ }
+
+ $var_name = append_prefix($e, $var_name);
+
+ if (defined(my $value = has_property($e, "value"))) {
+ $var_name = "($ndr->flags & LIBNDR_PRINT_SET_VALUES)?" . ParseExpr($value,$env, $e->{ORIGINAL}) . ":$var_name";
+ }
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ $cur_depth += 1;
+
+ if ($cur_depth > $ignore_depth) {
+ next;
+ }
+
+ if ($l->{TYPE} eq "POINTER") {
+ $self->pidl("ndr_print_ptr($ndr, \"$e->{NAME}\", $var_name);");
+ if ($l->{POINTER_TYPE} eq "ignore") {
+ $self->pidl("/* [ignore] '$e->{NAME}' */");
+ $ignore_depth = $cur_depth;
+ last;
+ }
+ $self->pidl("$ndr->depth++;");
+ if ($l->{POINTER_TYPE} ne "ref") {
+ $self->pidl("if ($var_name) {");
+ $self->indent;
+ }
+ $var_name = get_value_of($var_name);
+ } elsif ($l->{TYPE} eq "ARRAY") {
+ my $length;
+
+ if ($l->{IS_CONFORMANT} or $l->{IS_VARYING}) {
+ $var_name = get_pointer_to($var_name);
+ }
+
+ if ($l->{IS_ZERO_TERMINATED} and not defined($l->{LENGTH_IS})) {
+ $length = "ndr_string_length($var_name, sizeof(*$var_name))";
+ } else {
+ $length = ParseExprExt($l->{LENGTH_IS}, $env, $e->{ORIGINAL},
+ check_null_pointer($e, $env, sub { $self->pidl(shift); }, "return;"), check_fully_dereferenced($e, $env));
+ }
+
+ if (is_charset_array($e,$l)) {
+ $self->pidl("ndr_print_string($ndr, \"$e->{NAME}\", $var_name);");
+ last;
+ } elsif (has_fast_array($e, $l)) {
+ my $nl = GetNextLevel($e, $l);
+ $self->pidl("ndr_print_array_$nl->{DATA_TYPE}($ndr, \"$e->{NAME}\", $var_name, $length);");
+ last;
+ } else {
+ my $counter = "cntr_$e->{NAME}_$l->{LEVEL_INDEX}";
+
+ $self->pidl("$ndr->print($ndr, \"\%s: ARRAY(\%d)\", \"$e->{NAME}\", (int)$length);");
+ $self->pidl("$ndr->depth++;");
+ $self->pidl("for ($counter = 0; $counter < ($length); $counter++) {");
+ $self->indent;
+
+ $var_name = get_array_element($var_name, $counter);
+ }
+ } elsif ($l->{TYPE} eq "DATA") {
+ $self->ParseDataPrint($e, $l, $ndr, $var_name);
+ } elsif ($l->{TYPE} eq "SWITCH") {
+ my $switch_var = ParseExprExt($l->{SWITCH_IS}, $env, $e->{ORIGINAL},
+ check_null_pointer($e, $env, sub { $self->pidl(shift); }, "return;"), check_fully_dereferenced($e, $env));
+ $self->pidl("ndr_print_set_switch_value($ndr, " . get_pointer_to($var_name) . ", $switch_var);");
+ }
+ }
+
+ foreach my $l (reverse @{$e->{LEVELS}}) {
+ $cur_depth -= 1;
+
+ if ($cur_depth > $ignore_depth) {
+ next;
+ }
+
+ if ($l->{TYPE} eq "POINTER") {
+ if ($l->{POINTER_TYPE} eq "ignore") {
+ next;
+ }
+
+ if ($l->{POINTER_TYPE} ne "ref") {
+ $self->deindent;
+ $self->pidl("}");
+ }
+ $self->pidl("$ndr->depth--;");
+ } elsif (($l->{TYPE} eq "ARRAY")
+ and not is_charset_array($e,$l)
+ and not has_fast_array($e,$l)) {
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("$ndr->depth--;");
+ }
+ }
+
+ $self->end_flags($e, $ndr);
+}
+
+#####################################################################
+# parse scalars in a structure element - pull size
+sub ParseSwitchPull($$$$$$)
+{
+ my($self,$e,$l,$ndr,$var_name,$env) = @_;
+ my $switch_var = ParseExprExt($l->{SWITCH_IS}, $env, $e->{ORIGINAL},
+ check_null_pointer($e, $env, sub { $self->pidl(shift); },
+ "return ndr_pull_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL Pointer for switch_is()\");"),
+ check_fully_dereferenced($e, $env));
+
+ $var_name = get_pointer_to($var_name);
+ $self->pidl("NDR_CHECK(ndr_pull_set_switch_value($ndr, $var_name, $switch_var));");
+}
+
+#####################################################################
+# push switch element
+sub ParseSwitchPush($$$$$$)
+{
+ my($self,$e,$l,$ndr,$var_name,$env) = @_;
+ my $switch_var = ParseExprExt($l->{SWITCH_IS}, $env, $e->{ORIGINAL},
+ check_null_pointer($e, $env, sub { $self->pidl(shift); },
+ "return ndr_push_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL Pointer for switch_is()\");"),
+ check_fully_dereferenced($e, $env));
+
+ $var_name = get_pointer_to($var_name);
+ $self->pidl("NDR_CHECK(ndr_push_set_switch_value($ndr, $var_name, $switch_var));");
+}
+
+sub ParseDataPull($$$$$$$)
+{
+ my ($self,$e,$l,$ndr,$var_name,$primitives,$deferred) = @_;
+
+ if (not ref($l->{DATA_TYPE}) or defined($l->{DATA_TYPE}->{NAME})) {
+
+ my $ndr_flags = CalcNdrFlags($l, $primitives, $deferred);
+
+ if (need_pointer_to($e, $l, 0)) {
+ $var_name = get_pointer_to($var_name);
+ }
+
+ $var_name = get_pointer_to($var_name);
+
+ $self->pidl("NDR_CHECK(".TypeFunctionName("ndr_pull", $l->{DATA_TYPE})."($ndr, $ndr_flags, $var_name));");
+
+ my $pl = GetPrevLevel($e, $l);
+
+ my $range = has_property($e, "range");
+ if ($range and $pl->{TYPE} ne "ARRAY") {
+ $var_name = get_value_of($var_name);
+ my $signed = Parse::Pidl::Typelist::is_signed($l->{DATA_TYPE});
+ my ($low, $high) = split(/,/, $range, 2);
+ if ($low < 0 and not $signed) {
+ warning(0, "$low is invalid for the range of an unsigned type");
+ }
+ if ($low == 0 and not $signed) {
+ $self->pidl("if ($var_name > $high) {");
+ } else {
+ $self->pidl("if ($var_name < $low || $var_name > $high) {");
+ }
+ $self->pidl("\treturn ndr_pull_error($ndr, NDR_ERR_RANGE, \"value out of range\");");
+ $self->pidl("}");
+ }
+ } else {
+ $self->ParseTypePull($l->{DATA_TYPE}, $ndr, $var_name, $primitives, $deferred);
+ }
+}
+
+sub ParseDataPush($$$$$$$)
+{
+ my ($self,$e,$l,$ndr,$var_name,$primitives,$deferred) = @_;
+
+ if (not ref($l->{DATA_TYPE}) or defined($l->{DATA_TYPE}->{NAME})) {
+
+ my $ndr_flags = CalcNdrFlags($l, $primitives, $deferred);
+
+ # strings are passed by value rather than reference
+ if (need_pointer_to($e, $l, 1)) {
+ $var_name = get_pointer_to($var_name);
+ }
+
+ $self->pidl("NDR_CHECK(".TypeFunctionName("ndr_push", $l->{DATA_TYPE})."($ndr, $ndr_flags, $var_name));");
+ } else {
+ $self->ParseTypePush($l->{DATA_TYPE}, $ndr, $var_name, $primitives, $deferred);
+ }
+}
+
+sub CalcNdrFlags($$$)
+{
+ my ($l,$primitives,$deferred) = @_;
+
+ my $scalars = 0;
+ my $buffers = 0;
+
+ # Add NDR_SCALARS if this one is deferred
+ # and deferreds may be pushed
+ $scalars = 1 if ($l->{IS_DEFERRED} and $deferred);
+
+ # Add NDR_SCALARS if this one is not deferred and
+ # primitives may be pushed
+ $scalars = 1 if (!$l->{IS_DEFERRED} and $primitives);
+
+ # Add NDR_BUFFERS if this one contains deferred stuff
+ # and deferreds may be pushed
+ $buffers = 1 if ($l->{CONTAINS_DEFERRED} and $deferred);
+
+ return "NDR_SCALARS|NDR_BUFFERS" if ($scalars and $buffers);
+ return "NDR_SCALARS" if ($scalars);
+ return "NDR_BUFFERS" if ($buffers);
+ return undef;
+}
+
+sub ParseMemCtxPullFlags($$$$)
+{
+ my ($self, $e, $l) = @_;
+
+ return undef unless ($l->{TYPE} eq "POINTER" or $l->{TYPE} eq "ARRAY");
+ return undef if (($l->{TYPE} eq "POINTER") and ($l->{POINTER_TYPE} eq "ignore"));
+
+ return undef unless ($l->{TYPE} ne "ARRAY" or ArrayDynamicallyAllocated($e,$l));
+ return undef if has_fast_array($e, $l);
+ return undef if is_charset_array($e, $l);
+
+ my $mem_flags = "0";
+
+ if (($l->{TYPE} eq "POINTER") and ($l->{POINTER_TYPE} eq "ref")) {
+ my $nl = GetNextLevel($e, $l);
+ return undef if ($nl->{TYPE} eq "PIPE");
+ return undef if ($nl->{TYPE} eq "ARRAY");
+ return undef if (($nl->{TYPE} eq "DATA") and ($nl->{DATA_TYPE} eq "string"));
+
+ if ($l->{LEVEL} eq "TOP") {
+ $mem_flags = "LIBNDR_FLAG_REF_ALLOC";
+ }
+ }
+
+ return $mem_flags;
+}
+
+sub ParseMemCtxPullStart($$$$$)
+{
+ my ($self, $e, $l, $ndr, $ptr_name) = @_;
+
+ my $mem_r_ctx = "_mem_save_$e->{NAME}_$l->{LEVEL_INDEX}";
+ my $mem_c_ctx = $ptr_name;
+ my $mem_c_flags = $self->ParseMemCtxPullFlags($e, $l);
+
+ return unless defined($mem_c_flags);
+
+ $self->pidl("$mem_r_ctx = NDR_PULL_GET_MEM_CTX($ndr);");
+ $self->pidl("NDR_PULL_SET_MEM_CTX($ndr, $mem_c_ctx, $mem_c_flags);");
+}
+
+sub ParseMemCtxPullEnd($$$$)
+{
+ my ($self, $e, $l, $ndr) = @_;
+
+ my $mem_r_ctx = "_mem_save_$e->{NAME}_$l->{LEVEL_INDEX}";
+ my $mem_r_flags = $self->ParseMemCtxPullFlags($e, $l);
+
+ return unless defined($mem_r_flags);
+
+ $self->pidl("NDR_PULL_SET_MEM_CTX($ndr, $mem_r_ctx, $mem_r_flags);");
+}
+
+sub CheckStringTerminator($$$$$)
+{
+ my ($self,$ndr,$e,$l,$length) = @_;
+ my $nl = GetNextLevel($e, $l);
+
+ # Make sure last element is zero!
+ $self->pidl("NDR_CHECK(ndr_check_string_terminator($ndr, $length, sizeof($nl->{DATA_TYPE}_t)));");
+}
+
+sub ParseElementPullLevel
+{
+ my($self,$e,$l,$ndr,$var_name,$env,$primitives,$deferred) = @_;
+
+ my $ndr_flags = CalcNdrFlags($l, $primitives, $deferred);
+ my $array_length = undef;
+
+ if (has_property($e, "skip") or has_property($e, "skip_noinit")) {
+ $self->pidl("/* [skip] '$var_name' */");
+ if (not has_property($e, "skip_noinit")) {
+ $self->pidl("ZERO_STRUCT($var_name);");
+ }
+ return;
+ }
+
+ if ($l->{TYPE} eq "ARRAY" and ($l->{IS_VARYING} or $l->{IS_CONFORMANT})) {
+ $var_name = get_pointer_to($var_name);
+ }
+
+ # Only pull something if there's actually something to be pulled
+ if (defined($ndr_flags)) {
+ if ($l->{TYPE} eq "SUBCONTEXT") {
+ my $subndr = $self->ParseSubcontextPullStart($e, $l, $ndr, $env);
+ $self->ParseElementPullLevel($e, GetNextLevel($e,$l), $subndr, $var_name, $env, 1, 1);
+ $self->ParseSubcontextPullEnd($e, $l, $ndr, $env);
+ } elsif ($l->{TYPE} eq "ARRAY") {
+ my $length = $self->ParseArrayPullHeader($e, $l, $ndr, $var_name, $env);
+ $array_length = $length;
+
+ my $nl = GetNextLevel($e, $l);
+
+ if (is_charset_array($e,$l)) {
+ if ($l->{IS_ZERO_TERMINATED}) {
+ $self->CheckStringTerminator($ndr, $e, $l, $length);
+ }
+ if ($l->{IS_TO_NULL}) {
+ $self->pidl("NDR_CHECK(ndr_pull_charset_to_null($ndr, $ndr_flags, ".get_pointer_to($var_name).", $length, sizeof(" . mapTypeName($nl->{DATA_TYPE}) . "), CH_$e->{PROPERTIES}->{charset}));");
+ } else {
+ $self->pidl("NDR_CHECK(ndr_pull_charset($ndr, $ndr_flags, ".get_pointer_to($var_name).", $length, sizeof(" . mapTypeName($nl->{DATA_TYPE}) . "), CH_$e->{PROPERTIES}->{charset}));");
+ }
+ return;
+ } elsif (has_fast_array($e, $l)) {
+ if ($l->{IS_ZERO_TERMINATED}) {
+ $self->CheckStringTerminator($ndr,$e,$l,$length);
+ }
+ $self->pidl("NDR_CHECK(ndr_pull_array_$nl->{DATA_TYPE}($ndr, $ndr_flags, $var_name, $length));");
+ return;
+ }
+ } elsif ($l->{TYPE} eq "POINTER") {
+ $self->ParsePtrPull($e, $l, $ndr, $var_name);
+ } elsif ($l->{TYPE} eq "SWITCH") {
+ $self->ParseSwitchPull($e, $l, $ndr, $var_name, $env);
+ } elsif ($l->{TYPE} eq "DATA") {
+ $self->ParseDataPull($e, $l, $ndr, $var_name, $primitives, $deferred);
+ } elsif ($l->{TYPE} eq "TYPEDEF") {
+ $typefamily{$e->{DATA}->{TYPE}}->{PULL_FN_BODY}->($self, $e->{DATA}, $ndr, $var_name);
+ }
+ }
+
+ # add additional constructions
+ if ($l->{TYPE} eq "POINTER" and $l->{POINTER_TYPE} eq "ignore") {
+ $self->pidl("/* [ignore] '$e->{NAME}' */");
+ } elsif ($l->{TYPE} eq "POINTER" and $deferred) {
+ if ($l->{POINTER_TYPE} ne "ref") {
+ $self->pidl("if ($var_name) {");
+ $self->indent;
+
+ if ($l->{POINTER_TYPE} eq "relative" or $l->{POINTER_TYPE} eq "relative_short") {
+ $self->pidl("uint32_t _relative_save_offset;");
+ $self->pidl("_relative_save_offset = $ndr->offset;");
+ $self->pidl("NDR_CHECK(ndr_pull_relative_ptr2($ndr, $var_name));");
+ }
+ }
+
+ $self->ParseMemCtxPullStart($e, $l, $ndr, $var_name);
+
+ $var_name = get_value_of($var_name);
+ $self->ParseElementPullLevel($e, GetNextLevel($e,$l), $ndr, $var_name, $env, 1, 1);
+
+ $self->ParseMemCtxPullEnd($e, $l, $ndr);
+
+ if ($l->{POINTER_TYPE} ne "ref") {
+ if ($l->{POINTER_TYPE} eq "relative" or $l->{POINTER_TYPE} eq "relative_short") {
+ $self->pidl("if ($ndr->offset > $ndr->relative_highest_offset) {");
+ $self->indent;
+ $self->pidl("$ndr->relative_highest_offset = $ndr->offset;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("$ndr->offset = _relative_save_offset;");
+ }
+ $self->deindent;
+ $self->pidl("}");
+ }
+ } elsif ($l->{TYPE} eq "ARRAY" and
+ not has_fast_array($e,$l) and not is_charset_array($e, $l)) {
+ my $length = $array_length;
+ my $counter = "cntr_$e->{NAME}_$l->{LEVEL_INDEX}";
+ my $array_name = $var_name;
+
+ if (not defined($length)) {
+ $length = $self->ParseArrayPullGetLength($e, $l, $ndr, $var_name, $env);
+ }
+
+ $var_name = get_array_element($var_name, $counter);
+
+ $self->ParseMemCtxPullStart($e, $l, $ndr, $array_name);
+
+ if (($primitives and not $l->{IS_DEFERRED}) or ($deferred and $l->{IS_DEFERRED})) {
+ my $nl = GetNextLevel($e,$l);
+
+ if ($l->{IS_ZERO_TERMINATED}) {
+ $self->CheckStringTerminator($ndr,$e,$l,$length);
+ }
+
+ $self->pidl("for ($counter = 0; $counter < ($length); $counter++) {");
+ $self->indent;
+ $self->ParseElementPullLevel($e, $nl, $ndr, $var_name, $env, 1, 0);
+ $self->deindent;
+ $self->pidl("}");
+ }
+
+ if ($deferred and ContainsDeferred($e, $l)) {
+ $self->pidl("for ($counter = 0; $counter < ($length); $counter++) {");
+ $self->indent;
+ $self->ParseElementPullLevel($e,GetNextLevel($e,$l), $ndr, $var_name, $env, 0, 1);
+ $self->deindent;
+ $self->pidl("}");
+ }
+
+ $self->ParseMemCtxPullEnd($e, $l, $ndr);
+
+ } elsif ($l->{TYPE} eq "SWITCH") {
+ $self->ParseElementPullLevel($e, GetNextLevel($e,$l), $ndr, $var_name, $env, $primitives, $deferred);
+ }
+}
+
+#####################################################################
+# parse scalars in a structure element - pull size
+sub ParseElementPull($$$$$$)
+{
+ my($self,$e,$ndr,$env,$primitives,$deferred) = @_;
+
+ my $var_name = $env->{$e->{NAME}};
+ my $represent_name;
+ my $transmit_name;
+
+ return if ContainsPipe($e, $e->{LEVELS}[0]);
+
+ return unless $primitives or ($deferred and ContainsDeferred($e, $e->{LEVELS}[0]));
+
+ if ($e->{REPRESENTATION_TYPE} ne $e->{TYPE}) {
+ $self->pidl("{");
+ $self->indent;
+ $represent_name = $var_name;
+ $transmit_name = "_transmit_$e->{NAME}";
+ $var_name = $transmit_name;
+ $self->pidl(mapTypeName($e->{TYPE})." $var_name;");
+ }
+
+ $var_name = append_prefix($e, $var_name);
+
+ $self->start_flags($e, $ndr);
+
+ $self->ParseElementPullLevel($e,$e->{LEVELS}[0],$ndr,$var_name,$env,$primitives,$deferred);
+
+ $self->end_flags($e, $ndr);
+
+ # Representation type is different from transmit_as
+ if ($e->{REPRESENTATION_TYPE} ne $e->{TYPE}) {
+ $self->pidl("NDR_CHECK(ndr_$e->{TYPE}_to_$e->{REPRESENTATION_TYPE}($transmit_name, ".get_pointer_to($represent_name)."));");
+ $self->deindent;
+ $self->pidl("}");
+ }
+}
+
+#####################################################################
+# parse a pointer in a struct element or function
+sub ParsePtrPull($$$$$)
+{
+ my($self, $e,$l,$ndr,$var_name) = @_;
+
+ my $nl = GetNextLevel($e, $l);
+ my $next_is_array = ($nl->{TYPE} eq "ARRAY");
+ my $next_is_string = (($nl->{TYPE} eq "DATA") and
+ ($nl->{DATA_TYPE} eq "string"));
+
+ if ($l->{POINTER_TYPE} eq "ref" and $l->{LEVEL} eq "TOP") {
+
+ if (!$next_is_array and !$next_is_string) {
+ $self->pidl("if ($ndr->flags & LIBNDR_FLAG_REF_ALLOC) {");
+ $self->pidl("\tNDR_PULL_ALLOC($ndr, $var_name);");
+ $self->pidl("}");
+ }
+
+ return;
+ } elsif ($l->{POINTER_TYPE} eq "ref" and $l->{LEVEL} eq "EMBEDDED") {
+ $self->pidl("NDR_CHECK(ndr_pull_ref_ptr($ndr, &_ptr_$e->{NAME}));");
+ } elsif (($l->{POINTER_TYPE} eq "unique") or
+ ($l->{POINTER_TYPE} eq "relative") or
+ ($l->{POINTER_TYPE} eq "full")) {
+ $self->pidl("NDR_CHECK(ndr_pull_generic_ptr($ndr, &_ptr_$e->{NAME}));");
+ } elsif ($l->{POINTER_TYPE} eq "relative_short") {
+ $self->pidl("NDR_CHECK(ndr_pull_relative_ptr_short($ndr, &_ptr_$e->{NAME}));");
+ } elsif ($l->{POINTER_TYPE} eq "ignore") {
+ #We want to consume the pointer bytes, but ignore the pointer value
+ $self->pidl("NDR_CHECK(ndr_pull_uint3264(ndr, NDR_SCALARS, &_ptr_$e->{NAME}));");
+ $self->pidl("_ptr_$e->{NAME} = 0;");
+ } else {
+ die("Unhandled pointer type $l->{POINTER_TYPE}");
+ }
+
+ $self->pidl("if (_ptr_$e->{NAME}) {");
+ $self->indent;
+
+ if ($l->{POINTER_TYPE} eq "ignore") {
+ # Don't do anything, we don't want to do the
+ # allocation, as we forced it to NULL just above, and
+ # we may not know the declared type anyway.
+ } else {
+ # Don't do this for arrays, they're allocated at the actual level
+ # of the array
+ unless ($next_is_array or $next_is_string) {
+ $self->pidl("NDR_PULL_ALLOC($ndr, $var_name);");
+ } else {
+ # FIXME: Yes, this is nasty.
+ # We allocate an array twice
+ # - once just to indicate that it's there,
+ # - then the real allocation...
+ $self->pidl("NDR_PULL_ALLOC($ndr, $var_name);");
+ }
+ }
+
+ #$self->pidl("memset($var_name, 0, sizeof($var_name));");
+ if ($l->{POINTER_TYPE} eq "relative" or $l->{POINTER_TYPE} eq "relative_short") {
+ $self->pidl("NDR_CHECK(ndr_pull_relative_ptr1($ndr, $var_name, _ptr_$e->{NAME}));");
+ }
+ $self->deindent;
+ $self->pidl("} else {");
+ $self->pidl("\t$var_name = NULL;");
+ $self->pidl("}");
+}
+
+sub CheckRefPtrs($$$$)
+{
+ my ($self,$e,$ndr,$env) = @_;
+
+ return if ContainsPipe($e, $e->{LEVELS}[0]);
+ return if ($e->{LEVELS}[0]->{TYPE} ne "POINTER");
+ return if ($e->{LEVELS}[0]->{POINTER_TYPE} ne "ref");
+
+ my $var_name = $env->{$e->{NAME}};
+ $var_name = append_prefix($e, $var_name);
+
+ $self->pidl("if ($var_name == NULL) {");
+ $self->indent;
+ $self->pidl("return ndr_push_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL [ref] pointer\");");
+ $self->deindent;
+ $self->pidl("}");
+}
+
+sub ParseStructPushPrimitives($$$$$)
+{
+ my ($self, $struct, $ndr, $varname, $env) = @_;
+
+ $self->CheckRefPtrs($_, $ndr, $env) foreach (@{$struct->{ELEMENTS}});
+
+ # see if the structure contains a conformant array. If it
+ # does, then it must be the last element of the structure, and
+ # we need to push the conformant length early, as it fits on
+ # the wire before the structure (and even before the structure
+ # alignment)
+ if (defined($struct->{SURROUNDING_ELEMENT})) {
+ my $e = $struct->{SURROUNDING_ELEMENT};
+
+ if (defined($e->{LEVELS}[0]) and
+ $e->{LEVELS}[0]->{TYPE} eq "ARRAY") {
+ my $size;
+
+ if ($e->{LEVELS}[0]->{IS_ZERO_TERMINATED}) {
+ if (has_property($e, "charset")) {
+ $size = "ndr_charset_length($varname->$e->{NAME}, CH_$e->{PROPERTIES}->{charset})";
+ } else {
+ $size = "ndr_string_length($varname->$e->{NAME}, sizeof(*$varname->$e->{NAME}))";
+ }
+ if (defined($e->{LEVELS}[0]->{SIZE_IS})) {
+ $size = ParseExpr($e->{LEVELS}[0]->{SIZE_IS}, $env, $e->{ORIGINAL});
+ }
+ } else {
+ $size = ParseExpr($e->{LEVELS}[0]->{SIZE_IS}, $env, $e->{ORIGINAL});
+ }
+
+ $self->pidl("NDR_CHECK(ndr_push_uint3264($ndr, NDR_SCALARS, $size));");
+ } else {
+ $self->pidl("NDR_CHECK(ndr_push_uint3264($ndr, NDR_SCALARS, ndr_string_array_size($ndr, $varname->$e->{NAME})));");
+ }
+ }
+
+ $self->pidl("NDR_CHECK(ndr_push_align($ndr, $struct->{ALIGN}));");
+
+ if (defined($struct->{PROPERTIES}{relative_base})) {
+ # set the current offset as base for relative pointers
+ # and store it based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_push_setup_relative_base_offset1($ndr, $varname, $ndr->offset));");
+ }
+
+ $self->ParseElementPush($_, $ndr, $env, 1, 0) foreach (@{$struct->{ELEMENTS}});
+
+ $self->pidl("NDR_CHECK(ndr_push_trailer_align($ndr, $struct->{ALIGN}));");
+}
+
+sub ParseStructPushDeferred($$$$)
+{
+ my ($self, $struct, $ndr, $varname, $env) = @_;
+ if (defined($struct->{PROPERTIES}{relative_base})) {
+ # retrieve the current offset as base for relative pointers
+ # based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_push_setup_relative_base_offset2($ndr, $varname));");
+ }
+ $self->ParseElementPush($_, $ndr, $env, 0, 1) foreach (@{$struct->{ELEMENTS}});
+}
+
+#####################################################################
+# parse a struct
+sub ParseStructPush($$$$)
+{
+ my ($self, $struct, $ndr, $varname) = @_;
+
+ return unless defined($struct->{ELEMENTS});
+
+ my $env = GenerateStructEnv($struct, $varname);
+
+ EnvSubstituteValue($env, $struct);
+
+ $self->DeclareArrayVariablesNoZero($_, $env) foreach (@{$struct->{ELEMENTS}});
+
+ $self->start_flags($struct, $ndr);
+
+ $self->pidl("NDR_PUSH_CHECK_FLAGS(ndr, ndr_flags);");
+ $self->pidl("if (ndr_flags & NDR_SCALARS) {");
+ $self->indent;
+ $self->ParseStructPushPrimitives($struct, $ndr, $varname, $env);
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->pidl("if (ndr_flags & NDR_BUFFERS) {");
+ $self->indent;
+ $self->ParseStructPushDeferred($struct, $ndr, $varname, $env);
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->end_flags($struct, $ndr);
+}
+
+#####################################################################
+# generate a push function for an enum
+sub ParseEnumPush($$$$)
+{
+ my($self,$enum,$ndr,$varname) = @_;
+ my($type_fn) = $enum->{BASE_TYPE};
+
+ $self->start_flags($enum, $ndr);
+ $self->pidl("NDR_CHECK(ndr_push_enum_$type_fn($ndr, NDR_SCALARS, $varname));");
+ $self->end_flags($enum, $ndr);
+}
+
+#####################################################################
+# generate a pull function for an enum
+sub ParseEnumPull($$$$)
+{
+ my($self,$enum,$ndr,$varname) = @_;
+ my($type_fn) = $enum->{BASE_TYPE};
+ my($type_v_decl) = mapTypeName($type_fn);
+
+ $self->pidl("$type_v_decl v;");
+ $self->start_flags($enum, $ndr);
+ $self->pidl("NDR_CHECK(ndr_pull_enum_$type_fn($ndr, NDR_SCALARS, &v));");
+ $self->pidl("*$varname = v;");
+
+ $self->end_flags($enum, $ndr);
+}
+
+#####################################################################
+# generate a print function for an enum
+sub ParseEnumPrint($$$$$)
+{
+ my($self,$enum,$ndr,$name,$varname) = @_;
+
+ $self->pidl("const char *val = NULL;");
+ $self->pidl("");
+
+ $self->start_flags($enum, $ndr);
+
+ $self->pidl("switch ($varname) {");
+ $self->indent;
+ my $els = \@{$enum->{ELEMENTS}};
+ foreach my $i (0 .. $#{$els}) {
+ my $e = ${$els}[$i];
+ chomp $e;
+ if ($e =~ /^(.*)=/) {
+ $e = $1;
+ }
+ $self->pidl("case $e: val = \"$e\"; break;");
+ }
+
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->pidl("ndr_print_enum($ndr, name, \"$enum->{TYPE}\", val, $varname);");
+
+ $self->end_flags($enum, $ndr);
+}
+
+sub DeclEnum($$$$)
+{
+ my ($e,$t,$name,$varname) = @_;
+ return "enum $name " .
+ ($t eq "pull"?"*":"") . $varname;
+}
+
+$typefamily{ENUM} = {
+ DECL => \&DeclEnum,
+ PUSH_FN_BODY => \&ParseEnumPush,
+ PULL_FN_BODY => \&ParseEnumPull,
+ PRINT_FN_BODY => \&ParseEnumPrint,
+};
+
+#####################################################################
+# generate a push function for a bitmap
+sub ParseBitmapPush($$$$)
+{
+ my($self,$bitmap,$ndr,$varname) = @_;
+ my($type_fn) = $bitmap->{BASE_TYPE};
+
+ $self->start_flags($bitmap, $ndr);
+
+ $self->pidl("NDR_CHECK(ndr_push_$type_fn($ndr, NDR_SCALARS, $varname));");
+
+ $self->end_flags($bitmap, $ndr);
+}
+
+#####################################################################
+# generate a pull function for an bitmap
+sub ParseBitmapPull($$$$)
+{
+ my($self,$bitmap,$ndr,$varname) = @_;
+ my $type_fn = $bitmap->{BASE_TYPE};
+ my($type_decl) = mapTypeName($bitmap->{BASE_TYPE});
+
+ $self->pidl("$type_decl v;");
+ $self->start_flags($bitmap, $ndr);
+ $self->pidl("NDR_CHECK(ndr_pull_$type_fn($ndr, NDR_SCALARS, &v));");
+ $self->pidl("*$varname = v;");
+
+ $self->end_flags($bitmap, $ndr);
+}
+
+#####################################################################
+# generate a print function for an bitmap
+sub ParseBitmapPrintElement($$$$$$)
+{
+ my($self,$e,$bitmap,$ndr,$name,$varname) = @_;
+ my($type_decl) = mapTypeName($bitmap->{BASE_TYPE});
+ my($type_fn) = $bitmap->{BASE_TYPE};
+ my($flag);
+
+ if ($e =~ /^(\w+) .*$/) {
+ $flag = "$1";
+ } else {
+ die "Bitmap: \"$name\" invalid Flag: \"$e\"";
+ }
+
+ $self->pidl("ndr_print_bitmap_flag($ndr, sizeof($type_decl), \"$flag\", $flag, $varname);");
+}
+
+#####################################################################
+# generate a print function for an bitmap
+sub ParseBitmapPrint($$$$$)
+{
+ my($self,$bitmap,$ndr,$name,$varname) = @_;
+ my($type_decl) = mapTypeName($bitmap->{TYPE});
+ my($type_fn) = $bitmap->{BASE_TYPE};
+
+ $self->start_flags($bitmap, $ndr);
+
+ $self->pidl("ndr_print_$type_fn($ndr, name, $varname);");
+
+ $self->pidl("$ndr->depth++;");
+ foreach my $e (@{$bitmap->{ELEMENTS}}) {
+ $self->ParseBitmapPrintElement($e, $bitmap, $ndr, $name, $varname);
+ }
+ $self->pidl("$ndr->depth--;");
+
+ $self->end_flags($bitmap, $ndr);
+}
+
+sub DeclBitmap($$$$)
+{
+ my ($e,$t,$name,$varname) = @_;
+ return mapTypeName(Parse::Pidl::Typelist::bitmap_type_fn($e)) .
+ ($t eq "pull"?" *":" ") . $varname;
+}
+
+$typefamily{BITMAP} = {
+ DECL => \&DeclBitmap,
+ PUSH_FN_BODY => \&ParseBitmapPush,
+ PULL_FN_BODY => \&ParseBitmapPull,
+ PRINT_FN_BODY => \&ParseBitmapPrint,
+};
+
+#####################################################################
+# generate a struct print function
+sub ParseStructPrint($$$$$)
+{
+ my($self,$struct,$ndr,$name,$varname) = @_;
+
+ return unless defined $struct->{ELEMENTS};
+
+ my $env = GenerateStructEnv($struct, $varname);
+
+ $self->DeclareArrayVariables($_) foreach (@{$struct->{ELEMENTS}});
+
+ $self->pidl("ndr_print_struct($ndr, name, \"$name\");");
+ $self->pidl("if (r == NULL) { ndr_print_null($ndr); return; }");
+
+ $self->start_flags($struct, $ndr);
+
+ $self->pidl("$ndr->depth++;");
+
+ $self->ParseElementPrint($_, $ndr, $env->{$_->{NAME}}, $env)
+ foreach (@{$struct->{ELEMENTS}});
+ $self->pidl("$ndr->depth--;");
+
+ $self->end_flags($struct, $ndr);
+}
+
+sub DeclarePtrVariables($$)
+{
+ my ($self,$e) = @_;
+
+ if (has_property($e, "skip") or has_property($e, "skip_noinit")) {
+ return;
+ }
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ my $size = 32;
+ if ($l->{TYPE} eq "POINTER" and
+ not ($l->{POINTER_TYPE} eq "ref" and $l->{LEVEL} eq "TOP")) {
+ if ($l->{POINTER_TYPE} eq "relative_short") {
+ $size = 16;
+ }
+ $self->pidl("uint${size}_t _ptr_$e->{NAME};");
+ last;
+ }
+ }
+}
+
+sub DeclareArrayVariables($$;$)
+{
+ my ($self,$e,$pull) = @_;
+
+ if (has_property($e, "skip") or has_property($e, "skip_noinit")) {
+ return;
+ }
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ next if ($l->{TYPE} ne "ARRAY");
+ if (defined($pull)) {
+ $self->pidl("uint32_t size_$e->{NAME}_$l->{LEVEL_INDEX} = 0;");
+ if ($l->{IS_VARYING}) {
+ $self->pidl("uint32_t length_$e->{NAME}_$l->{LEVEL_INDEX} = 0;");
+ }
+ }
+ next if has_fast_array($e,$l);
+ next if is_charset_array($e,$l);
+ $self->pidl("uint32_t cntr_$e->{NAME}_$l->{LEVEL_INDEX};");
+ }
+}
+
+sub DeclareArrayVariablesNoZero($$$)
+{
+ my ($self,$e,$env) = @_;
+
+ if (has_property($e, "skip") or has_property($e, "skip_noinit")) {
+ return;
+ }
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ next if ($l->{TYPE} ne "ARRAY");
+ next if has_fast_array($e,$l);
+ next if is_charset_array($e,$l);
+ my $length = ParseExpr($l->{LENGTH_IS}, $env, $e->{ORIGINAL});
+ if ($length eq "0") {
+ warning($e->{ORIGINAL}, "pointless array cntr: 'cntr_$e->{NAME}_$l->{LEVEL_INDEX}': length=$length");
+ } else {
+ $self->pidl("uint32_t cntr_$e->{NAME}_$l->{LEVEL_INDEX};");
+ }
+ }
+}
+
+sub DeclareMemCtxVariables($$)
+{
+ my ($self,$e) = @_;
+
+ if (has_property($e, "skip") or has_property($e, "skip_noinit")) {
+ return;
+ }
+
+ foreach my $l (@{$e->{LEVELS}}) {
+ my $mem_flags = $self->ParseMemCtxPullFlags($e, $l);
+
+ if (($l->{TYPE} eq "POINTER") and ($l->{POINTER_TYPE} eq "ignore")) {
+ last;
+ }
+
+ if (defined($mem_flags)) {
+ $self->pidl("TALLOC_CTX *_mem_save_$e->{NAME}_$l->{LEVEL_INDEX} = NULL;");
+ }
+ }
+}
+
+sub ParseStructPullPrimitives($$$$$)
+{
+ my($self,$struct,$ndr,$varname,$env) = @_;
+
+ if (defined $struct->{SURROUNDING_ELEMENT}) {
+ $self->pidl("NDR_CHECK(ndr_pull_array_size($ndr, &$varname->$struct->{SURROUNDING_ELEMENT}->{NAME}));");
+ }
+
+ $self->pidl("NDR_CHECK(ndr_pull_align($ndr, $struct->{ALIGN}));");
+
+ if (defined($struct->{PROPERTIES}{relative_base})) {
+ # set the current offset as base for relative pointers
+ # and store it based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_pull_setup_relative_base_offset1($ndr, $varname, $ndr->offset));");
+ }
+
+ $self->ParseElementPull($_, $ndr, $env, 1, 0) foreach (@{$struct->{ELEMENTS}});
+
+ $self->add_deferred();
+
+ $self->pidl("NDR_CHECK(ndr_pull_trailer_align($ndr, $struct->{ALIGN}));");
+}
+
+sub ParseStructPullDeferred($$$$$)
+{
+ my ($self,$struct,$ndr,$varname,$env) = @_;
+
+ if (defined($struct->{PROPERTIES}{relative_base})) {
+ # retrieve the current offset as base for relative pointers
+ # based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_pull_setup_relative_base_offset2($ndr, $varname));");
+ }
+ foreach my $e (@{$struct->{ELEMENTS}}) {
+ $self->ParseElementPull($e, $ndr, $env, 0, 1);
+ }
+
+ $self->add_deferred();
+}
+
+#####################################################################
+# parse a struct - pull side
+sub ParseStructPull($$$$)
+{
+ my($self,$struct,$ndr,$varname) = @_;
+
+ return unless defined $struct->{ELEMENTS};
+
+ # declare any internal pointers we need
+ foreach my $e (@{$struct->{ELEMENTS}}) {
+ $self->DeclarePtrVariables($e);
+ $self->DeclareArrayVariables($e, "pull");
+ $self->DeclareMemCtxVariables($e);
+ }
+
+ $self->start_flags($struct, $ndr);
+
+ my $env = GenerateStructEnv($struct, $varname);
+
+ $self->pidl("NDR_PULL_CHECK_FLAGS(ndr, ndr_flags);");
+ $self->pidl("if (ndr_flags & NDR_SCALARS) {");
+ $self->indent;
+ $self->ParseStructPullPrimitives($struct,$ndr,$varname,$env);
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("if (ndr_flags & NDR_BUFFERS) {");
+ $self->indent;
+ $self->ParseStructPullDeferred($struct,$ndr,$varname,$env);
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->end_flags($struct, $ndr);
+}
+
+#####################################################################
+# calculate size of ndr struct
+sub ParseStructNdrSize($$$$)
+{
+ my ($self,$t, $name, $varname) = @_;
+ my $sizevar;
+
+ if (my $flags = has_property($t, "flag")) {
+ $self->pidl("flags |= $flags;");
+ }
+ $self->pidl("return ndr_size_struct($varname, flags, (ndr_push_flags_fn_t)ndr_push_$name);");
+}
+
+sub DeclStruct($$$$)
+{
+ my ($e,$t,$name,$varname) = @_;
+ return ($t ne "pull"?"const ":"") . "struct $name *$varname";
+}
+
+sub ArgsStructNdrSize($$$)
+{
+ my ($d, $name, $varname) = @_;
+ return "const struct $name *$varname, int flags";
+}
+
+$typefamily{STRUCT} = {
+ PUSH_FN_BODY => \&ParseStructPush,
+ DECL => \&DeclStruct,
+ PULL_FN_BODY => \&ParseStructPull,
+ PRINT_FN_BODY => \&ParseStructPrint,
+ SIZE_FN_BODY => \&ParseStructNdrSize,
+ SIZE_FN_ARGS => \&ArgsStructNdrSize,
+};
+
+#####################################################################
+# calculate size of ndr struct
+sub ParseUnionNdrSize($$$)
+{
+ my ($self, $t, $name, $varname) = @_;
+ my $sizevar;
+
+ if (my $flags = has_property($t, "flag")) {
+ $self->pidl("flags |= $flags;");
+ }
+
+ $self->pidl("return ndr_size_union($varname, flags, level, (ndr_push_flags_fn_t)ndr_push_$name);");
+}
+
+sub ParseUnionPushPrimitives($$$$)
+{
+ my ($self, $e, $ndr ,$varname) = @_;
+
+ my $have_default = 0;
+
+ $self->pidl("uint32_t level = ndr_push_get_switch_value($ndr, $varname);");
+
+ if (defined($e->{SWITCH_TYPE})) {
+ if (defined($e->{ALIGN})) {
+ $self->pidl("NDR_CHECK(ndr_push_union_align($ndr, $e->{ALIGN}));");
+ }
+
+ $self->pidl("NDR_CHECK(ndr_push_$e->{SWITCH_TYPE}($ndr, NDR_SCALARS, level));");
+ }
+
+ if (defined($e->{ALIGN})) {
+ if ($e->{IS_MS_UNION}) {
+ $self->pidl("/* ms_union is always aligned to the largest union arm*/");
+ $self->pidl("NDR_CHECK(ndr_push_align($ndr, $e->{ALIGN}));");
+ } else {
+ $self->pidl("NDR_CHECK(ndr_push_union_align($ndr, $e->{ALIGN}));");
+ }
+ }
+
+ $self->pidl("switch (level) {");
+ $self->indent;
+ foreach my $el (@{$e->{ELEMENTS}}) {
+ if ($el->{CASE} eq "default") {
+ $have_default = 1;
+ }
+ $self->pidl("$el->{CASE}: {");
+
+ if ($el->{TYPE} ne "EMPTY") {
+ $self->indent;
+ if (defined($e->{PROPERTIES}{relative_base})) {
+ $self->pidl("NDR_CHECK(ndr_push_align($ndr, $el->{ALIGN}));");
+ # set the current offset as base for relative pointers
+ # and store it based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_push_setup_relative_base_offset1($ndr, $varname, $ndr->offset));");
+ }
+ $self->DeclareArrayVariables($el);
+ my $el_env = {$el->{NAME} => "$varname->$el->{NAME}"};
+ $self->CheckRefPtrs($el, $ndr, $el_env);
+ $self->ParseElementPush($el, $ndr, $el_env, 1, 0);
+ $self->deindent;
+ }
+ $self->pidl("break; }");
+ $self->pidl("");
+ }
+ if (! $have_default) {
+ $self->pidl("default:");
+ $self->pidl("\treturn ndr_push_error($ndr, NDR_ERR_BAD_SWITCH, \"Bad switch value \%u at \%s\", level, __location__);");
+ }
+ $self->deindent;
+ $self->pidl("}");
+}
+
+sub ParseUnionPushDeferred($$$$)
+{
+ my ($self,$e,$ndr,$varname) = @_;
+
+ my $have_default = 0;
+
+ $self->pidl("uint32_t level = ndr_push_get_switch_value($ndr, $varname);");
+ if (defined($e->{PROPERTIES}{relative_base})) {
+ # retrieve the current offset as base for relative pointers
+ # based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_push_setup_relative_base_offset2($ndr, $varname));");
+ }
+ $self->pidl("switch (level) {");
+ $self->indent;
+ foreach my $el (@{$e->{ELEMENTS}}) {
+ if ($el->{CASE} eq "default") {
+ $have_default = 1;
+ }
+
+ $self->pidl("$el->{CASE}:");
+ if ($el->{TYPE} ne "EMPTY") {
+ $self->indent;
+ $self->ParseElementPush($el, $ndr, {$el->{NAME} => "$varname->$el->{NAME}"}, 0, 1);
+ $self->deindent;
+ }
+ $self->pidl("break;");
+ $self->pidl("");
+ }
+ if (! $have_default) {
+ $self->pidl("default:");
+ $self->pidl("\treturn ndr_push_error($ndr, NDR_ERR_BAD_SWITCH, \"Bad switch value \%u at \%s\", level, __location__);");
+ }
+ $self->deindent;
+ $self->pidl("}");
+}
+
+#####################################################################
+# parse a union - push side
+sub ParseUnionPush($$$$)
+{
+ my ($self,$e,$ndr,$varname) = @_;
+ my $have_default = 0;
+
+ $self->start_flags($e, $ndr);
+
+ $self->pidl("NDR_PUSH_CHECK_FLAGS(ndr, ndr_flags);");
+ $self->pidl("if (ndr_flags & NDR_SCALARS) {");
+ $self->indent;
+ $self->ParseUnionPushPrimitives($e, $ndr, $varname);
+ $self->deindent;
+ $self->pidl("}");
+ if (is_deferred_switch_non_empty($e)) {
+ $self->pidl("if (ndr_flags & NDR_BUFFERS) {");
+ $self->indent;
+ $self->ParseUnionPushDeferred($e, $ndr, $varname);
+ $self->deindent;
+ $self->pidl("}");
+ }
+ $self->end_flags($e, $ndr);
+}
+
+#####################################################################
+# print a union
+sub ParseUnionPrint($$$$$)
+{
+ my ($self,$e,$ndr,$name,$varname) = @_;
+ my $have_default = 0;
+
+ $self->pidl("uint32_t level;");
+ foreach my $el (@{$e->{ELEMENTS}}) {
+ $self->DeclareArrayVariables($el);
+ }
+
+ $self->start_flags($e, $ndr);
+
+ $self->pidl("level = ndr_print_get_switch_value($ndr, $varname);");
+
+ $self->pidl("ndr_print_union($ndr, name, level, \"$name\");");
+
+ $self->pidl("switch (level) {");
+ $self->indent;
+ foreach my $el (@{$e->{ELEMENTS}}) {
+ if ($el->{CASE} eq "default") {
+ $have_default = 1;
+ }
+ $self->pidl("$el->{CASE}:");
+ if ($el->{TYPE} ne "EMPTY") {
+ $self->indent;
+ $self->ParseElementPrint($el, $ndr, "$varname->$el->{NAME}", {});
+ $self->deindent;
+ }
+ $self->pidl("break;");
+ $self->pidl("");
+ }
+ if (! $have_default) {
+ $self->pidl("default:");
+ $self->pidl("\tndr_print_bad_level($ndr, name, level);");
+ }
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->end_flags($e, $ndr);
+}
+
+sub ParseUnionPullPrimitives($$$$$)
+{
+ my ($self,$e,$ndr,$varname,$switch_type) = @_;
+ my $have_default = 0;
+
+
+ if (defined($switch_type)) {
+ if (defined($e->{ALIGN})) {
+ $self->pidl("NDR_CHECK(ndr_pull_union_align($ndr, $e->{ALIGN}));");
+ }
+
+ $self->pidl("NDR_CHECK(ndr_pull_$switch_type($ndr, NDR_SCALARS, &_level));");
+ $self->pidl("if (_level != level) {");
+ $self->pidl("\treturn ndr_pull_error($ndr, NDR_ERR_BAD_SWITCH, \"Bad switch value %u for $varname at \%s\", _level, __location__);");
+ $self->pidl("}");
+ }
+
+ if (defined($e->{ALIGN})) {
+ if ($e->{IS_MS_UNION}) {
+ $self->pidl("/* ms_union is always aligned to the largest union arm*/");
+ $self->pidl("NDR_CHECK(ndr_pull_align($ndr, $e->{ALIGN}));");
+ } else {
+ $self->pidl("NDR_CHECK(ndr_pull_union_align($ndr, $e->{ALIGN}));");
+ }
+ }
+
+ $self->pidl("switch (level) {");
+ $self->indent;
+ foreach my $el (@{$e->{ELEMENTS}}) {
+ if ($el->{CASE} eq "default") {
+ $have_default = 1;
+ }
+ $self->pidl("$el->{CASE}: {");
+
+ if ($el->{TYPE} ne "EMPTY") {
+ $self->indent;
+ if (defined($e->{PROPERTIES}{relative_base})) {
+ $self->pidl("NDR_CHECK(ndr_pull_align($ndr, $el->{ALIGN}));");
+ # set the current offset as base for relative pointers
+ # and store it based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_pull_setup_relative_base_offset1($ndr, $varname, $ndr->offset));");
+ }
+ $self->ParseElementPull($el, $ndr, {$el->{NAME} => "$varname->$el->{NAME}"}, 1, 0);
+ $self->deindent;
+ }
+ $self->pidl("break; }");
+ $self->pidl("");
+ }
+ if (! $have_default) {
+ $self->pidl("default:");
+ $self->pidl("\treturn ndr_pull_error($ndr, NDR_ERR_BAD_SWITCH, \"Bad switch value \%u at \%s\", level, __location__);");
+ }
+ $self->deindent;
+ $self->pidl("}");
+}
+
+sub ParseUnionPullDeferred($$$$)
+{
+ my ($self,$e,$ndr,$varname) = @_;
+ my $have_default = 0;
+
+ if (defined($e->{PROPERTIES}{relative_base})) {
+ # retrieve the current offset as base for relative pointers
+ # based on the toplevel struct/union
+ $self->pidl("NDR_CHECK(ndr_pull_setup_relative_base_offset2($ndr, $varname));");
+ }
+ $self->pidl("switch (level) {");
+ $self->indent;
+ foreach my $el (@{$e->{ELEMENTS}}) {
+ if ($el->{CASE} eq "default") {
+ $have_default = 1;
+ }
+
+ $self->pidl("$el->{CASE}:");
+ if ($el->{TYPE} ne "EMPTY") {
+ $self->indent;
+ $self->ParseElementPull($el, $ndr, {$el->{NAME} => "$varname->$el->{NAME}"}, 0, 1);
+ $self->deindent;
+ }
+ $self->pidl("break;");
+ $self->pidl("");
+ }
+ if (! $have_default) {
+ $self->pidl("default:");
+ $self->pidl("\treturn ndr_pull_error($ndr, NDR_ERR_BAD_SWITCH, \"Bad switch value \%u at \%s\", level, __location__);");
+ }
+ $self->deindent;
+ $self->pidl("}");
+
+
+}
+
+#####################################################################
+# parse a union - pull side
+sub ParseUnionPull($$$$)
+{
+ my ($self,$e,$ndr,$varname) = @_;
+ my $switch_type = $e->{SWITCH_TYPE};
+ my $needs_deferred_switch = is_deferred_switch_non_empty($e);
+ $self->pidl("uint32_t level;");
+ if (defined($switch_type)) {
+ if (Parse::Pidl::Typelist::typeIs($switch_type, "ENUM")) {
+ $switch_type = Parse::Pidl::Typelist::enum_type_fn(getType($switch_type)->{DATA});
+ }
+ $self->pidl(mapTypeName($switch_type) . " _level;");
+ }
+
+ my %double_cases = ();
+ foreach my $el (@{$e->{ELEMENTS}}) {
+ next if ($el->{TYPE} eq "EMPTY");
+ next if ($double_cases{"$el->{NAME}"});
+ $self->DeclareMemCtxVariables($el);
+ $self->DeclarePtrVariables($el);
+ $self->DeclareArrayVariables($el, "pull");
+ $double_cases{"$el->{NAME}"} = 1;
+ }
+
+ $self->start_flags($e, $ndr);
+
+ $self->pidl("NDR_PULL_CHECK_FLAGS(ndr, ndr_flags);");
+ $self->pidl("if (ndr_flags & NDR_SCALARS) {");
+ $self->indent;
+ if (! $needs_deferred_switch) {
+ $self->pidl("/* This token is not used again */");
+ $self->pidl("level = ndr_pull_steal_switch_value($ndr, $varname);");
+ } else {
+ $self->pidl("level = ndr_pull_get_switch_value($ndr, $varname);");
+ }
+ $self->ParseUnionPullPrimitives($e,$ndr,$varname,$switch_type);
+ $self->deindent;
+ $self->pidl("}");
+ if ($needs_deferred_switch) {
+ $self->pidl("if (ndr_flags & NDR_BUFFERS) {");
+ $self->indent;
+ $self->pidl("/* The token is not needed after this. */");
+ $self->pidl("level = ndr_pull_steal_switch_value($ndr, $varname);");
+ $self->ParseUnionPullDeferred($e,$ndr,$varname);
+ $self->deindent;
+ $self->pidl("}");
+ }
+ $self->add_deferred();
+
+ $self->end_flags($e, $ndr);
+}
+
+sub DeclUnion($$$$)
+{
+ my ($e,$t,$name,$varname) = @_;
+ return ($t ne "pull"?"const ":"") . "union $name *$varname";
+}
+
+sub ArgsUnionNdrSize($$)
+{
+ my ($d,$name) = @_;
+ return "const union $name *r, uint32_t level, int flags";
+}
+
+$typefamily{UNION} = {
+ PUSH_FN_BODY => \&ParseUnionPush,
+ DECL => \&DeclUnion,
+ PULL_FN_BODY => \&ParseUnionPull,
+ PRINT_FN_BODY => \&ParseUnionPrint,
+ SIZE_FN_ARGS => \&ArgsUnionNdrSize,
+ SIZE_FN_BODY => \&ParseUnionNdrSize,
+};
+
+#####################################################################
+# parse a typedef - push side
+sub ParseTypedefPush($$$$)
+{
+ my($self,$e,$ndr,$varname) = @_;
+
+ my $env;
+
+ $env->{$e->{NAME}} = $varname;
+
+ $self->ParseElementPushLevel($e, $e->{LEVELS}[0], $ndr, $varname, $env, 1, 1);
+}
+
+#####################################################################
+# parse a typedef - pull side
+sub ParseTypedefPull($$$$)
+{
+ my($self,$e,$ndr,$varname) = @_;
+
+ my $env;
+
+ $env->{$e->{NAME}} = $varname;
+
+ $self->ParseElementPullLevel($e, $e->{LEVELS}[0], $ndr, $varname, $env, 1, 1);
+}
+
+#####################################################################
+# parse a typedef - print side
+sub ParseTypedefPrint($$$$$)
+{
+ my($self,$e,$ndr,$name,$varname) = @_;
+
+ $typefamily{$e->{DATA}->{TYPE}}->{PRINT_FN_BODY}->($self, $e->{DATA}, $ndr, $name, $varname);
+}
+
+#####################################################################
+## calculate the size of a structure
+sub ParseTypedefNdrSize($$$$)
+{
+ my($self,$t,$name,$varname) = @_;
+
+ $typefamily{$t->{DATA}->{TYPE}}->{SIZE_FN_BODY}->($self, $t->{DATA}, $name, $varname);
+}
+
+sub DeclTypedef($$$$)
+{
+ my ($e, $t, $name, $varname) = @_;
+
+ return $typefamily{$e->{DATA}->{TYPE}}->{DECL}->($e->{DATA}, $t, $name, $varname);
+}
+
+sub ArgsTypedefNdrSize($$$)
+{
+ my ($d, $name, $varname) = @_;
+ return $typefamily{$d->{DATA}->{TYPE}}->{SIZE_FN_ARGS}->($d->{DATA}, $name, $varname);
+}
+
+$typefamily{TYPEDEF} = {
+ PUSH_FN_BODY => \&ParseTypedefPush,
+ DECL => \&DeclTypedef,
+ PULL_FN_BODY => \&ParseTypedefPull,
+ PRINT_FN_BODY => \&ParseTypedefPrint,
+ SIZE_FN_ARGS => \&ArgsTypedefNdrSize,
+ SIZE_FN_BODY => \&ParseTypedefNdrSize,
+};
+
+sub ParsePipePushChunk($$)
+{
+ my ($self, $t) = @_;
+
+ my $pipe = $t;
+ $pipe = $t->{DATA} if ($t->{TYPE} eq "TYPEDEF");
+ my $struct = $pipe->{DATA};
+
+ my $name = "$struct->{NAME}";
+ my $ndr = "ndr";
+ my $varname = "r";
+
+ my $args = $typefamily{$struct->{TYPE}}->{DECL}->($struct, "push", $name, $varname);
+
+ $self->fn_declare("push", $struct, "enum ndr_err_code ndr_push_$name(struct ndr_push *$ndr, int ndr_flags, $args)") or return;
+
+ return if has_property($t, "nopush");
+
+ $self->pidl("{");
+ $self->indent;
+
+ $self->ParseStructPush($struct, $ndr, $varname);
+ $self->pidl("");
+
+ $self->pidl("NDR_CHECK(ndr_push_pipe_chunk_trailer(ndr, ndr_flags, $varname->count));");
+ $self->pidl("");
+
+ $self->pidl("return NDR_ERR_SUCCESS;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub ParsePipePullChunk($$)
+{
+ my ($self, $t) = @_;
+
+ my $pipe = $t;
+ $pipe = $t->{DATA} if ($t->{TYPE} eq "TYPEDEF");
+ my $struct = $pipe->{DATA};
+
+ my $name = "$struct->{NAME}";
+ my $ndr = "ndr";
+ my $varname = "r";
+
+ my $args = $typefamily{$struct->{TYPE}}->{DECL}->($struct, "pull", $name, $varname);
+
+ $self->fn_declare("pull", $struct, "enum ndr_err_code ndr_pull_$name(struct ndr_pull *$ndr, int ndr_flags, $args)") or return;
+
+ return if has_property($struct, "nopull");
+
+ $self->pidl("{");
+ $self->indent;
+
+ $self->ParseStructPull($struct, $ndr, $varname);
+ $self->pidl("");
+
+ $self->pidl("NDR_CHECK(ndr_check_pipe_chunk_trailer($ndr, ndr_flags, $varname->count));");
+ $self->pidl("");
+
+ $self->pidl("return NDR_ERR_SUCCESS;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub ParsePipePrintChunk($$)
+{
+ my ($self, $t) = @_;
+
+ my $pipe = $t;
+ $pipe = $t->{DATA} if ($t->{TYPE} eq "TYPEDEF");
+ my $struct = $pipe->{DATA};
+
+ my $name = "$struct->{NAME}";
+ my $ndr = "ndr";
+ my $varname = "r";
+
+ my $args = $typefamily{$struct->{TYPE}}->{DECL}->($struct, "print", $name, $varname);
+
+ $self->pidl_hdr("void ndr_print_$name(struct ndr_print *ndr, const char *name, $args);");
+
+ return if (has_property($t, "noprint"));
+
+ $self->pidl("_PUBLIC_ void ndr_print_$name(struct ndr_print *$ndr, const char *name, $args)");
+ $self->pidl("{");
+ $self->indent;
+ $self->ParseTypePrint($struct, $ndr, $varname);
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+#####################################################################
+# parse a function - print side
+sub ParseFunctionPrint($$)
+{
+ my($self, $fn) = @_;
+ my $ndr = "ndr";
+
+ $self->pidl_hdr("void ndr_print_$fn->{NAME}(struct ndr_print *$ndr, const char *name, int flags, const struct $fn->{NAME} *r);");
+
+ return if has_property($fn, "noprint");
+
+ $self->pidl("_PUBLIC_ void ndr_print_$fn->{NAME}(struct ndr_print *$ndr, const char *name, int flags, const struct $fn->{NAME} *r)");
+ $self->pidl("{");
+ $self->indent;
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ $self->DeclareArrayVariables($e);
+ }
+
+ $self->pidl("ndr_print_struct($ndr, name, \"$fn->{NAME}\");");
+ $self->pidl("if (r == NULL) { ndr_print_null($ndr); return; }");
+ $self->pidl("$ndr->depth++;");
+
+ $self->pidl("if (flags & NDR_SET_VALUES) {");
+ $self->pidl("\t$ndr->flags |= LIBNDR_PRINT_SET_VALUES;");
+ $self->pidl("}");
+
+ $self->pidl("if (flags & NDR_IN) {");
+ $self->indent;
+ $self->pidl("ndr_print_struct($ndr, \"in\", \"$fn->{NAME}\");");
+ $self->pidl("$ndr->depth++;");
+
+ my $env = GenerateFunctionInEnv($fn);
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep(/in/,@{$e->{DIRECTION}})) {
+ $self->ParseElementPrint($e, $ndr, $env->{$e->{NAME}}, $env);
+ }
+ }
+ $self->pidl("$ndr->depth--;");
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->pidl("if (flags & NDR_OUT) {");
+ $self->indent;
+ $self->pidl("ndr_print_struct($ndr, \"out\", \"$fn->{NAME}\");");
+ $self->pidl("$ndr->depth++;");
+
+ $env = GenerateFunctionOutEnv($fn);
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep(/out/,@{$e->{DIRECTION}})) {
+ $self->ParseElementPrint($e, $ndr, $env->{$e->{NAME}}, $env);
+ }
+ }
+ if ($fn->{RETURN_TYPE}) {
+ $self->pidl("ndr_print_$fn->{RETURN_TYPE}($ndr, \"result\", r->out.result);");
+ }
+ $self->pidl("$ndr->depth--;");
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->pidl("$ndr->depth--;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+#####################################################################
+# parse a function
+sub ParseFunctionPush($$)
+{
+ my($self, $fn) = @_;
+ my $ndr = "ndr";
+
+ $self->fn_declare("push", $fn, "enum ndr_err_code ndr_push_$fn->{NAME}(struct ndr_push *$ndr, int flags, const struct $fn->{NAME} *r)") or return;
+
+ return if has_property($fn, "nopush");
+
+ $self->pidl("{");
+ $self->indent;
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ $self->DeclareArrayVariables($e);
+ }
+
+ $self->pidl("NDR_PUSH_CHECK_FN_FLAGS(ndr, flags);");
+
+ $self->pidl("if (flags & NDR_IN) {");
+ $self->indent;
+
+ my $env = GenerateFunctionInEnv($fn);
+
+ EnvSubstituteValue($env, $fn);
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep(/in/,@{$e->{DIRECTION}})) {
+ $self->CheckRefPtrs($e, $ndr, $env);
+ }
+ }
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep(/in/,@{$e->{DIRECTION}})) {
+ $self->ParseElementPush($e, $ndr, $env, 1, 1);
+ }
+ }
+
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->pidl("if (flags & NDR_OUT) {");
+ $self->indent;
+
+ $env = GenerateFunctionOutEnv($fn);
+ EnvSubstituteValue($env, $fn);
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep(/out/,@{$e->{DIRECTION}})) {
+ $self->CheckRefPtrs($e, $ndr, $env);
+ }
+ }
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ if (grep(/out/,@{$e->{DIRECTION}})) {
+ $self->ParseElementPush($e, $ndr, $env, 1, 1);
+ }
+ }
+
+ if ($fn->{RETURN_TYPE}) {
+ $self->pidl("NDR_CHECK(ndr_push_$fn->{RETURN_TYPE}($ndr, NDR_SCALARS, r->out.result));");
+ }
+
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("return NDR_ERR_SUCCESS;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub AllocateArrayLevel($$$$$$)
+{
+ my ($self,$e,$l,$ndr,$var,$size) = @_;
+
+ my $pl = GetPrevLevel($e, $l);
+ if (defined($pl) and
+ $pl->{TYPE} eq "POINTER" and
+ $pl->{POINTER_TYPE} eq "ref"
+ and not $l->{IS_ZERO_TERMINATED}) {
+ $self->pidl("if ($ndr->flags & LIBNDR_FLAG_REF_ALLOC) {");
+ $self->pidl("\tNDR_PULL_ALLOC_N($ndr, $var, $size);");
+ $self->pidl("}");
+ if (grep(/in/,@{$e->{DIRECTION}}) and
+ grep(/out/,@{$e->{DIRECTION}})) {
+ $self->pidl("memcpy(r->out.$e->{NAME}, r->in.$e->{NAME}, ($size) * sizeof(*r->in.$e->{NAME}));");
+ }
+ return;
+ }
+
+ $self->pidl("NDR_PULL_ALLOC_N($ndr, $var, $size);");
+}
+
+#####################################################################
+# parse a function
+sub ParseFunctionPull($$)
+{
+ my($self,$fn) = @_;
+ my $ndr = "ndr";
+
+ # pull function args
+ $self->fn_declare("pull", $fn, "enum ndr_err_code ndr_pull_$fn->{NAME}(struct ndr_pull *$ndr, int flags, struct $fn->{NAME} *r)") or return;
+
+ $self->pidl("{");
+ $self->indent;
+
+ # declare any internal pointers we need
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ $self->DeclarePtrVariables($e);
+ $self->DeclareArrayVariables($e, "pull");
+ }
+
+ my %double_cases = ();
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ next if ($e->{TYPE} eq "EMPTY");
+ next if ($double_cases{"$e->{NAME}"});
+ $self->DeclareMemCtxVariables($e);
+ $double_cases{"$e->{NAME}"} = 1;
+ }
+
+ $self->pidl("NDR_PULL_CHECK_FN_FLAGS(ndr, flags);");
+
+ $self->pidl("if (flags & NDR_IN) {");
+ $self->indent;
+
+ # auto-init the out section of a structure. I originally argued that
+ # this was a bad idea as it hides bugs, but coping correctly
+ # with initialisation and not wiping ref vars is turning
+ # out to be too tricky (tridge)
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ next unless grep(/out/, @{$e->{DIRECTION}});
+ $self->pidl("ZERO_STRUCT(r->out);");
+ $self->pidl("");
+ last;
+ }
+
+ my $env = GenerateFunctionInEnv($fn);
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ next unless (grep(/in/, @{$e->{DIRECTION}}));
+ $self->ParseElementPull($e, $ndr, $env, 1, 1);
+ }
+
+ # allocate the "simple" out ref variables. FIXME: Shouldn't this have it's
+ # own flag rather than be in NDR_IN ?
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ next unless (grep(/out/, @{$e->{DIRECTION}}));
+ next unless ($e->{LEVELS}[0]->{TYPE} eq "POINTER" and
+ $e->{LEVELS}[0]->{POINTER_TYPE} eq "ref");
+ next if (($e->{LEVELS}[1]->{TYPE} eq "DATA") and
+ ($e->{LEVELS}[1]->{DATA_TYPE} eq "string"));
+ next if ($e->{LEVELS}[1]->{TYPE} eq "PIPE");
+ next if (($e->{LEVELS}[1]->{TYPE} eq "ARRAY")
+ and $e->{LEVELS}[1]->{IS_ZERO_TERMINATED});
+
+ if ($e->{LEVELS}[1]->{TYPE} eq "ARRAY") {
+ my $size = ParseExprExt($e->{LEVELS}[1]->{SIZE_IS}, $env, $e->{ORIGINAL},
+ check_null_pointer($e, $env, sub { $self->pidl(shift); },
+ "return ndr_pull_error($ndr, NDR_ERR_INVALID_POINTER, \"NULL Pointer for size_is()\");"),
+ check_fully_dereferenced($e, $env));
+ $self->pidl("NDR_PULL_ALLOC_N($ndr, r->out.$e->{NAME}, $size);");
+
+ if (grep(/in/, @{$e->{DIRECTION}})) {
+ $self->pidl("memcpy(r->out.$e->{NAME}, r->in.$e->{NAME}, ($size) * sizeof(*r->in.$e->{NAME}));");
+ } else {
+ $self->pidl("memset(r->out.$e->{NAME}, 0, ($size) * sizeof(*r->out.$e->{NAME}));");
+ }
+ } elsif ($e->{LEVELS}[1]->{TYPE} eq "ARRAY") {
+ if (grep(/in/, @{$e->{DIRECTION}})) {
+ $self->pidl("r->out.$e->{NAME} = r->in.$e->{NAME};");
+ } else {
+ $self->pidl("r->out.$e->{NAME} = NULL;");
+ }
+ } else {
+ $self->pidl("NDR_PULL_ALLOC($ndr, r->out.$e->{NAME});");
+
+ if (grep(/in/, @{$e->{DIRECTION}})) {
+ $self->pidl("*r->out.$e->{NAME} = *r->in.$e->{NAME};");
+ } else {
+ $self->pidl("ZERO_STRUCTP(r->out.$e->{NAME});");
+ }
+ }
+ }
+
+ $self->add_deferred();
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->pidl("if (flags & NDR_OUT) {");
+ $self->indent;
+
+ $env = GenerateFunctionOutEnv($fn);
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ next unless grep(/out/, @{$e->{DIRECTION}});
+ $self->ParseElementPull($e, $ndr, $env, 1, 1);
+ }
+
+ if ($fn->{RETURN_TYPE}) {
+ $self->pidl("NDR_CHECK(ndr_pull_$fn->{RETURN_TYPE}($ndr, NDR_SCALARS, &r->out.result));");
+ }
+
+ $self->add_deferred();
+ $self->deindent;
+ $self->pidl("}");
+
+ $self->pidl("return NDR_ERR_SUCCESS;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+}
+
+sub AuthServiceStruct($$$)
+{
+ my ($self, $ifacename, $authservice) = @_;
+ my @a = split /,/, $authservice;
+ my $authservice_count = $#a + 1;
+
+ $self->pidl("static const char * const $ifacename\_authservice_strings[] = {");
+ foreach my $ap (@a) {
+ $self->pidl("\t$ap, ");
+ }
+ $self->pidl("};");
+ $self->pidl("");
+
+ $self->pidl("static const struct ndr_interface_string_array $ifacename\_authservices = {");
+ $self->pidl("\t.count\t= $authservice_count,");
+ $self->pidl("\t.names\t= $ifacename\_authservice_strings");
+ $self->pidl("};");
+ $self->pidl("");
+}
+
+sub ParseGeneratePipeArray($$$)
+{
+ my ($self, $fn, $direction) = @_;
+
+ $self->pidl("static const struct ndr_interface_call_pipe $fn->{NAME}\_$direction\_pipes[] = {");
+ $self->indent;
+
+ foreach my $e (@{$fn->{ELEMENTS}}) {
+ next unless ContainsPipe($e, $e->{LEVELS}[0]);
+ next unless (grep(/$direction/, @{$e->{DIRECTION}}));
+
+ my $cname = "$e->{TYPE}_chunk";
+
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("\"$direction.$e->{NAME}\",");
+ $self->pidl("\"$cname\",");
+ $self->pidl("sizeof(struct $cname),");
+ $self->pidl("(ndr_push_flags_fn_t) ndr_push_$cname,");
+ $self->pidl("(ndr_pull_flags_fn_t) ndr_pull_$cname,");
+ $self->pidl("(ndr_print_fn_t) ndr_print_$cname,");
+ $self->deindent;
+ $self->pidl("},");
+ }
+ $self->pidl("{ NULL, NULL, 0, NULL, NULL, NULL }");
+ $self->deindent;
+ $self->pidl("};");
+ $self->pidl("");
+}
+
+sub FunctionCallPipes($$)
+{
+ my ($self, $d) = @_;
+ return if not defined($d->{OPNUM});
+
+ my $in_pipes = 0;
+ my $out_pipes = 0;
+
+ foreach my $e (@{$d->{ELEMENTS}}) {
+ next unless ContainsPipe($e, $e->{LEVELS}[0]);
+
+ if (grep(/in/, @{$e->{DIRECTION}})) {
+ $in_pipes++;
+ }
+ if (grep(/out/, @{$e->{DIRECTION}})) {
+ $out_pipes++;
+ }
+ }
+
+ if ($in_pipes) {
+ $self->ParseGeneratePipeArray($d, "in");
+ }
+
+ if ($out_pipes) {
+ $self->ParseGeneratePipeArray($d, "out");
+ }
+}
+
+sub FunctionCallEntry($$)
+{
+ my ($self, $d) = @_;
+ return 0 if not defined($d->{OPNUM});
+
+ my $in_pipes = 0;
+ my $out_pipes = 0;
+
+ foreach my $e (@{$d->{ELEMENTS}}) {
+ next unless ContainsPipe($e, $e->{LEVELS}[0]);
+
+ if (grep(/in/, @{$e->{DIRECTION}})) {
+ $in_pipes++;
+ }
+ if (grep(/out/, @{$e->{DIRECTION}})) {
+ $out_pipes++;
+ }
+ }
+
+ my $in_pipes_ptr = "NULL";
+ my $out_pipes_ptr = "NULL";
+
+ if ($in_pipes) {
+ $in_pipes_ptr = "$d->{NAME}_in_pipes";
+ }
+
+ if ($out_pipes) {
+ $out_pipes_ptr = "$d->{NAME}_out_pipes";
+ }
+
+ $self->pidl("\t{");
+ $self->pidl("\t\t\"$d->{NAME}\",");
+ $self->pidl("\t\tsizeof(struct $d->{NAME}),");
+ $self->pidl("\t\t(ndr_push_flags_fn_t) ndr_push_$d->{NAME},");
+ $self->pidl("\t\t(ndr_pull_flags_fn_t) ndr_pull_$d->{NAME},");
+ $self->pidl("\t\t(ndr_print_function_t) ndr_print_$d->{NAME},");
+ $self->pidl("\t\t{ $in_pipes, $in_pipes_ptr },");
+ $self->pidl("\t\t{ $out_pipes, $out_pipes_ptr },");
+ $self->pidl("\t},");
+ return 1;
+}
+
+#####################################################################
+# produce a function call table
+sub FunctionTable($$)
+{
+ my($self,$interface) = @_;
+ my $count = 0;
+ my $uname = uc $interface->{NAME};
+
+ return if ($#{$interface->{FUNCTIONS}}+1 == 0);
+ return unless defined ($interface->{PROPERTIES}->{uuid});
+
+ foreach my $d (@{$interface->{INHERITED_FUNCTIONS}},@{$interface->{FUNCTIONS}}) {
+ $self->FunctionCallPipes($d);
+ }
+
+ $self->pidl("static const struct ndr_interface_call $interface->{NAME}\_calls[] = {");
+
+ foreach my $d (@{$interface->{INHERITED_FUNCTIONS}},@{$interface->{FUNCTIONS}}) {
+ $count += $self->FunctionCallEntry($d);
+ }
+ $self->pidl("\t{ NULL, 0, NULL, NULL, NULL }");
+ $self->pidl("};");
+ $self->pidl("");
+
+ $self->pidl("static const char * const $interface->{NAME}\_endpoint_strings[] = {");
+ foreach my $ep (@{$interface->{ENDPOINTS}}) {
+ $self->pidl("\t$ep, ");
+ }
+ my $endpoint_count = $#{$interface->{ENDPOINTS}}+1;
+
+ $self->pidl("};");
+ $self->pidl("");
+
+ $self->pidl("static const struct ndr_interface_string_array $interface->{NAME}\_endpoints = {");
+ $self->pidl("\t.count\t= $endpoint_count,");
+ $self->pidl("\t.names\t= $interface->{NAME}\_endpoint_strings");
+ $self->pidl("};");
+ $self->pidl("");
+
+ if (! defined $interface->{PROPERTIES}->{authservice}) {
+ $interface->{PROPERTIES}->{authservice} = "\"host\"";
+ }
+
+ $self->AuthServiceStruct($interface->{NAME},
+ $interface->{PROPERTIES}->{authservice});
+
+ $self->pidl("\nconst struct ndr_interface_table ndr_table_$interface->{NAME} = {");
+ $self->pidl("\t.name\t\t= \"$interface->{NAME}\",");
+ $self->pidl("\t.syntax_id\t= {");
+ $self->pidl("\t\t" . print_uuid($interface->{UUID}) .",");
+ $self->pidl("\t\tNDR_$uname\_VERSION");
+ $self->pidl("\t},");
+ $self->pidl("\t.helpstring\t= NDR_$uname\_HELPSTRING,");
+ $self->pidl("\t.num_calls\t= $count,");
+ $self->pidl("\t.calls\t\t= $interface->{NAME}\_calls,");
+ $self->pidl("\t.endpoints\t= &$interface->{NAME}\_endpoints,");
+ $self->pidl("\t.authservices\t= &$interface->{NAME}\_authservices");
+ $self->pidl("};");
+ $self->pidl("");
+
+}
+
+#####################################################################
+# generate include statements for imported idl files
+# Each import is unquoted, has its ".idl" suffix stripped, and becomes
+# an #include of the corresponding generated ndr_*.h header (in-tree
+# vs. installed path chosen by choose_header()).
+sub HeaderImport
+{
+	my $self = shift;
+	my @imports = @_;
+	foreach (@imports) {
+		$_ = unmake_str($_);
+		s/\.idl$//;
+		$self->pidl(choose_header("librpc/gen_ndr/ndr_$_\.h", "gen_ndr/ndr_$_.h"));
+	}
+}
+
+#####################################################################
+# generate include statements for included header files
+# Entries are emitted verbatim into the generated header.
+sub HeaderInclude
+{
+	my $self = shift;
+	my @includes = @_;
+	foreach (@includes) {
+		$self->pidl_hdr("#include $_");
+	}
+}
+
+#####################################################################
+# generate prototypes and defines for the interface definitions
+# FIXME: these prototypes are for the DCE/RPC client functions, not the
+# NDR parser and so do not belong here, technically speaking
+sub HeaderInterface($$$)
+{
+	my($self,$interface,$needed) = @_;
+
+	my $count = 0;
+
+	# pull in optional support headers only when actually required
+	if ($needed->{"compression"}) {
+		$self->pidl(choose_header("librpc/ndr/ndr_compression.h", "ndr/compression.h"));
+	}
+
+	if (has_property($interface, "object")) {
+		$self->pidl(choose_header("librpc/gen_ndr/ndr_orpc.h", "ndr/orpc.h"));
+	}
+
+	if (defined $interface->{PROPERTIES}->{helper}) {
+		$self->HeaderInclude(split /,/, $interface->{PROPERTIES}->{helper});
+	}
+
+	# UUID/VERSION/NAME/HELPSTRING defines and the extern interface
+	# table are only emitted for interfaces carrying a uuid property
+	if (defined $interface->{PROPERTIES}->{uuid}) {
+		my $name = uc $interface->{NAME};
+		$self->pidl_hdr("#define NDR_$name\_UUID " .
+			Parse::Pidl::Util::make_str(lc($interface->{UUID})));
+
+		$self->pidl_hdr("#define NDR_$name\_VERSION $interface->{VERSION}");
+
+		$self->pidl_hdr("#define NDR_$name\_NAME \"$interface->{NAME}\"");
+
+		if(!defined $interface->{PROPERTIES}->{helpstring}) { $interface->{PROPERTIES}->{helpstring} = "NULL"; }
+		$self->pidl_hdr("#define NDR_$name\_HELPSTRING $interface->{PROPERTIES}->{helpstring}");
+
+		$self->pidl_hdr("extern const struct ndr_interface_table ndr_table_$interface->{NAME};");
+	}
+
+	# assign an opnum define per function; [noopnum] and inherited
+	# functions are skipped, derived interfaces offset their opnums
+	# by the base interface's call count
+	foreach (@{$interface->{FUNCTIONS}}) {
+		next if has_property($_, "noopnum");
+		next if grep(/^$_->{NAME}$/,@{$interface->{INHERITED_FUNCTIONS}});
+		my $u_name = uc $_->{NAME};
+
+		my $val = sprintf("0x%02x", $count);
+		if (defined($interface->{BASE})) {
+			$val .= " + NDR_" . uc $interface->{BASE} . "_CALL_COUNT";
+		}
+
+		$self->pidl_hdr("#define NDR_$u_name ($val)");
+
+		$self->pidl_hdr("");
+		$count++;
+	}
+
+	# total call count, again offset by the base interface if any
+	my $val = $count;
+
+	if (defined($interface->{BASE})) {
+		$val .= " + NDR_" . uc $interface->{BASE} . "_CALL_COUNT";
+	}
+
+	$self->pidl_hdr("#define NDR_" . uc $interface->{NAME} . "_CALL_COUNT ($val)");
+
+}
+
+# Emit the push (marshall) body for a type via its typefamily handler,
+# bracketed by relative-base-offset save/restore when the type is
+# marked [relative_base].  ($primitives/$deferred are accepted for
+# signature symmetry but not used here.)
+sub ParseTypePush($$$$$$)
+{
+	my ($self,$e, $ndr, $varname, $primitives, $deferred) = @_;
+
+	# save the old relative_base_offset
+	$self->pidl("uint32_t _save_relative_base_offset = ndr_push_get_relative_base_offset($ndr);") if defined(has_property($e, "relative_base"));
+	$typefamily{$e->{TYPE}}->{PUSH_FN_BODY}->($self, $e, $ndr, $varname);
+	# restore the old relative_base_offset
+	$self->pidl("ndr_push_restore_relative_base_offset($ndr, _save_relative_base_offset);") if defined(has_property($e, "relative_base"));
+}
+
+# Emit the complete ndr_push_<type>() function: declare it via
+# fn_declare() (which returns false when the function must not be
+# emitted, hence the "or return"), then the body generated by
+# ParseTypePush() followed by "return NDR_ERR_SUCCESS;".
+sub ParseTypePushFunction($$$)
+{
+	my ($self, $e, $varname) = @_;
+	my $ndr = "ndr";
+
+	my $args = $typefamily{$e->{TYPE}}->{DECL}->($e, "push", $e->{NAME}, $varname);
+
+	$self->fn_declare("push", $e, "enum ndr_err_code ".TypeFunctionName("ndr_push", $e)."(struct ndr_push *$ndr, int ndr_flags, $args)") or return;
+
+	$self->pidl("{");
+	$self->indent;
+	$self->ParseTypePush($e, $ndr, $varname, 1, 1);
+	$self->pidl("return NDR_ERR_SUCCESS;");
+	$self->deindent;
+	$self->pidl("}");
+	# fixed: stray ";;" (empty statement) removed, matching
+	# ParseTypePullFunction
+	$self->pidl("");
+}
+
+# Emit the pull (unmarshall) body for a type via its typefamily handler,
+# bracketed by relative-base-offset save/restore for [relative_base]
+# types.  ($primitives/$deferred are accepted but unused here.)
+sub ParseTypePull($$$$$$)
+{
+	my ($self, $e, $ndr, $varname, $primitives, $deferred) = @_;
+
+	# save the old relative_base_offset
+	$self->pidl("uint32_t _save_relative_base_offset = ndr_pull_get_relative_base_offset($ndr);") if defined(has_property($e, "relative_base"));
+	$typefamily{$e->{TYPE}}->{PULL_FN_BODY}->($self, $e, $ndr, $varname);
+	# restore the old relative_base_offset
+	$self->pidl("ndr_pull_restore_relative_base_offset($ndr, _save_relative_base_offset);") if defined(has_property($e, "relative_base"));
+}
+
+# Emit the complete ndr_pull_<type>() function (declaration via
+# fn_declare, body via ParseTypePull).
+sub ParseTypePullFunction($$)
+{
+	my ($self, $e, $varname) = @_;
+	my $ndr = "ndr";
+
+	my $args = $typefamily{$e->{TYPE}}->{DECL}->($e, "pull", $e->{NAME}, $varname);
+
+	$self->fn_declare("pull", $e, "enum ndr_err_code ".TypeFunctionName("ndr_pull", $e)."(struct ndr_pull *$ndr, int ndr_flags, $args)") or return;
+
+	$self->pidl("{");
+	$self->indent;
+	$self->ParseTypePull($e, $ndr, $varname, 1, 1);
+	$self->pidl("return NDR_ERR_SUCCESS;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+# Emit the print body for a type via its typefamily handler.
+sub ParseTypePrint($$$$)
+{
+	my ($self, $e, $ndr, $varname) = @_;
+
+	$typefamily{$e->{TYPE}}->{PRINT_FN_BODY}->($self, $e, $ndr, $e->{NAME}, $varname);
+}
+
+# Emit the ndr_print_<type>() function.  The header prototype is always
+# emitted; the function body is skipped for [noprint] types.
+sub ParseTypePrintFunction($$$)
+{
+	my ($self, $e, $varname) = @_;
+	my $ndr = "ndr";
+
+	my $args = $typefamily{$e->{TYPE}}->{DECL}->($e, "print", $e->{NAME}, $varname);
+
+	$self->pidl_hdr("void ".TypeFunctionName("ndr_print", $e)."(struct ndr_print *ndr, const char *name, $args);");
+
+	return if (has_property($e, "noprint"));
+
+	$self->pidl("_PUBLIC_ void ".TypeFunctionName("ndr_print", $e)."(struct ndr_print *$ndr, const char *name, $args)");
+	$self->pidl("{");
+	$self->indent;
+	$self->ParseTypePrint($e, $ndr, $varname);
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+# Emit the ndr_size_<type>() function for [gensize] types; the
+# typefamily SIZE_FN_BODY handler supplies the body including its
+# return statement.
+sub ParseTypeNdrSize($$)
+{
+	my ($self,$t) = @_;
+
+	my $varname = "r";
+	my $tf = $typefamily{$t->{TYPE}};
+	my $args = $tf->{SIZE_FN_ARGS}->($t, $t->{NAME}, $varname);
+
+	$self->fn_declare("size", $t, "size_t ndr_size_$t->{NAME}($args)") or return;
+
+	$self->pidl("{");
+	$self->indent;
+	$typefamily{$t->{TYPE}}->{SIZE_FN_BODY}->($self,$t, $t->{NAME}, $varname);
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+#####################################################################
+# parse the interface definitions
+# Emits the generated-header include guard, the per-interface
+# defines/prototypes, every needed type and function parser, and the
+# interface function table.
+sub ParseInterface($$$)
+{
+	my($self,$interface,$needed) = @_;
+
+	$self->pidl_hdr("#ifndef _HEADER_NDR_$interface->{NAME}");
+	$self->pidl_hdr("#define _HEADER_NDR_$interface->{NAME}");
+
+	$self->pidl_hdr("");
+
+	$self->HeaderInterface($interface, $needed);
+
+	# Typedefs
+	foreach my $d (@{$interface->{TYPES}}) {
+		if (Parse::Pidl::Typelist::typeIs($d, "PIPE")) {
+			# pipes use dedicated chunk parsers; clear the flags
+			# afterwards so the generic type generation below
+			# cannot emit them a second time
+			($needed->{TypeFunctionName("ndr_push", $d)}) &&
+				$self->ParsePipePushChunk($d);
+			($needed->{TypeFunctionName("ndr_pull", $d)}) &&
+				$self->ParsePipePullChunk($d);
+			($needed->{TypeFunctionName("ndr_print", $d)}) &&
+				$self->ParsePipePrintChunk($d);
+
+			$needed->{TypeFunctionName("ndr_pull", $d)} = 0;
+			$needed->{TypeFunctionName("ndr_push", $d)} = 0;
+			$needed->{TypeFunctionName("ndr_print", $d)} = 0;
+			next;
+		}
+
+		# skip declarations without a body
+		next unless(typeHasBody($d));
+
+		($needed->{TypeFunctionName("ndr_push", $d)}) && $self->ParseTypePushFunction($d, "r");
+		($needed->{TypeFunctionName("ndr_pull", $d)}) && $self->ParseTypePullFunction($d, "r");
+		($needed->{TypeFunctionName("ndr_print", $d)}) && $self->ParseTypePrintFunction($d, "r");
+
+		# Make sure we don't generate a function twice...
+		$needed->{TypeFunctionName("ndr_push", $d)} =
+			$needed->{TypeFunctionName("ndr_pull", $d)} =
+			$needed->{TypeFunctionName("ndr_print", $d)} = 0;
+
+		($needed->{"ndr_size_$d->{NAME}"}) && $self->ParseTypeNdrSize($d);
+	}
+
+	# Functions
+	foreach my $d (@{$interface->{FUNCTIONS}}) {
+		($needed->{"ndr_push_$d->{NAME}"}) && $self->ParseFunctionPush($d);
+		($needed->{"ndr_pull_$d->{NAME}"}) && $self->ParseFunctionPull($d);
+		($needed->{"ndr_print_$d->{NAME}"}) && $self->ParseFunctionPrint($d);
+	}
+
+	$self->FunctionTable($interface);
+
+	$self->pidl_hdr("#endif /* _HEADER_NDR_$interface->{NAME} */");
+}
+
+# Emit the includes for the generated source: the Samba master header
+# when building in-tree, otherwise a self-contained set of libc headers.
+sub GenerateIncludes($)
+{
+	my ($self) = @_;
+	if (is_intree()) {
+		$self->pidl("#include \"includes.h\"");
+	} else {
+		$self->pidl("#ifndef _GNU_SOURCE");
+		$self->pidl("#define _GNU_SOURCE");
+		$self->pidl("#endif");
+		$self->pidl("#include <stdint.h>");
+		$self->pidl("#include <stdlib.h>");
+		$self->pidl("#include <stdio.h>");
+		$self->pidl("#include <stdbool.h>");
+		$self->pidl("#include <stdarg.h>");
+		$self->pidl("#include <string.h>");
+	}
+}
+
+#####################################################################
+# parse a parsed IDL structure back into an IDL file
+# Top-level driver: emits header/source preambles, then walks the IDL
+# tree twice - first collecting which parser functions are needed,
+# then generating interfaces, imports and includes.  Returns the
+# (header text, source text) pair.
+sub Parse($$$$)
+{
+	my($self, $ndr,$gen_header,$ndr_header) = @_;
+
+	$self->pidl_hdr("/* header auto-generated by pidl */");
+	$self->pidl_hdr("");
+	$self->pidl_hdr(choose_header("librpc/ndr/libndr.h", "ndr.h"));
+	$self->pidl_hdr("#include \"$gen_header\"") if ($gen_header);
+	$self->pidl_hdr("");
+
+	$self->pidl("/* parser auto-generated by pidl */");
+	$self->pidl("");
+	$self->GenerateIncludes();
+	$self->pidl("#include \"$ndr_header\"") if ($ndr_header);
+	$self->pidl("");
+
+	my %needed = ();
+
+	# pass 1: work out which push/pull/print functions are required
+	foreach (@{$ndr}) {
+		($_->{TYPE} eq "INTERFACE") && NeededInterface($_, \%needed);
+	}
+
+	# pass 2: generate
+	foreach (@{$ndr}) {
+		($_->{TYPE} eq "INTERFACE") && $self->ParseInterface($_, \%needed);
+		($_->{TYPE} eq "IMPORT") && $self->HeaderImport(@{$_->{PATHS}});
+		($_->{TYPE} eq "INCLUDE") && $self->HeaderInclude(@{$_->{PATHS}});
+	}
+
+	return ($self->{res_hdr}, $self->{res});
+}
+
+# Record in $needed the helper functions required to handle a single
+# element in direction $dir ("pull"/"push"/"print"): the
+# ndr_<dir>_<type> function plus, when the wire type and representation
+# type differ, the conversion helper between them.
+sub NeededElement($$$)
+{
+	my ($e, $dir, $needed) = @_;
+
+	return if ($e->{TYPE} eq "EMPTY");
+
+	return if (ref($e->{TYPE}) eq "HASH" and
+		   not defined($e->{TYPE}->{NAME}));
+
+	# $t: name of the wire type, $rt: name of the representation type
+	my ($t, $rt);
+	if (ref($e->{TYPE}) eq "HASH") {
+		$t = $e->{TYPE}->{TYPE}."_".$e->{TYPE}->{NAME};
+	} else {
+		$t = $e->{TYPE};
+	}
+
+	if (ref($e->{REPRESENTATION_TYPE}) eq "HASH") {
+		$rt = $e->{REPRESENTATION_TYPE}->{TYPE}."_".$e->{REPRESENTATION_TYPE}->{NAME};
+	} else {
+		$rt = $e->{REPRESENTATION_TYPE};
+	}
+
+	# sanity check: the representation type must resolve to a name
+	die ("$e->{NAME} $t, $rt FOO") unless ($rt ne "");
+
+	my @fn = ();
+	if ($dir eq "print") {
+		push(@fn, TypeFunctionName("ndr_print", $e->{REPRESENTATION_TYPE}));
+	} elsif ($dir eq "pull") {
+		push (@fn, TypeFunctionName("ndr_pull", $e->{TYPE}));
+		push (@fn, "ndr_$t\_to_$rt")
+			if ($rt ne $t);
+	} elsif ($dir eq "push") {
+		push (@fn, TypeFunctionName("ndr_push", $e->{TYPE}));
+		push (@fn, "ndr_$rt\_to_$t")
+			if ($rt ne $t);
+	} else {
+		die("invalid direction `$dir'");
+	}
+
+	foreach (@fn) {
+		# only mark if not already present (an existing 0 entry
+		# means the function was generated already - keep it)
+		unless (defined($needed->{$_})) {
+			$needed->{$_} = 1;
+		}
+	}
+}
+
+# Mark pull/push/print as needed for a function and for each of its
+# elements in all three directions.
+sub NeededFunction($$)
+{
+	my ($fn,$needed) = @_;
+	$needed->{"ndr_pull_$fn->{NAME}"} = 1;
+	$needed->{"ndr_push_$fn->{NAME}"} = 1;
+	$needed->{"ndr_print_$fn->{NAME}"} = 1;
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		$e->{PARENT} = $fn;
+		NeededElement($e, $_, $needed) foreach ("pull", "push", "print");
+	}
+}
+
+# Recursively mark the helpers needed for a type (and the types of its
+# elements) in direction $req; also flags "compression" if any element
+# carries the compression property.
+sub NeededType($$$)
+{
+	# forward declaration so the recursive calls below are checked
+	# against the prototype
+	sub NeededType($$$);
+	my ($t,$needed,$req) = @_;
+
+	NeededType($t->{DATA}, $needed, $req) if ($t->{TYPE} eq "TYPEDEF");
+	NeededType($t->{DATA}, $needed, $req) if ($t->{TYPE} eq "PIPE");
+
+	if ($t->{TYPE} eq "STRUCT" or $t->{TYPE} eq "UNION") {
+		return unless defined($t->{ELEMENTS});
+		for my $e (@{$t->{ELEMENTS}}) {
+			$e->{PARENT} = $t;
+			if (has_property($e, "compression")) {
+				$needed->{"compression"} = 1;
+			}
+			NeededElement($e, $req, $needed);
+			NeededType($e->{TYPE}, $needed, $req) if (ref($e->{TYPE}) eq "HASH");
+		}
+	}
+}
+
+#####################################################################
+# work out what parse functions are needed
+sub NeededInterface($$)
+{
+	my ($interface,$needed) = @_;
+	NeededFunction($_, $needed) foreach (@{$interface->{FUNCTIONS}});
+	# NOTE(review): types are walked in reverse declaration order -
+	# presumably so that requirements of later types propagate to the
+	# earlier types they reference; confirm before changing.
+	foreach (reverse @{$interface->{TYPES}}) {
+
+		# [public] types always get pull/push/print
+		if (has_property($_, "public")) {
+			$needed->{TypeFunctionName("ndr_pull", $_)} = $needed->{TypeFunctionName("ndr_push", $_)} =
+				$needed->{TypeFunctionName("ndr_print", $_)} = 1;
+		}
+
+		NeededType($_, $needed, "pull")  if ($needed->{TypeFunctionName("ndr_pull", $_)});
+		NeededType($_, $needed, "push")  if ($needed->{TypeFunctionName("ndr_push", $_)});
+		NeededType($_, $needed, "print") if ($needed->{TypeFunctionName("ndr_print", $_)});
+		if (has_property($_, "gensize")) {
+			$needed->{"ndr_size_$_->{NAME}"} = 1;
+		}
+	}
+}
+
+# Map a type reference to its generated function name: typedefs use
+# "<prefix>_<name>", other declared types include their kind
+# ("<prefix>_<kind>_<name>"), plain strings are appended directly.
+sub TypeFunctionName($$)
+{
+	my ($prefix, $t) = @_;
+
+	return "$prefix\_$t->{NAME}" if (ref($t) eq "HASH" and
+			$t->{TYPE} eq "TYPEDEF");
+	return "$prefix\_$t->{TYPE}_$t->{NAME}" if (ref($t) eq "HASH");
+	return "$prefix\_$t";
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Server.pm b/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Server.pm
new file mode 100644
index 0000000..ad36f00
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/NDR/Server.pm
@@ -0,0 +1,342 @@
+###################################################
+# server boilerplate generator
+# Copyright tridge@samba.org 2003
+# Copyright metze@samba.org 2004
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::NDR::Server;
+
+use strict;
+use Parse::Pidl::Util;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+# accumulated generated C source; reset by Parse(), appended to by
+# pidl() and the subs below
+my($res);
+
+# append a string verbatim to the output buffer
+sub pidl($)
+{
+	$res .= shift;
+}
+
+
+#####################################################
+# generate the switch statement for function dispatch
+# One "case <opnum>:" per function: casts the request struct, dumps it
+# at debug level >= 10, invokes the dcesrv_<fn>() handler (storing the
+# result for non-void functions) and logs async replies.
+sub gen_dispatch_switch($)
+{
+	my $interface = shift;
+
+	foreach my $fn (@{$interface->{FUNCTIONS}}) {
+		# skip functions without an assigned opnum
+		next if not defined($fn->{OPNUM});
+
+		pidl "\tcase $fn->{OPNUM}: {\n";
+		pidl "\t\tstruct $fn->{NAME} *r2 = (struct $fn->{NAME} *)r;\n";
+		pidl "\t\tif (DEBUGLEVEL >= 10) {\n";
+		pidl "\t\t\tNDR_PRINT_FUNCTION_DEBUG($fn->{NAME}, NDR_IN, r2);\n";
+		pidl "\t\t}\n";
+		if ($fn->{RETURN_TYPE} && $fn->{RETURN_TYPE} ne "void") {
+			pidl "\t\tr2->out.result = dcesrv_$fn->{NAME}(dce_call, mem_ctx, r2);\n";
+		} else {
+			pidl "\t\tdcesrv_$fn->{NAME}(dce_call, mem_ctx, r2);\n";
+		}
+		pidl "\t\tif (dce_call->state_flags & DCESRV_CALL_STATE_FLAG_ASYNC) {\n";
+		pidl "\t\t\tDEBUG(5,(\"function $fn->{NAME} will reply async\\n\"));\n";
+		pidl "\t\t}\n";
+		pidl "\t\tbreak;\n\t}\n";
+	}
+}
+
+#####################################################
+# generate the switch statement for function reply
+# One "case <opnum>:" per function: logs async completion, dumps the
+# output at debug level >= 10 on success, and logs any fault code.
+sub gen_reply_switch($)
+{
+	my $interface = shift;
+
+	foreach my $fn (@{$interface->{FUNCTIONS}}) {
+		# skip functions without an assigned opnum
+		next if not defined($fn->{OPNUM});
+
+		pidl "\tcase $fn->{OPNUM}: {\n";
+		pidl "\t\tstruct $fn->{NAME} *r2 = (struct $fn->{NAME} *)r;\n";
+		pidl "\t\tif (dce_call->state_flags & DCESRV_CALL_STATE_FLAG_ASYNC) {\n";
+		pidl "\t\t\tDEBUG(5,(\"function $fn->{NAME} replied async\\n\"));\n";
+		pidl "\t\t}\n";
+		pidl "\t\tif (DEBUGLEVEL >= 10 && dce_call->fault_code == 0) {\n";
+		pidl "\t\t\tNDR_PRINT_FUNCTION_DEBUG($fn->{NAME}, NDR_OUT | NDR_SET_VALUES, r2);\n";
+		pidl "\t\t}\n";
+		pidl "\t\tif (dce_call->fault_code != 0) {\n";
+		pidl "\t\t\tDEBUG(2,(\"dcerpc_fault %s in $fn->{NAME}\\n\", dcerpc_errstr(mem_ctx, dce_call->fault_code)));\n";
+		pidl "\t\t}\n";
+		pidl "\t\tbreak;\n\t}\n";
+	}
+}
+
+#####################################################################
+# produce boilerplate code for a interface
+# Emits the bind/unbind/ndr_pull/dispatch/reply/ndr_push callbacks and
+# the static dcesrv_<name>_interface structure that points at them.
+# Bind, unbind and flags can be overridden by the server through the
+# DCESRV_INTERFACE_<NAME>_{BIND,UNBIND,FLAGS} preprocessor macros.
+sub Boilerplate_Iface($)
+{
+	my($interface) = shift;
+	my $name = $interface->{NAME};
+	my $uname = uc $name;
+	my $uuid = lc($interface->{UUID});
+	my $if_version = $interface->{VERSION};
+
+	pidl "
+static NTSTATUS $name\__op_bind(struct dcesrv_call_state *dce_call, const struct dcesrv_interface *iface, uint32_t if_version)
+{
+#ifdef DCESRV_INTERFACE_$uname\_BIND
+	return DCESRV_INTERFACE_$uname\_BIND(dce_call,iface);
+#else
+	return NT_STATUS_OK;
+#endif
+}
+
+static void $name\__op_unbind(struct dcesrv_connection_context *context, const struct dcesrv_interface *iface)
+{
+#ifdef DCESRV_INTERFACE_$uname\_UNBIND
+	DCESRV_INTERFACE_$uname\_UNBIND(context, iface);
+#else
+	return;
+#endif
+}
+
+static NTSTATUS $name\__op_ndr_pull(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct ndr_pull *pull, void **r)
+{
+	enum ndr_err_code ndr_err;
+	uint16_t opnum = dce_call->pkt.u.request.opnum;
+
+	dce_call->fault_code = 0;
+
+	if (opnum >= ndr_table_$name.num_calls) {
+		dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR;
+		return NT_STATUS_NET_WRITE_FAULT;
+	}
+
+	*r = talloc_named(mem_ctx,
+			  ndr_table_$name.calls[opnum].struct_size,
+			  \"struct %s\",
+			  ndr_table_$name.calls[opnum].name);
+	NT_STATUS_HAVE_NO_MEMORY(*r);
+
+	/* unravel the NDR for the packet */
+	ndr_err = ndr_table_$name.calls[opnum].ndr_pull(pull, NDR_IN, *r);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		dcerpc_log_packet(dce_call->conn->packet_log_dir,
+				  &ndr_table_$name, opnum, NDR_IN,
+				  &dce_call->pkt.u.request.stub_and_verifier);
+		dce_call->fault_code = DCERPC_FAULT_NDR;
+		return NT_STATUS_NET_WRITE_FAULT;
+	}
+
+	return NT_STATUS_OK;
+}
+
+static NTSTATUS $name\__op_dispatch(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, void *r)
+{
+	uint16_t opnum = dce_call->pkt.u.request.opnum;
+
+	switch (opnum) {
+";
+	# per-opnum dispatch cases generated here
+	gen_dispatch_switch($interface);
+
+pidl "
+	default:
+		dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR;
+		break;
+	}
+
+	if (dce_call->fault_code != 0) {
+		dcerpc_log_packet(dce_call->conn->packet_log_dir,
+			  &ndr_table_$name, opnum, NDR_IN,
+			  &dce_call->pkt.u.request.stub_and_verifier);
+		return NT_STATUS_NET_WRITE_FAULT;
+	}
+
+	return NT_STATUS_OK;
+}
+
+static NTSTATUS $name\__op_reply(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, void *r)
+{
+	uint16_t opnum = dce_call->pkt.u.request.opnum;
+
+	switch (opnum) {
+";
+	# per-opnum reply/logging cases generated here
+	gen_reply_switch($interface);
+
+pidl "
+	default:
+		dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR;
+		break;
+	}
+
+	if (dce_call->fault_code != 0) {
+		dcerpc_log_packet(dce_call->conn->packet_log_dir,
+			  &ndr_table_$name, opnum, NDR_IN,
+			  &dce_call->pkt.u.request.stub_and_verifier);
+		return NT_STATUS_NET_WRITE_FAULT;
+	}
+
+	return NT_STATUS_OK;
+}
+
+static NTSTATUS $name\__op_ndr_push(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct ndr_push *push, const void *r)
+{
+	enum ndr_err_code ndr_err;
+	uint16_t opnum = dce_call->pkt.u.request.opnum;
+
+	ndr_err = ndr_table_$name.calls[opnum].ndr_push(push, NDR_OUT, r);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		dce_call->fault_code = DCERPC_FAULT_NDR;
+		return NT_STATUS_NET_WRITE_FAULT;
+	}
+
+	return NT_STATUS_OK;
+}
+
+static const struct dcesrv_interface dcesrv\_$name\_interface = {
+	.name		= \"$name\",
+	.syntax_id  = {".print_uuid($uuid).",$if_version},
+	.bind		= $name\__op_bind,
+	.unbind		= $name\__op_unbind,
+	.ndr_pull	= $name\__op_ndr_pull,
+	.dispatch	= $name\__op_dispatch,
+	.reply		= $name\__op_reply,
+	.ndr_push	= $name\__op_ndr_push,
+#ifdef DCESRV_INTERFACE_$uname\_FLAGS
+	.flags		= DCESRV_INTERFACE_$uname\_FLAGS
+#else
+	.flags		= 0
+#endif
+};
+
+";
+}
+
+#####################################################################
+# produce boilerplate code for an endpoint server
+# Emits init_server (registers every endpoint name from the interface
+# table), interface_by_uuid/interface_by_name lookups, and the
+# dcerpc_server_<name>_init() entry point that registers the endpoint
+# server with the DCERPC subsystem.  init_server is overridable via
+# the DCESRV_INTERFACE_<NAME>_INIT_SERVER macro.
+sub Boilerplate_Ep_Server($)
+{
+	my($interface) = shift;
+	my $name = $interface->{NAME};
+	my $uname = uc $name;
+
+	pidl "
+static NTSTATUS $name\__op_init_server(struct dcesrv_context *dce_ctx, const struct dcesrv_endpoint_server *ep_server)
+{
+	int i;
+
+	for (i=0;i<ndr_table_$name.endpoints->count;i++) {
+		NTSTATUS ret;
+		const char *name = ndr_table_$name.endpoints->names[i];
+
+		ret = dcesrv_interface_register(dce_ctx, name, &dcesrv_$name\_interface, NULL);
+		if (!NT_STATUS_IS_OK(ret)) {
+			DEBUG(1,(\"$name\_op_init_server: failed to register endpoint \'%s\'\\n\",name));
+			return ret;
+		}
+	}
+
+	return NT_STATUS_OK;
+}
+
+static bool $name\__op_interface_by_uuid(struct dcesrv_interface *iface, const struct GUID *uuid, uint32_t if_version)
+{
+	if (dcesrv_$name\_interface.syntax_id.if_version == if_version &&
+		GUID_equal(\&dcesrv\_$name\_interface.syntax_id.uuid, uuid)) {
+		memcpy(iface,&dcesrv\_$name\_interface, sizeof(*iface));
+		return true;
+	}
+
+	return false;
+}
+
+static bool $name\__op_interface_by_name(struct dcesrv_interface *iface, const char *name)
+{
+	if (strcmp(dcesrv_$name\_interface.name, name)==0) {
+		memcpy(iface, &dcesrv_$name\_interface, sizeof(*iface));
+		return true;
+	}
+
+	return false;
+}
+
+NTSTATUS dcerpc_server_$name\_init(TALLOC_CTX *ctx)
+{
+	NTSTATUS ret;
+	static const struct dcesrv_endpoint_server ep_server = {
+		/* fill in our name */
+		.name = \"$name\",
+
+		/* fill in all the operations */
+#ifdef DCESRV_INTERFACE_$uname\_INIT_SERVER
+		.init_server = DCESRV_INTERFACE_$uname\_INIT_SERVER,
+#else
+		.init_server = $name\__op_init_server,
+#endif
+		.interface_by_uuid = $name\__op_interface_by_uuid,
+		.interface_by_name = $name\__op_interface_by_name
+	};
+	/* register ourselves with the DCERPC subsystem. */
+	ret = dcerpc_register_ep_server(&ep_server);
+
+	if (!NT_STATUS_IS_OK(ret)) {
+		DEBUG(0,(\"Failed to register \'$name\' endpoint server!\\n\"));
+		return ret;
+	}
+
+	return ret;
+}
+
+";
+}
+
+#####################################################################
+# dcerpc server boilerplate from a parsed IDL structure
+# Always emits the init prototype; the full boilerplate is generated
+# only for interfaces that have a uuid and at least one function with
+# an opnum.  Appends to (and returns) the shared $res buffer.
+sub ParseInterface($)
+{
+	my($interface) = shift;
+	my $count = 0;
+
+	$res .= "NTSTATUS dcerpc_server_$interface->{NAME}\_init(TALLOC_CTX *);\n";
+	$res .= "\n";
+
+	if (!defined $interface->{PROPERTIES}->{uuid}) {
+		return $res;
+	}
+
+	# default the interface version when the IDL does not give one
+	if (!defined $interface->{PROPERTIES}->{version}) {
+		$interface->{PROPERTIES}->{version} = "0.0";
+	}
+
+	foreach my $fn (@{$interface->{FUNCTIONS}}) {
+		if (defined($fn->{OPNUM})) { $count++; }
+	}
+
+	# nothing to dispatch - skip the boilerplate entirely
+	if ($count == 0) {
+		return $res;
+	}
+
+	$res .= "/* $interface->{NAME} - dcerpc server boilerplate generated by pidl */\n\n";
+	Boilerplate_Iface($interface);
+	Boilerplate_Ep_Server($interface);
+
+	return $res;
+}
+
+# Generate the complete server stub file: preamble plus boilerplate for
+# every non-object interface in the IDL tree.
+sub Parse($$)
+{
+	my($ndr,$header) = @_;
+
+	$res = "";
+	$res .= "/* server functions auto-generated by pidl */\n";
+	$res .= "#include \"$header\"\n";
+	$res .= "#include <util/debug.h>\n";
+	$res .= "\n";
+
+	foreach my $x (@{$ndr}) {
+		ParseInterface($x) if ($x->{TYPE} eq "INTERFACE" and not defined($x->{PROPERTIES}{object}));
+	}
+
+	return $res;
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/Python.pm b/tools/pidl/lib/Parse/Pidl/Samba4/Python.pm
new file mode 100644
index 0000000..f418ac4
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/Python.pm
@@ -0,0 +1,2425 @@
+###################################################
+# Python function wrapper generator
+# Copyright jelmer@samba.org 2007-2008
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::Python;
+
+use Exporter;
+@ISA = qw(Exporter);
+
+use strict;
+use Parse::Pidl qw(warning fatal error);
+use Parse::Pidl::Typelist qw(hasType resolveType getType mapTypeName expandAlias bitmap_type_fn enum_type_fn);
+use Parse::Pidl::Util qw(has_property ParseExpr unmake_str);
+use Parse::Pidl::NDR qw(ReturnTypeElement GetPrevLevel GetNextLevel ContainsDeferred ContainsPipe is_charset_array);
+use Parse::Pidl::CUtil qw(get_value_of get_pointer_to);
+use Parse::Pidl::Samba4 qw(ArrayDynamicallyAllocated);
+use Parse::Pidl::Samba4::Header qw(GenerateFunctionInEnv GenerateFunctionOutEnv EnvSubstituteValue GenerateStructEnv);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+# Construct a generator object.  res/res_hdr accumulate the generated C
+# source and header text; tabs is the current indentation prefix; the
+# remaining fields collect constants, module methods/objects, ready
+# hooks and import lists (the *_uniq hashes de-duplicate their lists).
+sub new($) {
+	my ($class) = @_;
+	my $self = { res => "", res_hdr => "", tabs => "",
+		     constants => [], constants_uniq => {},
+		     module_methods => [],
+		     module_objects => [], module_objects_uniq => {},
+		     ready_types => [],
+		     module_imports => [], module_imports_uniq => {},
+		     type_imports => [], type_imports_uniq => {},
+		     patch_type_calls => [], prereadycode => [],
+		     postreadycode => []};
+	bless($self, $class);
+}
+
+# append text verbatim to the generated header
+sub pidl_hdr ($$)
+{
+	my $self = shift;
+	$self->{res_hdr} .= shift;
+}
+
+# append one line to the generated source, prefixed with the current
+# tab indentation; lines starting with '#' (cpp directives) are never
+# indented.  An empty/false $d emits just a newline.
+sub pidl($$)
+{
+	my ($self, $d) = @_;
+	if ($d) {
+		if ((!($d =~ /^#/))) {
+			$self->{res} .= $self->{tabs};
+		}
+		$self->{res} .= $d;
+	}
+	$self->{res} .= "\n";
+}
+
+# increase the indentation level by one tab
+sub indent($)
+{
+	my ($self) = @_;
+	$self->{tabs} .= "\t";
+}
+
+# decrease the indentation level by one tab
+sub deindent($)
+{
+	my ($self) = @_;
+	$self->{tabs} = substr($self->{tabs}, 0, -1);
+}
+
+# Strip the interface basename prefix ("<basename>_") from a type name
+# so the Python-visible name is shorter.  $basename may be dotted
+# (e.g. "samba.dcerpc.samr"); only its last component is matched.
+sub PrettifyTypeName($$)
+{
+	my ($name, $basename) = @_;
+
+	# keep only the last dot-separated component of the basename
+	# (fixed: use $1, not \1, in the replacement - \1 is a
+	# deprecated form that warns under "use warnings")
+	$basename =~ s/^.*\.([^.]+)$/$1/;
+
+	# drop a leading "<basename>_" prefix, if present
+	$name =~ s/^$basename\_//;
+
+	return $name;
+}
+
+# Handle an IDL "import": include the imported interface's generated
+# NDR header and register a runtime import of its samba.dcerpc module.
+sub Import
+{
+	my $self = shift;
+	my @imports = @_;
+	foreach (@imports) {
+		$_ = unmake_str($_);
+		s/\.idl$//;
+		$self->pidl_hdr("#include \"librpc/gen_ndr/$_\.h\"\n");
+		$self->register_module_import("samba.dcerpc.$_");
+	}
+}
+
+# Handle an IDL const declaration by registering it as a module constant.
+sub Const($$)
+{
+	my ($self, $const) = @_;
+	$self->register_constant($const->{NAME}, $const->{DTYPE}, $const->{VALUE});
+}
+
+# Record a module-level constant; the first registration of a given
+# name wins, later duplicates are ignored.
+sub register_constant($$$$)
+{
+	my ($self, $name, $type, $value) = @_;
+
+	unless (defined $self->{constants_uniq}->{$name}) {
+		my $h = {"key" => $name, "val" => [$type, $value]};
+		push @{$self->{constants}}, $h;
+		$self->{constants_uniq}->{$name} = $h;
+	}
+}
+
+# Export each enum/bitmap element as a module constant.  The regex
+# pulls the identifier off the front of the element string (elements
+# may have the form "NAME=value").
+# NOTE(review): if an element ever failed to match, $1 would still hold
+# the previous match - this assumes every element starts with \w chars.
+sub EnumAndBitmapConsts($$$)
+{
+	my ($self, $name, $d) = @_;
+
+	return unless (defined($d->{ELEMENTS}));
+
+	foreach my $e (@{$d->{ELEMENTS}}) {
+		$e =~ /^([A-Za-z0-9_]+)/;
+		my $cname = $1;
+
+		$self->register_constant($cname, $d, $cname);
+	}
+}
+
+# Emit the body of a union -> Python conversion: switch on the level
+# value, converting the active arm (or returning None for arms without
+# a name); unknown levels raise TypeError.
+# NOTE(review): the ($$$$) prototype lists one fewer argument than is
+# unpacked; prototypes are not checked on method calls, so this is
+# harmless but inconsistent.
+sub FromUnionToPythonFunction($$$$)
+{
+	my ($self, $mem_ctx, $type, $switch, $name) = @_;
+
+	$self->pidl("PyObject *ret;");
+	$self->pidl("");
+
+	$self->pidl("switch ($switch) {");
+	$self->indent;
+
+	foreach my $e (@{$type->{ELEMENTS}}) {
+		$self->pidl("$e->{CASE}:");
+
+		$self->indent;
+
+		if ($e->{NAME}) {
+			$self->ConvertObjectToPython($mem_ctx, {}, $e, "$name->$e->{NAME}", "ret", "return NULL;");
+		} else {
+			# empty arm: hand back None
+			$self->pidl("ret = Py_None;");
+			$self->pidl("Py_INCREF(ret);");
+		}
+
+		$self->pidl("return ret;");
+		$self->pidl("");
+
+		$self->deindent;
+	}
+
+	$self->deindent;
+	$self->pidl("}");
+
+	$self->pidl("PyErr_SetString(PyExc_TypeError, \"unknown union level\");");
+	$self->pidl("return NULL;");
+}
+
+# Emit the body of a Python -> union conversion: allocate the union,
+# switch on the level value and convert the active arm from $name.
+# A default case raising TypeError is added unless the union has one.
+# NOTE(review): same prototype/argument-count mismatch as above.
+sub FromPythonToUnionFunction($$$$$)
+{
+	my ($self, $type, $typename, $switch, $mem_ctx, $name) = @_;
+
+	my $has_default = 0;
+
+	$self->pidl("$typename *ret = talloc_zero($mem_ctx, $typename);");
+
+	$self->pidl("switch ($switch) {");
+	$self->indent;
+
+	foreach my $e (@{$type->{ELEMENTS}}) {
+		$self->pidl("$e->{CASE}:");
+		if ($e->{CASE} eq "default") { $has_default = 1; }
+		$self->indent;
+		if ($e->{NAME}) {
+			$self->ConvertObjectFromPython({}, $mem_ctx, $e, $name, "ret->$e->{NAME}", "talloc_free(ret); return NULL;");
+		}
+		$self->pidl("break;");
+		$self->deindent;
+		$self->pidl("");
+	}
+
+	if (!$has_default) {
+		$self->pidl("default:");
+		$self->indent;
+		$self->pidl("PyErr_SetString(PyExc_TypeError, \"invalid union level value\");");
+		$self->pidl("talloc_free(ret);");
+		$self->pidl("ret = NULL;");
+		$self->deindent;
+	}
+
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("return ret;");
+}
+
+# Emit the getter/setter pair used in a struct's PyGetSetDef table for
+# one element.  The getter converts object->$ename to a Python object;
+# the setter converts back, first talloc_unlink()ing the old value for
+# owned pointers (but not for fixed/charset arrays or reference-type
+# scalars, which are not separately owned).
+sub PythonElementGetSet($$$$$$) {
+	my ($self, $name, $cname, $ename, $e, $env) = @_;
+
+	my $varname = "object->$ename";
+	$self->pidl("static PyObject *py_$name\_get_$e->{NAME}(PyObject *obj, void *closure)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("$cname *object = ($cname *)pytalloc_get_ptr(obj);");
+	$self->pidl("PyObject *py_$e->{NAME};");
+	$self->ConvertObjectToPython("pytalloc_get_mem_ctx(obj)", $env, $e, $varname, "py_$e->{NAME}", "return NULL;");
+	$self->pidl("return py_$e->{NAME};");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	$self->pidl("static int py_$name\_set_$e->{NAME}(PyObject *py_obj, PyObject *value, void *closure)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("$cname *object = ($cname *)pytalloc_get_ptr(py_obj);");
+	my $mem_ctx = "pytalloc_get_mem_ctx(py_obj)";
+	my $l = $e->{LEVELS}[0];
+	my $nl = GetNextLevel($e, $l);
+	# drop the reference to the previous value before overwriting it,
+	# but only for pointers that own their target
+	if ($l->{TYPE} eq "POINTER" and
+		not ($nl->{TYPE} eq "ARRAY" and ($nl->{IS_FIXED} or is_charset_array($e, $nl))) and
+		not ($nl->{TYPE} eq "DATA" and Parse::Pidl::Typelist::scalar_is_reference($nl->{DATA_TYPE}))) {
+		$self->pidl("talloc_unlink($mem_ctx, discard_const($varname));");
+	}
+	$self->ConvertObjectFromPython($env, $mem_ctx, $e, "value", $varname, "return -1;");
+	$self->pidl("return 0;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+sub PythonStruct($$$$$$)
+{
+ my ($self, $modulename, $prettyname, $name, $cname, $d) = @_;
+
+ my $env = GenerateStructEnv($d, "object");
+
+ $self->pidl("");
+
+ my $getsetters = "NULL";
+
+ if ($#{$d->{ELEMENTS}} > -1) {
+ foreach my $e (@{$d->{ELEMENTS}}) {
+ $self->PythonElementGetSet($name, $cname, $e->{NAME}, $e, $env);
+ }
+
+ $getsetters = "py_$name\_getsetters";
+ $self->pidl("static PyGetSetDef ".$getsetters."[] = {");
+ $self->indent;
+ foreach my $e (@{$d->{ELEMENTS}}) {
+ my $etype = "";
+ if (ref($e->{TYPE}) eq "HASH") {
+ $etype = $e->{TYPE}->{NAME};
+ } else {
+ $etype = $e->{TYPE};
+ }
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl(".name = discard_const_p(char, \"$e->{NAME}\"),");
+ $self->pidl(".get = py_$name\_get_$e->{NAME},");
+ $self->pidl(".set = py_$name\_set_$e->{NAME},");
+ $self->pidl(".doc = discard_const_p(char, \"PIDL-generated element of base type $etype\")");
+ $self->deindent;
+ $self->pidl("},");
+ }
+ $self->pidl("{ .name = NULL }");
+ $self->deindent;
+ $self->pidl("};");
+ $self->pidl("");
+ }
+
+ $self->pidl("static PyObject *py_$name\_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)");
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("return pytalloc_new($cname, type);");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+
+ my $py_methods = "NULL";
+
+ # If the struct is not public there ndr_pull/ndr_push functions will
+ # be static so not callable from here
+ if (has_property($d, "public")) {
+ $self->pidl("static PyObject *py_$name\_ndr_pack(PyObject *py_obj)");
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("$cname *object = ($cname *)pytalloc_get_ptr(py_obj);");
+ $self->pidl("PyObject *ret = NULL;");
+ $self->pidl("DATA_BLOB blob;");
+ $self->pidl("enum ndr_err_code err;");
+ $self->pidl("TALLOC_CTX *tmp_ctx = talloc_new(pytalloc_get_mem_ctx(py_obj));");
+ $self->pidl("if (tmp_ctx == NULL) {");
+ $self->indent;
+ $self->pidl("PyErr_SetNdrError(NDR_ERR_ALLOC);");
+ $self->pidl("return NULL;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("err = ndr_push_struct_blob(&blob, tmp_ctx, object, (ndr_push_flags_fn_t)ndr_push_$name);");
+ $self->pidl("if (!NDR_ERR_CODE_IS_SUCCESS(err)) {");
+ $self->indent;
+ $self->pidl("TALLOC_FREE(tmp_ctx);");
+ $self->pidl("PyErr_SetNdrError(err);");
+ $self->pidl("return NULL;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+ $self->pidl("ret = PyBytes_FromStringAndSize((char *)blob.data, blob.length);");
+ $self->pidl("TALLOC_FREE(tmp_ctx);");
+ $self->pidl("return ret;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+
+ $self->pidl("static PyObject *py_$name\_ndr_unpack(PyObject *py_obj, PyObject *args, PyObject *kwargs)");
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("$cname *object = ($cname *)pytalloc_get_ptr(py_obj);");
+ $self->pidl("DATA_BLOB blob;");
+ $self->pidl("Py_ssize_t blob_length = 0;");
+ $self->pidl("enum ndr_err_code err;");
+ $self->pidl("const char * const kwnames[] = { \"data_blob\", \"allow_remaining\", NULL };");
+ $self->pidl("PyObject *allow_remaining_obj = NULL;");
+ $self->pidl("bool allow_remaining = false;");
+ $self->pidl("");
+ $self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, PYARG_BYTES_LEN \"|O:__ndr_unpack__\",");
+ $self->indent;
+ $self->pidl("discard_const_p(char *, kwnames),");
+ $self->pidl("&blob.data, &blob_length,");
+ $self->pidl("&allow_remaining_obj)) {");
+ $self->deindent;
+ $self->indent;
+ $self->pidl("return NULL;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("blob.length = blob_length;");
+ $self->pidl("");
+ $self->pidl("if (allow_remaining_obj && PyObject_IsTrue(allow_remaining_obj)) {");
+ $self->indent;
+ $self->pidl("allow_remaining = true;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+ $self->pidl("if (allow_remaining) {");
+ $self->indent;
+ $self->pidl("err = ndr_pull_struct_blob(&blob, pytalloc_get_mem_ctx(py_obj), object, (ndr_pull_flags_fn_t)ndr_pull_$name);");
+ $self->deindent;
+ $self->pidl("} else {");
+ $self->indent;
+ $self->pidl("err = ndr_pull_struct_blob_all(&blob, pytalloc_get_mem_ctx(py_obj), object, (ndr_pull_flags_fn_t)ndr_pull_$name);");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("if (!NDR_ERR_CODE_IS_SUCCESS(err)) {");
+ $self->indent;
+ $self->pidl("PyErr_SetNdrError(err);");
+ $self->pidl("return NULL;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+ $self->pidl("Py_RETURN_NONE;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+
+ $self->pidl("static PyObject *py_$name\_ndr_print(PyObject *py_obj)");
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("$cname *object = ($cname *)pytalloc_get_ptr(py_obj);");
+ $self->pidl("PyObject *ret;");
+ $self->pidl("char *retstr;");
+ $self->pidl("");
+ $self->pidl("retstr = ndr_print_struct_string(pytalloc_get_mem_ctx(py_obj), (ndr_print_fn_t)ndr_print_$name, \"$name\", object);");
+ $self->pidl("ret = PyStr_FromString(retstr);");
+ $self->pidl("talloc_free(retstr);");
+ $self->pidl("");
+ $self->pidl("return ret;");
+ $self->deindent;
+ $self->pidl("}");
+ $self->pidl("");
+
+ $py_methods = "py_$name\_methods";
+ $self->pidl("static PyMethodDef $py_methods\[] = {");
+ $self->indent;
+ $self->pidl("{ \"__ndr_pack__\", (PyCFunction)py_$name\_ndr_pack, METH_NOARGS, \"S.ndr_pack(object) -> blob\\nNDR pack\" },");
+ $self->pidl("{ \"__ndr_unpack__\", (PyCFunction)py_$name\_ndr_unpack, METH_VARARGS|METH_KEYWORDS, \"S.ndr_unpack(class, blob, allow_remaining=False) -> None\\nNDR unpack\" },");
+ $self->pidl("{ \"__ndr_print__\", (PyCFunction)py_$name\_ndr_print, METH_NOARGS, \"S.ndr_print(object) -> None\\nNDR print\" },");
+ $self->pidl("{ NULL, NULL, 0, NULL }");
+ $self->deindent;
+ $self->pidl("};");
+ $self->pidl("");
+ }
+
+ $self->pidl_hdr("static PyTypeObject $name\_Type;\n");
+ $self->pidl("");
+ my $docstring = $self->DocString($d, $name);
+ my $typeobject = "$name\_Type";
+ $self->pidl("static PyTypeObject $typeobject = {");
+ $self->indent;
+ $self->pidl("PyVarObject_HEAD_INIT(NULL, 0)");
+ $self->pidl(".tp_name = \"$modulename.$prettyname\",");
+ $self->pidl(".tp_getset = $getsetters,");
+ if ($docstring) {
+ $self->pidl(".tp_doc = $docstring,");
+ }
+ $self->pidl(".tp_methods = $py_methods,");
+ $self->pidl(".tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,");
+ $self->pidl(".tp_new = py_$name\_new,");
+ $self->deindent;
+ $self->pidl("};");
+
+ $self->pidl("");
+
+ my $talloc_typename = $self->import_type_variable("talloc", "BaseObject");
+ $self->register_module_prereadycode(["$name\_Type.tp_base = $talloc_typename;",
+ "$name\_Type.tp_basicsize = pytalloc_BaseObject_size();",
+ ""]);
+
+ return "&$typeobject";
+}
+
+# Emit the Python wrapper type for a single RPC function: a pytalloc-backed
+# object holding "struct <name>" that exposes in_<elem>/out_<elem> getsetters
+# plus __ndr_pack_in/out__, __ndr_unpack_in/out__ and __ndr_print_in/out__
+# methods dispatched through ndr_table_<iface>.calls[<opnum>].
+# Returns "&<name>_Type" for registration in the module's type table.
+sub PythonFunctionStruct($$$$)
+{
+	my ($self, $modulename, $fn, $iface, $prettyname) = @_;
+
+	my $inenv = GenerateFunctionInEnv($fn, "object->");
+	my $outenv = GenerateFunctionOutEnv($fn, "object->");
+
+	my $name = "$fn->{NAME}";
+	my $cname = "struct $name";
+
+	$self->pidl("");
+
+	my $getsetters = "NULL";
+
+	# One getter/setter pair per element, emitted separately for the
+	# [in] and [out] directions (an [in,out] element gets both).
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		if (grep(/in/,@{$e->{DIRECTION}})) {
+			my $inname = "$name\_in";
+			my $ename = "in.$e->{NAME}";
+			$self->PythonElementGetSet($inname, $cname, $ename, $e, $inenv);
+		}
+		if (grep(/out/,@{$e->{DIRECTION}})) {
+			my $outname = "$name\_out";
+			my $ename = "out.$e->{NAME}";
+			$self->PythonElementGetSet($outname, $cname, $ename, $e, $outenv);
+		}
+	}
+
+	# The NDR result (NTSTATUS/WERROR/...) is exposed as ".result".
+	if (defined($fn->{RETURN_TYPE})) {
+		my $e = ReturnTypeElement($fn);
+		my $ename = "out.result";
+		$self->PythonElementGetSet($name, $cname, $ename, $e, $outenv);
+	}
+
+	# PyGetSetDef table wiring the accessors generated above.
+	$getsetters = "py_$name\_getsetters";
+	$self->pidl("static PyGetSetDef ".$getsetters."[] = {");
+	$self->indent;
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		if (grep(/in/,@{$e->{DIRECTION}})) {
+			$self->pidl("{");
+			$self->indent;
+			$self->pidl(".name = discard_const_p(char, \"in_$e->{NAME}\"),");
+			$self->pidl(".get = py_$name\_in_get_$e->{NAME},");
+			$self->pidl(".set = py_$name\_in_set_$e->{NAME},");
+			$self->pidl(".doc = discard_const_p(char, \"PIDL-generated element of base type $e->{TYPE}\")");
+			$self->deindent;
+			$self->pidl("},");
+		}
+		if (grep(/out/,@{$e->{DIRECTION}})) {
+			$self->pidl("{");
+			$self->indent;
+			$self->pidl(".name = discard_const_p(char, \"out_$e->{NAME}\"),");
+			$self->pidl(".get = py_$name\_out_get_$e->{NAME},");
+			$self->pidl(".set = py_$name\_out_set_$e->{NAME},");
+			$self->pidl(".doc = discard_const_p(char, \"PIDL-generated element of base type $e->{TYPE}\")");
+			$self->deindent;
+			$self->pidl("},");
+		}
+	}
+	if (defined($fn->{RETURN_TYPE})) {
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl(".name = discard_const_p(char, \"result\"),");
+		$self->pidl(".get = py_$name\_get_result,");
+		$self->pidl(".set = py_$name\_set_result,");
+		$self->pidl(".doc = discard_const_p(char, \"PIDL-generated element of type $fn->{RETURN_TYPE}\")");
+		$self->deindent;
+		$self->pidl("},");
+	}
+	$self->pidl("{ .name = NULL }");
+	$self->deindent;
+	$self->pidl("};");
+	$self->pidl("");
+
+	# tp_new: plain zero-initialised talloc allocation of the C struct.
+	$self->pidl("static PyObject *py_$name\_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("return pytalloc_new($cname, type);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	my $py_methods = "NULL";
+
+	# Shared C declaration snippets reused by the pack/unpack/print bodies.
+	my $ndr_call = "const struct ndr_interface_call *call = NULL;";
+	my $object_ptr = "$cname *object = ($cname *)pytalloc_get_ptr(py_obj);";
+
+	# Class method returning this function's operation number.
+	$self->pidl("static PyObject *py_$name\_ndr_opnum(PyTypeObject *type)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("");
+	$self->pidl("");
+	$self->pidl("return PyInt_FromLong($fn->{OPNUM});");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# Common pack worker: pushes either NDR_IN or NDR_OUT (per
+	# ndr_inout_flags) via the interface call table and returns bytes.
+	$self->pidl("static PyObject *py_$name\_ndr_pack(PyObject *py_obj, int ndr_inout_flags, uint32_t ndr_push_flags)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("$ndr_call");
+	$self->pidl("$object_ptr");
+	$self->pidl("PyObject *ret = NULL;");
+	$self->pidl("struct ndr_push *push = NULL;");
+	$self->pidl("DATA_BLOB blob;");
+	$self->pidl("enum ndr_err_code err;");
+	$self->pidl("");
+	$self->pidl("if (ndr_table_$iface\.num_calls < " . ($fn->{OPNUM}+1) .
+		") {");
+	$self->indent;
+	$self->pidl("PyErr_SetString(PyExc_TypeError, \"Internal Error, ndr_interface_call missing for py_$name\_ndr_pack\");");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("call = &ndr_table_$iface\.calls[$fn->{OPNUM}];");
+	$self->pidl("");
+	$self->pidl("push = ndr_push_init_ctx(pytalloc_get_mem_ctx(py_obj));");
+	$self->pidl("if (push == NULL) {");
+	$self->indent;
+	$self->pidl("PyErr_SetNdrError(NDR_ERR_ALLOC);");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("push->flags |= ndr_push_flags;");
+	$self->pidl("");
+	$self->pidl("err = call->ndr_push(push, ndr_inout_flags, object);");
+	$self->pidl("if (!NDR_ERR_CODE_IS_SUCCESS(err)) {");
+	$self->indent;
+	$self->pidl("TALLOC_FREE(push);");
+	$self->pidl("PyErr_SetNdrError(err);");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("blob = ndr_push_blob(push);");
+	$self->pidl("ret = PyBytes_FromStringAndSize((char *)blob.data, blob.length);");
+	$self->pidl("TALLOC_FREE(push);");
+	$self->pidl("return ret;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# __ndr_pack_in__: thin wrapper translating bigendian/ndr64 keyword
+	# flags into LIBNDR push flags, then delegating with NDR_IN.
+	$self->pidl("static PyObject *py_$name\_ndr_pack_in(PyObject *py_obj, PyObject *args, PyObject *kwargs)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("const char * const kwnames[] = { \"bigendian\", \"ndr64\", NULL };");
+	$self->pidl("PyObject *bigendian_obj = NULL;");
+	$self->pidl("PyObject *ndr64_obj = NULL;");
+	$self->pidl("uint32_t ndr_push_flags = 0;");
+	$self->pidl("");
+	$self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, \"|OO:__ndr_pack_in__\",");
+	$self->indent;
+	$self->pidl("discard_const_p(char *, kwnames),");
+	$self->pidl("&bigendian_obj,");
+	$self->pidl("&ndr64_obj)) {");
+	$self->deindent;
+	$self->indent;
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("if (bigendian_obj && PyObject_IsTrue(bigendian_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_push_flags |= LIBNDR_FLAG_BIGENDIAN;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("if (ndr64_obj && PyObject_IsTrue(ndr64_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_push_flags |= LIBNDR_FLAG_NDR64;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("return py_$name\_ndr_pack(py_obj, NDR_IN, ndr_push_flags);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# __ndr_pack_out__: same wrapper with NDR_OUT.
+	$self->pidl("static PyObject *py_$name\_ndr_pack_out(PyObject *py_obj, PyObject *args, PyObject *kwargs)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("const char * const kwnames[] = { \"bigendian\", \"ndr64\", NULL };");
+	$self->pidl("PyObject *bigendian_obj = NULL;");
+	$self->pidl("PyObject *ndr64_obj = NULL;");
+	$self->pidl("uint32_t ndr_push_flags = 0;");
+	$self->pidl("");
+	$self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, \"|OO:__ndr_pack_out__\",");
+	$self->indent;
+	$self->pidl("discard_const_p(char *, kwnames),");
+	$self->pidl("&bigendian_obj,");
+	$self->pidl("&ndr64_obj)) {");
+	$self->deindent;
+	$self->indent;
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("if (bigendian_obj && PyObject_IsTrue(bigendian_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_push_flags |= LIBNDR_FLAG_BIGENDIAN;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("if (ndr64_obj && PyObject_IsTrue(ndr64_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_push_flags |= LIBNDR_FLAG_NDR64;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("return py_$name\_ndr_pack(py_obj, NDR_OUT, ndr_push_flags);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# Common unpack worker: pulls the blob through the call table; unless
+	# allow_remaining, trailing unconsumed bytes raise NDR_ERR_UNREAD_BYTES.
+	$self->pidl("static PyObject *py_$name\_ndr_unpack(PyObject *py_obj, const DATA_BLOB *blob, int ndr_inout_flags, uint32_t ndr_pull_flags, bool allow_remaining)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("$ndr_call");
+	$self->pidl("$object_ptr");
+	$self->pidl("struct ndr_pull *pull = NULL;");
+	$self->pidl("enum ndr_err_code err;");
+	$self->pidl("");
+	$self->pidl("if (ndr_table_$iface\.num_calls < " . ($fn->{OPNUM}+1) .
+		") {");
+	$self->indent;
+	$self->pidl("PyErr_SetString(PyExc_TypeError, \"Internal Error, ndr_interface_call missing for py_$name\_ndr_unpack\");");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("call = &ndr_table_$iface\.calls[$fn->{OPNUM}];");
+	$self->pidl("");
+	$self->pidl("pull = ndr_pull_init_blob(blob, object);");
+	$self->pidl("if (pull == NULL) {");
+	$self->indent;
+	$self->pidl("PyErr_SetNdrError(NDR_ERR_ALLOC);");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("pull->flags |= ndr_pull_flags;");
+	$self->pidl("");
+	$self->pidl("err = call->ndr_pull(pull, ndr_inout_flags, object);");
+	$self->pidl("if (!NDR_ERR_CODE_IS_SUCCESS(err)) {");
+	$self->indent;
+	$self->pidl("TALLOC_FREE(pull);");
+	$self->pidl("PyErr_SetNdrError(err);");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("if (!allow_remaining) {");
+	$self->indent;
+	$self->pidl("uint32_t highest_ofs;");
+	$self->pidl("");
+	$self->pidl("if (pull->offset > pull->relative_highest_offset) {");
+	$self->indent;
+	$self->pidl("highest_ofs = pull->offset;");
+	$self->deindent;
+	$self->pidl("} else {");
+	$self->indent;
+	$self->pidl("highest_ofs = pull->relative_highest_offset;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("if (highest_ofs < pull->data_size) {");
+	$self->indent;
+	$self->pidl("err = ndr_pull_error(pull, NDR_ERR_UNREAD_BYTES,");
+	$self->indent;
+	$self->pidl("\"not all bytes consumed ofs[%u] size[%u]\",");
+	$self->pidl("highest_ofs, pull->data_size);");
+	$self->deindent;
+	$self->pidl("TALLOC_FREE(pull);");
+	$self->pidl("PyErr_SetNdrError(err);");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("TALLOC_FREE(pull);");
+	$self->pidl("Py_RETURN_NONE;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# __ndr_unpack_in__: parse data_blob/bigendian/ndr64/allow_remaining
+	# keywords and delegate with NDR_IN.
+	$self->pidl("static PyObject *py_$name\_ndr_unpack_in(PyObject *py_obj, PyObject *args, PyObject *kwargs)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("DATA_BLOB blob;");
+	$self->pidl("Py_ssize_t blob_length = 0;");
+	$self->pidl("const char * const kwnames[] = { \"data_blob\", \"bigendian\", \"ndr64\", \"allow_remaining\", NULL };");
+	$self->pidl("PyObject *bigendian_obj = NULL;");
+	$self->pidl("PyObject *ndr64_obj = NULL;");
+	$self->pidl("uint32_t ndr_pull_flags = LIBNDR_FLAG_REF_ALLOC;");
+	$self->pidl("PyObject *allow_remaining_obj = NULL;");
+	$self->pidl("bool allow_remaining = false;");
+	$self->pidl("");
+	$self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, PYARG_BYTES_LEN \"|OOO:__ndr_unpack_in__\",");
+	$self->indent;
+	$self->pidl("discard_const_p(char *, kwnames),");
+	$self->pidl("&blob.data, &blob_length,");
+	$self->pidl("&bigendian_obj,");
+	$self->pidl("&ndr64_obj,");
+	$self->pidl("&allow_remaining_obj)) {");
+	$self->deindent;
+	$self->indent;
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("blob.length = blob_length;");
+	$self->pidl("");
+	$self->pidl("if (bigendian_obj && PyObject_IsTrue(bigendian_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_pull_flags |= LIBNDR_FLAG_BIGENDIAN;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("if (ndr64_obj && PyObject_IsTrue(ndr64_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_pull_flags |= LIBNDR_FLAG_NDR64;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("if (allow_remaining_obj && PyObject_IsTrue(allow_remaining_obj)) {");
+	$self->indent;
+	$self->pidl("allow_remaining = true;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("return py_$name\_ndr_unpack(py_obj, &blob, NDR_IN, ndr_pull_flags, allow_remaining);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# __ndr_unpack_out__: same wrapper with NDR_OUT.
+	$self->pidl("static PyObject *py_$name\_ndr_unpack_out(PyObject *py_obj, PyObject *args, PyObject *kwargs)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("DATA_BLOB blob;");
+	$self->pidl("Py_ssize_t blob_length = 0;");
+	$self->pidl("const char * const kwnames[] = { \"data_blob\", \"bigendian\", \"ndr64\", \"allow_remaining\", NULL };");
+	$self->pidl("PyObject *bigendian_obj = NULL;");
+	$self->pidl("PyObject *ndr64_obj = NULL;");
+	$self->pidl("uint32_t ndr_pull_flags = LIBNDR_FLAG_REF_ALLOC;");
+	$self->pidl("PyObject *allow_remaining_obj = NULL;");
+	$self->pidl("bool allow_remaining = false;");
+	$self->pidl("");
+	$self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, PYARG_BYTES_LEN \"|OOO:__ndr_unpack_out__\",");
+	$self->indent;
+	$self->pidl("discard_const_p(char *, kwnames),");
+	$self->pidl("&blob.data, &blob_length,");
+	$self->pidl("&bigendian_obj,");
+	$self->pidl("&ndr64_obj,");
+	$self->pidl("&allow_remaining_obj)) {");
+	$self->deindent;
+	$self->indent;
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("blob.length = blob_length;");
+	$self->pidl("");
+	$self->pidl("if (bigendian_obj && PyObject_IsTrue(bigendian_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_pull_flags |= LIBNDR_FLAG_BIGENDIAN;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("if (ndr64_obj && PyObject_IsTrue(ndr64_obj)) {");
+	$self->indent;
+	$self->pidl("ndr_pull_flags |= LIBNDR_FLAG_NDR64;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("if (allow_remaining_obj && PyObject_IsTrue(allow_remaining_obj)) {");
+	$self->indent;
+	$self->pidl("allow_remaining = true;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	$self->pidl("return py_$name\_ndr_unpack(py_obj, &blob, NDR_OUT, ndr_pull_flags, allow_remaining);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# Common print worker: renders IN or OUT members via call->ndr_print.
+	$self->pidl("static PyObject *py_$name\_ndr_print(PyObject *py_obj, const char *name, int ndr_inout_flags)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("$ndr_call");
+	$self->pidl("$object_ptr");
+	$self->pidl("PyObject *ret;");
+	$self->pidl("char *retstr;");
+	$self->pidl("");
+	$self->pidl("if (ndr_table_$iface\.num_calls < " . ($fn->{OPNUM}+1) .
+		") {");
+	$self->indent;
+	$self->pidl("PyErr_SetString(PyExc_TypeError, \"Internal Error, ndr_interface_call missing for py_$name\_ndr_print\");");
+	$self->pidl("return NULL;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("call = &ndr_table_$iface\.calls[$fn->{OPNUM}];");
+	$self->pidl("");
+	$self->pidl("retstr = ndr_print_function_string(pytalloc_get_mem_ctx(py_obj), call->ndr_print, name, ndr_inout_flags, object);");
+	$self->pidl("ret = PyStr_FromString(retstr);");
+	$self->pidl("TALLOC_FREE(retstr);");
+	$self->pidl("");
+	$self->pidl("return ret;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	$self->pidl("static PyObject *py_$name\_ndr_print_in(PyObject *py_obj)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("return py_$name\_ndr_print(py_obj, \"$name\_in\", NDR_IN);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	$self->pidl("static PyObject *py_$name\_ndr_print_out(PyObject *py_obj)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("return py_$name\_ndr_print(py_obj, \"$name\_out\", NDR_OUT);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	# PyMethodDef table for the wrapper type.
+	$py_methods = "py_$name\_methods";
+	$self->pidl("static PyMethodDef $py_methods\[] = {");
+	$self->indent;
+	$self->pidl("{ \"opnum\", (PyCFunction)py_$name\_ndr_opnum, METH_NOARGS|METH_CLASS,");
+	$self->indent;
+	$self->pidl("\"$modulename.$prettyname.opnum() -> ".sprintf("%d (0x%02x)", $fn->{OPNUM}, $fn->{OPNUM})." \" },");
+	$self->deindent;
+	$self->pidl("{ \"__ndr_pack_in__\", (PyCFunction)py_$name\_ndr_pack_in, METH_VARARGS|METH_KEYWORDS,");
+	$self->indent;
+	$self->pidl("\"S.ndr_pack_in(object, bigendian=False, ndr64=False) -> blob\\nNDR pack input\" },");
+	$self->deindent;
+	$self->pidl("{ \"__ndr_pack_out__\", (PyCFunction)py_$name\_ndr_pack_out, METH_VARARGS|METH_KEYWORDS,");
+	$self->indent;
+	$self->pidl("\"S.ndr_pack_out(object, bigendian=False, ndr64=False) -> blob\\nNDR pack output\" },");
+	$self->deindent;
+	$self->pidl("{ \"__ndr_unpack_in__\", (PyCFunction)py_$name\_ndr_unpack_in, METH_VARARGS|METH_KEYWORDS,");
+	$self->indent;
+	$self->pidl("\"S.ndr_unpack_in(class, blob, bigendian=False, ndr64=False, allow_remaining=False) -> None\\nNDR unpack input\" },");
+	$self->deindent;
+	$self->pidl("{ \"__ndr_unpack_out__\", (PyCFunction)py_$name\_ndr_unpack_out, METH_VARARGS|METH_KEYWORDS,");
+	$self->indent;
+	$self->pidl("\"S.ndr_unpack_out(class, blob, bigendian=False, ndr64=False, allow_remaining=False) -> None\\nNDR unpack output\" },");
+	$self->deindent;
+	$self->pidl("{ \"__ndr_print_in__\", (PyCFunction)py_$name\_ndr_print_in, METH_NOARGS, \"S.ndr_print_in(object) -> None\\nNDR print input\" },");
+	$self->pidl("{ \"__ndr_print_out__\", (PyCFunction)py_$name\_ndr_print_out, METH_NOARGS, \"S.ndr_print_out(object) -> None\\nNDR print output\" },");
+	$self->pidl("{ NULL, NULL, 0, NULL }");
+	$self->deindent;
+	$self->pidl("};");
+	$self->pidl("");
+
+	# The PyTypeObject itself; tp_base/tp_basicsize are patched in at
+	# module init (pre-ready code) from the talloc BaseObject type.
+	$self->pidl_hdr("static PyTypeObject $name\_Type;\n");
+	$self->pidl("");
+	my $docstring = $self->DocString($fn, $name);
+	my $typeobject = "$name\_Type";
+	$self->pidl("static PyTypeObject $typeobject = {");
+	$self->indent;
+	$self->pidl("PyVarObject_HEAD_INIT(NULL, 0)");
+	$self->pidl(".tp_name = \"$modulename.$prettyname\",");
+	$self->pidl(".tp_getset = $getsetters,");
+	if ($docstring) {
+		$self->pidl(".tp_doc = $docstring,");
+	}
+	$self->pidl(".tp_methods = $py_methods,");
+	$self->pidl(".tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,");
+	$self->pidl(".tp_new = py_$name\_new,");
+	$self->deindent;
+	$self->pidl("};");
+
+	$self->pidl("");
+
+	my $talloc_typename = $self->import_type_variable("talloc", "BaseObject");
+	$self->register_module_prereadycode(["$name\_Type.tp_base = $talloc_typename;",
+					     "$name\_Type.tp_basicsize = pytalloc_BaseObject_size();",
+					     ""]);
+
+	return "&$typeobject";
+}
+
+# Return the bare variable name referenced by an element's length_is()
+# or size_is() property (length_is wins when both are present), with any
+# pointer-dereference '*' markers stripped; undef when neither property
+# is set.
+sub get_metadata_var($)
+{
+	my ($e) = @_;
+	sub get_var($) { my $x = shift; $x =~ s/\*//g; return $x; }
+
+	foreach my $prop ("length_is", "size_is") {
+		return get_var($e->{PROPERTIES}->{$prop}) if (has_property($e, $prop));
+	}
+
+	return undef;
+}
+
+# Build a map of "metadata" arguments: variables named by other elements'
+# size_is()/length_is() expressions, keyed by direction, mapping each
+# metadata variable to the name of the array element it describes.  The
+# Python wrappers hide these and derive them from list lengths instead.
+sub find_metadata_args($)
+{
+	my ($fn) = @_;
+	my $metadata_args = { in => {}, out => {} };
+
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		my $main = get_metadata_var($e);
+		next unless ($main);
+		$metadata_args->{$_}->{$main} = $e->{NAME} foreach (@{$e->{DIRECTION}});
+	}
+
+	return $metadata_args;
+}
+
+# Generate unpack_<fnname>_args_out(): a C helper that converts the [out]
+# members of "struct <fn>" (plus the NDR result, unless it is a pure
+# WERROR/NTSTATUS which is raised as an exception instead) into a Python
+# value — a bare value for one result, a tuple for several, None for none.
+# Returns (generated function name, signature fragment for the docstring).
+sub PythonFunctionUnpackOut($$$)
+{
+	my ($self, $fn, $fnname) = @_;
+
+	my $outfnname = "unpack_$fnname\_args_out";
+	my $signature = "";
+
+	my $metadata_args = find_metadata_args($fn);
+
+	my $env = GenerateFunctionOutEnv($fn, "r->");
+	my $result_size = 0;
+
+	$self->pidl("static PyObject *$outfnname(struct $fn->{NAME} *r)");
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("PyObject *result;");
+	# Count the visible [out] values; size_is/length_is metadata elements
+	# are implied by list lengths and never surfaced to Python.
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless (grep(/out/,@{$e->{DIRECTION}}));
+		next if (($metadata_args->{in}->{$e->{NAME}} and grep(/in/, @{$e->{DIRECTION}})) or
+			 ($metadata_args->{out}->{$e->{NAME}}) and grep(/out/, @{$e->{DIRECTION}}));
+		$self->pidl("PyObject *py_$e->{NAME};");
+		$result_size++;
+	}
+
+	# WERROR/NTSTATUS results become exceptions, not return values.
+	if ($fn->{RETURN_TYPE}) {
+		$result_size++ unless ($fn->{RETURN_TYPE} eq "WERROR" or $fn->{RETURN_TYPE} eq "NTSTATUS");
+	}
+
+	my $i = 0;
+
+	if ($result_size > 1) {
+		$self->pidl("result = PyTuple_New($result_size);");
+		$signature .= "(";
+	} elsif ($result_size == 0) {
+		$self->pidl("result = Py_None;");
+		$self->pidl("Py_INCREF(result);");
+		$signature .= "None";
+	}
+
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next if ($metadata_args->{out}->{$e->{NAME}});
+		my $py_name = "py_$e->{NAME}";
+		if (grep(/out/,@{$e->{DIRECTION}})) {
+			$self->ConvertObjectToPython("r", $env, $e, "r->out.$e->{NAME}", $py_name, "return NULL;");
+			if ($result_size > 1) {
+				$self->pidl("PyTuple_SetItem(result, $i, $py_name);");
+				$i++;
+				$signature .= "$e->{NAME}, ";
+			} else {
+				$self->pidl("result = $py_name;");
+				$signature .= $e->{NAME};
+			}
+		}
+	}
+
+	# Error-style results raise; anything else is appended as "result".
+	if (defined($fn->{RETURN_TYPE}) and $fn->{RETURN_TYPE} eq "NTSTATUS") {
+		$self->handle_ntstatus("r->out.result", "NULL", undef);
+	} elsif (defined($fn->{RETURN_TYPE}) and $fn->{RETURN_TYPE} eq "WERROR") {
+		$self->handle_werror("r->out.result", "NULL", undef);
+	} elsif (defined($fn->{RETURN_TYPE})) {
+		my $conv = $self->ConvertObjectToPythonData("r", $fn->{RETURN_TYPE}, "r->out.result", $fn);
+		if ($result_size > 1) {
+			$self->pidl("PyTuple_SetItem(result, $i, $conv);");
+		} else {
+			$self->pidl("result = $conv;");
+		}
+		$signature .= "result";
+	}
+
+	# Tidy the human-readable signature: drop trailing ", ", close tuple.
+	if (substr($signature, -2) eq ", ") {
+		$signature = substr($signature, 0, -2);
+	}
+	if ($result_size > 1) {
+		$signature .= ")";
+	}
+
+	$self->pidl("return result;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	return ($outfnname, $signature);
+}
+
+# Generate pack_<fnname>_args_in(): a C helper that parses the Python
+# call arguments (one "O" per visible [in] element) into "struct <fn>".
+# size_is/length_is metadata arguments are not accepted from Python;
+# they are derived from PyList_GET_SIZE() of the list they describe.
+# Returns (generated function name, signature fragment for the docstring).
+sub PythonFunctionPackIn($$$)
+{
+	my ($self, $fn, $fnname) = @_;
+	my $metadata_args = find_metadata_args($fn);
+
+	my $infnname = "pack_$fnname\_args_in";
+
+	$self->pidl("static bool $infnname(PyObject *args, PyObject *kwargs, struct $fn->{NAME} *r)");
+	$self->pidl("{");
+	$self->indent;
+	my $args_format = "";
+	my $args_string = "";
+	my $args_names = "";
+	my $signature = "";
+
+	# Build the PyArg_ParseTupleAndKeywords format/argument/kwname lists.
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless (grep(/in/,@{$e->{DIRECTION}}));
+		next if (($metadata_args->{in}->{$e->{NAME}} and grep(/in/, @{$e->{DIRECTION}})) or
+			 ($metadata_args->{out}->{$e->{NAME}}) and grep(/out/, @{$e->{DIRECTION}}));
+		$self->pidl("PyObject *py_$e->{NAME};");
+		$args_format .= "O";
+		$args_string .= ", &py_$e->{NAME}";
+		$args_names .= "\"$e->{NAME}\", ";
+		$signature .= "$e->{NAME}, ";
+	}
+	if (substr($signature, -2) eq ", ") {
+		$signature = substr($signature, 0, -2);
+	}
+	$self->pidl("const char *kwnames[] = {");
+	$self->indent;
+	$self->pidl($args_names . "NULL");
+	$self->deindent;
+	$self->pidl("};");
+
+	$self->pidl("");
+	$self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, \"$args_format:$fn->{NAME}\", discard_const_p(char *, kwnames)$args_string)) {");
+	$self->indent;
+	$self->pidl("return false;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+
+	my $env = GenerateFunctionInEnv($fn, "r->");
+
+	my $fail = "return false;";
+	foreach my $e (@{$fn->{ELEMENTS}}) {
+		next unless (grep(/in/,@{$e->{DIRECTION}}));
+		if ($metadata_args->{in}->{$e->{NAME}}) {
+			# Metadata element: derive its value from the length of
+			# the Python list it sizes (allocating if pointer-typed).
+			my $py_var = "py_".$metadata_args->{in}->{$e->{NAME}};
+			$self->pidl("PY_CHECK_TYPE(&PyList_Type, $py_var, $fail);");
+			my $val = "PyList_GET_SIZE($py_var)";
+			if ($e->{LEVELS}[0]->{TYPE} eq "POINTER") {
+				$self->pidl("r->in.$e->{NAME} = talloc_ptrtype(r, r->in.$e->{NAME});");
+				$self->pidl("if (r->in.$e->{NAME} == NULL) {");
+				$self->indent;
+				$self->pidl("PyErr_NoMemory();");
+				$self->pidl($fail);
+				$self->deindent;
+				$self->pidl("}");
+				$self->pidl("*r->in.$e->{NAME} = $val;");
+			} else {
+				$self->pidl("r->in.$e->{NAME} = $val;");
+			}
+		} else {
+			$self->ConvertObjectFromPython($env, "r", $e, "py_$e->{NAME}", "r->in.$e->{NAME}", $fail);
+		}
+	}
+	$self->pidl("return true;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+	return ($infnname, $signature);
+}
+
+# Emit the argument pack/unpack helpers for one RPC function and compose
+# its docstring from the call signature plus any IDL helpstring().
+# Returns (pack-in helper name, unpack-out helper name, docstring C expr).
+sub PythonFunction($$$)
+{
+	my ($self, $fn, $iface, $prettyname) = @_;
+
+	my $fnname = "py_$fn->{NAME}";
+	# DocString may emit a "#define py_doc_..." line, so it must run
+	# before the helpers to keep the generated output order unchanged.
+	my $helpstring = $self->DocString($fn, $fn->{NAME});
+
+	my ($infn, $insignature) = $self->PythonFunctionPackIn($fn, $fnname);
+	my ($outfn, $outsignature) = $self->PythonFunctionUnpackOut($fn, $fnname);
+
+	my $signature = "S.$prettyname($insignature) -> $outsignature";
+	my $docstring = $helpstring
+		? "\"$signature\\n\\n\"$helpstring"
+		: "\"$signature\"";
+
+	return ($infn, $outfn, $docstring);
+}
+
+# Emit C code that turns a failing WERROR in $var into a Python
+# exception, optionally freeing $mem_ctx first, then returning $retval.
+sub handle_werror($$$$)
+{
+	my ($self, $var, $retval, $mem_ctx) = @_;
+
+	$self->pidl("if (!W_ERROR_IS_OK($var)) {");
+	$self->indent;
+	$self->pidl("PyErr_SetWERROR($var);");
+	if ($mem_ctx) {
+		$self->pidl("talloc_free($mem_ctx);");
+	}
+	$self->pidl("return $retval;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+# Emit C code that turns an error-class NTSTATUS in $var into a Python
+# exception, optionally freeing $mem_ctx first, then returning $retval.
+sub handle_ntstatus($$$$)
+{
+	my ($self, $var, $retval, $mem_ctx) = @_;
+
+	$self->pidl("if (NT_STATUS_IS_ERR($var)) {");
+	$self->indent;
+	$self->pidl("PyErr_SetNTSTATUS($var);");
+	if ($mem_ctx) {
+		$self->pidl("talloc_free($mem_ctx);");
+	}
+	$self->pidl("return $retval;");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("");
+}
+
+# Emit Python bindings for one IDL type declaration.  Structs (direct or
+# typedef'd) become pytalloc-backed classes via PythonStruct; enums and
+# bitmaps become module-level constants; non-opaque unions get a type
+# with __import__/__export__ classmethods for level-discriminated
+# conversion between the C union and Python objects.
+#
+# Fixes vs. the original generator output:
+#  - the "mem_ctx is NULL)!" error messages had a stray ')' (now "NULL!");
+#  - __export__ used format "OiO:import", so PyArg_ParseTupleAndKeywords
+#    errors blamed "import"; it now correctly says "OiO:export".
+sub PythonType($$$$)
+{
+	my ($self, $modulename, $d, $interface, $basename) = @_;
+
+	# Look through a typedef to the underlying struct/union/enum.
+	my $actual_ctype = $d;
+	if ($actual_ctype->{TYPE} eq "TYPEDEF") {
+		$actual_ctype = $actual_ctype->{DATA};
+	}
+
+	if ($actual_ctype->{TYPE} eq "STRUCT") {
+		my $typeobject;
+		my $fn_name = PrettifyTypeName($d->{NAME}, $basename);
+
+		if ($d->{TYPE} eq "STRUCT") {
+			$typeobject = $self->PythonStruct($modulename, $fn_name, $d->{NAME}, mapTypeName($d), $d);
+		} else {
+			$typeobject = $self->PythonStruct($modulename, $fn_name, $d->{NAME}, mapTypeName($d), $d->{DATA});
+		}
+
+		$self->register_module_typeobject($fn_name, $typeobject, $d->{ORIGINAL});
+	}
+
+	if ($d->{TYPE} eq "ENUM" or $d->{TYPE} eq "BITMAP") {
+		$self->EnumAndBitmapConsts($d->{NAME}, $d);
+	}
+
+	if ($d->{TYPE} eq "TYPEDEF" and ($d->{DATA}->{TYPE} eq "ENUM" or $d->{DATA}->{TYPE} eq "BITMAP")) {
+		$self->EnumAndBitmapConsts($d->{NAME}, $d->{DATA});
+	}
+
+	if ($actual_ctype->{TYPE} eq "UNION" and defined($actual_ctype->{ELEMENTS})) {
+		my $prettyname = PrettifyTypeName($d->{NAME}, $basename);
+		my $typeobject = "$d->{NAME}\_Type";
+		my $docstring = $self->DocString($d, $d->{NAME});
+		my $cname = "union $d->{NAME}";
+
+		# C-union -> Python conversion, switching on the union level.
+		$self->pidl("static PyObject *py_import_$d->{NAME}(TALLOC_CTX *mem_ctx, int level, " .mapTypeName($d) . " *in)");
+		$self->pidl("{");
+		$self->indent;
+		$self->FromUnionToPythonFunction("mem_ctx", $actual_ctype, "level", "in") if ($actual_ctype->{TYPE} eq "UNION");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+
+		# Python -> C-union conversion (talloc'ed on mem_ctx).
+		$self->pidl("static ".mapTypeName($d) . " *py_export_$d->{NAME}(TALLOC_CTX *mem_ctx, int level, PyObject *in)");
+		$self->pidl("{");
+		$self->indent;
+		$self->FromPythonToUnionFunction($actual_ctype, mapTypeName($d), "level", "mem_ctx", "in") if ($actual_ctype->{TYPE} eq "UNION");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+
+		my $getsetters = "NULL";
+		my $py_methods = "NULL";
+		my $typename = mapTypeName($d);
+
+		# __import__ classmethod: (mem_ctx, level, in-union) -> PyObject.
+		$self->pidl("static PyObject *py_$d->{NAME}\_import(PyTypeObject *type, PyObject *args, PyObject *kwargs)");
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl("const char * const kwnames[] = { \"mem_ctx\", \"level\", \"in\", NULL };");
+		$self->pidl("PyObject *mem_ctx_obj = NULL;");
+		$self->pidl("TALLOC_CTX *mem_ctx = NULL;");
+		$self->pidl("int level = 0;");
+		$self->pidl("PyObject *in_obj = NULL;");
+		$self->pidl("$typename *in = NULL;");
+		$self->pidl("");
+		$self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, \"OiO:import\",");
+		$self->indent;
+		$self->pidl("discard_const_p(char *, kwnames),");
+		$self->pidl("&mem_ctx_obj,");
+		$self->pidl("&level,");
+		$self->pidl("&in_obj)) {");
+		$self->deindent;
+		$self->indent;
+		$self->pidl("return NULL;");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("mem_ctx = pytalloc_get_ptr(mem_ctx_obj);");
+		$self->pidl("if (mem_ctx == NULL) {");
+		$self->indent;
+		$self->pidl("PyErr_SetString(PyExc_TypeError, \"mem_ctx is NULL!\");");
+		$self->pidl("return NULL;");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("in = ($typename *)pytalloc_get_ptr(in_obj);");
+		$self->pidl("if (in == NULL) {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_TypeError, \"in needs to be a pointer to $typename!\");");
+		$self->pidl("return NULL;");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+		$self->pidl("return py_import_$d->{NAME}(mem_ctx, level, in);");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+
+		# __export__ classmethod: (mem_ctx, level, PyObject) -> union ref.
+		$self->pidl("static PyObject *py_$d->{NAME}\_export(PyTypeObject *type, PyObject *args, PyObject *kwargs)");
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl("const char * const kwnames[] = { \"mem_ctx\", \"level\", \"in\", NULL };");
+		$self->pidl("PyObject *mem_ctx_obj = NULL;");
+		$self->pidl("TALLOC_CTX *mem_ctx = NULL;");
+		$self->pidl("int level = 0;");
+		$self->pidl("PyObject *in = NULL;");
+		$self->pidl("$typename *out = NULL;");
+		$self->pidl("");
+		$self->pidl("if (!PyArg_ParseTupleAndKeywords(args, kwargs, \"OiO:export\",");
+		$self->indent;
+		$self->pidl("discard_const_p(char *, kwnames),");
+		$self->pidl("&mem_ctx_obj,");
+		$self->pidl("&level,");
+		$self->pidl("&in)) {");
+		$self->deindent;
+		$self->indent;
+		$self->pidl("return NULL;");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("mem_ctx = pytalloc_get_ptr(mem_ctx_obj);");
+		$self->pidl("if (mem_ctx == NULL) {");
+		$self->indent;
+		$self->pidl("PyErr_SetString(PyExc_TypeError, \"mem_ctx is NULL!\");");
+		$self->pidl("return NULL;");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+		$self->pidl("out = py_export_$d->{NAME}(mem_ctx, level, in);");
+		$self->pidl("if (out == NULL) {");
+		$self->indent;
+		$self->pidl("return NULL;");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+		$self->pidl("return pytalloc_GenericObject_reference(out);");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+
+		$py_methods = "py_$d->{NAME}_methods";
+		$self->pidl("static PyMethodDef $py_methods\[] = {");
+		$self->indent;
+		$self->pidl("{ \"__import__\", (PyCFunction)py_$d->{NAME}\_import,");
+		$self->indent;
+		$self->pidl("METH_VARARGS|METH_KEYWORDS|METH_CLASS,");
+		$self->pidl("\"T.__import__(mem_ctx, level, in) => ret.\" },");
+		$self->deindent;
+		$self->pidl("{ \"__export__\", (PyCFunction)py_$d->{NAME}\_export,");
+		$self->indent;
+		$self->pidl("METH_VARARGS|METH_KEYWORDS|METH_CLASS,");
+		$self->pidl("\"T.__export__(mem_ctx, level, in) => ret.\" },");
+		$self->deindent;
+		$self->pidl("{ NULL, NULL, 0, NULL }");
+		$self->deindent;
+		$self->pidl("};");
+		$self->pidl("");
+
+		# Direct instantiation is refused; only __import__/__export__.
+		$self->pidl("static PyObject *py_$d->{NAME}\_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)");
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_TypeError, \"New %s Objects are not supported\", type->tp_name);");
+		$self->pidl("return NULL;");
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("");
+
+		$self->pidl("");
+		$self->pidl_hdr("static PyTypeObject $typeobject;\n");
+		$self->pidl("static PyTypeObject $typeobject = {");
+		$self->indent;
+		$self->pidl("PyVarObject_HEAD_INIT(NULL, 0)");
+		$self->pidl(".tp_name = \"$modulename.$prettyname\",");
+		$self->pidl(".tp_getset = $getsetters,");
+		if ($docstring) {
+			$self->pidl(".tp_doc = $docstring,");
+		}
+		$self->pidl(".tp_methods = $py_methods,");
+		$self->pidl(".tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,");
+		$self->pidl(".tp_new = py_$d->{NAME}\_new,");
+		$self->deindent;
+		$self->pidl("};");
+
+		$self->pidl("");
+
+		# tp_base/tp_basicsize are filled in at module init time.
+		my $talloc_typename = $self->import_type_variable("talloc", "BaseObject");
+		$self->register_module_prereadycode(["$typeobject.tp_base = $talloc_typename;",
+						     "$typeobject.tp_basicsize = pytalloc_BaseObject_size();",
+						     ""]);
+
+		$self->register_module_typeobject($prettyname, "&$typeobject", $d->{ORIGINAL});
+	}
+}
+
+# If $d carries a [helpstring()] property, emit a #define holding that
+# help text and return the macro's name (PY_DOC_<NAME>); otherwise
+# return undef so the caller can fall back to a default docstring.
+sub DocString($$$)
+{
+	my ($self, $d, $name) = @_;
+
+	my $helpstring = has_property($d, "helpstring");
+	return undef unless ($helpstring);
+
+	my $docstring = uc("py_doc_$name");
+	$self->pidl("#define $docstring ".$helpstring);
+	return $docstring;
+}
+
+# Emit the Python bindings for one IDL interface: its constants, its
+# types and — when the interface has a [uuid] property, i.e. is callable
+# over DCE/RPC — a client connection PyTypeObject, an abstract-syntax
+# PyTypeObject and a PyNdrRpcMethodDef table of its callable functions.
+#
+# NOTE(review): $signature and $docstring are each declared with "my"
+# twice in the same scope (connection block and syntax block below),
+# which triggers a '"my" variable masks earlier declaration' warning
+# under "use warnings" — harmless, but worth confirming upstream.
+sub Interface($$$)
+{
+	my($self,$interface,$basename) = @_;
+
+	# Hand-written helper C code can be injected via [pyhelper("file.c")].
+	if (has_property($interface, "pyhelper")) {
+		$self->pidl("#include \"".unmake_str($interface->{PROPERTIES}->{pyhelper})."\"\n");
+	}
+
+	$self->Const($_) foreach (@{$interface->{CONSTS}});
+
+	foreach my $d (@{$interface->{TYPES}}) {
+		# [nopython] suppresses binding generation for a type.
+		next if has_property($d, "nopython");
+
+		$self->PythonType($basename, $d, $interface, $basename);
+	}
+
+	# Only interfaces with a UUID get the RPC client machinery below.
+	if (defined $interface->{PROPERTIES}->{uuid}) {
+		$self->pidl_hdr("static PyTypeObject $interface->{NAME}_InterfaceType;\n");
+		$self->pidl("");
+
+		my @fns = ();
+
+		foreach my $d (@{$interface->{FUNCTIONS}}) {
+			next if has_property($d, "noopnum");
+			next if has_property($d, "nopython");
+			next if has_property($d, "todo");
+
+			# Functions with pipe arguments are not wrappable yet.
+			my $skip = 0;
+			foreach my $e (@{$d->{ELEMENTS}}) {
+				if (ContainsPipe($e, $e->{LEVELS}[0])) {
+					$skip = 1;
+					last;
+				}
+			}
+			next if $skip;
+
+			# Strip the interface/module prefix for the Python-visible name.
+			my $prettyname = $d->{NAME};
+
+			$prettyname =~ s/^$interface->{NAME}_//;
+			$prettyname =~ s/^$basename\_//;
+
+			my $typeobject = $self->PythonFunctionStruct($basename, $d, $interface->{NAME}, $prettyname);
+			$self->register_module_typeobject($prettyname, $typeobject, $d->{ORIGINAL});
+
+			my ($infn, $outfn, $fndocstring) = $self->PythonFunction($d, $interface->{NAME}, $prettyname);
+
+			push (@fns, [$infn, $outfn, "dcerpc_$d->{NAME}_r", $prettyname, $fndocstring, $d->{OPNUM}]);
+		}
+
+		# Emit the per-function RPC method table consumed by
+		# PyInterface_AddNdrRpcMethods() at module init.
+		$self->pidl("const struct PyNdrRpcMethodDef py_ndr_$interface->{NAME}\_methods[] = {");
+		$self->indent;
+		foreach my $d (@fns) {
+			my ($infn, $outfn, $callfn, $prettyname, $docstring, $opnum) = @$d;
+			$self->pidl("{ \"$prettyname\", $docstring, (py_dcerpc_call_fn)$callfn, (py_data_pack_fn)$infn, (py_data_unpack_fn)$outfn, $opnum, &ndr_table_$interface->{NAME} },");
+		}
+		$self->pidl("{ NULL }");
+		$self->deindent;
+		$self->pidl("};");
+		$self->pidl("");
+
+		$self->pidl("static PyObject *interface_$interface->{NAME}_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)");
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl("return py_dcerpc_interface_init_helper(type, args, kwargs, &ndr_table_$interface->{NAME});");
+		$self->deindent;
+		$self->pidl("}");
+
+		$self->pidl("");
+
+		my $signature =
+"\"$interface->{NAME}(binding, lp_ctx=None, credentials=None) -> connection\\n\"
+\"\\n\"
+\"binding should be a DCE/RPC binding string (for example: ncacn_ip_tcp:127.0.0.1)\\n\"
+\"lp_ctx should be a path to a smb.conf file or a param.LoadParm object\\n\"
+\"credentials should be a credentials.Credentials object.\\n\\n\"";
+
+		my $docstring = $self->DocString($interface, $interface->{NAME});
+
+		if ($docstring) {
+			$docstring = "$signature$docstring";
+		} else {
+			$docstring = $signature;
+		}
+
+		my $if_typename = "$interface->{NAME}_InterfaceType";
+
+		$self->pidl("static PyTypeObject $if_typename = {");
+		$self->indent;
+		$self->pidl("PyVarObject_HEAD_INIT(NULL, 0)");
+		$self->pidl(".tp_name = \"$basename.$interface->{NAME}\",");
+		$self->pidl(".tp_basicsize = sizeof(dcerpc_InterfaceObject),");
+		$self->pidl(".tp_doc = $docstring,");
+		$self->pidl(".tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,");
+		$self->pidl(".tp_new = interface_$interface->{NAME}_new,");
+		$self->deindent;
+		$self->pidl("};");
+
+		$self->pidl("");
+
+		# The connection type inherits from samba.dcerpc.base.ClientConnection.
+		$self->register_module_typeobject($interface->{NAME}, "&$if_typename", $interface->{ORIGINAL});
+		my $dcerpc_typename = $self->import_type_variable("samba.dcerpc.base", "ClientConnection");
+		$self->register_module_prereadycode(["$if_typename.tp_base = $dcerpc_typename;", ""]);
+		$self->register_module_postreadycode(["if (!PyInterface_AddNdrRpcMethods(&$if_typename, py_ndr_$interface->{NAME}\_methods))", "\treturn NULL;", ""]);
+
+
+		$self->pidl("static PyObject *syntax_$interface->{NAME}_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)");
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl("return py_dcerpc_syntax_init_helper(type, args, kwargs, &ndr_table_$interface->{NAME}.syntax_id);");
+		$self->deindent;
+		$self->pidl("}");
+
+		$self->pidl("");
+
+		my $signature = "\"$interface->{NAME}_abstract_syntax()\\n\"";
+
+		my $docstring = $self->DocString($interface, $interface->{NAME}."_syntax");
+
+		if ($docstring) {
+			$docstring = "$signature$docstring";
+		} else {
+			$docstring = $signature;
+		}
+
+		my $syntax_typename = "$interface->{NAME}_SyntaxType";
+
+		$self->pidl("static PyTypeObject $syntax_typename = {");
+		$self->indent;
+		$self->pidl("PyVarObject_HEAD_INIT(NULL, 0)");
+		$self->pidl(".tp_name = \"$basename.$interface->{NAME}_abstract_syntax\",");
+		$self->pidl(".tp_doc = $docstring,");
+		$self->pidl(".tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,");
+		$self->pidl(".tp_new = syntax_$interface->{NAME}_new,");
+		$self->deindent;
+		$self->pidl("};");
+
+		$self->pidl("");
+
+		$self->register_module_typeobject("$interface->{NAME}_abstract_syntax", "&$syntax_typename", $interface->{ORIGINAL});
+		if (not defined($self->existing_module_object("abstract_syntax"))) {
+			# Only the first syntax gets registered with the legacy
+			# "abstract_syntax" name
+			$self->register_module_typeobject("abstract_syntax", "&$syntax_typename", $interface->{ORIGINAL});
+		}
+		my $ndr_typename = $self->import_type_variable("samba.dcerpc.misc", "ndr_syntax_id");
+		$self->register_module_prereadycode(["$syntax_typename.tp_base = $ndr_typename;",
+						     "$syntax_typename.tp_basicsize = pytalloc_BaseObject_size();",
+						     ""]);
+	}
+
+	$self->pidl_hdr("\n");
+}
+
+# Queue one module-level function for the generated PyMethodDef table:
+# (exposed name, C wrapper symbol, METH_* flags, docstring expression).
+sub register_module_method($$$$$)
+{
+	my $self = shift;
+	push (@{$self->{module_methods}}, [@_])
+}
+
+# Expose a PyTypeObject under $name in the generated module: register it
+# as a module object (cast to PyObject*), queue it for PyType_Ready(),
+# and schedule the type-patch call for it.
+sub register_module_typeobject($$$$)
+{
+	my $self = shift;
+	my ($name, $py_name, $location) = @_;
+
+	$self->register_module_object($name, "(PyObject *)(void *)$py_name", $location);
+	$self->check_ready_type($py_name);
+	$self->register_patch_type_call($name, $py_name);
+}
+
+# Record $py_name so PyType_Ready() is called on it exactly once at
+# module init.  The duplicate check uses exact string equality rather
+# than interpolating $py_name into a regex: names such as "&foo_Type"
+# may contain regex metacharacters, which would make the old
+# /^$py_name$/ match misfire and allow duplicate (or drop valid)
+# entries.
+sub check_ready_type($$)
+{
+	my ($self, $py_name) = @_;
+	push (@{$self->{ready_types}}, $py_name) unless (grep { $_ eq $py_name } @{$self->{ready_types}});
+}
+
+# Record that the generated module must import the Python module at
+# $module_path, and return the C variable name that will hold the
+# imported module object ("dep_<path with dots as underscores>").
+# Repeated registrations of the same path collapse to one entry.
+sub register_module_import($$)
+{
+	my ($self, $module_path) = @_;
+
+	(my $var_name = $module_path) =~ tr/./_/;
+	$var_name = "dep_$var_name";
+
+	if (not defined $self->{module_imports_uniq}->{$var_name}) {
+		my $entry = { "key" => $var_name, "val" => $module_path};
+		push (@{$self->{module_imports}}, $entry);
+		$self->{module_imports_uniq}->{$var_name} = $entry;
+	}
+	return $var_name;
+}
+
+# Arrange for the Python type $name to be imported from $module at
+# module init and return the C variable name ("<name>_Type") under
+# which the type object will be available.  Duplicate names are only
+# recorded once.
+sub import_type_variable($$$)
+{
+	my ($self, $module, $name) = @_;
+
+	$self->register_module_import($module);
+	if (not defined $self->{type_imports_uniq}->{$name}) {
+		my $entry = { "key" => $name, "val" => $module};
+		push (@{$self->{type_imports}}, $entry);
+		$self->{type_imports_uniq}->{$name} = $entry;
+	}
+	return "$name\_Type";
+}
+
+# Return the C expression for the PyTypeObject* of $orig_ctype, or undef
+# when the type's origin file is unknown.  External types (declared in a
+# different IDL base file) are resolved via import_type_variable(), which
+# also records the needed samba.dcerpc.* module import.
+#
+# NOTE(review): the wire-name aliasing below rewrites $orig_ctype->{NAME}
+# *in place*, mutating the caller's type node — later users of the same
+# node will see the renamed type.
+sub use_type_variable($$)
+{
+	my ($self, $orig_ctype) = @_;
+	# FIXME: Have a global lookup table for types that look different on the
+	# wire than they are named in C?
+	if ($orig_ctype->{NAME} eq "dom_sid2" or
+	    $orig_ctype->{NAME} eq "dom_sid28" or
+	    $orig_ctype->{NAME} eq "dom_sid0") {
+		$orig_ctype->{NAME} = "dom_sid";
+	}
+	if ($orig_ctype->{NAME} eq "spoolss_security_descriptor") {
+		$orig_ctype->{NAME} = "security_descriptor";
+	}
+
+	my $ctype = resolveType($orig_ctype);
+	unless (defined($ctype->{BASEFILE})) {
+		return undef;
+	}
+	# If this is an external type, make sure we do the right imports.
+	if (($ctype->{BASEFILE} ne $self->{BASENAME})) {
+		return $self->import_type_variable("samba.dcerpc.$ctype->{BASEFILE}", $ctype->{NAME});
+	}
+	return "&$ctype->{NAME}_Type";
+}
+
+# Record a (type name, C type-object variable) pair for the per-type
+# patch calls emitted at module initialisation.
+sub register_patch_type_call($$$)
+{
+	my $self = shift;
+	my ($typename, $cvar) = @_;
+
+	push (@{$self->{patch_type_calls}}, [$typename, $cvar]);
+}
+
+# Append lines of C code (arrayref) to run before the module's types
+# are readied with PyType_Ready().
+sub register_module_prereadycode($$)
+{
+	my ($self, $code) = @_;
+
+	push (@{$self->{prereadycode}}, @{$code});
+}
+
+# Append lines of C code (arrayref) to run after the module's types
+# have been readied with PyType_Ready().
+sub register_module_postreadycode($$)
+{
+	my ($self, $code) = @_;
+
+	push (@{$self->{postreadycode}}, @{$code});
+}
+
+# Return the C expression previously registered for the module object
+# called $name, or undef when nothing of that name is registered yet.
+sub existing_module_object($$)
+{
+	my ($self, $name) = @_;
+
+	# A missing key yields undef, matching the explicit check/return.
+	return $self->{module_object_uniq}->{$name};
+}
+
+# Register a named PyObject expression with the module.  Registering the
+# same name twice is a generator bug and aborts via fatal().
+sub register_module_object($$$$)
+{
+	my ($self, $name, $py_name, $location) = @_;
+
+	my $existing = $self->existing_module_object($name);
+	if (defined($existing)) {
+		fatal($location, "module_object($name, $py_name) registered twice! $existing.");
+	}
+
+	push (@{$self->{module_objects}}, [$name, $py_name]);
+	$self->{module_object_uniq}->{$name} = $py_name;
+}
+
+# Emit a C assignment of $src into $dest.  A $dest spelled "&x" means
+# the value lives at x: assigning NULL becomes a memset() of x, any
+# other source is copied by dereference ("x = *src;").  Plain
+# destinations get a direct assignment.
+sub assign($$$)
+{
+	my ($self, $dest, $src) = @_;
+
+	my $dest_is_addr = ($dest =~ /^\&/);
+	if ($dest_is_addr and $src eq "NULL") {
+		$self->pidl("memset($dest, 0, sizeof(" . get_value_of($dest) . "));");
+	} elsif ($dest_is_addr) {
+		$self->pidl(get_value_of($dest) . " = *$src;");
+	} else {
+		$self->pidl("$dest = $src;");
+	}
+}
+
+# Emit C code converting the Python object $py_var (str or bytes) into a
+# talloc'd C string assigned to $target, allocated on $mem_ctx.  $fail is
+# the C statement to emit on every error path.  str objects are encoded
+# to UTF-8 first; bytes are used as-is.
+sub ConvertStringFromPythonData($$$$$)
+{
+	my ($self, $mem_ctx, $py_var, $target, $fail) = @_;
+
+	$self->pidl("{");
+	$self->indent;
+	$self->pidl("const char *test_str;");
+	$self->pidl("const char *talloc_str;");
+	$self->pidl("PyObject *unicode = NULL;");
+	$self->pidl("if (PyUnicode_Check($py_var)) {");
+	$self->indent;
+	# FIXME: Use Unix charset setting rather than utf-8
+	$self->pidl("unicode = PyUnicode_AsEncodedString($py_var, \"utf-8\", \"ignore\");");
+	$self->pidl("if (unicode == NULL) {");
+	$self->indent;
+	$self->pidl("PyErr_NoMemory();");
+	$self->pidl("$fail");
+	$self->deindent;
+	$self->pidl("}");
+
+	$self->pidl("test_str = PyBytes_AS_STRING(unicode);");
+	$self->deindent;
+	$self->pidl("} else if (PyBytes_Check($py_var)) {");
+	$self->indent;
+	$self->pidl("test_str = PyBytes_AS_STRING($py_var);");
+	$self->deindent;
+	$self->pidl("} else {");
+	$self->indent;
+	$self->pidl("PyErr_Format(PyExc_TypeError, \"Expected string or unicode object, got %s\", Py_TYPE($py_var)->tp_name);");
+	$self->pidl("$fail");
+	$self->deindent;
+	$self->pidl("}");
+	# Copy onto the caller's talloc context, then drop the temporary
+	# encoded object before checking the copy succeeded.
+	$self->pidl("talloc_str = talloc_strdup($mem_ctx, test_str);");
+	$self->pidl("if (unicode != NULL) {");
+	$self->indent;
+	$self->pidl("Py_DECREF(unicode);");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("if (talloc_str == NULL) {");
+	$self->indent;
+	$self->pidl("PyErr_NoMemory();");
+	$self->pidl("$fail");
+	$self->deindent;
+	$self->pidl("}");
+	$self->pidl("$target = talloc_str;");
+	$self->deindent;
+	$self->pidl("}");
+}
+
+# Emit C code converting a single Python value $cvar of IDL type $ctype
+# into the C lvalue $target, allocating on $mem_ctx.  $fail is the C
+# statement emitted on error paths; $location is the IDL source location
+# for diagnostics; $switch (optional) is the discriminator expression
+# when $ctype is a union.  Integer types get range validation so invalid
+# Python integers cannot be silently truncated into C fields.
+sub ConvertObjectFromPythonData($$$$$$;$$)
+{
+	my ($self, $mem_ctx, $cvar, $ctype, $target, $fail, $location, $switch) = @_;
+
+	fatal($location, "undef type for $cvar") unless(defined($ctype));
+
+	$ctype = resolveType($ctype);
+
+	# Look through typedefs to the underlying type category.
+	my $actual_ctype = $ctype;
+	if ($actual_ctype->{TYPE} eq "TYPEDEF") {
+		$actual_ctype = $actual_ctype->{DATA};
+	}
+
+	# We need to cover ENUMs, BITMAPS and SCALAR values here, as
+	# all could otherwise be assigned invalid integer values
+	my $ctype_alias = "";
+	my $uint_max = "";
+	if ($actual_ctype->{TYPE} eq "ENUM") {
+		# Importantly, ENUM values are unsigned in pidl, and
+		# typically map to uint32
+		$ctype_alias = enum_type_fn($actual_ctype);
+	} elsif ($actual_ctype->{TYPE} eq "BITMAP") {
+		$ctype_alias = bitmap_type_fn($actual_ctype);
+	} elsif ($actual_ctype->{TYPE} eq "SCALAR") {
+		$ctype_alias = expandAlias($actual_ctype->{NAME});
+	}
+
+	# This is the unsigned Python Integer -> C integer validation
+	# case. The signed case is below.
+	if ($ctype_alias =~ /^(uint[0-9]*|hyper|udlong|udlongr
+			     |NTTIME_hyper|NTTIME|NTTIME_1sec
+			     |uid_t|gid_t)$/x) {
+		$self->pidl("{");
+		$self->indent;
+		# ndr_sizeof2uintmax() resolves to the right UINTn_MAX at
+		# compile time from the target field's size.
+		$self->pidl("const unsigned long long uint_max = ndr_sizeof2uintmax(sizeof($target));");
+		$self->pidl("if (PyLong_Check($cvar)) {");
+		$self->indent;
+		$self->pidl("unsigned long long test_var;");
+		$self->pidl("test_var = PyLong_AsUnsignedLongLong($cvar);");
+		$self->pidl("if (PyErr_Occurred() != NULL) {");
+		$self->indent;
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("if (test_var > uint_max) {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_OverflowError, \"Expected type %s or %s within range 0 - %llu, got %llu\",\\");
+		$self->pidl(" PyInt_Type.tp_name, PyLong_Type.tp_name, uint_max, test_var);");
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("$target = test_var;");
+		$self->deindent;
+		$self->pidl("} else if (PyInt_Check($cvar)) {");
+		$self->indent;
+		$self->pidl("long test_var;");
+		$self->pidl("test_var = PyInt_AsLong($cvar);");
+		$self->pidl("if (test_var < 0 || test_var > uint_max) {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_OverflowError, \"Expected type %s or %s within range 0 - %llu, got %ld\",\\");
+		$self->pidl(" PyInt_Type.tp_name, PyLong_Type.tp_name, uint_max, test_var);");
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("$target = test_var;");
+		$self->deindent;
+		$self->pidl("} else {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_TypeError, \"Expected type %s or %s\",\\");
+		$self->pidl(" PyInt_Type.tp_name, PyLong_Type.tp_name);");
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->deindent;
+		$self->pidl("}");
+		return;
+	}
+
+	# Confirm the signed python integer fits in the C type
+	# correctly. It is subtly different from the unsigned case
+	# above, so while it looks like a duplicate, it is not
+	# actually a duplicate.
+	if ($ctype_alias =~ /^(dlong|char|int[0-9]*|time_t)$/x) {
+		$self->pidl("{");
+		$self->indent;
+		$self->pidl("const long long int_max = ndr_sizeof2intmax(sizeof($target));");
+		$self->pidl("const long long int_min = -int_max - 1;");
+		$self->pidl("if (PyLong_Check($cvar)) {");
+		$self->indent;
+		$self->pidl("long long test_var;");
+		$self->pidl("test_var = PyLong_AsLongLong($cvar);");
+		$self->pidl("if (PyErr_Occurred() != NULL) {");
+		$self->indent;
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("if (test_var < int_min || test_var > int_max) {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_OverflowError, \"Expected type %s or %s within range %lld - %lld, got %lld\",\\");
+		$self->pidl(" PyInt_Type.tp_name, PyLong_Type.tp_name, int_min, int_max, test_var);");
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("$target = test_var;");
+		$self->deindent;
+		$self->pidl("} else if (PyInt_Check($cvar)) {");
+		$self->indent;
+		$self->pidl("long test_var;");
+		$self->pidl("test_var = PyInt_AsLong($cvar);");
+		$self->pidl("if (test_var < int_min || test_var > int_max) {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_OverflowError, \"Expected type %s or %s within range %lld - %lld, got %ld\",\\");
+		$self->pidl(" PyInt_Type.tp_name, PyLong_Type.tp_name, int_min, int_max, test_var);");
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->pidl("$target = test_var;");
+		$self->deindent;
+		$self->pidl("} else {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_TypeError, \"Expected type %s or %s\",\\");
+		$self->pidl(" PyInt_Type.tp_name, PyLong_Type.tp_name);");
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+		$self->deindent;
+		$self->pidl("}");
+		return;
+	}
+
+	# Wrapped types: take a talloc reference so the C structure keeps
+	# the Python-owned memory alive.
+	if ($actual_ctype->{TYPE} eq "STRUCT" or $actual_ctype->{TYPE} eq "INTERFACE") {
+		my $ctype_name = $self->use_type_variable($ctype);
+		unless (defined ($ctype_name)) {
+			error($location, "Unable to determine origin of type `" . mapTypeName($ctype) . "'");
+			$self->pidl("PyErr_SetString(PyExc_TypeError, \"Can not convert C Type " . mapTypeName($ctype) . " from Python\");");
+			return;
+		}
+		$self->pidl("PY_CHECK_TYPE($ctype_name, $cvar, $fail);");
+		$self->pidl("if (talloc_reference($mem_ctx, pytalloc_get_mem_ctx($cvar)) == NULL) {");
+		$self->indent;
+		$self->pidl("PyErr_NoMemory();");
+		$self->pidl("$fail");
+		$self->deindent;
+		$self->pidl("}");
+		$self->assign($target, "(".mapTypeName($ctype)." *)pytalloc_get_ptr($cvar)");
+		return;
+	}
+
+	if ($actual_ctype->{TYPE} eq "UNION") {
+		my $ctype_name = $self->use_type_variable($ctype);
+		unless (defined ($ctype_name)) {
+			error($location, "Unable to determine origin of type `" . mapTypeName($ctype) . "'");
+			$self->pidl("PyErr_SetString(PyExc_TypeError, \"Can not convert C Type " . mapTypeName($ctype) . " from Python\");");
+			return;
+		}
+		my $export = "pyrpc_export_union($ctype_name, $mem_ctx, $switch, $cvar, \"".mapTypeName($ctype)."\")";
+		$self->assign($target, "(".mapTypeName($ctype)." *)$export");
+		return;
+	}
+
+	if ($actual_ctype->{TYPE} eq "SCALAR" and $actual_ctype->{NAME} eq "DATA_BLOB") {
+		$self->pidl("$target = data_blob_talloc($mem_ctx, PyBytes_AS_STRING($cvar), PyBytes_GET_SIZE($cvar));");
+		return;
+	}
+
+	# All the string-like scalar types share the generic string path.
+	if ($actual_ctype->{TYPE} eq "SCALAR" and
+		($actual_ctype->{NAME} eq "string"
+		 or $actual_ctype->{NAME} eq "nbt_string"
+		 or $actual_ctype->{NAME} eq "nbt_name"
+		 or $actual_ctype->{NAME} eq "wrepl_nbt_name"
+		 or $actual_ctype->{NAME} eq "dns_string"
+		 or $actual_ctype->{NAME} eq "dnsp_string"
+		 or $actual_ctype->{NAME} eq "dns_name"
+		 or $actual_ctype->{NAME} eq "ipv4address"
+		 or $actual_ctype->{NAME} eq "ipv6address"
+		 or $actual_ctype->{NAME} eq "dnsp_name")) {
+		$self->ConvertStringFromPythonData($mem_ctx, $cvar, $target, $fail);
+		return;
+	}
+
+	if ($actual_ctype->{TYPE} eq "SCALAR" and $actual_ctype->{NAME} eq "NTSTATUS") {
+		$self->pidl("$target = NT_STATUS(PyInt_AsLong($cvar));");
+		return;
+	}
+
+	if ($actual_ctype->{TYPE} eq "SCALAR" and $actual_ctype->{NAME} eq "WERROR") {
+		$self->pidl("$target = W_ERROR(PyInt_AsLong($cvar));");
+		return;
+	}
+
+	if ($actual_ctype->{TYPE} eq "SCALAR" and $actual_ctype->{NAME} eq "HRESULT") {
+		$self->pidl("$target = HRES_ERROR(PyInt_AsLong($cvar));");
+		return;
+	}
+
+	if ($actual_ctype->{TYPE} eq "SCALAR" and $actual_ctype->{NAME} eq "string_array") {
+		$self->pidl("$target = pytalloc_get_ptr($cvar);");
+		return;
+	}
+
+	if ($actual_ctype->{TYPE} eq "SCALAR" and $actual_ctype->{NAME} eq "pointer") {
+		$self->assign($target, "pytalloc_get_ptr($cvar)");
+		return;
+	}
+
+	fatal($location, "unknown type `$actual_ctype->{TYPE}' for ".mapTypeName($ctype) . ": $cvar");
+
+}
+
+# Recursively emit C code unpacking the Python object $py_var into the C
+# variable $var_name, one NDR "level" (pointer / array / data / switch /
+# subcontext) at a time.  $l is the current level of element $e; $recurse
+# is 0 only on the outermost call, which adds a NULL (attribute-deleted)
+# guard.  $fail is the C statement emitted on error paths.
+sub ConvertObjectFromPythonLevel($$$$$$$$$)
+{
+	my ($self, $env, $mem_ctx, $py_var, $e, $l, $var_name, $fail, $recurse) = @_;
+	# SUBCONTEXT levels are transparent: skip over them in both directions.
+	my $nl = GetNextLevel($e, $l);
+	if ($nl and $nl->{TYPE} eq "SUBCONTEXT") {
+		$nl = GetNextLevel($e, $nl);
+	}
+	my $pl = GetPrevLevel($e, $l);
+	if ($pl and $pl->{TYPE} eq "SUBCONTEXT") {
+		$pl = GetPrevLevel($e, $pl);
+	}
+
+	if ($recurse == 0) {
+		$self->pidl("if ($py_var == NULL) {");
+		$self->indent;
+		$self->pidl("PyErr_Format(PyExc_AttributeError, \"Cannot delete NDR object: " .
+			    mapTypeName($var_name) . "\");");
+		$self->pidl($fail);
+		$self->deindent;
+		$self->pidl("}");
+	}
+	$recurse = $recurse + 1;
+
+	if ($l->{TYPE} eq "POINTER") {
+		# Non-ref pointers accept Py_None as NULL.
+		if ($l->{POINTER_TYPE} ne "ref") {
+			$self->pidl("if ($py_var == Py_None) {");
+			$self->indent;
+			$self->pidl("$var_name = NULL;");
+			$self->deindent;
+			$self->pidl("} else {");
+			$self->indent;
+		}
+		# if we want to handle more than one level of pointer in python interfaces
+		# then this is where we would need to allocate it
+		if ($l->{POINTER_TYPE} eq "ref") {
+			$self->pidl("$var_name = talloc_ptrtype($mem_ctx, $var_name);");
+			$self->pidl("if ($var_name == NULL) {");
+			$self->indent;
+			$self->pidl("PyErr_NoMemory();");
+			$self->pidl($fail);
+			$self->deindent;
+			$self->pidl("}");
+		} elsif ($nl->{TYPE} eq "DATA" and Parse::Pidl::Typelist::is_scalar($nl->{DATA_TYPE})
+			 and not Parse::Pidl::Typelist::scalar_is_reference($nl->{DATA_TYPE})) {
+			$self->pidl("$var_name = talloc_ptrtype($mem_ctx, $var_name);");
+			$self->pidl("if ($var_name == NULL) {");
+			$self->indent;
+			$self->pidl("PyErr_NoMemory();");
+			$self->pidl($fail);
+			$self->deindent;
+			$self->pidl("}");
+		} else {
+			$self->pidl("$var_name = NULL;");
+		}
+		unless ($nl->{TYPE} eq "DATA" and Parse::Pidl::Typelist::scalar_is_reference($nl->{DATA_TYPE})) {
+			$var_name = get_value_of($var_name);
+		}
+		$self->ConvertObjectFromPythonLevel($env, $mem_ctx, $py_var, $e, $nl, $var_name, $fail, $recurse);
+		if ($l->{POINTER_TYPE} ne "ref") {
+			$self->deindent;
+			$self->pidl("}");
+		}
+	} elsif ($l->{TYPE} eq "ARRAY") {
+		if ($pl && $pl->{TYPE} eq "POINTER") {
+			$var_name = get_pointer_to($var_name);
+		}
+
+		if (is_charset_array($e, $l)) {
+			# Charset arrays surface in Python as strings.
+			$self->ConvertStringFromPythonData($mem_ctx, $py_var, $var_name, $fail);
+		} else {
+			my $counter = "$e->{NAME}_cntr_$l->{LEVEL_INDEX}";
+			$self->pidl("PY_CHECK_TYPE(&PyList_Type, $py_var, $fail);");
+			$self->pidl("{");
+			$self->indent;
+			$self->pidl("int $counter;");
+			if (ArrayDynamicallyAllocated($e, $l)) {
+				$self->pidl("$var_name = talloc_array_ptrtype($mem_ctx, $var_name, PyList_GET_SIZE($py_var));");
+				$self->pidl("if (!$var_name) { $fail; }");
+				$self->pidl("talloc_set_name_const($var_name, \"ARRAY: $var_name\");");
+			} else {
+				# Fixed-size array: the Python list length must match.
+				$self->pidl("if (ARRAY_SIZE($var_name) != PyList_GET_SIZE($py_var)) {");
+				$self->indent;
+				$self->pidl("PyErr_Format(PyExc_TypeError, \"Expected list of type %s, length %zu, got %zd\", Py_TYPE($py_var)->tp_name, ARRAY_SIZE($var_name), PyList_GET_SIZE($py_var));");
+				$self->pidl("$fail");
+				$self->deindent;
+				$self->pidl("}");
+			}
+			$self->pidl("for ($counter = 0; $counter < PyList_GET_SIZE($py_var); $counter++) {");
+			$self->indent;
+			$self->ConvertObjectFromPythonLevel($env, $var_name, "PyList_GET_ITEM($py_var, $counter)", $e, $nl, $var_name."[$counter]", $fail, 0);
+			$self->deindent;
+			$self->pidl("}");
+			$self->deindent;
+			$self->pidl("}");
+		}
+	} elsif ($l->{TYPE} eq "DATA") {
+		if (not Parse::Pidl::Typelist::is_scalar($l->{DATA_TYPE})) {
+			$var_name = get_pointer_to($var_name);
+		}
+		$self->ConvertObjectFromPythonData($mem_ctx, $py_var, $l->{DATA_TYPE}, $var_name, $fail, $e->{ORIGINAL});
+	} elsif ($l->{TYPE} eq "SWITCH") {
+		$var_name = get_pointer_to($var_name);
+		my $switch = ParseExpr($l->{SWITCH_IS}, $env, $e);
+		my $switch_ptr = "$e->{NAME}_switch_$l->{LEVEL_INDEX}";
+		$self->pidl("{");
+		$self->indent;
+		my $union_type = mapTypeName($nl->{DATA_TYPE});
+		$self->pidl("$union_type *$switch_ptr;");
+		$self->ConvertObjectFromPythonData($mem_ctx, $py_var, $nl->{DATA_TYPE}, $switch_ptr, $fail, $e->{ORIGINAL}, $switch);
+		$self->fail_on_null($switch_ptr, $fail);
+		$self->assign($var_name, "$switch_ptr");
+		$self->deindent;
+		$self->pidl("}");
+	} elsif ($l->{TYPE} eq "SUBCONTEXT") {
+		$self->ConvertObjectFromPythonLevel($env, $mem_ctx, $py_var, $e, $nl, $var_name, $fail, $recurse);
+	} else {
+		fatal($e->{ORIGINAL}, "unknown level type $l->{TYPE}");
+	}
+}
+
+# Entry point for Python -> C conversion of a whole element: start the
+# level walk at the element's outermost NDR level with recursion depth 0.
+sub ConvertObjectFromPython($$$$$$$)
+{
+	my ($self, $env, $mem_ctx, $ctype, $cvar, $target, $fail) = @_;
+
+	$self->ConvertObjectFromPythonLevel($env, $mem_ctx, $cvar, $ctype,
+					    $ctype->{LEVELS}[0], $target, $fail, 0);
+}
+
+# Return (do not emit) the C expression that converts the scalar C value
+# $cvar of scalar type $ctypename into a new PyObject*.  $mem_ctx is only
+# used by the talloc-reference conversions (string_array, pointer).
+# Dies when handed a non-scalar (hashref) type or an unknown scalar name.
+sub ConvertScalarToPython($$$$)
+{
+	my ($self, $ctypename, $cvar, $mem_ctx) = @_;
+
+	die("expected string for $cvar, not $ctypename") if (ref($ctypename) eq "HASH");
+
+	$ctypename = expandAlias($ctypename);
+
+	if ($ctypename =~ /^(int64|dlong)$/) {
+		return "ndr_PyLong_FromLongLong($cvar)";
+	}
+
+	if ($ctypename =~ /^(uint64|hyper|NTTIME_hyper|NTTIME|NTTIME_1sec|udlong|udlongr|uid_t|gid_t)$/) {
+		return "ndr_PyLong_FromUnsignedLongLong($cvar)";
+	}
+
+	if ($ctypename =~ /^(char|int|int8|int16|int32|time_t)$/) {
+		return "PyInt_FromLong($cvar)";
+	}
+
+	# Needed to ensure unsigned values in a 32 or 16 bit enum is
+	# cast correctly to a uint32_t, not sign extended to a a
+	# possibly 64 bit unsigned long. (enums are signed in C,
+	# unsigned in NDR)
+	if ($ctypename =~ /^(uint32|uint3264)$/) {
+		return "ndr_PyLong_FromUnsignedLongLong((uint32_t)$cvar)";
+	}
+
+	if ($ctypename =~ /^(uint|uint8|uint16|uint1632)$/) {
+		return "PyInt_FromLong((uint16_t)$cvar)";
+	}
+
+	if ($ctypename eq "DATA_BLOB") {
+		return "PyBytes_FromStringAndSize((char *)($cvar).data, ($cvar).length)";
+	}
+
+	# Status types become their exception-aware wrappers.
+	if ($ctypename eq "NTSTATUS") {
+		return "PyErr_FromNTSTATUS($cvar)";
+	}
+
+	if ($ctypename eq "WERROR") {
+		return "PyErr_FromWERROR($cvar)";
+	}
+
+	if ($ctypename eq "HRESULT") {
+		return "PyErr_FromHRESULT($cvar)";
+	}
+
+	# String-like scalars: NULL-safe conversion to a Python string.
+	if (($ctypename eq "string" or $ctypename eq "nbt_string" or $ctypename eq "nbt_name" or $ctypename eq "wrepl_nbt_name")) {
+		return "PyString_FromStringOrNULL($cvar)";
+	}
+
+	if (($ctypename eq "dns_string" or $ctypename eq "dns_name")) {
+		return "PyString_FromStringOrNULL($cvar)";
+	}
+
+	# Not yet supported
+	if ($ctypename eq "string_array") {
+		return "pytalloc_GenericObject_reference_ex($mem_ctx, $cvar)";
+	}
+	if ($ctypename eq "ipv4address") { return "PyString_FromStringOrNULL($cvar)"; }
+	if ($ctypename eq "ipv6address") { return "PyString_FromStringOrNULL($cvar)"; }
+	if ($ctypename eq "dnsp_name") { return "PyString_FromStringOrNULL($cvar)"; }
+	if ($ctypename eq "dnsp_string") { return "PyString_FromStringOrNULL($cvar)"; }
+	if ($ctypename eq "pointer") {
+		return "pytalloc_GenericObject_reference_ex($mem_ctx, $cvar)";
+	}
+
+	die("Unknown scalar type $ctypename");
+}
+
+# Return (do not emit) the C expression converting the C value $cvar of
+# IDL type $ctype into a new PyObject*.  $switch is the discriminator
+# expression when $ctype is a union.  Unresolvable external types report
+# an error and yield "NULL".
+sub ConvertObjectToPythonData($$$$$;$$)
+{
+	my ($self, $mem_ctx, $ctype, $cvar, $location, $switch) = @_;
+
+	die("undef type for $cvar") unless(defined($ctype));
+
+	$ctype = resolveType($ctype);
+
+	# Look through typedefs to the underlying type category.
+	my $actual_ctype = $ctype;
+	if ($actual_ctype->{TYPE} eq "TYPEDEF") {
+		$actual_ctype = $actual_ctype->{DATA};
+	}
+
+	if ($actual_ctype->{TYPE} eq "ENUM") {
+		return $self->ConvertScalarToPython(Parse::Pidl::Typelist::enum_type_fn($actual_ctype), $cvar, $mem_ctx);
+	} elsif ($actual_ctype->{TYPE} eq "BITMAP") {
+		return $self->ConvertScalarToPython(Parse::Pidl::Typelist::bitmap_type_fn($actual_ctype), $cvar, $mem_ctx);
+	} elsif ($actual_ctype->{TYPE} eq "SCALAR") {
+		return $self->ConvertScalarToPython($actual_ctype->{NAME}, $cvar, $mem_ctx);
+	} elsif ($actual_ctype->{TYPE} eq "UNION") {
+		my $ctype_name = $self->use_type_variable($ctype);
+		unless (defined($ctype_name)) {
+			error($location, "Unable to determine origin of type `" . mapTypeName($ctype) . "'");
+			return "NULL"; # FIXME!
+		}
+		return "pyrpc_import_union($ctype_name, $mem_ctx, $switch, $cvar, \"".mapTypeName($ctype)."\")";
+	} elsif ($actual_ctype->{TYPE} eq "STRUCT" or $actual_ctype->{TYPE} eq "INTERFACE") {
+		my $ctype_name = $self->use_type_variable($ctype);
+		unless (defined($ctype_name)) {
+			error($location, "Unable to determine origin of type `" . mapTypeName($ctype) . "'");
+			return "NULL"; # FIXME!
+		}
+		# Wrap the C struct in a pytalloc object referencing $mem_ctx.
+		return "pytalloc_reference_ex($ctype_name, $mem_ctx, $cvar)";
+	}
+
+	fatal($location, "unknown type $actual_ctype->{TYPE} for ".mapTypeName($ctype) . ": $cvar");
+}
+
+# Emit a C NULL check on $var that executes the C statement $fail when
+# $var is NULL.
+sub fail_on_null($$$)
+{
+	my ($self, $var, $fail) = @_;
+	$self->pidl("if ($var == NULL) {");
+	$self->indent;
+	$self->pidl($fail);
+	$self->deindent;
+	$self->pidl("}");
+}
+
+# Recursively emit C code packing the C value $var_name into the Python
+# variable $py_var, one NDR level (pointer / array / switch / data /
+# subcontext) at a time.  $recurse tracks pointer nesting so only the
+# outermost non-ref pointer maps NULL to Py_None.
+#
+# NOTE(review): the prototype ($$$$$$$) lists 7 slots but 9 arguments
+# are unpacked; prototypes are not enforced on method calls, so this is
+# inert — but the mismatch is worth confirming upstream.
+sub ConvertObjectToPythonLevel($$$$$$$)
+{
+	my ($self, $mem_ctx, $env, $e, $l, $var_name, $py_var, $fail, $recurse) = @_;
+	# SUBCONTEXT levels are transparent: skip over them in both directions.
+	my $nl = GetNextLevel($e, $l);
+	if ($nl and $nl->{TYPE} eq "SUBCONTEXT") {
+		$nl = GetNextLevel($e, $nl);
+	}
+	my $pl = GetPrevLevel($e, $l);
+	if ($pl and $pl->{TYPE} eq "SUBCONTEXT") {
+		$pl = GetPrevLevel($e, $pl);
+	}
+
+	if ($l->{TYPE} eq "POINTER") {
+		if ($l->{POINTER_TYPE} ne "ref") {
+			if ($recurse == 0) {
+				# Outermost optional pointer: NULL becomes Py_None.
+				$self->pidl("if ($var_name == NULL) {");
+				$self->indent;
+				$self->pidl("$py_var = Py_None;");
+				$self->pidl("Py_INCREF($py_var);");
+				$self->deindent;
+				$self->pidl("} else {");
+				$self->indent;
+			} else {
+				$self->pidl("{");
+				$self->indent;
+			}
+			$recurse = $recurse + 1;
+		}
+		my $var_name2 = $var_name;
+		my $recurse2 = $recurse;
+		unless ($nl->{TYPE} eq "DATA" and Parse::Pidl::Typelist::scalar_is_reference($nl->{DATA_TYPE})) {
+			$var_name2 = get_value_of($var_name);
+			$recurse2 = 0;
+		}
+		$self->ConvertObjectToPythonLevel($var_name, $env, $e, $nl, $var_name2, $py_var, $fail, $recurse2);
+		if ($l->{POINTER_TYPE} ne "ref") {
+			$self->deindent;
+			$self->pidl("}");
+		}
+	} elsif ($l->{TYPE} eq "ARRAY") {
+		if ($pl && $pl->{TYPE} eq "POINTER") {
+			$var_name = get_pointer_to($var_name);
+		}
+
+		if (is_charset_array($e, $l)) {
+			# FIXME: Use Unix charset setting rather than utf-8
+			$self->pidl("if ($var_name == NULL) {");
+			$self->indent;
+			$self->pidl("$py_var = Py_None;");
+			$self->pidl("Py_INCREF($py_var);");
+			$self->deindent;
+			$self->pidl("} else {");
+			$self->indent;
+			$self->pidl("$py_var = PyUnicode_Decode($var_name, strlen($var_name), \"utf-8\", \"ignore\");");
+			$self->deindent;
+			$self->pidl("}");
+		} else {
+			die("No SIZE_IS for array $var_name") unless (defined($l->{SIZE_IS}));
+			# Prefer the transmitted length over the allocated size.
+			my $length = $l->{SIZE_IS};
+			if (defined($l->{LENGTH_IS})) {
+				$length = $l->{LENGTH_IS};
+			}
+
+			$length = ParseExpr($length, $env, $e);
+			$self->pidl("$py_var = PyList_New($length);");
+			$self->fail_on_null($py_var, $fail);
+			$self->pidl("{");
+			$self->indent;
+			my $counter = "$e->{NAME}_cntr_$l->{LEVEL_INDEX}";
+			$self->pidl("int $counter;");
+			$self->pidl("for ($counter = 0; $counter < ($length); $counter++) {");
+			$self->indent;
+			my $member_var = "py_$e->{NAME}_$l->{LEVEL_INDEX}";
+			$self->pidl("PyObject *$member_var;");
+			$self->ConvertObjectToPythonLevel($var_name, $env, $e, $nl, $var_name."[$counter]", $member_var, $fail, $recurse);
+			$self->pidl("PyList_SetItem($py_var, $counter, $member_var);");
+			$self->deindent;
+			$self->pidl("}");
+			$self->deindent;
+			$self->pidl("}");
+		}
+	} elsif ($l->{TYPE} eq "SWITCH") {
+		$var_name = get_pointer_to($var_name);
+		my $switch = ParseExpr($l->{SWITCH_IS}, $env, $e);
+		my $conv = $self->ConvertObjectToPythonData($mem_ctx, $nl->{DATA_TYPE}, $var_name, $e->{ORIGINAL}, $switch);
+		$self->pidl("$py_var = $conv;");
+		$self->fail_on_null($py_var, $fail);
+
+	} elsif ($l->{TYPE} eq "DATA") {
+		if (not Parse::Pidl::Typelist::is_scalar($l->{DATA_TYPE})) {
+			$var_name = get_pointer_to($var_name);
+		}
+		my $conv = $self->ConvertObjectToPythonData($mem_ctx, $l->{DATA_TYPE}, $var_name, $e->{ORIGINAL});
+		$self->pidl("$py_var = $conv;");
+	} elsif ($l->{TYPE} eq "SUBCONTEXT") {
+		$self->ConvertObjectToPythonLevel($mem_ctx, $env, $e, $nl, $var_name, $py_var, $fail, $recurse);
+	} else {
+		fatal($e->{ORIGINAL}, "Unknown level type $l->{TYPE} $var_name");
+	}
+}
+
+# Entry point for C -> Python conversion of a whole element: start the
+# level walk at the element's outermost NDR level with recursion depth 0.
+sub ConvertObjectToPython($$$$$$)
+{
+	my ($self, $mem_ctx, $env, $ctype, $cvar, $py_var, $fail) = @_;
+
+	$self->ConvertObjectToPythonLevel($mem_ctx, $env, $ctype,
+					  $ctype->{LEVELS}[0], $cvar, $py_var, $fail, 0);
+}
+
+sub Parse($$$$$)
+{
+ my($self,$basename,$ndr,$ndr_hdr,$hdr) = @_;
+
+ $self->{BASENAME} = $basename;
+
+ $self->pidl_hdr("
+/* Python wrapper functions auto-generated by pidl */
+#define PY_SSIZE_T_CLEAN 1 /* We use Py_ssize_t for PyArg_ParseTupleAndKeywords */
+#include <Python.h>
+#include \"python/py3compat.h\"
+#include \"includes.h\"
+#include <pytalloc.h>
+#include \"librpc/rpc/pyrpc.h\"
+#include \"librpc/rpc/pyrpc_util.h\"
+#include \"$hdr\"
+#include \"$ndr_hdr\"
+
+/*
+ * These functions are here to ensure they can be optimized out by
+ * the compiler based on the constant input values
+ */
+
+static inline unsigned long long ndr_sizeof2uintmax(size_t var_size)
+{
+ switch (var_size) {
+ case 8:
+ return UINT64_MAX;
+ case 4:
+ return UINT32_MAX;
+ case 2:
+ return UINT16_MAX;
+ case 1:
+ return UINT8_MAX;
+ }
+
+ return 0;
+}
+
+static inline long long ndr_sizeof2intmax(size_t var_size)
+{
+ switch (var_size) {
+ case 8:
+ return INT64_MAX;
+ case 4:
+ return INT32_MAX;
+ case 2:
+ return INT16_MAX;
+ case 1:
+ return INT8_MAX;
+ }
+
+ return 0;
+}
+
+static inline PyObject *ndr_PyLong_FromLongLong(long long v)
+{
+ if (v > LONG_MAX || v < LONG_MIN) {
+ return PyLong_FromLongLong(v);
+ } else {
+ return PyInt_FromLong(v);
+ }
+}
+
+static inline PyObject *ndr_PyLong_FromUnsignedLongLong(unsigned long long v)
+{
+ if (v > LONG_MAX) {
+ return PyLong_FromUnsignedLongLong(v);
+ } else {
+ return PyInt_FromLong(v);
+ }
+}
+
+");
+
+ foreach my $x (@$ndr) {
+ ($x->{TYPE} eq "IMPORT") && $self->Import(@{$x->{PATHS}});
+ ($x->{TYPE} eq "INTERFACE") && $self->Interface($x, $basename);
+ }
+
+ $self->pidl("static PyMethodDef $basename\_methods[] = {");
+ $self->indent;
+ foreach (@{$self->{module_methods}}) {
+ my ($fn_name, $pyfn_name, $flags, $doc) = @$_;
+ $self->pidl("{ \"$fn_name\", (PyCFunction)$pyfn_name, $flags, $doc },");
+ }
+
+ $self->pidl("{ NULL, NULL, 0, NULL }");
+ $self->deindent;
+ $self->pidl("};");
+
+ $self->pidl("");
+
+ $self->pidl("static struct PyModuleDef moduledef = {");
+ $self->indent;
+ $self->pidl("PyModuleDef_HEAD_INIT,");
+ $self->pidl(".m_name = \"$basename\",");
+ $self->pidl(".m_doc = \"$basename DCE/RPC\",");
+ $self->pidl(".m_size = -1,");
+ $self->pidl(".m_methods = $basename\_methods,");
+ $self->deindent;
+ $self->pidl("};");
+
+ $self->pidl("MODULE_INIT_FUNC($basename)");
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("PyObject *m;");
+ foreach my $h (@{$self->{module_imports}}) {
+ $self->pidl("PyObject *$h->{'key'};");
+ }
+ $self->pidl("");
+
+ foreach my $h (@{$self->{module_imports}}) {
+ my $var_name = $h->{'key'};
+ my $module_path = $h->{'val'};
+ $self->pidl("$var_name = PyImport_ImportModule(\"$module_path\");");
+ $self->pidl("if ($var_name == NULL)");
+ $self->pidl("\treturn NULL;");
+ $self->pidl("");
+ }
+
+ foreach my $h (@{$self->{type_imports}}) {
+ my $type_var = "$h->{'key'}\_Type";
+ my $module_path = $h->{'val'};
+ $self->pidl_hdr("static PyTypeObject *$type_var;\n");
+ my $pretty_name = PrettifyTypeName($h->{'key'}, $module_path);
+ my $module_var = "dep_$module_path";
+ $module_var =~ s/\./_/g;
+ $self->pidl("$type_var = (PyTypeObject *)PyObject_GetAttrString($module_var, \"$pretty_name\");");
+ $self->pidl("if ($type_var == NULL)");
+ $self->pidl("\treturn NULL;");
+ $self->pidl("");
+ }
+
+ $self->pidl($_) foreach (@{$self->{prereadycode}});
+
+ foreach (@{$self->{ready_types}}) {
+ $self->pidl("if (PyType_Ready($_) < 0)");
+ $self->pidl("\treturn NULL;");
+ }
+
+ $self->pidl($_) foreach (@{$self->{postreadycode}});
+
+ foreach (@{$self->{patch_type_calls}}) {
+ my ($typename, $cvar) = @$_;
+ $self->pidl("#ifdef PY_".uc($typename)."_PATCH");
+ $self->pidl("PY_".uc($typename)."_PATCH($cvar);");
+ $self->pidl("#endif");
+ }
+
+ $self->pidl("");
+
+ $self->pidl("m = PyModule_Create(&moduledef);");
+ $self->pidl("if (m == NULL)");
+ $self->pidl("\treturn NULL;");
+ $self->pidl("");
+ foreach my $h (@{$self->{constants}}) {
+ my $pretty_name = PrettifyTypeName($h->{'key'}, $basename);
+ my $py_obj;
+ my ($ctype, $cvar) = @{$h->{'val'}};
+ if ($cvar =~ /^[0-9]+$/ or $cvar =~ /^0x[0-9a-fA-F]+$/) {
+ $py_obj = "ndr_PyLong_FromUnsignedLongLong($cvar)";
+ } elsif ($cvar =~ /^".*"$/) {
+ $py_obj = "PyStr_FromString($cvar)";
+ } else {
+ $py_obj = $self->ConvertObjectToPythonData("NULL", expandAlias($ctype), $cvar, undef);
+ }
+
+ $self->pidl("PyModule_AddObject(m, \"$pretty_name\", $py_obj);");
+ }
+
+ foreach (@{$self->{module_objects}}) {
+ my ($object_name, $c_name) = @$_;
+ $self->pidl("Py_INCREF($c_name);");
+ $self->pidl("PyModule_AddObject(m, \"$object_name\", $c_name);");
+ }
+
+ $self->pidl("#ifdef PY_MOD_".uc($basename)."_PATCH");
+ $self->pidl("PY_MOD_".uc($basename)."_PATCH(m);");
+ $self->pidl("#endif");
+
+ $self->pidl("return m;");
+ $self->pidl("");
+ $self->deindent;
+ $self->pidl("}");
+ return ($self->{res_hdr} . $self->{res});
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/TDR.pm b/tools/pidl/lib/Parse/Pidl/Samba4/TDR.pm
new file mode 100644
index 0000000..c074930
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/TDR.pm
@@ -0,0 +1,283 @@
+###################################################
+# Trivial Parser Generator
+# Copyright jelmer@samba.org 2005-2007
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::TDR;
+use Parse::Pidl qw(fatal);
+use Parse::Pidl::Util qw(has_property ParseExpr is_constant);
+use Parse::Pidl::Samba4 qw(is_intree choose_header);
+use Parse::Pidl::Typelist qw(mapTypeName);
+
+use Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(ParserType $ret $ret_hdr);
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use strict;
+
+sub new($) {
+ my ($class) = shift;
+ my $self = { ret => "", ret_hdr => "", tabs => "" };
+ bless($self, $class);
+}
+
+sub indent($) { my $self = shift; $self->{tabs}.="\t"; }
+sub deindent($) { my $self = shift; $self->{tabs} = substr($self->{tabs}, 1); }
+sub pidl($$) { my $self = shift; $self->{ret} .= $self->{tabs}.(shift)."\n"; }
+sub pidl_hdr($$) { my $self = shift; $self->{ret_hdr} .= (shift)."\n"; }
+sub typearg($) {
+ my $t = shift;
+ return(", const char *name") if ($t eq "print");
+ return(", TALLOC_CTX *mem_ctx") if ($t eq "pull");
+ return("");
+}
+
+sub fn_declare($$$)
+{
+ my ($self, $p, $d) = @_;
+ if ($p) {
+ $self->pidl($d); $self->pidl_hdr("$d;");
+ } else {
+ $self->pidl("static $d");
+ }
+}
+
+sub ContainsArray($)
+{
+ my $e = shift;
+ foreach (@{$e->{ELEMENTS}}) {
+ next if (has_property($_, "charset") and
+ scalar(@{$_->{ARRAY_LEN}}) == 1);
+ return 1 if (defined($_->{ARRAY_LEN}) and
+ scalar(@{$_->{ARRAY_LEN}}) > 0);
+ }
+ return 0;
+}
+
+sub ParserElement($$$$)
+{
+ my ($self, $e,$t,$env) = @_;
+ my $switch = "";
+ my $array = "";
+ my $name = "";
+ my $mem_ctx = "mem_ctx";
+
+ fatal($e,"Pointers not supported in TDR") if ($e->{POINTERS} > 0);
+ fatal($e,"size_is() not supported in TDR") if (has_property($e, "size_is"));
+ fatal($e,"length_is() not supported in TDR") if (has_property($e, "length_is"));
+
+ if ($t eq "print") {
+ $name = ", \"$e->{NAME}\"$array";
+ }
+
+ if (has_property($e, "flag")) {
+ $self->pidl("{");
+ $self->indent;
+ $self->pidl("uint32_t saved_flags = tdr->flags;");
+ $self->pidl("tdr->flags |= $e->{PROPERTIES}->{flag};");
+ }
+
+ if (has_property($e, "charset")) {
+ fatal($e,"charset() on non-array element") unless (defined($e->{ARRAY_LEN}) and scalar(@{$e->{ARRAY_LEN}}) > 0);
+
+ my $len = ParseExpr(@{$e->{ARRAY_LEN}}[0], $env, $e);
+ if ($len eq "*") { $len = "-1"; }
+ $name = ", mem_ctx" if ($t eq "pull");
+ $self->pidl("TDR_CHECK(tdr_$t\_charset(tdr$name, &v->$e->{NAME}, $len, sizeof($e->{TYPE}_t), CH_$e->{PROPERTIES}->{charset}));");
+ return;
+ }
+
+ if (has_property($e, "switch_is")) {
+ $switch = ", " . ParseExpr($e->{PROPERTIES}->{switch_is}, $env, $e);
+ }
+
+ if (defined($e->{ARRAY_LEN}) and scalar(@{$e->{ARRAY_LEN}}) > 0) {
+ my $len = ParseExpr($e->{ARRAY_LEN}[0], $env, $e);
+
+ if ($t eq "pull" and not is_constant($len)) {
+ $self->pidl("TDR_ALLOC(mem_ctx, v->$e->{NAME}, $len);");
+ $mem_ctx = "v->$e->{NAME}";
+ }
+
+ $self->pidl("for (i = 0; i < $len; i++) {");
+ $self->indent;
+ $array = "[i]";
+ }
+
+ if ($t eq "pull") {
+ $name = ", $mem_ctx";
+ }
+
+ if (has_property($e, "value") && $t eq "push") {
+ $self->pidl("v->$e->{NAME} = ".ParseExpr($e->{PROPERTIES}->{value}, $env, $e).";");
+ }
+
+ $self->pidl("TDR_CHECK(tdr_$t\_$e->{TYPE}(tdr$name$switch, &v->$e->{NAME}$array));");
+
+ if ($array) { $self->deindent; $self->pidl("}"); }
+
+ if (has_property($e, "flag")) {
+ $self->pidl("tdr->flags = saved_flags;");
+ $self->deindent;
+ $self->pidl("}");
+ }
+}
+
+sub ParserStruct($$$$$)
+{
+ my ($self, $e,$t,$p) = @_;
+
+ $self->fn_declare($p,"NTSTATUS tdr_$t\_$e->{NAME} (struct tdr_$t *tdr".typearg($t).", struct $e->{NAME} *v)");
+ $self->pidl("{"); $self->indent;
+ $self->pidl("int i;") if (ContainsArray($e));
+
+ if ($t eq "print") {
+ $self->pidl("tdr->print(tdr, \"\%-25s: struct $e->{NAME}\", name);");
+ $self->pidl("tdr->level++;");
+ }
+
+ my %env = map { $_->{NAME} => "v->$_->{NAME}" } @{$e->{ELEMENTS}};
+ $env{"this"} = "v";
+ $self->ParserElement($_, $t, \%env) foreach (@{$e->{ELEMENTS}});
+
+ if ($t eq "print") {
+ $self->pidl("tdr->level--;");
+ }
+
+ $self->pidl("return NT_STATUS_OK;");
+
+ $self->deindent; $self->pidl("}");
+}
+
+sub ParserUnion($$$$)
+{
+ my ($self, $e,$t,$p) = @_;
+
+ $self->fn_declare($p,"NTSTATUS tdr_$t\_$e->{NAME}(struct tdr_$t *tdr".typearg($t).", int level, union $e->{NAME} *v)");
+ $self->pidl("{"); $self->indent;
+ $self->pidl("int i;") if (ContainsArray($e));
+
+ if ($t eq "print") {
+ $self->pidl("tdr->print(tdr, \"\%-25s: union $e->{NAME}\", name);");
+ $self->pidl("tdr->level++;");
+ }
+
+ $self->pidl("switch (level) {"); $self->indent;
+ foreach (@{$e->{ELEMENTS}}) {
+ if (has_property($_, "case")) {
+ $self->pidl("case " . $_->{PROPERTIES}->{case} . ":");
+ } elsif (has_property($_, "default")) {
+ $self->pidl("default:");
+ }
+ $self->indent; $self->ParserElement($_, $t, {}); $self->deindent;
+ $self->pidl("break;");
+ }
+ $self->deindent; $self->pidl("}");
+
+ if ($t eq "print") {
+ $self->pidl("tdr->level--;");
+ }
+
+ $self->pidl("return NT_STATUS_OK;\n");
+ $self->deindent; $self->pidl("}");
+}
+
+sub ParserBitmap($$$$)
+{
+ my ($self,$e,$t,$p) = @_;
+ return if ($p);
+ $self->pidl("#define tdr_$t\_$e->{NAME} tdr_$t\_" . Parse::Pidl::Typelist::bitmap_type_fn($e));
+}
+
+sub ParserEnum($$$$)
+{
+ my ($self,$e,$t,$p) = @_;
+ my $bt = Parse::Pidl::Typelist::enum_type_fn($e);
+ my $mt = mapTypeName($bt);
+
+ $self->fn_declare($p, "NTSTATUS tdr_$t\_$e->{NAME} (struct tdr_$t *tdr".typearg($t).", enum $e->{NAME} *v)");
+ $self->pidl("{");
+ if ($t eq "pull") {
+ $self->pidl("\t$mt r;");
+ $self->pidl("\tTDR_CHECK(tdr_$t\_$bt(tdr, mem_ctx, \&r));");
+ $self->pidl("\t*v = r;");
+ } elsif ($t eq "push") {
+ $self->pidl("\tTDR_CHECK(tdr_$t\_$bt(tdr, ($mt *)v));");
+ } elsif ($t eq "print") {
+ $self->pidl("\t/* FIXME */");
+ }
+ $self->pidl("\treturn NT_STATUS_OK;");
+ $self->pidl("}");
+}
+
+sub ParserTypedef($$$$)
+{
+ my ($self, $e,$t,$p) = @_;
+
+ $self->ParserType($e->{DATA},$t);
+}
+
+sub ParserType($$$)
+{
+ my ($self, $e,$t) = @_;
+
+ return if (has_property($e, "no$t"));
+
+ my $handlers = {
+ STRUCT => \&ParserStruct, UNION => \&ParserUnion,
+ ENUM => \&ParserEnum, BITMAP => \&ParserBitmap,
+ TYPEDEF => \&ParserTypedef
+ };
+
+ $handlers->{$e->{TYPE}}->($self, $e, $t, has_property($e, "public"))
+ if (defined($handlers->{$e->{TYPE}}));
+
+ $self->pidl("");
+}
+
+sub ParserInterface($$)
+{
+ my ($self,$x) = @_;
+
+ $self->pidl_hdr("#ifndef __TDR_$x->{NAME}_HEADER__");
+ $self->pidl_hdr("#define __TDR_$x->{NAME}_HEADER__");
+
+ foreach (@{$x->{DATA}}) {
+ $self->ParserType($_, "pull");
+ $self->ParserType($_, "push");
+ $self->ParserType($_, "print");
+ }
+
+ $self->pidl_hdr("#endif /* __TDR_$x->{NAME}_HEADER__ */");
+}
+
+sub Parser($$$$)
+{
+ my ($self,$idl,$hdrname,$baseheader) = @_;
+ $self->pidl("/* autogenerated by pidl */");
+ if (is_intree()) {
+ $self->pidl("#include \"includes.h\"");
+ } else {
+ $self->pidl("#include <stdio.h>");
+ $self->pidl("#include <stdbool.h>");
+ $self->pidl("#include <stdlib.h>");
+ $self->pidl("#include <stdint.h>");
+ $self->pidl("#include <stdarg.h>");
+ $self->pidl("#include <string.h>");
+ $self->pidl("#include <core/ntstatus.h>");
+ }
+ $self->pidl("#include \"$hdrname\"");
+ $self->pidl("");
+ $self->pidl_hdr("/* autogenerated by pidl */");
+ $self->pidl_hdr("#include \"$baseheader\"");
+ $self->pidl_hdr(choose_header("lib/tdr/tdr.h", "tdr.h"));
+ $self->pidl_hdr("");
+
+ foreach (@$idl) { $self->ParserInterface($_) if ($_->{TYPE} eq "INTERFACE"); }
+ return ($self->{ret_hdr}, $self->{ret});
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Samba4/Template.pm b/tools/pidl/lib/Parse/Pidl/Samba4/Template.pm
new file mode 100644
index 0000000..175bb12
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Samba4/Template.pm
@@ -0,0 +1,92 @@
+###################################################
+# server template function generator
+# Copyright tridge@samba.org 2003
+# released under the GNU GPL
+
+package Parse::Pidl::Samba4::Template;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use Parse::Pidl::Util qw(genpad);
+
+use strict;
+
+my($res);
+
+#####################################################################
+# produce boilerplate code for an interface
+sub Template($)
+{
+ my($interface) = shift;
+ my($data) = $interface->{DATA};
+ my $name = $interface->{NAME};
+
+ $res .=
+"/*
+ Unix SMB/CIFS implementation.
+
+ endpoint server for the $name pipe
+
+ Copyright (C) YOUR NAME HERE YEAR
+
+ SPDX-License-Identifier: GPL-3.0-or-later
+*/
+
+#include \"includes.h\"
+#include \"rpc_server/dcerpc_server.h\"
+#include \"librpc/gen_ndr/ndr_$name.h\"
+#include \"rpc_server/common/common.h\"
+
+";
+
+ foreach my $d (@{$data}) {
+ if ($d->{TYPE} eq "FUNCTION") {
+ my $fname = $d->{NAME};
+ my $pad = genpad("static $d->{RETURN_TYPE} dcesrv_$fname");
+ $res .=
+"
+/*
+ $fname
+*/
+
+static $d->{RETURN_TYPE} dcesrv_$fname(struct dcesrv_call_state *dce_call,
+$pad"."TALLOC_CTX *mem_ctx,
+$pad"."struct $fname *r)
+{
+";
+
+ if ($d->{RETURN_TYPE} eq "void") {
+ $res .= "\tDCESRV_FAULT_VOID(DCERPC_FAULT_OP_RNG_ERROR);\n";
+ } else {
+ $res .= "\tDCESRV_FAULT(DCERPC_FAULT_OP_RNG_ERROR);\n";
+ }
+
+ $res .= "}
+
+";
+ }
+ }
+
+ $res .=
+"
+/* include the generated boilerplate */
+#include \"librpc/gen_ndr/ndr_$name\_s.c\"
+"
+}
+
+
+#####################################################################
+# parse a parsed IDL structure back into an IDL file
+sub Parse($)
+{
+ my($idl) = shift;
+ $res = "";
+ foreach my $x (@{$idl}) {
+ ($x->{TYPE} eq "INTERFACE") &&
+ Template($x);
+ }
+ return $res;
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Typelist.pm b/tools/pidl/lib/Parse/Pidl/Typelist.pm
new file mode 100644
index 0000000..774554f
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Typelist.pm
@@ -0,0 +1,354 @@
+###################################################
+# Samba4 parser generator for IDL structures
+# Copyright jelmer@samba.org 2005
+# released under the GNU GPL
+
+package Parse::Pidl::Typelist;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(hasType getType resolveType mapTypeName scalar_is_reference expandAlias
+ mapScalarType addType typeIs is_signed is_scalar enum_type_fn
+ bitmap_type_fn mapType typeHasBody is_fixed_size_scalar
+);
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use Parse::Pidl::Util qw(has_property);
+use strict;
+
+my %types = ();
+
+my @reference_scalars = (
+ "string", "string_array", "nbt_string", "dns_string",
+ "wrepl_nbt_name", "dnsp_name", "dnsp_string",
+ "ipv4address", "ipv6address"
+);
+
+my @non_fixed_size_scalars = (
+ "string", "string_array", "nbt_string", "dns_string",
+ "wrepl_nbt_name", "dnsp_name", "dnsp_string"
+);
+
+# a list of known scalar types
+my %scalars = (
+ "void" => "void",
+ "char" => "char",
+ "int8" => "int8_t",
+ "uint8" => "uint8_t",
+ "int16" => "int16_t",
+ "uint16" => "uint16_t",
+ "int1632" => "int16_t",
+ "uint1632" => "uint16_t",
+ "int32" => "int32_t",
+ "uint32" => "uint32_t",
+ "int3264" => "int32_t",
+ "uint3264" => "uint32_t",
+ "hyper" => "uint64_t",
+ "dlong" => "int64_t",
+ "udlong" => "uint64_t",
+ "udlongr" => "uint64_t",
+ "double" => "double",
+ "pointer" => "void*",
+ "DATA_BLOB" => "DATA_BLOB",
+ "string" => "const char *",
+ "string_array" => "const char **",
+ "time_t" => "time_t",
+ "uid_t" => "uid_t",
+ "gid_t" => "gid_t",
+ "NTTIME" => "NTTIME",
+ "NTTIME_1sec" => "NTTIME",
+ "NTTIME_hyper" => "NTTIME",
+ "WERROR" => "WERROR",
+ "HRESULT" => "HRESULT",
+ "NTSTATUS" => "NTSTATUS",
+ "COMRESULT" => "COMRESULT",
+ "dns_string" => "const char *",
+ "nbt_string" => "const char *",
+ "wrepl_nbt_name"=> "struct nbt_name *",
+ "ipv4address" => "const char *",
+ "ipv6address" => "const char *",
+ "dnsp_name" => "const char *",
+ "dnsp_string" => "const char *",
+);
+
+my %aliases = (
+ "error_status_t" => "uint32",
+ "boolean8" => "uint8",
+ "boolean32" => "uint32",
+ "DWORD" => "uint32",
+ "uint" => "uint32",
+ "int" => "int32",
+ "WORD" => "uint16",
+ "char" => "uint8",
+ "long" => "int32",
+ "short" => "int16",
+ "HYPER_T" => "hyper",
+ "mode_t" => "uint32",
+);
+
+sub expandAlias($)
+{
+ my $name = shift;
+
+ return $aliases{$name} if defined($aliases{$name});
+
+ return $name;
+}
+
+# map from a IDL type to a C header type
+sub mapScalarType($)
+{
+ my $name = shift;
+
+ # it's a bug when a type is not in the list
+ # of known scalars or has no mapping
+ return $scalars{$name} if defined($scalars{$name});
+
+ die("Unknown scalar type $name");
+}
+
+sub addType($)
+{
+ my $t = shift;
+ $types{$t->{NAME}} = $t;
+}
+
+sub resolveType($)
+{
+ my ($ctype) = @_;
+
+ if (not hasType($ctype)) {
+ # assume struct typedef
+ return { TYPE => "TYPEDEF", NAME => $ctype, DATA => { TYPE => "STRUCT" } };
+ } else {
+ return getType($ctype);
+ }
+
+ return $ctype;
+}
+
+sub getType($)
+{
+ my $t = shift;
+ return ($t) if (ref($t) eq "HASH" and not defined($t->{NAME}));
+ return undef if not hasType($t);
+ return $types{$t->{NAME}} if (ref($t) eq "HASH");
+ return $types{$t};
+}
+
+sub typeIs($$);
+sub typeIs($$)
+{
+ my ($t,$tt) = @_;
+
+ if (ref($t) eq "HASH") {
+ return 1 if ($t->{TYPE} eq "TYPEDEF" and $t->{DATA}->{TYPE} eq $tt);
+ return 1 if ($t->{TYPE} eq $tt);
+ return 0;
+ }
+ if (hasType($t) and getType($t)->{TYPE} eq "TYPEDEF") {
+ return typeIs(getType($t)->{DATA}, $tt);
+ }
+ return 0;
+}
+
+sub hasType($)
+{
+ my $t = shift;
+ if (ref($t) eq "HASH") {
+ return 1 if (not defined($t->{NAME}));
+ return 1 if (defined($types{$t->{NAME}}) and
+ $types{$t->{NAME}}->{TYPE} eq $t->{TYPE});
+ return 0;
+ }
+ return 1 if defined($types{$t});
+ return 0;
+}
+
+sub is_signed($)
+{
+ my $t = shift;
+
+ return ($t eq "int8"
+ or $t eq "int16"
+ or $t eq "int32"
+ or $t eq "dlong"
+ or $t eq "int"
+ or $t eq "long"
+ or $t eq "short");
+}
+
+sub is_scalar($)
+{
+ sub is_scalar($);
+ my $type = shift;
+
+ return 1 if (ref($type) eq "HASH" and
+ ($type->{TYPE} eq "SCALAR" or $type->{TYPE} eq "ENUM" or
+ $type->{TYPE} eq "BITMAP"));
+
+ if (my $dt = getType($type)) {
+ return is_scalar($dt->{DATA}) if ($dt->{TYPE} eq "TYPEDEF");
+ return 1 if ($dt->{TYPE} eq "SCALAR" or $dt->{TYPE} eq "ENUM" or
+ $dt->{TYPE} eq "BITMAP");
+ }
+
+ return 0;
+}
+
+sub is_fixed_size_scalar($)
+{
+ my $name = shift;
+
+ return 0 unless is_scalar($name);
+ return 0 if (grep(/^$name$/, @non_fixed_size_scalars));
+ return 1;
+}
+
+sub scalar_is_reference($)
+{
+ my $name = shift;
+
+ return 1 if (grep(/^$name$/, @reference_scalars));
+ return 0;
+}
+
+sub RegisterScalars()
+{
+ foreach (keys %scalars) {
+ addType({
+ NAME => $_,
+ TYPE => "TYPEDEF",
+ BASEFILE => "<builtin>",
+ DATA => {
+ TYPE => "SCALAR",
+ NAME => $_
+ }
+ }
+ );
+ }
+}
+
+sub enum_type_fn($)
+{
+ my $enum = shift;
+ $enum->{TYPE} eq "ENUM" or die("not an enum");
+
+ # for typedef enum { } we need to check $enum->{PARENT}
+ if (has_property($enum, "enum8bit")) {
+ return "uint8";
+ } elsif (has_property($enum, "enum16bit")) {
+ return "uint16";
+ } elsif (has_property($enum, "v1_enum")) {
+ return "uint32";
+ } elsif (has_property($enum->{PARENT}, "enum8bit")) {
+ return "uint8";
+ } elsif (has_property($enum->{PARENT}, "enum16bit")) {
+ return "uint16";
+ } elsif (has_property($enum->{PARENT}, "v1_enum")) {
+ return "uint32";
+ }
+ return "uint1632";
+}
+
+sub bitmap_type_fn($)
+{
+ my $bitmap = shift;
+
+ $bitmap->{TYPE} eq "BITMAP" or die("not a bitmap");
+
+ if (has_property($bitmap, "bitmap8bit")) {
+ return "uint8";
+ } elsif (has_property($bitmap, "bitmap16bit")) {
+ return "uint16";
+ } elsif (has_property($bitmap, "bitmap64bit")) {
+ return "hyper";
+ }
+ return "uint32";
+}
+
+sub typeHasBody($)
+{
+ sub typeHasBody($);
+ my ($e) = @_;
+
+ if ($e->{TYPE} eq "TYPEDEF") {
+ return 0 unless(defined($e->{DATA}));
+ return typeHasBody($e->{DATA});
+ }
+
+ return defined($e->{ELEMENTS});
+}
+
+sub mapType($$)
+{
+ sub mapType($$);
+ my ($t, $n) = @_;
+
+ return mapType($t->{DATA}, $n) if ($t->{TYPE} eq "TYPEDEF");
+ return mapScalarType($n) if ($t->{TYPE} eq "SCALAR");
+ return "enum $n" if ($t->{TYPE} eq "ENUM");
+ return "struct $n" if ($t->{TYPE} eq "STRUCT" or $t->{TYPE} eq "INTERFACE");
+ return "union $n" if ($t->{TYPE} eq "UNION");
+ return mapScalarType(bitmap_type_fn($t)) if ($t->{TYPE} eq "BITMAP");
+ return "struct $n" if ($t->{TYPE} eq "PIPE");
+ die("Unknown type $t->{TYPE}");
+}
+
+sub mapTypeName($)
+{
+ my $t = shift;
+ return "void" unless defined($t);
+ my $dt;
+ $t = expandAlias($t);
+
+ if ($dt = getType($t)) {
+ return mapType($dt, $dt->{NAME});
+ } elsif (ref($t) eq "HASH" and defined($t->{NAME})) {
+ return mapType($t, $t->{NAME});
+ } else {
+ # Best guess
+ return "struct $t";
+ }
+
+}
+
+sub LoadIdl($;$)
+{
+ my $idl = shift;
+ my $basename = shift;
+
+ foreach my $x (@{$idl}) {
+ next if $x->{TYPE} ne "INTERFACE";
+
+ # DCOM interfaces can be types as well
+ addType({
+ NAME => $x->{NAME},
+ TYPE => "TYPEDEF",
+ DATA => $x,
+ BASEFILE => $basename,
+ }) if (has_property($x, "object"));
+
+ foreach my $y (@{$x->{DATA}}) {
+ if ($y->{TYPE} eq "TYPEDEF"
+ or $y->{TYPE} eq "UNION"
+ or $y->{TYPE} eq "STRUCT"
+ or $y->{TYPE} eq "ENUM"
+ or $y->{TYPE} eq "BITMAP"
+ or $y->{TYPE} eq "PIPE") {
+ $y->{BASEFILE} = $basename;
+ addType($y);
+ }
+ }
+ }
+}
+
+sub GenerateTypeLib()
+{
+ return Parse::Pidl::Util::MyDumper(\%types);
+}
+
+RegisterScalars();
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Util.pm b/tools/pidl/lib/Parse/Pidl/Util.pm
new file mode 100644
index 0000000..83e2393
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Util.pm
@@ -0,0 +1,197 @@
+###################################################
+# utility functions to support pidl
+# Copyright tridge@samba.org 2000
+# released under the GNU GPL
+package Parse::Pidl::Util;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT = qw(has_property property_matches ParseExpr ParseExprExt is_constant make_str unmake_str print_uuid MyDumper genpad);
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use strict;
+
+use Parse::Pidl::Expr;
+use Parse::Pidl qw(error);
+
+=head1 NAME
+
+Parse::Pidl::Util - Generic utility functions for pidl
+
+=head1 SYNOPSIS
+
+use Parse::Pidl::Util;
+
+=head1 DESCRIPTION
+
+Simple module that contains a couple of trivial helper functions
+used throughout the various pidl modules.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+=item B<MyDumper>
+a dumper wrapper to prevent dependence on the Data::Dumper module
+unless we actually need it
+
+=cut
+
+sub MyDumper($)
+{
+ require Data::Dumper;
+ $Data::Dumper::Sortkeys = 1;
+ my $s = shift;
+ return Data::Dumper::Dumper($s);
+}
+
+=item B<has_property>
+see if a pidl property list contains a given property
+
+=cut
+sub has_property($$)
+{
+ my($e, $p) = @_;
+
+ return undef if (not defined($e->{PROPERTIES}));
+
+ return $e->{PROPERTIES}->{$p};
+}
+
+=item B<property_matches>
+see if a pidl property matches a value
+
+=cut
+sub property_matches($$$)
+{
+ my($e,$p,$v) = @_;
+
+ if (!defined has_property($e, $p)) {
+ return undef;
+ }
+
+ if ($e->{PROPERTIES}->{$p} =~ /$v/) {
+ return 1;
+ }
+
+ return undef;
+}
+
+=item B<is_constant>
+return 1 if the string is a C constant
+
+=cut
+sub is_constant($)
+{
+ my $s = shift;
+ return 1 if ($s =~ /^\d+$/);
+ return 1 if ($s =~ /^0x[0-9A-Fa-f]+$/);
+ return 0;
+}
+
+=item B<make_str>
+return a "" quoted string, unless already quoted
+
+=cut
+sub make_str($)
+{
+ my $str = shift;
+ if (substr($str, 0, 1) eq "\"") {
+ return $str;
+ }
+ return "\"$str\"";
+}
+
+=item B<unmake_str>
+unquote a "" quoted string
+
+=cut
+sub unmake_str($)
+{
+ my $str = shift;
+
+ $str =~ s/^\"(.*)\"$/$1/;
+
+ return $str;
+}
+
+=item B<print_uuid>
+Print C representation of a UUID.
+
+=cut
+sub print_uuid($)
+{
+ my ($uuid) = @_;
+ $uuid =~ s/"//g;
+ my ($time_low,$time_mid,$time_hi,$clock_seq,$node) = split /-/, $uuid;
+ return undef if not defined($node);
+
+ my @clock_seq = $clock_seq =~ /(..)/g;
+ my @node = $node =~ /(..)/g;
+
+ return "{0x$time_low,0x$time_mid,0x$time_hi," .
+ "{".join(',', map {"0x$_"} @clock_seq)."}," .
+ "{".join(',', map {"0x$_"} @node)."}}";
+}
+
+=item B<ParseExpr>
+Interpret an IDL expression, substituting particular variables.
+
+=cut
+sub ParseExpr($$$)
+{
+ my($expr, $varlist, $e) = @_;
+
+ my $x = new Parse::Pidl::Expr();
+
+ return $x->Run($expr, sub { my $x = shift; error($e, $x); },
+ # Lookup fn
+ sub { my $x = shift;
+ return($varlist->{$x}) if (defined($varlist->{$x}));
+ return $x;
+ },
+ undef, undef);
+}
+
+=item B<ParseExprExt>
+Interpret an IDL expression, substituting particular variables. Can call
+callbacks when pointers are being dereferenced or variables are being used.
+
+=cut
+sub ParseExprExt($$$$$)
+{
+ my($expr, $varlist, $e, $deref, $use) = @_;
+
+ my $x = new Parse::Pidl::Expr();
+
+ return $x->Run($expr, sub { my $x = shift; error($e, $x); },
+ # Lookup fn
+ sub { my $x = shift;
+ return($varlist->{$x}) if (defined($varlist->{$x}));
+ return $x;
+ },
+ $deref, $use);
+}
+
+=item B<genpad>
+return a padding string consisting of tabs and spaces suitable for proper indent
+of C-functions.
+
+=cut
+sub genpad($)
+{
+ my ($s) = @_;
+ my $nt = int((length($s)+1)/8);
+ my $lt = ($nt*8)-1;
+ my $ns = (length($s)-$lt);
+ return "\t"x($nt)." "x($ns);
+}
+
+=back
+
+=cut
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Wireshark/Conformance.pm b/tools/pidl/lib/Parse/Pidl/Wireshark/Conformance.pm
new file mode 100644
index 0000000..01a8c47
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Wireshark/Conformance.pm
@@ -0,0 +1,509 @@
+###################################################
+# parse a Wireshark conformance file
+# Copyright jelmer@samba.org 2005
+# released under the GNU GPL
+
+=pod
+
+=head1 NAME
+
+Parse::Pidl::Wireshark::Conformance - Conformance file parser for Wireshark
+
+=head1 DESCRIPTION
+
+This module supports parsing Wireshark conformance files (*.cnf).
+
+=head1 FILE FORMAT
+
+Pidl needs additional data for Wireshark output. This data is read from
+so-called conformance files. This section describes the format of these
+files.
+
+Conformance files are simple text files with a single command on each line.
+Empty lines and lines starting with a '#' character are ignored.
+Arguments to commands are separated by spaces.
+
+The following commands are currently supported:
+
+=over 4
+
+=item I<TYPE> name dissector ft_type base_type mask valsstring alignment
+
+Register new data type with specified name, what dissector function to call
+and what properties to give header fields for elements of this type.
+
+=item I<NOEMIT> type
+
+Suppress emitting a dissect_type function for the specified type
+
+=item I<PARAM_VALUE> type param
+
+Set parameter to specify to dissector function for given type.
+
+=item I<HF_FIELD> hf title filter ft_type base_type valsstring mask description
+
+Generate a custom header field with specified properties.
+
+=item I<HF_RENAME> old_hf_name new_hf_name
+
+Force the use of new_hf_name when the parser generator was going to
+use old_hf_name.
+
+This can be used in conjunction with HF_FIELD in order to make more than
+one element use the same filter name.
+
+=item I<ETT_FIELD> ett
+
+Register a custom ett field
+
+=item I<STRIP_PREFIX> prefix
+
+Remove the specified prefix from all function names (if present).
+
+=item I<PROTOCOL> longname shortname filtername
+
+Change the short-, long- and filter-name for the current interface in
+Wireshark.
+
+=item I<FIELD_DESCRIPTION> field desc
+
+Change description for the specified header field. `field' is the hf name of the field.
+
+=item I<IMPORT> dissector code...
+
+Code to insert when generating the specified dissector. @HF@ and
+@PARAM@ will be substituted.
+
+=item I<INCLUDE> filename
+
+Include conformance data from the specified filename in the dissector.
+
+=item I<TFS> hf_name "true string" "false string"
+
+Override the text shown when a bitmap boolean value is enabled or disabled.
+
+=item I<MANUAL> fn_name
+
+Force pidl to not generate a particular function but allow the user
+to write a function manually. This can be used to remove the function
+for only one level for a particular element rather than all the functions and
+ett/hf variables for a particular element as the NOEMIT command does.
+
+=item I<CODE START>/I<CODE END>
+Begin and end a section of code to be put directly into the generated
+source file for the dissector.
+
+=item I<HEADER START>/I<HEADER END>
+Begin and end a section of code to be put directly into the generated
+header file for the dissector.
+
+=back
+
+=head1 EXAMPLE
+
+ INFO_KEY OpenKey.Ke
+
+=cut
+
+package Parse::Pidl::Wireshark::Conformance;
+
+require Exporter;
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(ReadConformance ReadConformanceFH valid_ft_type valid_base_type);
+
+use strict;
+
+use Parse::Pidl qw(fatal warning error);
+use Parse::Pidl::Util qw(has_property);
+use Parse::Pidl::Typelist qw(addType);
+
+sub handle_type($$$$$$$$$$)
+{
+ my ($pos,$data,$name,$dissectorname,$ft_type,$base_type,$mask,$valsstring,$alignment) = @_;
+
+ unless(defined($alignment)) {
+ error($pos, "incomplete TYPE command");
+ return;
+ }
+
+ unless ($dissectorname =~ /.*dissect_.*/) {
+ warning($pos, "dissector name does not contain `dissect'");
+ }
+
+ unless(valid_ft_type($ft_type)) {
+ warning($pos, "invalid FT_TYPE `$ft_type'");
+ }
+
+ unless (valid_base_type($base_type)) {
+ warning($pos, "invalid BASE_TYPE `$base_type'");
+ }
+
+ $dissectorname =~ s/^\"(.*)\"$/$1/g;
+
+ if (not ($dissectorname =~ /;$/)) {
+ warning($pos, "missing semicolon");
+ }
+
+ $data->{types}->{$name} = {
+ NAME => $name,
+ POS => $pos,
+ USED => 0,
+ DISSECTOR_NAME => $dissectorname,
+ FT_TYPE => $ft_type,
+ BASE_TYPE => $base_type,
+ MASK => $mask,
+ VALSSTRING => $valsstring,
+ ALIGNMENT => $alignment
+ };
+
+ addType({
+ NAME => $name,
+ TYPE => "CONFORMANCE",
+ BASEFILE => "conformance file",
+ DATA => {
+ NAME => $name,
+ TYPE => "CONFORMANCE",
+ ALIGN => $alignment
+ }
+ });
+}
+
+sub handle_tfs($$$$$)
+{
+ my ($pos,$data,$hf,$trues,$falses) = @_;
+
+ unless(defined($falses)) {
+ error($pos, "incomplete TFS command");
+ return;
+ }
+
+ $data->{tfs}->{$hf} = {
+ TRUE_STRING => $trues,
+ FALSE_STRING => $falses
+ };
+}
+
+sub handle_hf_rename($$$$)
+{
+ my ($pos,$data,$old,$new) = @_;
+
+ unless(defined($new)) {
+ warning($pos, "incomplete HF_RENAME command");
+ return;
+ }
+
+ $data->{hf_renames}->{$old} = {
+ OLDNAME => $old,
+ NEWNAME => $new,
+ POS => $pos,
+ USED => 0
+ };
+}
+
+sub handle_param_value($$$$)
+{
+ my ($pos,$data,$dissector_name,$value) = @_;
+
+ unless(defined($value)) {
+ error($pos, "incomplete PARAM_VALUE command");
+ return;
+ }
+
+ $data->{dissectorparams}->{$dissector_name} = {
+ DISSECTOR => $dissector_name,
+ PARAM => $value,
+ POS => $pos,
+ USED => 0
+ };
+}
+
+sub valid_base_type($)
+{
+ my $t = shift;
+ return 0 unless($t =~ /^BASE_.*/);
+ return 1;
+}
+
+sub valid_ft_type($)
+{
+ my $t = shift;
+ return 0 unless($t =~ /^FT_.*/);
+ return 1;
+}
+
+# Handle an HF_FIELD command: register a complete header-field
+# definition (name, filter, types, mask, blurb) under $index.
+sub handle_hf_field($$$$$$$$$$)
+{
+ my ($pos,$data,$index,$name,$filter,$ft_type,$base_type,$valsstring,$mask,$blurb) = @_;
+
+ if (!defined($blurb)) {
+  error($pos, "incomplete HF_FIELD command");
+  return;
+ }
+
+ # Sanity-check the two type arguments; warn but keep going.
+ warning($pos, "invalid FT_TYPE `$ft_type'") unless (valid_ft_type($ft_type));
+ warning($pos, "invalid BASE_TYPE `$base_type'") unless (valid_base_type($base_type));
+
+ my $field = {
+  INDEX => $index,
+  POS => $pos,
+  USED => 0,
+  NAME => $name,
+  FILTER => $filter,
+  FT_TYPE => $ft_type,
+  BASE_TYPE => $base_type,
+  VALSSTRING => $valsstring,
+  MASK => $mask,
+  BLURB => $blurb
+ };
+ $data->{header_fields}->{$index} = $field;
+}
+
+# Handle a STRIP_PREFIX command: remember a prefix to strip from
+# type/element names when generating dissector identifiers.
+sub handle_strip_prefix($$$)
+{
+ my ($pos,$data,$x) = @_;
+
+ push (@{$data->{strip_prefixes}}, $x);
+}
+
+# Handle a NOEMIT command: suppress emission for a named type, or for
+# the entire dissector when no type argument is given.
+sub handle_noemit($$$)
+{
+ my ($pos,$data,$type) = @_;
+
+ unless (defined($type)) {
+  $data->{noemit_dissector} = 1;
+  return;
+ }
+ $data->{noemit}->{$type} = 1;
+}
+
+# Handle a MANUAL command: mark generated function $fn as manually
+# implemented so code emission for it is suppressed.
+sub handle_manual($$$)
+{
+ my ($pos,$data,$fn) = @_;
+
+ if (!defined($fn)) {
+  warning($pos, "incomplete MANUAL command");
+  return;
+ }
+
+ $data->{manual}->{$fn} = 1;
+}
+
+# Handle a PROTOCOL command: override the long name, short name and
+# filter name registered for protocol $name.
+sub handle_protocol($$$$$$)
+{
+ my ($pos, $data, $name, $longname, $shortname, $filtername) = @_;
+
+ my $proto = {
+  LONGNAME => $longname,
+  SHORTNAME => $shortname,
+  FILTERNAME => $filtername
+ };
+ $data->{protocols}->{$name} = $proto;
+}
+
+# Handle a FIELD_DESCRIPTION command: override the display
+# description of header field $field.
+sub handle_fielddescription($$$$)
+{
+ my ($pos,$data,$field,$desc) = @_;
+
+ if (!defined($desc)) {
+  warning($pos, "incomplete FIELD_DESCRIPTION command");
+  return;
+ }
+
+ my $entry = {
+  DESCRIPTION => $desc,
+  POS => $pos,
+  USED => 0
+ };
+ $data->{fielddescription}->{$field} = $entry;
+}
+
+# Handle an IMPORT command: record an externally implemented
+# dissector; all remaining words form the call template (DATA).
+sub handle_import
+{
+ my ($pos, $data, $dissectorname, @rest) = @_;
+
+ if (!defined($dissectorname)) {
+  error($pos, "no dissectorname specified");
+  return;
+ }
+
+ $data->{imports}->{$dissectorname} = {
+  NAME => $dissectorname,
+  DATA => join(' ', @rest),
+  USED => 0,
+  POS => $pos
+ };
+}
+
+# Handle an ETT_FIELD command: register an extra ett (subtree state)
+# variable to be declared by the generated dissector.
+sub handle_ett_field
+{
+ my ($pos, $data, $ett) = @_;
+
+ if (!defined($ett)) {
+  error($pos, "incomplete ETT_FIELD command");
+  return;
+ }
+
+ push @{$data->{ett}}, $ett;
+}
+
+# Handle an INCLUDE command: recursively read another conformance
+# file into the same $data structure.
+sub handle_include
+{
+ my ($pos, $data, $fn) = @_;
+
+ if (!defined($fn)) {
+  error($pos, "incomplete INCLUDE command");
+  return;
+ }
+
+ ReadConformance($fn, $data);
+}
+
+# Dispatch table mapping conformance-file command names to their
+# handler subs. Each handler is invoked as ($pos, $data, @fields).
+my %field_handlers = (
+ TYPE => \&handle_type,
+ NOEMIT => \&handle_noemit,
+ MANUAL => \&handle_manual,
+ PARAM_VALUE => \&handle_param_value,
+ HF_FIELD => \&handle_hf_field,
+ HF_RENAME => \&handle_hf_rename,
+ ETT_FIELD => \&handle_ett_field,
+ TFS => \&handle_tfs,
+ STRIP_PREFIX => \&handle_strip_prefix,
+ PROTOCOL => \&handle_protocol,
+ FIELD_DESCRIPTION => \&handle_fielddescription,
+ IMPORT => \&handle_import,
+ INCLUDE => \&handle_include
+);
+
+# Read conformance file $f and merge its directives into $data.
+# Returns undef when the file cannot be opened, otherwise the result
+# of ReadConformanceFH (1 on success, undef on structural errors).
+sub ReadConformance($$)
+{
+ my ($f,$data) = @_;
+
+ # Three-argument open with a lexical filehandle: the filename can
+ # never be misparsed as a mode, and no global handle is clobbered.
+ open(my $in, '<', $f) or return undef;
+
+ my $ret = ReadConformanceFH($in, $data, $f);
+
+ close($in);
+
+ return $ret;
+}
+
+# Parse an already-opened conformance file ($fh; $f is its name, used
+# only for diagnostics) line by line into $data. Verbatim CODE and
+# HEADER sections are accumulated into $data->{override} and
+# $data->{header}; all other lines are split into whitespace- or
+# quote-delimited fields and dispatched through %field_handlers.
+# Returns 1 on success, undef on mis-nested or unterminated sections.
+sub ReadConformanceFH($$$)
+{
+ my ($fh,$data,$f) = @_;
+
+ my $incodeblock = 0;
+ my $inheaderblock = 0;
+
+ my $ln = 0;
+
+ foreach (<$fh>) {
+  $ln++;
+  next if (/^#.*$/);
+  next if (/^$/);
+
+  s/[\r\n]//g;
+
+  if ($_ eq "CODE START") {
+   if ($incodeblock) {
+    warning({ FILE => $f, LINE => $ln },
+     "CODE START inside CODE section");
+   }
+   if ($inheaderblock) {
+    error({ FILE => $f, LINE => $ln },
+     "CODE START inside HEADER section");
+    return undef;
+   }
+   $incodeblock = 1;
+   next;
+  } elsif ($_ eq "CODE END") {
+   if (!$incodeblock) {
+    warning({ FILE => $f, LINE => $ln },
+     "CODE END outside CODE section");
+   }
+   if ($inheaderblock) {
+    error({ FILE => $f, LINE => $ln },
+     "CODE END inside HEADER section");
+    return undef;
+   }
+   $incodeblock = 0;
+   next;
+  } elsif ($incodeblock) {
+   if (exists $data->{override}) {
+    $data->{override}.="$_\n";
+   } else {
+    $data->{override} = "$_\n";
+   }
+   next;
+  } elsif ($_ eq "HEADER START") {
+   if ($inheaderblock) {
+    warning({ FILE => $f, LINE => $ln },
+     "HEADER START inside HEADER section");
+   }
+   if ($incodeblock) {
+    error({ FILE => $f, LINE => $ln },
+     "HEADER START inside CODE section");
+    return undef;
+   }
+   $inheaderblock = 1;
+   next;
+  } elsif ($_ eq "HEADER END") {
+   if (!$inheaderblock) {
+    warning({ FILE => $f, LINE => $ln },
+     "HEADER END outside HEADER section");
+   }
+   if ($incodeblock) {
+    # Fixed message: this branch fires for a HEADER END seen while
+    # inside a CODE section (it previously claimed the opposite).
+    error({ FILE => $f, LINE => $ln },
+     "HEADER END inside CODE section");
+    return undef;
+   }
+   $inheaderblock = 0;
+   next;
+  } elsif ($inheaderblock) {
+   if (exists $data->{header}) {
+    $data->{header}.="$_\n";
+   } else {
+    $data->{header} = "$_\n";
+   }
+   next;
+  }
+
+  # Split into bare words and double-quoted strings; the first field
+  # is the command name, the rest are its arguments.
+  my @fields = /([^ "]+|"[^"]+")/g;
+
+  my $cmd = $fields[0];
+
+  shift @fields;
+
+  my $pos = { FILE => $f, LINE => $ln };
+
+  next unless(defined($cmd));
+
+  if (not defined($field_handlers{$cmd})) {
+   warning($pos, "Unknown command `$cmd'");
+   next;
+  }
+
+  $field_handlers{$cmd}($pos, $data, @fields);
+ }
+
+ if ($incodeblock) {
+  warning({ FILE => $f, LINE => $ln },
+   "Expecting CODE END");
+  return undef;
+ }
+
+ return 1;
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Pidl/Wireshark/NDR.pm b/tools/pidl/lib/Parse/Pidl/Wireshark/NDR.pm
new file mode 100644
index 0000000..ada8dd6
--- /dev/null
+++ b/tools/pidl/lib/Parse/Pidl/Wireshark/NDR.pm
@@ -0,0 +1,1401 @@
+##################################################
+# Wireshark NDR parser generator for IDL structures
+# Copyright tridge@samba.org 2000-2003
+# Copyright tpot@samba.org 2001,2005
+# Copyright jelmer@samba.org 2004-2007
+# Portions based on idl2eth.c by Ronnie Sahlberg
+# released under the GNU GPL
+
+=pod
+
+=head1 NAME
+
+Parse::Pidl::Wireshark::NDR - Parser generator for Wireshark
+
+=cut
+
+package Parse::Pidl::Wireshark::NDR;
+
+use Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(field2name %res PrintIdl StripPrefixes RegisterInterfaceHandoff register_hf_field CheckUsed ProcessImport ProcessInclude find_type DumpEttList DumpEttDeclaration DumpHfList DumpHfDeclaration DumpFunctionTable register_type register_ett);
+
+use strict;
+use Parse::Pidl qw(error warning);
+use Parse::Pidl::Typelist qw(getType);
+use Parse::Pidl::Util qw(has_property property_matches make_str);
+use Parse::Pidl::NDR qw(ContainsString GetNextLevel);
+use Parse::Pidl::Dump qw(DumpType DumpFunction);
+use Parse::Pidl::Wireshark::Conformance qw(ReadConformance);
+use File::Basename;
+
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+# Per-interface map of status/return-value fields emitted so far.
+my %return_types = ();
+# NOTE(review): %dissector_used is not referenced in this chunk;
+# presumably tracks referenced dissectors -- confirm against the
+# rest of the module before relying on it.
+my %dissector_used = ();
+
+# Map IDL pointer attributes to the matching NDR pointer constants.
+my %ptrtype_mappings = (
+ "unique" => "NDR_POINTER_UNIQUE",
+ "ref" => "NDR_POINTER_REF",
+ "ptr" => "NDR_POINTER_PTR"
+);
+
+# Strip any of the configured prefixes (plus the joining underscore)
+# from the start of name $s and return the result.
+sub StripPrefixes($$)
+{
+ my ($s, $prefixes) = @_;
+
+ foreach my $prefix (@$prefixes) {
+  # \Q...\E quotes regex metacharacters, so a prefix containing
+  # e.g. '.' or '+' is matched literally instead of as a pattern.
+  $s =~ s/^\Q$prefix\E_//g;
+ }
+
+ return $s;
+}
+
+# Turn an IDL structure field name (e.g. "access_mask") into a
+# human-readable label such as "Access Mask".
+
+sub field2name($)
+{
+ my $field = shift;
+
+ $field =~ s/^_+//; # drop leading underscores
+ $field =~ s/_/ /g; # underscores become spaces
+ $field =~ s/(\w+)/\u$1/g; # capitalise every word
+
+ return $field;
+}
+
+# Construct a fresh generator instance: empty hdr/def/code output
+# buffers, zero indentation, and no conformance data loaded yet.
+sub new($)
+{
+ my ($class) = @_;
+
+ my $self = {
+  res => { hdr => "", def => "", code => "" },
+  tabs => "",
+  cur_fn => undef,
+  hf_used => {},
+  ett => [],
+  conformance => undef
+ };
+
+ return bless($self, $class);
+}
+
+# Remember which generated function we are currently emitting code for.
+sub pidl_fn_start($$)
+{
+ my ($self, $name) = @_;
+ $self->{cur_fn} = $name;
+}
+# Leave the current generated function, dying if starts and ends are
+# not properly paired.
+sub pidl_fn_end($$)
+{
+ my ($self, $name) = @_;
+ if ($name ne $self->{cur_fn}) {
+  die("Inconsistent state: $name != $self->{cur_fn}");
+ }
+ $self->{cur_fn} = undef;
+}
+
+# Append one line of C code to the code buffer at the current
+# indentation; suppressed while inside a function the conformance
+# file marks as manually implemented. An empty/false $d emits a
+# bare newline.
+sub pidl_code($$)
+{
+ my ($self, $d) = @_;
+
+ my $fn = $self->{cur_fn};
+ return if (defined($fn) and defined($self->{conformance}->{manual}->{$fn}));
+
+ $self->{res}->{code} .= $self->{tabs} . $d if ($d);
+ $self->{res}->{code} .= "\n";
+}
+
+# Append one line to the generated header (hdr) buffer.
+sub pidl_hdr($$)
+{
+ my ($self, $x) = @_;
+ $self->{res}->{hdr} .= "$x\n";
+}
+
+# Append one line to the generated definition (def) buffer.
+sub pidl_def($$)
+{
+ my ($self, $x) = @_;
+ $self->{res}->{def} .= "$x\n";
+}
+
+# Increase the emitted-code indentation by one tab stop.
+sub indent($)
+{
+ my ($self) = @_;
+ $self->{tabs} = $self->{tabs} . "\t";
+}
+
+# Decrease the emitted-code indentation by one tab stop (removes the
+# last character of the tab string).
+sub deindent($)
+{
+ my ($self) = @_;
+ $self->{tabs} = substr($self->{tabs}, 0, -1);
+}
+
+# Emit the original IDL text as a block of "/* IDL: ... */" comment
+# lines, one per input line, followed by an empty line.
+sub PrintIdl($$)
+{
+ my ($self, $idl) = @_;
+
+ $self->pidl_code("/* IDL: $_ */") foreach (split /\n/, $idl);
+ $self->pidl_code("");
+}
+
+#####################################################################
+# parse the interface definitions
+sub Interface($$)
+{
+ my($self, $interface) = @_;
+ $self->Const($_,$interface->{NAME}) foreach (@{$interface->{CONSTS}});
+ $self->Type($_, $_->{NAME}, $interface->{NAME}) foreach (@{$interface->{TYPES}});
+ $self->Function($_,$interface->{NAME}) foreach (@{$interface->{FUNCTIONS}});
+}
+
+# Emit a value_string table and a dissector function for IDL enum
+# $name: the dissector reads the underlying integer via
+# dissect_ndr_<base type> and round-trips the value through the
+# caller-supplied *param when non-NULL.
+sub Enum($$$$)
+{
+ my ($self, $e,$name,$ifname) = @_;
+ my $valsstring = "$ifname\_$name\_vals";
+ my $dissectorname = "$ifname\_dissect\_enum\_".StripPrefixes($name, $self->{conformance}->{strip_prefixes});
+
+ # Honour a NOEMIT conformance directive for this type.
+ return if (defined($self->{conformance}->{noemit}->{StripPrefixes($name, $self->{conformance}->{strip_prefixes})}));
+
+ # Enum elements are strings of the form "NAME=VALUE"; export each
+ # as a #define in the generated header.
+ foreach (@{$e->{ELEMENTS}}) {
+  if (/([^=]*)=(.*)/) {
+   $self->pidl_hdr("#define $1 ($2)");
+  }
+ }
+
+ $self->pidl_hdr("extern const value_string $valsstring\[];");
+ $self->pidl_hdr("int $dissectorname(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, dcerpc_info* di _U_, guint8 *drep _U_, int hf_index _U_, g$e->{BASE_TYPE} *param _U_);");
+
+ $self->pidl_def("const value_string ".$valsstring."[] = {");
+ foreach (@{$e->{ELEMENTS}}) {
+  next unless (/([^=]*)=(.*)/);
+  $self->pidl_def("\t{ $1, \"$1\" },");
+ }
+
+ $self->pidl_def("{ 0, NULL }");
+ $self->pidl_def("};");
+
+ $self->pidl_fn_start($dissectorname);
+ $self->pidl_code("int");
+ $self->pidl_code("$dissectorname(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, dcerpc_info* di _U_, guint8 *drep _U_, int hf_index _U_, g$e->{BASE_TYPE} *param _U_)");
+ $self->pidl_code("{");
+ $self->indent;
+ $self->pidl_code("g$e->{BASE_TYPE} parameter=0;");
+ $self->pidl_code("if (param) {");
+ $self->indent;
+ $self->pidl_code("parameter = *param;");
+ $self->deindent;
+ $self->pidl_code("}");
+ $self->pidl_code("offset = dissect_ndr_$e->{BASE_TYPE}(tvb, offset, pinfo, tree, di, drep, hf_index, &parameter);");
+ $self->pidl_code("if (param) {");
+ $self->indent;
+ $self->pidl_code("*param = parameter;");
+ $self->deindent;
+ $self->pidl_code("}");
+ $self->pidl_code("return offset;");
+ $self->deindent;
+ $self->pidl_code("}\n");
+ $self->pidl_fn_end($dissectorname);
+
+ # e.g. BASE_TYPE "uint32" -> FT_UINT32 with a 4-byte alignment.
+ my $enum_size = $e->{BASE_TYPE};
+ $enum_size =~ s/uint//g;
+ $self->register_type($name, "offset = $dissectorname(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);", "FT_UINT$enum_size", "BASE_DEC", "0", "VALS($valsstring)", $enum_size / 8);
+}
+
+# IDL pipes are not implemented: report an error against the
+# element's original source location and emit nothing.
+sub Pipe($$$$)
+{
+ my ($self, $e, $name, $ifname) = @_;
+ error($e->{ORIGINAL}, "Pipe not yet supported");
+ return;
+}
+
+# Emit a dissector for IDL bitmap $name: the value is rendered with
+# proto_tree_add_bitmask_with_flags() over a NULL-terminated array of
+# per-bit hf fields, each with its own true_false_string (overridable
+# via a TFS conformance directive), plus a check for unknown bits.
+sub Bitmap($$$$)
+{
+ my ($self,$e,$name,$ifname) = @_;
+ my $dissectorname = "$ifname\_dissect\_bitmap\_".StripPrefixes($name, $self->{conformance}->{strip_prefixes});
+ my $element_count = 0;
+ my $total_ev = 0;
+
+ $self->register_ett("ett_$ifname\_$name");
+
+ $self->pidl_hdr("int $dissectorname(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, dcerpc_info* di _U_, guint8 *drep _U_, int hf_index _U_, guint32 param _U_);");
+
+ $self->pidl_fn_start($dissectorname);
+ $self->pidl_code("int");
+ $self->pidl_code("$dissectorname(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, dcerpc_info* di _U_, guint8 *drep _U_, int hf_index _U_, guint32 param _U_)");
+ $self->pidl_code("{");
+ $self->indent;
+ # Bitmap elements are strings of the form "NAME VALUE"; count the
+ # ones that parse so we know whether to emit the fields array.
+ foreach (@{$e->{ELEMENTS}}) {
+  next unless (/([^ ]*) (.*)/);
+  $element_count++;
+ }
+ if ($element_count > 0) {
+  $self->pidl_code("proto_item *item;");
+  $self->pidl_code("static int * const $ifname\_$name\_fields[] = {");
+  $self->indent;
+  foreach (@{$e->{ELEMENTS}}) {
+   next unless (/([^ ]*) (.*)/);
+   my ($en,$ev) = ($1,$2);
+   my $hf_bitname = "hf_$ifname\_$name\_$1";
+
+   # Accumulate the union of all defined bits; zero-valued
+   # entries are skipped (no bit to display).
+   $ev =~ s/[()\s]//g;
+   if (hex($ev) != 0) {
+    $total_ev += hex($ev);
+    $self->pidl_code("&$hf_bitname,");
+   }
+  }
+  $self->pidl_code("NULL");
+  $self->deindent;
+  $self->pidl_code("};");
+ }
+
+ $self->pidl_code("g$e->{BASE_TYPE} flags;");
+ if ($e->{ALIGN} > 1) {
+  $self->pidl_code("ALIGN_TO_$e->{ALIGN}_BYTES;");
+ }
+
+ $self->pidl_code("");
+
+ if ($element_count > 0) {
+  $self->pidl_code("item = proto_tree_add_bitmask_with_flags(parent_tree, tvb, offset, hf_index,");
+  $self->pidl_code("\t\t\tett_$ifname\_$name, $ifname\_$name\_fields, DREP_ENC_INTEGER(drep), BMT_NO_FALSE);");
+  $self->pidl_code("");
+
+  $self->pidl_code("offset = dissect_ndr_$e->{BASE_TYPE}(tvb, offset, pinfo, parent_tree, di, drep, -1, &flags);");
+  $self->pidl_code("");
+
+  $self->pidl_code("if (!flags)");
+  $self->pidl_code("\tproto_item_append_text(item, \": (No values set)\");\n");
+ } else {
+  # No named bits: show the raw value only.
+  $self->pidl_code("proto_tree_add_item(parent_tree, hf_index, tvb, offset, $e->{ALIGN}, DREP_ENC_INTEGER(drep));");
+  $self->pidl_code("");
+
+  $self->pidl_code("offset = dissect_ndr_$e->{BASE_TYPE}(tvb, offset, pinfo, parent_tree, di, drep, -1, &flags);");
+  $self->pidl_code("");
+ }
+
+ # Register one boolean hf field per bit, with a true_false_string
+ # taken from the conformance file when one was supplied.
+ foreach (@{$e->{ELEMENTS}}) {
+  next unless (/([^ ]*) (.*)/);
+  my ($en,$ev) = ($1,$2);
+  my $hf_bitname = "hf_$ifname\_$name\_$en";
+  my $filtername = "$ifname\.$name\.$en";
+
+  $self->{hf_used}->{$hf_bitname} = 1;
+
+  $ev =~ s/[()\s]//g;
+  if (hex($ev) != 0) {
+   $self->register_hf_field($hf_bitname, field2name($en), $filtername, "FT_BOOLEAN", $e->{ALIGN} * 8, "TFS(&$name\_$en\_tfs)", "( $ev )", "");
+
+   $self->pidl_def("static const true_false_string $name\_$en\_tfs = {");
+   if (defined($self->{conformance}->{tfs}->{$hf_bitname})) {
+    $self->pidl_def(" $self->{conformance}->{tfs}->{$hf_bitname}->{TRUE_STRING},");
+    $self->pidl_def(" $self->{conformance}->{tfs}->{$hf_bitname}->{FALSE_STRING},");
+    $self->{conformance}->{tfs}->{$hf_bitname}->{USED} = 1;
+   } else {
+    $self->pidl_def(" \"$en is SET\",");
+    $self->pidl_def(" \"$en is NOT SET\",");
+   }
+   $self->pidl_def("};");
+  }
+ }
+
+ # Flag any set bits that are not covered by the known mask.
+ if ($element_count > 0) {
+  my $total_ev_hex = sprintf("0x%08x", $total_ev);
+  $self->pidl_code("if (flags & (~$total_ev_hex)) {");
+  $self->pidl_code("\tflags &= (~$total_ev_hex);");
+  $self->pidl_code("\tproto_item_append_text(item, \"Unknown bitmap value 0x%x\", flags);");
+  $self->pidl_code("}\n");
+ }
+ $self->pidl_code("return offset;");
+ $self->deindent;
+ $self->pidl_code("}\n");
+ $self->pidl_fn_end($dissectorname);
+
+ # e.g. BASE_TYPE "uint32" -> FT_UINT32 with a 4-byte alignment.
+ my $size = $e->{BASE_TYPE};
+ $size =~ s/uint//g;
+ $self->register_type($name, "offset = $dissectorname(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);", "FT_UINT$size", "BASE_HEX", "0", "NULL", $size/8);
+}
+
+# Emit the dissection code for one representation level ($l) of
+# element $e: POINTER, ARRAY, DATA, SUBCONTEXT or PIPE. $myname is
+# the name of the generated element dissector this level belongs to,
+# $hf its header-field index, $param the extra parameter forwarded to
+# sub-dissectors. Fix: the SUBCONTEXT/PIPE/die branches previously
+# tested and dereferenced $_ instead of the $l parameter; that only
+# worked because the sole call site (Element's LEVELS loop) passes
+# the loop alias $_ as $l. Use $l explicitly.
+sub ElementLevel($$$$$$$$)
+{
+ my ($self,$e,$l,$hf,$myname,$pn,$ifname,$param) = @_;
+
+ # A PARAM_VALUE conformance directive overrides the parameter.
+ if (defined($self->{conformance}->{dissectorparams}->{$myname})) {
+  $param = $self->{conformance}->{dissectorparams}->{$myname}->{PARAM};
+ }
+
+ if ($l->{TYPE} eq "POINTER") {
+  my $type;
+  if ($l->{LEVEL} eq "TOP") {
+   $type = "toplevel";
+  } elsif ($l->{LEVEL} eq "EMBEDDED") {
+   $type = "embedded";
+  }
+  $self->pidl_code("offset = dissect_ndr_$type\_pointer(tvb, offset, pinfo, tree, di, drep, $myname\_, $ptrtype_mappings{$l->{POINTER_TYPE}}, \"Pointer to ".field2name(StripPrefixes($e->{NAME}, $self->{conformance}->{strip_prefixes})) . " ($e->{TYPE})\",$hf);");
+ } elsif ($l->{TYPE} eq "ARRAY") {
+  if ($l->{IS_INLINE}) {
+   error($e->{ORIGINAL}, "Inline arrays not supported");
+  } elsif ($l->{IS_FIXED}) {
+   $self->pidl_code("int i;");
+   $self->pidl_code("for (i = 0; i < $l->{SIZE_IS}; i++)");
+   $self->pidl_code("\toffset = $myname\_(tvb, offset, pinfo, tree, di, drep);");
+  } else {
+   # Conformant and/or varying array.
+   my $type = "";
+   $type .= "c" if ($l->{IS_CONFORMANT});
+   $type .= "v" if ($l->{IS_VARYING});
+
+   unless ($l->{IS_ZERO_TERMINATED}) {
+    $self->pidl_code("offset = dissect_ndr_u" . $type . "array(tvb, offset, pinfo, tree, di, drep, $myname\_);");
+   } else {
+    my $nl = GetNextLevel($e,$l);
+    $self->pidl_code("char *data;");
+    $self->pidl_code("");
+    $self->pidl_code("offset = dissect_ndr_$type" . "string(tvb, offset, pinfo, tree, di, drep, sizeof(g$nl->{DATA_TYPE}), $hf, FALSE, &data);");
+    $self->pidl_code("proto_item_append_text(tree, \": %s\", data);");
+   }
+  }
+ } elsif ($l->{TYPE} eq "DATA") {
+  if ($l->{DATA_TYPE} eq "string") {
+   my $bs = 2; # Byte size defaults to that of UCS2
+
+
+   ($bs = 1) if (property_matches($e, "flag", ".*LIBNDR_FLAG_STR_ASCII.*"));
+
+   if (property_matches($e, "flag", ".*LIBNDR_FLAG_STR_SIZE4.*") and property_matches($e, "flag", ".*LIBNDR_FLAG_STR_LEN4.*")) {
+    $self->pidl_code("char *data;\n");
+    $self->pidl_code("offset = dissect_ndr_cvstring(tvb, offset, pinfo, tree, di, drep, $bs, $hf, FALSE, &data);");
+    $self->pidl_code("proto_item_append_text(tree, \": %s\", data);");
+   } elsif (property_matches($e, "flag", ".*LIBNDR_FLAG_STR_SIZE4.*")) {
+    $self->pidl_code("offset = dissect_ndr_vstring(tvb, offset, pinfo, tree, di, drep, $bs, $hf, FALSE, NULL);");
+   } elsif (property_matches($e, "flag", ".*STR_NULLTERM.*")) {
+    if ($bs == 2) {
+     $self->pidl_code("offset = dissect_null_term_wstring(tvb, offset, pinfo, tree, drep, $hf , 0);")
+    } else {
+     $self->pidl_code("offset = dissect_null_term_string(tvb, offset, pinfo, tree, drep, $hf , 0);")
+    }
+   } else {
+    warn("Unable to handle string with flags $e->{PROPERTIES}->{flag}");
+   }
+  } elsif ($l->{DATA_TYPE} eq "DATA_BLOB") {
+   my $remain = 0;
+   $remain = 1 if (property_matches($e->{ORIGINAL}, "flag", ".*LIBNDR_FLAG_REMAINING.*"));
+   $self->pidl_code("offset = dissect_ndr_datablob(tvb, offset, pinfo, tree, di, drep, $hf, $remain);");
+  } else {
+   # Look up the dissector call template: imports first (by type,
+   # then by "struct.element"), then conformance TYPE directives,
+   # finally fall back to a generated struct dissector.
+   my $call;
+
+   if ($self->{conformance}->{imports}->{$l->{DATA_TYPE}}) {
+    $call = $self->{conformance}->{imports}->{$l->{DATA_TYPE}}->{DATA};
+    $self->{conformance}->{imports}->{$l->{DATA_TYPE}}->{USED} = 1;
+   } elsif (defined($self->{conformance}->{imports}->{"$pn.$e->{NAME}"})) {
+    $call = $self->{conformance}->{imports}->{"$pn.$e->{NAME}"}->{DATA};
+    $self->{conformance}->{imports}->{"$pn.$e->{NAME}"}->{USED} = 1;
+   } elsif (defined($self->{conformance}->{types}->{$l->{DATA_TYPE}})) {
+    $call= $self->{conformance}->{types}->{$l->{DATA_TYPE}}->{DISSECTOR_NAME};
+    $self->{conformance}->{types}->{$l->{DATA_TYPE}}->{USED} = 1;
+   } else {
+    my $t;
+    if (ref($l->{DATA_TYPE}) eq "HASH" ) {
+     $t = "$l->{DATA_TYPE}->{TYPE}_$l->{DATA_TYPE}->{NAME}";
+    } else {
+     $t = $l->{DATA_TYPE};
+    }
+
+    $self->pidl_code("offset = $ifname\_dissect_struct_" . $t . "(tvb,offset,pinfo,tree,di,drep,$hf,$param);");
+
+    return;
+   }
+
+   $call =~ s/\@HF\@/$hf/g;
+   $call =~ s/\@PARAM\@/$param/g;
+   $self->pidl_code($call);
+  }
+ } elsif ($l->{TYPE} eq "SUBCONTEXT") {
+  my $varswitch;
+  if (has_property($e, "switch_is")) {
+   $varswitch = $e->{PROPERTIES}->{switch_is};
+  }
+  my $num_bits = ($l->{HEADER_SIZE}*8);
+  my $hf2 = $self->register_hf_field($hf."_", "Subcontext length", "$ifname.$pn.$l->{NAME}subcontext", "FT_UINT$num_bits", "BASE_HEX", "NULL", 0, "");
+  $num_bits = 3264 if ($num_bits == 32);
+  $self->{hf_used}->{$hf2} = 1;
+  $self->pidl_code("guint$num_bits size;");
+  $self->pidl_code("int conformant = di->conformant_run;");
+  $self->pidl_code("tvbuff_t *subtvb;");
+  $self->pidl_code("");
+  # We need to be able to dissect the length of the context in every case
+  # and conformant run skips the dissections of scalars ...
+  $self->pidl_code("if (!conformant) {");
+  $self->indent;
+  $self->pidl_code("guint32 saved_flags = di->call_data->flags;");
+  $self->pidl_code("offset = dissect_ndr_uint$num_bits(tvb, offset, pinfo, tree, di, drep, $hf2, &size);");
+  # This is a subcontext, there is normally no such thing as
+  # 64 bit NDR is subcontext so we clear the flag so that we can
+  # continue to dissect handmarshalled stuff with pidl
+  $self->pidl_code("di->call_data->flags &= ~DCERPC_IS_NDR64;");
+
+  $self->pidl_code("subtvb = tvb_new_subset_length_caplen(tvb, offset, (const gint)size, -1);");
+  if ($param ne 0) {
+   $self->pidl_code("$myname\_(subtvb, 0, pinfo, tree, di, drep, $param);");
+  } else {
+   $self->pidl_code("$myname\_(subtvb, 0, pinfo, tree, di, drep);");
+  }
+  $self->pidl_code("offset += (int)size;");
+  $self->pidl_code("di->call_data->flags = saved_flags;");
+  $self->deindent;
+  $self->pidl_code("}");
+ } elsif ($l->{TYPE} eq "PIPE") {
+  error($e->{ORIGINAL}, "Type PIPE not yet supported");
+ } else {
+  die("Unknown type `$l->{TYPE}'");
+ }
+}
+
+# Map an IDL union discriminator type to the scalar type name used
+# for the generated switch variable (e.g. "uint32"). Falls back to
+# $nodiscriminant when the element has no explicit SWITCH_TYPE and
+# the type is neither an enum, a bitmap nor a scalar.
+sub SwitchType($$;$)
+{
+ my ($e, $type, $nodiscriminant) = @_;
+
+ # NOTE(review): getType() may return undef for an unknown type,
+ # which would make the dereference below die -- callers appear to
+ # pass known types only; confirm.
+ my $switch_dt = getType($type);
+ my $switch_type = undef;
+ if ($switch_dt->{DATA}->{TYPE} eq "ENUM") {
+  $switch_type = Parse::Pidl::Typelist::enum_type_fn($switch_dt->{DATA});
+ } elsif ($switch_dt->{DATA}->{TYPE} eq "BITMAP") {
+  $switch_type = Parse::Pidl::Typelist::bitmap_type_fn($switch_dt->{DATA});
+ } elsif ($switch_dt->{DATA}->{TYPE} eq "SCALAR") {
+  # Prefer an explicit SWITCH_TYPE annotation over the scalar name.
+  if (defined $e->{SWITCH_TYPE}) {
+   $switch_type = "$e->{SWITCH_TYPE}";
+  } else {
+   $switch_type = "$switch_dt->{DATA}->{NAME}";
+  }
+ } elsif (not defined $e->{SWITCH_TYPE}) {
+  $switch_type = $nodiscriminant;
+ }
+
+ return $switch_type
+}
+
+# Emit the per-level dissector functions for one element $e of
+# struct/function $pn and return the C statement that invokes the
+# outermost one. $isoruseswitch, when defined, is a [type, name]
+# pair describing the union discriminator this element either IS or
+# USES; %switchvars names all switch variables of the container.
+sub Element($$$$$$)
+{
+ my ($self,$e,$pn,$ifname,$isoruseswitch,%switchvars) = @_;
+
+ my $dissectorname = "$ifname\_dissect\_element\_".StripPrefixes($pn, $self->{conformance}->{strip_prefixes})."\_".StripPrefixes($e->{NAME}, $self->{conformance}->{strip_prefixes});
+
+ my ($call_code, $moreparam);
+ my $param = 0;
+ if (defined $isoruseswitch) {
+  my $type = $isoruseswitch->[0];
+  my $name = $isoruseswitch->[1];
+
+  my $switch_dt = getType($type);
+  my $switch_raw_type = SwitchType($e, $type, "uint32");
+  if (not defined($switch_raw_type)) {
+   die("Unknown type[$type]\n");
+  }
+  my $switch_type = "g${switch_raw_type}";
+
+  # The generated dissector takes a pointer to the switch variable.
+  if ($name ne "") {
+   $moreparam = ", $switch_type *".$name;
+  } else {
+   $moreparam = "";
+  }
+  # Decide how the switch value is forwarded: by name for switch
+  # variables of non-native types and enums, dereferenced otherwise.
+  if (($e->{PROPERTIES}->{switch_is} eq "") && ($switchvars{$name}) &&
+   #not a "native" type
+   (!($type =~ /^uint(8|16|1632|32|3264|64)/))) {
+   $param = $name;
+  } elsif ( $switch_dt->{DATA}->{TYPE} eq "ENUM") {
+   $param = $name;
+  } elsif ($name ne "") {
+   $param = "*".$name;
+  }
+
+  if ($name ne "") {
+   $call_code = "offset = $dissectorname(tvb, offset, pinfo, tree, di, drep, &$name);";
+  } else {
+   $call_code = "offset = $dissectorname(tvb, offset, pinfo, tree, di, drep);";
+  }
+ } else {
+  $moreparam = "";
+  $call_code = "offset = $dissectorname(tvb, offset, pinfo, tree, di, drep);";
+ }
+
+
+ # Resolve the hf display type; fall back to FT_NONE, and force
+ # FT_STRING for elements the NDR layer knows carry strings.
+ my $type = $self->find_type($e->{TYPE});
+
+ if (not defined($type)) {
+  # default settings
+  $type = {
+   MASK => 0,
+   VALSSTRING => "NULL",
+   FT_TYPE => "FT_NONE",
+   BASE_TYPE => "BASE_NONE"
+  };
+ }
+
+ if (ContainsString($e)) {
+  $type = {
+   MASK => 0,
+   VALSSTRING => "NULL",
+   FT_TYPE => "FT_STRING",
+   BASE_TYPE => "BASE_NONE"
+  };
+ }
+ if (property_matches($e, "flag", ".*LIBNDR_FLAG_ALIGN.*")) {
+  my $align_flag = $e->{PROPERTIES}->{flag};
+  if ($align_flag =~ m/LIBNDR_FLAG_ALIGN(\d+)/) {
+   $call_code = "ALIGN_TO_$1_BYTES; ".$call_code;
+  }
+ }
+
+ my $hf = $self->register_hf_field("hf_$ifname\_$pn\_$e->{NAME}", field2name($e->{NAME}), "$ifname.$pn.$e->{NAME}", $type->{FT_TYPE}, $type->{BASE_TYPE}, $type->{VALSSTRING}, $type->{MASK}, "");
+ $self->{hf_used}->{$hf} = 1;
+
+ # A NOEMIT on "struct.element" keeps the call but suppresses the
+ # generated per-level functions.
+ my $eltname = StripPrefixes($pn, $self->{conformance}->{strip_prefixes}) . ".$e->{NAME}";
+ if (defined($self->{conformance}->{noemit}->{$eltname})) {
+  return $call_code;
+ }
+
+ my $add = "";
+
+ # Emit one nested dissector per representation level, each named by
+ # appending one more trailing underscore ($add).
+ my $oldparam = undef;
+ foreach (@{$e->{LEVELS}}) {
+  if (defined $_->{SWITCH_IS}) {
+   $oldparam = $param;
+   if (($param ne "0") && (!($param =~ /\*/))) {
+    $param = "*$param";
+   }
+  }
+  next if ($_->{TYPE} eq "SWITCH");
+  next if (defined($self->{conformance}->{noemit}->{"$dissectorname$add"}));
+  $self->pidl_def("static int $dissectorname$add(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, dcerpc_info* di _U_, guint8 *drep _U_$moreparam);");
+  $self->pidl_fn_start("$dissectorname$add");
+  $self->pidl_code("static int");
+  $self->pidl_code("$dissectorname$add(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, dcerpc_info* di _U_, guint8 *drep _U_$moreparam)");
+  $self->pidl_code("{");
+  $self->indent;
+
+  $self->ElementLevel($e,$_,$hf,$dissectorname.$add,$pn,$ifname,$param);
+  if (defined $oldparam) {
+   $param = $oldparam;
+  }
+
+  $self->pidl_code("");
+  $self->pidl_code("return offset;");
+  $self->deindent;
+  $self->pidl_code("}\n");
+  $self->pidl_fn_end("$dissectorname$add");
+  $add.="_";
+  last if ($_->{TYPE} eq "ARRAY" and $_->{IS_ZERO_TERMINATED});
+ }
+
+ return $call_code;
+}
+
+# Emit the request and response dissectors for IDL function $fn of
+# interface $ifname: element dissectors for all parameters, plus
+# decoding of the NTSTATUS/WERROR/HRESULT/enum/scalar return value
+# into the Info column of the response.
+sub Function($$$)
+{
+ my ($self, $fn,$ifname) = @_;
+
+ my %dissectornames;
+
+ foreach (@{$fn->{ELEMENTS}}) {
+  $dissectornames{$_->{NAME}} = $self->Element($_, $fn->{NAME}, $ifname, undef, undef) if not defined($dissectornames{$_->{NAME}});
+ }
+
+ # Fix: read the name from the explicit $fn parameter. The previous
+ # code used $_->{NAME}, which only worked because the caller's
+ # foreach loop happened to leave $_ aliased to $fn.
+ my $fn_name = $fn->{NAME};
+ $fn_name =~ s/^${ifname}_//;
+
+ $self->PrintIdl(DumpFunction($fn->{ORIGINAL}));
+ $self->pidl_fn_start("$ifname\_dissect\_$fn_name\_response");
+ $self->pidl_code("static int");
+ $self->pidl_code("$ifname\_dissect\_${fn_name}_response(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, dcerpc_info* di _U_, guint8 *drep _U_)");
+ $self->pidl_code("{");
+ $self->indent;
+ # Declare a status variable matching the declared return type.
+ if ( not defined($fn->{RETURN_TYPE})) {
+ } elsif ($fn->{RETURN_TYPE} eq "NTSTATUS" or $fn->{RETURN_TYPE} eq "WERROR" or $fn->{RETURN_TYPE} eq "HRESULT")
+ {
+  $self->pidl_code("guint32 status;\n");
+ } elsif (my $type = getType($fn->{RETURN_TYPE})) {
+  if ($type->{DATA}->{TYPE} eq "ENUM") {
+   $self->pidl_code("g".Parse::Pidl::Typelist::enum_type_fn($type->{DATA}) . " status;\n");
+  } elsif ($type->{DATA}->{TYPE} eq "SCALAR") {
+   $self->pidl_code("g$fn->{RETURN_TYPE} status;\n");
+  } else {
+   error($fn, "return type `$fn->{RETURN_TYPE}' not yet supported");
+  }
+ } else {
+  error($fn, "unknown return type `$fn->{RETURN_TYPE}'");
+ }
+
+ $self->pidl_code("di->dcerpc_procedure_name=\"${fn_name}\";");
+ # [out] parameters appear in the response.
+ foreach (@{$fn->{ELEMENTS}}) {
+  if (grep(/out/,@{$_->{DIRECTION}})) {
+   $self->pidl_code("$dissectornames{$_->{NAME}}");
+   $self->pidl_code("offset = dissect_deferred_pointers(pinfo, tvb, offset, di, drep);");
+   $self->pidl_code("");
+  }
+ }
+
+ # Decode the return value and surface failures in COL_INFO.
+ if (not defined($fn->{RETURN_TYPE})) {
+ } elsif ($fn->{RETURN_TYPE} eq "NTSTATUS") {
+  $self->pidl_code("offset = dissect_ntstatus(tvb, offset, pinfo, tree, di, drep, hf\_$ifname\_status, &status);\n");
+  $self->pidl_code("if (status != 0)");
+  $self->pidl_code("\tcol_append_fstr(pinfo->cinfo, COL_INFO, \", Error: %s\", val_to_str(status, NT_errors, \"Unknown NT status 0x%08x\"));\n");
+  $return_types{$ifname}->{"status"} = ["NTSTATUS", "NT Error"];
+ } elsif ($fn->{RETURN_TYPE} eq "WERROR") {
+  $self->pidl_code("offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf\_$ifname\_werror, &status);\n");
+  $self->pidl_code("if (status != 0)");
+  $self->pidl_code("\tcol_append_fstr(pinfo->cinfo, COL_INFO, \", Error: %s\", val_to_str(status, WERR_errors, \"Unknown DOS error 0x%08x\"));\n");
+
+  $return_types{$ifname}->{"werror"} = ["WERROR", "Windows Error"];
+ } elsif ($fn->{RETURN_TYPE} eq "HRESULT") {
+  $self->pidl_code("offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf\_$ifname\_hresult, &status);\n");
+  $self->pidl_code("if (status != 0)");
+  $self->pidl_code("\tcol_append_fstr(pinfo->cinfo, COL_INFO, \", Error: %s\", val_to_str(status, HRES_errors, \"Unknown HRES error 0x%08x\"));\n");
+  $return_types{$ifname}->{"hresult"} = ["HRESULT", "HRES Windows Error"];
+ } elsif (my $type = getType($fn->{RETURN_TYPE})) {
+  if ($type->{DATA}->{TYPE} eq "ENUM") {
+   my $return_type = "g".Parse::Pidl::Typelist::enum_type_fn($type->{DATA});
+   my $return_dissect = "dissect_ndr_" .Parse::Pidl::Typelist::enum_type_fn($type->{DATA});
+
+   $self->pidl_code("offset = $return_dissect(tvb, offset, pinfo, tree, di, drep, hf\_$ifname\_$fn->{RETURN_TYPE}_status, &status);");
+   $self->pidl_code("if (status != 0)");
+   $self->pidl_code("\tcol_append_fstr(pinfo->cinfo, COL_INFO, \", Status: %s\", val_to_str(status, $ifname\_$fn->{RETURN_TYPE}\_vals, \"Unknown " . $fn->{RETURN_TYPE} . " error 0x%08x\"));\n");
+   $return_types{$ifname}->{$fn->{RETURN_TYPE}."_status"} = [$fn->{RETURN_TYPE}, $fn->{RETURN_TYPE}];
+  } elsif ($type->{DATA}->{TYPE} eq "SCALAR") {
+   $self->pidl_code("offset = dissect_ndr_$fn->{RETURN_TYPE}(tvb, offset, pinfo, tree, di, drep, hf\_$ifname\_$fn->{RETURN_TYPE}_status, &status);");
+   $self->pidl_code("if (status != 0)");
+   $self->pidl_code("\tcol_append_fstr(pinfo->cinfo, COL_INFO, \", Status: %d\", status);\n");
+   $return_types{$ifname}->{$fn->{RETURN_TYPE}."_status"} = [$fn->{RETURN_TYPE}, $fn->{RETURN_TYPE}];
+  }
+ }
+
+ $self->pidl_code("return offset;");
+ $self->deindent;
+ $self->pidl_code("}\n");
+ $self->pidl_fn_end("$ifname\_dissect\_$fn_name\_response");
+
+ $self->pidl_fn_start("$ifname\_dissect\_$fn_name\_request");
+ $self->pidl_code("static int");
+ $self->pidl_code("$ifname\_dissect\_${fn_name}_request(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, dcerpc_info* di _U_, guint8 *drep _U_)");
+ $self->pidl_code("{");
+ $self->indent;
+ $self->pidl_code("di->dcerpc_procedure_name=\"${fn_name}\";");
+ # [in] parameters appear in the request.
+ foreach (@{$fn->{ELEMENTS}}) {
+  if (grep(/in/,@{$_->{DIRECTION}})) {
+   $self->pidl_code("$dissectornames{$_->{NAME}}");
+   $self->pidl_code("offset = dissect_deferred_pointers(pinfo, tvb, offset, di, drep);");
+  }
+
+ }
+
+ $self->pidl_code("return offset;");
+ $self->deindent;
+ $self->pidl_code("}\n");
+ $self->pidl_fn_end("$ifname\_dissect\_$fn_name\_request");
+}
+
+# Emit a dissector for IDL struct $name: one element dissector per
+# member (wired up with any union switch variables they declare or
+# use), wrapped in a subtree item with NDR alignment handling
+# controlled by the LIBNDR_FLAG_NOALIGN property.
+sub Struct($$$$)
+{
+ my ($self,$e,$name,$ifname) = @_;
+ my $dissectorname = "$ifname\_dissect\_struct\_".StripPrefixes($name, $self->{conformance}->{strip_prefixes});
+
+ # Honour a NOEMIT conformance directive for this type.
+ return if (defined($self->{conformance}->{noemit}->{StripPrefixes($name, $self->{conformance}->{strip_prefixes})}));
+
+ $self->register_ett("ett_$ifname\_$name");
+
+ my $res = "";
+ my $varswitchs = {};
+ # will contain the switch var declaration;
+ my $vars = [];
+ my %switch_hash;
+ # First pass: collect every switch_is variable referenced by a member.
+ foreach (@{$e->{ELEMENTS}}) {
+  if (has_property($_, "switch_is")) {
+   $varswitchs->{$_->{PROPERTIES}->{switch_is}} = [];
+   $switch_hash{ $_->{PROPERTIES}->{switch_is}} = $_->{PROPERTIES}->{switch_is};
+  }
+ }
+ # Second pass: emit each member's dissector, declaring a local C
+ # variable for members that act as a switch discriminator.
+ foreach (@{$e->{ELEMENTS}}) {
+  my $switch_info = undef;
+
+  my $v = $_->{NAME};
+  if (scalar(grep {/^$v$/} keys(%$varswitchs)) == 1) {
+   # This element is one of the switch attribute
+   my $switch_raw_type = SwitchType($e, $_->{TYPE}, "uint32");
+   if (not defined($switch_raw_type)) {
+    die("Unknown type[$_->{TYPE}]\n");
+   }
+   my $switch_type = "g${switch_raw_type}";
+
+   if ($switch_type ne "") {
+    push @$vars, "$switch_type $v = 0;";
+   }
+   $switch_info = [ $_->{TYPE}, $v ];
+   $varswitchs->{$v} = $switch_info;
+  }
+
+  if (has_property($_, "switch_is")) {
+   my $varswitch = $_->{PROPERTIES}->{switch_is};
+   $switch_info = $varswitchs->{$varswitch};
+  }
+
+  $res.="\t".$self->Element($_, $name, $ifname, $switch_info, %switch_hash)."\n\n";
+ }
+
+ # $doalign: 1 = align before dissecting, 0 = explicitly disable
+ # alignment (NOALIGN flag), undef = leave alignment alone.
+ my $doalign = undef;
+ if ($e->{ALIGN} > 1 and not property_matches($e, "flag", ".*LIBNDR_FLAG_NOALIGN.*")) {
+  $doalign = 1;
+ } elsif (property_matches($e, "flag", ".*LIBNDR_FLAG_NOALIGN.*")) {
+  $doalign = 0;
+ }
+
+ $self->pidl_hdr("int $dissectorname(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, dcerpc_info* di _U_, guint8 *drep _U_, int hf_index _U_, guint32 param _U_);");
+
+ $self->pidl_fn_start($dissectorname);
+ $self->pidl_code("int");
+ $self->pidl_code("$dissectorname(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, dcerpc_info* di _U_, guint8 *drep _U_, int hf_index _U_, guint32 param _U_)");
+ $self->pidl_code("{");
+ $self->indent;
+ $self->pidl_code($_) foreach (@$vars);
+ $self->pidl_code("proto_item *item = NULL;");
+ if($res) {
+  $self->pidl_code("proto_tree *tree = NULL;");
+ }
+ if (defined($doalign) and $doalign == 0) {
+  $self->pidl_code("gboolean oldalign = di->no_align;");
+ }
+ $self->pidl_code("int old_offset;");
+ $self->pidl_code("");
+
+ if (defined($doalign)) {
+  if ($doalign == 1) {
+   $self->pidl_code("ALIGN_TO_$e->{ALIGN}_BYTES;");
+  }
+  if ($doalign == 0) {
+   $self->pidl_code("di->no_align = TRUE;");
+  }
+  $self->pidl_code("");
+ }
+
+ $self->pidl_code("old_offset = offset;");
+ $self->pidl_code("");
+ $self->pidl_code("if (parent_tree) {");
+ $self->indent;
+ $self->pidl_code("item = proto_tree_add_item(parent_tree, hf_index, tvb, offset, -1, ENC_NA);");
+ if($res) {
+  $self->pidl_code("tree = proto_item_add_subtree(item, ett_$ifname\_$name);");
+ }
+ $self->deindent;
+ $self->pidl_code("}");
+ $self->pidl_code("");
+
+ # $res already carries its own indentation; emit it at column zero.
+ $self->deindent;
+ $self->pidl_code("$res");
+ $self->indent;
+
+ $self->pidl_code("proto_item_set_len(item, offset-old_offset);\n");
+ if (defined($doalign) and $doalign == 1) {
+  $self->pidl_code("");
+  $self->pidl_code("if (di->call_data->flags & DCERPC_IS_NDR64) {");
+  $self->indent;
+  $self->pidl_code("ALIGN_TO_$e->{ALIGN}_BYTES;");
+  $self->deindent;
+  $self->pidl_code("}");
+ }
+ if (defined($doalign) and $doalign == 0) {
+  $self->pidl_code("");
+  $self->pidl_code("di->no_align = oldalign;");
+ }
+ $self->pidl_code("");
+ $self->pidl_code("return offset;");
+ $self->deindent;
+ $self->pidl_code("}\n");
+ $self->pidl_fn_end($dissectorname);
+
+ $self->register_type($name, "offset = $dissectorname(tvb,offset,pinfo,tree,di,drep,\@HF\@,\@PARAM\@);", "FT_NONE", "BASE_NONE", 0, "NULL", 0);
+}
+
+# Union($self, $e, $name, $ifname)
+# Emit a dissector function for an IDL union.  Each union arm becomes a
+# case in a C switch; the discriminant ("level") is read from the wire
+# when its NDR scalar type is known, otherwise taken from the caller's
+# param argument.
+sub Union($$$$)
+{
+	my ($self,$e,$name,$ifname) = @_;
+
+	my $dissectorname = "$ifname\_dissect_".StripPrefixes($name, $self->{conformance}->{strip_prefixes});
+
+	# Honour "noemit" directives from the conformance file.
+	return if (defined($self->{conformance}->{noemit}->{StripPrefixes($name, $self->{conformance}->{strip_prefixes})}));
+
+	$self->register_ett("ett_$ifname\_$name");
+
+	# Build the body of the C switch: one case label per union arm.
+	my $res = "";
+	foreach (@{$e->{ELEMENTS}}) {
+		$res.="\n\t\t$_->{CASE}:\n";
+		if ($_->{TYPE} ne "EMPTY") {
+			$res.="\t\t\t".$self->Element($_, $name, $ifname, undef, undef)."\n";
+		}
+		$res.="\t\tbreak;\n";
+	}
+
+	# Map the IDL switch type onto a glib scalar type and the matching
+	# NDR helper; both stay undef when no scalar mapping is known.
+	my $switch_type = undef;
+	my $switch_dissect = undef;
+	my $switch_raw_type = SwitchType($e, $e->{SWITCH_TYPE});
+	if (defined($switch_raw_type)) {
+		$switch_type = "g${switch_raw_type}";
+		$switch_dissect = "dissect_ndr_${switch_raw_type}";
+	}
+
+	$self->pidl_fn_start($dissectorname);
+	$self->pidl_code("static int");
+	$self->pidl_code("$dissectorname(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, dcerpc_info* di _U_, guint8 *drep _U_, int hf_index _U_, guint32 param _U_)");
+	$self->pidl_code("{");
+	$self->indent;
+	$self->pidl_code("proto_item *item = NULL;");
+	$self->pidl_code("proto_tree *tree = NULL;");
+	$self->pidl_code("int old_offset;");
+	if (!defined $switch_type) {
+		# No dissectable discriminant: caller must supply it via param.
+		$self->pidl_code("guint32 level = param;");
+	} else {
+		$self->pidl_code("$switch_type level;");
+	}
+	$self->pidl_code("");
+
+	$self->pidl_code("old_offset = offset;");
+	$self->pidl_code("if (parent_tree) {");
+	$self->indent;
+	$self->pidl_code("tree = proto_tree_add_subtree(parent_tree, tvb, offset, -1, ett_$ifname\_$name, &item, \"$name\");");
+	$self->deindent;
+	$self->pidl_code("}");
+
+	$self->pidl_code("");
+
+	if (defined $switch_type) {
+		$self->pidl_code("offset = $switch_dissect(tvb, offset, pinfo, tree, di, drep, hf_index, &level);");
+
+		if ($e->{ALIGN} > 1) {
+			$self->pidl_code("ALIGN_TO_$e->{ALIGN}_BYTES;");
+			$self->pidl_code("");
+		}
+	}
+
+
+	$self->pidl_code("switch(level) {$res\t}");
+	$self->pidl_code("proto_item_set_len(item, offset-old_offset);\n");
+	$self->pidl_code("");
+
+	$self->pidl_code("return offset;");
+	$self->deindent;
+	$self->pidl_code("}");
+	$self->pidl_fn_end($dissectorname);
+
+	$self->register_type($name, "offset = $dissectorname(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);", "FT_NONE", "BASE_NONE", 0, "NULL", 0);
+}
+
+# Emit a #define for an IDL constant into the generated header.
+# Scalar constants get parentheses around the value; array constants
+# (an ARRAY_LEN entry is present, e.g. string constants) do not.
+sub Const($$$)
+{
+	my ($self, $const, $ifname) = @_;
+
+	if (defined($const->{ARRAY_LEN}[0])) {
+		$self->pidl_hdr("#define $const->{NAME}\t $const->{VALUE}\n");
+	} else {
+		$self->pidl_hdr("#define $const->{NAME}\t( $const->{VALUE} )\n");
+	}
+}
+
+# A typedef is dissected exactly as its underlying data type.
+sub Typedef($$$$)
+{
+	my ($self,$e,$name,$ifname) = @_;
+
+	$self->Type($e->{DATA}, $name, $ifname);
+}
+
+# Print the original IDL declaration as a comment in the output, then
+# dispatch to the generator matching the type class (enum, struct, ...).
+sub Type($$$$)
+{
+	my ($self, $e, $name, $ifname) = @_;
+
+	$self->PrintIdl(DumpType($e->{ORIGINAL}));
+	# Anonymous dispatch table keyed on the node's TYPE tag.
+	{
+		ENUM => \&Enum,
+		STRUCT => \&Struct,
+		UNION => \&Union,
+		BITMAP => \&Bitmap,
+		TYPEDEF => \&Typedef,
+		PIPE => \&Pipe
+	}->{$e->{TYPE}}->($self, $e, $name, $ifname);
+}
+
+# Emit the proto_register_dcerpc_<iface>() function: registers the
+# protocol (when the interface has a UUID) plus all hf fields and ett
+# subtree indices collected while generating the dissectors.
+sub RegisterInterface($$)
+{
+	my ($self, $x) = @_;
+
+	$self->pidl_fn_start("proto_register_dcerpc_$x->{NAME}");
+	$self->pidl_code("void proto_register_dcerpc_$x->{NAME}(void)");
+	$self->pidl_code("{");
+	$self->indent;
+
+	$self->{res}->{headers} .= "void proto_register_dcerpc_$x->{NAME}(void);\n";
+
+	$self->{res}->{code}.=$self->DumpHfList()."\n";
+	$self->{res}->{code}.="\n".DumpEttList($self->{ett})."\n";
+
+	if (defined($x->{UUID})) {
+		# These can be changed to non-pidl_code names if the old
+		# dissectors in epan/dissectors are deleted.
+
+		my $name = uc($x->{NAME}) . " (pidl)";
+		my $short_name = uc($x->{NAME});
+		my $filter_name = $x->{NAME};
+
+		# IDL helpstring and conformance-file entries override the
+		# default protocol naming, in that order.
+		if (has_property($x, "helpstring")) {
+			$name = $x->{PROPERTIES}->{helpstring};
+		}
+
+		if (defined($self->{conformance}->{protocols}->{$x->{NAME}})) {
+			$short_name = $self->{conformance}->{protocols}->{$x->{NAME}}->{SHORTNAME};
+			$name = $self->{conformance}->{protocols}->{$x->{NAME}}->{LONGNAME};
+			$filter_name = $self->{conformance}->{protocols}->{$x->{NAME}}->{FILTERNAME};
+		}
+
+		$self->pidl_code("proto_dcerpc_$x->{NAME} = proto_register_protocol(".make_str($name).", ".make_str($short_name).", ".make_str($filter_name).");");
+
+		$self->pidl_code("proto_register_field_array(proto_dcerpc_$x->{NAME}, hf, array_length (hf));");
+		$self->pidl_code("proto_register_subtree_array(ett, array_length(ett));");
+	} else {
+		# No UUID: attach fields to the generic dcerpc protocol.
+		$self->pidl_code("proto_dcerpc = proto_get_id_by_filter_name(\"dcerpc\");");
+		$self->pidl_code("proto_register_field_array(proto_dcerpc, hf, array_length(hf));");
+		$self->pidl_code("proto_register_subtree_array(ett, array_length(ett));");
+	}
+
+	$self->deindent;
+	$self->pidl_code("}\n");
+	$self->pidl_fn_end("proto_register_dcerpc_$x->{NAME}");
+}
+
+# Emit proto_reg_handoff_dcerpc_<iface>(), which binds the generated
+# dissector table to the interface UUID/version via dcerpc_init_uuid().
+# Only emitted for interfaces that actually have a UUID.
+sub RegisterInterfaceHandoff($$)
+{
+	my ($self,$x) = @_;
+
+	if (defined($x->{UUID})) {
+		$self->pidl_fn_start("proto_reg_handoff_dcerpc_$x->{NAME}");
+		$self->pidl_code("void proto_reg_handoff_dcerpc_$x->{NAME}(void)");
+		$self->pidl_code("{");
+		$self->indent;
+		$self->pidl_code("dcerpc_init_uuid(proto_dcerpc_$x->{NAME}, ett_dcerpc_$x->{NAME},");
+		$self->pidl_code("\t&uuid_dcerpc_$x->{NAME}, ver_dcerpc_$x->{NAME},");
+		$self->pidl_code("\t$x->{NAME}_dissectors, hf_$x->{NAME}_opnum);");
+		$self->deindent;
+		$self->pidl_code("}");
+		$self->pidl_fn_end("proto_reg_handoff_dcerpc_$x->{NAME}");
+
+		$self->{res}->{headers} .= "void proto_reg_handoff_dcerpc_$x->{NAME}(void);\n";
+
+		# The opnum field is referenced by the handoff, so mark it used.
+		$self->{hf_used}->{"hf_$x->{NAME}_opnum"} = 1;
+	}
+}
+
+# Emit one #include line into the generated header for each file named
+# by an IDL include() directive, followed by a blank line.
+sub ProcessInclude
+{
+	my ($self, @includes) = @_;
+
+	$self->pidl_hdr("#include \"$_\"") for (@includes);
+	$self->pidl_hdr("");
+}
+
+# Turn each IDL import into an #include of the corresponding generated
+# packet-dcerpc-<name>.h header.  "security" is skipped deliberately
+# (presumably provided elsewhere -- confirm against packet-dcerpc-nt.h).
+sub ProcessImport
+{
+	my ($self, @imports) = @_;
+
+	foreach my $imp (@imports) {
+		next if ($imp eq "security");
+		# Strip the leading quote, the (optionally quoted) .idl
+		# suffix, and any directory components.
+		$imp =~ s/^\"//;
+		$imp =~ s/\.idl"?$//;
+		$imp =~ s/^.*\///;
+		$self->pidl_hdr("#include \"packet-dcerpc-$imp\.h\"");
+	}
+	$self->pidl_hdr("");
+}
+
+# Generate everything for one IDL interface: header guard, protocol/ett/hf
+# registration, UUID and version definitions, the function table and the
+# register/handoff functions.
+sub ProcessInterface($$)
+{
+	my ($self, $x) = @_;
+
+	push(@{$self->{conformance}->{strip_prefixes}}, $x->{NAME});
+
+	# Fix: use $x explicitly.  The original read uc($_->{NAME}), which
+	# only worked because the caller's foreach loop variable happened to
+	# alias the same interface node as $x.
+	my $define = "__PACKET_DCERPC_" . uc($x->{NAME}) . "_H";
+	$self->pidl_hdr("#ifndef $define");
+	$self->pidl_hdr("#define $define");
+	$self->pidl_hdr("");
+
+	$self->pidl_def("static gint proto_dcerpc_$x->{NAME} = -1;");
+	$self->register_ett("ett_dcerpc_$x->{NAME}");
+	$self->register_hf_field("hf_$x->{NAME}_opnum", "Operation", "$x->{NAME}.opnum", "FT_UINT16", "BASE_DEC", "NULL", 0, "");
+
+	if (defined($x->{UUID})) {
+		my $if_uuid = $x->{UUID};
+
+		$self->pidl_def("/* Version information */\n\n");
+
+		# The UUID property string is quoted, so all substr() offsets
+		# below are shifted by one to skip the leading quote.
+		$self->pidl_def("static e_guid_t uuid_dcerpc_$x->{NAME} = {");
+		$self->pidl_def("\t0x" . substr($if_uuid, 1, 8)
+  		. ", 0x" . substr($if_uuid, 10, 4)
+  		. ", 0x" . substr($if_uuid, 15, 4) . ",");
+		$self->pidl_def("\t{ 0x" . substr($if_uuid, 20, 2)
+  		. ", 0x" . substr($if_uuid, 22, 2)
+  		. ", 0x" . substr($if_uuid, 25, 2)
+  		. ", 0x" . substr($if_uuid, 27, 2)
+  		. ", 0x" . substr($if_uuid, 29, 2)
+  		. ", 0x" . substr($if_uuid, 31, 2)
+  		. ", 0x" . substr($if_uuid, 33, 2)
+  		. ", 0x" . substr($if_uuid, 35, 2) . " }");
+		$self->pidl_def("};");
+
+		# Major version is the low 16 bits; the numeric & already
+		# truncates a "major.minor" string, so the s/// is a no-op
+		# safety net.
+		my $maj = 0x0000FFFF & $x->{VERSION};
+		$maj =~ s/\.(.*)$//g;
+		$self->pidl_def("static guint16 ver_dcerpc_$x->{NAME} = $maj;");
+		$self->pidl_def("");
+	}
+
+	$return_types{$x->{NAME}} = {};
+
+	$self->Interface($x);
+	$self->pidl_code("\n".DumpFunctionTable($x));
+
+	# Register an hf field for every distinct function return type seen.
+	foreach (sort(keys %{$return_types{$x->{NAME}}})) {
+		my ($type, $desc) = @{$return_types{$x->{NAME}}->{$_}};
+		my $dt = $self->find_type($type);
+		$dt or die("Unable to find information about return type `$type'");
+		$self->register_hf_field("hf_$x->{NAME}_$_", $desc, "$x->{NAME}.$_", $dt->{FT_TYPE}, "BASE_HEX", $dt->{VALSSTRING}, 0, "");
+		$self->{hf_used}->{"hf_$x->{NAME}_$_"} = 1;
+	}
+
+	$self->RegisterInterface($x);
+	$self->RegisterInterfaceHandoff($x);
+
+	if (exists ($self->{conformance}->{header})) {
+		$self->pidl_hdr($self->{conformance}->{header});
+	}
+
+	$self->pidl_hdr("#endif /* $define */");
+}
+
+# Look up a registered type by name; returns the conformance type record
+# (a hashref) or undef when the type is unknown.
+sub find_type($$)
+{
+	my ($self, $type_name) = @_;
+
+	return $self->{conformance}->{types}->{$type_name};
+}
+
+# Record how to dissect a named type: the C code template to invoke plus
+# the hf metadata (FT type, base, mask, value string) and alignment.
+# The first registration for a name wins; duplicates are ignored.
+sub register_type($$$$$$$$)
+{
+	my ($self, $type,$call,$ft,$base,$mask,$vals,$length) = @_;
+
+	return if (defined($self->{conformance}->{types}->{$type}));
+
+	$self->{conformance}->{types}->{$type} = {
+		NAME => $type,
+		DISSECTOR_NAME => $call,
+		FT_TYPE => $ft,
+		BASE_TYPE => $base,
+		MASK => $mask,
+		VALSSTRING => $vals,
+		ALIGNMENT => $length
+	};
+}
+
+# Loads the default types: read the conformance file (if any), then
+# register the dissector templates for all built-in NDR scalar types
+# (integers, time types, GUIDs, SIDs, status codes, addresses, ...).
+sub Initialize($$)
+{
+	my ($self, $cnf_file) = @_;
+
+	$self->{conformance} = {
+		imports => {},
+		header_fields=> {}
+	};
+
+	# A missing conformance file is only a warning, not fatal.
+	ReadConformance($cnf_file, $self->{conformance}) or print STDERR "warning: No conformance file `$cnf_file'\n";
+
+	# uint8/16/32/64 and int8/16/32/64 in one loop.
+	foreach my $bytes (qw(1 2 4 8)) {
+		my $bits = $bytes * 8;
+		$self->register_type("uint$bits", "offset = PIDL_dissect_uint$bits(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);", "FT_UINT$bits", "BASE_DEC", 0, "NULL", $bytes);
+		$self->register_type("int$bits", "offset = PIDL_dissect_uint$bits(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);", "FT_INT$bits", "BASE_DEC", 0, "NULL", $bytes);
+	}
+
+	$self->register_type("uint3264", "offset = dissect_ndr_uint3264(tvb, offset, pinfo, tree, di, drep, \@HF\@, NULL);", "FT_UINT32", "BASE_DEC", 0, "NULL", 8);
+	$self->register_type("hyper", "offset = dissect_ndr_uint64(tvb, offset, pinfo, tree, di, drep, \@HF\@, NULL);", "FT_UINT64", "BASE_DEC", 0, "NULL", 8);
+	$self->register_type("udlong", "offset = dissect_ndr_duint32(tvb, offset, pinfo, tree, di, drep, \@HF\@, NULL);", "FT_UINT64", "BASE_DEC", 0, "NULL", 4);
+	$self->register_type("bool8", "offset = PIDL_dissect_uint8(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);","FT_INT8", "BASE_DEC", 0, "NULL", 1);
+	$self->register_type("char", "offset = PIDL_dissect_uint8(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);","FT_INT8", "BASE_DEC", 0, "NULL", 1);
+	$self->register_type("long", "offset = PIDL_dissect_uint32(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);","FT_INT32", "BASE_DEC", 0, "NULL", 4);
+	$self->register_type("dlong", "offset = dissect_ndr_duint32(tvb, offset, pinfo, tree, di, drep, \@HF\@, NULL);","FT_INT64", "BASE_DEC", 0, "NULL", 8);
+	$self->register_type("GUID", "offset = dissect_ndr_uuid_t(tvb, offset, pinfo, tree, di, drep, \@HF\@, NULL);","FT_GUID", "BASE_NONE", 0, "NULL", 4);
+	$self->register_type("policy_handle", "offset = PIDL_dissect_policy_hnd(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);","FT_BYTES", "BASE_NONE", 0, "NULL", 4);
+	$self->register_type("NTTIME", "offset = dissect_ndr_nt_NTTIME(tvb, offset, pinfo, tree, di, drep, \@HF\@);","FT_ABSOLUTE_TIME", "ABSOLUTE_TIME_LOCAL", 0, "NULL", 4);
+	$self->register_type("NTTIME_hyper", "offset = dissect_ndr_nt_NTTIME(tvb, offset, pinfo, tree, di, drep, \@HF\@);","FT_ABSOLUTE_TIME", "ABSOLUTE_TIME_LOCAL", 0, "NULL", 4);
+	$self->register_type("time_t", "offset = dissect_ndr_time_t(tvb, offset, pinfo,tree, di, drep, \@HF\@, NULL);","FT_ABSOLUTE_TIME", "ABSOLUTE_TIME_LOCAL", 0, "NULL", 4);
+	$self->register_type("NTTIME_1sec", "offset = dissect_ndr_nt_NTTIME(tvb, offset, pinfo, tree, di, drep, \@HF\@);", "FT_ABSOLUTE_TIME", "ABSOLUTE_TIME_LOCAL", 0, "NULL", 4);
+	$self->register_type("dom_sid28",
+		"offset = dissect_ndr_nt_SID28(tvb, offset, pinfo, tree, di, drep, \@HF\@);", "FT_STRING", "BASE_NONE", 0, "NULL", 4);
+	$self->register_type("SID",
+		"offset = dissect_ndr_nt_SID_with_options(tvb, offset, pinfo, tree, di, drep, param, \@HF\@);","FT_STRING", "BASE_NONE", 0, "NULL", 4);
+	$self->register_type("WERROR",
+		"offset = PIDL_dissect_uint32(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);","FT_UINT32", "BASE_DEC", 0, "VALS(WERR_errors)", 4);
+	$self->register_type("NTSTATUS",
+		"offset = PIDL_dissect_uint32(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);","FT_UINT32", "BASE_DEC", 0, "VALS(NT_errors)", 4);
+	$self->register_type("HRESULT",
+		"offset = PIDL_dissect_uint32(tvb, offset, pinfo, tree, di, drep, \@HF\@, \@PARAM\@);","FT_UINT32", "BASE_DEC", 0, "VALS(HRES_errors)", 4);
+	$self->register_type("ipv6address", "proto_tree_add_item(tree, \@HF\@, tvb, offset, 16, ENC_NA); offset += 16;", "FT_IPv6", "BASE_NONE", 0, "NULL", 16);
+	$self->register_type("ipv4address", "proto_tree_add_item(tree, \@HF\@, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4;", "FT_IPv4", "BASE_NONE", 0, "NULL", 4);
+
+}
+
+#####################################################################
+# Generate Wireshark parser and header code
+# Parse($self, $ndr, $idl_file, $h_filename, $cnf_file)
+# Top-level entry point: walks the NDR tree and returns the pair
+# ($parser_c_source, $header_source), or (undef, undef) when the
+# conformance file says not to emit a dissector at all.
+sub Parse($$$$$)
+{
+	my($self,$ndr,$idl_file,$h_filename,$cnf_file) = @_;
+
+	$self->Initialize($cnf_file);
+
+	return (undef, undef) if defined($self->{conformance}->{noemit_dissector});
+
+	my $notice =
+"/* DO NOT EDIT
+	This file was automatically generated by Pidl
+	from $idl_file and $cnf_file.
+
+	Pidl is a perl based IDL compiler for DCE/RPC idl files.
+	It is maintained by the Samba team, not the Wireshark team.
+	Instructions on how to download and install Pidl can be
+	found at https://gitlab.com/wireshark/wireshark/-/wikis/Pidl
+*/
+
+";
+
+	$self->{res}->{headers} = "\n";
+	$self->{res}->{headers} .= "#include \"config.h\"\n";
+
+	$self->{res}->{headers} .= "#include <glib.h>\n";
+	$self->{res}->{headers} .= "#include <string.h>\n";
+	$self->{res}->{headers} .= "#include <epan/packet.h>\n\n";
+
+	$self->{res}->{headers} .= "#include \"packet-dcerpc.h\"\n";
+	$self->{res}->{headers} .= "#include \"packet-dcerpc-nt.h\"\n";
+	$self->{res}->{headers} .= "#include \"packet-windows-common.h\"\n";
+
+	my $h_basename = basename($h_filename);
+
+	$self->{res}->{headers} .= "#include \"$h_basename\"\n";
+	$self->pidl_code("");
+
+	# Extra ett entries declared in the conformance file.
+	if (defined($self->{conformance}->{ett})) {
+		register_ett($self,$_) foreach(@{$self->{conformance}->{ett}})
+	}
+
+	# Wireshark protocol registration
+
+	foreach (@$ndr) {
+		$self->ProcessInterface($_) if ($_->{TYPE} eq "INTERFACE");
+		$self->ProcessImport(@{$_->{PATHS}}) if ($_->{TYPE} eq "IMPORT");
+		$self->ProcessInclude(@{$_->{PATHS}}) if ($_->{TYPE} eq "INCLUDE");
+	}
+
+	$self->{res}->{ett} = DumpEttDeclaration($self->{ett});
+	$self->{res}->{hf} = $self->DumpHfDeclaration();
+
+	# Assemble the .c source: notice, includes, declarations, any
+	# conformance override code, then the generated dissector bodies.
+	my $parser = $notice;
+	$parser.= $self->{res}->{headers};
+	$parser.=$self->{res}->{ett};
+	$parser.=$self->{res}->{hf};
+	$parser.=$self->{res}->{def};
+	if (exists ($self->{conformance}->{override})) {
+		$parser.=$self->{conformance}->{override};
+	}
+	$parser.=$self->{res}->{code};
+
+	my $header = $notice;
+	$header.=$self->{res}->{hdr};
+
+	# Warn about conformance entries that were never referenced.
+	$self->CheckUsed($self->{conformance});
+
+	return ($parser,$header);
+}
+
+###############################################################################
+# ETT
+###############################################################################
+
+# Queue a subtree (ett) index name for later declaration/registration.
+sub register_ett($$)
+{
+	my ($self, $ett_name) = @_;
+
+	push(@{$self->{ett}}, $ett_name);
+}
+
+# Render the C initialiser for the ett[] registration array from the
+# accumulated list of subtree index names.
+sub DumpEttList
+{
+	my ($ett) = @_;
+
+	my $entries = join('', map { "\t\t&$_,\n" } @$ett);
+	return "\tstatic gint *ett[] = {\n" . $entries . "\t};\n";
+}
+
+# Render the "static gint ett_... = -1;" declaration block for every
+# registered subtree index.
+sub DumpEttDeclaration
+{
+	my ($ett) = @_;
+
+	my $decls = join('', map { "static gint $_ = -1;\n" } @$ett);
+	return "\n/* Ett declarations */\n" . $decls . "\n";
+}
+
+###############################################################################
+# HF
+###############################################################################
+
+# Register one header field.  If the conformance file renamed this index,
+# return the replacement name instead of registering; otherwise store the
+# field metadata and return $index.  An empty blurb falls back to the
+# conformance file's fielddescription entry, when present.
+sub register_hf_field($$$$$$$$$)
+{
+	my ($self,$index,$name,$filter_name,$ft_type,$base_type,$valsstring,$mask,$blurb) = @_;
+
+	if (defined ($self->{conformance}->{hf_renames}->{$index})) {
+		$self->{conformance}->{hf_renames}->{$index}->{USED} = 1;
+		return $self->{conformance}->{hf_renames}->{$index}->{NEWNAME};
+	}
+
+	$self->{conformance}->{header_fields}->{$index} = {
+		INDEX => $index,
+		NAME => $name,
+		FILTER => $filter_name,
+		FT_TYPE => $ft_type,
+		BASE_TYPE => $base_type,
+		VALSSTRING => $valsstring,
+		MASK => $mask,
+		BLURB => $blurb
+	};
+
+	if ((not defined($blurb) or $blurb eq "") and
+			defined($self->{conformance}->{fielddescription}->{$index})) {
+		$self->{conformance}->{header_fields}->{$index}->{BLURB} =
+			$self->{conformance}->{fielddescription}->{$index}->{DESCRIPTION};
+		$self->{conformance}->{fielddescription}->{$index}->{USED} = 1;
+	}
+
+	return $index;
+}
+
+# Override the FT/BASE type of an already-registered header field.
+# Returns 1 on success, 0 when the field is unknown or was renamed by
+# the conformance file (renamed fields must not be retyped here).
+# Fixes: corrected grammar in the diagnostics ("its", "doesn't exist")
+# and added the missing trailing newlines so messages don't run together.
+sub change_hf_field_type($$$$)
+{
+	my ($self,$index,$ft_type,$base_type) = @_;
+
+	if (defined ($self->{conformance}->{hf_renames}->{$index})) {
+		print "Field $index has been renamed to ".$self->{conformance}->{hf_renames}->{$index}->{NEWNAME}." you can't change its type\n";
+		return 0;
+	}
+
+	if (!defined ($self->{conformance}->{header_fields}->{$index})) {
+		print "Field $index doesn't exist\n";
+		return 0;
+	}
+
+	$self->{conformance}->{header_fields}->{$index}->{FT_TYPE} = $ft_type;
+	$self->{conformance}->{header_fields}->{$index}->{BASE_TYPE} = $base_type;
+	return 1;
+}
+
+# Render the "static gint hf_... = -1;" declaration block for every
+# registered header field, sorted by field name for stable output.
+sub DumpHfDeclaration($)
+{
+	my ($self) = @_;
+
+	my $decls = "\n/* Header field declarations */\n";
+	foreach my $hf (sort(keys %{$self->{conformance}->{header_fields}})) {
+		$decls .= "static gint $hf = -1;\n";
+	}
+
+	return "$decls\n";
+}
+
+# Quote a string for C output via make_str(), or return the literal
+# token NULL when it is empty after unquoting and trimming whitespace.
+sub make_str_or_null($)
+{
+	my $text = shift;
+
+	# Drop one layer of surrounding double quotes, if present.
+	if (substr($text, 0, 1) eq "\"") {
+		$text = substr($text, 1, length($text) - 2);
+	}
+	$text =~ s/^\s*//;
+	$text =~ s/\s*$//;
+	return "NULL" if ($text eq "");
+	return make_str($text);
+}
+
+# Render the hf_register_info hf[] array from all registered header
+# fields, sorted by INDEX for stable output.
+sub DumpHfList($)
+{
+	my ($self) = @_;
+	my $res = "\tstatic hf_register_info hf[] = {\n";
+
+	foreach (sort {$a->{INDEX} cmp $b->{INDEX}} values %{$self->{conformance}->{header_fields}})
+	{
+		$res .= "\t{ &$_->{INDEX},\n".
+		"\t  { ".make_str($_->{NAME}).", ".make_str($_->{FILTER}).", $_->{FT_TYPE}, $_->{BASE_TYPE}, $_->{VALSSTRING}, $_->{MASK}, ".make_str_or_null($_->{BLURB}).", HFILL }},\n";
+	}
+
+	return $res."\t};\n";
+}
+
+
+###############################################################################
+# Function table
+###############################################################################
+
+# Render the dcerpc_sub_dissector table mapping each opnum to its
+# request/response dissector functions, NULL-terminated.
+sub DumpFunctionTable($)
+{
+	my $if = shift;
+
+	my $res = "static dcerpc_sub_dissector $if->{NAME}\_dissectors[] = {\n";
+	foreach (@{$if->{FUNCTIONS}}) {
+		# Function names are shown without the interface prefix.
+		my $fn_name = $_->{NAME};
+		$fn_name =~ s/^$if->{NAME}_//;
+		$res.= "\t{ $_->{OPNUM}, \"$fn_name\",\n";
+		$res.= "\t   $if->{NAME}_dissect_${fn_name}_request, $if->{NAME}_dissect_${fn_name}_response},\n";
+	}
+
+	$res .= "\t{ 0, NULL, NULL, NULL }\n";
+
+	return "$res};\n";
+}
+
+# Emit warnings for conformance-file entries (hf fields, renames,
+# dissector params, imports, types, descriptions, tfs strings) that were
+# declared but never referenced during generation.
+sub CheckUsed($$)
+{
+	my ($self, $conformance) = @_;
+	foreach (values %{$conformance->{header_fields}}) {
+		if (not defined($self->{hf_used}->{$_->{INDEX}})) {
+			warning($_->{POS}, "hf field `$_->{INDEX}' not used");
+		}
+	}
+
+	foreach (values %{$conformance->{hf_renames}}) {
+		if (not $_->{USED}) {
+			warning($_->{POS}, "hf field `$_->{OLDNAME}' not used");
+		}
+	}
+
+	foreach (values %{$conformance->{dissectorparams}}) {
+		if (not $_->{USED}) {
+			warning($_->{POS}, "dissector param never used");
+		}
+	}
+
+	foreach (values %{$conformance->{imports}}) {
+		if (not $_->{USED}) {
+			warning($_->{POS}, "import never used");
+		}
+	}
+
+	# Built-in types have no POS; only warn about conformance-declared ones.
+	foreach (values %{$conformance->{types}}) {
+		if (not $_->{USED} and defined($_->{POS})) {
+			warning($_->{POS}, "type never used");
+		}
+	}
+
+	foreach (values %{$conformance->{fielddescription}}) {
+		if (not $_->{USED}) {
+			warning($_->{POS}, "description never used");
+		}
+	}
+
+	foreach (values %{$conformance->{tfs}}) {
+		if (not $_->{USED}) {
+			warning($_->{POS}, "True/False description never used");
+		}
+	}
+}
+
+1;
diff --git a/tools/pidl/lib/Parse/Yapp/Driver.pm b/tools/pidl/lib/Parse/Yapp/Driver.pm
new file mode 100644
index 0000000..3652be0
--- /dev/null
+++ b/tools/pidl/lib/Parse/Yapp/Driver.pm
@@ -0,0 +1,471 @@
+#
+# Module Parse::Yapp::Driver
+#
+# This module is part of the Parse::Yapp package available on your
+# nearest CPAN
+#
+# Any use of this module in a standalone parser make the included
+# text under the same copyright as the Parse::Yapp module itself.
+#
+# This notice should remain unchanged.
+#
+# (c) Copyright 1998-2001 Francois Desarmenien, all rights reserved.
+# (see the pod text in Parse::Yapp module for use and distribution rights)
+#
+
+package Parse::Yapp::Driver;
+
+require 5.004;
+
+use strict;
+
+use vars qw ( $VERSION $COMPATIBLE $FILENAME );
+
+$VERSION = '1.05';
+$COMPATIBLE = '0.07';
+$FILENAME=__FILE__;
+
+use Carp;
+
+#Known parameters, all starting with YY (leading YY will be discarded)
+my(%params)=(YYLEX => 'CODE', 'YYERROR' => 'CODE', YYVERSION => '',
+ YYRULES => 'ARRAY', YYSTATES => 'ARRAY', YYDEBUG => '');
+#Mandatory parameters
+my(@params)=('LEX','RULES','STATES');
+
+# Construct a driver object.  The scalar refs stored in the object let
+# the generated parser and this driver share mutable parse state.
+# Croaks when the generated tables are older than $COMPATIBLE.
+sub new {
+	my($class)=shift;
+	my($errst,$nberr,$token,$value,$check,$dotpos);
+	my($self)={ ERROR => \&_Error,
+				ERRST => \$errst,
+				NBERR => \$nberr,
+				TOKEN => \$token,
+				VALUE => \$value,
+				DOTPOS => \$dotpos,
+				STACK => [],
+				DEBUG => 0,
+				CHECK => \$check };
+
+	_CheckParams( [], \%params, \@_, $self );
+
+	exists($$self{VERSION})
+	and	$$self{VERSION} < $COMPATIBLE
+	and	croak "Yapp driver version $VERSION ".
+			"incompatible with version $$self{VERSION}:\n".
+			"Please recompile parser module.";
+
+	ref($class)
+	and	$class=ref($class);
+
+	# bless() is the last expression, hence the return value.
+	bless($self,$class);
+}
+
+# Run the parser.  With DEBUG set, load and eval the instrumented copy
+# of _Parse (_DBParse, built by _DBLoad); otherwise call _Parse directly.
+sub YYParse {
+	my($self)=shift;
+	my($retval);
+
+	_CheckParams( \@params, \%params, \@_, $self );
+
+	if($$self{DEBUG}) {
+		_DBLoad();
+		$retval = eval '$self->_DBParse()';#Do not create stab entry on compile
+		$@ and die $@;
+	}
+	else {
+		$retval = $self->_Parse();
+	}
+	$retval
+}
+
+# Return the per-parser user data hash, creating it lazily on first use.
+sub YYData {
+	my($self)=shift;
+
+	exists($$self{USER})
+	or	$$self{USER}={};
+
+	$$self{USER};
+
+}
+
+# Clear the error status: tell the parser error recovery is complete.
+sub YYErrok {
+	my($self)=shift;
+
+	${$$self{ERRST}}=0;
+	undef;
+}
+
+# Number of syntax errors seen so far in this parse.
+sub YYNberr {
+	my($self)=shift;
+
+	${$$self{NBERR}};
+}
+
+# True while the parser is still recovering from a syntax error.
+sub YYRecovering {
+	my($self)=shift;

+	${$$self{ERRST}} != 0;
+}
+
+# Request that the parse abort (YYParse will return undef).
+sub YYAbort {
+	my($self)=shift;
+
+	${$$self{CHECK}}='ABORT';
+	undef;
+}
+
+# Request that the parse accept (YYParse will return the current semval).
+sub YYAccept {
+	my($self)=shift;
+
+	${$$self{CHECK}}='ACCEPT';
+	undef;
+}
+
+# Force a syntax error from within a rule action.
+sub YYError {
+	my($self)=shift;
+
+	${$$self{CHECK}}='ERROR';
+	undef;
+}
+
+# Semantic value of symbol $_[0] in the current rule (like yacc's $1..$n),
+# read from the parse stack; undef for an out-of-range index.
+sub YYSemval {
+	my($self)=shift;
+	my($index)= $_[0] - ${$$self{DOTPOS}} - 1;
+
+	$index < 0
+	and	-$index <= @{$$self{STACK}}
+	and	return $$self{STACK}[$index][1];
+
+	undef;	#Invalid index
+}
+
+# Get, or (with an argument) set, the current lookahead token.
+sub YYCurtok {
+	my($self)=shift;
+
+	@_
+	and	${$$self{TOKEN}}=$_[0];
+	${$$self{TOKEN}};
+}
+
+# Get, or (with an argument) set, the current lookahead token's value.
+sub YYCurval {
+	my($self)=shift;
+
+	@_
+	and	${$$self{VALUE}}=$_[0];
+	${$$self{VALUE}};
+}
+
+# Tokens the parser would currently accept (keys of the state's ACTIONS).
+sub YYExpect {
+	my($self)=shift;
+
+	keys %{$self->{STATES}[$self->{STACK}[-1][0]]{ACTIONS}}
+}
+
+# The lexer coderef this parser was built with.
+sub YYLexer {
+	my($self)=shift;
+
+	$$self{LEX};
+}
+
+
+#################
+# Private stuff #
+#################
+
+
+# Validate the (YYNAME => value) pairs in @$inarray against %$checklist
+# (expected ref types), strip the leading "YY" and store them into
+# %$outhash; croak on unknown/invalid params or missing mandatory ones.
+sub _CheckParams {
+	my($mandatory,$checklist,$inarray,$outhash)=@_;
+	my($prm,$value);
+	my($prmlst)={};
+
+	while(($prm,$value)=splice(@$inarray,0,2)) {
+        $prm=uc($prm);
+			exists($$checklist{$prm})
+		or	croak("Unknown parameter '$prm'");
+			ref($value) eq $$checklist{$prm}
+		or	croak("Invalid value for parameter '$prm'");
+        # unpack('@2A*', ...) drops the leading "YY" prefix.
+        $prm=unpack('@2A*',$prm);
+		$$outhash{$prm}=$value;
+	}
+	for (@$mandatory) {
+			exists($$outhash{$_})
+		or	croak("Missing mandatory parameter '".lc($_)."'");
+	}
+}
+
+# Default error callback used when the caller supplies none.
+sub _Error {
+	print "Parse error.\n";
+}
+
+# Build the debugging parser: re-read this very file, extract the text of
+# sub _Parse (from its "sub _Parse {" line to the "}#_Parse" marker),
+# un-comment the "#DBG>" lines, rename it to _DBParse and eval it.
+sub _DBLoad {
+	{
+		no strict 'refs';
+
+		exists(${__PACKAGE__.'::'}{_DBParse})#Already loaded ?
+	and	return;
+	}
+	my($fname)=__FILE__;
+	my(@drv);
+	open(DRV,"<$fname") or die "Report this as a BUG: Cannot open $fname";
+	while(<DRV>) {
+                	/^\s*sub\s+_Parse\s*{\s*$/ .. /^\s*}\s*#\s*_Parse\s*$/
+        	and     do {
+                	s/^#DBG>//;
+                	push(@drv,$_);
+        	}
+	}
+	close(DRV);
+
+	$drv[0]=~s/_P/_DBP/;
+	eval join('',@drv);
+}
+
+#Note that for loading debugging version of the driver,
+#this file will be parsed from 'sub _Parse' up to '}#_Parse' inclusive.
+#So, DO NOT remove comment at end of sub !!!
+# The LALR parse loop: shift/reduce against the generated STATES/RULES
+# tables, calling the lexer for lookahead and rule actions for semantic
+# values, with yacc-style 3-token error recovery.  The "#DBG>" comment
+# lines are the debug instrumentation re-enabled by _DBLoad; do not add
+# lines starting with that marker.
+sub _Parse {
+    my($self)=shift;
+
+	my($rules,$states,$lex,$error)
+    = @$self{ 'RULES', 'STATES', 'LEX', 'ERROR' };
+	my($errstatus,$nberror,$token,$value,$stack,$check,$dotpos)
+    = @$self{ 'ERRST', 'NBERR', 'TOKEN', 'VALUE', 'STACK', 'CHECK', 'DOTPOS' };
+
+#DBG>	my($debug)=$$self{DEBUG};
+#DBG>	my($dbgerror)=0;
+
+#DBG>	my($ShowCurToken) = sub {
+#DBG>		my($tok)='>';
+#DBG>		for (split('',$$token)) {
+#DBG>			$tok.=		(ord($_) < 32 or ord($_) > 126)
+#DBG>					?	sprintf('<%02X>',ord($_))
+#DBG>					:	$_;
+#DBG>		}
+#DBG>		$tok.='<';
+#DBG>	};
+
+	$$errstatus=0;
+	$$nberror=0;
+	($$token,$$value)=(undef,undef);
+	@$stack=( [ 0, undef ] );
+	$$check='';
+
+	while(1) {
+		my($actions,$act,$stateno);
+
+		$stateno=$$stack[-1][0];
+		$actions=$$states[$stateno];
+
+#DBG>	print STDERR ('-' x 40),"\n";
+#DBG>		$debug & 0x2
+#DBG>	and	print STDERR "In state $stateno:\n";
+#DBG>		$debug & 0x08
+#DBG>	and	print STDERR "Stack:[".
+#DBG>					 join(',',map { $$_[0] } @$stack).
+#DBG>					 "]\n";
+
+
+		if  (exists($$actions{ACTIONS})) {
+
+				defined($$token)
+            or	do {
+				($$token,$$value)=&$lex($self);
+#DBG>				$debug & 0x01
+#DBG>			and	print STDERR "Need token. Got ".&$ShowCurToken."\n";
+			};
+
+			$act=   exists($$actions{ACTIONS}{$$token})
+					?   $$actions{ACTIONS}{$$token}
+					:   exists($$actions{DEFAULT})
+						?   $$actions{DEFAULT}
+						:   undef;
+		}
+		else {
+			$act=$$actions{DEFAULT};
+#DBG>			$debug & 0x01
+#DBG>		and	print STDERR "Don't need token.\n";
+		}
+
+			defined($act)
+		and	do {
+
+				$act > 0
+			and	do {        #shift
+
+#DBG>				$debug & 0x04
+#DBG>			and	print STDERR "Shift and go to state $act.\n";
+
+					$$errstatus
+				and	do {
+					--$$errstatus;
+
+#DBG>					$debug & 0x10
+#DBG>				and	$dbgerror
+#DBG>				and	$$errstatus == 0
+#DBG>				and	do {
+#DBG>					print STDERR "**End of Error recovery.\n";
+#DBG>					$dbgerror=0;
+#DBG>				};
+				};
+
+
+				push(@$stack,[ $act, $$value ]);
+
+					$$token ne ''	#Don't eat the eof
+				and	$$token=$$value=undef;
+				next;
+			};
+
+			#reduce
+			my($lhs,$len,$code,@sempar,$semval);
+			($lhs,$len,$code)=@{$$rules[-$act]};
+
+#DBG>			$debug & 0x04
+#DBG>		and	$act
+#DBG>		and	print STDERR "Reduce using rule ".-$act." ($lhs,$len): ";
+
+				$act
+			or  $self->YYAccept();
+
+			$$dotpos=$len;
+
+				unpack('A1',$lhs) eq '@'	#In line rule
+			and	do {
+					$lhs =~ /^\@[0-9]+\-([0-9]+)$/
+				or  die "In line rule name '$lhs' ill formed: ".
+						"report it as a BUG.\n";
+				$$dotpos = $1;
+			};
+
+			@sempar =	$$dotpos
+						?   map { $$_[1] } @$stack[ -$$dotpos .. -1 ]
+						:   ();
+
+			$semval =	$code ? &$code( $self, @sempar )
+						: @sempar ? $sempar[0] : undef;
+
+			splice(@$stack,-$len,$len);
+
+				$$check eq 'ACCEPT'
+			and	do {
+
+#DBG>			$debug & 0x04
+#DBG>		and	print STDERR "Accept.\n";
+
+				return($semval);
+			};
+
+				$$check eq 'ABORT'
+			and	do {
+
+#DBG>			$debug & 0x04
+#DBG>		and	print STDERR "Abort.\n";
+
+				return(undef);
+
+			};
+
+#DBG>			$debug & 0x04
+#DBG>		and	print STDERR "Back to state $$stack[-1][0], then ";
+
+				$$check eq 'ERROR'
+			or  do {
+#DBG>				$debug & 0x04
+#DBG>			and	print STDERR
+#DBG>				    "go to state $$states[$$stack[-1][0]]{GOTOS}{$lhs}.\n";
+
+#DBG>				$debug & 0x10
+#DBG>			and	$dbgerror
+#DBG>			and	$$errstatus == 0
+#DBG>			and	do {
+#DBG>				print STDERR "**End of Error recovery.\n";
+#DBG>				$dbgerror=0;
+#DBG>			};
+
+			    push(@$stack,
+			         [ $$states[$$stack[-1][0]]{GOTOS}{$lhs}, $semval ]);
+			    $$check='';
+			    next;
+			};
+
+#DBG>			$debug & 0x04
+#DBG>		and	print STDERR "Forced Error recovery.\n";
+
+			$$check='';
+
+		};
+
+		#Error
+			$$errstatus
+		or   do {
+
+			$$errstatus = 1;
+			&$error($self);
+				$$errstatus # if 0, then YYErrok has been called
+			or  next;       # so continue parsing
+
+#DBG>			$debug & 0x10
+#DBG>		and	do {
+#DBG>			print STDERR "**Entering Error recovery.\n";
+#DBG>			++$dbgerror;
+#DBG>		};
+
+			++$$nberror;
+
+		};
+
+			$$errstatus == 3	#The next token is not valid: discard it
+		and	do {
+				$$token eq ''	# End of input: no hope
+			and	do {
+#DBG>				$debug & 0x10
+#DBG>			and	print STDERR "**At eof: aborting.\n";
+				return(undef);
+			};
+
+#DBG>			$debug & 0x10
+#DBG>		and	print STDERR "**Dicard invalid token ".&$ShowCurToken.".\n";
+
+			$$token=$$value=undef;
+		};
+
+		$$errstatus=3;
+
+		while(	  @$stack
+			  and (		not exists($$states[$$stack[-1][0]]{ACTIONS})
+			        or  not exists($$states[$$stack[-1][0]]{ACTIONS}{error})
+					or	$$states[$$stack[-1][0]]{ACTIONS}{error} <= 0)) {
+
+#DBG>			$debug & 0x10
+#DBG>		and	print STDERR "**Pop state $$stack[-1][0].\n";
+
+			pop(@$stack);
+		}
+
+			@$stack
+		or	do {
+
+#DBG>			$debug & 0x10
+#DBG>		and	print STDERR "**No state left on stack: aborting.\n";
+
+			return(undef);
+		};
+
+		#shift the error token
+
+#DBG>			$debug & 0x10
+#DBG>		and	print STDERR "**Shift \$error token and go to state ".
+#DBG>						 $$states[$$stack[-1][0]]{ACTIONS}{error}.
+#DBG>						 ".\n";
+
+		push(@$stack, [ $$states[$$stack[-1][0]]{ACTIONS}{error}, undef ]);
+
+	}
+
+    #never reached
+	croak("Error in driver logic. Please, report it as a BUG");
+
+}#_Parse
+#DO NOT remove comment
+#DO NOT remove comment
+
+1;
+
diff --git a/tools/pidl/lib/wscript_build b/tools/pidl/lib/wscript_build
new file mode 100644
index 0000000..54b3170
--- /dev/null
+++ b/tools/pidl/lib/wscript_build
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# install the pidl modules
+bld.INSTALL_FILES(bld.env.PERL_LIB_INSTALL_DIR,
+ '''
+ Parse/Pidl.pm
+ Parse/Pidl/Samba4.pm
+ Parse/Pidl/CUtil.pm
+ Parse/Pidl/Expr.pm
+ Parse/Pidl/Wireshark/Conformance.pm
+ Parse/Pidl/Wireshark/NDR.pm
+ Parse/Pidl/ODL.pm
+ Parse/Pidl/Dump.pm
+ Parse/Pidl/Util.pm
+ Parse/Pidl/Samba4/Header.pm
+ Parse/Pidl/Samba4/COM/Header.pm
+ Parse/Pidl/Samba4/COM/Proxy.pm
+ Parse/Pidl/Samba4/COM/Stub.pm
+ Parse/Pidl/Samba4/TDR.pm
+ Parse/Pidl/Samba4/NDR/Server.pm
+ Parse/Pidl/Samba4/NDR/Client.pm
+ Parse/Pidl/Samba4/NDR/Parser.pm
+ Parse/Pidl/Samba4/Python.pm
+ Parse/Pidl/Samba4/Template.pm
+ Parse/Pidl/IDL.pm
+ Parse/Pidl/Typelist.pm
+ Parse/Pidl/Samba3/ClientNDR.pm
+ Parse/Pidl/Samba3/ServerNDR.pm
+ Parse/Pidl/Compat.pm
+ Parse/Pidl/NDR.pm
+ ''',
+ flat=False)
+
+if not bld.CONFIG_SET('USING_SYSTEM_PARSE_YAPP_DRIVER'):
+ bld.INSTALL_FILES(bld.env.PERL_LIB_INSTALL_DIR,
+ 'Parse/Yapp/Driver.pm',
+ flat=False)
diff --git a/tools/pidl/pidl b/tools/pidl/pidl
new file mode 100755
index 0000000..e8e6941
--- /dev/null
+++ b/tools/pidl/pidl
@@ -0,0 +1,804 @@
+#!/usr/bin/env perl
+
+###################################################
+# package to parse IDL files and generate code for
+# rpc functions in Samba
+# Copyright tridge@samba.org 2000-2003
+# Copyright jelmer@samba.org 2005-2007
+# released under the GNU GPL
+
+=pod
+
+=head1 NAME
+
+pidl - An IDL compiler written in Perl
+
+=head1 SYNOPSIS
+
+pidl --help
+
+pidl [--outputdir[=OUTNAME]] [--includedir DIR...] [--parse-idl-tree] [--dump-idl-tree] [--dump-ndr-tree] [--header[=OUTPUT]] [--python[=OUTPUT]] [--ndr-parser[=OUTPUT]] [--client] [--server] [--warn-compat] [--quiet] [--verbose] [--template] [--ws-parser[=OUTPUT]] [--diff] [--dump-idl] [--tdr-parser[=OUTPUT]] [--samba3-ndr-client[=OUTPUT]] [--samba3-ndr-server[=OUTPUT]] [--typelib=[OUTPUT]] [<idlfile>.idl]...
+
+=head1 DESCRIPTION
+
+pidl is an IDL compiler written in Perl that aims to be somewhat
+compatible with the midl compiler. IDL is short for
+"Interface Definition Language".
+
+pidl can generate stubs for DCE/RPC server code, DCE/RPC
+client code and Wireshark dissectors for DCE/RPC traffic.
+
+IDL compilers like pidl take a description
+of an interface as their input and use it to generate C
+(though support for other languages may be added later) code that
+can use these interfaces, pretty print data sent
+using these interfaces, or even generate Wireshark
+dissectors that can parse data sent over the
+wire by these interfaces.
+
+pidl takes IDL files in the same format as is used by midl,
+converts it to a .pidl file (which contains pidl's internal representation of the interface) and can then generate whatever output you need.
+.pidl files should be used for debugging purposes only. Write your
+interface definitions in .idl format.
+
+The goal of pidl is to implement an IDL compiler that can be used
+while developing the RPC subsystem in Samba (for
+both marshalling/unmarshalling and debugging purposes).
+
+=head1 OPTIONS
+
+=over 4
+
+=item I<--help>
+
+Show list of available options.
+
+=item I<--version>
+
+Show pidl version
+
+=item I<--outputdir OUTNAME>
+
+Write output files to the specified directory. Defaults to the current
+directory.
+
+=item I<--includedir DIR>
+
+Add DIR to the search path used by the preprocessor. This option can be
+specified multiple times.
+
+=item I<--parse-idl-tree>
+
+Read internal tree structure from input files rather
+than assuming they contain IDL.
+
+=item I<--dump-idl>
+
+Generate a new IDL file. File will be named OUTNAME.idl.
+
+=item I<--header>
+
+Generate a C header file for the specified interface. Filename defaults to OUTNAME.h.
+
+=item I<--ndr-parser>
+
+Generate a C file and C header containing NDR parsers. The filename for
+the parser defaults to ndr_OUTNAME.c. The header filename will be the
+parser filename with the extension changed from .c to .h.
+
+=item I<--tdr-parser>
+
+Generate a C file and C header containing TDR parsers. The filename for
+the parser defaults to tdr_OUTNAME.c. The header filename will be the
+parser filename with the extension changed from .c to .h.
+
+=item I<--typelib>
+
+Write type information to the specified file.
+
+=item I<--server>
+
+Generate boilerplate for the RPC server that implements
+the interface. Filename defaults to ndr_OUTNAME_s.c.
+
+=item I<--template>
+
+Generate stubs for a RPC server that implements the interface. Output will
+be written to stdout.
+
+=item I<--ws-parser>
+
+Generate a Wireshark dissector (in C) and header file. The dissector filename
+defaults to packet-dcerpc-OUTNAME.c while the header filename defaults to
+packet-dcerpc-OUTNAME.h.
+
+Pidl will read additional data from a Wireshark conformance file if present.
+Such a file should have the same location as the IDL file but with the
+extension I<cnf> rather than I<idl>. See L<Parse::Pidl::Wireshark::Conformance>
+for details on the format of this file.
+
+=item I<--diff>
+
+Parse an IDL file, generate a new IDL file based on the internal data
+structures and see if there are any differences with the original IDL file.
+Useful for debugging pidl.
+
+=item I<--dump-idl-tree>
+
+Tell pidl to dump the internal tree representation of an IDL
+file to disk. Useful for debugging pidl.
+
+=item I<--dump-ndr-tree>
+
+Tell pidl to dump the internal NDR information tree it generated
+from the IDL file to disk. Useful for debugging pidl.
+
+=item I<--samba3-ndr-client>
+
+Generate client calls for Samba3, to be placed in rpc_client/. Instead of
+calling out to the code in Samba3's rpc_parse/, this will call out to
+Samba4's NDR code instead.
+
+=item I<--samba3-ndr-server>
+
+Generate server calls for Samba3, to be placed in rpc_server/. Instead of
+calling out to the code in Samba3's rpc_parse/, this will call out to
+Samba4's NDR code instead.
+
+=back
+
+=head1 IDL SYNTAX
+
+IDL files are always preprocessed using the C preprocessor.
+
+Pretty much everything in an interface (the interface itself, functions,
+parameters) can have attributes (or properties whatever name you give them).
+Attributes always prepend the element they apply to and are surrounded
+by square brackets ([]). Multiple attributes are separated by commas;
+arguments to attributes are specified between parentheses.
+
+See the section COMPATIBILITY for the list of attributes that
+pidl supports.
+
+C-style comments can be used.
+
+=head2 CONFORMANT ARRAYS
+
+A conformant array is one with that ends in [*] or []. The strange
+things about conformant arrays are that they can only appear as the last
+element of a structure (unless there is a pointer to the conformant array,
+of course) and the array size appears before the structure itself on the wire.
+
+So, in this example:
+
+ typedef struct {
+ long abc;
+ long count;
+ long foo;
+ [size_is(count)] long s[*];
+ } Struct1;
+
+it appears like this:
+
+ [size_is] [abc] [count] [foo] [s...]
+
+the first [size_is] field is the allocation size of the array, and
+occurs before the array elements and even before the structure
+alignment.
+
+Note that size_is() can refer to a constant, but that doesn't change
+the wire representation. It does not make the array a fixed array.
+
+midl.exe would write the above array as the following C header:
+
+ typedef struct {
+ long abc;
+ long count;
+ long foo;
+ long s[1];
+ } Struct1;
+
+pidl takes a different approach, and writes it like this:
+
+ typedef struct {
+ long abc;
+ long count;
+ long foo;
+ long *s;
+ } Struct1;
+
+=head2 VARYING ARRAYS
+
+A varying array looks like this:
+
+ typedef struct {
+ long abc;
+ long count;
+ long foo;
+ [size_is(count)] long *s;
+ } Struct1;
+
+This will look like this on the wire:
+
+ [abc] [count] [foo] [PTR_s] [count] [s...]
+
+=head2 FIXED ARRAYS
+
+A fixed array looks like this:
+
+ typedef struct {
+ long s[10];
+ } Struct1;
+
+The NDR representation looks just like 10 separate long
+declarations. The array size is not encoded on the wire.
+
+pidl also supports "inline" arrays, which are not part of the IDL/NDR
+standard. These are declared like this:
+
+ typedef struct {
+ uint32 foo;
+ uint32 count;
+ uint32 bar;
+ long s[count];
+ } Struct1;
+
+This appears like this:
+
+ [foo] [count] [bar] [s...]
+
+Fixed arrays are an extension added to support some of the strange
+embedded structures in security descriptors and spoolss.
+
+This section is by no means complete. See the OpenGroup and MSDN
+documentation for additional information.
+
+=head1 COMPATIBILITY WITH MIDL
+
+=head2 Missing features in pidl
+
+The following MIDL features are not (yet) implemented in pidl
+or are implemented with an incompatible interface:
+
+=over
+
+=item *
+
+Asynchronous communication
+
+=item *
+
+Typelibs (.tlb files)
+
+=item *
+
+Datagram support (ncadg_*)
+
+=back
+
+=head2 Supported attributes and statements
+
+in, out, ref, length_is, switch_is, size_is, uuid, case, default, string,
+unique, ptr, pointer_default, v1_enum, object, helpstring, range, local,
+call_as, endpoint, switch_type, progid, coclass, iid_is, represent_as,
+transmit_as, import, include, cpp_quote.
+
+=head2 PIDL Specific properties
+
+=over 4
+
+=item public
+
+The [public] property on a structure or union is a pidl extension that
+forces the generated pull/push functions to be non-static. This allows
+you to declare types that can be used between modules. If you don't
+specify [public] then pull/push functions for other than top-level
+functions are declared static.
+
+=item noprint
+
+The [noprint] property is a pidl extension that allows you to specify
+that pidl should not generate a ndr_print_*() function for that
+structure or union. This is used when you wish to define your own
+print function that prints a structure in a nicer manner. A good
+example is the use of [noprint] on dom_sid, which allows the
+pretty-printing of SIDs.
+
+=item value
+
+The [value(expression)] property is a pidl extension that allows you
+to specify the value of a field when it is put on the wire. This
+allows fields that always have a well-known value to be automatically
+filled in, thus making the API more programmer friendly. The
+expression can be any C expression.
+
+=item relative
+
+The [relative] property can be supplied on a pointer. When it is used
+it declares the pointer as a spoolss style "relative" pointer, which
+means it appears on the wire as an offset within the current
+encapsulating structure. This is not part of normal IDL/NDR, but it is
+a very useful extension as it avoids the manual encoding of many
+complex structures.
+
+=item subcontext(length)
+
+Specifies that a size of I<length>
+bytes should be read, followed by a blob of that size,
+which will be parsed as NDR.
+
+subcontext() is deprecated now, and should not be used in new code.
+Instead, use represent_as() or transmit_as().
+
+=item flag
+
+Specify boolean options, mostly used for
+low-level NDR options. Several options
+can be specified using the | character.
+Note that flags are inherited by substructures!
+
+=item nodiscriminant
+
+The [nodiscriminant] property on a union means that the usual uint16
+discriminant field at the start of the union on the wire is
+omitted. This is not normally allowed in IDL/NDR, but is used for some
+spoolss structures.
+
+=item charset(name)
+
+Specify that the array or string uses the specified
+charset. If this attribute is specified, pidl will
+take care of converting the character data from this format
+to the host format. Commonly used values are UCS2, DOS and UTF8.
+
+=back
+
+=head2 Unsupported MIDL properties or statements
+
+aggregatable, appobject, async_uuid, bindable, control,
+defaultbind, defaultcollelem, defaultvalue, defaultvtable, dispinterface,
+displaybind, dual, entry, first_is, helpcontext, helpfile, helpstringcontext,
+helpstringdll, hidden, idl_module, idl_quote, id, immediatebind, importlib,
+includelib, last_is, lcid, licensed, max_is, module,
+ms_union, no_injected_text, nonbrowsable, noncreatable, nonextensible, odl,
+oleautomation, optional, pragma, propget, propputref, propput, readonly,
+requestedit, restricted, retval, source, uidefault,
+usesgetlasterror, vararg, vi_progid, wire_marshal.
+
+=head1 EXAMPLES
+
+ # Generating a Wireshark parser
+ $ ./pidl --ws-parser -- atsvc.idl
+
+ # Generating a TDR parser and header
+ $ ./pidl --tdr-parser --header -- regf.idl
+
+ # Generating a Samba3 client and server
+ $ ./pidl --samba3-ndr-client --samba3-ndr-server -- dfs.idl
+
+ # Generating a Samba4 NDR parser, client and server
+ $ ./pidl --ndr-parser --client --server -- samr.idl
+
+=head1 SEE ALSO
+
+L<https://msdn.microsoft.com/en-us/library/windows/desktop/aa373864%28v=vs.85%29.aspx>
+L<https://gitlab.com/wireshark/wireshark/-/wikis/DCE/RPC>,
+L<https://www.samba.org/>,
+L<yapp(1)>
+
+=head1 LICENSE
+
+pidl is licensed under the GNU General Public License L<https://www.gnu.org/licenses/gpl.html>.
+
+=head1 AUTHOR
+
+pidl was written by Andrew Tridgell, Stefan Metzmacher, Tim Potter and Jelmer
+Vernooij. The current maintainer is Jelmer Vernooij.
+
+This manpage was written by Jelmer Vernooij, partially based on the original
+pidl README by Andrew Tridgell.
+
+=cut
+
+
+use strict;
+use FindBin qw($RealBin $Script);
+use lib "$RealBin/lib";
+use Getopt::Long;
+use File::Basename;
+use Parse::Pidl qw ( $VERSION );
+use Parse::Pidl::Util;
+use Parse::Pidl::ODL;
+
+#####################################################################
+# save a data structure into a file
+sub SaveStructure($$) # serialize an arbitrary pidl data structure to a file; inverse of LoadStructure
+{
+ my($filename,$v) = @_; # $filename: output path; $v: structure to dump
+ FileSave($filename, Parse::Pidl::Util::MyDumper($v)); # dies (via FileSave) if the file cannot be opened
+}
+
+#####################################################################
+# load a data structure from a file (as saved with SaveStructure)
+sub LoadStructure($) # read back a structure written by SaveStructure; returns undef if the file is unreadable
+{
+ my $f = shift; # path to a .pidl/.ndr dump file
+ my $contents = FileLoad($f);
+ defined $contents || return undef; # propagate read failure as undef
+ return eval "$contents"; # NOTE(review): executes the file as Perl — only safe on trusted, self-generated dumps
+}
+
+#####################################################################
+# read a file into a string
+sub FileLoad($) # slurp a whole file into a string; returns undef if the file cannot be opened
+{
+ my($filename) = shift;
+ local(*INPUTFILE); # localized bareword handle so nested calls do not clash
+ open(INPUTFILE, $filename) || return undef;
+ my($saved_delim) = $/; # remember the input record separator
+ undef $/; # slurp mode: read the entire file in one <>
+ my($data) = <INPUTFILE>;
+ close(INPUTFILE);
+ $/ = $saved_delim; # restore the separator for the rest of the program
+ return $data;
+}
+
+#####################################################################
+# write a string into a file
+sub FileSave($$) # write a string to a file, truncating any existing content; dies on open failure
+{
+ my($filename) = shift; # destination path
+ my($v) = shift; # full file contents
+ local(*FILE);
+ open(FILE, ">$filename") || die "can't open $filename";
+ print FILE $v;
+ close(FILE);
+}
+
+my(@opt_incdirs) = ();
+my($opt_help) = 0;
+my($opt_version) = 0;
+my($opt_parse_idl_tree) = 0;
+my($opt_dump_idl_tree);
+my($opt_dump_ndr_tree);
+my($opt_dump_idl) = 0;
+my($opt_diff) = 0;
+my($opt_header);
+my($opt_samba3_header);
+my($opt_samba3_parser);
+my($opt_samba3_server);
+my($opt_samba3_ndr_client);
+my($opt_samba3_ndr_server);
+my($opt_samba3_template) = 0;
+my($opt_template) = 0;
+my($opt_client);
+my($opt_typelib);
+my($opt_server);
+my($opt_ndr_parser);
+my($opt_tdr_parser);
+my($opt_ws_parser);
+my($opt_python);
+my($opt_quiet) = 0;
+my($opt_outputdir) = '.';
+my($opt_verbose) = 0;
+my($opt_warn_compat) = 0;
+my($opt_dcom_proxy);
+my($opt_com_header);
+
+#########################################
+# display help text
+sub ShowHelp()
+{
+print "perl IDL parser and code generator\n";
+ShowVersion();
+print"
+Copyright (C) Andrew Tridgell <tridge\@samba.org>
+Copyright (C) Jelmer Vernooij <jelmer\@samba.org>
+
+Usage: $Script [options] [--] <idlfile> [<idlfile>...]
+
+Generic Options:
+ --help this help page
+ --version show pidl version
+ --outputdir=OUTDIR put output in OUTDIR/ [.]
+ --warn-compat warn about incompatibility with other compilers
+ --quiet be quiet
+ --verbose be verbose
+ --includedir DIR search DIR for included files
+
+Debugging:
+ --dump-idl-tree[=FILE] dump internal representation to file [BASENAME.pidl]
+ --parse-idl-tree read internal representation instead of IDL
+ --dump-ndr-tree[=FILE] dump internal NDR data tree to file [BASENAME.ndr]
+ --dump-idl regenerate IDL file
+ --diff run diff on original IDL and dumped output
+ --typelib print type information
+
+Samba 4 output:
+ --header[=OUTFILE] create generic header file [BASENAME.h]
+ --ndr-parser[=OUTFILE] create a C NDR parser [ndr_BASENAME.c]
+ --client[=OUTFILE] create a C NDR client [ndr_BASENAME_c.c]
+ --tdr-parser[=OUTFILE] create a C TDR parser [tdr_BASENAME.c]
+ --python[=OUTFILE] create python wrapper file [py_BASENAME.c]
+ --server[=OUTFILE] create server boilerplate [ndr_BASENAME_s.c]
+ --template print a template for a pipe
+ --dcom-proxy[=OUTFILE] create DCOM proxy [ndr_BASENAME_p.c]
+ --com-header[=OUTFILE] create header for COM [com_BASENAME.h]
+
+Samba 3 output:
+ --samba3-ndr-client[=OUTF] create client calls for Samba3
+ using Samba4's NDR code [cli_BASENAME.c]
+ --samba3-ndr-server[=OUTF] create server call wrapper for Samba3
+ using Samba4's NDR code [srv_BASENAME.c]
+ --samba3-template print a template for a pipe
+
+Wireshark parsers:
+ --ws-parser[=OUTFILE] create Wireshark parser and header
+\n";
+ exit(0);
+}
+
+#########################################
+# Display version
+sub ShowVersion() # print the pidl version banner; $VERSION is imported from Parse::Pidl above
+{
+ print "perl IDL version $VERSION\n";
+}
+
+# main program
+my $result = GetOptions ( # options taking ':s' accept an optional value (output filename)
+ 'help|h|?' => \$opt_help,
+ 'version' => \$opt_version,
+ 'outputdir=s' => \$opt_outputdir,
+ 'dump-idl' => \$opt_dump_idl,
+ 'dump-idl-tree:s' => \$opt_dump_idl_tree,
+ 'parse-idl-tree' => \$opt_parse_idl_tree,
+ 'dump-ndr-tree:s' => \$opt_dump_ndr_tree,
+ 'samba3-ndr-client:s' => \$opt_samba3_ndr_client,
+ 'samba3-ndr-server:s' => \$opt_samba3_ndr_server,
+ 'samba3-template' => \$opt_samba3_template,
+ 'header:s' => \$opt_header,
+ 'server:s' => \$opt_server,
+ 'typelib:s' => \$opt_typelib,
+ 'tdr-parser:s' => \$opt_tdr_parser,
+ 'template' => \$opt_template,
+ 'ndr-parser:s' => \$opt_ndr_parser,
+ 'client:s' => \$opt_client,
+ 'ws-parser:s' => \$opt_ws_parser,
+ 'python' => \$opt_python, # NOTE(review): no ':s' here, but POD/help advertise --python[=OUTPUT] — verify intended
+ 'diff' => \$opt_diff,
+ 'dcom-proxy:s' => \$opt_dcom_proxy,
+ 'com-header:s' => \$opt_com_header,
+ 'quiet' => \$opt_quiet,
+ 'verbose' => \$opt_verbose,
+ 'warn-compat' => \$opt_warn_compat,
+ 'includedir=s@' => \@opt_incdirs # repeatable; accumulates preprocessor search dirs
+ );
+
+if (not $result) { # GetOptions already printed the parse error to STDERR
+ exit(1);
+}
+
+if ($opt_help) {
+ ShowHelp(); # ShowHelp exits itself; the exit(0) below is a defensive fallback
+ exit(0);
+}
+
+if ($opt_version) {
+ ShowVersion();
+ exit(0);
+}
+
+sub process_file($) # compile one IDL file: parse it, then emit every output format the user requested
+{
+ my $idl_file = shift; # path to a .idl (or, with --parse-idl-tree, a .pidl) file
+ my $outputdir = $opt_outputdir;
+ my $pidl; # parsed IDL tree
+ my $ndr; # derived NDR tree (built only if some backend needs it)
+
+ my $basename = basename($idl_file, ".idl"); # used to derive all default output filenames
+
+ unless ($opt_quiet) { print "Compiling $idl_file\n"; }
+
+ if ($opt_parse_idl_tree) {
+ $pidl = LoadStructure($idl_file); # input is a pre-parsed tree dump, not IDL source
+ defined $pidl || die "Failed to load $idl_file";
+ } else {
+ require Parse::Pidl::IDL;
+
+ $pidl = Parse::Pidl::IDL::parse_file($idl_file, \@opt_incdirs);
+ defined $pidl || die "Failed to parse $idl_file";
+ }
+
+ require Parse::Pidl::Typelist;
+ Parse::Pidl::Typelist::LoadIdl($pidl, $basename); # register the file's types in the global type registry
+
+ if (defined($opt_dump_idl_tree)) {
+ my($pidl_file) = ($opt_dump_idl_tree or "$outputdir/$basename.pidl"); # empty option value falls back to default name
+ SaveStructure($pidl_file, $pidl) or die "Failed to save $pidl_file\n";
+ }
+
+ if ($opt_dump_idl) {
+ require Parse::Pidl::Dump;
+ print Parse::Pidl::Dump($pidl); # NOTE(review): this calls sub Dump in package Parse::Pidl, not Parse::Pidl::Dump::Dump — confirm
+ }
+
+ if ($opt_diff) {
+ my($tempfile) = "$outputdir/$basename.tmp";
+ FileSave($tempfile, IdlDump::Dump($pidl)); # NOTE(review): package IdlDump is never loaded here; --diff likely dies — confirm
+ system("diff -wu $idl_file $tempfile"); # NOTE(review): filenames interpolated into a shell command unquoted
+ unlink($tempfile);
+ }
+
+ my $comh_filename = ($opt_com_header or "$outputdir/com_$basename.h");
+ if (defined($opt_com_header)) {
+ require Parse::Pidl::Samba4::COM::Header;
+ my $res = Parse::Pidl::Samba4::COM::Header::Parse($pidl,"$outputdir/ndr_$basename.h");
+ if ($res) {
+ FileSave($comh_filename, $res);
+ }
+ }
+
+ if (defined($opt_dcom_proxy)) {
+ require Parse::Pidl::Samba4::COM::Proxy;
+ my $res = Parse::Pidl::Samba4::COM::Proxy::Parse($pidl,$comh_filename);
+ if ($res) {
+ my ($client) = ($opt_dcom_proxy or "$outputdir/$basename\_p.c");
+ FileSave($client, $res);
+ }
+ }
+
+ if ($opt_warn_compat) {
+ require Parse::Pidl::Compat;
+ Parse::Pidl::Compat::Check($pidl); # emits warnings only; does not modify the tree
+ }
+
+ $pidl = Parse::Pidl::ODL::ODL2IDL($pidl, dirname($idl_file), \@opt_incdirs); # lower ODL constructs to plain IDL before code generation
+
+ if (defined($opt_ws_parser)) {
+ require Parse::Pidl::Wireshark::NDR;
+
+ my $cnffile = $idl_file;
+ $cnffile =~ s/\.idl$/\.cnf/; # conformance file lives next to the IDL, with .cnf extension
+
+ my $generator = new Parse::Pidl::Wireshark::NDR();
+ $generator->Initialize($cnffile); # NOTE(review): generator object discarded; presumably only side effects matter — confirm
+ }
+
+
+ if (defined($opt_ws_parser) or
+ defined($opt_client) or
+ defined($opt_server) or
+ defined($opt_header) or
+ defined($opt_ndr_parser) or
+ defined($opt_python) or
+ defined($opt_dump_ndr_tree) or
+ defined($opt_samba3_header) or
+ defined($opt_samba3_parser) or
+ defined($opt_samba3_server) or
+ defined($opt_samba3_ndr_client) or
+ defined($opt_samba3_ndr_server)) {
+ require Parse::Pidl::NDR;
+ $ndr = Parse::Pidl::NDR::Parse($pidl); # build the NDR tree once, shared by all backends below
+ }
+
+ if (defined($opt_dump_ndr_tree)) {
+ my($ndr_file) = ($opt_dump_ndr_tree or "$outputdir/$basename.ndr");
+ SaveStructure($ndr_file, $ndr) or die "Failed to save $ndr_file\n";
+ }
+
+ my $gen_header = ($opt_header or "$outputdir/$basename.h");
+ if (defined($opt_header)) {
+ require Parse::Pidl::Samba4::Header;
+ FileSave($gen_header, Parse::Pidl::Samba4::Header::Parse($ndr));
+ }
+
+ my $h_filename = "$outputdir/ndr_$basename.h";
+ my $c_header = "$outputdir/ndr_$basename\_c.h"; # may be overridden below; also consumed by the samba3 client backend
+ if (defined($opt_client) or defined($opt_samba3_ndr_client)) {
+ require Parse::Pidl::Samba4::NDR::Client;
+ my ($c_client) = ($opt_client or "$outputdir/ndr_$basename\_c.c");
+ $c_header = $c_client;
+ $c_header =~ s/\.c$/.h/; # header name derived from the client .c name
+
+ my $generator = new Parse::Pidl::Samba4::NDR::Client();
+ my ($srcd,$hdrd) = $generator->Parse(
+ $ndr,$gen_header,$h_filename,$c_header);
+
+ FileSave($c_client, $srcd);
+ FileSave($c_header, $hdrd);
+ }
+
+ if (defined($opt_python)) {
+ require Parse::Pidl::Samba4::Python;
+ my $generator = new Parse::Pidl::Samba4::Python();
+ my ($prsr) = $generator->Parse($basename, $ndr,
+ "$outputdir/ndr_$basename\_c.h", $h_filename);
+ FileSave("$outputdir/py_$basename.c", $prsr); # python wrapper filename is fixed; --python takes no value (see GetOptions)
+ }
+
+ if (defined($opt_server)) {
+ require Parse::Pidl::Samba4::NDR::Server;
+
+ FileSave(($opt_server or "$outputdir/ndr_$basename\_s.c"), Parse::Pidl::Samba4::NDR::Server::Parse($ndr,$h_filename));
+ }
+
+ if (defined($opt_ndr_parser)) {
+ my $parser_fname = ($opt_ndr_parser or "$outputdir/ndr_$basename.c");
+ require Parse::Pidl::Samba4::NDR::Parser;
+ my $generator = new Parse::Pidl::Samba4::NDR::Parser();
+ my ($header,$parser) = $generator->Parse($ndr, $gen_header, $h_filename);
+
+ FileSave($parser_fname, $parser);
+ FileSave($h_filename, $header);
+
+ }
+
+ if (defined($opt_ws_parser)) {
+ require Parse::Pidl::Wireshark::NDR;
+ my($eparser) = ($opt_ws_parser or "$outputdir/packet-dcerpc-$basename.c");
+ my $eheader = $eparser;
+ $eheader =~ s/\.c$/\.h/;
+ my $cnffile = $idl_file;
+ $cnffile =~ s/\.idl$/\.cnf/;
+
+ my $generator = new Parse::Pidl::Wireshark::NDR();
+ my ($dp, $dh) = $generator->Parse($ndr, $idl_file, $eheader, $cnffile);
+ FileSave($eparser, $dp) if defined($dp); # either output may be suppressed by the generator
+ FileSave($eheader, $dh) if defined($dh);
+ }
+
+ if (defined($opt_tdr_parser)) {
+ my $tdr_parser = ($opt_tdr_parser or "$outputdir/tdr_$basename.c");
+ my $tdr_header = $tdr_parser;
+ $tdr_header =~ s/\.c$/\.h/;
+ require Parse::Pidl::Samba4::TDR;
+ my $generator = new Parse::Pidl::Samba4::TDR();
+ my ($hdr,$prsr) = $generator->Parser($pidl, $tdr_header, $gen_header); # TDR works from the IDL tree, not the NDR tree
+ FileSave($tdr_parser, $prsr);
+ FileSave($tdr_header, $hdr);
+ }
+
+ if (defined($opt_typelib)) {
+ my $typelib = ($opt_typelib or "$outputdir/$basename.tlb");
+ require Parse::Pidl::Typelist;
+ FileSave($typelib, Parse::Pidl::Typelist::GenerateTypeLib());
+ }
+
+ if ($opt_template) {
+ require Parse::Pidl::Samba4::Template;
+ print Parse::Pidl::Samba4::Template::Parse($pidl); # templates go to stdout, not a file
+ }
+
+ if ($opt_samba3_template) {
+ require Parse::Pidl::Samba3::Template;
+ print Parse::Pidl::Samba3::Template::Parse($pidl);
+ }
+
+ if (defined($opt_samba3_ndr_client)) {
+ my $client = ($opt_samba3_ndr_client or "$outputdir/cli_$basename.c");
+ my $header = $client; $header =~ s/\.c$/\.h/;
+ require Parse::Pidl::Samba3::ClientNDR;
+ my $generator = new Parse::Pidl::Samba3::ClientNDR();
+ my ($c_code,$h_code) = $generator->Parse($ndr, $header, $c_header); # $c_header set by the --client branch above
+ FileSave($client, $c_code);
+ FileSave($header, $h_code);
+ }
+
+ if (defined($opt_samba3_ndr_server)) {
+ my $server = ($opt_samba3_ndr_server or "$outputdir/srv_$basename.c");
+ my $header = $server; $header =~ s/\.c$/\.h/;
+ require Parse::Pidl::Samba3::ServerNDR;
+ my ($c_code,$h_code) = Parse::Pidl::Samba3::ServerNDR::Parse($ndr, $header, $h_filename);
+ FileSave($server, $c_code);
+ FileSave($header, $h_code);
+ }
+
+}
+
+if (scalar(@ARGV) == 0) { # after option parsing, only input filenames remain in @ARGV
+ print "$Script: no input files\n";
+ exit(1);
+}
+
+process_file($_) foreach (@ARGV); # compile every input file in turn; a die aborts the whole run
diff --git a/tools/pidl/tests/Util.pm b/tools/pidl/tests/Util.pm
new file mode 100644
index 0000000..86b521b
--- /dev/null
+++ b/tools/pidl/tests/Util.pm
@@ -0,0 +1,181 @@
+# Some simple utility functions for pidl tests
+# Copyright (C) 2005-2006 Jelmer Vernooij
+# Published under the GNU General Public License
+
+package Util;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT = qw(test_samba4_ndr test_warnings test_errors); # the three helpers exported to every test script
+
+use strict;
+
+use FindBin qw($RealBin);
+use lib "$RealBin/../lib"; # locate the pidl modules relative to the test directory
+
+use Parse::Pidl::Samba4 qw(is_intree);
+
+use Parse::Pidl;
+my $warnings = ""; # accumulated warning text, reset by test_warnings()
+undef &Parse::Pidl::warning; # drop the original sub first to avoid a redefinition warning
+*Parse::Pidl::warning = sub { # monkey-patch: capture warnings into $warnings instead of printing
+ my ($e, $l) = @_;
+ if (defined($e)) { # $e carries source location when available
+ $warnings .= "$e->{FILE}:$e->{LINE}: $l\n";
+ } else {
+ $warnings .= "$l\n";
+ }
+};
+
+my $errors = ""; # accumulated error text, reset by test_errors()
+undef &Parse::Pidl::error;
+*Parse::Pidl::error = sub { # same capture scheme as warning() above
+ my ($e, $l) = @_;
+ if (defined($e)) {
+ $errors .= "$e->{FILE}:$e->{LINE}: $l\n";
+ } else {
+ $errors .= "$l\n";
+ }
+};
+
+use Test::More;
+use Parse::Pidl::IDL;
+use Parse::Pidl::NDR;
+use Parse::Pidl::Samba4::NDR::Parser;
+use Parse::Pidl::Samba4::Header;
+
+# Generate a Samba4 parser for an IDL fragment and run it with a specified
+# piece of code to check whether the parser works as expected
+sub test_samba4_ndr
+{
+ my ($name,$idl,$c,$extra) = @_;
+
+ $extra = "" unless defined($extra);
+
+ my $pidl = Parse::Pidl::IDL::parse_string("interface echo { $idl }; ", "<$name>");
+ ok(defined($pidl), "($name) parse idl");
+
+ my $pndr = Parse::Pidl::NDR::Parse($pidl);
+ ok(defined($pndr), "($name) generate NDR tree");
+
+ my $header = Parse::Pidl::Samba4::Header::Parse($pndr);
+ ok(defined($header), "($name) generate generic header");
+
+ my $generator = new Parse::Pidl::Samba4::NDR::Parser();
+ my ($ndrheader,$ndrparser) = $generator->Parse($pndr, undef, undef);
+ ok(defined($ndrparser), "($name) generate NDR parser");
+ ok(defined($ndrheader), "($name) generate NDR header");
+
+SKIP: {
+
+ my $flags;
+ if (system("pkg-config --exists ndr") == 0 and !is_intree()) {
+ $flags = `pkg-config --libs --cflags ndr`;
+ } else {
+ skip "no samba environment available, skipping compilation", 3;
+ }
+
+ my $main = "
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <util/data_blob.h>
+
+/* header start */
+$header
+/* header end */
+
+/* ndrheader start */
+$ndrheader
+/* ndrheader end */
+
+/* extra start */
+$extra
+/* extra end */
+
+/* ndrparser start */
+$ndrparser
+/* ndrparser end */
+
+/* main start */
+int main(int argc, const char **argv)
+{
+ TALLOC_CTX *mem_ctx = talloc_init(NULL);
+
+$c
+
+ talloc_free(mem_ctx);
+
+ return 0;
+}
+/* main end */
+\n";
+
+ my $main_debug = "# ".join("\n# ", split("\n", $main));
+
+ my $test_data_prefix = $ENV{TEST_DATA_PREFIX};
+ my $outfile;
+ if (defined($test_data_prefix)) {
+ $outfile = "$test_data_prefix/test-$name";
+ } else {
+ $outfile = "./test-$name";
+ }
+
+ my $cflags = $ENV{CFLAGS};
+ unless (defined($cflags)) {
+ $cflags = "";
+ }
+
+ my $ldflags = $ENV{LDFLAGS};
+ unless (defined($ldflags)) {
+ $ldflags = "";
+ }
+
+ my $cc = $ENV{CC};
+ unless (defined($cc)) {
+ $cc = "cc";
+ }
+
+ my $cmd = "$cc $cflags -x c - -o $outfile $flags $ldflags";
+ $cmd =~ s/\n//g;
+ open CC, "|$cmd";
+ print CC $main;
+ close CC;
+
+ ok(-f $outfile, "($name) compile");
+
+ my $ret = system($outfile, ()) >> 8;
+ print "# code:\n#\n$main_debug\n" if ($ret != 0);
+ print "# cmd: $cmd\n" if ($ret != 0);
+ print "# return code: $ret\n" if ($ret != 0);
+
+ ok($ret == 0, "($name) run");
+
+ ok(unlink($outfile), "($name) remove");
+
+ }
+}
+
+sub test_warnings($$) # run $code and assert the warnings it emitted match $exp exactly
+{
+ my ($exp, $code) = @_; # $exp: expected warning text; $code: coderef exercising pidl
+
+ $warnings = ""; # reset the buffer filled by the Parse::Pidl::warning override above
+
+ $code->();
+
+ is($warnings, $exp);
+}
+
+sub test_errors($$) # run $code and assert the errors it emitted match $exp exactly
+{
+ my ($exp, $code) = @_; # $exp: expected error text; $code: coderef exercising pidl
+ $errors = ""; # reset the buffer filled by the Parse::Pidl::error override above
+ $code->();
+
+ is($errors, $exp);
+}
+
+1;
diff --git a/tools/pidl/tests/cutil.pl b/tools/pidl/tests/cutil.pl
new file mode 100755
index 0000000..78c8bce
--- /dev/null
+++ b/tools/pidl/tests/cutil.pl
@@ -0,0 +1,21 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 7;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper);
+use Parse::Pidl::CUtil qw(get_pointer_to get_value_of); # string-level C expression helpers under test
+
+# NOTE(review): arguments below are (expected, got); Test::More documents is($got, $expected),
+# so failure diagnostics will label the values the wrong way round.
+is("&foo", get_pointer_to("foo")); # taking the address adds one '&'
+is("&(&foo)", get_pointer_to(get_pointer_to("foo"))); # nested address-of is parenthesised
+is("*foo", get_pointer_to("**foo")); # address-of a dereference cancels one '*'
+is("foo", get_pointer_to("*foo"));
+
+is("foo", get_value_of("&foo")); # dereferencing an address-of cancels the '&'
+is("*foo", get_value_of("foo")); # otherwise dereference prepends one '*'
+is("**foo", get_value_of("*foo"));
diff --git a/tools/pidl/tests/dump.pl b/tools/pidl/tests/dump.pl
new file mode 100755
index 0000000..d1a56f0
--- /dev/null
+++ b/tools/pidl/tests/dump.pl
@@ -0,0 +1,15 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 1;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Dump qw(DumpStruct); # renders a pidl struct node back to IDL text
+
+is (DumpStruct({ NAME => "foo", ELEMENTS => []}), # a struct with no elements dumps an empty body
+ "struct foo {\n}");
+
diff --git a/tools/pidl/tests/header.pl b/tools/pidl/tests/header.pl
new file mode 100755
index 0000000..db59484
--- /dev/null
+++ b/tools/pidl/tests/header.pl
@@ -0,0 +1,108 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 27;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper);
+use Parse::Pidl::Samba4::Header qw(
+ GenerateFunctionInEnv GenerateFunctionOutEnv GenerateStructEnv
+ EnvSubstituteValue);
+use Parse::Pidl::IDL qw(parse_string);
+use Parse::Pidl::NDR;
+
+sub parse_idl($)
+{
+ my $text = shift;
+ my $idl = Parse::Pidl::IDL::parse_string($text, "nofile");
+ my $ndr = Parse::Pidl::NDR::Parse($idl);
+ return Parse::Pidl::Samba4::Header::Parse($ndr);
+}
+
+like(parse_idl(""), qr/\/\* header auto-generated by pidl \*\/\n/sm, "includes work");
+like(parse_idl("interface x {}"), qr/\/\* header auto-generated by pidl \*\/\n/sm, "simple empty interface doesn't cause overhead");
+like(parse_idl("interface p { typedef struct { int y; } x; };"),
+ qr/.*#ifndef _HEADER_p\n#define _HEADER_p\n.+\n#endif \/\* _HEADER_p \*\/.*/ms, "ifdefs are created");
+like(parse_idl("interface p { typedef struct { int y; } x; };"),
+ qr/struct x.*{.*int32_t y;.*}.*;/sm, "interface member generated properly");
+like(parse_idl("interface x { void foo (void); };"),
+ qr/struct foo.*{\s+int _dummy_element;\s+};/sm, "void fn contains dummy element");
+like(parse_idl("interface x { void foo ([in] uint32 x); };"),
+ qr/struct foo.*{\s+struct\s+{\s+uint32_t x;\s+} in;\s+};/sm, "fn in arg works");
+like(parse_idl("interface x { void foo ([out] uint32 x); };"),
+ qr/struct foo.*{.*struct\s+{\s+uint32_t x;\s+} out;.*};/sm, "fn out arg works");
+like(parse_idl("interface x { void foo ([in,out] uint32 x); };"),
+ qr/struct foo.*{.*struct\s+{\s+uint32_t x;\s+} in;\s+struct\s+{\s+uint32_t x;\s+} out;.*};/sm, "fn in,out arg works");
+like(parse_idl("interface x { void foo (uint32 x); };"), qr/struct foo.*{.*struct\s+{\s+uint32_t x;\s+} in;\s+struct\s+{\s+uint32_t x;\s+} out;.*};/sm, "fn with no props implies in,out");
+like(parse_idl("interface p { struct x { int y; }; };"),
+ qr/struct x.*{.*int32_t y;.*}.*;/sm, "interface member generated properly");
+
+like(parse_idl("interface p { struct x { struct y z; }; };"),
+ qr/struct x.*{.*struct y z;.*}.*;/sm, "tagged type struct member");
+
+like(parse_idl("interface p { struct x { union y z; }; };"),
+ qr/struct x.*{.*union y z;.*}.*;/sm, "tagged type union member");
+
+like(parse_idl("interface p { struct x { }; };"),
+ qr/struct x.*{.*char _empty_;.*}.*;/sm, "empty struct");
+
+like(parse_idl("interface p { struct x; };"),
+ qr/struct x;/sm, "struct declaration");
+
+like(parse_idl("interface p { typedef struct x { int p; } x; };"),
+ qr/struct x.*{.*int32_t p;.*};/sm, "double struct declaration");
+
+like(parse_idl("cpp_quote(\"some-foo\")"),
+ qr/some-foo/sm, "cpp quote");
+
+# Make sure GenerateFunctionInEnv and GenerateFunctionOutEnv work
+my $fn = { ELEMENTS => [ { DIRECTION => ["in"], NAME => "foo" } ] };
+is_deeply({ "foo" => "r->in.foo" }, GenerateFunctionInEnv($fn));
+
+$fn = { ELEMENTS => [ { DIRECTION => ["out"], NAME => "foo" } ] };
+is_deeply({ "foo" => "r->out.foo" }, GenerateFunctionOutEnv($fn));
+
+$fn = { ELEMENTS => [ { DIRECTION => ["out", "in"], NAME => "foo" } ] };
+is_deeply({ "foo" => "r->in.foo" }, GenerateFunctionInEnv($fn));
+
+$fn = { ELEMENTS => [ { DIRECTION => ["out", "in"], NAME => "foo" } ] };
+is_deeply({ "foo" => "r->out.foo" }, GenerateFunctionOutEnv($fn));
+
+$fn = { ELEMENTS => [ { DIRECTION => ["in"], NAME => "foo" } ] };
+is_deeply({ "foo" => "r->in.foo" }, GenerateFunctionOutEnv($fn));
+
+$fn = { ELEMENTS => [ { DIRECTION => ["out"], NAME => "foo" } ] };
+is_deeply({ }, GenerateFunctionInEnv($fn));
+
+$fn = { ELEMENTS => [ { NAME => "foo" }, { NAME => "bar" } ] };
+is_deeply({ foo => "r->foo", bar => "r->bar", this => "r" },
+ GenerateStructEnv($fn, "r"));
+
+$fn = { ELEMENTS => [ { NAME => "foo" }, { NAME => "bar" } ] };
+is_deeply({ foo => "some->complex.variable->foo",
+ bar => "some->complex.variable->bar",
+ this => "some->complex.variable" },
+ GenerateStructEnv($fn, "some->complex.variable"));
+
+$fn = { ELEMENTS => [ { NAME => "foo", PROPERTIES => { value => 3 }} ] };
+
+my $env = GenerateStructEnv($fn, "r");
+EnvSubstituteValue($env, $fn);
+is_deeply($env, { foo => 3, this => "r" });
+
+$fn = { ELEMENTS => [ { NAME => "foo" }, { NAME => "bar" } ] };
+$env = GenerateStructEnv($fn, "r");
+EnvSubstituteValue($env, $fn);
+is_deeply($env, { foo => 'r->foo', bar => 'r->bar', this => "r" });
+
+$fn = { ELEMENTS => [ { NAME => "foo", PROPERTIES => { value => 0 }} ] };
+
+$env = GenerateStructEnv($fn, "r");
+EnvSubstituteValue($env, $fn);
+is_deeply($env, { foo => 0, this => "r" });
+
+
diff --git a/tools/pidl/tests/ndr.pl b/tools/pidl/tests/ndr.pl
new file mode 100755
index 0000000..b6fd489
--- /dev/null
+++ b/tools/pidl/tests/ndr.pl
@@ -0,0 +1,561 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 47;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper);
+use Parse::Pidl::NDR qw(GetElementLevelTable ParseElement align_type mapToScalar ParseType can_contain_deferred);
+
+# Case 1
+
+my $e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {},
+ 'POINTERS' => 0,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ 'IS_DEFERRED' => 0,
+ 'LEVEL_INDEX' => 0,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+my $ne = ParseElement($e, "unique", 0);
+is($ne->{ORIGINAL}, $e);
+is($ne->{NAME}, "v");
+is($ne->{ALIGN}, 1);
+is($ne->{TYPE}, "uint8");
+is_deeply($ne->{LEVELS}, [
+ {
+ 'IS_DEFERRED' => 0,
+ 'LEVEL_INDEX' => 0,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 2 : pointers
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"unique" => 1},
+ 'POINTERS' => 1,
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'TYPE' => 'uint8',
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 0,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 1,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 3 : double pointers
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"unique" => 1},
+ 'POINTERS' => 2,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 0,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 1,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 1,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 2,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 3 : ref pointers
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"ref" => 1},
+ 'POINTERS' => 1,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 0,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 1,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 3 : ref pointers
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"ref" => 1},
+ 'POINTERS' => 3,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 0,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 1,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 1,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 2,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 2,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 3,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 3 : ref pointers
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"ref" => 1},
+ 'POINTERS' => 3,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "ref", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 0,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 1,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 1,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 2,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 2,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 3,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 4 : top-level ref pointers
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"ref" => 1},
+ 'POINTERS' => 1,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'FUNCTION' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 0,
+ LEVEL => 'TOP'
+ },
+ {
+ 'IS_DEFERRED' => 0,
+ 'LEVEL_INDEX' => 1,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 4 : top-level ref pointers, triple with pointer_default("unique")
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"ref" => 1},
+ 'POINTERS' => 3,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'FUNCTION' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 0,
+ LEVEL => 'TOP'
+ },
+ {
+ LEVEL_INDEX => 1,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 1,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 2,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 2,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 3,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 4 : top-level unique pointers, triple with pointer_default("unique")
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"unique" => 1, "in" => 1},
+ 'POINTERS' => 3,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'FUNCTION' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "unique", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 0,
+ LEVEL => 'TOP'
+ },
+ {
+ LEVEL_INDEX => 1,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 1,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 2,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 2,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 3,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 4 : top-level unique pointers, triple with pointer_default("ref")
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"unique" => 1, "in" => 1},
+ 'POINTERS' => 3,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'FUNCTION' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "ref", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "unique",
+ POINTER_INDEX => 0,
+ LEVEL => 'TOP'
+ },
+ {
+ LEVEL_INDEX => 1,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 1,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 2,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 2,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 3,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# Case 4 : top-level ref pointers, triple with pointer_default("ref")
+#
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"ref" => 1},
+ 'POINTERS' => 3,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'FUNCTION' },
+ 'LINE' => 42 };
+
+is_deeply(GetElementLevelTable($e, "ref", 0), [
+ {
+ LEVEL_INDEX => 0,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 0,
+ LEVEL => 'TOP'
+ },
+ {
+ LEVEL_INDEX => 1,
+ IS_DEFERRED => 0,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 1,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ LEVEL_INDEX => 2,
+ IS_DEFERRED => 1,
+ TYPE => 'POINTER',
+ POINTER_TYPE => "ref",
+ POINTER_INDEX => 2,
+ LEVEL => 'EMBEDDED'
+ },
+ {
+ 'IS_DEFERRED' => 1,
+ 'LEVEL_INDEX' => 3,
+ 'DATA_TYPE' => 'uint8',
+ 'CONTAINS_DEFERRED' => 0,
+ 'TYPE' => 'DATA',
+ 'IS_SURROUNDING' => 0,
+ }
+]);
+
+# representation_type
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => { represent_as => "bar" },
+ 'POINTERS' => 0,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+$ne = ParseElement($e, undef, 0);
+is($ne->{REPRESENTATION_TYPE}, "bar");
+
+# representation_type
+$e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => { },
+ 'POINTERS' => 0,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+$ne = ParseElement($e, undef, 0);
+is($ne->{REPRESENTATION_TYPE}, "uint8");
+
+is(align_type("hyper"), 8);
+is(align_type("double"), 8);
+is(align_type("uint32"), 4);
+is(align_type("uint16"), 2);
+is(align_type("uint8"), 1);
+is(align_type({ TYPE => "STRUCT", "NAME" => "bla",
+ ELEMENTS => [ { TYPE => "uint16" } ] }), 4);
+is(align_type({ TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "hyper" } ] }), 8);
+is(align_type({ TYPE => "TYPEDEF", DATA => {
+ TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "hyper" } ] }}), 8);
+# typedef of struct without body
+is(align_type({ TYPE => "TYPEDEF", DATA => {
+ TYPE => "STRUCT", ELEMENTS => undef }}), 4);
+# struct without body
+is(align_type({ TYPE => "STRUCT", ELEMENTS => undef }), 4);
+# empty struct
+is(align_type({ TYPE => "STRUCT", ELEMENTS => [] }), 1);
+is(align_type({ TYPE => "STRUCT", "NAME" => "bla",
+ ELEMENTS => [ { TYPE => "uint8" } ] }), 4);
+
+is(mapToScalar("someverymuchnotexistingtype"), undef);
+is(mapToScalar("uint32"), "uint32");
+is(mapToScalar({TYPE => "ENUM", PARENT => { PROPERTIES => { enum8bit => 1 } } }), "uint8");
+is(mapToScalar({TYPE => "BITMAP", PROPERTIES => { bitmap64bit => 1 } }),
+ "hyper");
+is(mapToScalar({TYPE => "TYPEDEF", DATA => {TYPE => "ENUM", PARENT => { PROPERTIES => { enum8bit => 1 } } }}), "uint8");
+
+my $t;
+$t = {
+ TYPE => "STRUCT",
+ NAME => "foo",
+ SURROUNDING_ELEMENT => undef,
+ ELEMENTS => undef,
+ PROPERTIES => undef,
+ ORIGINAL => {
+ TYPE => "STRUCT",
+ NAME => "foo"
+ },
+ ALIGN => undef
+};
+is_deeply(ParseType($t->{ORIGINAL}, "ref", 0), $t);
+
+$t = {
+ TYPE => "UNION",
+ NAME => "foo",
+ SWITCH_TYPE => "uint32",
+ ELEMENTS => undef,
+ PROPERTIES => undef,
+ HAS_DEFAULT => 0,
+ IS_MS_UNION => 0,
+ ORIGINAL => {
+ TYPE => "UNION",
+ NAME => "foo"
+ },
+ ALIGN => undef
+};
+is_deeply(ParseType($t->{ORIGINAL}, "ref", 0), $t);
+
+ok(not can_contain_deferred("uint32"));
+ok(can_contain_deferred("some_unknown_type"));
+ok(can_contain_deferred({ TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "uint32", POINTERS => 40 } ]}));
+ok(can_contain_deferred({ TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "uint32", POINTERS => 40 } ]}}));
+ok(not can_contain_deferred({ TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "uint32" } ]}));
+ok(not can_contain_deferred({ TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "uint32" } ]}}));
+ok(can_contain_deferred({ TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "someunknowntype" } ]}));
+# Make sure the elements for a enum without body aren't filled in
+ok(not defined(ParseType({TYPE => "ENUM", NAME => "foo" }, "ref", 0)->{ELEMENTS}));
+# Make sure the elements for a bitmap without body aren't filled in
+ok(not defined(ParseType({TYPE => "BITMAP", NAME => "foo" }, "ref", 0)->{ELEMENTS}));
+# Make sure the elements for a union without body aren't filled in
+ok(not defined(ParseType({TYPE => "UNION", NAME => "foo" }, "ref", 0)->{ELEMENTS}));
diff --git a/tools/pidl/tests/ndr_align.pl b/tools/pidl/tests/ndr_align.pl
new file mode 100755
index 0000000..cc089ea
--- /dev/null
+++ b/tools/pidl/tests/ndr_align.pl
@@ -0,0 +1,143 @@
+#!/usr/bin/perl
+# NDR alignment tests
+# (C) 2005 Jelmer Vernooij. Published under the GNU GPL
+use strict;
+
+use Test::More tests => 5 * 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+test_samba4_ndr('align-uint8-uint16',
+'
+ typedef [public] struct {
+ uint8 x;
+ uint16 y;
+ } bla;
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct bla r;
+ uint8_t expected[] = { 0x0D, 0x00, 0xef, 0xbe };
+ DATA_BLOB expected_blob = { expected, 4 };
+ DATA_BLOB result_blob;
+ r.x = 13;
+ r.y = 0xbeef;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_bla(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
+
+test_samba4_ndr('align-uint8-uint32',
+'
+ typedef [public] struct {
+ uint8 x;
+ uint32 y;
+ } bla;
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct bla r;
+ uint8_t expected[] = { 0x0D, 0x00, 0x00, 0x00, 0xef, 0xbe, 0xef, 0xbe };
+ DATA_BLOB expected_blob = { expected, 8 };
+ DATA_BLOB result_blob;
+ r.x = 13;
+ r.y = 0xbeefbeef;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_bla(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
+
+
+test_samba4_ndr('align-uint8-hyper',
+'
+ typedef [public] struct {
+ uint8 x;
+ hyper y;
+ } bla;
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct bla r;
+ uint8_t expected[] = { 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xef, 0xbe, 0xef, 0xbe, 0xef, 0xbe, 0xef, 0xbe };
+ DATA_BLOB expected_blob = { expected, 16 };
+ DATA_BLOB result_blob;
+ r.x = 13;
+ r.y = 0xbeefbeefbeefbeefLLU;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_bla(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
+
+test_samba4_ndr('noalignflag-uint8-uint16',
+'
+ typedef [public] struct {
+ uint8 x;
+ uint16 y;
+ } bla;
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct bla r;
+ uint8_t expected[] = { 0x0D, 0xef, 0xbe };
+ DATA_BLOB expected_blob = { expected, 3 };
+ DATA_BLOB result_blob;
+ ndr->flags |= LIBNDR_FLAG_NOALIGN;
+
+ r.x = 13;
+ r.y = 0xbeef;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_bla(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
+
+test_samba4_ndr('align-blob-align2',
+'
+ typedef [public] struct {
+ uint8 x;
+ [flag(LIBNDR_FLAG_ALIGN2)] DATA_BLOB data;
+ uint8 y;
+ } blie;
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct blie r;
+ uint8_t data[] = { 0x01, 0x02 };
+ uint8_t expected[] = { 0x0D, 0x00, 0x0E };
+ DATA_BLOB expected_blob = { expected, 3 };
+ DATA_BLOB result_blob;
+
+ r.x = 13;
+ r.y = 14;
+ r.data.data = data;
+ r.data.length = 2;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_blie(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
diff --git a/tools/pidl/tests/ndr_alloc.pl b/tools/pidl/tests/ndr_alloc.pl
new file mode 100755
index 0000000..399fbd2
--- /dev/null
+++ b/tools/pidl/tests/ndr_alloc.pl
@@ -0,0 +1,118 @@
+#!/usr/bin/perl
+# NDR allocation tests
+# (C) 2005 Jelmer Vernooij. Published under the GNU GPL
+use strict;
+
+use Test::More tests => 5 * 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+# Check that an outgoing scalar pointer is allocated correctly
+
+test_samba4_ndr("alloc-scalar",
+'
+ typedef struct {
+ uint8 *x;
+ } bla;
+
+ [public] void TestAlloc([in] bla foo);
+','
+ uint8_t data[] = { 0xde, 0xad, 0xbe, 0xef, 0x03 };
+ DATA_BLOB b = { data, 5 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct TestAlloc r;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestAlloc(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (r.in.foo.x == NULL)
+ return 2;
+
+ if (*r.in.foo.x != 0x03)
+ return 3;
+'
+);
+
+# Check that an outgoing buffer pointer is allocated correctly
+test_samba4_ndr("alloc-buffer",
+'
+ typedef struct { uint8 data; } blie;
+ typedef struct { blie *x; } bla;
+
+ [public] void TestAlloc([in] bla foo);
+','
+ uint8_t data[] = { 0xde, 0xad, 0xbe, 0xef, 0x03 };
+ DATA_BLOB b = { data, 5 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct TestAlloc r;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestAlloc(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (r.in.foo.x == NULL)
+ return 2;
+
+ if (r.in.foo.x->data != 0x03)
+ return 3;
+'
+);
+
+# Check that ref pointers aren't allocated by default
+test_samba4_ndr("ref-noalloc-null",
+'
+ [public] void TestAlloc([in,ref] uint8 *t);
+','
+ uint8_t data[] = { 0x03 };
+ DATA_BLOB b = { data, 1 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct TestAlloc r;
+ r.in.t = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestAlloc(ndr, NDR_IN, &r)))
+ return 1;
+'
+);
+
+# Check that ref pointers aren't allocated by default
+test_samba4_ndr("ref-noalloc",
+'
+ [public] void TestAlloc([in,ref] uint8 *t);
+','
+ uint8_t data[] = { 0x03 };
+ DATA_BLOB b = { data, 1 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct TestAlloc r;
+ uint8_t x;
+ r.in.t = &x;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestAlloc(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (*r.in.t != 0x03)
+ return 2;
+'
+);
+
+# Check that an outgoing ref pointer is allocated correctly
+test_samba4_ndr("ref-alloc",
+'
+ [public] void TestAlloc([in,ref] uint8 *t);
+','
+ uint8_t data[] = { 0x03 };
+ DATA_BLOB b = { data, 1 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct TestAlloc r;
+ ndr->flags |= LIBNDR_FLAG_REF_ALLOC;
+ r.in.t = NULL;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestAlloc(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (r.in.t == NULL)
+ return 2;
+
+ if (*r.in.t != 0x03)
+ return 3;
+'
+);
diff --git a/tools/pidl/tests/ndr_array.pl b/tools/pidl/tests/ndr_array.pl
new file mode 100755
index 0000000..2a6b5bb
--- /dev/null
+++ b/tools/pidl/tests/ndr_array.pl
@@ -0,0 +1,37 @@
+#!/usr/bin/perl
+# Array testing
+# (C) 2005 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+
+use Test::More tests => 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+test_samba4_ndr(
+ 'Fixed-Array',
+
+ '[public] void Test([in] uint8 x[10]);',
+
+ '
+ uint8_t data[] = {1,2,3,4,5,6,7,8,9,10};
+ int i;
+ DATA_BLOB b;
+ struct ndr_pull *ndr;
+ struct Test r;
+
+ b.data = data;
+ b.length = 10;
+ ndr = ndr_pull_init_blob(&b, mem_ctx, NULL);
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_Test(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 10)
+ return 2;
+
+ for (i = 0; i < 10; i++) {
+ if (r.in.x[i] != i+1) return 3;
+ }
+');
diff --git a/tools/pidl/tests/ndr_compat.pl b/tools/pidl/tests/ndr_compat.pl
new file mode 100755
index 0000000..355e7f6
--- /dev/null
+++ b/tools/pidl/tests/ndr_compat.pl
@@ -0,0 +1,21 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+
+use Test::More tests => 2;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl;
+use Parse::Pidl::IDL;
+
+sub parse_idl($)
+{
+ my $idl = shift;
+ my $pidl = Parse::Pidl::IDL::parse_string("interface echo { $idl }; ", "nofile");
+ Parse::Pidl::NDR::Parse($pidl);
+}
+
+test_warnings("", sub {parse_idl("void x();"); });
+test_warnings("nofile:0: top-level [out] pointer `x' is not a [ref] pointer\n", sub {parse_idl("void x([out,unique] int *x);"); });
diff --git a/tools/pidl/tests/ndr_deprecations.pl b/tools/pidl/tests/ndr_deprecations.pl
new file mode 100755
index 0000000..86828e5
--- /dev/null
+++ b/tools/pidl/tests/ndr_deprecations.pl
@@ -0,0 +1,26 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 1;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper);
+use Parse::Pidl::NDR qw(ValidElement);
+
+# Case 1
+
+my $e = {
+ 'FILE' => 'foo.idl',
+ 'NAME' => 'v',
+ 'PROPERTIES' => {"subcontext" => 1},
+ 'POINTERS' => 0,
+ 'TYPE' => 'uint8',
+ 'PARENT' => { TYPE => 'STRUCT' },
+ 'LINE' => 42 };
+
+test_warnings("foo.idl:42: subcontext() is deprecated. Use represent_as() or transmit_as() instead\n",
+ sub { ValidElement($e); });
diff --git a/tools/pidl/tests/ndr_fullptr.pl b/tools/pidl/tests/ndr_fullptr.pl
new file mode 100755
index 0000000..cc6fca7
--- /dev/null
+++ b/tools/pidl/tests/ndr_fullptr.pl
@@ -0,0 +1,44 @@
+#!/usr/bin/perl
+# Simple tests for unique pointers
+# (C) 2006 Jelmer Vernooij <jelmer@samba.org>.
+# Published under the GNU General Public License.
+use strict;
+
+use Test::More tests => 1 * 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+SKIP: {
+ skip "full pointers not supported yet", 8;
+
+test_samba4_ndr("fullptr-push-dup",
+'
+ [public] uint16 echo_TestFull([in,ptr] uint32 *x, [in,ptr] uint32 *y);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ uint32_t v = 13;
+ struct echo_TestFull r;
+ r.in.x = &v;
+ r.in.y = &v;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestFull(ndr, NDR_IN, &r))) {
+ fprintf(stderr, "push failed\n");
+ return 1;
+ }
+
+ if (ndr->offset != 12) {
+ fprintf(stderr, "Offset(%d) != 12\n", ndr->offset);
+ return 2;
+ }
+
+ if (ndr->data[0] != ndr->data[8] ||
+ ndr->data[1] != ndr->data[9] ||
+ ndr->data[2] != ndr->data[10] ||
+ ndr->data[3] != ndr->data[11]) {
+ fprintf(stderr, "Data incorrect\n");
+ return 3;
+ }
+');
+}
diff --git a/tools/pidl/tests/ndr_refptr.pl b/tools/pidl/tests/ndr_refptr.pl
new file mode 100755
index 0000000..d5dd839
--- /dev/null
+++ b/tools/pidl/tests/ndr_refptr.pl
@@ -0,0 +1,526 @@
+#!/usr/bin/perl
+# Simple tests for pidl's handling of ref pointers, based
+# on tridge's ref_notes.txt
+# (C) 2005 Jelmer Vernooij <jelmer@samba.org>.
+# Published under the GNU General Public License.
+use strict;
+
+use Test::More tests => 22 * 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+test_samba4_ndr("noptr-push",
+' typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in] xstruct foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ uint16_t v = 13;
+ struct echo_TestRef r;
+ r.in.foo.x = v;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r))) {
+ fprintf(stderr, "push failed\n");
+ return 1;
+ }
+
+ if (ndr->offset != 2) {
+ fprintf(stderr, "Offset(%d) != 2\n", ndr->offset);
+ return 2;
+ }
+
+ if (ndr->data[0] != 13 || ndr->data[1] != 0) {
+ fprintf(stderr, "Data incorrect\n");
+ return 3;
+ }
+');
+
+test_samba4_ndr("ptr-embedded-push",
+' typedef struct {
+ uint16 *x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in] xstruct foo);
+',
+'
+ uint16_t v = 13;
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo.x = &v;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 6)
+ return 2;
+
+ if (ndr->data[0] == 0 && ndr->data[1] == 0 &&
+ ndr->data[2] == 0 && ndr->data[3] == 0)
+ return 3;
+
+ if (ndr->data[4] != 13 || ndr->data[5] != 0)
+ return 4;
+');
+
+test_samba4_ndr("ptr-embedded-push-null",
+' typedef struct {
+ uint16 *x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in] xstruct foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo.x = NULL;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 4)
+ return 2;
+
+ if (ndr->data[0] != 0 || ndr->data[1] != 0 ||
+ ndr->data[2] != 0 || ndr->data[3] != 0)
+ return 3;
+');
+
+test_samba4_ndr("refptr-embedded-push",
+'
+ typedef struct {
+ [ref] uint16 *x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in] xstruct foo);
+',
+'
+ uint16_t v = 13;
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo.x = &v;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 6)
+ return 2;
+
+ if (ndr->data[0] == 0 && ndr->data[1] == 0 &&
+ ndr->data[2] == 0 && ndr->data[3] == 0)
+ return 3;
+
+ if (ndr->data[4] != 13 || ndr->data[5] != 0)
+ return 4;
+');
+
+test_samba4_ndr("refptr-embedded-push-null",
+'
+ typedef struct {
+ [ref] uint16 *x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in] xstruct foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo.x = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+ /* Windows gives [client runtime error 0x6f4] */
+');
+
+test_samba4_ndr("ptr-top-push",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in] xstruct *foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ struct xstruct s;
+ s.x = 13;
+ r.in.foo = &s;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 2)
+ return 2;
+
+ if (ndr->data[0] != 13 || ndr->data[1] != 0)
+ return 3;
+');
+
+test_samba4_ndr("ptr-top-push-null",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in] xstruct *foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ /* Windows gives [client runtime error 0x6f4] */
+');
+
+
+test_samba4_ndr("refptr-top-push",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in,ref] xstruct *foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ struct xstruct s;
+ s.x = 13;
+ r.in.foo = &s;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 2)
+ return 2;
+
+ if (ndr->data[0] != 13 || ndr->data[1] != 0)
+ return 3;
+');
+
+test_samba4_ndr("refptr-top-push-null",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in,ref] xstruct *foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ /* Windows gives [client runtime error 0x6f4] */
+');
+
+
+test_samba4_ndr("uniqueptr-top-push",
+' typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in,unique] xstruct *foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ struct xstruct s;
+ s.x = 13;
+ r.in.foo = &s;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 6)
+ return 2;
+
+ if (ndr->data[0] == 0 && ndr->data[1] == 0 &&
+ ndr->data[2] == 0 && ndr->data[3] == 0)
+ return 3;
+
+ if (ndr->data[4] != 13 || ndr->data[5] != 0)
+ return 4;
+');
+
+test_samba4_ndr("uniqueptr-top-push-null",
+' typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] uint16 echo_TestRef([in,unique] xstruct *foo);
+',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo = NULL;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 4)
+ return 2;
+
+ if (ndr->data[0] != 0 || ndr->data[1] != 0 ||
+ ndr->data[2] != 0 || ndr->data[3] != 0)
+ return 3;
+');
+
+
+test_samba4_ndr("ptr-top-out-pull",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] void echo_TestRef([out] xstruct *foo);
+',
+'
+ uint8_t data[] = { 0x0D, 0x00 };
+ DATA_BLOB b = { data, 2 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct xstruct s;
+ struct echo_TestRef r;
+
+ r.out.foo = &s;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_echo_TestRef(ndr, NDR_OUT, &r)))
+ return 1;
+
+ if (!r.out.foo)
+ return 2;
+
+ if (r.out.foo->x != 13)
+ return 3;
+');
+
+test_samba4_ndr("ptr-top-out-pull-null",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] void echo_TestRef([out] xstruct *foo);
+',
+'
+ uint8_t data[] = { 0x0D, 0x00 };
+ DATA_BLOB b = { data, 2 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct echo_TestRef r;
+
+ r.out.foo = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_pull_echo_TestRef(ndr, NDR_OUT, &r)))
+ return 1;
+
+ /* Windows gives [client runtime error 0x6f4] */
+');
+
+
+test_samba4_ndr("refptr-top-out-pull",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] void echo_TestRef([out,ref] xstruct *foo);
+',
+'
+ uint8_t data[] = { 0x0D, 0x00 };
+ DATA_BLOB b = { data, 2 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct xstruct s;
+ struct echo_TestRef r;
+
+ r.out.foo = &s;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_echo_TestRef(ndr, NDR_OUT, &r)))
+ return 1;
+
+ if (!r.out.foo)
+ return 2;
+
+ if (r.out.foo->x != 13)
+ return 3;
+');
+
+test_samba4_ndr("refptr-top-out-pull-null",
+'
+ typedef struct {
+ uint16 x;
+ } xstruct;
+
+ [public] void echo_TestRef([out,ref] xstruct *foo);
+',
+'
+ uint8_t data[] = { 0x0D, 0x00 };
+ DATA_BLOB b = { data, 2 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL, NULL);
+ struct echo_TestRef r;
+
+ r.out.foo = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_pull_echo_TestRef(ndr, NDR_OUT, &r)))
+ return 1;
+
+ /* Windows gives [client runtime error 0x6f4] */
+');
+
+
+test_samba4_ndr("ptr-top-push-double",
+'
+ [public] void echo_TestRef([in] uint16 **foo);
+',
+' struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ uint16_t v = 13;
+ uint16_t *pv = &v;
+ r.in.foo = &pv;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 6)
+ return 2;
+
+ if (ndr->data[0] == 0 && ndr->data[1] == 0 &&
+ ndr->data[2] == 0 && ndr->data[3] == 0)
+ return 3;
+
+ if (ndr->data[4] != 0x0D || ndr->data[5] != 0x00)
+ return 4;
+');
+
+SKIP: {
+ skip "ptr-top-push-double-sndnull is known to fail", 8;
+
+test_samba4_ndr("ptr-top-push-double-sndnull",
+'
+ [public] void echo_TestRef([in] uint16 **foo);
+',
+' struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ uint16_t *pv = NULL;
+ r.in.foo = &pv;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 4)
+ return 2;
+
+ if (ndr->data[0] != 0 || ndr->data[1] != 0 ||
+ ndr->data[2] != 0 || ndr->data[3] != 0)
+ return 3;
+');
+}
+
+test_samba4_ndr("ptr-top-push-double-fstnull",
+'
+ [public] void echo_TestRef([in] uint16 **foo);
+',
+' struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ /* Windows gives [client runtime error 0x6f4] */
+
+');
+
+
+test_samba4_ndr("refptr-top-push-double",
+'
+ [public] void echo_TestRef([in,ref] uint16 **foo);
+',
+' struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ uint16_t v = 13;
+ uint16_t *pv = &v;
+ r.in.foo = &pv;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 6)
+ return 2;
+
+ if (ndr->data[0] == 0 && ndr->data[1] == 0 &&
+ ndr->data[2] == 0 && ndr->data[3] == 0)
+ return 3;
+
+ if (ndr->data[4] != 0x0D || ndr->data[5] != 0x00)
+ return 4;
+');
+
+SKIP: {
+
+ skip "refptr-top-push-double-sndnull is known to fail", 8;
+
+test_samba4_ndr("refptr-top-push-double-sndnull",
+'
+ [public] void echo_TestRef([in,ref] uint16 **foo);
+',
+' struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ uint16_t *pv = NULL;
+ r.in.foo = &pv;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 4)
+ return 2;
+
+ if (ndr->data[0] != 0 || ndr->data[1] != 0 ||
+ ndr->data[2] != 0 || ndr->data[3] != 0)
+ return 3;
+');
+}
+
+test_samba4_ndr("refptr-top-push-double-fstnull",
+'
+ [public] void echo_TestRef([in,ref] uint16 **foo);
+',
+' struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ r.in.foo = NULL;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ /* Windows gives [client runtime error 0x6f4] */
+
+');
+
+SKIP: {
+ skip "ignore-ptrs are not supported yet", 8;
+test_samba4_ndr("ignore-ptr",
+'
+ [public] void echo_TestRef([in,ignore] uint16 *foo, [in] uint16 *bar);
+',
+' struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct echo_TestRef r;
+ uint16_t v = 10;
+ r.in.foo = &v;
+ r.in.bar = &v;
+
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_push_echo_TestRef(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (ndr->offset != 4)
+ return 2;
+');
+}
diff --git a/tools/pidl/tests/ndr_represent.pl b/tools/pidl/tests/ndr_represent.pl
new file mode 100755
index 0000000..2d65fb9
--- /dev/null
+++ b/tools/pidl/tests/ndr_represent.pl
@@ -0,0 +1,71 @@
+#!/usr/bin/perl
+# NDR represent_as() / transmit_as() tests
+# (C) 2006 Jelmer Vernooij. Published under the GNU GPL
+use strict;
+
+use Test::More tests => 2 * 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+test_samba4_ndr('represent_as-simple',
+'
+ void bla([in,represent_as(uint32)] uint8 x);
+',
+'
+ uint8_t expected[] = { 0x0D };
+ DATA_BLOB in_blob = { expected, 1 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&in_blob, NULL, NULL);
+ struct bla r;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_bla(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ if (r.in.x != 13)
+ return 2;
+',
+'
+enum ndr_err_code ndr_uint8_to_uint32(uint8_t from, uint32_t *to)
+{
+ *to = from;
+ return NDR_ERR_SUCCESS;
+}
+
+enum ndr_err_code ndr_uint32_to_uint8(uint32_t from, uint8_t *to)
+{
+ *to = from;
+ return NDR_ERR_SUCCESS;
+}
+'
+);
+
+test_samba4_ndr('transmit_as-simple',
+'
+ void bla([in,transmit_as(uint32)] uint8 x);
+',
+'
+ uint8_t expected[] = { 0x0D };
+ DATA_BLOB in_blob = { expected, 1 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&in_blob, NULL, NULL);
+ struct bla r;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_bla(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ if (r.in.x != 13)
+ return 2;
+',
+'
+enum ndr_err_code ndr_uint8_to_uint32(uint8_t from, uint32_t *to)
+{
+ *to = from;
+ return NDR_ERR_SUCCESS;
+}
+
+enum ndr_err_code ndr_uint32_to_uint8(uint32_t from, uint8_t *to)
+{
+ *to = from;
+ return NDR_ERR_SUCCESS;
+}
+'
+);
diff --git a/tools/pidl/tests/ndr_simple.pl b/tools/pidl/tests/ndr_simple.pl
new file mode 100755
index 0000000..15e07d5
--- /dev/null
+++ b/tools/pidl/tests/ndr_simple.pl
@@ -0,0 +1,28 @@
+#!/usr/bin/perl
+# Some simple tests for pidl
+# (C) 2005 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+
+use Test::More tests => 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+test_samba4_ndr("simple", "void Test(); ",
+"
+ uint8_t data[] = { 0x02 };
+ uint8_t result;
+ DATA_BLOB b;
+ struct ndr_pull *ndr;
+
+ b.data = data;
+ b.length = 1;
+ ndr = ndr_pull_init_blob(&b, mem_ctx, NULL);
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_uint8(ndr, NDR_SCALARS, &result)))
+ return 1;
+
+ if (result != 0x02)
+ return 2;
+");
diff --git a/tools/pidl/tests/ndr_string.pl b/tools/pidl/tests/ndr_string.pl
new file mode 100755
index 0000000..8e8b8ec
--- /dev/null
+++ b/tools/pidl/tests/ndr_string.pl
@@ -0,0 +1,192 @@
+#!/usr/bin/perl
+# String tests for pidl
+# (C) 2005 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+
+use Test::More tests => 6 * 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+test_samba4_ndr("string-pull-empty",
+' [public] void TestString([in,flag(STR_ASCII|LIBNDR_FLAG_STR_SIZE4)] string data);',
+'
+ uint8_t data[] = { 0x00, 0x00, 0x00, 0x00 };
+ DATA_BLOB b = { data, 4 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL);
+ struct TestString r;
+ r.in.data = NULL;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestString(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (r.in.data == NULL)
+ return 2;
+
+ if (r.in.data[0] != 0)
+ return 3;
+');
+
+test_samba4_ndr("string-ascii-pull",
+'
+ [public] void TestString([in,flag(STR_ASCII|LIBNDR_FLAG_STR_SIZE4)] string data);
+',
+'
+ uint8_t data[] = { 0x03, 0x00, 0x00, 0x00,
+ \'f\', \'o\', \'o\', 0 };
+ DATA_BLOB b = { data, 8 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL);
+ struct TestString r;
+ r.in.data = NULL;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestString(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (r.in.data == NULL)
+ return 2;
+
+ if (strncmp(r.in.data, "foo", 3) != 0)
+ return 3;
+
+ if (r.in.data[4] != 0)
+ return 4;
+');
+
+test_samba4_ndr("string-wchar-fixed-array-01",
+'
+ typedef struct {
+ uint32 l1;
+ [string,charset(UTF16)] uint16 str[6];
+ uint32 l2;
+ } TestStringStruct;
+
+ [public] void TestString([in,ref] TestStringStruct *str);
+',
+'
+ uint8_t data[] = { 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00,
+ \'f\', 0x00, \'o\', 0x00,
+ \'o\', 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00
+ };
+ DATA_BLOB b = { data, sizeof(data) };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL);
+ struct TestString r;
+ struct TestStringStruct str;
+ r.in.str = &str;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestString(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (r.in.str == NULL)
+ return 2;
+
+ if (r.in.str->l1 != 0x00000001)
+ return 3;
+
+ if (strncmp(str.str, "foo", 3) != 0)
+ return 4;
+
+ if (r.in.str->str[4] != 0)
+ return 5;
+
+ if (r.in.str->l2 != 0x00000002)
+ return 6;
+');
+
+test_samba4_ndr("string-wchar-fixed-array-02",
+'
+ typedef struct {
+ uint32 l1;
+ [string,charset(UTF16)] uint16 str[6];
+ uint32 l2;
+ } TestStringStruct;
+
+ [public] void TestString([in,ref] TestStringStruct *str);
+',
+'
+ uint8_t data[] = { 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00,
+ \'f\', 0x00, \'o\', 0x00,
+ \'o\', 0x00, \'b\', 0x00,
+ \'a\', 0x00, \'r\', 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00
+ };
+ DATA_BLOB b = { data, sizeof(data) };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL);
+ struct TestString r;
+ struct TestStringStruct str;
+ r.in.str = &str;
+
+ /* the string terminator is wrong */
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestString(ndr, NDR_IN, &r)))
+ return 1;
+');
+
+test_samba4_ndr("string-wchar-fixed-array-03",
+'
+ typedef struct {
+ uint32 l1;
+ [string,charset(UTF16)] uint16 str[6];
+ uint32 l2;
+ } TestStringStruct;
+
+ [public] void TestString([in,ref] TestStringStruct *str);
+',
+'
+ uint8_t data[] = { 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00,
+ \'f\', 0x00, \'o\', 0x00,
+ \'o\', 0x00, \'b\', 0x00,
+ \'a\', 0x00, \'r\', 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00
+ };
+ DATA_BLOB b = { data, sizeof(data) };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL);
+ struct TestString r;
+ struct TestStringStruct str;
+ r.in.str = &str;
+
+ /* the length 0x07 is to large */
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestString(ndr, NDR_IN, &r)))
+ return 1;
+');
+
+SKIP: {
+ skip "doesn't seem to work yet", 8;
+
+test_samba4_ndr("string-out",
+'
+ [public] void TestString([out,string,charset(UNIX)] uint8 **data);
+',
+'
+ uint8_t data[] = { 0x03, 0x00, 0x00, 0x00,
+ \'f\', \'o\', \'o\', 0 };
+ DATA_BLOB b = { data, 8 };
+ struct ndr_pull *ndr = ndr_pull_init_blob(&b, NULL);
+ struct TestString r;
+ char *str = NULL;
+ r.out.data = &str;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_pull_TestString(ndr, NDR_IN, &r)))
+ return 1;
+
+ if (r.out.data == NULL)
+ return 2;
+
+ if (*r.out.data == NULL)
+ return 3;
+
+ if (strncmp(r.out.data, "foo", 3) != 0)
+ return 4;
+
+ if (r.out.data[4] != 0)
+ return 5;
+');
+}
diff --git a/tools/pidl/tests/ndr_tagtype.pl b/tools/pidl/tests/ndr_tagtype.pl
new file mode 100755
index 0000000..3f9b717
--- /dev/null
+++ b/tools/pidl/tests/ndr_tagtype.pl
@@ -0,0 +1,66 @@
+#!/usr/bin/perl
+# Support for tagged types
+# (C) 2005 Jelmer Vernooij. Published under the GNU GPL
+use strict;
+
+use Test::More tests => 3 * 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_samba4_ndr);
+
+test_samba4_ndr('struct-notypedef', '[public] struct bla { uint8 x; }; ',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct bla r;
+ uint8_t expected[] = { 0x0D };
+ DATA_BLOB expected_blob = { expected, 1 };
+ DATA_BLOB result_blob;
+ r.x = 13;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_STRUCT_bla(ndr, NDR_SCALARS|NDR_BUFFERS, &r)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
+
+test_samba4_ndr('struct-notypedef-used', '[public] struct bla { uint8 x; };
+ [public] void myfn([in] struct bla r); ',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct myfn fn;
+ uint8_t expected[] = { 0x0D };
+ DATA_BLOB expected_blob = { expected, 1 };
+ DATA_BLOB result_blob;
+ fn.in.r.x = 13;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_myfn(ndr, NDR_IN, &fn)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
+
+
+test_samba4_ndr('struct-notypedef-embedded', 'struct bla { uint8 x; };
+ [public] struct myst { struct bla r; }; ',
+'
+ struct ndr_push *ndr = ndr_push_init_ctx(NULL, NULL);
+ struct myst st;
+ uint8_t expected[] = { 0x0D };
+ DATA_BLOB expected_blob = { expected, 1 };
+ DATA_BLOB result_blob;
+ st.r.x = 13;
+
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_push_STRUCT_myst(ndr, NDR_IN, &st)))
+ return 1;
+
+ result_blob = ndr_push_blob(ndr);
+
+ if (data_blob_cmp(&result_blob, &expected_blob) != 0)
+ return 2;
+');
diff --git a/tools/pidl/tests/parse_idl.pl b/tools/pidl/tests/parse_idl.pl
new file mode 100755
index 0000000..14138a3
--- /dev/null
+++ b/tools/pidl/tests/parse_idl.pl
@@ -0,0 +1,243 @@
+#!/usr/bin/perl
+# Some simple tests for pidls parsing routines
+# (C) 2005 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+
+use Test::More tests => 65 * 2 + 7;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_errors);
+use Parse::Pidl::IDL;
+use Parse::Pidl::NDR;
+
+sub testok($$)
+{
+ my ($name, $data) = @_;
+
+ test_errors("", sub {
+ my $pidl = Parse::Pidl::IDL::parse_string($data, "<$name>");
+ ok (defined($pidl), $name);
+ });
+}
+
+sub testfail($$$)
+{
+ my ($name, $data, $error) = @_;
+
+ test_errors($error, sub {
+ my $pidl = Parse::Pidl::IDL::parse_string($data, "<$name>");
+
+ ok ((not defined $pidl), $name);
+ });
+}
+
+testfail "unknowntag", "bla test {};",
+ "<unknowntag>:0: Syntax error near 'bla'\n";
+testok "test1", "interface test { void Test(); }; ";
+testok "voidtest", "interface test { int Testx(void); }; ";
+testfail "voidtest", "interface test { Test(); }; ",
+ "<voidtest>:0: Syntax error near '('\n";
+testok "argtest", "interface test { int Test(int a, long b, uint32 c); }; ";
+testok "array1", "interface test { int Test(int a[]); };";
+testok "array2", "interface test { int Test(int a[2]); };";
+testok "array3", "interface test { int Test(int a[b]); };";
+testfail "array4", "interface test { int Test(int[] a); };",
+ "<array4>:0: Syntax error near '['\n";
+testok "ptr1", "interface test { int Test(int *a); };";
+testok "ptr2", "interface test { int Test(int **a); };";
+testok "ptr3", "interface test { int Test(int ***a); };";
+testfail "empty1", "interface test { };", "<empty1>:0: Syntax error near '}'\n";
+testfail "empty2", "", "";
+testok "attr1", "[uuid(\"myuuid\"),attr] interface test { int Test(int ***a); };";
+testok "attr2", "interface test { [public] int Test(); };";
+testok "attr3", "[attr1] [attr2] interface test { [public] int Test(); };";
+testok "multfn", "interface test { int test1(); int test2(); };";
+testok "multif", "interface test { int test1(); }; interface test2 { int test2(); };";
+testok "tdstruct1", "interface test { typedef struct { } foo; };";
+testok "tdstruct2", "interface test { typedef struct { int a; } foo; };";
+testok "tdstruct3", "interface test { typedef struct { int a; int b; } foo; };";
+testfail "tdstruct4", "interface test { typedef struct { int a, int b; } foo; };",
+ "<tdstruct4>:0: Syntax error near ','\n";
+testok "struct1", "interface test { struct x { }; };";
+testok "struct2", "interface test { struct x { int a; }; };";
+testok "struct3", "interface test { struct x { int a; int b; }; };";
+testfail "struct4", "interface test { struct x { int a, int b; }; };",
+ "<struct4>:0: Syntax error near ','\n";
+testfail "struct5", "interface test { struct { int a; } x; };",
+ "<struct5>:0: Syntax error near 'x'\n";
+testok "tdunion1", "interface test { typedef union { } a; };";
+testok "tdunion2", "interface test { typedef union { int a; } a; };";
+testok "union1", "interface test { union a { }; };";
+testok "union2", "interface test { union x { int a; }; };";
+testfail "union3", "interface test { union { int a; } x; };",
+ "<union3>:0: Syntax error near 'x'\n";
+testok "typedef1", "interface test { typedef int a; };";
+testfail "typedef2", "interface test { typedef x; };",
+ "<typedef2>:0: Syntax error near ';'\n";
+testok "tdenum1", "interface test { typedef enum { A=1, B=2, C} a; };";
+testok "enum1", "interface test { enum a { A=1, B=2, C}; };";
+testfail "enum2", "interface test { enum { A=1, B=2, C} a; };",
+ "<enum2>:0: Syntax error near 'a'\n";
+testok "nested1", "interface test { struct x { struct { int a; } z; }; };";
+testok "nested2", "interface test { struct x { struct y { int a; } z; }; };";
+testok "bitmap1", "interface test { bitmap x { a=1 }; };";
+testok "unsigned", "interface test { struct x { unsigned short y; }; };";
+testok "struct-property", "interface test { [public] struct x { short y; }; };";
+testok "signed", "interface test { struct x { signed short y; }; };";
+testok "declarg", "interface test { void test(struct { int x; } a); };";
+testok "structarg", "interface test { void test(struct a b); };";
+testfail "structargmissing", "interface test { void test(struct a); };",
+ "<structargmissing>:0: Syntax error near ')'\n";
+testok "structqual", "interface test { struct x { struct y z; }; };";
+testok "unionqual", "interface test { struct x { union y z; }; };";
+testok "enumqual", "interface test { struct x { enum y z; }; };";
+testok "bitmapqual", "interface test { struct x { bitmap y z; }; };";
+testok "emptystructdecl", "interface test { struct x; };";
+testok "emptyenumdecl", "interface test { enum x; };";
+testok "emptytdstructdecl", "interface test { typedef struct x y; };";
+testok "import", "import \"foo.idl\";";
+testok "include", "include \"foo.h\";";
+testfail "import-noquotes", "import foo.idl;",
+ "<import-noquotes>:0: Syntax error near 'foo'\n";
+testfail "include-noquotes", "include foo.idl;",
+ "<include-noquotes>:0: Syntax error near 'foo'\n";
+testok "importlib", "importlib \"foo.idl\";";
+testfail "import-nosemicolon", "import \"foo.idl\"",
+ "<import-nosemicolon>:0: Syntax error near 'foo.idl'\n";
+testok "import-multiple", "import \"foo.idl\", \"bar.idl\";";
+testok "include-multiple", "include \"foo.idl\", \"bar.idl\";";
+testok "empty-struct", "interface test { struct foo { }; }";
+testok "typedef-double", "interface test { typedef struct foo { } foo; }";
+testok "cpp-quote", "cpp_quote(\"bla\")";
+
+my $x = Parse::Pidl::IDL::parse_string("interface foo { struct x {}; }", "<foo>");
+
+is_deeply($x, [ {
+ 'TYPE' => 'INTERFACE',
+ 'NAME' => 'foo',
+ 'DATA' => [ {
+ 'TYPE' => 'STRUCT',
+ 'NAME' => 'x',
+ 'ELEMENTS' => [],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ } ],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+}]);
+
+$x = Parse::Pidl::IDL::parse_string("interface foo { struct x; }", "<foo>");
+is_deeply($x, [ {
+ 'TYPE' => 'INTERFACE',
+ 'NAME' => 'foo',
+ 'DATA' => [ {
+ 'TYPE' => 'STRUCT',
+ 'NAME' => 'x',
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ } ],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+}]);
+
+$x = Parse::Pidl::IDL::parse_string("cpp_quote(\"foobar\")", "<quote>");
+is_deeply($x, [ {
+ 'TYPE' => 'CPP_QUOTE',
+ 'DATA' => '"foobar"',
+ 'FILE' => '<quote>',
+ 'LINE' => 0
+}]);
+
+# A typedef of a struct without body
+$x = Parse::Pidl::IDL::parse_string("interface foo { typedef struct x y; }", "<foo>");
+
+is_deeply($x, [ {
+ 'TYPE' => 'INTERFACE',
+ 'NAME' => 'foo',
+ 'DATA' => [ {
+ 'TYPE' => 'TYPEDEF',
+ 'NAME' => 'y',
+ 'POINTERS' => 0,
+ 'DATA' => {
+ 'TYPE' => 'STRUCT',
+ 'NAME' => 'x',
+ 'FILE' => '<foo>',
+ 'LINE' => 0,
+ },
+ 'FILE' => '<foo>',
+ 'LINE' => 0,
+ } ],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+}]);
+
+# A typedef of a struct with empty body
+$x = Parse::Pidl::IDL::parse_string("interface foo { typedef struct {} y; }", "<foo>");
+
+is_deeply($x, [ {
+ 'TYPE' => 'INTERFACE',
+ 'NAME' => 'foo',
+ 'DATA' => [ {
+ 'TYPE' => 'TYPEDEF',
+ 'NAME' => 'y',
+ 'POINTERS' => 0,
+ 'DATA' => {
+ 'TYPE' => 'STRUCT',
+ 'ELEMENTS' => [],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ },
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ } ],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+}]);
+
+# A typedef of a bitmap with no body
+$x = Parse::Pidl::IDL::parse_string("interface foo { typedef bitmap x y; }", "<foo>");
+
+is_deeply($x, [ {
+ 'TYPE' => 'INTERFACE',
+ 'NAME' => 'foo',
+ 'DATA' => [ {
+ 'TYPE' => 'TYPEDEF',
+ 'NAME' => 'y',
+ 'POINTERS' => 0,
+ 'DATA' => {
+ 'TYPE' => 'BITMAP',
+ 'NAME' => 'x',
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ },
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ } ],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+}]);
+
+
+# A typedef of a union with no body
+$x = Parse::Pidl::IDL::parse_string("interface foo { typedef union x y; }", "<foo>");
+
+is_deeply($x, [ {
+ 'TYPE' => 'INTERFACE',
+ 'NAME' => 'foo',
+ 'DATA' => [ {
+ 'TYPE' => 'TYPEDEF',
+ 'NAME' => 'y',
+ 'POINTERS' => 0,
+ 'DATA' => {
+ 'TYPE' => 'UNION',
+ 'NAME' => 'x',
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ },
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+ } ],
+ 'FILE' => '<foo>',
+ 'LINE' => 0
+}]);
diff --git a/tools/pidl/tests/samba-ndr.pl b/tools/pidl/tests/samba-ndr.pl
new file mode 100755
index 0000000..7c53cbc
--- /dev/null
+++ b/tools/pidl/tests/samba-ndr.pl
@@ -0,0 +1,300 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 31;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use strict;
+use Parse::Pidl::Util qw(MyDumper);
+use Parse::Pidl::Samba4::NDR::Parser qw(check_null_pointer
+ NeededFunction NeededElement NeededType
+ NeededInterface TypeFunctionName ParseElementPrint);
+
+my $output;
+sub print_fn($) { my $x = shift; $output.=$x; }
+
+# Test case 1: Simple unique pointer dereference
+
+$output = "";
+my $fn = check_null_pointer({
+ PARENT => {
+ ELEMENTS => [
+ {
+ NAME => "bla",
+ LEVELS => [
+ { TYPE => "POINTER",
+ POINTER_INDEX => 0,
+ POINTER_TYPE => "unique" },
+ { TYPE => "DATA" }
+ ],
+ },
+ ]
+ }
+}, { bla => "r->in.bla" }, \&print_fn, "return;");
+
+
+test_warnings("", sub { $fn->("r->in.bla"); });
+
+is($output, "if (r->in.bla == NULL) return;");
+
+# Test case 2: Simple ref pointer dereference
+
+$output = "";
+$fn = check_null_pointer({
+ PARENT => {
+ ELEMENTS => [
+ {
+ NAME => "bla",
+ LEVELS => [
+ { TYPE => "POINTER",
+ POINTER_INDEX => 0,
+ POINTER_TYPE => "ref" },
+ { TYPE => "DATA" }
+ ],
+ },
+ ]
+ }
+}, { bla => "r->in.bla" }, \&print_fn, undef);
+
+test_warnings("", sub { $fn->("r->in.bla"); });
+
+is($output, "");
+
+# Test case 3: Illegal dereference
+
+$output = "";
+$fn = check_null_pointer({
+ FILE => "nofile",
+ LINE => 1,
+ PARENT => {
+ ELEMENTS => [
+ {
+ NAME => "bla",
+ LEVELS => [
+ { TYPE => "DATA" }
+ ],
+ },
+ ]
+ }
+}, { bla => "r->in.bla" }, \&print_fn, undef);
+
+test_warnings("nofile:1: too much dereferences for `bla'\n",
+ sub { $fn->("r->in.bla"); });
+
+is($output, "");
+
+# Test case 4: Double pointer dereference
+
+$output = "";
+$fn = check_null_pointer({
+ PARENT => {
+ ELEMENTS => [
+ {
+ NAME => "bla",
+ LEVELS => [
+ { TYPE => "POINTER",
+ POINTER_INDEX => 0,
+ POINTER_TYPE => "unique" },
+ { TYPE => "POINTER",
+ POINTER_INDEX => 1,
+ POINTER_TYPE => "unique" },
+ { TYPE => "DATA" }
+ ],
+ },
+ ]
+ }
+}, { bla => "r->in.bla" }, \&print_fn, "return;");
+
+test_warnings("",
+ sub { $fn->("*r->in.bla"); });
+
+is($output, "if (*r->in.bla == NULL) return;");
+
+# Test case 5: Unknown variable
+
+$output = "";
+$fn = check_null_pointer({
+ FILE => "nofile",
+ LINE => 2,
+ PARENT => {
+ ELEMENTS => [
+ {
+ NAME => "bla",
+ LEVELS => [
+ { TYPE => "DATA" }
+ ],
+ },
+ ]
+ }
+}, { }, \&print_fn, "return;");
+
+test_warnings("nofile:2: unknown dereferenced expression `r->in.bla'\n",
+ sub { $fn->("r->in.bla"); });
+
+is($output, "if (r->in.bla == NULL) return;");
+
+my $needed = {};
+NeededElement({ TYPE => "foo", REPRESENTATION_TYPE => "foo" }, "pull", $needed);
+is_deeply($needed, { ndr_pull_foo => 1 });
+
+# old settings should be kept
+$needed = { ndr_pull_foo => 0 };
+NeededElement({ TYPE => "foo", REPRESENTATION_TYPE => "foo" }, "pull", $needed);
+is_deeply($needed, { ndr_pull_foo => 0 });
+
+# print/pull/push are independent of each other
+$needed = { ndr_pull_foo => 0 };
+NeededElement({ TYPE => "foo", REPRESENTATION_TYPE => "foo" }, "print", $needed);
+is_deeply($needed, { ndr_pull_foo => 0, ndr_print_foo => 1 });
+
+$needed = { };
+NeededFunction({ NAME => "foo", ELEMENTS => [ { TYPE => "bar", REPRESENTATION_TYPE => "bar" } ] }, $needed);
+is_deeply($needed, { ndr_pull_foo => 1, ndr_print_foo => 1, ndr_push_foo => 1,
+ ndr_pull_bar => 1, ndr_print_bar => 1, ndr_push_bar => 1});
+
+# push/pull/print are always set for functions
+$needed = { ndr_pull_foo => 0 };
+NeededFunction({ NAME => "foo", ELEMENTS => [ { TYPE => "bar", REPRESENTATION_TYPE => "bar" } ] }, $needed);
+is_deeply($needed, { ndr_pull_foo => 1, ndr_print_foo => 1, ndr_push_foo => 1,
+ ndr_pull_bar => 1, ndr_push_bar => 1, ndr_print_bar => 1});
+
+# public structs are always needed
+$needed = {};
+NeededType({ NAME => "bla", TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT", ELEMENTS => [] } },
+ $needed, "pull");
+is_deeply($needed, { });
+
+$needed = {};
+NeededInterface({ TYPES => [ { PROPERTIES => { public => 1 }, NAME => "bla",
+ TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT", ELEMENTS => [] } } ] },
+ $needed);
+is_deeply($needed, { ndr_pull_bla => 1, ndr_push_bla => 1, ndr_print_bla => 1 });
+
+# make sure types for elements are set too
+$needed = {};
+NeededInterface({ TYPES => [ { PROPERTIES => { public => 1 }, NAME => "bla",
+ TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "bar", REPRESENTATION_TYPE => "bar" } ] } } ] },
+ $needed);
+is_deeply($needed, { ndr_pull_bla => 1, ndr_pull_bar => 1, ndr_push_bla => 1, ndr_push_bar => 1,
+ ndr_print_bla => 1, ndr_print_bar => 1});
+
+$needed = {};
+NeededInterface({ TYPES => [ { PROPERTIES => { gensize => 1}, NAME => "bla",
+ TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "bar", REPRESENTATION_TYPE => "bar" } ] } } ] },
+ $needed);
+is_deeply($needed, { ndr_size_bla => 1 });
+
+# make sure types for elements are set too
+$needed = { ndr_pull_bla => 1 };
+NeededType({ NAME => "bla",
+ TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "bar", REPRESENTATION_TYPE => "bar" } ] } },
+ $needed, "pull");
+is_deeply($needed, { ndr_pull_bla => 1, ndr_pull_bar => 1 });
+
+$needed = {};
+NeededInterface({ TYPES => [ { PROPERTIES => { public => 1},
+ NAME => "bla",
+ TYPE => "TYPEDEF",
+ DATA => { TYPE => "STRUCT",
+ ELEMENTS => [ { TYPE => "bar", REPRESENTATION_TYPE => "rep" } ] } } ] }, $needed);
+is_deeply($needed, { ndr_pull_bla => 1, ndr_push_bla => 1, ndr_print_bla => 1,
+ ndr_print_rep => 1,
+ ndr_pull_bar => 1, ndr_push_bar => 1,
+ ndr_bar_to_rep => 1, ndr_rep_to_bar => 1});
+
+my $generator = new Parse::Pidl::Samba4::NDR::Parser();
+$generator->ParseStructPush({
+ NAME => "mystruct",
+ TYPE => "STRUCT",
+ PROPERTIES => {},
+ ALIGN => 4,
+ ELEMENTS => [ ]}, "ndr", "x");
+is($generator->{res}, "NDR_PUSH_CHECK_FLAGS(ndr, ndr_flags);
+if (ndr_flags & NDR_SCALARS) {
+ NDR_CHECK(ndr_push_align(ndr, 4));
+ NDR_CHECK(ndr_push_trailer_align(ndr, 4));
+}
+if (ndr_flags & NDR_BUFFERS) {
+}
+");
+
+$generator = new Parse::Pidl::Samba4::NDR::Parser();
+my $e = {
+ NAME => "el1",
+ TYPE => "mytype",
+ REPRESENTATION_TYPE => "mytype",
+ PROPERTIES => {},
+ LEVELS => [
+ { LEVEL_INDEX => 0, TYPE => "DATA", DATA_TYPE => "mytype" }
+] };
+$generator->ParseStructPush({
+ NAME => "mystruct",
+ TYPE => "STRUCT",
+ PROPERTIES => {},
+ ALIGN => 4,
+ SURROUNDING_ELEMENT => $e,
+ ELEMENTS => [ $e ]}, "ndr", "x");
+is($generator->{res}, "NDR_PUSH_CHECK_FLAGS(ndr, ndr_flags);
+if (ndr_flags & NDR_SCALARS) {
+ NDR_CHECK(ndr_push_uint3264(ndr, NDR_SCALARS, ndr_string_array_size(ndr, x->el1)));
+ NDR_CHECK(ndr_push_align(ndr, 4));
+ NDR_CHECK(ndr_push_mytype(ndr, NDR_SCALARS, &x->el1));
+ NDR_CHECK(ndr_push_trailer_align(ndr, 4));
+}
+if (ndr_flags & NDR_BUFFERS) {
+}
+");
+
+is(TypeFunctionName("ndr_pull", "uint32"), "ndr_pull_uint32");
+is(TypeFunctionName("ndr_pull", {TYPE => "ENUM", NAME => "bar"}), "ndr_pull_ENUM_bar");
+is(TypeFunctionName("ndr_pull", {TYPE => "TYPEDEF", NAME => "bar", DATA => undef}), "ndr_pull_bar");
+is(TypeFunctionName("ndr_push", {TYPE => "STRUCT", NAME => "bar"}), "ndr_push_STRUCT_bar");
+
+# check noprint works
+$generator = new Parse::Pidl::Samba4::NDR::Parser();
+$generator->ParseElementPrint({ NAME => "x", TYPE => "rt", REPRESENTATION_TYPE => "rt",
+ PROPERTIES => { noprint => 1},
+ LEVELS => [ { TYPE => "DATA", DATA_TYPE => "rt"} ]},
+ "ndr", "var", { "x" => "r->foobar" } );
+is($generator->{res}, "");
+
+$generator = new Parse::Pidl::Samba4::NDR::Parser();
+$generator->ParseElementPrint({ NAME => "x", TYPE => "rt", REPRESENTATION_TYPE => "rt",
+ PROPERTIES => {},
+ LEVELS => [ { TYPE => "DATA", DATA_TYPE => "rt" }]},
+ "ndr", "var", { "x" => "r->foobar" } );
+is($generator->{res}, "ndr_print_rt(ndr, \"x\", &var);\n");
+
+# make sure that a print function for an element with value() set works
+$generator = new Parse::Pidl::Samba4::NDR::Parser();
+$generator->ParseElementPrint({ NAME => "x", TYPE => "uint32", REPRESENTATION_TYPE => "uint32",
+ PROPERTIES => { value => "23" },
+ LEVELS => [ { TYPE => "DATA", DATA_TYPE => "uint32"} ]},
+ "ndr", "var", { "x" => "r->foobar" } );
+is($generator->{res}, "ndr_print_uint32(ndr, \"x\", (ndr->flags & LIBNDR_PRINT_SET_VALUES)?23:var);\n");
+
+$generator = new Parse::Pidl::Samba4::NDR::Parser();
+$generator->AuthServiceStruct("bridge", "\"rot13\",\"onetimepad\"");
+is($generator->{res}, "static const char * const bridge_authservice_strings[] = {
+ \"rot13\",
+ \"onetimepad\",
+};
+
+static const struct ndr_interface_string_array bridge_authservices = {
+ .count = 2,
+ .names = bridge_authservice_strings
+};
+
+");
diff --git a/tools/pidl/tests/samba3-cli.pl b/tools/pidl/tests/samba3-cli.pl
new file mode 100755
index 0000000..c758ef4
--- /dev/null
+++ b/tools/pidl/tests/samba3-cli.pl
@@ -0,0 +1,236 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 8;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper);
+use Parse::Pidl::Samba3::ClientNDR qw(ParseFunction);
+use Parse::Pidl::Samba4::Header qw(GenerateFunctionInEnv GenerateFunctionOutEnv);
+
+# Make sure GenerateFunctionInEnv and GenerateFunctionOutEnv work
+my $fn = { ELEMENTS => [ { DIRECTION => ["in"], NAME => "foo" } ] };
+is_deeply({ "foo" => "r.in.foo" }, GenerateFunctionInEnv($fn, "r."));
+is_deeply({ "foo" => "r.in.foo" }, GenerateFunctionOutEnv($fn, "r."));
+
+$fn = { ELEMENTS => [ { DIRECTION => ["out", "in"], NAME => "foo" } ] };
+is_deeply({ "foo" => "r.in.foo" }, GenerateFunctionInEnv($fn, "r."));
+is_deeply({ "foo" => "r.out.foo" }, GenerateFunctionOutEnv($fn, "r."));
+
+$fn = { ELEMENTS => [ { DIRECTION => ["out"], NAME => "foo" } ] };
+is_deeply({ }, GenerateFunctionInEnv($fn, "r."));
+is_deeply({ "foo" => "r.out.foo" }, GenerateFunctionOutEnv($fn, "r."));
+
+my $x = new Parse::Pidl::Samba3::ClientNDR();
+
+$fn = { NAME => "bar", ELEMENTS => [ ] };
+$x->ParseFunction("foo", $fn);
+is($x->{res},
+"struct rpccli_bar_state {
+ TALLOC_CTX *out_mem_ctx;
+};
+
+static void rpccli_bar_done(struct tevent_req *subreq);
+
+struct tevent_req *rpccli_bar_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct rpc_pipe_client *cli)
+{
+ struct tevent_req *req;
+ struct rpccli_bar_state *state;
+ struct tevent_req *subreq;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct rpccli_bar_state);
+ if (req == NULL) {
+ return NULL;
+ }
+ state->out_mem_ctx = NULL;
+
+ subreq = dcerpc_bar_send(state,
+ ev,
+ cli->binding_handle);
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, rpccli_bar_done, req);
+ return req;
+}
+
+static void rpccli_bar_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct rpccli_bar_state *state = tevent_req_data(
+ req, struct rpccli_bar_state);
+ NTSTATUS status;
+ TALLOC_CTX *mem_ctx;
+
+ if (state->out_mem_ctx) {
+ mem_ctx = state->out_mem_ctx;
+ } else {
+ mem_ctx = state;
+ }
+
+ status = dcerpc_bar_recv(subreq,
+ mem_ctx);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ tevent_req_done(req);
+}
+
+NTSTATUS rpccli_bar_recv(struct tevent_req *req,
+ TALLOC_CTX *mem_ctx)
+{
+ struct rpccli_bar_state *state = tevent_req_data(
+ req, struct rpccli_bar_state);
+ NTSTATUS status;
+
+ if (tevent_req_is_nterror(req, &status)) {
+ tevent_req_received(req);
+ return status;
+ }
+
+ /* Steal possible out parameters to the callers context */
+ talloc_steal(mem_ctx, state->out_mem_ctx);
+
+ tevent_req_received(req);
+ return NT_STATUS_OK;
+}
+
+NTSTATUS rpccli_bar(struct rpc_pipe_client *cli,
+ TALLOC_CTX *mem_ctx)
+{
+ NTSTATUS status;
+
+ status = dcerpc_bar(cli->binding_handle,
+ mem_ctx);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
+
+ /* Return result */
+ return NT_STATUS_OK;
+}
+
+");
+
+$x = new Parse::Pidl::Samba3::ClientNDR();
+
+$fn = { NAME => "bar", ELEMENTS => [ ], RETURN_TYPE => "WERROR" };
+$x->ParseFunction("foo", $fn);
+is($x->{res},
+"struct rpccli_bar_state {
+ TALLOC_CTX *out_mem_ctx;
+ WERROR result;
+};
+
+static void rpccli_bar_done(struct tevent_req *subreq);
+
+struct tevent_req *rpccli_bar_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct rpc_pipe_client *cli)
+{
+ struct tevent_req *req;
+ struct rpccli_bar_state *state;
+ struct tevent_req *subreq;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct rpccli_bar_state);
+ if (req == NULL) {
+ return NULL;
+ }
+ state->out_mem_ctx = NULL;
+
+ subreq = dcerpc_bar_send(state,
+ ev,
+ cli->binding_handle);
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, rpccli_bar_done, req);
+ return req;
+}
+
+static void rpccli_bar_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct rpccli_bar_state *state = tevent_req_data(
+ req, struct rpccli_bar_state);
+ NTSTATUS status;
+ TALLOC_CTX *mem_ctx;
+
+ if (state->out_mem_ctx) {
+ mem_ctx = state->out_mem_ctx;
+ } else {
+ mem_ctx = state;
+ }
+
+ status = dcerpc_bar_recv(subreq,
+ mem_ctx,
+ &state->result);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ tevent_req_done(req);
+}
+
+NTSTATUS rpccli_bar_recv(struct tevent_req *req,
+ TALLOC_CTX *mem_ctx,
+ WERROR *result)
+{
+ struct rpccli_bar_state *state = tevent_req_data(
+ req, struct rpccli_bar_state);
+ NTSTATUS status;
+
+ if (tevent_req_is_nterror(req, &status)) {
+ tevent_req_received(req);
+ return status;
+ }
+
+ /* Steal possible out parameters to the callers context */
+ talloc_steal(mem_ctx, state->out_mem_ctx);
+
+ /* Return result */
+ *result = state->result;
+
+ tevent_req_received(req);
+ return NT_STATUS_OK;
+}
+
+NTSTATUS rpccli_bar(struct rpc_pipe_client *cli,
+ TALLOC_CTX *mem_ctx,
+ WERROR *werror)
+{
+ WERROR result;
+ NTSTATUS status;
+
+ status = dcerpc_bar(cli->binding_handle,
+ mem_ctx,
+ &result);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
+
+ /* Return result */
+ if (werror) {
+ *werror = result;
+ }
+
+ return werror_to_ntstatus(result);
+}
+
+");
+
diff --git a/tools/pidl/tests/samba3-srv.pl b/tools/pidl/tests/samba3-srv.pl
new file mode 100755
index 0000000..d1e2bc9
--- /dev/null
+++ b/tools/pidl/tests/samba3-srv.pl
@@ -0,0 +1,18 @@
+#!/usr/bin/perl
+# (C) 2008 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 1;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper has_property);
+use Parse::Pidl::Samba3::ServerNDR qw(DeclLevel);
+
+my $l = { TYPE => "DATA", DATA_TYPE => "uint32" };
+my $e = { FILE => "foo", LINE => 0, PROPERTIES => { }, TYPE => "uint32",
+ LEVELS => [ $l ] };
+
+is("uint32_t", DeclLevel($e, 0));
diff --git a/tools/pidl/tests/tdr.pl b/tools/pidl/tests/tdr.pl
new file mode 100755
index 0000000..d6cd7a0
--- /dev/null
+++ b/tools/pidl/tests/tdr.pl
@@ -0,0 +1,49 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 6;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Samba4::TDR qw(ParserType);
+
+my $tdr = new Parse::Pidl::Samba4::TDR();
+
+$tdr->ParserType({TYPE => "STRUCT", NAME => "foo", PROPERTIES => {public => 1}}, "pull");
+is($tdr->{ret}, "NTSTATUS tdr_pull_foo (struct tdr_pull *tdr, TALLOC_CTX *mem_ctx, struct foo *v)
+{
+ return NT_STATUS_OK;
+}
+
+");
+is($tdr->{ret_hdr}, "NTSTATUS tdr_pull_foo (struct tdr_pull *tdr, TALLOC_CTX *mem_ctx, struct foo *v);\n");
+
+
+$tdr = new Parse::Pidl::Samba4::TDR();
+$tdr->ParserType({TYPE => "UNION", NAME => "bar", PROPERTIES => {public => 1}}, "pull");
+is($tdr->{ret}, "NTSTATUS tdr_pull_bar(struct tdr_pull *tdr, TALLOC_CTX *mem_ctx, int level, union bar *v)
+{
+ switch (level) {
+ }
+ return NT_STATUS_OK;
+
+}
+
+");
+is($tdr->{ret_hdr}, "NTSTATUS tdr_pull_bar(struct tdr_pull *tdr, TALLOC_CTX *mem_ctx, int level, union bar *v);\n");
+
+$tdr = new Parse::Pidl::Samba4::TDR();
+$tdr->ParserType({TYPE => "UNION", NAME => "bar", PROPERTIES => {}}, "pull");
+is($tdr->{ret}, "static NTSTATUS tdr_pull_bar(struct tdr_pull *tdr, TALLOC_CTX *mem_ctx, int level, union bar *v)
+{
+ switch (level) {
+ }
+ return NT_STATUS_OK;
+
+}
+
+");
+is($tdr->{ret_hdr}, "");
diff --git a/tools/pidl/tests/test_util.pl b/tools/pidl/tests/test_util.pl
new file mode 100755
index 0000000..2d59f62
--- /dev/null
+++ b/tools/pidl/tests/test_util.pl
@@ -0,0 +1,21 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+
+use Test::More tests => 6;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util qw(test_warnings test_errors);
+use Parse::Pidl qw(warning error);
+
+test_warnings("", sub {});
+
+test_warnings("x:1: msg\n", sub { warning({FILE => "x", LINE => 1}, "msg"); });
+test_warnings("", sub {});
+
+test_errors("", sub {});
+
+test_errors("x:1: msg\n", sub { error({FILE => "x", LINE => 1}, "msg"); });
+test_errors("", sub {});
+
diff --git a/tools/pidl/tests/typelist.pl b/tools/pidl/tests/typelist.pl
new file mode 100755
index 0000000..681c0ea
--- /dev/null
+++ b/tools/pidl/tests/typelist.pl
@@ -0,0 +1,93 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 56;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Typelist qw(hasType typeHasBody getType mapTypeName expandAlias
+ mapScalarType addType typeIs is_scalar scalar_is_reference
+ enum_type_fn bitmap_type_fn mapType);
+
+is("foo", expandAlias("foo"));
+is("uint32", expandAlias("DWORD"));
+is("int32", expandAlias("int"));
+is("", expandAlias(""));
+is("int32", expandAlias("int32"));
+
+is("uint32_t", mapScalarType("uint32"));
+is("void", mapScalarType("void"));
+is("uint64_t", mapScalarType("hyper"));
+is("double", mapScalarType("double"));
+
+my $x = { TYPE => "ENUM", NAME => "foo", EXTRADATA => 1 };
+addType($x);
+is_deeply($x, getType("foo"));
+is(undef, getType("bloebla"));
+is_deeply(getType({ TYPE => "STRUCT" }), { TYPE => "STRUCT" });
+is_deeply(getType({ TYPE => "ENUM", NAME => "foo" }), $x);
+is_deeply(getType("uint16"), {
+ NAME => "uint16",
+ BASEFILE => "<builtin>",
+ TYPE => "TYPEDEF",
+ DATA => { NAME => "uint16", TYPE => "SCALAR" }});
+
+is_deeply(getType("double"), {
+ NAME => "double",
+ BASEFILE => "<builtin>",
+ TYPE => "TYPEDEF",
+ DATA => { NAME => "double", TYPE => "SCALAR" }});
+
+is(0, typeIs("someUnknownType", "ENUM"));
+is(0, typeIs("foo", "ENUM"));
+addType({NAME => "mytypedef", TYPE => "TYPEDEF", DATA => { TYPE => "ENUM" }});
+is(1, typeIs("mytypedef", "ENUM"));
+is(0, typeIs("mytypedef", "BITMAP"));
+is(1, typeIs({ TYPE => "ENUM"}, "ENUM"));
+is(0, typeIs({ TYPE => "BITMAP"}, "ENUM"));
+is(1, typeIs("uint32", "SCALAR"));
+is(0, typeIs("uint32", "ENUM"));
+
+is(1, hasType("foo"));
+is(0, hasType("nonexistent"));
+is(0, hasType({TYPE => "ENUM", NAME => "someUnknownType"}));
+is(1, hasType({TYPE => "ENUM", NAME => "foo"}));
+is(1, hasType({TYPE => "ENUM"}));
+is(1, hasType({TYPE => "STRUCT"}));
+
+is(1, is_scalar("uint32"));
+is(0, is_scalar("nonexistent"));
+is(1, is_scalar({TYPE => "ENUM"}));
+is(0, is_scalar({TYPE => "STRUCT"}));
+is(1, is_scalar({TYPE => "TYPEDEF", DATA => {TYPE => "ENUM" }}));
+is(1, is_scalar("mytypedef"));
+
+is(1, scalar_is_reference("string"));
+is(0, scalar_is_reference("uint32"));
+is(0, scalar_is_reference({TYPE => "STRUCT", NAME => "echo_foobar"}));
+
+is("uint8", enum_type_fn({TYPE => "ENUM", PARENT=>{PROPERTIES => {enum8bit => 1}}}));
+is("uint32", enum_type_fn({TYPE => "ENUM", PARENT=>{PROPERTIES => {v1_enum => 1}}}));
+is("uint1632", enum_type_fn({TYPE => "ENUM", PARENT=>{PROPERTIES => {}}}));
+
+is("uint8", bitmap_type_fn({TYPE => "BITMAP", PROPERTIES => {bitmap8bit => 1}}));
+is("uint16", bitmap_type_fn({TYPE => "BITMAP", PROPERTIES => {bitmap16bit => 1}}));
+is("hyper", bitmap_type_fn({TYPE => "BITMAP", PROPERTIES => {bitmap64bit => 1}}));
+is("uint32", bitmap_type_fn({TYPE => "BITMAP", PROPERTIES => {}}));
+
+is("enum foo", mapType({TYPE => "ENUM"}, "foo"));
+is("union foo", mapType({TYPE => "UNION"}, "foo"));
+is("struct foo", mapType({TYPE => "STRUCT"}, "foo"));
+is("uint8_t", mapType({TYPE => "BITMAP", PROPERTIES => {bitmap8bit => 1}}, "foo"));
+is("uint8_t", mapType({TYPE => "SCALAR"}, "uint8"));
+is("uint32_t", mapType({TYPE => "TYPEDEF", DATA => {TYPE => "SCALAR"}}, "uint32"));
+
+is("void", mapTypeName(undef));
+is("uint32_t", mapTypeName("uint32"));
+is("int32_t", mapTypeName("int"));
+
+ok(not typeHasBody({TYPE => "TYPEDEF", DATA => { TYPE => "STRUCT" }}));
+ok(typeHasBody({TYPE => "TYPEDEF", DATA => { TYPE => "STRUCT", ELEMENTS => [] }}));
diff --git a/tools/pidl/tests/util.pl b/tools/pidl/tests/util.pl
new file mode 100755
index 0000000..cb77f34
--- /dev/null
+++ b/tools/pidl/tests/util.pl
@@ -0,0 +1,115 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+use strict;
+use warnings;
+
+use Test::More tests => 72;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl qw(error);
+use Parse::Pidl::Util;
+
+# has_property()
+is(undef, has_property({}, "foo"));
+is(undef, has_property({PROPERTIES => {}}, "foo"));
+is("data", has_property({PROPERTIES => {foo => "data"}}, "foo"));
+is(undef, has_property({PROPERTIES => {foo => undef}}, "foo"));
+
+# is_constant()
+ok(is_constant("2"));
+ok(is_constant("256"));
+ok(is_constant("0x400"));
+ok(is_constant("0x4BC"));
+ok(not is_constant("0x4BGC"));
+ok(not is_constant("str"));
+ok(not is_constant("2 * expr"));
+
+# make_str()
+is("\"bla\"", make_str("bla"));
+is("\"bla\"", make_str("\"bla\""));
+is("\"\"bla\"\"", make_str("\"\"bla\"\""));
+is("\"bla\"\"", make_str("bla\""));
+is("\"foo\"bar\"", make_str("foo\"bar"));
+
+is("bla", unmake_str("\"bla\""));
+is("\"bla\"", unmake_str("\"\"bla\"\""));
+
+# print_uuid()
+is(undef, print_uuid("invalid"));
+is("{0x12345778,0x1234,0xabcd,{0xef,0x00},{0x01,0x23,0x45,0x67,0x89,0xac}}",
+ print_uuid("12345778-1234-abcd-ef00-0123456789ac"));
+is("{0x12345778,0x1234,0xabcd,{0xef,0x00},{0x01,0x23,0x45,0x67,0x89,0xac}}",
+ print_uuid("\"12345778-1234-abcd-ef00-0123456789ac\""));
+
+# property_matches()
+# missing property
+ok(not property_matches({PROPERTIES => {}}, "x", "data"));
+# data not matching
+ok(not property_matches({PROPERTIES => {x => "bar"}}, "x", "data"));
+# data matching exactly
+ok(property_matches({PROPERTIES => {x => "data"}}, "x", "data"));
+# regex matching
+ok(property_matches({PROPERTIES => {x => "data"}}, "x", "^([dat]+)\$"));
+
+# ParseExpr()
+is(undef, ParseExpr("", {}, undef));
+is("a", ParseExpr("a", {"b" => "2"}, undef));
+is("2", ParseExpr("a", {"a" => "2"}, undef));
+is("2 * 2", ParseExpr("a*a", {"a" => "2"}, undef));
+is("r->length + r->length",
+ ParseExpr("length+length", {"length" => "r->length"}, undef));
+is("2 / 2 * (r->length)",
+ ParseExpr("constant/constant*(len)", {"constant" => "2",
+ "len" => "r->length"}, undef));
+is("2 + 2 - r->length",
+ ParseExpr("constant+constant-len", {"constant" => "2",
+ "len" => "r->length"}, undef));
+is("*r->length", ParseExpr("*len", { "len" => "r->length"}, undef));
+is("**r->length", ParseExpr("**len", { "len" => "r->length"}, undef));
+is("r->length & 2", ParseExpr("len&2", { "len" => "r->length"}, undef));
+is("&r->length", ParseExpr("&len", { "len" => "r->length"}, undef));
+is("calc()", ParseExpr("calc()", { "foo" => "2"}, undef));
+is("calc(2 * 2)", ParseExpr("calc(foo * 2)", { "foo" => "2"}, undef));
+is("strlen(\"data\")", ParseExpr("strlen(foo)", { "foo" => "\"data\""}, undef));
+is("strlen(\"data\", 4)", ParseExpr("strlen(foo, 4)", { "foo" => "\"data\""}, undef));
+is("foo / bar", ParseExpr("foo / bar", { "bla" => "\"data\""}, undef));
+is("r->length % 2", ParseExpr("len%2", { "len" => "r->length"}, undef));
+is("r->length == 2", ParseExpr("len==2", { "len" => "r->length"}, undef));
+is("r->length != 2", ParseExpr("len!=2", { "len" => "r->length"}, undef));
+is("pr->length", ParseExpr("pr->length", { "p" => "r"}, undef));
+is("r->length", ParseExpr("p->length", { "p" => "r"}, undef));
+is("_foo / bla32", ParseExpr("_foo / bla32", { "bla" => "\"data\""}, undef));
+is("foo.bar.blah", ParseExpr("foo.blah", { "foo" => "foo.bar"}, undef));
+is("\"bla\"", ParseExpr("\"bla\"", {}, undef));
+is("1 << 2", ParseExpr("1 << 2", {}, undef));
+is("1 >> 2", ParseExpr("1 >> 2", {}, undef));
+is("0x200", ParseExpr("0x200", {}, undef));
+is("2?3:0", ParseExpr("2?3:0", {}, undef));
+is("~0", ParseExpr("~0", {}, undef));
+is("b->a->a", ParseExpr("a->a->a", {"a" => "b"}, undef));
+is("b.a.a", ParseExpr("a.a.a", {"a" => "b"}, undef));
+
+test_errors("nofile:0: Parse error in `~' near `~'\n", sub {
+ is(undef, ParseExpr("~", {}, {FILE => "nofile", LINE => 0})); });
+
+test_errors("nofile:0: Got pointer, expected integer\n", sub {
+ is(undef, ParseExprExt("foo", {}, {FILE => "nofile", LINE => 0},
+ undef, sub { my $x = shift;
+ error({FILE => "nofile", LINE => 0},
+ "Got pointer, expected integer");
+ return undef; }))});
+
+is("b.a.a", ParseExpr("b.a.a", {"a" => "b"}, undef));
+is("((rr_type) == NBT_QTYPE_NETBIOS)", ParseExpr("((rr_type)==NBT_QTYPE_NETBIOS)", {}, undef));
+is("talloc_check_name", ParseExpr("talloc_check_name", {}, undef));
+is("talloc_check_name()", ParseExpr("talloc_check_name()", {}, undef));
+is("talloc_check_name(ndr)", ParseExpr("talloc_check_name(ndr)", {}, undef));
+is("talloc_check_name(ndr, 1)", ParseExpr("talloc_check_name(ndr,1)", {}, undef));
+is("talloc_check_name(ndr, \"struct ndr_push\")", ParseExpr("talloc_check_name(ndr,\"struct ndr_push\")", {}, undef));
+is("((rr_type) == NBT_QTYPE_NETBIOS) && talloc_check_name(ndr, \"struct ndr_push\")", ParseExpr("((rr_type)==NBT_QTYPE_NETBIOS)&&talloc_check_name(ndr,\"struct ndr_push\")", {}, undef));
+is("(rdata).data.length", ParseExpr("(rdata).data.length", {}, undef));
+is("((rdata).data.length == 2)", ParseExpr("((rdata).data.length==2)", {}, undef));
+is("((rdata).data.length == 2)?0:rr_type", ParseExpr("((rdata).data.length==2)?0:rr_type", {}, undef));
+is("((((rr_type) == NBT_QTYPE_NETBIOS) && talloc_check_name(ndr, \"struct ndr_push\") && ((rdata).data.length == 2))?0:rr_type)", ParseExpr("((((rr_type)==NBT_QTYPE_NETBIOS)&&talloc_check_name(ndr,\"struct ndr_push\")&&((rdata).data.length==2))?0:rr_type)", {}, undef));
diff --git a/tools/pidl/tests/wireshark-conf.pl b/tools/pidl/tests/wireshark-conf.pl
new file mode 100755
index 0000000..fff89f6
--- /dev/null
+++ b/tools/pidl/tests/wireshark-conf.pl
@@ -0,0 +1,205 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+# test parsing wireshark conformance files
+use strict;
+use warnings;
+
+use Test::More tests => 49;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper);
+use Parse::Pidl::Wireshark::Conformance qw(ReadConformanceFH valid_ft_type valid_base_type);
+
+sub parse_conf($)
+{
+ my $str = shift;
+ open(TMP, "+>", undef) or die("unable to open temp file");
+ print TMP $str;
+ seek(TMP, 0, 0);
+ my $data = {};
+ ReadConformanceFH(*TMP, $data, "nofile") or return undef;
+ close(TMP);
+ return $data;
+}
+
+ok(parse_conf("\n"), undef);
+ok(parse_conf(" \n"), undef);
+ok(parse_conf("CODE START\nCODE END\n"));
+test_warnings("nofile:1: Expecting CODE END\n", sub { is(parse_conf("CODE START\n"), undef); });
+ok(parse_conf("#foobar\n"), undef);
+test_warnings("nofile:1: Unknown command `foobar'\n",
+ sub { ok(parse_conf("foobar\n"), undef); });
+
+test_warnings("nofile:1: incomplete HF_RENAME command\n",
+ sub { parse_conf("HF_RENAME\n"); });
+
+is_deeply(parse_conf("HF_RENAME foo bar\n")->{hf_renames}->{foo},
+ { OLDNAME => "foo", NEWNAME => "bar", POS => {FILE => "nofile", LINE => 1}, USED => 0});
+
+is_deeply(parse_conf("NOEMIT\n"), { "noemit_dissector" => 1 });
+is_deeply(parse_conf("NOEMIT foo\n"), { "noemit" => { "foo" => 1 } });
+
+test_warnings("nofile:1: incomplete MANUAL command\n",
+ sub { parse_conf("MANUAL\n"); } );
+
+is_deeply(parse_conf("MANUAL foo\n"), { manual => {foo => 1}});
+
+test_errors("nofile:1: incomplete INCLUDE command\n",
+ sub { parse_conf("INCLUDE\n"); } );
+
+test_warnings("nofile:1: incomplete FIELD_DESCRIPTION command\n",
+ sub { parse_conf("FIELD_DESCRIPTION foo\n"); });
+
+is_deeply(parse_conf("FIELD_DESCRIPTION foo \"my description\"\n"),
+ { fielddescription => { foo => { DESCRIPTION => "\"my description\"", POS => { FILE => "nofile", LINE => 1}, USED => 0 }}});
+
+is_deeply(parse_conf("FIELD_DESCRIPTION foo my description\n"),
+ { fielddescription => { foo => { DESCRIPTION => "my", POS => { FILE => "nofile", LINE => 1}, USED => 0 }}});
+
+is_deeply(parse_conf("CODE START\ndata\nCODE END\n"), { override => "data\n" });
+is_deeply(parse_conf("CODE START\ndata\nmore data\nCODE END\n"), { override => "data\nmore data\n" });
+test_warnings("nofile:1: CODE END outside CODE section\n",
+ sub { parse_conf("CODE END\n"); } );
+
+is_deeply(parse_conf("TYPE winreg_String dissect_myminregstring(); FT_STRING BASE_DEC 0 0 2\n"), { types => { winreg_String => {
+ NAME => "winreg_String",
+ POS => { FILE => "nofile", LINE => 1 },
+ USED => 0,
+ DISSECTOR_NAME => "dissect_myminregstring();",
+ FT_TYPE => "FT_STRING",
+ BASE_TYPE => "BASE_DEC",
+ MASK => 0,
+ VALSSTRING => 0,
+ ALIGNMENT => 2}}});
+
+ok(valid_ft_type("FT_UINT32"));
+ok(not valid_ft_type("BLA"));
+ok(not valid_ft_type("ft_uint32"));
+ok(valid_ft_type("FT_BLA"));
+
+ok(valid_base_type("BASE_DEC"));
+ok(valid_base_type("BASE_HEX"));
+ok(not valid_base_type("base_dec"));
+ok(not valid_base_type("BLA"));
+ok(not valid_base_type("BASEDEC"));
+
+test_errors("nofile:1: incomplete TYPE command\n",
+ sub { parse_conf("TYPE mytype dissector\n"); });
+
+test_warnings("nofile:1: dissector name does not contain `dissect'\n",
+ sub { parse_conf("TYPE winreg_String myminregstring; FT_STRING BASE_DEC 0 0 2\n"); });
+
+test_warnings("nofile:1: invalid FT_TYPE `BLA'\n",
+ sub { parse_conf("TYPE winreg_String dissect_myminregstring; BLA BASE_DEC 0 0 2\n"); });
+
+test_warnings("nofile:1: invalid BASE_TYPE `BLOE'\n",
+ sub { parse_conf("TYPE winreg_String dissect_myminregstring; FT_UINT32 BLOE 0 0 2\n"); });
+
+is_deeply(parse_conf("TFS hf_bla \"True string\" \"False String\"\n"),
+ { tfs => { hf_bla => {
+ TRUE_STRING => "\"True string\"",
+ FALSE_STRING => "\"False String\"" } } });
+
+test_errors("nofile:1: incomplete TFS command\n",
+ sub { parse_conf("TFS hf_bla \"Trues\""); } );
+
+test_errors("nofile:1: incomplete PARAM_VALUE command\n",
+ sub { parse_conf("PARAM_VALUE\n"); });
+
+is_deeply(parse_conf("PARAM_VALUE Life 42\n"),
+ { dissectorparams => {
+ Life => {
+ DISSECTOR => "Life",
+ POS => { FILE => "nofile", LINE => 1 },
+ PARAM => 42,
+ USED => 0
+ }
+ }
+ });
+
+is_deeply(parse_conf("STRIP_PREFIX bla_\n"),
+ { strip_prefixes => [ "bla_" ] });
+
+is_deeply(parse_conf("STRIP_PREFIX bla_\nSTRIP_PREFIX bloe\n"),
+ { strip_prefixes => [ "bla_", "bloe" ] });
+
+is_deeply(parse_conf("PROTOCOL atsvc \"Scheduling jobs on remote machines\" \"at\" \"atsvc\"\n"),
+ { protocols => {
+ atsvc => {
+ LONGNAME => "\"Scheduling jobs on remote machines\"",
+ SHORTNAME => "\"at\"",
+ FILTERNAME => "\"atsvc\""
+ }
+ }
+ }
+);
+
+is_deeply(parse_conf("IMPORT bla\n"), {
+ imports => {
+ bla => {
+ NAME => "bla",
+ DATA => "",
+ USED => 0,
+ POS => { FILE => "nofile", LINE => 1 }
+ }
+ }
+ }
+);
+
+is_deeply(parse_conf("IMPORT bla fn1 fn2 fn3\n"), {
+ imports => {
+ bla => {
+ NAME => "bla",
+ DATA => "fn1 fn2 fn3",
+ USED => 0,
+ POS => { FILE => "nofile", LINE => 1 }
+ }
+ }
+ }
+);
+
+test_errors("nofile:1: no dissectorname specified\n",
+ sub { parse_conf("IMPORT\n"); } );
+
+test_errors("nofile:1: incomplete HF_FIELD command\n",
+ sub { parse_conf("HF_FIELD hf_idx\n"); });
+
+test_errors("nofile:1: incomplete ETT_FIELD command\n",
+ sub { parse_conf("ETT_FIELD\n"); });
+
+is_deeply(parse_conf("TYPE winreg_String dissect_myminregstring(); FT_STRING BASE_DEC 0 0 0 2\n"), {
+ types => {
+ winreg_String => {
+ NAME => "winreg_String",
+ POS => { FILE => "nofile", LINE => 1 },
+ USED => 0,
+ DISSECTOR_NAME => "dissect_myminregstring();",
+ FT_TYPE => "FT_STRING",
+ BASE_TYPE => "BASE_DEC",
+ MASK => 0,
+ VALSSTRING => 0,
+ ALIGNMENT => 0
+ }
+ }
+ }
+);
+
+
+is_deeply(parse_conf("TYPE winreg_String \"offset = dissect_myminregstring(\@HF\@);\" FT_STRING BASE_DEC 0 0 0 2\n"), {
+ types => {
+ winreg_String => {
+ NAME => "winreg_String",
+ POS => { FILE => "nofile", LINE => 1 },
+ USED => 0,
+ DISSECTOR_NAME => "offset = dissect_myminregstring(\@HF\@);",
+ FT_TYPE => "FT_STRING",
+ BASE_TYPE => "BASE_DEC",
+ MASK => 0,
+ VALSSTRING => 0,
+ ALIGNMENT => 0
+ }
+ }
+ }
+);
diff --git a/tools/pidl/tests/wireshark-ndr.pl b/tools/pidl/tests/wireshark-ndr.pl
new file mode 100755
index 0000000..229315b
--- /dev/null
+++ b/tools/pidl/tests/wireshark-ndr.pl
@@ -0,0 +1,274 @@
+#!/usr/bin/perl
+# (C) 2007 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU General Public License
+# test parsing wireshark conformance files
+use strict;
+use warnings;
+
+use Test::More tests => 40;
+use FindBin qw($RealBin);
+use lib "$RealBin";
+use Util;
+use Parse::Pidl::Util qw(MyDumper);
+use strict;
+use Parse::Pidl::Wireshark::NDR qw(field2name %res PrintIdl StripPrefixes RegisterInterfaceHandoff register_hf_field ProcessImport ProcessInclude find_type DumpEttList DumpEttDeclaration DumpHfList DumpHfDeclaration DumpFunctionTable register_type register_ett);
+
+is("Access Mask", field2name("access_mask"));
+is("AccessMask", field2name("AccessMask"));
+
+my $x = new Parse::Pidl::Wireshark::NDR();
+$x->PrintIdl("foo\nbar\n");
+is("/* IDL: foo */
+/* IDL: bar */
+
+", $x->{res}->{code});
+
+is("bla_foo", StripPrefixes("bla_foo", []));
+is("foo", StripPrefixes("bla_foo", ["bla"]));
+is("foo_bla", StripPrefixes("foo_bla", ["bla"]));
+
+$x = new Parse::Pidl::Wireshark::NDR();
+$x->RegisterInterfaceHandoff({});
+is($x->{res}->{code}, "");
+ok(not defined($x->{hf_used}->{hf_bla_opnum}));
+
+$x = new Parse::Pidl::Wireshark::NDR();
+$x->{res}->{code} = "";
+$x->RegisterInterfaceHandoff({UUID => "uuid", NAME => "bla"});
+is($x->{res}->{code}, 'void proto_reg_handoff_dcerpc_bla(void)
+{
+ dcerpc_init_uuid(proto_dcerpc_bla, ett_dcerpc_bla,
+ &uuid_dcerpc_bla, ver_dcerpc_bla,
+ bla_dissectors, hf_bla_opnum);
+}
+');
+is($x->{hf_used}->{hf_bla_opnum}, 1);
+
+$x->{conformance} = {};
+is("hf_bla_idx",
+ $x->register_hf_field("hf_bla_idx", "bla", "my.filter", "FT_UINT32", "BASE_HEX", "NULL", 0xF, undef));
+is_deeply($x->{conformance}, {
+ header_fields => {
+ "hf_bla_idx" => {
+ INDEX => "hf_bla_idx",
+ NAME => "bla",
+ FILTER => "my.filter",
+ BASE_TYPE => "BASE_HEX",
+ FT_TYPE => "FT_UINT32",
+ VALSSTRING => "NULL",
+ BLURB => undef,
+ MASK => 0xF
+ }
+ },
+ hf_renames => {},
+ fielddescription => {}
+});
+
+$x->{conformance} = { fielddescription => { hf_bla_idx => { DESCRIPTION => "Some Description" }}};
+is("hf_bla_idx",
+ $x->register_hf_field("hf_bla_idx", "bla", "my.filter", "FT_UINT32", "BASE_HEX", "NULL", 0xF, undef));
+is_deeply($x->{conformance}, {
+ fielddescription => {
+ hf_bla_idx => {
+ DESCRIPTION => "Some Description",
+ USED => 1
+ }
+ },
+ header_fields => {
+ "hf_bla_idx" => {
+ INDEX => "hf_bla_idx",
+ NAME => "bla",
+ FILTER => "my.filter",
+ BASE_TYPE => "BASE_HEX",
+ FT_TYPE => "FT_UINT32",
+ VALSSTRING => "NULL",
+ BLURB => "Some Description",
+ MASK => 0xF
+ }
+ },
+ hf_renames => {},
+});
+
+$x->{conformance} = { fielddescription => { hf_bla_idx => { DESCRIPTION => "Some Description" }}};
+is("hf_bla_idx",
+ $x->register_hf_field("hf_bla_idx", "bla", "my.filter", "FT_UINT32", "BASE_HEX", "NULL", 0xF,
+ "Actual Description"));
+is_deeply($x->{conformance}, {
+ fielddescription => {
+ hf_bla_idx => { DESCRIPTION => "Some Description" }
+ },
+ header_fields => {
+ "hf_bla_idx" => {
+ INDEX => "hf_bla_idx",
+ NAME => "bla",
+ FILTER => "my.filter",
+ BASE_TYPE => "BASE_HEX",
+ FT_TYPE => "FT_UINT32",
+ VALSSTRING => "NULL",
+ BLURB => "Actual Description",
+ MASK => 0xF
+ }
+ },
+ hf_renames => {},
+});
+
+
+
+$x->{conformance} = { hf_renames => { "hf_bla_idx" => { NEWNAME => "hf_bloe_idx" } } };
+$x->register_hf_field("hf_bla_idx", "bla", "my.filter", "FT_UINT32", "BASE_HEX", "NULL", 0xF, undef);
+is_deeply($x->{conformance}, {
+ hf_renames => { hf_bla_idx => { USED => 1, NEWNAME => "hf_bloe_idx" } } });
+
+$x->{hf_used} = { hf_bla => 1 };
+test_warnings("", sub {
+ $x->CheckUsed({ header_fields => { foo => { INDEX => "hf_bla" }}})});
+
+$x->{hf_used} = { };
+test_warnings("hf field `hf_bla' not used\n", sub {
+ $x->CheckUsed({ header_fields => { foo => { INDEX => "hf_bla" }}})});
+
+test_warnings("hf field `hf_id' not used\n",
+ sub { $x->CheckUsed({
+ hf_renames => {
+ hf_id => {
+ OLDNAME => "hf_id",
+ NEWNAME => "hf_newid",
+ USED => 0
+ }
+ }
+}); } );
+
+test_warnings("dissector param never used\n",
+ sub { $x->CheckUsed({
+ dissectorparams => {
+ dissect_foo => {
+ PARAM => 42,
+ USED => 0
+ }
+ }
+}); } );
+
+test_warnings("description never used\n",
+ sub { $x->CheckUsed({
+ fielddescription => {
+ hf_bla => {
+ USED => 0
+ }
+ }
+}); } );
+
+test_warnings("import never used\n",
+ sub { $x->CheckUsed({
+ imports => {
+ bla => {
+ USED => 0
+ }
+ }
+}); } );
+
+test_warnings("nofile:1: type never used\n",
+ sub { $x->CheckUsed({
+ types => {
+ bla => {
+ USED => 0,
+ POS => { FILE => "nofile", LINE => 1 }
+ }
+ }
+}); } );
+
+test_warnings("True/False description never used\n",
+ sub { $x->CheckUsed({
+ tfs => {
+ hf_bloe => {
+ USED => 0
+ }
+ }
+}); } );
+
+$x = new Parse::Pidl::Wireshark::NDR();
+$x->ProcessImport("security", "bla");
+is($x->{res}->{hdr}, "#include \"packet-dcerpc-bla.h\"\n\n");
+
+$x = new Parse::Pidl::Wireshark::NDR();
+$x->ProcessImport("\"bla.idl\"", "\"foo.idl\"");
+is($x->{res}->{hdr}, "#include \"packet-dcerpc-bla.h\"\n" .
+ "#include \"packet-dcerpc-foo.h\"\n\n");
+
+$x = new Parse::Pidl::Wireshark::NDR();
+$x->ProcessInclude("foo.h", "bla.h", "bar.h");
+is($x->{res}->{hdr}, "#include \"foo.h\"\n" .
+ "#include \"bla.h\"\n" .
+ "#include \"bar.h\"\n\n");
+
+$x->{conformance} = {types => { bla => "brainslug" } };
+is("brainslug", $x->find_type("bla"));
+
+is(DumpEttList(["ett_t1", "ett_bla"]),
+ "\tstatic gint *ett[] = {\n" .
+ "\t\t&ett_t1,\n" .
+ "\t\t&ett_bla,\n" .
+ "\t};\n");
+
+is(DumpEttList(), "\tstatic gint *ett[] = {\n\t};\n");
+is(DumpEttList(["bla"]), "\tstatic gint *ett[] = {\n\t\t&bla,\n\t};\n");
+
+is(DumpEttDeclaration(["void", "zoid"]),
+ "\n/* Ett declarations */\n" .
+ "static gint void = -1;\n" .
+ "static gint zoid = -1;\n" .
+ "\n");
+
+is(DumpEttDeclaration(), "\n/* Ett declarations */\n\n");
+
+$x->{conformance} = {
+ header_fields => {
+ hf_bla => { INDEX => "hf_bla", NAME => "Bla", FILTER => "bla.field", FT_TYPE => "FT_UINT32", BASE_TYPE => "BASE_DEC", VALSSTRING => "NULL", MASK => 0xFF, BLURB => "NULL" }
+ }
+};
+
+is($x->DumpHfList(), "\tstatic hf_register_info hf[] = {
+ { &hf_bla,
+ { \"Bla\", \"bla.field\", FT_UINT32, BASE_DEC, NULL, 255, \"NULL\", HFILL }},
+ };
+");
+
+is($x->DumpHfDeclaration(), "
+/* Header field declarations */
+static gint hf_bla = -1;
+
+");
+
+is(DumpFunctionTable({
+ NAME => "someif",
+ FUNCTIONS => [ { NAME => "fn1", OPNUM => 3 }, { NAME => "someif_fn2", OPNUM => 2 } ] }),
+'static dcerpc_sub_dissector someif_dissectors[] = {
+ { 3, "fn1",
+ someif_dissect_fn1_request, someif_dissect_fn1_response},
+ { 2, "fn2",
+ someif_dissect_fn2_request, someif_dissect_fn2_response},
+ { 0, NULL, NULL, NULL }
+};
+');
+
+$x->{conformance} = {};
+$x->register_type("bla_type", "dissect_bla", "FT_UINT32", "BASE_HEX", 0xFF, "NULL", 4);
+is_deeply($x->{conformance}, {
+ types => {
+ bla_type => {
+ NAME => "bla_type",
+ DISSECTOR_NAME => "dissect_bla",
+ FT_TYPE => "FT_UINT32",
+ BASE_TYPE => "BASE_HEX",
+ MASK => 255,
+ VALSSTRING => "NULL",
+ ALIGNMENT => 4
+ }
+ }
+ }
+);
+
+$x->{ett} = [];
+$x->register_ett("name");
+is_deeply($x->{ett}, ["name"]);
+$x->register_ett("leela");
+is_deeply($x->{ett}, ["name", "leela"]);
diff --git a/tools/pidl/wscript b/tools/pidl/wscript
new file mode 100644
index 0000000..f4ff902
--- /dev/null
+++ b/tools/pidl/wscript
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+import os, Logs
+from samba_utils import MODE_755
+
+# This function checks if a perl module is installed on the system.
+def check_system_perl_module(conf, module, version=None):
+ bundle_name = module.replace('::', '_')
+ module_check = module
+
+ # Create module string with version
+ if version:
+ module_check = module + ' ' + str(version)
+
+ # Check if we have to bundle it.
+ if conf.LIB_MUST_BE_BUNDLED(bundle_name.lower()):
+ return False
+
+ # Check for system perl module
+ if not conf.check_perl_module(module_check):
+ return False
+
+ conf.define('USING_SYSTEM_%s' % bundle_name.upper(), 1)
+
+ return True
+
+def set_options(opt):
+ return
+
+def configure(conf):
+ # Check if perl(Parse::Yapp::Driver) is available.
+ check_system_perl_module(conf, "Parse::Yapp::Driver", 1.05)
+
+ # we need a recent version of MakeMaker to get the right man page names
+ if conf.CHECK_PERL_MANPAGE():
+ conf.env.PERLMAN1EXT = conf.CHECK_PERL_MANPAGE(section='1')
+ conf.env.PERLMAN3EXT = conf.CHECK_PERL_MANPAGE(section='3')
+ conf.DEFINE('HAVE_PERL_MAKEMAKER', 1)
+
+ # yapp is used for building the parser
+ conf.find_program('yapp', var='YAPP')
+ conf.find_program('pod2man', var='POD2MAN')
+
+def build(bld):
+ bld.INSTALL_FILES('${BINDIR}', 'pidl', chmod=MODE_755, perl_fixup=True)
+
+ bld.RECURSE('lib')
+
+ if not bld.CONFIG_SET('HAVE_PERL_MAKEMAKER'):
+ return
+
+ pidl_manpages = {
+ 'pidl': 'man1/pidl.${PERLMAN1EXT}',
+ 'lib/Parse/Pidl/NDR.pm': 'man3/Parse::Pidl::NDR.${PERLMAN3EXT}',
+ 'lib/Parse/Pidl/Wireshark/Conformance.pm': 'man3/Parse::Pidl::Wireshark::Conformance.${PERLMAN3EXT}',
+ 'lib/Parse/Pidl/Dump.pm': 'man3/Parse::Pidl::Dump.${PERLMAN3EXT}',
+ 'lib/Parse/Pidl/Util.pm': 'man3/Parse::Pidl::Util.${PERLMAN3EXT}',
+ 'lib/Parse/Pidl/Wireshark/NDR.pm': 'man3/Parse::Pidl::Wireshark::NDR.${PERLMAN3EXT}'
+ }
+
+ for k, v in pidl_manpages.iteritems():
+ pidl_manpages[k] = bld.EXPAND_VARIABLES(v)
+
+ # use perl to build the manpages
+ bld.env.pidl_srcdir = os.path.join(bld.srcnode.abspath(), 'pidl')
+
+ bld.SET_BUILD_GROUP('final')
+ if 'POD2MAN' in bld.env and bld.env['POD2MAN'] != '':
+ for src, manpage in pidl_manpages.iteritems():
+ bld(rule='${POD2MAN} -c "Samba Documentation" ${SRC} ${TGT}',
+ shell=True,
+ source=src,
+ install_path=os.path.dirname(bld.EXPAND_VARIABLES('${MANDIR}/'+manpage)),
+ target=os.path.basename(manpage))
+
+ # we want to prefer the git version of the parsers if we can.
+ # Only if the source has changed do we want to re-run yapp
+ # But we force the developer to use the pidl standalone build
+ # to regenerate the files.
+ # TODO: only warn in developer mode and if 'git diff HEAD'
+ # shows a difference
+ warn_about_grammar_changes = ('PIDL_BUILD_WARNINGS' in bld.env and (
+ bld.IS_NEWER('idl.yp', 'lib/Parse/Pidl/IDL.pm') or
+ bld.IS_NEWER('expr.yp', 'lib/Parse/Pidl/Expr.pm')))
+
+ if warn_about_grammar_changes:
+ Logs.warn('''
+Pidl grammar files have changed. Please use the pidl standalone build
+to regenerate them with yapp.
+
+$ cd ../pidl
+$ perl Makefile.PL
+$ make lib/Parse/Pidl/IDL.pm lib/Parse/Pidl/Expr.pm
+$ git add lib/Parse/Pidl/IDL.pm lib/Parse/Pidl/Expr.pm
+$ git commit
+$ cd -
+
+If your 100% sure you haven't changed idl.yp and expr.yp
+try this to avoid this message:
+
+$ touch ../pidl/lib/Parse/Pidl/IDL.pm ../pidl/lib/Parse/Pidl/Expr.pm
+''')
+
diff --git a/tools/pkt-from-core.py b/tools/pkt-from-core.py
new file mode 100755
index 0000000..efc252d
--- /dev/null
+++ b/tools/pkt-from-core.py
@@ -0,0 +1,477 @@
+#!/usr/bin/env python
+"""
+Retrieve a packet from a wireshark/tshark core file
+and save it in a packet-capture file.
+"""
+
+# Copyright (C) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import getopt
+import os
+import re
+import sys
+import tempfile
+
+exec_file = None
+core_file = None
+output_file = None
+
+verbose = 0
+debug = 0
+
+class BackTrace:
+ re_frame = re.compile(r"^#(?P<num>\d+) ")
+ re_func1 = re.compile(r"^#\d+\s+(?P<func>\w+) \(")
+ re_func2 = re.compile(r"^#\d+\s+0x[A-Fa-f\d]+ in (?P<func>\w+) \(")
+
+ def __init__(self, lines):
+
+ # In order; each item is the function name.
+ self.frames = []
+ found_non_bt_frame = 0
+ frame_will_be = 0
+
+ for line in lines:
+ m = self.re_frame.search(line)
+ if m:
+ # Skip the first frame that gdb shows,
+ # which is not part of the backtrace.
+ if not found_non_bt_frame:
+ found_non_bt_frame = 1
+ continue
+
+ # Get the frame number and make sure it's
+ # what we expect it should be.
+ frame_num = int(m.group("num"))
+ if frame_num != frame_will_be:
+ sys.exit("Found frame %d instead of %d" %
+ (frame_num, frame_will_be))
+
+ # Find the function name. XXX - need to handle '???'
+ n = self.re_func1.search(line)
+ if not n:
+ n = self.re_func2.search(line)
+
+ if n:
+ func = n.group("func")
+ else:
+ sys.exit("Function name not found in %s" % (line,))
+
+ # Save the info
+ self.frames.append(func)
+ frame_will_be += 1
+
+ def Frames(self):
+ return self.frames
+
+
+ def HasFunction(self, func):
+ return func in self.frames
+
+ def Frame(self, func):
+ return self.frames.index(func)
+
+
+# Some values from wiretap; wiretap should be a shared
+# library and a Python module should be created for it so
+# this program could just write a libpcap file directly.
+WTAP_ENCAP_NONE = -2
+WTAP_ENCAP_PER_PACKET = -1
+WTAP_ENCAP_UNKNOWN = 0
+WTAP_ENCAP_ETHERNET = 1
+WTAP_ENCAP_TOKEN_RING = 2
+WTAP_ENCAP_SLIP = 3
+WTAP_ENCAP_PPP = 4
+WTAP_ENCAP_FDDI = 5
+WTAP_ENCAP_FDDI_BITSWAPPED = 6
+WTAP_ENCAP_RAW_IP = 7
+WTAP_ENCAP_ARCNET = 8
+WTAP_ENCAP_ATM_RFC1483 = 9
+WTAP_ENCAP_LINUX_ATM_CLIP = 10
+WTAP_ENCAP_LAPB = 11
+WTAP_ENCAP_ATM_SNIFFER = 12
+WTAP_ENCAP_NULL = 13
+WTAP_ENCAP_ASCEND = 14
+WTAP_ENCAP_LAPD = 15
+WTAP_ENCAP_V120 = 16
+WTAP_ENCAP_PPP_WITH_PHDR = 17
+WTAP_ENCAP_IEEE_802_11 = 18
+WTAP_ENCAP_SLL = 19
+WTAP_ENCAP_FRELAY = 20
+WTAP_ENCAP_CHDLC = 21
+WTAP_ENCAP_CISCO_IOS = 22
+WTAP_ENCAP_LOCALTALK = 23
+WTAP_ENCAP_PRISM_HEADER = 24
+WTAP_ENCAP_PFLOG = 25
+WTAP_ENCAP_AIROPEEK = 26
+WTAP_ENCAP_HHDLC = 27
+# last WTAP_ENCAP_ value + 1
+WTAP_NUM_ENCAP_TYPES = 28
+
+wtap_to_pcap_map = {
+ WTAP_ENCAP_NULL : 0,
+ WTAP_ENCAP_ETHERNET : 1,
+ WTAP_ENCAP_TOKEN_RING : 6,
+ WTAP_ENCAP_ARCNET : 7,
+ WTAP_ENCAP_SLIP : 8,
+ WTAP_ENCAP_PPP : 9,
+ WTAP_ENCAP_FDDI_BITSWAPPED : 10,
+ WTAP_ENCAP_FDDI : 10,
+ WTAP_ENCAP_ATM_RFC1483 : 11,
+ WTAP_ENCAP_RAW_IP : 12,
+ WTAP_ENCAP_LINUX_ATM_CLIP : 16, # or 18, or 19...
+ WTAP_ENCAP_CHDLC : 104,
+ WTAP_ENCAP_IEEE_802_11 : 105,
+ WTAP_ENCAP_SLL : 113,
+ WTAP_ENCAP_LOCALTALK : 114,
+ WTAP_ENCAP_PFLOG : 117,
+ WTAP_ENCAP_CISCO_IOS : 118,
+ WTAP_ENCAP_PRISM_HEADER : 119,
+ WTAP_ENCAP_HHDLC : 121,
+}
+
+
+wtap_name = {
+ WTAP_ENCAP_NONE : "None",
+ WTAP_ENCAP_UNKNOWN : "Unknown",
+ WTAP_ENCAP_ETHERNET : "Ethernet",
+ WTAP_ENCAP_TOKEN_RING : "Token-Ring",
+ WTAP_ENCAP_SLIP : "SLIP",
+ WTAP_ENCAP_PPP : "PPP",
+ WTAP_ENCAP_FDDI : "FDDI",
+ WTAP_ENCAP_FDDI_BITSWAPPED : "FDDI (Bitswapped)",
+ WTAP_ENCAP_RAW_IP : "Raw IP",
+ WTAP_ENCAP_ARCNET : "ARCNET",
+ WTAP_ENCAP_ATM_RFC1483 : "ATM RFC1483",
+ WTAP_ENCAP_LINUX_ATM_CLIP : "Linux ATM CLIP",
+ WTAP_ENCAP_LAPB : "LAPB",
+ WTAP_ENCAP_ATM_SNIFFER : "ATM Sniffer",
+ WTAP_ENCAP_NULL : "Null",
+ WTAP_ENCAP_ASCEND : "Ascend",
+ WTAP_ENCAP_LAPD : "LAPD",
+ WTAP_ENCAP_V120 : "V.120",
+ WTAP_ENCAP_PPP_WITH_PHDR : "PPP (with PHDR)",
+ WTAP_ENCAP_IEEE_802_11 : "IEEE 802.11",
+ WTAP_ENCAP_SLL : "SLL",
+ WTAP_ENCAP_FRELAY : "Frame Relay",
+ WTAP_ENCAP_CHDLC : "Cisco HDLC",
+ WTAP_ENCAP_CISCO_IOS : "Cisco IOS",
+ WTAP_ENCAP_LOCALTALK : "LocalTalk",
+ WTAP_ENCAP_PRISM_HEADER : "Prism Header",
+ WTAP_ENCAP_PFLOG : "PFLog",
+ WTAP_ENCAP_AIROPEEK : "AiroPeek",
+ WTAP_ENCAP_HHDLC : "HHDLC",
+}
+
+def wtap_to_pcap(wtap):
+ if not wtap_to_pcap_map.has_key(wtap):
+ sys.exit("Don't know how to convert wiretap encoding %d to libpcap." % \
+ (wtap))
+
+ return wtap_to_pcap_map[wtap]
+
+
+def run_gdb(*commands):
+ if len(commands) == 0:
+ return []
+
+ # Create a temporary file
+ fname = tempfile.mktemp()
+ try:
+ fh = open(fname, "w")
+ except IOError, err:
+ sys.exit("Cannot open %s for writing: %s" % (fname, err))
+
+ # Put the commands in it
+ for cmd in commands:
+ fh.write(cmd)
+ fh.write("\n")
+
+ fh.write("quit\n")
+ try:
+ fh.close()
+ except IOError, err:
+ try:
+ os.unlink(fname)
+ except Exception:
+ pass
+ sys.exit("Cannot close %s: %s" % (fname, err))
+
+
+ # Run gdb
+ cmd = "gdb --nw --quiet --command=%s %s %s" % (fname, exec_file, core_file)
+ if verbose:
+ print "Invoking %s" % (cmd,)
+ try:
+ pipe = os.popen(cmd)
+ except OSError, err:
+ try:
+ os.unlink(fname)
+ except Exception:
+ pass
+ sys.exit("Cannot run gdb: %s" % (err,))
+
+ # Get gdb's output
+ result = pipe.readlines()
+ error = pipe.close()
+ if error is not None:
+ try:
+ os.unlink(fname)
+ except Exception:
+ pass
+ sys.exit("gdb returned an exit value of %s" % (error,))
+
+
+ # Remove the temp file and return the results
+ try:
+ os.unlink(fname)
+ except Exception:
+ pass
+ return result
+
+def get_value_from_frame(frame_num, variable, fmt=""):
+ cmds = []
+ if frame_num > 0:
+ cmds.append("up %d" % (frame_num,))
+
+ cmds.append("print %s %s" % (fmt, variable))
+ lines = apply(run_gdb, cmds)
+
+ LOOKING_FOR_START = 0
+ READING_VALUE = 1
+ state = LOOKING_FOR_START
+ result = ""
+ for line in lines:
+ if line[-1] == "\n":
+ line = line[0:-1]
+ if line[-1] == "\r":
+ line = line[0:-1]
+
+ if state == LOOKING_FOR_START:
+ if len(line) < 4:
+ continue
+ else:
+ if line[0:4] == "$1 =":
+ result = line[4:]
+ state = READING_VALUE
+
+ elif state == READING_VALUE:
+ result += line
+
+ return result
+
+def get_int_from_frame(frame_num, variable):
+ text = get_value_from_frame(frame_num, variable)
+ try:
+ integer = int(text)
+ except ValueError:
+ sys.exit("Could not convert '%s' to integer." % (text,))
+ return integer
+
+
+def get_byte_array_from_frame(frame_num, variable, length):
+ cmds = []
+ if frame_num > 0:
+ cmds.append("up %d" % (frame_num,))
+
+ cmds.append("print %s" % (variable,))
+ cmds.append("x/%dxb %s" % (length, variable))
+ lines = apply(run_gdb, cmds)
+ if debug:
+ print lines
+
+ bytes = []
+
+ LOOKING_FOR_START = 0
+ BYTES = 1
+ state = LOOKING_FOR_START
+
+ for line in lines:
+ if state == LOOKING_FOR_START:
+ if len(line) < 3:
+ continue
+ elif line[0:3] == "$1 ":
+ state = BYTES
+ elif state == BYTES:
+ line.rstrip()
+ fields = line.split('\t')
+ if fields[0][-1] != ":":
+ print "Failed to parse byte array from gdb:"
+ print line
+ sys.exit(1)
+
+ for field in fields[1:]:
+ val = int(field, 16)
+ bytes.append(val)
+ else:
+ assert 0
+
+ return bytes
+
+def make_cap_file(pkt_data, lnk_t):
+
+ pcap_lnk_t = wtap_to_pcap(lnk_t)
+
+ # Create a temporary file
+ fname = tempfile.mktemp()
+ try:
+ fh = open(fname, "w")
+ except IOError, err:
+ sys.exit("Cannot open %s for writing: %s" % (fname, err))
+
+ print "Packet Data:"
+
+ # Put the hex dump in it
+ offset = 0
+ BYTES_IN_ROW = 16
+ for byte in pkt_data:
+ if (offset % BYTES_IN_ROW) == 0:
+ print >> fh, "\n%08X " % (offset,),
+ print "\n%08X " % (offset,),
+
+ print >> fh, "%02X " % (byte,),
+ print "%02X " % (byte,),
+ offset += 1
+
+ print >> fh, "\n"
+ print "\n"
+
+ try:
+ fh.close()
+ except IOError, err:
+ try:
+ os.unlink(fname)
+ except Exception:
+ pass
+ sys.exit("Cannot close %s: %s" % (fname, err))
+
+
+ # Run text2pcap
+ cmd = "text2pcap -q -l %s %s %s" % (pcap_lnk_t, fname, output_file)
+# print "Command is %s" % (cmd,)
+ try:
+ retval = os.system(cmd)
+ except OSError, err:
+ try:
+ os.unlink(fname)
+ except Exception:
+ pass
+ sys.exit("Cannot run text2pcap: %s" % (err,))
+
+ # Remove the temp file
+ try:
+ os.unlink(fname)
+ except Exception:
+ pass
+
+ if retval == 0:
+ print "%s created with %d bytes in packet, and %s encoding." % \
+ (output_file, len(pkt_data), wtap_name[lnk_t])
+ else:
+ sys.exit("text2pcap did not run successfully.")
+
+
+
+
+def try_frame(func_text, cap_len_text, lnk_t_text, data_text):
+
+ # Get the back trace
+ bt_text = run_gdb("bt")
+ bt = BackTrace(bt_text)
+ if not bt.HasFunction(func_text):
+ print "%s() not found in backtrace." % (func_text,)
+ return 0
+ else:
+ print "%s() found in backtrace." % (func_text,)
+
+ # Figure out where the call to epan_dissect_run is.
+ frame_num = bt.Frame(func_text)
+
+ # Get the capture length
+ cap_len = get_int_from_frame(frame_num, cap_len_text)
+
+ # Get the encoding type
+ lnk_t = get_int_from_frame(frame_num, lnk_t_text)
+
+ # Get the packet data
+ pkt_data = get_byte_array_from_frame(frame_num, data_text, cap_len)
+
+ if verbose:
+ print "Length=%d" % (cap_len,)
+ print "Encoding=%d" % (lnk_t,)
+ print "Data (%d bytes) = %s" % (len(pkt_data), pkt_data)
+ make_cap_file(pkt_data, lnk_t)
+ return 1
+
+def run():
+ if try_frame("epan_dissect_run",
+ "fd->cap_len", "fd->lnk_t", "data"):
+ return
+ elif try_frame("add_packet_to_packet_list",
+ "fdata->cap_len", "fdata->lnk_t", "buf"):
+ return
+ else:
+ sys.exit("A packet cannot be pulled from this core.")
+
+
+def usage():
+ print "pkt-from-core.py [-v] -w capture_file executable-file (core-file or process-id)"
+ print ""
+ print "\tGiven an executable file and a core file, this tool"
+ print "\tuses gdb to retrieve the packet that was being dissected"
+ print "\tat the time wireshark/tshark stopped running. The packet"
+ print "\tis saved in the capture_file specified by the -w option."
+ print ""
+ print "\t-v : verbose"
+ sys.exit(1)
+
+def main():
+ global exec_file
+ global core_file
+ global output_file
+ global verbose
+ global debug
+
+ optstring = "dvw:"
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], optstring)
+ except getopt.error:
+ usage()
+
+ for opt, arg in opts:
+ if opt == "-w":
+ output_file = arg
+ elif opt == "-v":
+ verbose = 1
+ elif opt == "-d":
+ debug = 1
+ else:
+ assert 0
+
+ if output_file is None:
+ usage()
+
+ if len(args) != 2:
+ usage()
+
+ exec_file = args[0]
+ core_file = args[1]
+
+ run()
+
+if __name__ == '__main__':
+ main()
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/pre-commit b/tools/pre-commit
new file mode 100755
index 0000000..be74103
--- /dev/null
+++ b/tools/pre-commit
@@ -0,0 +1,135 @@
+#!/bin/sh
+# Copyright 2013, Alexis La Goutte (See AUTHORS file)
+#
+# For git user: copy tools/pre-commit to .git/hooks/ folder and make it
+# executable. To bypass it for a single commit, use the --no-verify argument.
+# Using --no-verify will then fail during git review because of a missing
+# ChangeID. Fix that by running git review -i. Do not use -i during normal
+# operation.
+#
+# Alternatively, invoke it directly with the commit ID. Example for checking the
+# last commit:
+#
+# tools/pre-commit HEAD~
+#
+# Relative paths are also supported. For instance, if you are in epan/, then you
+# could invoke `../tools/pre-commit HEAD` to check for changes to staged files.
+#
+# From
+# http://mark-story.com/posts/view/using-git-commit-hooks-to-prevent-stupid-mistakes
+#
+
+# If the commit identifier is not given, use HEAD instead.
+COMMIT_ID="${1:-HEAD}"
+
+UNAME=$( uname -a )
+
+case "$UNAME" in
+ *\ Msys)
+ pyvar="pythonw.exe"
+ ;;
+ *)
+ pyvar="python3"
+ ;;
+esac
+
+PYBIN=${WS_GITHOOK_PYTHON:-$pyvar}
+
+# Path to hook script in the .git directory
+hook_script=${GIT_DIR:-.git}/hooks/pre-commit
+
+# Always start in the root directory of the source tree, this allows for
+# invocations via relative paths (such as ../tools/pre-commit):
+if ! cd "$(git rev-parse --show-toplevel)" ; then
+ echo "Can't change to the top-level source directory."
+ exit 1
+fi
+
+# Check for newer (actually, different) versions of the pre-commit script
+# (but only if invoked as hook, i.e. the commit ID is not given as argument).
+if [ -z "$1" ] && [ -f "$hook_script" ]; then
+ if ! cmp -s "$hook_script" tools/pre-commit; then
+ echo "Pre-commit hook script is outdated, please update! (cp tools/pre-commit ${hook_script})"
+ fi
+fi
+
+exit_status=0
+
+COMMIT_FILES=$( git diff-index --cached --name-status "${COMMIT_ID}" | grep -v "^D" | cut -f2 | grep "\\.[ch]$" )
+DIAMETER_FILES=$( git diff-index --cached --name-status "${COMMIT_ID}" | grep -v "^D" | cut -f2 | grep diameter/ )
+
+# Path to filter script in the tools directory
+filter_script=${PWD}/tools/pre-commit-ignore.py
+filter_conf=${PWD}/tools/pre-commit-ignore.conf
+
+if [ -f "$filter_script" ] && [ -f "$filter_conf" ]; then
+ CHECK_FILES=$( echo "$COMMIT_FILES" | "$PYBIN" "$filter_script" "$filter_conf" ) || exit
+else
+ CHECK_FILES="$COMMIT_FILES"
+fi
+
+bad_alloc_patterns=${PWD}/tools/detect_bad_alloc_patterns.py
+echo "$COMMIT_FILES" | $PYBIN "$bad_alloc_patterns"
+
+# On windows python will output \r\n line endings - we don't want that.
+#
+# Do not use sed, as not all versions of sed support \r as meaning CR
+# in a regexp - the only version that does so might be GNU sed; the
+# GNU sed documentation says that only \n and \\ can be used in a
+# portable script.
+#
+# The Single UNIX Specification says that tr supports \r; most if not
+# all modern UN*Xes should support it.
+CHECK_FILES=$( echo "$CHECK_FILES" | tr -d '\r' )
+
+for FILE in $CHECK_FILES; do
+ # Skip some special cases
+ FILE_BASENAME="$( basename "$FILE" )"
+ # This should only be done on code that's part of one or more
+ # Wireshark programs; idl2wrs.c is a developer tool, not a
+ # Wireshark program, so these tests don't apply.
+ if test "$FILE_BASENAME" = "idl2wrs.c"
+ then
+ continue
+ fi
+ if test "$FILE_BASENAME" = "wmem_test.c"
+ then
+ continue
+ fi
+
+ #Check if checkhf is good
+ ./tools/checkhf.pl "$FILE" || exit_status=1
+
+ #Check if checkAPIs is good
+ ./tools/checkAPIs.pl -p "$FILE" || exit_status=1
+
+ #Check if fix-encoding-args is good
+ ./tools/fix-encoding-args.pl "$FILE" || exit_status=1
+
+ #Check if checkfiltername is good
+ ./tools/checkfiltername.pl "$FILE" || exit_status=1
+
+ # If there are whitespace errors, print the offending file names and fail. (from git pre-commit.sample)
+ git diff-index --check --cached "${COMMIT_ID}" "$FILE" || exit_status=1
+
+done
+
+if [ "x$DIAMETER_FILES" != x ]
+then
+ ./tools/validate-diameter-xml.sh > /dev/null || exit_status=1
+fi
+
+exit $exit_status
+
+#
+# Editor modelines
+#
+# Local Variables:
+# c-basic-offset: 4
+# tab-width: 8
+# indent-tabs-mode: nil
+# End:
+#
+# ex: set shiftwidth=4 tabstop=8 expandtab:
+# :indentSize=4:tabSize=8:noTabs=true:
+#
diff --git a/tools/pre-commit-ignore.conf b/tools/pre-commit-ignore.conf
new file mode 100644
index 0000000..09c40a8
--- /dev/null
+++ b/tools/pre-commit-ignore.conf
@@ -0,0 +1,27 @@
+# Files listed here are ignored by the git pre-commit hook for the purpose
+# of checking for forbidden APIs and other dissector-specific glitches.
+#
+# Each line is compared against the output of 'git diff-index --name-only'.
+# For example to skip checking this file add:
+#
+# tools/pre-commit-ignore.conf
+#
+# The pathname wildcards allowed are: '*', '?', character set '[abc]' or
+# negated with '[!abc]'.
+
+cli_main.c
+doc/packet-PROTOABBREV.c
+epan/dissectors/asn1/*/*asn
+epan/dissectors/asn1/*/packet-*-template.c
+epan/dissectors/packet-http.c
+epan/nghttp2/*
+epan/wmem/wmem_strbuf.c
+epan/wmem/wmem_strutil.c
+epan/wslua/init_wslua.c
+extcap/*
+resources/stock_icons/*
+mmdbresolve.c
+packaging/*
+tools/lemon/*
+wsutil/file_util.h
+wsutil/strptime.c
diff --git a/tools/pre-commit-ignore.py b/tools/pre-commit-ignore.py
new file mode 100755
index 0000000..63ecf3e
--- /dev/null
+++ b/tools/pre-commit-ignore.py
@@ -0,0 +1,59 @@
+#!/bin/env python3
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import sys
+import os
+import fnmatch
+
+IGNORE_CONF = "pre-commit-ignore.conf"
+
+if len(sys.argv) > 2:
+ print("Usage: {0} [path/to/ignore.conf]".format(sys.argv[0]))
+ sys.exit(1)
+
+if len(sys.argv) == 2:
+ ignore_path = sys.argv[1]
+else:
+ ignore_path = IGNORE_CONF
+
+# Function to load our patterns from 'path' for modified files
+# to be ignored (skipping any comments)
+def load_checkignore(path):
+ try:
+ with open(path) as f:
+ patterns = f.read()
+ except OSError as err:
+ sys.exit(str(err))
+ ign = [l.strip() for l in patterns.splitlines()]
+ ign = [l for l in ign if l and not l.startswith("#")]
+ return ign
+
+ignore_list = load_checkignore(ignore_path)
+
+def ignore_match(f):
+ for p in ignore_list:
+ if fnmatch.fnmatchcase(f, p):
+ return True
+ return False
+
+for line in sys.stdin:
+ line = line.strip()
+ if not ignore_match(line):
+ print(line)
+
+#
+# Editor modelines
+#
+# Local Variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# ex: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/process-x11-fields.pl b/tools/process-x11-fields.pl
new file mode 100755
index 0000000..9b66a8d
--- /dev/null
+++ b/tools/process-x11-fields.pl
@@ -0,0 +1,165 @@
+#!/usr/bin/perl
+#
+# Script to convert "x11-fields" file, listing fields for
+# X11 dissector, into header files declaring field-index
+# values and field definitions for those fields.
+#
+# Instructions for using this script are in epan/dissectors/README.X11
+#
+# Copyright 2000, Christophe Tronche <ch.tronche[AT]computer.org>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+use File::Spec;
+
+my $srcdir = shift;
+die "'$srcdir' is not a directory" unless -d $srcdir;
+
+open(DECL, "> $srcdir/x11-declarations.h") || die;
+open(REG, "> $srcdir/x11-register-info.h") || die;
+
+my $script_name = File::Spec->abs2rel ($0, $srcdir);
+
+sub add_generated_header {
+ my ($out) = @_;
+
+ print $out <<eot
+/* Do not modify this file. */
+/* It was automatically generated by $script_name. */
+eot
+ ;
+
+ # Add license text
+ print $out <<eot
+/*
+ * Copyright 2000, Christophe Tronche <ch.tronche[AT]computer.org>
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald[AT]wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+eot
+ ;
+}
+
+add_generated_header(DECL);
+add_generated_header(REG);
+
+$prefix = '';
+$subfieldStringLength = 0;
+
+while(<>) {
+ s/#.*$//go;
+ next if /^\s*$/o;
+ s/^(\s*)//o;
+ $subfield = $1;
+
+ if (length $subfield != $subfieldStringLength) {
+ if (!length $subfield) {
+ $prefix = '';
+ } elsif (length $subfield > $subfieldStringLength) {
+ $prefix .= "$lastAbbrev.";
+ } else {
+ $prefix =~ s/^(.*)\.[^\.]+\.$/$1./o;
+ }
+ $subfieldStringLength = length $subfield;
+ }
+
+ @fields = split /\s+/o ;
+ if ($fields[0] eq '#') {
+ #
+ # If the line begins with "#", treat it as a comment, by
+ # ignoring it.
+ #
+ # (We don't support comments at the end of a line; that would
+ # require some more pain in our simple parser.)
+ #
+ next;
+ }
+ $abbrev = shift @fields;
+ $type = shift @fields;
+ $lastAbbrev = $abbrev;
+
+ $field = $prefix.$abbrev;
+
+ if ($fields[0] =~ /^\d+$/o) {
+ #
+ # This is presumably a Boolean bitfield, and this is the number
+ # of bits in the parent field.
+ #
+ $fieldDisplay = shift @fields;
+ } else {
+ #
+ # The next token is the base for the field.
+ #
+ $fieldDisplay = "BASE_".shift @fields;
+ }
+
+ if ($fields[0] eq 'VALS') {
+ #
+ # It's an enumerated field, with the value_string table having a
+ # name based on the field's name.
+ #
+ shift @fields;
+ $fieldStrings = "VALS(${abbrev}_vals)";
+ $fieldStrings =~ s/-/_/go;
+ } elsif ($fields[0] =~ /^VALS\(/o) {
+ #
+ # It's an enumerated field, with a specified name for the
+ # value_string table.
+ #
+ $fieldStrings = shift @fields;
+ $fieldStrings =~ s/\)/_vals\)/o;
+ } else {
+ #
+ # It's not an enumerated field.
+ #
+ $fieldStrings = 'NULL';
+ }
+
+ if ($fields[0] =~ /^0x/) {
+ #
+ # The next token looks like a bitmask for a bitfield.
+ #
+ $mask = shift @fields;
+ } else {
+ $mask = 0;
+ }
+
+ $rest = join(' ', @fields);
+ $longName = uc $name;
+ $longName = $rest if ($rest);
+ # Don't allow empty blurbs
+ $longName = $longName eq "" ? "NULL" : "\"$longName\"";
+
+ $variable = $field;
+ $variable =~ s/-/_/go;
+ $variable =~ s/\./_/go;
+
+ print DECL "static int hf_x11_$variable = -1;\n";
+
+ print REG <<END;
+{ &hf_x11_$variable, { "$abbrev", "x11.$field", FT_$type, $fieldDisplay, $fieldStrings, $mask, $longName, HFILL }},
+END
+}
+
+#
+# Editor modelines
+#
+# Local Variables:
+# c-basic-offset: 4
+# tab-width: 8
+# indent-tabs-mode: nil
+# End:
+#
+# ex: set shiftwidth=4 tabstop=8 expandtab:
+# :indentSize=4:tabSize=8:noTabs=true:
+#
diff --git a/tools/process-x11-xcb.pl b/tools/process-x11-xcb.pl
new file mode 100755
index 0000000..91dcf42
--- /dev/null
+++ b/tools/process-x11-xcb.pl
@@ -0,0 +1,1946 @@
+#!/usr/bin/perl
+#
+# Script to convert xcbproto and mesa protocol files for
+# X11 dissector. Creates header files containing code to
+# dissect X11 extensions.
+#
+# Instructions for using this script are in epan/dissectors/README.X11
+#
+# Copyright 2008, 2009, 2013, 2014 Open Text Corporation <pharris[AT]opentext.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+#TODO
+# - support constructs that are legal in XCB, but don't appear to be used
+
+use 5.010;
+
+use warnings;
+use strict;
+
+# given/when is going to be removed (and/or dramatically altered)
+# in 5.20. Patches welcome.
+# Patches even more welcome if they rewrite this whole thing in a
+# language with a proper compatibility document, such as
+# http://golang.org/doc/go1compat
+no if $] >= 5.018, warnings => "experimental::smartmatch";
+
+use IO::File;
+use XML::Twig;
+
+use File::Spec;
+
+my $srcdir = shift;
+die "'$srcdir' is not a directory" unless -d $srcdir;
+
+my @reslist = grep {!/xproto\.xml$/} glob File::Spec->catfile($srcdir, 'xcbproto', 'src', '*.xml');
+my @register;
+
+my $script_name = File::Spec->abs2rel ($0, $srcdir);
+
+my %basictype = (
+ char => { size => 1, encoding => 'ENC_ASCII|ENC_NA', type => 'FT_STRING', base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
+ void => { size => 1, encoding => 'ENC_NA', type => 'FT_BYTES', base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
+ BYTE => { size => 1, encoding => 'ENC_NA', type => 'FT_BYTES', base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
+ CARD8 => { size => 1, encoding => 'byte_order', type => 'FT_UINT8', base => 'BASE_HEX_DEC', get => 'tvb_get_guint8', list => 'listOfByte', },
+ CARD16 => { size => 2, encoding => 'byte_order', type => 'FT_UINT16', base => 'BASE_HEX_DEC', get => 'tvb_get_guint16', list => 'listOfCard16', },
+ CARD32 => { size => 4, encoding => 'byte_order', type => 'FT_UINT32', base => 'BASE_HEX_DEC', get => 'tvb_get_guint32', list => 'listOfCard32', },
+ CARD64 => { size => 8, encoding => 'byte_order', type => 'FT_UINT64', base => 'BASE_HEX_DEC', get => 'tvb_get_guint64', list => 'listOfCard64', },
+ INT8 => { size => 1, encoding => 'byte_order', type => 'FT_INT8', base => 'BASE_DEC', get => 'tvb_get_guint8', list => 'listOfByte', },
+ INT16 => { size => 2, encoding => 'byte_order', type => 'FT_INT16', base => 'BASE_DEC', get => 'tvb_get_guint16', list => 'listOfInt16', },
+ INT32 => { size => 4, encoding => 'byte_order', type => 'FT_INT32', base => 'BASE_DEC', get => 'tvb_get_guint32', list => 'listOfInt32', },
+ INT64 => { size => 8, encoding => 'byte_order', type => 'FT_INT64', base => 'BASE_DEC', get => 'tvb_get_guint64', list => 'listOfInt64', },
+ float => { size => 4, encoding => 'byte_order', type => 'FT_FLOAT', base => 'BASE_NONE', get => 'tvb_get_ieee_float', list => 'listOfFloat', },
+ double => { size => 8, encoding => 'byte_order', type => 'FT_DOUBLE', base => 'BASE_NONE', get => 'tvb_get_ieee_double', list => 'listOfDouble', },
+ BOOL => { size => 1, encoding => 'byte_order', type => 'FT_BOOLEAN',base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
+);
+
+my %simpletype; # Reset at the beginning of each extension
+my %gltype; # No need to reset, since it's only used once
+
+my %struct = # Not reset; contains structures already defined.
+ # Also contains this black-list of structures never used by any
+ # extension (to avoid generating useless code).
+(
+ # structures defined by xproto, but not used by any extension
+ 'xproto:CHAR2B' => 1,
+ 'xproto:ARC' => 1,
+ 'xproto:FORMAT' => 1,
+ 'xproto:VISUALTYPE' => 1,
+ 'xproto:DEPTH' => 1,
+ 'xproto:SCREEN' => 1,
+ 'xproto:SetupRequest' => 1,
+ 'xproto:SetupFailed' => 1,
+ 'xproto:SetupAuthenticate' => 1,
+ 'xproto:Setup' => 1,
+ 'xproto:TIMECOORD' => 1,
+ 'xproto:FONTPROP' => 1,
+ 'xproto:CHARINFO' => 1,
+ 'xproto:SEGMENT' => 1,
+ 'xproto:COLORITEM' => 1,
+ 'xproto:RGB' => 1,
+ 'xproto:HOST' => 1,
+ 'xproto:POINT' => 1,
+
+ # structures defined by xinput, but never used (except by each other)(bug in xcb?)
+ 'xinput:KeyInfo' => 1,
+ 'xinput:ButtonInfo' => 1,
+ 'xinput:ValuatorInfo' => 1,
+ 'xinput:KbdFeedbackState' => 1,
+ 'xinput:PtrFeedbackState' => 1,
+ 'xinput:IntegerFeedbackState' => 1,
+ 'xinput:StringFeedbackState' => 1,
+ 'xinput:BellFeedbackState' => 1,
+ 'xinput:LedFeedbackState' => 1,
+ 'xinput:KbdFeedbackCtl' => 1,
+ 'xinput:PtrFeedbackCtl' => 1,
+ 'xinput:IntegerFeedbackCtl' => 1,
+ 'xinput:StringFeedbackCtl' => 1,
+ 'xinput:BellFeedbackCtl' => 1,
+ 'xinput:LedFeedbackCtl' => 1,
+ 'xinput:KeyState' => 1,
+ 'xinput:ButtonState' => 1,
+ 'xinput:ValuatorState' => 1,
+ 'xinput:DeviceResolutionState' => 1,
+ 'xinput:DeviceAbsCalibState' => 1,
+ 'xinput:DeviceAbsAreaState' => 1,
+ 'xinput:DeviceCoreState' => 1,
+ 'xinput:DeviceEnableState' => 1,
+ 'xinput:DeviceResolutionCtl' => 1,
+ 'xinput:DeviceAbsCalibCtl' => 1,
+ 'xinput:DeviceAbsAreaCtrl' => 1,
+ 'xinput:DeviceCoreCtrl' => 1,
+ 'xinput:DeviceEnableCtrl' => 1,
+ 'xinput:DeviceName' => 1,
+ 'xinput:AddMaster' => 1,
+ 'xinput:RemoveMaster' => 1,
+ 'xinput:AttachSlave' => 1,
+ 'xinput:DetachSlave' => 1,
+ 'xinput:ButtonClass' => 1,
+ 'xinput:KeyClass' => 1,
+ 'xinput:ScrollClass' => 1,
+ 'xinput:TouchClass' => 1,
+ 'xinput:ValuatorClass' => 1,
+
+ # structures defined by xv, but never used (bug in xcb?)
+ 'xv:Image' => 1,
+
+ # structures defined by xkb, but never used (except by each other)(bug in xcb?)
+ 'xkb:Key' => 1,
+ 'xkb:Outline' => 1,
+ 'xkb:Overlay' => 1,
+ 'xkb:OverlayKey' => 1,
+ 'xkb:OverlayRow' => 1,
+ 'xkb:Row' => 1,
+ 'xkb:Shape' => 1,
+);
+my %enum; # Not reset; contains enums already defined.
+my %enum_name;
+my %type_name;
+my $header;
+my $extname;
+my @incname;
+my %request;
+my %genericevent;
+my %event;
+my %reply;
+
+# Output files
+my $impl;
+my $reg;
+my $decl;
+my $error;
+
+# glRender sub-op output files
+my $enum;
+
+# Mesa API definitions keep moving
+my @mesas = ($srcdir . '/mesa/src/mapi/glapi/gen', # 2010-04-26
+ $srcdir . '/mesa/src/mesa/glapi/gen', # 2010-02-22
+ $srcdir . '/mesa/src/mesa/glapi'); # 2004-05-18
+my $mesadir = (grep { -d } @mesas)[0];
+
+sub mesa_category {
+ my ($t, $elt) = @_;
+ $t->purge;
+}
+
+#used to prevent duplication and sort enumerated values
+my %mesa_enum_hash = ();
+
+sub mesa_enum {
+ my ($t, $elt) = @_;
+ my $name = $elt->att('name');
+ my $value = $elt->att('value');
+ my $hex_value = hex($value); #convert string to hex value to catch leading zeros
+
+ #make sure value isn't already in the hash, to prevent duplication in value_string
+ if (!exists($mesa_enum_hash{$hex_value})) {
+ $mesa_enum_hash{$hex_value} = $name;
+ }
+ $t->purge;
+}
+
+sub mesa_type {
+ my ($t, $elt) = @_;
+
+ my $name = $elt->att('name');
+ my $size = $elt->att('size');
+ my $float = $elt->att('float');
+ my $unsigned = $elt->att('unsigned');
+ my $base;
+
+ $t->purge;
+
+ if($name eq 'enum') {
+ # enum does not have a direct X equivalent
+ $gltype{'GLenum'} = { size => 4, encoding => 'byte_order', type => 'FT_UINT32', base => 'BASE_HEX|BASE_EXT_STRING',
+ get => 'tvb_get_guint32', list => 'listOfCard32',
+ val => '&mesa_enum_ext', };
+ return;
+ }
+
+ $name = 'GL'.$name;
+ if (defined($float) && $float eq 'true') {
+ $base = 'float';
+ $base = 'double' if ($size == 8);
+ } else {
+ $base = 'INT';
+ if (defined($unsigned) && $unsigned eq 'true') {
+ $base = 'CARD';
+ }
+ $base .= ($size * 8);
+
+ $base = 'BOOL' if ($name eq 'bool');
+ $base = 'BYTE' if ($name eq 'void');
+ }
+
+ $gltype{$name} = $basictype{$base};
+}
+
+sub registered_name($$)
+{
+ my $name = shift;
+ my $field = shift;
+
+ return "hf_x11_$header"."_$name"."_$field";
+}
+
+sub mesa_function {
+ my ($t, $elt) = @_;
+ # rop == glRender sub-op
+ # sop == GLX minor opcode
+ my $glx = $elt->first_child('glx');
+ unless(defined $glx) { $t->purge; return; }
+
+ my $rop = $glx->att('rop');
+ unless (defined $rop) { $t->purge; return; }
+
+ # Ideally, we want the main name, not the alias name.
+ # Practically, we'd have to scan the file twice to find
+ # the functions that we want to skip.
+ my $alias = $elt->att('alias');
+ if (defined $alias) { $t->purge; return; }
+
+ my $name = $elt->att('name');
+ $request{$rop} = $name;
+
+ my $image;
+
+ my $length = 0;
+ my @elements = $elt->children('param');
+
+ # Wireshark defines _U_ to mean "Unused" (compiler specific define)
+ if (!@elements) {
+ print $impl <<eot
+static void mesa_$name(tvbuff_t *tvb _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_, int length _U_)
+{
+eot
+;
+ } else {
+ print $impl <<eot
+static void mesa_$name(tvbuff_t *tvb, int *offsetp, proto_tree *t, guint byte_order, int length _U_)
+{
+eot
+;
+ }
+
+ my %type_param;
+ foreach my $e (@elements) {
+ # Detect count && variable_param
+ my $count = $e->att('count');
+ my $variable_param = $e->att('variable_param');
+ if (defined $count and defined $variable_param) {
+ $type_param{$variable_param} = 1;
+ }
+ }
+ foreach my $e (@elements) {
+ # Register field with wireshark
+
+ my $type = $e->att('type');
+ $type =~ s/^const //;
+ my $list;
+ $list = 1 if ($type =~ /\*$/);
+ $type =~ s/ \*$//;
+
+ my $fieldname = $e->att('name');
+ my $regname = registered_name($name, $fieldname);
+
+ my $info = $gltype{$type};
+ my $ft = $info->{'type'};
+ my $base = $info->{'base'};
+ my $val = $info->{'val'} // 'NULL';
+ my $count = $e->att('count');
+ my $variable_param = $e->att('variable_param');
+
+ if ($list and $count and $variable_param) {
+ print $decl "static int ${regname} = -1;\n";
+ print $reg "{ &$regname, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
+ print $decl "static int ${regname}_signed = -1;\n";
+ print $reg "{ &${regname}_signed, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_INT8, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
+ print $decl "static int ${regname}_unsigned = -1;\n";
+ print $reg "{ &${regname}_unsigned, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
+ print $decl "static int ${regname}_item_card16 = -1;\n";
+ print $reg "{ &${regname}_item_card16, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
+ print $decl "static int ${regname}_item_int16 = -1;\n";
+ print $reg "{ &${regname}_item_int16, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_INT16, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
+ print $decl "static int ${regname}_item_card32 = -1;\n";
+ print $reg "{ &${regname}_item_card32, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
+ print $decl "static int ${regname}_item_int32 = -1;\n";
+ print $reg "{ &${regname}_item_int32, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
+ print $decl "static int ${regname}_item_float = -1;\n";
+ print $reg "{ &${regname}_item_float, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_FLOAT, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
+ } else {
+ print $decl "static int $regname = -1;\n";
+ if ($list and $info->{'size'} > 1) {
+ print $reg "{ &$regname, { \"$fieldname\", \"x11.glx.render.$name.$fieldname.list\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
+ $regname .= '_item';
+ print $decl "static int $regname = -1;\n";
+ }
+ print $reg "{ &$regname, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", $ft, $base, $val, 0, NULL, HFILL }},\n";
+
+ if ($e->att('counter') or $type_param{$fieldname}) {
+ print $impl " int $fieldname;\n";
+ }
+ }
+
+ if ($list) {
+ if ($e->att('img_format')) {
+ $image = 1;
+ foreach my $wholename (('swap bytes', 'lsb first')) {
+ # Boolean values
+ my $varname = $wholename;
+ $varname =~ s/\s//g;
+ my $regname = registered_name($name, $varname);
+ print $decl "static int $regname = -1;\n";
+ print $reg "{ &$regname, { \"$wholename\", \"x11.glx.render.$name.$varname\", FT_BOOLEAN, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
+ }
+ foreach my $wholename (('row length', 'skip rows', 'skip pixels', 'alignment')) {
+ # Integer values
+ my $varname = $wholename;
+ $varname =~ s/\s//g;
+ my $regname = registered_name($name, $varname);
+ print $decl "static int $regname = -1;\n";
+ print $reg "{ &$regname, { \"$wholename\", \"x11.glx.render.$name.$varname\", FT_UINT32, BASE_HEX_DEC, NULL, 0, NULL, HFILL }},\n";
+ }
+ }
+ }
+ }
+
+ # The image requests have a few implicit elements first:
+ if ($image) {
+ foreach my $wholename (('swap bytes', 'lsb first')) {
+ # Boolean values
+ my $varname = $wholename;
+ $varname =~ s/\s//g;
+ my $regname = registered_name($name, $varname);
+ print $impl " proto_tree_add_item(t, $regname, tvb, *offsetp, 1, byte_order);\n";
+ print $impl " *offsetp += 1;\n";
+ $length += 1;
+ }
+ print $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, 2, ENC_NA);\n";
+ print $impl " *offsetp += 2;\n";
+ $length += 2;
+ foreach my $wholename (('row length', 'skip rows', 'skip pixels', 'alignment')) {
+ # Integer values
+ my $varname = $wholename;
+ $varname =~ s/\s//g;
+ my $regname = registered_name($name, $varname);
+ print $impl " proto_tree_add_item(t, $regname, tvb, *offsetp, 4, byte_order);\n";
+ print $impl " *offsetp += 4;\n";
+ $length += 4;
+ }
+ }
+
+ foreach my $e (@elements) {
+ my $type = $e->att('type');
+ $type =~ s/^const //;
+ my $list;
+ $list = 1 if ($type =~ /\*$/);
+ $type =~ s/ \*$//;
+
+ my $fieldname = $e->att('name');
+ my $regname = registered_name($name, $fieldname);
+
+ my $info = $gltype{$type};
+ my $ft = $info->{'type'};
+ my $base = $info->{'base'};
+
+ if (!$list) {
+ my $size = $info->{'size'};
+ my $encoding = $info->{'encoding'};
+ my $get = $info->{'get'};
+
+ if ($e->att('counter') or $type_param{$fieldname}) {
+ if ($get ne "tvb_get_guint8") {
+ print $impl " $fieldname = $get(tvb, *offsetp, $encoding);\n";
+ } else {
+ print $impl " $fieldname = $get(tvb, *offsetp);\n";
+ }
+ }
+ print $impl " proto_tree_add_item(t, $regname, tvb, *offsetp, $size, $encoding);\n";
+ print $impl " *offsetp += $size;\n";
+ $length += $size;
+ } else { # list
+ my $list = $info->{'list'};
+ my $count = $e->att('count');
+ my $variable_param = $e->att('variable_param');
+
+ if (defined($count) && !defined($variable_param)) {
+ $regname .= ", $regname".'_item' if ($info->{'size'} > 1);
+ print $impl " $list(tvb, offsetp, t, $regname, $count, byte_order);\n";
+ } else {
+ if (defined($count)) {
+ # Currently, only CallLists has both a count and a variable_param
+ # The XML contains a size description of all the possibilities
+ # for CallLists, but not a type description. Implement by hand,
+ # with the caveat that more types may need to be added in the
+ # future.
+ say $impl " switch($variable_param) {";
+ say $impl " case 0x1400: /* BYTE */";
+ say $impl " listOfByte(tvb, offsetp, t, ${regname}_signed, $count, byte_order);";
+ say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - $count), ENC_NA);";
+ say $impl " *offsetp += (length - $length - $count);";
+ say $impl " break;";
+ say $impl " case 0x1401: /* UNSIGNED_BYTE */";
+ say $impl " listOfByte(tvb, offsetp, t, ${regname}_unsigned, $count, byte_order);";
+ say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - $count), ENC_NA);";
+ say $impl " *offsetp += (length - $length - $count);";
+ say $impl " break;";
+ say $impl " case 0x1402: /* SHORT */";
+ say $impl " listOfInt16(tvb, offsetp, t, $regname, ${regname}_item_int16, $count, byte_order);";
+ say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
+ say $impl " *offsetp += (length - $length - 2 * $count);";
+ say $impl " break;";
+ say $impl " case 0x1403: /* UNSIGNED_SHORT */";
+ say $impl " listOfCard16(tvb, offsetp, t, $regname, ${regname}_item_card16, $count, byte_order);";
+ say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
+ say $impl " *offsetp += (length - $length - 2 * $count);";
+ say $impl " break;";
+ say $impl " case 0x1404: /* INT */";
+ say $impl " listOfInt32(tvb, offsetp, t, $regname, ${regname}_item_int32, $count, byte_order);";
+ say $impl " break;";
+ say $impl " case 0x1405: /* UNSIGNED_INT */";
+ say $impl " listOfCard32(tvb, offsetp, t, $regname, ${regname}_item_card32, $count, byte_order);";
+ say $impl " break;";
+ say $impl " case 0x1406: /* FLOAT */";
+ say $impl " listOfFloat(tvb, offsetp, t, $regname, ${regname}_item_float, $count, byte_order);";
+ say $impl " break;";
+ say $impl " case 0x1407: /* 2_BYTES */";
+ say $impl " listOfCard16(tvb, offsetp, t, $regname, ${regname}_item_card16, $count, ENC_BIG_ENDIAN);";
+ say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
+ say $impl " *offsetp += (length - $length - 2 * $count);";
+ say $impl " break;";
+ say $impl " case 0x1408: /* 3_BYTES */";
+ say $impl " UNDECODED(3 * $count);";
+ say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 3 * $count), ENC_NA);";
+ say $impl " *offsetp += (length - $length - 3 * $count);";
+ say $impl " break;";
+ say $impl " case 0x1409: /* 4_BYTES */";
+ say $impl " listOfCard32(tvb, offsetp, t, $regname, ${regname}_item_card32, $count, ENC_BIG_ENDIAN);";
+ say $impl " break;";
+ say $impl " case 0x140B: /* HALF_FLOAT */";
+ say $impl " UNDECODED(2 * $count);";
+ say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
+ say $impl " *offsetp += (length - $length - 2 * $count);";
+ say $impl " break;";
+ say $impl " default: /* Unknown */";
+ say $impl " UNDECODED(length - $length);";
+ say $impl " break;";
+ say $impl " }";
+ } else {
+ $regname .= ", $regname".'_item' if ($info->{'size'} > 1);
+ print $impl " $list(tvb, offsetp, t, $regname, (length - $length) / $gltype{$type}{'size'}, byte_order);\n";
+ }
+ }
+ }
+ }
+
+ print $impl "}\n\n";
+ $t->purge;
+}
+
+sub get_op($;$);
+sub get_unop($;$);
+
+# Render a single XML length-expression fragment (fieldref / value / op /
+# unop / popcount) as a C expression string.  Any field referenced is also
+# recorded in %$refref so the caller knows to declare and populate the
+# corresponding f_<name> C variable before the expression is evaluated.
+sub get_ref($$)
+{
+ my $elt = shift;
+ my $refref = shift;
+ my $rv;
+
+ given($elt->name()) {
+ when ('fieldref') {
+ $rv = $elt->text();
+ $refref->{$rv} = 1;
+ # field references become "f_<field>" in the generated C
+ $rv = 'f_'.$rv;
+ }
+ when ('value') { $rv = $elt->text(); }
+ when ('op') { $rv = get_op($elt, $refref); }
+ when (['unop','popcount']) { $rv = get_unop($elt, $refref); }
+ default { die "Invalid op fragment: $_" }
+ }
+ return $rv;
+}
+
+# Render a binary <op> XML element as a parenthesized C infix expression,
+# recursing into both operands via get_ref().  $refref (optional) collects
+# any fieldrefs encountered; dies if the element does not have exactly two
+# operand children.
+sub get_op($;$) {
+ my $op = shift;
+ my $refref = shift // {};
+
+ my @elements = $op->children(qr/fieldref|value|op|unop|popcount/);
+ (@elements == 2) or die ("Wrong number of children for 'op'\n");
+ my $left;
+ my $right;
+
+ $left = get_ref($elements[0], $refref);
+ $right = get_ref($elements[1], $refref);
+
+ return "($left " . $op->att('op') . " $right)";
+}
+
+# Render a unary <unop> or <popcount> XML element as a C expression.
+# <unop> becomes "(<op><operand>)"; <popcount> maps to Wireshark's
+# ws_count_ones() helper.  Dies if there is not exactly one operand child.
+sub get_unop($;$) {
+ my $op = shift;
+ my $refref = shift // {};
+
+ my @elements = $op->children(qr/fieldref|value|op|unop|popcount/);
+ (@elements == 1) or die ("Wrong number of children for 'unop'\n");
+ my $left;
+
+ $left = get_ref($elements[0], $refref);
+
+ given ($op->name()) {
+ when ('unop') {
+ return '(' . $op->att('op') . "$left)";
+ }
+ when ('popcount') {
+ return "ws_count_ones($left)";
+ }
+ default { die "Invalid unop element $op->name()\n"; }
+ }
+}
+
+# Qualify a bare type name with the current extension prefix ($incname[0])
+# unless it is already qualified (contains a ':').
+sub qualname {
+ my $name = shift;
+ $name = $incname[0].':'.$name unless $name =~ /:/;
+ return $name
+}
+
+# Look up the type-info hash for a simple (non-struct) type: first in
+# %basictype, then %simpletype, then via the %type_name alias table.
+# Returns undef when the name is not a known simple type.
+sub get_simple_info {
+ my $name = shift;
+ my $info = $basictype{$name};
+ return $info if (defined $info);
+ $info = $simpletype{$name};
+ return $info if (defined $info);
+ if (defined($type_name{$name})) {
+ return $simpletype{$type_name{$name}};
+ }
+ return undef
+}
+
+# Look up the struct-info hash for a struct/union type, directly or via the
+# %type_name alias table.  Returns undef when the name is not a known struct.
+sub get_struct_info {
+ my $name = shift;
+ my $info = $struct{$name};
+ return $info if (defined $info);
+ if (defined($type_name{$name})) {
+ return $struct{$type_name{$name}};
+ }
+ return undef
+}
+
+# Resolve a type name to its info hash (simple type first, then struct),
+# dying if the type was blacklisted (its entry set to the scalar 1) yet is
+# actually referenced by the XML being processed.
+sub getinfo {
+ my $name = shift;
+ my $info = get_simple_info($name) // get_struct_info($name);
+ # If the script fails here search for $name in this script and remove it from the black list
+ # NOTE(review): '==' numifies $info; a hashref numifies to its address so
+ # only the literal scalar 1 blacklist entries match here — 'eq' would be
+ # clearer, confirm before changing.
+ die "$name is defined to be unused in process-x11-xcb.pl but is actually used!" if (defined($info) && $info == "1");
+ return $info;
+}
+
+# Emit (once per enum) a C value_string array named "x11_enum_<e>" to the
+# $enum output handle and return that name.  A {done} flag on the enum entry
+# prevents duplicate emission on subsequent calls.
+sub dump_enum_values($)
+{
+ my $e = shift;
+
+ defined($enum{$e}) or die("Enum $e not found");
+
+ my $enumname = "x11_enum_$e";
+ return $enumname if (defined $enum{$e}{done});
+
+ say $enum 'static const value_string '.$enumname.'[] = {';
+
+ my $value = $enum{$e}{value};
+ for my $val (sort { $a <=> $b } keys %$value) {
+ say $enum sprintf(" { %3d, \"%s\" },", $val, $$value{$val});
+ }
+ # value_string arrays are NULL-terminated
+ say $enum sprintf(" { %3d, NULL },", 0);
+ say $enum '};';
+ say $enum '';
+
+ $enum{$e}{done} = 1;
+ return $enumname;
+}
+
+# Find all references, so we can declare only the minimum necessary
+sub reference_elements($$);
+
+# Recursively walk a <switch> or <list> element and record in %$refref which
+# fields its length expressions reference:
+#   $refref->{field}{<name>} - field value needed as an f_<name> C variable
+#   $refref->{sumof}{<name>} - a sumof_<name> accumulator is needed
+#   $refref->{length}        - the implicit reply 'length' field is needed
+# This lets the generators declare only the minimum necessary variables.
+sub reference_elements($$)
+{
+ my $e = shift;
+ my $refref = shift;
+
+ given ($e->name()) {
+ when ('switch') {
+ my $lentype = $e->first_child();
+ if (defined $lentype) {
+ given ($lentype->name()) {
+ when ('fieldref') { $refref->{field}{$lentype->text()} = 1; }
+ when ('op') { get_op($lentype, $refref->{field}); }
+ }
+ }
+
+ # recurse into every (bit)case arm
+ my @elements = $e->children(qr/(bit)?case/);
+ for my $case (@elements) {
+ my @sub_elements = $case->children(qr/list|switch/);
+
+ foreach my $sub_e (@sub_elements) {
+ reference_elements($sub_e, $refref);
+ }
+ }
+ }
+ when ('list') {
+ # a list of parameterized structs needs those params in scope too
+ my $type = $e->att('type');
+ my $info = getinfo($type);
+ if (defined $info->{paramref}) {
+ for my $pref (keys %{$info->{paramref}}) {
+ $refref->{field}{$pref} = 1;
+ }
+ }
+
+ my $lentype = $e->first_child();
+ if (defined $lentype) {
+ given ($lentype->name()) {
+ when ('fieldref') { $refref->{field}{$lentype->text()} = 1; }
+ when ('op') { get_op($lentype, $refref->{field}); }
+ when (['unop','popcount']) { get_unop($lentype, $refref->{field}); }
+ when ('sumof') { $refref->{sumof}{$lentype->att('ref')} = 1; }
+ }
+ } else {
+ # no explicit length: list runs to the end of the request/reply,
+ # so the implicit 'length' field must be available
+ $refref->{field}{'length'} = 1;
+ $refref->{'length'} = 1;
+ }
+ }
+ }
+}
+
+# Emit the hf_x11_* declaration ($decl) and header-field registration ($reg)
+# for one XML element, plus any C variable pre-declarations ($impl) the
+# dissector body will need.  $varpat/$humanpat are sprintf patterns for the
+# C identifier and the "x11.…" display filter name; $refref (from
+# reference_elements) says which f_<field> variables must be declared.
+sub register_element($$$$;$)
+{
+ my $e = shift;
+ my $varpat = shift;
+ my $humanpat = shift;
+ my $refref = shift;
+ my $indent = shift // ' ' x 4;
+
+ given ($e->name()) {
+ when ('pad') { return; } # Pad has no variables
+ when ('switch') { return; } # Switch defines variables in a tighter scope to avoid collisions
+ }
+
+ # Register field with wireshark
+
+ my $fieldname = $e->att('name');
+ my $type = $e->att('type') or die ("Field $fieldname does not have a valid type\n");
+
+ my $regname = 'hf_x11_'.sprintf ($varpat, $fieldname);
+ my $humanname = 'x11.'.sprintf ($humanpat, $fieldname);
+
+ my $info = getinfo($type);
+ my $ft = $info->{'type'} // 'FT_NONE';
+ my $base = $info->{'base'} // 'BASE_NONE';
+ my $vals = 'NULL';
+
+ my $enum = $e->att('enum') // $e->att('altenum');
+ if (defined $enum) {
+ my $enumname = dump_enum_values($enum_name{$enum});
+ $vals = "VALS($enumname)";
+
+ # Wireshark does not allow FT_BYTES, FT_BOOLEAN, or BASE_NONE to have an enum
+ $ft =~ s/FT_BYTES/FT_UINT8/;
+ $ft =~ s/FT_BOOLEAN/FT_UINT8/;
+ $base =~ s/BASE_NONE/BASE_DEC/;
+ }
+
+ $enum = $e->att('mask');
+ if (defined $enum) {
+ # Create subtree items:
+ defined($enum{$enum_name{$enum}}) or die("Enum $enum not found");
+
+ # Wireshark does not allow FT_BYTES or BASE_NONE to have an enum
+ $ft =~ s/FT_BYTES/FT_UINT8/;
+ $base =~ s/BASE_NONE/BASE_DEC/;
+
+ my $bitsize = $info->{'size'} * 8;
+
+ # one FT_BOOLEAN header field per bit in the mask
+ my $bit = $enum{$enum_name{$enum}}{bit};
+ for my $val (sort { $a <=> $b } keys %$bit) {
+ my $itemname = $$bit{$val};
+ my $item = $regname . '_mask_' . $itemname;
+ my $itemhuman = $humanname . '.' . $itemname;
+ my $bitshift = "1U << $val";
+
+ say $decl "static int $item = -1;";
+ say $reg "{ &$item, { \"$itemname\", \"$itemhuman\", FT_BOOLEAN, $bitsize, NULL, $bitshift, NULL, HFILL }},";
+ }
+ }
+
+ print $decl "static int $regname = -1;\n";
+ if ($e->name() eq 'list' and defined $info->{'size'} and $info->{'size'} > 1) {
+ # multi-byte-item lists get a container field plus a per-item field
+ print $reg "{ &$regname, { \"$fieldname\", \"$humanname.list\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
+ $regname .= '_item';
+ print $decl "static int $regname = -1;\n";
+ }
+ print $reg "{ &$regname, { \"$fieldname\", \"$humanname\", $ft, $base, $vals, 0, NULL, HFILL }},\n";
+
+ if ($refref->{sumof}{$fieldname}) {
+ print $impl $indent."int sumof_$fieldname = 0;\n";
+ }
+
+ if ($e->name() eq 'field') {
+ if ($refref->{field}{$fieldname} and get_simple_info($type)) {
+ # Pre-declare variable
+ if ($ft eq 'FT_FLOAT') {
+ print $impl $indent."gfloat f_$fieldname;\n";
+ } elsif ($ft eq 'FT_DOUBLE') {
+ print $impl $indent."gdouble f_$fieldname;\n";
+ } elsif ($ft eq 'FT_INT64' or $ft eq 'FT_UINT64') {
+ print $impl $indent."gint64 f_$fieldname;\n";
+ } else {
+ print $impl $indent."int f_$fieldname;\n";
+ }
+ }
+ }
+}
+
+sub dissect_element($$$$$;$$);
+
+# Emit the C dissection statements for one XML element (pad/field/list/
+# switch) into $impl.  $length is the running count of fixed bytes dissected
+# so far and is returned updated; $adjustlength makes the generated code
+# shrink the runtime 'length' variable after variable-length lists so later
+# "(length - $length)" calculations stay accurate.  Recurses for switch arms.
+sub dissect_element($$$$$;$$)
+{
+ my $e = shift;
+ my $varpat = shift;
+ my $humanpat = shift;
+ my $length = shift;
+ my $refref = shift;
+ my $adjustlength = shift;
+ my $indent = shift // ' ' x 4;
+
+ given ($e->name()) {
+ when ('pad') {
+ my $bytes = $e->att('bytes');
+ my $align = $e->att('align');
+ if (defined $bytes) {
+ # fixed-size padding
+ print $impl $indent."proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, $bytes, ENC_NA);\n";
+ print $impl $indent."*offsetp += $bytes;\n";
+ $length += $bytes;
+ } else {
+ # alignment padding, computed at runtime from *offsetp
+ say $impl $indent.'if (*offsetp % '.$align.') {';
+ say $impl $indent." proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, ($align - *offsetp % $align), ENC_NA);";
+ say $impl $indent." *offsetp += ($align - *offsetp % $align);";
+ say $impl $indent."}";
+ if ($length % $align != 0) {
+ $length += $align - $length % $align;
+ }
+ if ($adjustlength) {
+ say $impl $indent.'length = ((length + '.($align-1).') & ~'.($align-1).');';
+ }
+ }
+ }
+ when ('field') {
+ my $fieldname = $e->att('name');
+ my $regname = 'hf_x11_'.sprintf ($varpat, $fieldname);
+ my $type = $e->att('type');
+
+ if (get_simple_info($type)) {
+ my $info = get_simple_info($type);
+ my $size = $info->{'size'};
+ my $encoding = $info->{'encoding'};
+ my $get = $info->{'get'};
+
+ if ($e->att('enum') // $e->att('altenum')) {
+ # enum-valued field: dissect via the field<N>() helper
+ my $fieldsize = $size * 8;
+ print $impl $indent;
+ if ($refref->{field}{$fieldname}) {
+ print $impl "f_$fieldname = ";
+ }
+ say $impl "field$fieldsize(tvb, offsetp, t, $regname, byte_order);";
+ } elsif ($e->att('mask')) {
+ # bitmask field: capture the value if referenced, then emit a
+ # proto_tree_add_bitmask with one entry per registered bit
+ if ($refref->{field}{$fieldname}) {
+ if ($get ne "tvb_get_guint8") {
+ say $impl $indent."f_$fieldname = $get(tvb, *offsetp, byte_order);";
+ } else {
+ say $impl $indent."f_$fieldname = $get(tvb, *offsetp);";
+ }
+ }
+ my $bitmask_field = $fieldname . "_bits";
+ say $impl $indent."{";
+ say $impl $indent." int* const $bitmask_field [] = {";
+ my $bit = $enum{$enum_name{$e->att('mask')}}{bit};
+ for my $val (sort { $a <=> $b } keys %$bit) {
+ my $item = $regname . '_mask_' . $$bit{$val};
+ say $impl "$indent$indent&$item,";
+ }
+ say $impl "$indent$indent" . "NULL";
+ say $impl $indent." };";
+
+ say $impl $indent." proto_tree_add_bitmask(t, tvb, *offsetp, $regname, ett_x11_rectangle, $bitmask_field, $encoding);";
+ say $impl $indent."}";
+ say $impl $indent."*offsetp += $size;";
+ } else {
+ # plain scalar field
+ if ($refref->{field}{$fieldname}) {
+ if ($get ne "tvb_get_guint8") {
+ say $impl $indent."f_$fieldname = $get(tvb, *offsetp, byte_order);";
+ } else {
+ say $impl $indent."f_$fieldname = $get(tvb, *offsetp);";
+ }
+ }
+ print $impl $indent."proto_tree_add_item(t, $regname, tvb, *offsetp, $size, $encoding);\n";
+ print $impl $indent."*offsetp += $size;\n";
+ }
+ $length += $size;
+ } elsif (get_struct_info($type)) {
+ # TODO: variable-lengths (when $info->{'size'} == 0 )
+ my $info = get_struct_info($type);
+ $length += $info->{'size'};
+ print $impl $indent."struct_$info->{'name'}(tvb, offsetp, t, byte_order, 1);\n";
+ } else {
+ die ("Unrecognized type: $type\n");
+ }
+ }
+ when ('list') {
+ my $fieldname = $e->att('name');
+ my $regname = 'hf_x11_'.sprintf ($varpat, $fieldname);
+ my $type = $e->att('type');
+
+ my $info = getinfo($type);
+ my $lencalc;
+ my $lentype = $e->first_child();
+ # default element count: remaining bytes / element size
+ if (defined $info->{'size'}) {
+ $lencalc = "(length - $length) / $info->{'size'}";
+ } else {
+ $lencalc = "(length - $length)";
+ }
+ if (defined $lentype) {
+ given ($lentype->name()) {
+ when ('value') { $lencalc = $lentype->text(); }
+ when ('fieldref') { $lencalc = 'f_'.$lentype->text(); }
+ when ('paramref') { $lencalc = 'p_'.$lentype->text(); }
+ when ('op') { $lencalc = get_op($lentype); }
+ when (['unop','popcount']) { $lencalc = get_unop($lentype); }
+ when ('sumof') { $lencalc = 'sumof_'.$lentype->att('ref'); }
+ }
+ }
+
+ if (get_simple_info($type)) {
+ my $list = $info->{'list'};
+ my $size = $info->{'size'};
+ $regname .= ", $regname".'_item' if ($size > 1);
+
+ if ($refref->{sumof}{$fieldname}) {
+ # another list's length is the sum of this list's elements:
+ # emit a loop accumulating sumof_<field> before dissecting
+ my $get = $info->{'get'};
+ say $impl $indent."{";
+ say $impl $indent." int i;";
+ say $impl $indent." for (i = 0; i < $lencalc; i++) {";
+ if ($get ne "tvb_get_guint8") {
+ say $impl $indent." sumof_$fieldname += $get(tvb, *offsetp + i * $size, byte_order);";
+ } else {
+ say $impl $indent." sumof_$fieldname += $get(tvb, *offsetp + i * $size);";
+ }
+ say $impl $indent." }";
+ say $impl $indent."}";
+ }
+
+ print $impl $indent."$list(tvb, offsetp, t, $regname, $lencalc, byte_order);\n";
+ } elsif (get_struct_info($type)) {
+ my $si = get_struct_info($type);
+ my $prefs = "";
+ foreach my $pref (sort keys %{$si->{paramref}}) {
+ $prefs .= ", f_$pref";
+ }
+
+ print $impl $indent."struct_$info->{'name'}(tvb, offsetp, t, byte_order, $lencalc$prefs);\n";
+ } else {
+ # TODO: Fix unrecognized type. Comment out for now to generate dissector
+ # die ("Unrecognized type: $type\n");
+ }
+
+ if ($adjustlength && defined($lentype)) {
+ # Some requests end with a list of unspecified length
+ # Adjust the length field here so that the next $lencalc will be accurate
+ if (defined $info->{'size'}) {
+ say $impl $indent."length -= $lencalc * $info->{'size'};";
+ } else {
+ say $impl $indent."length -= $lencalc * 1;";
+ }
+ }
+ }
+ when ('switch') {
+ my $switchtype = $e->first_child() or die("Switch element not defined");
+
+ my $switchon = get_ref($switchtype, {});
+ my @elements = $e->children(qr/(bit)?case/);
+ for my $case (@elements) {
+ # build the C condition from the case's enumref children:
+ # bit test for <bitcase>, equality test for <case>
+ my @refs = $case->children('enumref');
+ my @test;
+ my $fieldname;
+ foreach my $ref (@refs) {
+ my $enum_ref = $ref->att('ref');
+ my $field = $ref->text();
+ $fieldname //= $field; # Use first named field
+ if ($case->name() eq 'bitcase') {
+ my $bit = $enum{$enum_name{$enum_ref}}{rbit}{$field};
+ if (! defined($bit)) {
+ for my $foo (keys %{$enum{$enum_name{$enum_ref}}{rbit}}) { say "'$foo'"; }
+ die ("Field '$field' not found in '$enum_ref'");
+ }
+ push @test , "$switchon & (1U << $bit)";
+ } else {
+ my $val = $enum{$enum_name{$enum_ref}}{rvalue}{$field};
+ if (! defined($val)) {
+ for my $foo (keys %{$enum{$enum_name{$enum_ref}}{rvalue}}) { say "'$foo'"; }
+ die ("Field '$field' not found in '$enum_ref'");
+ }
+ push @test , "$switchon == $val";
+ }
+ }
+
+ if (@test > 1) {
+ # We have more than one conditional, add parentheses to them.
+ # We don't add parentheses to all the conditionals because
+ # clang complains about the extra parens if you do "if ((x == y))".
+ my @tests_with_parens;
+ foreach my $conditional (@test) {
+ push @tests_with_parens, "($conditional)";
+ }
+
+ @test = @tests_with_parens;
+ }
+
+ my $list = join ' || ', @test;
+ say $impl $indent."if ($list) {";
+
+ # scope the case's field names under "<casefield>_" / "<casefield>."
+ my $vp = $varpat;
+ my $hp = $humanpat;
+
+ $vp =~ s/%s/${fieldname}_%s/;
+ $hp =~ s/%s/${fieldname}.%s/;
+
+ my @sub_elements = $case->children(qr/pad|field|list|switch/);
+
+ my $subref = { field => {}, sumof => {} };
+ foreach my $sub_e (@sub_elements) {
+ reference_elements($sub_e, $subref);
+ }
+ foreach my $sub_e (@sub_elements) {
+ register_element($sub_e, $vp, $hp, $subref, $indent . ' ');
+ }
+ foreach my $sub_e (@sub_elements) {
+ $length = dissect_element($sub_e, $vp, $hp, $length, $subref, $adjustlength, $indent . ' ');
+ }
+
+ say $impl $indent."}";
+ }
+ }
+ default { die "Unknown field type: $_\n"; }
+ }
+ return $length;
+}
+
+# XML::Twig handler for a <struct> element.  Computes the struct's fixed
+# size (or emits a struct_size_<name>() C helper when the size is dynamic),
+# registers the struct's header fields, and emits a struct_<name>() C
+# dissector that dissects 'count' consecutive instances.  Records the result
+# in %struct keyed by qualified name; idempotent per struct.
+sub struct {
+ my ($t, $elt) = @_;
+ my $name = $elt->att('name');
+ my $qualname = qualname($name);
+ $type_name{$name} = $qualname;
+
+ # already generated (e.g. from an imported module): nothing to do
+ if (defined $struct{$qualname}) {
+ $t->purge;
+ return;
+ }
+
+ my @elements = $elt->children(qr/pad|field|list|switch/);
+
+ print(" - Struct $name\n");
+
+ # use the qualified name, with ':' made into a valid C identifier char
+ $name = $qualname;
+ $name =~ s/:/_/;
+
+ my %refs;
+ my %paramrefs;
+ my $size = 0;
+ my $dynamic = 0;
+ my $needi = 0;
+ # Find struct size
+ foreach my $e (@elements) {
+ my $count;
+ $count = 1;
+ given ($e->name()) {
+ when ('pad') {
+ my $bytes = $e->att('bytes');
+ my $align = $e->att('align');
+ if (defined $bytes) {
+ $size += $bytes;
+ next;
+ }
+ if (!$dynamic) {
+ if ($size % $align) {
+ $size += $align - $size % $align;
+ }
+ }
+ next;
+ }
+ when ('list') {
+ my $type = $e->att('type');
+ my $info = getinfo($type);
+
+ # a list of dynamically sized structs needs loop variables
+ $needi = 1 if ($info->{'size'} == 0);
+
+ my $value = $e->first_child();
+ given($value->name()) {
+ when ('fieldref') {
+ $refs{$value->text()} = 1;
+ $count = 0;
+ $dynamic = 1;
+ }
+ when ('paramref') {
+ $paramrefs{$value->text()} = $value->att('type');
+ $count = 0;
+ $dynamic = 1;
+ }
+ when ('op') {
+ get_op($value, \%refs);
+ $count = 0;
+ $dynamic = 1;
+ }
+ when (['unop','popcount']) {
+ get_unop($value, \%refs);
+ $count = 0;
+ $dynamic = 1;
+ }
+ when ('value') {
+ $count = $value->text();
+ }
+ default { die("Invalid list size $_\n"); }
+ }
+ }
+ when ('field') { }
+ when ('switch') {
+ $dynamic = 1;
+ next;
+ }
+ default { die("unrecognized field: $_\n"); }
+ }
+
+ my $type = $e->att('type');
+ my $info = getinfo($type);
+
+ $size += $info->{'size'} * $count;
+ }
+
+ my $prefs = "";
+
+ if ($dynamic) {
+ # size depends on field values: emit a runtime size calculator
+ $size = 0;
+
+ foreach my $pref (sort keys %paramrefs) {
+ $prefs .= ", int p_$pref";
+ }
+
+ print $impl <<eot

+static int struct_size_$name(tvbuff_t *tvb _U_, int *offsetp _U_, guint byte_order _U_$prefs)
+{
+ int size = 0;
+eot
+;
+ say $impl ' int i, off;' if ($needi);
+
+ foreach my $ref (sort keys %refs) {
+ say $impl " int f_$ref;";
+ }
+
+ foreach my $e (@elements) {
+ my $count;
+ $count = 1;
+
+ my $type = $e->att('type') // '';
+ my $info = getinfo($type);
+
+ given ($e->name()) {
+ when ('pad') {
+ my $bytes = $e->att('bytes');
+ my $align = $e->att('align');
+ if (defined $bytes) {
+ $size += $bytes;
+ } else {
+ say $impl ' size = (size + '.($align-1).') & ~'.($align-1).';';
+ }
+ }
+ when ('list') {
+ my $len = $e->first_child();
+ my $infosize = $info->{'size'};
+ my $sizemul;
+
+ given ($len->name()) {
+ when ('op') { $sizemul = get_op($len, \%refs); }
+ when (['unop','popcount']) { $sizemul = get_unop($len, \%refs); }
+ when ('fieldref') { $sizemul = 'f_'.$len->text(); }
+ when ('paramref') { $sizemul = 'p_'.$len->text(); }
+ when ('value') {
+ if ($infosize) {
+ $size += $infosize * $len->text();
+ } else {
+ $sizemul = $len->text();
+ }
+ }
+ default { die "Invalid list size: $_\n"; }
+ }
+ if (defined $sizemul) {
+ if ($infosize) {
+ say $impl " size += $sizemul * $infosize;";
+ } else {
+ # element size itself is dynamic: sum it per element
+ say $impl " for (i = 0; i < $sizemul; i++) {";
+ say $impl " off = (*offsetp) + size + $size;";
+ say $impl " size += struct_size_$info->{name}(tvb, &off, byte_order);";
+ say $impl ' }';
+ }
+ }
+ }
+ when ('field') {
+ my $fname = $e->att('name');
+ if (defined($refs{$fname})) {
+ my $get = $info->{'get'};
+ if ($get ne "tvb_get_guint8") {
+ say $impl " f_$fname = $info->{'get'}(tvb, *offsetp + size + $size, byte_order);";
+ } else {
+ say $impl " f_$fname = $info->{'get'}(tvb, *offsetp + size + $size);";
+ }
+ }
+ $size += $info->{'size'};
+ }
+ }
+ }
+ say $impl " return size + $size;";
+ say $impl '}';
+ $size = 0; # 0 means "dynamic calculation required"
+ }
+
+ print $decl "static int hf_x11_struct_$name = -1;\n";
+ print $reg "{ &hf_x11_struct_$name, { \"$name\", \"x11.struct.$name\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
+
+ print $impl <<eot

+static void struct_$name(tvbuff_t *tvb, int *offsetp, proto_tree *root, guint byte_order _U_, int count$prefs)
+{
+ int i;
+ for (i = 0; i < count; i++) {
+ proto_item *item;
+ proto_tree *t;
+eot
+;
+
+ my $varpat = 'struct_'.$name.'_%s';
+ my $humanpat = "struct.$name.%s";
+ my $refs = { field => {}, sumof => {} };
+
+ foreach my $e (@elements) {
+ reference_elements($e, $refs);
+ }
+ foreach my $e (@elements) {
+ register_element($e, $varpat, $humanpat, $refs, " ");
+ }
+
+ $prefs = "";
+ foreach my $pref (sort keys %paramrefs) {
+ $prefs .= ", p_$pref";
+ }
+
+ my $sizecalc = $size;
+ $size or $sizecalc = "struct_size_$name(tvb, offsetp, byte_order$prefs)";
+
+ print $impl <<eot

+ item = proto_tree_add_item(root, hf_x11_struct_$name, tvb, *offsetp, $sizecalc, ENC_NA);
+ t = proto_item_add_subtree(item, ett_x11_rectangle);
+eot
+;
+ my $length = 0;
+ foreach my $e (@elements) {
+ $length = dissect_element($e, $varpat, $humanpat, $length, $refs, 0, " ");
+ }
+
+ print $impl " }\n}\n";
+ $struct{$qualname} = { size => $size, name => $name, paramref => \%paramrefs };
+ $t->purge;
+}
+
+# XML::Twig handler for a <union> element.  Emits a struct_<name>() C
+# dissector that dissects every member starting from the same base offset,
+# then advances by the size of the largest member.  All members must have a
+# fixed size; registered in %struct like a struct.
+sub union {
+ # TODO proper dissection
+ #
+ # Right now, the only extension to use a union is randr.
+ # for now, punt.
+ my ($t, $elt) = @_;
+ my $name = $elt->att('name');
+ my $qualname = qualname($name);
+ $type_name{$name} = $qualname;
+
+ if (defined $struct{$qualname}) {
+ $t->purge;
+ return;
+ }
+
+ my @elements = $elt->children(qr/field/);
+ my @sizes;
+
+ print(" - Union $name\n");
+
+ $name = $qualname;
+ $name =~ s/:/_/;
+
+ # Find union size
+ foreach my $e (@elements) {
+ my $type = $e->att('type');
+ my $info = getinfo($type);
+
+ $info->{'size'} > 0 or die ("Error: Union containing variable sized struct $type\n");
+ push @sizes, $info->{'size'};
+ }
+ # union size = size of the largest member
+ @sizes = sort {$b <=> $a} @sizes;
+ my $size = $sizes[0];
+
+ print $decl "static int hf_x11_union_$name = -1;\n";
+ print $reg "{ &hf_x11_union_$name, { \"$name\", \"x11.union.$name\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
+
+ print $impl <<eot

+static void struct_$name(tvbuff_t *tvb, int *offsetp, proto_tree *root, guint byte_order, int count)
+{
+ int i;
+ int base = *offsetp;
+ for (i = 0; i < count; i++) {
+ proto_item *item;
+ proto_tree *t;
+eot
+;
+
+ my $varpat = 'union_'.$name.'_%s';
+ my $humanpat = "union.$name.%s";
+ my $refs = { field => {}, sumof => {} };
+
+ foreach my $e (@elements) {
+ reference_elements($e, $refs);
+ }
+ foreach my $e (@elements) {
+ register_element($e, $varpat, $humanpat, $refs, " ");
+ }
+
+ print $impl <<eot
+ item = proto_tree_add_item(root, hf_x11_union_$name, tvb, base, $size, ENC_NA);
+ t = proto_item_add_subtree(item, ett_x11_rectangle);
+
+eot
+;
+
+ # each member is dissected from the same base offset
+ foreach my $e (@elements) {
+ say $impl ' *offsetp = base;';
+ dissect_element($e, $varpat, $humanpat, 0, $refs, 0, " ");
+ }
+ say $impl " base += $size;";
+ say $impl ' }';
+ say $impl ' *offsetp = base;';
+ say $impl '}';
+
+ $struct{$qualname} = { size => $size, name => $name };
+ $t->purge;
+}
+
+# XML::Twig handler for an <enum> element.  Populates %enum{$fullname} with
+# forward and reverse maps for both plain values and bit positions:
+#   value: val=>name, rvalue: name=>val, bit: pos=>name, rbit: name=>pos
+# Items without an explicit <value>/<bit> get sequential values.
+sub enum {
+ my ($t, $elt) = @_;
+ my $name = $elt->att('name');
+ my $fullname = $incname[0].'_'.$name;
+
+ # allow lookup by bare and by extension-qualified name
+ $enum_name{$name} = $fullname;
+ $enum_name{$incname[0].':'.$name} = $fullname;
+
+ if (defined $enum{$fullname}) {
+ $t->purge;
+ return;
+ }
+
+ my @elements = $elt->children('item');
+
+ print(" - Enum $name\n");
+
+ my $value = {};
+ my $bit = {};
+ my $rvalue = {};
+ my $rbit = {};
+ $enum{$fullname} = { value => $value, bit => $bit, rbit => $rbit, rvalue => $rvalue };
+
+ my $nextvalue = 0;
+
+ foreach my $e (@elements) {
+ my $n = $e->att('name');
+ my $valtype = $e->first_child(qr/value|bit/);
+ if (defined $valtype) {
+ my $val = int($valtype->text());
+ given ($valtype->name()) {
+ when ('value') {
+ $$value{$val} = $n;
+ $$rvalue{$n} = $val;
+ $nextvalue = $val + 1;
+
+ # Ugly hack to support (temporary, hopefully) ugly
+ # hack in xinput:ChangeDeviceProperty
+ # Register certain values as bits also
+ given ($val) {
+ when (8) {
+ $$bit{'3'} = $n;
+ $$rbit{$n} = 3;
+ }
+ when (16) {
+ $$bit{'4'} = $n;
+ $$rbit{$n} = 4;
+ }
+ when (32) {
+ $$bit{'5'} = $n;
+ $$rbit{$n} = 5;
+ }
+ }
+ }
+ when ('bit') {
+ $$bit{$val} = $n;
+ $$rbit{$n} = $val;
+ }
+ }
+ } else {
+ # no explicit value: continue sequentially from the last one
+ $$value{$nextvalue} = $n;
+ $nextvalue++;
+ }
+ }
+
+ $t->purge;
+}
+
+# XML::Twig handler for a <request> element.  Emits the C dissector
+# "<header><name>()" for the request body (opcode recorded in %request) and,
+# when the request has a <reply>, a matching "<header><name>_Reply()"
+# dissector (opcode recorded in %reply).  GLX's Render 'data' list is
+# special-cased to dispatch into the mesa-generated sub-dissectors.
+sub request {
+ my ($t, $elt) = @_;
+ my $name = $elt->att('name');
+
+ print(" - Request $name\n");
+ $request{$elt->att('opcode')} = $name;
+
+ # 4 bytes of request header (opcode/length) are already consumed
+ my $length = 4;
+ my @elements = $elt->children(qr/pad|field|list|switch/);
+
+ # Wireshark defines _U_ to mean "Unused" (compiler specific define)
+ if (!@elements) {
+ print $impl <<eot

+static void $header$name(tvbuff_t *tvb _U_, packet_info *pinfo _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_, int length _U_)
+{
+eot
+;
+ } else {
+ print $impl <<eot

+static void $header$name(tvbuff_t *tvb, packet_info *pinfo _U_, int *offsetp, proto_tree *t, guint byte_order, int length _U_)
+{
+eot
+;
+ }
+ my $varpat = $header.'_'.$name.'_%s';
+ my $humanpat = "$header.$name.%s";
+ my $refs = { field => {}, sumof => {} };
+
+ foreach my $e (@elements) {
+ reference_elements($e, $refs);
+ }
+ foreach my $e (@elements) {
+ register_element($e, $varpat, $humanpat, $refs);
+ }
+
+ foreach my $e (@elements) {
+ if ($e->name() eq 'list' && $name eq 'Render' && $e->att('name') eq 'data' && -e "$mesadir/gl_API.xml") {
+ # Special case: Use mesa-generated dissector for 'data'
+ print $impl " dispatch_glx_render(tvb, pinfo, offsetp, t, byte_order, (length - $length));\n";
+ } else {
+ $length = dissect_element($e, $varpat, $humanpat, $length, $refs, 1);
+ }
+ }
+
+ say $impl '}';
+
+ my $reply = $elt->first_child('reply');
+ if ($reply) {
+ $reply{$elt->att('opcode')} = $name;
+
+ $varpat = $header.'_'.$name.'_reply_%s';
+ $humanpat = "$header.$name.reply.%s";
+
+ @elements = $reply->children(qr/pad|field|list|switch/);
+
+ # Wireshark defines _U_ to mean "Unused" (compiler specific define)
+ if (!@elements) {
+ say $impl "static void $header$name"."_Reply(tvbuff_t *tvb _U_, packet_info *pinfo, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_)\n{";
+ } else {
+ say $impl "static void $header$name"."_Reply(tvbuff_t *tvb, packet_info *pinfo, int *offsetp, proto_tree *t, guint byte_order)\n{";
+ }
+ say $impl ' int sequence_number;' if (@elements);
+
+ my $refs = { field => {}, sumof => {} };
+ foreach my $e (@elements) {
+ reference_elements($e, $refs);
+ }
+
+ say $impl ' int f_length;' if ($refs->{field}{'length'});
+ say $impl ' int length;' if ($refs->{length});
+ foreach my $e (@elements) {
+ register_element($e, $varpat, $humanpat, $refs);
+ }
+
+ say $impl '';
+ say $impl ' col_append_fstr(pinfo->cinfo, COL_INFO, "-'.$name.'");';
+ say $impl '';
+ say $impl ' REPLY(reply);';
+
+ # after the first element of a reply come the fixed sequence-number
+ # and reply-length header fields; emit them once, then continue
+ my $first = 1;
+ my $length = 1;
+ foreach my $e (@elements) {
+ $length = dissect_element($e, $varpat, $humanpat, $length, $refs);
+ if ($first) {
+ $first = 0;
+ say $impl ' sequence_number = tvb_get_guint16(tvb, *offsetp, byte_order);';
+ say $impl ' proto_tree_add_uint_format_value(t, hf_x11_reply_sequencenumber, tvb, *offsetp, 2, sequence_number,';
+ say $impl ' "%d ('.$header.'-'.$name.')", sequence_number);';
+ say $impl ' *offsetp += 2;';
+
+ if ($refs->{field}{length}) {
+ say $impl ' f_length = tvb_get_guint32(tvb, *offsetp, byte_order);';
+ }
+ if ($refs->{length}) {
+ say $impl ' length = f_length * 4 + 32;';
+ }
+ say $impl ' proto_tree_add_item(t, hf_x11_replylength, tvb, *offsetp, 4, byte_order);';
+ say $impl ' *offsetp += 4;';
+
+ $length += 6;
+ }
+ }
+
+ say $impl '}';
+ }
+ $t->purge;
+}
+
+sub defxid(@) {
+ my $name;
+ while ($name = shift) {
+ my $qualname = qualname($name);
+ $simpletype{$qualname} = { size => 4, encoding => 'byte_order', type => 'FT_UINT32', base => 'BASE_HEX', get => 'tvb_get_guint32', list => 'listOfCard32', };
+ $type_name{$name} = $qualname;
+ }
+}
+
+sub xidtype {
+ my ($t, $elt) = @_;
+ my $name = $elt->att('name');
+
+ defxid($name);
+
+ $t->purge;
+}
+
+sub typedef {
+ my ($t, $elt) = @_;
+ my $oldname = $elt->att('oldname');
+ my $newname = $elt->att('newname');
+ my $qualname = qualname($newname);
+
+ # Duplicate the type
+ my $info = get_simple_info($oldname);
+ if ($info) {
+ $simpletype{$qualname} = $info;
+ } elsif ($info = get_struct_info($oldname)) {
+ $struct{$qualname} = $info;
+ } else {
+ die ("$oldname not found while attempting to typedef $newname\n");
+ }
+ $type_name{$newname} = $qualname;
+
+ $t->purge;
+}
+
+sub error {
+ my ($t, $elt) = @_;
+
+ my $number = $elt->att('number');
+ if ($number >= 0) {
+ my $name = $elt->att('name');
+ print $error " \"$header-$name\",\n";
+ }
+
+ $t->purge;
+}
+
+sub event {
+ my ($t, $elt) = @_;
+
+ my $number = $elt->att('number');
+ $number or return;
+
+ my $name = $elt->att('name');
+ my $xge = $elt->att('xge');
+
+ if ($xge) {
+ $genericevent{$number} = $name;
+ } else {
+ $event{$number} = $name;
+ }
+
+ my $length = 1;
+ my @elements = $elt->children(qr/pad|field|list|switch/);
+
+ # Wireshark defines _U_ to mean "Unused" (compiler specific define)
+ if (!@elements) {
+ if ($xge) {
+ print $impl <<eot
+
+static void $header$name(tvbuff_t *tvb _U_, int length _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_)
+{
+eot
+;
+ } else {
+ print $impl <<eot
+
+static void $header$name(tvbuff_t *tvb _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_)
+{
+eot
+;
+ }
+ } else {
+ if ($xge) {
+ $length = 10;
+ print $impl <<eot
+
+static void $header$name(tvbuff_t *tvb, int length _U_, int *offsetp, proto_tree *t, guint byte_order)
+{
+eot
+;
+ } else {
+ print $impl <<eot
+
+static void $header$name(tvbuff_t *tvb, int *offsetp, proto_tree *t, guint byte_order)
+{
+eot
+;
+ }
+ }
+
+ my $varpat = $header.'_'.$name.'_%s';
+ my $humanpat = "$header.$name.%s";
+ my $refs = { field => {}, sumof => {} };
+
+ foreach my $e (@elements) {
+ reference_elements($e, $refs);
+ }
+ foreach my $e (@elements) {
+ register_element($e, $varpat, $humanpat, $refs);
+ }
+
+ if ($xge) {
+ say $impl " proto_tree_add_uint_format_value(t, hf_x11_minor_opcode, tvb, *offsetp, 2, $number,";
+ say $impl " \"$name ($number)\");";
+ foreach my $e (@elements) {
+ $length = dissect_element($e, $varpat, $humanpat, $length, $refs);
+ }
+ } else {
+ my $first = 1;
+ foreach my $e (@elements) {
+ $length = dissect_element($e, $varpat, $humanpat, $length, $refs);
+ if ($first) {
+ $first = 0;
+ say $impl " CARD16(event_sequencenumber);";
+ }
+ }
+ }
+
+ say $impl "}\n";
+
+ $t->purge;
+}
+
+sub include_start {
+ my ($t, $elt) = @_;
+ my $header = $elt->att('header');
+ unshift @incname, $header;
+}
+
+sub include_end {
+ shift @incname;
+}
+
+sub include
+{
+ my ($t, $elt) = @_;
+ my $include = $elt->text();
+
+ print " - Import $include\n";
+ my $xml = XML::Twig->new(
+ start_tag_handlers => {
+ 'xcb' => \&include_start,
+ },
+ twig_roots => {
+ 'import' => \&include,
+ 'struct' => \&struct,
+ 'xidtype' => \&xidtype,
+ 'xidunion' => \&xidtype,
+ 'typedef' => \&typedef,
+ 'enum' => \&enum,
+ },
+ end_tag_handlers => {
+ 'xcb' => \&include_end,
+ });
+ $xml->parsefile("$srcdir/xcbproto/src/$include.xml") or die ("Cannot open $include.xml\n");
+
+ $t->purge;
+}
+
+
+sub xcb_start {
+ my ($t, $elt) = @_;
+ $header = $elt->att('header');
+ $extname = ($elt->att('extension-name') or $header);
+ unshift @incname, $header;
+
+ print("Extension $extname\n");
+
+ undef %request;
+ undef %genericevent;
+ undef %event;
+ undef %reply;
+
+ %simpletype = ();
+ %enum_name = ();
+ %type_name = ();
+
+ print $error "const char *$header"."_errors[] = {\n";
+}
+
+sub xcb {
+ my ($t, $elt) = @_;
+
+ my $xextname = $elt->att('extension-xname');
+ my $lookup_name = $header . "_extension_minor";
+ my $error_name = $header . "_errors";
+ my $event_name = $header . "_events";
+ my $genevent_name = 'NULL';
+ my $reply_name = $header . "_replies";
+
+ print $decl "static int hf_x11_$lookup_name = -1;\n\n";
+
+ print $impl "static const value_string $lookup_name"."[] = {\n";
+ foreach my $req (sort {$a <=> $b} keys %request) {
+ print $impl " { $req, \"$request{$req}\" },\n";
+ }
+ print $impl " { 0, NULL }\n";
+ print $impl "};\n";
+
+ say $impl "const x11_event_info $event_name".'[] = {';
+ foreach my $e (sort {$a <=> $b} keys %event) {
+ say $impl " { \"$header-$event{$e}\", $header$event{$e} },";
+ }
+ say $impl ' { NULL, NULL }';
+ say $impl '};';
+
+ if (%genericevent) {
+ $genevent_name = $header.'_generic_events';
+ say $impl 'static const x11_generic_event_info '.$genevent_name.'[] = {';
+
+ for my $val (sort { $a <=> $b } keys %genericevent) {
+ say $impl sprintf(" { %3d, %s },", $val, $header.$genericevent{$val});
+ }
+ say $impl sprintf(" { %3d, NULL },", 0);
+ say $impl '};';
+ say $impl '';
+ }
+
+ print $impl "static x11_reply_info $reply_name"."[] = {\n";
+ foreach my $e (sort {$a <=> $b} keys %reply) {
+ print $impl " { $e, $header$reply{$e}_Reply },\n";
+ }
+ print $impl " { 0, NULL }\n";
+ print $impl "};\n";
+
+ print $reg "{ &hf_x11_$lookup_name, { \"extension-minor\", \"x11.extension-minor\", FT_UINT8, BASE_DEC, VALS($lookup_name), 0, \"minor opcode\", HFILL }},\n\n";
+
+ print $impl <<eot
+
+static void dispatch_$header(tvbuff_t *tvb, packet_info *pinfo, int *offsetp, proto_tree *t, guint byte_order)
+{
+ int minor, length;
+ minor = CARD8($lookup_name);
+ length = REQUEST_LENGTH();
+
+ col_append_fstr(pinfo->cinfo, COL_INFO, "-%s",
+ val_to_str(minor, $lookup_name,
+ "<Unknown opcode %d>"));
+ switch (minor) {
+eot
+ ;
+
+ foreach my $req (sort {$a <=> $b} keys %request) {
+ print $impl " case $req:\n";
+ print $impl " $header$request{$req}(tvb, pinfo, offsetp, t, byte_order, length);\n";
+ print $impl " break;\n";
+ }
+ say $impl " /* No need for a default case here, since Unknown is printed above,";
+ say $impl " and UNDECODED() is taken care of by dissect_x11_request */";
+ print $impl " }\n}\n";
+ print $impl <<eot
+
+static void register_$header(void)
+{
+ set_handler("$xextname", dispatch_$header, $error_name, $event_name, $genevent_name, $reply_name);
+}
+eot
+ ;
+
+ print $error " NULL\n};\n\n";
+
+ push @register, $header;
+}
+
+sub find_version {
+ #my $git = `which git`;
+ #chomp($git);
+ #-x $git or return 'unknown';
+
+ my $lib = shift;
+ # this will generate an error on stderr if git isn't in our $PATH
+ # but that's OK. The version is still set to 'unknown' in that case
+ # and at least the operator could see it.
+ my $ver = `git --git-dir=$lib/.git describe --tags`;
+ $ver //= 'unknown';
+ chomp $ver;
+ return $ver;
+}
+
+sub add_generated_header {
+ my ($out, $using) = @_;
+ my $ver = find_version($using);
+
+ $using = File::Spec->abs2rel ($using, $srcdir);
+
+ print $out <<eot
+/* Do not modify this file. */
+/* It was automatically generated by $script_name
+ using $using version $ver */
+eot
+ ;
+
+ # Add license text
+ print $out <<eot
+/*
+ * Copyright 2008, 2009, 2013, 2014 Open Text Corporation <pharris[AT]opentext.com>
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald[AT]wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+eot
+ ;
+}
+
+# initialize core X11 protocol
+# Do this in the Makefile now
+#system('./process-x11-fields.pl < x11-fields');
+
+# Extension implementation
+$impl = new IO::File "> $srcdir/x11-extension-implementation.h"
+ or die ("Cannot open $srcdir/x11-extension-implementation.h for writing\n");
+$error = new IO::File "> $srcdir/x11-extension-errors.h"
+ or die ("Cannot open $srcdir/x11-extension-errors.h for writing\n");
+
+add_generated_header($impl, $srcdir . '/xcbproto');
+add_generated_header($error, $srcdir . '/xcbproto');
+
+# Open the files generated by process-x11-fields.pl for appending
+$reg = new IO::File ">> $srcdir/x11-register-info.h"
+ or die ("Cannot open $srcdir/x11-register-info.h for appending\n");
+$decl = new IO::File ">> $srcdir/x11-declarations.h"
+ or die ("Cannot open $srcdir/x11-declarations.h for appending\n");
+
+print $reg "\n/* Generated by $script_name below this line */\n";
+print $decl "\n/* Generated by $script_name below this line */\n";
+
+# Mesa for glRender
+if (-e "$mesadir/gl_API.xml") {
+ $enum = new IO::File "> $srcdir/x11-glx-render-enum.h"
+ or die ("Cannot open $srcdir/x11-glx-render-enum.h for writing\n");
+ add_generated_header($enum, $srcdir . '/mesa');
+ print $enum "static const value_string mesa_enum[] = {\n";
+ print $impl '#include "x11-glx-render-enum.h"'."\n\n";
+
+ print("Mesa glRender:\n");
+ $header = "glx_render";
+
+ my $xml = XML::Twig->new(
+ start_tag_handlers => {
+ },
+ twig_roots => {
+ 'category' => \&mesa_category,
+ 'enum' => \&mesa_enum,
+ 'type' => \&mesa_type,
+ 'function' => \&mesa_function,
+ });
+ $xml->parsefile("$mesadir/gl_API.xml") or die ("Cannot open gl_API\n");
+
+ for my $enum_key ( sort {$a<=>$b} keys %mesa_enum_hash) {
+ say $enum sprintf(" { 0x%04x, \"%s\" },", $enum_key, $mesa_enum_hash{$enum_key});
+ }
+ print $enum " { 0, NULL }\n";
+ print $enum "};\n";
+ $enum->close();
+
+ print $decl "static int hf_x11_glx_render_op_name = -1;\n\n";
+
+ print $impl "static const value_string glx_render_op_name"."[] = {\n";
+ foreach my $req (sort {$a <=> $b} keys %request) {
+ print $impl " { $req, \"gl$request{$req}\" },\n";
+ }
+ print $impl " { 0, NULL }\n";
+ print $impl "};\n";
+ print $impl "static value_string_ext mesa_enum_ext = VALUE_STRING_EXT_INIT(mesa_enum);\n";
+
+ print $reg "{ &hf_x11_glx_render_op_name, { \"render op\", \"x11.glx.render.op\", FT_UINT16, BASE_DEC, VALS(glx_render_op_name), 0, NULL, HFILL }},\n\n";
+
+# Uses ett_x11_list_of_rectangle, since I am unable to see how the subtree type matters.
+ print $impl <<eot
+
+static void dispatch_glx_render(tvbuff_t *tvb, packet_info *pinfo, int *offsetp, proto_tree *t, guint byte_order, int length)
+{
+ while (length >= 4) {
+ guint32 op, len;
+ int next;
+ proto_item *ti;
+ proto_tree *tt;
+
+ len = tvb_get_guint16(tvb, *offsetp, byte_order);
+
+ op = tvb_get_guint16(tvb, *offsetp + 2, byte_order);
+ ti = proto_tree_add_uint(t, hf_x11_glx_render_op_name, tvb, *offsetp, len, op);
+
+ tt = proto_item_add_subtree(ti, ett_x11_list_of_rectangle);
+
+ ti = proto_tree_add_item(tt, hf_x11_request_length, tvb, *offsetp, 2, byte_order);
+ *offsetp += 2;
+ proto_tree_add_item(tt, hf_x11_glx_render_op_name, tvb, *offsetp, 2, byte_order);
+ *offsetp += 2;
+
+ if (len < 4) {
+ expert_add_info(pinfo, ti, &ei_x11_request_length);
+ /* Eat the rest of the packet, mark it undecoded */
+ len = length;
+ op = -1;
+ }
+ len -= 4;
+
+ next = *offsetp + len;
+
+ switch (op) {
+eot
+ ;
+ foreach my $req (sort {$a <=> $b} keys %request) {
+ print $impl " case $req:\n";
+ print $impl " mesa_$request{$req}(tvb, offsetp, tt, byte_order, len);\n";
+ print $impl " break;\n";
+ }
+ print $impl " default:\n";
+ print $impl " proto_tree_add_item(tt, hf_x11_undecoded, tvb, *offsetp, len, ENC_NA);\n";
+ print $impl " *offsetp += len;\n";
+
+ print $impl " }\n";
+ print $impl " if (*offsetp < next) {\n";
+ print $impl " proto_tree_add_item(tt, hf_x11_unused, tvb, *offsetp, next - *offsetp, ENC_NA);\n";
+ print $impl " *offsetp = next;\n";
+ print $impl " }\n";
+ print $impl " length -= (len + 4);\n";
+ print $impl " }\n}\n";
+}
+
+$enum = new IO::File "> $srcdir/x11-enum.h"
+ or die ("Cannot open $srcdir/x11-enum.h for writing\n");
+add_generated_header($enum, $srcdir . '/xcbproto');
+print $impl '#include "x11-enum.h"'."\n\n";
+
+# XCB
+foreach my $ext (@reslist) {
+ my $xml = XML::Twig->new(
+ start_tag_handlers => {
+ 'xcb' => \&xcb_start,
+ },
+ twig_roots => {
+ 'xcb' => \&xcb,
+ 'import' => \&include,
+ 'request' => \&request,
+ 'struct' => \&struct,
+ 'union' => \&union,
+ 'xidtype' => \&xidtype,
+ 'xidunion' => \&xidtype,
+ 'typedef' => \&typedef,
+ 'error' => \&error,
+ 'errorcopy' => \&error,
+ 'event' => \&event,
+ 'enum' => \&enum,
+ });
+ $xml->parsefile($ext) or die ("Cannot open $ext\n");
+}
+
+print $impl "static void register_x11_extensions(void)\n{\n";
+foreach my $reg (@register) {
+ print $impl " register_$reg();\n";
+}
+print $impl "}\n";
+
+#
+# Editor modelines
+#
+# Local Variables:
+# c-basic-offset: 4
+# tab-width: 8
+# indent-tabs-mode: nil
+# End:
+#
+# ex: set shiftwidth=4 tabstop=8 expandtab:
+# :indentSize=4:tabSize=8:noTabs=true:
+#
diff --git a/tools/radiotap-gen/CMakeLists.txt b/tools/radiotap-gen/CMakeLists.txt
new file mode 100644
index 0000000..1be395c
--- /dev/null
+++ b/tools/radiotap-gen/CMakeLists.txt
@@ -0,0 +1,8 @@
+
+if(UNIX)
+
+ add_executable( radiotap-gen radiotap-gen.c )
+
+ target_link_libraries( radiotap-gen pcap ${GLIB2_LIBRARIES} )
+
+endif()
diff --git a/tools/radiotap-gen/radiotap-gen.c b/tools/radiotap-gen/radiotap-gen.c
new file mode 100644
index 0000000..3f319ab
--- /dev/null
+++ b/tools/radiotap-gen/radiotap-gen.c
@@ -0,0 +1,182 @@
+/*
+ * A generic packet generator application for U-SIG radiotap packets.
+ *
+ * Copyright Richard Sharpe, 2022.
+ *
+ * You will need libpcap installed.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * A sample program showing how to create packets with radiotap headers. This
+ * is mainly useful for those situations where you are adding a new radiotap
+ * TLV but the drivers for the hardware is not ready yet and you need to
+ * test your radiotap dissector.
+ */
+
+#include <errno.h>
+#include <glib.h>
+#include <pcap.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct u_sig_hdr {
+ uint16_t type;
+ uint16_t len;
+ uint32_t common;
+ uint32_t value;
+ uint32_t mask;
+} __attribute__((packed));
+
+struct radiotap_hdr {
+ uint8_t vers;
+ uint8_t pad;
+ uint16_t len;
+ uint32_t presence_flags;
+ uint32_t MAC_timestamp[2];
+ uint8_t flags;
+ uint8_t data_rate;
+ uint16_t channel_freq;
+ uint16_t pad2;
+ uint16_t pad3;
+ struct u_sig_hdr u_sig_hdr;
+} __attribute__((packed));
+
+struct complete_pkt {
+ struct radiotap_hdr radiotap;
+ uint8_t pkt_data[26];
+} __attribute__((packed));
+
+/* Some random 802.11 packet, an S1G beacon, I think */
+uint8_t pkt_data[26] = { 0x1c, 0x0b, 0x00, 0x00, 0x02, 0x00, 0xeb, 0x4b,
+ 0x02, 0x8b, 0x12, 0x52, 0xa7, 0x6b, 0x00, 0x62,
+ 0x9c, 0x6b, 0x64, 0x4e, 0x35, 0xae, 0x05, 0x02,
+ 0x00, 0x02 };
+
+#define PHY_VERSION_ID_KNOWN 0x00000001
+#define BW_KNOWN 0x00000002
+#define UL_DL_KNOWN 0x00000004
+#define BSS_COLOR_KNOWN 0x00000008
+#define UL_DL 0x00040000
+
+/*
+ * Generate some u_sig packets.
+ */
+static void gen_u_sig_pkts(pcap_dumper_t *dumper)
+{
+ struct pcap_pkthdr hdr;
+ struct complete_pkt pkt;
+ struct timeval ts;
+ /*
+ * Create the complete packet.
+ *
+ * 1. Set up the radiotap headers we need, including the TLVs.
+ */
+ pkt.radiotap.vers = 0;
+ pkt.radiotap.pad = 0;
+ pkt.radiotap.len = sizeof(struct radiotap_hdr);
+ pkt.radiotap.presence_flags = 0x1000000F;
+ pkt.radiotap.MAC_timestamp[0] = 0x17860500;
+ pkt.radiotap.MAC_timestamp[1] = 0x22ac9b1a;
+ pkt.radiotap.flags = 0;
+ pkt.radiotap.data_rate = 0x02;
+ pkt.radiotap.channel_freq = 5600;
+ pkt.radiotap.pad2 = 0x0100;
+ pkt.radiotap.pad3 = 0x0000;
+ pkt.radiotap.u_sig_hdr.type = 33; /* The TLV we want U-SIG */
+ pkt.radiotap.u_sig_hdr.len = 12;
+
+ /* Set the BW to 80MHz for the moment */
+ pkt.radiotap.u_sig_hdr.common = PHY_VERSION_ID_KNOWN | BW_KNOWN | \
+ UL_DL_KNOWN | 0x00012000;
+ /*
+ * The bits are: U-SIG-1 B20-25: all 1s.
+ * PPDU Type and Comp mode: 0
+ * Validate: 1
+ * Punctured Channel Information: 0 (no puncturing)
+ * Validate: 1
+ * EHT SIG MCS: 0 (EHT-MCS 0)
+ */
+ pkt.radiotap.u_sig_hdr.value = 0x0000413F;
+ pkt.radiotap.u_sig_hdr.mask = 0x003fbec0; /* The Intel value */
+
+ /* Copy the packet data in */
+ memcpy(pkt.pkt_data, pkt_data, sizeof(pkt.pkt_data));
+
+ gettimeofday(&ts, NULL);
+ hdr.ts.tv_sec = ts.tv_sec;
+ hdr.ts.tv_usec = ts.tv_usec;
+ hdr.caplen = sizeof(struct complete_pkt);
+ hdr.len = sizeof(struct complete_pkt);
+
+ pcap_dump((u_char *)dumper, &hdr, (u_char *)&pkt);
+
+ /* Dump another with different 160MHz */
+ /*
+ * The bits are: U-SIG-1 B20-25: all 1s.
+ * PPDU Type and Comp mode: 0
+ * Validate: 1
+ * Punctured Channel Information: 1 ([x 1 1 1]puncturing)
+ * Validate: 1
+ * EHT SIG MCS: 1 (EHT-MCS 1)
+ */
+ pkt.radiotap.u_sig_hdr.common = PHY_VERSION_ID_KNOWN | BW_KNOWN | \
+ UL_DL_KNOWN | 0x00018000;
+ pkt.radiotap.u_sig_hdr.mask = 0x003fbec0;
+ pkt.radiotap.u_sig_hdr.value = 0x0001183F;
+
+ /* We should probably update the timestamp */
+ pcap_dump((u_char *)dumper, &hdr, (u_char *)&pkt);
+
+ /* Dump another with different 160MHz */
+ /*
+ * The bits are: U-SIG-1 B20-25: all 1s.
+ * PPDU Type and Comp mode: 0
+ * Validate: 1
+ * Punctured Channel Information: 1 ([x 1 1 1]puncturing)
+ * Validate: 1
+ * EHT SIG MCS: 1 (EHT-MCS 1)
+ */
+ pkt.radiotap.u_sig_hdr.common = PHY_VERSION_ID_KNOWN | BW_KNOWN | \
+ UL_DL_KNOWN | UL_DL | 0x00018000;
+ pkt.radiotap.u_sig_hdr.mask = 0x003fbec0;
+ pkt.radiotap.u_sig_hdr.value = 0x0001183F;
+
+ pcap_dump((u_char *)dumper, &hdr, (u_char *)&pkt);
+}
+
+int main(int argc, char *argv[])
+{
+ int err = -1;
+ pcap_t *pd = NULL;
+ pcap_dumper_t *dumper = NULL;
+
+ if (argc < 2) {
+ printf("Usage: %s <pcap-file-name>\n", argv[0]);
+ return 1;
+ }
+
+ pd = pcap_open_dead(DLT_IEEE802_11_RADIO, 65535);
+ if (pd == NULL) {
+ fprintf(stderr, "Unable to open pcap device: %s\n",
+ g_strerror(errno));
+ return -1;
+ }
+
+ dumper = pcap_dump_open(pd, argv[1]);
+ if (dumper == NULL) {
+ fprintf(stderr, "Unable to create dump file %s: %s\n",
+ argv[1], pcap_geterr(pd));
+ goto close_pd;
+ }
+
+ /*
+ * Add calls to any functions that generate packets.
+ */
+ gen_u_sig_pkts(dumper);
+
+ pcap_dump_close(dumper); err = 0; /* success: exit 0 instead of the error default */
+close_pd:
+ pcap_close(pd);
+ return err;
+}
diff --git a/tools/randpkt-test.sh b/tools/randpkt-test.sh
new file mode 100755
index 0000000..b47646d
--- /dev/null
+++ b/tools/randpkt-test.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+
+# Randpkt testing script for TShark
+#
+# This script uses Randpkt to generate capture files with randomized
+# content. It runs TShark on each generated file and checks for errors.
+# The files are processed repeatedly until an error is found.
+
+TEST_TYPE="randpkt"
+# shellcheck source=tools/test-common.sh
+. "$( dirname "$0" )"/test-common.sh || exit 1
+
+# Run under valgrind ?
+VALGRIND=0
+
+# Run under AddressSanitizer ?
+ASAN=$CONFIGURED_WITH_ASAN
+
+# Trigger an abort if a dissector finds a bug.
+# Uncomment to disable
+export WIRESHARK_ABORT_ON_DISSECTOR_BUG="True"
+
+# The maximum permitted amount of memory leaked. Eventually this should be
+# worked down to zero, but right now that would fail on every single capture.
+# Only has effect when running under valgrind.
+MAX_LEAK=$(( 1024 * 100 ))
+
+# To do: add options for file names and limits
+while getopts "ab:d:gp:t:" OPTCHAR ; do
+ case $OPTCHAR in
+ a) ASAN=1 ;;
+ b) WIRESHARK_BIN_DIR=$OPTARG ;;
+ d) TMP_DIR=$OPTARG ;;
+ g) VALGRIND=1 ;;
+ p) MAX_PASSES=$OPTARG ;;
+ t) PKT_TYPES=$OPTARG ;;
+ *) printf "Unknown option: %s\\n" "$OPTARG"
+ esac
+done
+shift $(( OPTIND - 1 ))
+
+### usually you won't have to change anything below this line ###
+
+ws_bind_exec_paths
+ws_check_exec "$TSHARK" "$RANDPKT" "$DATE" "$TMP_DIR"
+
+[[ -z "$PKT_TYPES" ]] && PKT_TYPES=$($RANDPKT -h | awk '/^\t/ {print $1}')
+
+if [ $VALGRIND -eq 1 ]; then
+ RUNNER="$( dirname "$0" )/valgrind-wireshark.sh"
+ COMMON_ARGS="-b $WIRESHARK_BIN_DIR $COMMON_ARGS"
+ declare -a RUNNER_ARGS=("" "-T")
+ # Valgrind requires more resources, so permit 1.5x memory and 3x time
+ # (1.5x time is too small for a few large captures in the menagerie)
+ MAX_CPU_TIME=$(( 3 * "$MAX_CPU_TIME" ))
+ MAX_VMEM=$(( 3 * "$MAX_VMEM" / 2 ))
+else
+ # Not using valgrind, use regular tshark.
+ # TShark arguments (you won't have to change these)
+ # n Disable network object name resolution
+ # V Print a view of the details of the packet rather than a one-line summary of the packet
+ # x Cause TShark to print a hex and ASCII dump of the packet data after printing the summary or details
+ # r Read packet data from the following infile
+ RUNNER="$TSHARK"
+ declare -a RUNNER_ARGS=("-nVxr" "-nr")
+fi
+RANDPKT_ARGS="-b 2000 -c 5000"
+
+if [ $ASAN -ne 0 ]; then
+ echo -n "ASan enabled. Virtual memory limit is "
+ ulimit -v
+else
+ echo "ASan disabled. Virtual memory limit is $MAX_VMEM"
+fi
+
+HOWMANY="forever"
+if [ "$MAX_PASSES" -gt 0 ]; then
+ HOWMANY="$MAX_PASSES passes"
+fi
+echo -n "Running $RUNNER with args: "
+printf "\"%s\" " "${RUNNER_ARGS[@]}"
+echo "($HOWMANY)"
+echo "Running $RANDPKT with args: $RANDPKT_ARGS"
+echo ""
+
+# Clean up on <ctrl>C, etc
+trap_all() {
+ printf '\n\nCaught signal. Exiting.\n'
+ rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"
+ exit 0
+}
+
+trap trap_all HUP INT TERM ABRT
+
+# Iterate over our capture files.
+PASS=0
+while [ $PASS -lt "$MAX_PASSES" ] || [ "$MAX_PASSES" -lt 1 ] ; do
+ PASS=$(( PASS + 1 ))
+ echo "Pass $PASS:"
+
+ for PKT_TYPE in $PKT_TYPES ; do
+ if [ $PASS -gt "$MAX_PASSES" ] && [ "$MAX_PASSES" -ge 1 ] ; then
+ break # We caught a signal
+ fi
+ echo -n " $PKT_TYPE: "
+
+ DISSECTOR_BUG=0
+ VG_ERR_CNT=0
+
+ # shellcheck disable=SC2086
+ "$RANDPKT" $RANDPKT_ARGS -t "$PKT_TYPE" "$TMP_DIR/$TMP_FILE" \
+ > /dev/null 2>&1
+
+ for ARGS in "${RUNNER_ARGS[@]}" ; do
+ echo -n "($ARGS) "
+ echo -e "Command and args: $RUNNER $ARGS\\n" > "$TMP_DIR/$ERR_FILE"
+
+ # Run in a child process with limits.
+ (
+ # Set some limits to the child processes, e.g. stop it if
+ # it's running longer than MAX_CPU_TIME seconds. (ulimit
+ # is not supported well on cygwin - it shows some warnings -
+ # and the features we use may not all be supported on some
+ # UN*X platforms.)
+ ulimit -S -t $MAX_CPU_TIME -s $MAX_STACK
+
+ # Allow core files to be generated
+ ulimit -c unlimited
+
+ # Don't enable ulimit -v when using ASAN. See
+ # https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
+ if [ $ASAN -eq 0 ]; then
+ ulimit -S -v $MAX_VMEM
+ fi
+
+ # shellcheck disable=SC2086
+ "$RUNNER" $ARGS "$TMP_DIR/$TMP_FILE" \
+ > /dev/null 2>> "$TMP_DIR/$ERR_FILE"
+ )
+ RETVAL=$?
+
+ if [ $VALGRIND -eq 1 ]; then
+ VG_ERR_CNT=$( grep "ERROR SUMMARY:" "$TMP_DIR/$ERR_FILE" | cut -f4 -d' ' )
+ VG_DEF_LEAKED=$( grep "definitely lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
+ VG_IND_LEAKED=$( grep "indirectly lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
+ VG_TOTAL_LEAKED=$(( "$VG_DEF_LEAKED" + "$VG_IND_LEAKED" ))
+ if [ $RETVAL -ne 0 ] ; then
+ echo "General Valgrind failure."
+ VG_ERR_CNT=1
+ elif [ "$VG_TOTAL_LEAKED" -gt "$MAX_LEAK" ] ; then
+ echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)."
+ echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)." >> "$TMP_DIR/$ERR_FILE"
+ VG_ERR_CNT=1
+ fi
+ if grep -q "Valgrind cannot continue" "$TMP_DIR/$ERR_FILE" ; then
+ echo "Valgrind unable to continue."
+ VG_ERR_CNT=-1
+ fi
+ fi
+ if [ $RETVAL -ne 0 ] ; then break ; fi
+ done
+ grep -i "dissector bug" "$TMP_DIR/$ERR_FILE" \
+ > /dev/null 2>&1 && DISSECTOR_BUG=1
+
+ if [ $RETVAL -ne 0 ] || [ $DISSECTOR_BUG -ne 0 ] || [ $VG_ERR_CNT -ne 0 ] ; then
+ ws_exit_error
+ fi
+ echo " OK"
+ rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"
+ done
+done
diff --git a/tools/rdps.py b/tools/rdps.py
new file mode 100755
index 0000000..baf9b2d
--- /dev/null
+++ b/tools/rdps.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+#
+# rdps.py
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+'''\
+takes the file listed as the first argument and creates the file listed
+as the second argument. It takes a PostScript file and creates a C source
+with 2 functions:
+ print_ps_preamble()
+ print_ps_finale()
+
+Ported to Python from rdps.c.
+'''
+
+import sys
+import os.path
+
+
+def ps_clean_string(raw_str):
+ ps_str = ''
+ for c in raw_str:
+ if c == '\\':
+ ps_str += '\\\\'
+ elif c == '\n':
+ ps_str += '\\n'
+ else:
+ ps_str += c
+ return ps_str
+
+
+def start_code(fd, name):
+ fd.write("static const char ps_%s[] =\n" % name)
+
+
+def write_code(fd, raw_str):
+ ps_str = ps_clean_string(raw_str)
+ fd.write("\t\"%s\"\n" % ps_str)
+
+
+def end_code(fd, name):
+ fd.write(";\n")
+ fd.write("\n")
+ fd.write("void print_ps_%s(FILE *fd) {\n" % name)
+ fd.write("\tfwrite(ps_%s, sizeof ps_%s - 1, 1, fd);\n" % ( name, name ) )
+ fd.write("}\n\n\n")
+
+
+def exit_err(msg=None, *param):
+ if msg is not None:
+ sys.stderr.write(msg % param)
+ sys.exit(1)
+
+
+# Globals
+STATE_NULL = 'null'
+STATE_PREAMBLE = 'preamble'
+STATE_FINALE = 'finale'
+
+
+def main():
+ state = STATE_NULL
+
+ if len(sys.argv) != 3:
+ exit_err("%s: input_file output_file\n", __file__)
+
+ input = open(sys.argv[1], 'r')
+ output = open(sys.argv[2], 'w')
+
+ script_name = os.path.split(__file__)[-1]
+
+ output.write('''\
+/* DO NOT EDIT
+ *
+ * Created by %s.
+ *
+ * ps.c
+ * Definitions for generating PostScript(R) packet output.
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <stdio.h>
+
+#include "ps.h"
+
+''' % script_name)
+
+ for line in input:
+ #line = line.rstrip()
+ if state == STATE_NULL:
+ if line.startswith("% ---- wireshark preamble start ---- %"):
+ state = STATE_PREAMBLE
+ start_code(output, "preamble")
+ continue
+ elif line.startswith("% ---- wireshark finale start ---- %"):
+ state = STATE_FINALE
+ start_code(output, "finale")
+ continue
+ elif state == STATE_PREAMBLE:
+ if line.startswith("% ---- wireshark preamble end ---- %"):
+ state = STATE_NULL
+ end_code(output, "preamble")
+ continue
+ else:
+ write_code(output, line)
+ elif state == STATE_FINALE:
+ if line.startswith("% ---- wireshark finale end ---- %"):
+ state = STATE_NULL
+ end_code(output, "finale")
+ continue
+ else:
+ write_code(output, line)
+ else:
+ exit_err("NO MATCH:%s", line)
+
+ sys.exit(0)
+
+if __name__ == "__main__":
+ main()
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/release-update-debian-soversions.sh b/tools/release-update-debian-soversions.sh
new file mode 100755
index 0000000..e788d71
--- /dev/null
+++ b/tools/release-update-debian-soversions.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# Compare ABIs of two Wireshark working copies
+#
+# Copyright 2017 Balint Reczey <balint.reczey@canonical.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Set shared library package names and library versions in Debian packaging
+# matching the new major release's so versions
+
+set -e
+
+for i in codecs wireshark wiretap wsutil; do
+ NEW_VERSION=$(grep SOVERSION "$(grep -l lib${i} ./*/CMakeLists.txt)" | sed 's/.*SOVERSION \([0-9]*\).*/\1/')
+ rename "s/0\\./${NEW_VERSION}./" packaging/debian/lib${i}0.*
+ grep -l -R "lib${i}0" packaging/debian/ | xargs sed -i "s/lib${i}0/lib${i}${NEW_VERSION}/"
+ grep -l -R "lib${i}\\.so\\.0" packaging/debian/ | xargs sed -i "s/lib${i}\\.so\\.0/lib${i}.so.${NEW_VERSION}/"
+done
diff --git a/tools/rpm-setup.sh b/tools/rpm-setup.sh
new file mode 100755
index 0000000..23f0674
--- /dev/null
+++ b/tools/rpm-setup.sh
@@ -0,0 +1,358 @@
+#!/bin/bash
+# Setup development environment for RPM based systems such as Red Hat, Centos, Fedora, openSUSE
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# We drag in tools that might not be needed by all users; it's easier
+# that way.
+#
+
+set -e -u -o pipefail
+
+# Print command-line usage for this script.
+function print_usage() {
+	printf "\nUtility to setup a rpm-based system for Wireshark Development.\n"
+	printf "The basic usage installs the needed software\n\n"
+	# SC2059: pass $0 as an argument, never inside the printf format string.
+	printf "Usage: %s [--install-optional] [...other options...]\n" "$0"
+	printf "\t--install-optional: install optional software as well\n"
+	printf "\t--install-rpm-deps: install packages required to build the .rpm file\n"
+	printf "\t--install-qt5-deps: force installation of packages required to use Qt5\n"
+	printf "\t--install-qt6-deps: force installation of packages required to use Qt6\n"
+	printf "\t--install-all: install everything\n"
+	printf "\t[other]: other options are passed as-is to the package manager\n"
+}
+
+ADDITIONAL=0
+RPMDEPS=0
+ADD_QT5=0
+ADD_QT6=0
+HAVE_ADD_QT=0
+OPTIONS=
+for arg; do
+ case $arg in
+ --help|-h)
+ print_usage
+ exit 0
+ ;;
+ --install-optional)
+ ADDITIONAL=1
+ ;;
+ --install-rpm-deps)
+ RPMDEPS=1
+ ;;
+ --install-qt5-deps)
+ ADD_QT5=1
+ HAVE_ADD_QT=1
+ ;;
+ --install-qt6-deps)
+ ADD_QT6=1
+ HAVE_ADD_QT=1
+ ;;
+ --install-all)
+ ADDITIONAL=1
+ RPMDEPS=1
+ ADD_QT5=1
+ ADD_QT6=1
+ HAVE_ADD_QT=1
+ ;;
+ *)
+ OPTIONS="$OPTIONS $arg"
+ ;;
+ esac
+done
+
+# Check if the user is root
+if [ $(id -u) -ne 0 ]
+then
+ echo "You must be root."
+ exit 1
+fi
+
+BASIC_LIST="cmake \
+ gcc \
+ gcc-c++ \
+ flex \
+ python3 \
+ desktop-file-utils \
+ git \
+ glib2-devel \
+ libpcap-devel \
+ pcre2-devel \
+ zlib-devel \
+ libgcrypt-devel"
+
+ADDITIONAL_LIST="libcap-devel \
+ libssh-devel \
+ krb5-devel \
+ perl-Parse-Yapp \
+ snappy-devel \
+ minizip-devel \
+ lz4 \
+ libxml2-devel \
+ perl \
+ spandsp-devel \
+ systemd-devel \
+ python3-pytest \
+ python3-pytest-xdist"
+
+# Uncomment to add PNG compression utilities used by compress-pngs:
+# ADDITIONAL_LIST="$ADDITIONAL_LIST \
+# advancecomp \
+# optipng \
+# oxipng \
+# pngcrush"
+
+# XXX
+RPMDEPS_LIST="rpm-build"
+
+# Guess which package manager we will use
+for PM in zypper dnf yum ''; do
+ if type "$PM" >/dev/null 2>&1; then
+ break
+ fi
+done
+
+if [ -z $PM ]
+then
+ echo "No package managers found, exiting"
+ exit 1
+fi
+
+PM_OPT=
+case $PM in
+ zypper)
+ PM_OPT="--non-interactive"
+ PM_SEARCH="search -x --provides"
+ ;;
+ dnf)
+ PM_SEARCH="info"
+ ;;
+ yum)
+ PM_SEARCH="info"
+ ;;
+esac
+
+echo "Using $PM ($PM_SEARCH)"
+
+# Adds package $2 to list variable $1 if the package is found.
+# Returns non-zero (leaving the list untouched) when the package manager
+# does not know the package, so callers can chain fallbacks with ||.
+add_package() {
+	local list="$1" pkgname="$2"
+
+	# fail if the package is not known
+	$PM $PM_SEARCH "$pkgname" &> /dev/null || return 1
+
+	# package is found, append it to list
+	# (eval is required to append to the variable *named by* $1)
+	eval "${list}=\"\${${list}} \${pkgname}\""
+}
+
+# Adds packages $2-$n to list variable $1 if all the packages are found.
+# All-or-nothing: if any one package is unknown, none are added and the
+# function returns non-zero so callers can chain fallbacks with ||.
+add_packages() {
+	local list="$1" pkgnames="${@:2}"
+
+	# fail if any package is not known
+	for pkgname in $pkgnames; do
+		$PM $PM_SEARCH "$pkgname" &> /dev/null || return 1
+	done
+
+	# all packages are found, append it to list
+	# (eval is required to append to the variable *named by* $1)
+	eval "${list}=\"\${${list}} \${pkgnames}\""
+}
+
+add_package BASIC_LIST glib2 || add_package BASIC_LIST libglib-2_0-0 ||
+echo "Required package glib2|libglib-2_0-0 is unavailable" >&2
+
+# lua51, lua51-devel: OpenSUSE Leap 42.3 (lua would be fine too, as it installs lua52), OpenSUSE Leap 15.0 (lua installs lua53, so it wouldn't work)
+# compat-lua, compat-lua-devel: Fedora 28, Fedora 29, CentOS 8
+# lua, lua-devel: CentOS 7
+add_package BASIC_LIST lua51-devel || add_package BASIC_LIST compat-lua-devel || add_package BASIC_LIST lua-devel ||
+echo "Required package lua51-devel|compat-lua-devel|lua-devel is unavailable" >&2
+
+add_package BASIC_LIST lua51 || add_package BASIC_LIST compat-lua || add_package BASIC_LIST lua ||
+echo "Required package lua51|compat-lua|lua is unavailable" >&2
+
+add_package BASIC_LIST libpcap || add_package BASIC_LIST libpcap1 ||
+echo "Required package libpcap|libpcap1 is unavailable" >&2
+
+add_package BASIC_LIST zlib || add_package BASIC_LIST libz1 ||
+echo "Required package zlib|libz1 is unavailable" >&2
+
+add_package BASIC_LIST c-ares-devel || add_package BASIC_LIST libcares-devel ||
+echo "Required package c-ares-devel|libcares-devel is unavailable" >&2
+
+add_package BASIC_LIST speexdsp-devel || add_package BASIC_LIST speex-devel ||
+echo "Required package speexdsp-devel|speex-devel is unavailable" >&2
+
+if [ $HAVE_ADD_QT -eq 0 ]
+then
+ # Try to select Qt version from distro
+ test -e /etc/os-release && os_release='/etc/os-release' || os_release='/usr/lib/os-release'
+ # shellcheck disable=SC1090
+ . "${os_release}"
+
+ # Fedora 35 or later
+ if [ "${ID:-linux}" = "fedora" ] && [ "${VERSION_ID:-0}" -ge "35" ]; then
+ echo "Installing Qt6."
+ ADD_QT6=1
+ else
+ echo "Installing Qt5."
+ ADD_QT5=1
+ fi
+fi
+
+if [ $ADD_QT5 -ne 0 ]
+then
+	# qt5-linguist: CentOS, Fedora
+	# libqt5-linguist-devel: OpenSUSE
+	add_package BASIC_LIST qt5-linguist ||
+	add_package BASIC_LIST libqt5-linguist-devel ||
+	echo "Required package qt5-linguist|libqt5-linguist-devel is unavailable" >&2
+
+	# qt5-qtmultimedia: CentOS, Fedora, pulls in qt5-qtbase-devel (big dependency list!)
+	# libqt5-qtmultimedia-devel: OpenSUSE, pulls in Core, Gui, Multimedia, Network, Widgets
+	# OpenSUSE additionally has a separate Qt5PrintSupport package.
+	add_package BASIC_LIST qt5-qtmultimedia-devel ||
+	add_packages BASIC_LIST libqt5-qtmultimedia-devel libQt5PrintSupport-devel ||
+	echo "Required Qt5 Multimedia and/or Qt5 Print Support is unavailable" >&2
+
+	# This is only required on OpenSUSE
+	add_package BASIC_LIST libqt5-qtsvg-devel ||
+	echo "Required OpenSUSE package libqt5-qtsvg-devel is unavailable. Not required for other distributions." >&2
+
+	# This is only required on OpenSUSE
+	add_package BASIC_LIST libQt5Concurrent-devel ||
+	echo "Required OpenSUSE package libQt5Concurrent-devel is unavailable. Not required for other distributions." >&2
+
+	add_package ADDITIONAL_LIST qt5-qtimageformats ||
+	add_package ADDITIONAL_LIST libqt5-qtimageformats ||
+	echo "Optional Qt5 Image Formats is unavailable" >&2
+fi
+
+if [ $ADD_QT6 -ne 0 ]
+then
+ # Fedora Qt6 packages required from a minimal installation
+ QT6_LIST=(qt6-qtbase-devel
+ qt6-qttools-devel
+ qt6-qt5compat-devel
+ qt6-qtmultimedia-devel
+ libxkbcommon-devel)
+
+ for pkg in ${QT6_LIST[@]}
+ do
+ add_package BASIC_LIST "$pkg" ||
+ echo "Qt6 dependency $pkg is unavailable" >&2
+ done
+
+ add_package ADDITIONAL_LIST qt6-qtimageformats ||
+ echo "Optional Qt6 Image Formats is unavailable" >&2
+fi
+
+# This is only required on OpenSUSE
+add_packages BASIC_LIST hicolor-icon-theme xdg-utils ||
+echo "Required OpenSUSE packages hicolor-icon-theme and xdg-utils are unavailable. Not required for other distributions." >&2
+
+# This is only required (and available) on OpenSUSE
+add_package BASIC_LIST update-desktop-files ||
+echo "Required OpenSUSE package update-desktop-files is unavailable. Not required for other distributions." >&2
+
+# rubygem-asciidoctor.noarch: Centos, Fedora
+# (Added to RHEL/Centos 8: https://bugzilla.redhat.com/show_bug.cgi?id=1820896 )
+# ruby2.5-rubygem-asciidoctor: openSUSE 15.2
+add_package RPMDEPS_LIST rubygem-asciidoctor.noarch || add_package RPMDEPS_LIST ruby2.5-rubygem-asciidoctor ||
+echo "RPM dependency asciidoctor is unavailable" >&2
+
+# libcap: CentOS 7, Fedora 28, Fedora 29
+# libcap2: OpenSUSE Leap 42.3, OpenSUSE Leap 15.0
+add_package ADDITIONAL_LIST libcap || add_package ADDITIONAL_LIST libcap2 ||
+echo "Optional package libcap|libcap2 is unavailable" >&2
+
+add_package ADDITIONAL_LIST nghttp2-devel || add_package ADDITIONAL_LIST libnghttp2-devel ||
+echo "Optional package nghttp2-devel|libnghttp2-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST snappy || add_package ADDITIONAL_LIST libsnappy1 ||
+echo "Optional package snappy|libsnappy1 is unavailable" >&2
+
+# Message previously said "lbzstd-devel"; the package name is libzstd-devel.
+add_package ADDITIONAL_LIST libzstd-devel || echo "Optional package libzstd-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST lz4-devel || add_package ADDITIONAL_LIST liblz4-devel ||
+echo "Optional package lz4-devel|liblz4-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST libcap-progs || echo "Optional package libcap-progs is unavailable" >&2
+
+add_package ADDITIONAL_LIST libmaxminddb-devel ||
+echo "Optional package libmaxminddb-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST gnutls-devel || add_package ADDITIONAL_LIST libgnutls-devel ||
+echo "Optional package gnutls-devel|libgnutls-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST gettext-devel || add_package ADDITIONAL_LIST gettext-tools ||
+echo "Optional package gettext-devel|gettext-tools is unavailable" >&2
+
+add_package ADDITIONAL_LIST ninja || add_package ADDITIONAL_LIST ninja-build ||
+echo "Optional package ninja|ninja-build is unavailable" >&2
+
+add_package ADDITIONAL_LIST libxslt || add_package ADDITIONAL_LIST libxslt1 ||
+echo "Optional package libxslt|libxslt1 is unavailable" >&2
+
+add_package ADDITIONAL_LIST docbook-style-xsl || add_package ADDITIONAL_LIST docbook-xsl-stylesheets ||
+echo "Optional package docbook-style-xsl|docbook-xsl-stylesheets is unavailable" >&2
+
+add_package ADDITIONAL_LIST brotli-devel || add_packages ADDITIONAL_LIST libbrotli-devel libbrotlidec1 ||
+echo "Optional packages brotli-devel|libbrotli-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST libnl3-devel || add_package ADDITIONAL_LIST libnl-devel ||
+echo "Optional package libnl3-devel|libnl-devel are unavailable" >&2
+
+add_package ADDITIONAL_LIST ilbc-devel ||
+echo "Optional package ilbc-devel is unavailable" >&2
+
+# opus-devel: RHEL/CentOS, Fedora
+# libopus-devel: OpenSUSE
+add_package ADDITIONAL_LIST opus-devel || add_package ADDITIONAL_LIST libopus-devel ||
+echo "Optional package opus-devel|libopus-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST bcg729-devel ||
+echo "Optional package bcg729-devel is unavailable" >&2
+
+# RHEL 8 / CentOS 8 are missing the -devel packages for sbc and libsmi due to
+# RH deciding not to ship all -devel packages.
+# https://wiki.centos.org/FAQ/CentOS8/UnshippedPackages
+# There are CentOS bugs filed to add them to the Devel repository and eventually
+# RHEL 8 CRB / CentOS PowerTools, but make them optional for now.
+# https://bugs.centos.org/view.php?id=16504
+# https://bugs.centos.org/view.php?id=17824
+add_package ADDITIONAL_LIST sbc-devel ||
+echo "Optional package sbc-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST libsmi-devel ||
+echo "Optional package libsmi-devel is unavailable" >&2
+
+add_package ADDITIONAL_LIST opencore-amr-devel ||
+echo "Optional package opencore-amr-devel is unavailable" >&2
+
+ACTUAL_LIST=$BASIC_LIST
+
+# Now arrange for optional support libraries
+if [ $ADDITIONAL -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
+fi
+
+if [ $RPMDEPS -ne 0 ]
+then
+ ACTUAL_LIST="$ACTUAL_LIST $RPMDEPS_LIST"
+fi
+
+$PM $PM_OPT install $ACTUAL_LIST $OPTIONS
+
+if [ $ADDITIONAL -eq 0 ]
+then
+ echo -e "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
+fi
+
+if [ $RPMDEPS -eq 0 ]
+then
+ printf "\n*** RPM packages build deps not installed. Rerun with --install-rpm-deps to have them.\n"
+fi
diff --git a/tools/sharkd_shell.py b/tools/sharkd_shell.py
new file mode 100755
index 0000000..144a101
--- /dev/null
+++ b/tools/sharkd_shell.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env python3
+# Convenience shell for using sharkd, including history and tab completion.
+#
+# Copyright (c) 2019 Peter Wu <peter@lekensteyn.nl>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+import argparse
+import contextlib
+import glob
+import json
+import logging
+import os
+import readline
+import selectors
+import signal
+import subprocess
+import sys
+
+_logger = logging.getLogger(__name__)
+
+# grep -Po 'tok_req, "\K\w+' sharkd_session.c
+# Request names understood by sharkd; offered by tab completion.
+all_commands = """
+load
+status
+analyse
+info
+check
+complete
+frames
+tap
+follow
+iograph
+intervals
+frame
+setcomment
+setconf
+dumpconf
+download
+bye
+""".split()
+# Shell-local "!" commands handled by this script, not forwarded to sharkd.
+all_commands += """
+!pretty
+!histfile
+!debug
+""".split()
+
+
+class SharkdShell:
+ def __init__(self, pretty, history_file):
+ self.pretty = pretty
+ self.history_file = history_file
+
+ def ignore_sigint(self):
+ # Avoid terminating the sharkd child when ^C in the shell.
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ def sharkd_process(self):
+ sharkd = 'sharkd'
+ env = os.environ.copy()
+ # Avoid loading user preferences which may trigger deprecation warnings.
+ env['WIRESHARK_CONFIG_DIR'] = '/nonexistent'
+ proc = subprocess.Popen([sharkd, '-'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env,
+ preexec_fn=self.ignore_sigint)
+ banner = proc.stderr.read1().decode('utf8')
+ if banner.strip() != 'Hello in child.':
+ _logger.warning('Unexpected banner: %r', banner)
+ return proc
+
+ def completer(self, text, state):
+ if state == 0:
+ origline = readline.get_line_buffer()
+ line = origline.lstrip()
+ skipped = len(origline) - len(line)
+ startpos = readline.get_begidx() - skipped
+ curpos = readline.get_endidx() - skipped
+ # _logger.debug('Completing: head=%r cur=%r tail=%r',
+ # line[:startpos], line[startpos:curpos], line[curpos:])
+ completions = []
+ if startpos == 0:
+ completions = all_commands
+ elif line[:1] == '!':
+ cmd = line[1:startpos].strip()
+ if cmd == 'pretty':
+ completions = ['jq', 'indent', 'off']
+ elif cmd == 'histfile':
+ # spaces in paths are not supported for now.
+ completions = glob.glob(glob.escape(text) + '*')
+ elif cmd == 'debug':
+ completions = ['on', 'off']
+ completions = [x for x in completions if x.startswith(text)]
+ if len(completions) == 1:
+ completions = [completions[0] + ' ']
+ self.completions = completions
+ try:
+ return self.completions[state]
+ except IndexError:
+ return None
+
+ def wrap_exceptions(self, fn):
+ # For debugging, any exception in the completion function is usually
+ # silently ignored by readline.
+ def wrapper(*args):
+ try:
+ return fn(*args)
+ except Exception as e:
+ _logger.exception(e)
+ raise
+ return wrapper
+
+ def add_history(self, line):
+ # Emulate HISTCONTROL=ignorespace to avoid adding to history.
+ if line.startswith(' '):
+ return
+ # Emulate HISTCONTROL=ignoredups to avoid duplicate history entries.
+ nitems = readline.get_current_history_length()
+ lastline = readline.get_history_item(nitems)
+ if lastline != line:
+ readline.add_history(line)
+
+ def parse_command(self, cmd):
+ '''Converts a user-supplied command to a sharkd one.'''
+ # Support 'foo {...}' as alias for '{"req": "foo", ...}'
+ if cmd[0].isalpha():
+ if ' ' in cmd:
+ req, cmd = cmd.split(' ', 1)
+ else:
+ req, cmd = cmd, '{}'
+ elif cmd[0] == '!':
+ return self.parse_special_command(cmd[1:])
+ else:
+ req = None
+ try:
+ c = json.loads(cmd)
+ if req is not None:
+ c['req'] = req
+ except json.JSONDecodeError as e:
+ _logger.error('Invalid command: %s', e)
+ return
+ if type(c) != dict or not 'req' in c:
+ _logger.error('Missing req key in request')
+ return
+ return c
+
+ def parse_special_command(self, cmd):
+ args = cmd.split()
+ if not args:
+ _logger.warning('Missing command')
+ return
+ if args[0] == 'pretty':
+ choices = ['jq', 'indent']
+ if len(args) >= 2:
+ self.pretty = args[1] if args[1] in choices else None
+ print('Pretty printing is now', self.pretty or 'disabled')
+ elif args[0] == 'histfile':
+ if len(args) >= 2:
+ self.history_file = args[1] if args[1] != 'off' else None
+ print('History is now', self.history_file or 'disabled')
+ elif args[0] == 'debug':
+ if len(args) >= 2 and args[1] in ('on', 'off'):
+ _logger.setLevel(
+ logging.DEBUG if args[1] == 'on' else logging.INFO)
+ print('Debug logging is now',
+ ['off', 'on'][_logger.level == logging.DEBUG])
+ else:
+ _logger.warning('Unsupported command %r', args[0])
+
+ @contextlib.contextmanager
+ def wrap_history(self):
+ '''Loads history at startup and saves history on exit.'''
+ readline.set_auto_history(False)
+ try:
+ if self.history_file:
+ readline.read_history_file(self.history_file)
+ h_len = readline.get_current_history_length()
+ except FileNotFoundError:
+ h_len = 0
+ try:
+ yield
+ finally:
+ new_items = readline.get_current_history_length() - h_len
+ if new_items > 0 and self.history_file:
+ open(self.history_file, 'a').close()
+ readline.append_history_file(new_items, self.history_file)
+
+ def shell_prompt(self):
+ '''Sets up the interactive prompt.'''
+ readline.parse_and_bind("tab: complete")
+ readline.set_completer(self.wrap_exceptions(self.completer))
+ readline.set_completer_delims(' ')
+ return self.wrap_history()
+
+ def read_command(self):
+ while True:
+ try:
+ origline = input('# ')
+ except EOFError:
+ raise
+ except KeyboardInterrupt:
+ print('^C', file=sys.stderr)
+ continue
+ cmd = origline.strip()
+ if not cmd:
+ return
+ self.add_history(origline)
+ c = self.parse_command(cmd)
+ if c:
+ return json.dumps(c)
+
+ def want_input(self):
+ '''Request the prompt to be displayed.'''
+ os.write(self.user_input_wr, b'x')
+
+ def main_loop(self):
+ sel = selectors.DefaultSelector()
+ user_input_rd, self.user_input_wr = os.pipe()
+ self.want_input()
+ with self.sharkd_process() as proc, self.shell_prompt():
+ self.process = proc
+ sel.register(proc.stdout, selectors.EVENT_READ, self.handle_stdout)
+ sel.register(proc.stderr, selectors.EVENT_READ, self.handle_stderr)
+ sel.register(user_input_rd, selectors.EVENT_READ, self.handle_user)
+ interrupts = 0
+ while True:
+ try:
+ events = sel.select()
+ _logger.debug('got events: %r', events)
+ if not events:
+ break
+ for key, mask in events:
+ key.data(key)
+ interrupts = 0
+ except KeyboardInterrupt:
+ print('Interrupt again to abort immediately.', file=sys.stderr)
+ interrupts += 1
+ if interrupts >= 2:
+ break
+ if self.want_command:
+ self.ask_for_command_and_run_it()
+ # Process died? Stop the shell.
+ if proc.poll() is not None:
+ break
+
+ def handle_user(self, key):
+ '''Received a notification that another prompt can be displayed.'''
+ os.read(key.fileobj, 4096)
+ self.want_command = True
+
+ def ask_for_command_and_run_it(self):
+ cmd = self.read_command()
+ if not cmd:
+ # Give a chance for the event loop to run again.
+ self.want_input()
+ return
+ self.want_command = False
+ _logger.debug('Running: %r', cmd)
+ self.process.stdin.write((cmd + '\n').encode('utf8'))
+ self.process.stdin.flush()
+
+ def handle_stdout(self, key):
+ resp = key.fileobj.readline().decode('utf8')
+ _logger.debug('Response: %r', resp)
+ if not resp:
+ raise EOFError
+ self.want_input()
+ resp = resp.strip()
+ if resp:
+ try:
+ if self.pretty == 'jq':
+ subprocess.run(['jq', '.'], input=resp,
+ universal_newlines=True)
+ elif self.pretty == 'indent':
+ r = json.loads(resp)
+ json.dump(r, sys.stdout, indent=' ')
+ print('')
+ else:
+ print(resp)
+ except Exception as e:
+ _logger.warning('Dumping output as-is due to: %s', e)
+ print(resp)
+
+ def handle_stderr(self, key):
+ data = key.fileobj.read1().decode('utf8')
+ print(data, end="", file=sys.stderr)
+
+
+# Command-line arguments for the shell itself (not forwarded to sharkd).
+parser = argparse.ArgumentParser()
+parser.add_argument('--debug', action='store_true',
+                    help='Enable verbose logging')
+parser.add_argument('--pretty', choices=['jq', 'indent'],
+                    help='Pretty print responses (one of: %(choices)s)')
+parser.add_argument('--histfile',
+                    help='Log shell history to this file')
+
+
+def main(args):
+    '''Configure logging and run the interactive shell until EOF.'''
+    logging.basicConfig()
+    _logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
+    shell = SharkdShell(args.pretty, args.histfile)
+    try:
+        shell.main_loop()
+    except EOFError:
+        # ^D or sharkd closing its stdout ends the session; emit a final newline.
+        print('')
+
+
+if __name__ == '__main__':
+ main(parser.parse_args())
diff --git a/tools/test-captures.sh b/tools/test-captures.sh
new file mode 100755
index 0000000..43934d8
--- /dev/null
+++ b/tools/test-captures.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# A little script to run tshark on capture file[s] (potentially ones that
+# failed fuzz testing). Useful because it sets up ulimits and other environment
+# variables for you to ensure things like misused ephemeral memory are caught.
+# (I'm writing this after having my machine hang up for like 15 minutes because
+# I wasn't paying attention while tshark was running on a fuzzed capture and
+# it used all my RAM + swap--which was pretty painful.)
+#
+# Copyright 2012 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+TEST_TYPE="manual"
+# shellcheck source=tools/test-common.sh
+. "$( dirname "$0" )"/test-common.sh || exit 1
+
+# Run under AddressSanitizer ?
+ASAN=$CONFIGURED_WITH_ASAN
+
+while getopts "ab:" OPTCHAR ; do
+ case $OPTCHAR in
+ a) ASAN=1 ;;
+ b) WIRESHARK_BIN_DIR=$OPTARG ;;
+ *) printf "Unknown option: %s\\n" "$OPTARG"
+ esac
+done
+shift $(( OPTIND - 1 ))
+
+if [ $# -lt 1 ]
+then
+ printf "Usage: %s [-b bin_dir] /path/to/file[s].pcap\\n" "$( basename "$0" )"
+ exit 1
+fi
+
+ws_bind_exec_paths
+ws_check_exec "$TSHARK"
+
+# Set some limits to the child processes, e.g. stop it if it's running
+# longer than MAX_CPU_TIME seconds. (ulimit is not supported well on
+# cygwin - it shows some warnings - and the features we use may not all
+# be supported on some UN*X platforms.)
+ulimit -S -t $MAX_CPU_TIME
+
+# Allow core files to be generated
+ulimit -c unlimited
+
+# Don't enable ulimit -v when using ASAN. See
+# https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
+if [ $ASAN -eq 0 ]; then
+ ulimit -S -v $MAX_VMEM
+fi
+
+for file in "$@"
+do
+	echo "Testing file $file..."
+	echo -n " - with tree... "
+	if $TSHARK -nVxr "$file" > /dev/null
+	then
+		echo "OK"
+		echo -n " - without tree... "
+		# Consistently use $TSHARK (set by ws_bind_exec_paths and validated
+		# by ws_check_exec) instead of rebuilding the path by hand.
+		if $TSHARK -nr "$file" > /dev/null
+		then
+			echo "OK"
+			echo -n " - without tree but with a read filter... "
+			if $TSHARK -Yframe -nr "$file" > /dev/null
+			then
+				echo "OK"
+			else
+				echo "Failed"
+				exit 1
+			fi
+		else
+			echo "Failed"
+			exit 1
+		fi
+	else
+		echo "Failed"
+		exit 1
+	fi
+done
diff --git a/tools/test-common.sh b/tools/test-common.sh
new file mode 100755
index 0000000..2656eec
--- /dev/null
+++ b/tools/test-common.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+#
+# Copyright 2013 Gerald Combs <gerald@wireshark.org>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Common variables and functions for fuzz and randpkt tests.
+
+# This needs to point to a 'date' that supports %s.
+if [ -z "$TEST_TYPE" ] ; then
+ echo "TEST_TYPE must be defined by the sourcing script."
+ exit 1
+fi
+
+DATE=/bin/date
+BASE_NAME=$TEST_TYPE-$($DATE +%Y-%m-%d)-$$
+
+# Directory containing binaries. Default: cmake run directory.
+if [ -z "$WIRESHARK_BIN_DIR" ]; then
+ WIRESHARK_BIN_DIR=run
+fi
+
+# Temporary file directory and names.
+# (had problems with this on cygwin, tried TMP_DIR=./ which worked)
+TMP_DIR=/tmp
+if [ "$OSTYPE" == "cygwin" ] ; then
+ TMP_DIR=$(cygpath --windows "$TMP_DIR")
+fi
+TMP_FILE=$BASE_NAME.pcap
+ERR_FILE=$BASE_NAME.err
+
+# Loop this many times (< 1 loops forever)
+MAX_PASSES=0
+
+# These may be set to your liking
+# Stop the child process if it's running longer than x seconds
+MAX_CPU_TIME=600
+# Stop the child process if it's using more than y * 1024 bytes
+MAX_VMEM=1000000
+# Stop the child process if its stack is larger than z * 1024 bytes
+# Windows XP: 2033
+# Windows 7: 2034
+# Mac OS X 10.6: 8192
+# Linux 2.6.24: 8192
+# Solaris 10: 8192
+MAX_STACK=2033
+# Insert z times an error into the capture file (0.02 seems to be a good value to find errors)
+ERR_PROB=0.02
+# Maximum number of packets to fuzz
+MAX_FUZZ_PACKETS=50000
+
+# Call *after* any changes to WIRESHARK_BIN_DIR (e.g., via command-line options)
+function ws_bind_exec_paths() {
+# Tweak the following to your liking. Editcap must support "-E".
+TSHARK="$WIRESHARK_BIN_DIR/tshark"
+EDITCAP="$WIRESHARK_BIN_DIR/editcap"
+CAPINFOS="$WIRESHARK_BIN_DIR/capinfos"
+RANDPKT="$WIRESHARK_BIN_DIR/randpkt"
+
+if [ "$WIRESHARK_BIN_DIR" = "." ]; then
+ export WIRESHARK_RUN_FROM_BUILD_DIRECTORY=1
+fi
+}
+
+# Verify that each argument is an executable file; exits with status 1 if
+# any is missing, so callers fail fast before starting a test run.
+function ws_check_exec() {
+NOTFOUND=0
+for i in "$@" ; do
+    if [ ! -x "$i" ]; then
+        echo "Couldn't find \"$i\""
+        NOTFOUND=1
+    fi
+done
+# Report every missing binary before exiting.
+if [ $NOTFOUND -eq 1 ]; then
+    exit 1
+fi
+}
+
+source "$(dirname "$0")"/debug-alloc.env
+
+# Address Sanitizer options
+export ASAN_OPTIONS=detect_leaks=0
+
+# See if we were configured with gcc or clang's AddressSanitizer.
+CONFIGURED_WITH_ASAN=0
+# If tshark is built with ASAN this will generate an error. We could
+# also pass help=1 and look for help text.
+ASAN_OPTIONS=Invalid_Option_Flag $TSHARK -h > /dev/null 2>&1
+if [ $? -ne 0 ] ; then
+ CONFIGURED_WITH_ASAN=1
+fi
+export CONFIGURED_WITH_ASAN
+
+# Create an error report
+function ws_exit_error() {
+ echo -e "\n ERROR"
+ echo -e "Processing failed. Capture info follows:\n"
+ echo " Input file: $CF"
+ echo " Output file: $TMP_DIR/$TMP_FILE"
+ echo " Pass: $PASS"
+ echo
+
+ # Fill in build information
+ {
+ if [ -n "$CI_COMMIT_BRANCH" ] ; then
+ printf "Branch: %s\\n" "$CI_COMMIT_BRANCH"
+ else
+ printf "Branch: %s\\n" "$(git rev-parse --abbrev-ref HEAD)"
+ fi
+
+ printf "Input file: %s\\n" "$CF"
+
+ if [ -n "$CI_JOB_NAME" ] ; then
+ printf "CI job name: %s, ID: %s\\n" "$CI_JOB_NAME" "$CI_JOB_ID"
+ printf "CI job URL: %s\\n" "$CI_JOB_URL"
+ fi
+
+ printf "Return value: %s\\n" "$RETVAL"
+ printf "Dissector bug: %s\\n" "$DISSECTOR_BUG"
+ if [ "$VALGRIND" -eq 1 ] ; then
+ printf "Valgrind error count: %s\\n" "$VG_ERR_CNT"
+ fi
+
+ printf "Date and time: %s\\n" "$( date --utc )"
+
+ SINCE_HOURS=48
+ if [ -d "${GIT_DIR:-.git}" ] ; then
+ printf "\\nCommits in the last %s hours:\\n" $SINCE_HOURS
+ git --no-pager log --oneline --no-decorate --since=${SINCE_HOURS}hours
+ printf "\\n"
+ fi
+
+ printf "Build host information:\\n"
+ uname -srvm
+ lsb_release -a 2> /dev/null
+ printf "\\n"
+
+ } > "$TMP_DIR/${ERR_FILE}.header"
+
+ # Trim the stderr output if needed
+ ERR_SIZE=$(du -sk $TMP_DIR/$ERR_FILE | awk '{ print $1 }')
+ if [ $ERR_SIZE -ge 5000 ] ; then
+ mv $TMP_DIR/$ERR_FILE $TMP_DIR/${ERR_FILE}.full
+ head -n 2000 $TMP_DIR/${ERR_FILE}.full > $TMP_DIR/$ERR_FILE
+ echo -e "\n\n[ Output removed ]\n\n" >> $TMP_DIR/$ERR_FILE
+ tail -n 2000 $TMP_DIR/${ERR_FILE}.full >> $TMP_DIR/$ERR_FILE
+ rm -f $TMP_DIR/${ERR_FILE}.full
+ fi
+
+ cat $TMP_DIR/${ERR_FILE} >> $TMP_DIR/${ERR_FILE}.header
+ mv $TMP_DIR/${ERR_FILE}.header $TMP_DIR/${ERR_FILE}
+
+ echo -e "stderr follows:\n"
+ cat $TMP_DIR/$ERR_FILE
+
+ exit 255
+}
diff --git a/tools/update-appdata.py b/tools/update-appdata.py
new file mode 100755
index 0000000..b2960ee
--- /dev/null
+++ b/tools/update-appdata.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+#
+# update-appdata.py - Update the <releases/> section of resources/freedesktop/org.wireshark.Wireshark.metainfo.xml.
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+'''Update the <release> tag in resources/freedesktop/org.wireshark.Wireshark.metainfo.xml
+
+According to https://www.freedesktop.org/software/appstream/docs/chap-Metadata.html
+the <releases/> tag in resources/freedesktop/org.wireshark.Wireshark.metainfo.xml should contain release
+information sorted newest to oldest.
+
+As part of our release process, when we create release tag x.y.z, we tag
+the next commit x.y.z+1rc0, e.g.
+
+v3.0.0 2019-02-28 release tag
+v3.0.1rc0 2019-02-28 next commit after v3.0.0
+v3.0.1 2019-04-08 release tag
+v3.0.2rc0 2019-04-08 next commit after v3.0.1
+
+Find a list of release versions based on our most recent rc0 tag and
+update the <releases/> section of resources/freedesktop/org.wireshark.Wireshark.metainfo.xml accordingly.
+Assume that the tag for the most recent release doesn't exist and use
+today's date for it.
+'''
+
+from datetime import date
+import io
+import os.path
+import re
+import subprocess
+import sys
+import time
+
+def main():
+ this_dir = os.path.dirname(__file__)
+ appdata_xml = os.path.join(this_dir, '..', 'resources', 'freedesktop', 'org.wireshark.Wireshark.metainfo.xml')
+
+ try:
+ cur_rc0 = subprocess.run(
+ ['git', 'describe', '--match', 'v*rc0'],
+ check=True,
+ encoding='UTF-8',
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout
+ except Exception:
+ print('Unable to fetch most recent rc0.')
+ raise
+
+ try:
+ ver_m = re.match('v(\d+\.\d+)\.(\d+)rc0.*', cur_rc0)
+ maj_min = ver_m.group(1)
+ next_micro = ver_m.group(2)
+ except Exception:
+ print('Unable to fetch major.minor version.')
+ raise
+
+ # https://www.freedesktop.org/software/appstream/docs/chap-Metadata.html#tag-releases
+ release_tag_fmt = '''\
+ <release version="{0}.{1}" date="{2}">
+ <url>https://www.wireshark.org/docs/relnotes/wireshark-{0}.{1}.html</url>
+ </release>
+'''
+ release_tag_l = [
+ f' <!-- Automatically generated by tools/{os.path.basename(__file__)} -->\n',
+ release_tag_fmt.format(maj_min, next_micro, date.fromtimestamp(time.time()).isoformat())
+ ]
+ for micro in range(int(next_micro) - 1, -1, -1):
+ try:
+ tag_date = subprocess.run(
+ ['git', 'log', '-1', '--format=%cd', '--date=format:%F', 'v{}.{}'.format(maj_min, micro)],
+ check=True,
+ encoding='UTF-8',
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.strip()
+ release_tag_l.append(release_tag_fmt.format(maj_min, micro, tag_date))
+ except Exception:
+ print('Unable to fetch release tag')
+ raise
+
+ ax_lines = []
+ with io.open(appdata_xml, 'r', encoding='UTF-8') as ax_fd:
+ in_releases = False
+ for line in ax_fd:
+ if '</releases>' in line:
+ in_releases = False
+ if in_releases:
+ continue
+ ax_lines.append(line)
+ if '<releases>' in line:
+ in_releases = True
+ ax_lines.extend(release_tag_l)
+
+ with io.open(appdata_xml, 'w', encoding='UTF-8') as ax_fd:
+ ax_fd.write(''.join(ax_lines))
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/update-tools-help.py b/tools/update-tools-help.py
new file mode 100755
index 0000000..f951e8e
--- /dev/null
+++ b/tools/update-tools-help.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+#
+# update-tools-help.py - Update the command line help output in docbook/wsug_src.
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+'''Update tools help
+
+For each file that matches docbook/wsug_src/<command>-<flag>.txt, run
+that command and flag. Update the file if the output differs.
+'''
+
+import argparse
+import difflib
+import glob
+import io
+import os
+import re
+import subprocess
+import sys
+
+def main():
+ parser = argparse.ArgumentParser(description='Update Wireshark tools help')
+ parser.add_argument('-p', '--program-path', nargs=1, default=os.path.curdir, help='Path to Wireshark executables.')
+ args = parser.parse_args()
+
+ this_dir = os.path.dirname(__file__)
+ wsug_src_dir = os.path.join(this_dir, '..', 'docbook', 'wsug_src')
+
+ tools_help_files = glob.glob(os.path.join(wsug_src_dir, '*-*.txt'))
+ tools_help_files.sort()
+ tool_pat = re.compile('(\w+)(-\w).txt')
+
+ # If tshark is present, assume that our other executables are as well.
+ program_path = args.program_path[0]
+ if not os.path.isfile(os.path.join(program_path, 'tshark')):
+ print('tshark not found at {}\n'.format(program_path))
+ parser.print_usage()
+ sys.exit(1)
+
+ null_fd = open(os.devnull, 'w')
+
+ for thf in tools_help_files:
+ thf_base = os.path.basename(thf)
+ m = tool_pat.match(thf_base)
+ thf_command = os.path.join(program_path, m.group(1))
+ thf_flag = m.group(2)
+
+ if not os.path.isfile(thf_command):
+ print('{} not found. Skipping.'.format(thf_command))
+ continue
+
+ with io.open(thf, 'r', encoding='UTF-8') as fd:
+ cur_help = fd.read()
+
+ try:
+ new_help_data = subprocess.check_output((thf_command, thf_flag), stderr=null_fd)
+ except subprocess.CalledProcessError as e:
+ if thf_flag == '-h':
+ raise e
+
+ new_help = new_help_data.decode('UTF-8', 'replace')
+
+ cur_lines = cur_help.splitlines()
+ new_lines = new_help.splitlines()
+ # Assume we have an extended version. Strip it.
+ cur_lines[0] = re.split(' \(v\d+\.\d+\.\d+', cur_lines[0])[0]
+ new_lines[0] = re.split(' \(v\d+\.\d+\.\d+', new_lines[0])[0]
+ diff = list(difflib.unified_diff(cur_lines, new_lines))
+
+ if (len(diff) > 0):
+ print('Updating {} {}'.format(thf_command, thf_flag))
+ with io.open(thf, 'w', encoding='UTF-8') as fd:
+ fd.write(new_help)
+ else:
+ print('{} {} output unchanged.'.format(thf_command, thf_flag))
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/update-tx b/tools/update-tx
new file mode 100755
index 0000000..b91bcfa
--- /dev/null
+++ b/tools/update-tx
@@ -0,0 +1,72 @@
#!/bin/bash
# Copyright 2015, Alexis La Goutte (See AUTHORS file)
#
# Resync translation between Gerrit repo and Transifex
#
# Usage: update-tx [-n]
#   -n  do not push the refreshed translations to Transifex

NO_PUSH="False"
while getopts "n" OPTCHAR ; do
    case $OPTCHAR in
        n) NO_PUSH="True" ;;
        *) printf "Unknown option: %s\\n" "$OPTARG"
    esac
done
shift $((OPTIND - 1))

# Work from the top of the git checkout so the relative paths below resolve.
TOP_LEVEL=$(git rev-parse --show-toplevel)
if ! cd "$TOP_LEVEL" ; then
    echo "Can't change to the top-level source directory."
    exit 1
fi

# Build lupdate's include path: the source root plus every directory under
# ui/qt.
LUPDATE_INCLUDES=(-I .)
while read -r ; do
    LUPDATE_INCLUDES+=(-I "$REPLY")
done < <(find "$TOP_LEVEL/ui/qt" -type d)

# All .cpp, .h, and .ui files under ui/qt
LUPDATE_FILES=()
while read -r ; do
    LUPDATE_FILES+=("$REPLY")
done < <(find ui/qt -name '*.cpp' -o -name '*.h' -o -name '*.ui')

# Add line numbers
for i in ui/qt/*.ts ; do
    lupdate -locations absolute "${LUPDATE_INCLUDES[@]}" "${LUPDATE_FILES[@]}" -ts "$i"
done

# Get last translation for Transifex
tx pull -f

# Regenerate last translation for repo
for i in ui/qt/*.ts ; do
    lupdate -locations absolute "${LUPDATE_INCLUDES[@]}" "${LUPDATE_FILES[@]}" -ts "$i"
done

# Push the latest translation changes to Transifex (unless -n was given)
if [ "$NO_PUSH" != "True" ]; then
    tx push -t -s
fi

# Remove line numbers
for i in ui/qt/*.ts ; do
    lupdate -locations none -no-ui-lines "${LUPDATE_INCLUDES[@]}" "${LUPDATE_FILES[@]}" -ts "$i"
done

#Add new commit with last translation update
#git commit -a -m "TX: Update Translations (sync)"

#Push update translation on Gerrit
#git push origin HEAD:refs/for/master/tx

#
# Editor modelines
#
# Local Variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# ex: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
#
diff --git a/tools/valgrind-wireshark.sh b/tools/valgrind-wireshark.sh
new file mode 100755
index 0000000..c5592e0
--- /dev/null
+++ b/tools/valgrind-wireshark.sh
@@ -0,0 +1,123 @@
#!/bin/bash

# A small script to export some variables and run tshark or wireshark in
# valgrind on a given capture file.
#
# Copyright 2012 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later

# Directory containing tshark or wireshark. Default: cmake run directory.
if [ -z "$WIRESHARK_BIN_DIR" ]; then
    WIRESHARK_BIN_DIR=run
fi

# Use tshark by default
COMMAND=tshark
COMMAND_ARGS="-nr"
COMMAND_ARGS2=
VALID=0
PCAP=""
TOOL="memcheck"

# NOTE(review): 'G' is accepted by the option string but has no case below,
# so it falls through to the unknown-option branch — confirm whether it
# should be removed or given a handler.
while getopts ":2a:b:C:lmnpP:rstTYwcevWdG" OPTCHAR ; do
    case $OPTCHAR in
        2) COMMAND_ARGS="-2 $COMMAND_ARGS" ;;
        a) ADDITIONAL_SUPPRESSION_FILE="$ADDITIONAL_SUPPRESSION_FILE --suppressions=$OPTARG" ;;
        b) WIRESHARK_BIN_DIR=$OPTARG ;;
        C) COMMAND_ARGS="-C $OPTARG $COMMAND_ARGS" ;;
        l) LEAK_CHECK="--leak-check=full" ;;
        m) TOOL="massif" ;;
        n) COMMAND_ARGS="-v"
           VALID=1 ;;
        p) TOOL="callgrind" ;;
        P) TOOL="callgrind"
           CALLGRIND_OUT_FILE="--callgrind-out-file=$OPTARG" ;;
        r) REACHABLE="--show-reachable=yes" ;;
        s) GEN_SUPPRESSIONS="--gen-suppressions=yes" ;;
        t) TRACK_ORIGINS="--track-origins=yes" ;;
        T) COMMAND_ARGS="-Vx $COMMAND_ARGS" ;; # "build the Tree"
        Y) COMMAND_ARGS="-Y frame $COMMAND_ARGS" ;; # Run with a read filter (but no tree)
        w) COMMAND=wireshark
           COMMAND_ARGS="-nr" ;;
        c) COMMAND=capinfos
           COMMAND_ARGS="" ;;
        e) COMMAND=editcap
           COMMAND_ARGS="-E 0.02"
           # We don't care about the output of editcap
           COMMAND_ARGS2="/dev/null" ;;
        v) VERBOSE="--num-callers=256 -v" ;;
        W) COMMAND=wireshark
           COMMAND_ARGS=""
           VALID=1 ;;
        d) COMMAND=dumpcap
           COMMAND_ARGS="-i eth1 -c 3000"
           VALID=1 ;;
        *) printf "Unknown option: %s\\n" "$OPTARG"
           exit ;;
    esac
done
shift $(( OPTIND - 1 ))

# Sanitize parameters
if [ "$COMMAND" != "tshark" ] && [[ $COMMAND_ARGS =~ Vx ]]
then
    printf "\\nYou can't use -T if you're not using tshark\\n\\n" >&2
    exit 1
fi

if [ $# -ge 1 ]
then
    PCAP=$1
    VALID=1
fi

if [ $VALID -eq 0 ]
then
    # Usage fixes: -C takes a configuration profile (it was described as a
    # "binary profile file"), -p selects valgrind's callgrind tool (it was
    # described as "callgrind massif"), and the previously undocumented
    # -P/-Y/-W/-d options are now listed.
    printf "\\nUsage: %s [-2] [-a file] [-b bin_dir] [-c] [-e] [-C config_profile] " "$(basename "$0")"
    printf "[-l] [-m] [-n] [-p] [-P out_file] [-r] [-s] [-t] [-T] [-Y] [-w] [-W] [-d] [-v] /path/to/file.pcap\\n"
    printf "\\n"
    printf "[-2]: run tshark with 2-pass analysis\\n"
    printf "[-a]: additional valgrind suppression file\\n"
    printf "[-b]: tshark binary dir\\n"
    printf "[-e]: use 'editcap -E 0.02' instead of tshark\\n"
    printf "[-c]: use capinfos instead of tshark\\n"
    printf "[-C]: configuration profile to use\\n"
    printf "[-l]: add valgrind option --leak-check=full\\n"
    printf "[-m]: use valgrind massif tool\\n"
    printf "[-n]: print binary version\\n"
    printf "[-p]: use valgrind callgrind tool\\n"
    printf "[-P]: use valgrind callgrind tool, writing output to the given file\\n"
    printf "[-r]: add valgrind option --show-reachable=yes\\n"
    printf "[-s]: add valgrind option --gen-suppressions=yes\\n"
    printf "[-t]: add valgrind option --track-origins=yes\\n"
    printf "[-T]: build the tshark tree (-Vx)\\n"
    printf "[-Y]: run tshark with a trivial read filter (-Y frame)\\n"
    printf "[-w]: use wireshark instead of tshark\\n"
    printf "[-W]: use wireshark without a capture file\\n"
    printf "[-d]: use dumpcap (live capture) instead of tshark\\n"
    printf "[-v]: run in verbose mode (--num-callers=256)\\n"
    exit 1
fi

if [ "$WIRESHARK_BIN_DIR" = "." ]; then
    export WIRESHARK_RUN_FROM_BUILD_DIRECTORY=
fi

# For leak/heap tools, disable pooling allocators so valgrind can see every
# allocation individually.
if [ "$TOOL" != "callgrind" ]; then
    export WIRESHARK_DEBUG_WMEM_OVERRIDE=simple
    export G_SLICE=always-malloc # or debug-blocks
fi

COMMAND="$WIRESHARK_BIN_DIR/$COMMAND"

cmdline="valgrind --suppressions=$( dirname "$0" )/vg-suppressions $ADDITIONAL_SUPPRESSION_FILE \
--tool=$TOOL $CALLGRIND_OUT_FILE $VERBOSE $LEAK_CHECK $REACHABLE $GEN_SUPPRESSIONS $TRACK_ORIGINS \
$COMMAND $COMMAND_ARGS $PCAP $COMMAND_ARGS2"

if [ "$VERBOSE" != "" ];then
    echo -e "\\n$cmdline\\n"
fi

# Intentional word splitting of $cmdline into command + arguments.
# shellcheck disable=SC2086
exec $cmdline > /dev/null
diff --git a/tools/validate-clang-check.sh b/tools/validate-clang-check.sh
new file mode 100755
index 0000000..2d295bc
--- /dev/null
+++ b/tools/validate-clang-check.sh
@@ -0,0 +1,57 @@
#!/bin/bash
# Copyright 2018, Alexis La Goutte (See AUTHORS file)
#
# Verifies last commit with clang-check (like scan-build) for Petri Dish
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#

# C/C++ files touched by the last commit, excluding deleted files.
COMMIT_FILES=$( git diff-index --cached --name-status HEAD^ | grep -v "^D" | cut -f2 | grep "\\.c$\|cpp$" )
CLANG_CHECK_CMD=clang-check

# -c <version> selects a versioned binary, e.g. "-c 15" -> clang-check-15.
while getopts c: OPTCHAR
do
    case $OPTCHAR in
        c)
            CLANG_CHECK_CMD="clang-check-$OPTARG"
            ;;
        *)
            # NOTE(review): exits 0 on an unknown option — presumably so CI
            # does not fail on a usage error; confirm this is intentional.
            echo "Usage: $( basename "$0" ) [ -c <clang version> ]"
            exit 0
    esac
done

for FILE in $COMMIT_FILES; do
    # Skip some special cases
    FILE_BASENAME="$( basename "$FILE" )"
    # If we don't have a build rule for this file, it's probably because we're missing
    # necessary includes.
    for BUILD_RULE_FILE in compile_commands.json build.ninja ; do
        if [[ -f $BUILD_RULE_FILE ]] && ! grep "/$FILE_BASENAME\." $BUILD_RULE_FILE &> /dev/null ; then
            echo "Don't know how to build $FILE_BASENAME. Skipping."
            continue 2
        fi
    done
    # wsutil/file_util.c is Windows-only.
    if test "$FILE_BASENAME" = "file_util.c"
    then
        continue
    fi
    # iLBC: the file is not even compiled when ilbc is not installed
    if test "$FILE_BASENAME" = "iLBCdecode.c"
    then
        continue
    fi
    # This is a template file, not a final '.c' file.
    if echo "$FILE_BASENAME" | grep -Eq "packet-.*-template.c"
    then
        continue
    fi

    # First a plain syntax/warning pass, then the static analyzer.
    "$CLANG_CHECK_CMD" "../$FILE"
    "$CLANG_CHECK_CMD" -analyze "../$FILE"
done
diff --git a/tools/validate-commit.py b/tools/validate-commit.py
new file mode 100755
index 0000000..cf4980b
--- /dev/null
+++ b/tools/validate-commit.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python3
+# Verifies whether commit messages adhere to the standards.
+# Checks the author name and email and invokes the tools/commit-msg script.
+# Copy this into .git/hooks/post-commit
+#
+# Copyright (c) 2018 Peter Wu <peter@lekensteyn.nl>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from __future__ import print_function
+
+import argparse
+import difflib
+import json
+import os
+import subprocess
+import sys
+import tempfile
+import urllib.request
+import re
+
+
# Command-line interface: an optional commit ID plus a commit-msg hook mode.
parser = argparse.ArgumentParser()
parser.add_argument(
    'commit',
    nargs='?',
    default='HEAD',
    help='Commit ID to be checked (default %(default)s)',
)
parser.add_argument('--commitmsg', action='store', help='commit-msg check')
+
+
def print_git_user_instructions():
    """Print the steps for fixing a misconfigured git author name/email."""
    instructions = (
        'To configure your name and email for git, run:',
        '',
        '    git config --global user.name "Your Name"',
        '    git config --global user.email "you@example.com"',
        '',
        'After that update the author of your latest commit with:',
        '',
        '    git commit --amend --reset-author --no-edit',
        '',
    )
    for line in instructions:
        print(line)
+
def verify_name(name):
    """Return False for placeholder author names, True otherwise.

    Names without a space only trigger a warning (they are often an
    accidentally-committed system username), not a rejection.
    """
    normalized = name.lower().strip()
    if normalized in {'unknown', 'root', 'user', 'your name'}:
        return False
    # Warn about names without spaces. Sometimes it is a mistake where the
    # developer accidentally committed using the system username.
    if ' ' not in normalized:
        print("WARNING: name '%s' does not contain a space." % (normalized,))
        print_git_user_instructions()
    return True
+
def verify_email(email):
    """Return True unless the author email looks bogus, local, or reserved."""
    addr = email.lower().strip()
    pieces = addr.split('@')
    if len(pieces) != 2:
        # No single '@' (e.g. a plain domain or "foo[AT]example.com").
        return False
    host = pieces[1]
    tld = host.rsplit('.', 1)[-1]

    rejected = (
        # localhost, localhost.localdomain, my.local etc.
        'local' in tld,
        # Possibly an IP address.
        tld.isdigit(),
        # Forbid code.wireshark.org. Submissions could be submitted by
        # other addresses if one would like to remain anonymous.
        host.endswith('.wireshark.org'),
        # For documentation purposes only.
        host == 'example.com',
        # 'peter-ubuntu32.(none)'
        '(none)' in host,
    )
    return not any(rejected)
+
+
def tools_dir():
    """Return the directory containing the tools scripts.

    When invoked directly (as a .py file) that is simply this file's
    directory. When installed as a git hook (no extension), query git for
    the top level instead of looking for .git so worktrees work too.
    """
    if __file__.endswith('.py'):
        # Assume direct invocation from tools directory
        return os.path.dirname(__file__)
    git_cmd = ['git', 'rev-parse', '--show-toplevel']
    toplevel = subprocess.check_output(git_cmd, universal_newlines=True).strip()
    return os.path.join(toplevel, 'tools')
+
+
def extract_subject(subject):
    '''Extracts the original subject (ignoring any number of Revert prefixes).'''
    text = subject.rstrip('\r\n')
    head, tail = 'Revert "', '"'
    while text.startswith(head) and text.endswith(tail):
        text = text[len(head):-len(tail)]
    return text
+
+
def verify_body(body):
    '''Check the commit message body against the project's formatting rules.

    Prints guidance and returns False when the subject line is over 80
    characters, the blank line after the subject is missing, legacy
    "Bug:"/"Ping-Bug:" trailers are present, or "git stripspace" would
    change the message (i.e. it has whitespace problems). Returns True
    otherwise.
    '''
    # Strip '#'-comment lines (as git would) before inspecting the message.
    bodynocomments = re.sub('^#.*$', '', body, flags=re.MULTILINE)
    old_lines = bodynocomments.splitlines(True)
    is_good = True
    if len(old_lines) >= 2 and old_lines[1].strip():
        print('ERROR: missing blank line after the first subject line.')
        is_good = False
    # Measure the subject without any 'Revert "..."' wrappers.
    cleaned_subject = extract_subject(old_lines[0])
    if len(cleaned_subject) > 80:
        # Note that this check is also invoked by the commit-msg hook.
        print('Warning: keep lines in the commit message under 80 characters.')
        is_good = False
    if not is_good:
        print('''
Please rewrite your commit message to our standards, matching this format:

    component: a very brief summary of the change

    A commit message should start with a brief summary, followed by a single
    blank line and an optional longer description. If the change is specific to
    a single protocol, start the summary line with the abbreviated name of the
    protocol and a colon.

    Use paragraphs to improve readability. Limit each line to 80 characters.

''')
    if any(line.startswith('Bug:') or line.startswith('Ping-Bug:') for line in old_lines):
        sys.stderr.write('''
To close an issue, use "Closes #1234" or "Fixes #1234" instead of "Bug: 1234".
To reference an issue, use "related to #1234" instead of "Ping-Bug: 1234". See
https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically
for details.
''')
        return False

    # Cherry-picking can add an extra newline, which we'll allow.
    cp_line = '\n(cherry picked from commit'
    body = body.replace('\n' + cp_line, cp_line)

    try:
        cmd = ['git', 'stripspace']
        newbody = subprocess.check_output(cmd, input=body, universal_newlines=True)
    except OSError as ex:
        # git is unavailable; fall back to whatever we concluded so far.
        print('Warning: unable to invoke git stripspace: %s' % (ex,))
        return is_good
    if newbody != body:
        new_lines = newbody.splitlines(True)
        diff = difflib.unified_diff(old_lines, new_lines,
                                    fromfile='OLD/.git/COMMIT_EDITMSG',
                                    tofile='NEW/.git/COMMIT_EDITMSG')
        # Clearly mark trailing whitespace (GNU patch supports such comments).
        diff = [
            '# NOTE: trailing space on the next line\n%s' % (line,)
            if len(line) > 2 and line[-2].isspace() else line
            for line in diff
        ]
        print('The commit message does not follow our standards.')
        print('Please rewrite it (there are likely whitespace issues):')
        print('')
        print(''.join(diff))
        return False
    return is_good
+
+
+
def verify_merge_request():
    '''Check that a GitLab merge request allows maintainer edits.

    Returns True when not running in a merge-request CI job, when the
    attribute cannot be fetched, or when "Allow commits from members who
    can merge" is enabled; returns False only when that setting is off.
    '''
    # Not needed if/when https://gitlab.com/gitlab-org/gitlab/-/issues/23308 is fixed.
    gitlab_api_pfx = "https://gitlab.com/api/v4"
    # gitlab.com/wireshark/wireshark = 7898047
    project_id = os.getenv('CI_MERGE_REQUEST_PROJECT_ID')
    # ANSI escapes used to make the error stand out in CI job logs.
    ansi_csi = '\x1b['
    ansi_codes = {
        'black_white': ansi_csi + '30;47m',
        'bold_red': ansi_csi + '31;1m',  # gitlab-runner errors
        'reset': ansi_csi + '0m'
    }
    m_r_iid = os.getenv('CI_MERGE_REQUEST_IID')
    if project_id is None or m_r_iid is None:
        print("This doesn't appear to be a merge request. CI_MERGE_REQUEST_PROJECT_ID={}, CI_MERGE_REQUEST_IID={}".format(project_id, m_r_iid))
        return True

    m_r_url = '{}/projects/{}/merge_requests/{}'.format(gitlab_api_pfx, project_id, m_r_iid)
    req = urllib.request.Request(m_r_url)
    # print('req', repr(req), m_r_url)
    with urllib.request.urlopen(req) as resp:
        resp_json = resp.read().decode('utf-8')

    # print('resp', resp_json)
    m_r_attrs = json.loads(resp_json)
    try:
        if not m_r_attrs['allow_collaboration']:
            print('''\
{bold_red}ERROR:{reset} Please edit your merge request and make sure the setting
    {black_white}✅ Allow commits from members who can merge to the target branch{reset}
is checked so that maintainers can rebase your change and make minor edits.\
'''.format(**ansi_codes))
            return False
    except KeyError:
        sys.stderr.write('This appears to be a merge request, but we were not able to fetch the "Allow commits" status\n')
    return True
+
+
def main():
    """Validate the commit's author and message.

    Returns a process exit code: 0 when everything checks out, 1 when any
    validation (author name/email, message body, merge-request settings)
    fails.
    """
    args = parser.parse_args()
    commit = args.commit

    # If called from commit-msg script, just validate that part and return.
    if args.commitmsg:
        try:
            with open(args.commitmsg) as f:
                return 0 if verify_body(f.read()) else 1
        except Exception:
            # Fixed: the original built this message as
            # print("...'", + args.commitmsg + "'"), applying unary '+' to a
            # str and raising TypeError instead of reporting the problem.
            # Also narrowed the bare 'except:' so KeyboardInterrupt and
            # SystemExit are not swallowed.
            print("Couldn't verify body of message from file '" + args.commitmsg + "'")
            return 1

    if os.getenv('CI_MERGE_REQUEST_EVENT_TYPE') == 'merge_train':
        print("If we were on the love train, people all over the world would be joining hands for this merge request.\nInstead, we're on a merge train so we're skipping commit validation checks. ")
        return 0

    # %h = abbreviated hash, %an/%ae = author name/email, %B = raw body.
    cmd = ['git', 'show', '--no-patch',
           '--format=%h%n%an%n%ae%n%B', commit, '--']
    output = subprocess.check_output(cmd, universal_newlines=True)
    # For some reason there is always an additional LF in the output, drop it.
    if output.endswith('\n\n'):
        output = output[:-1]
    abbrev, author_name, author_email, body = output.split('\n', 3)
    subject = body.split('\n', 1)[0]

    # If called directly (from the tools directory), print the commit that was
    # being validated. If called from a git hook (without .py extension), try to
    # remain silent unless there are issues.
    if __file__.endswith('.py'):
        print('Checking commit: %s %s' % (abbrev, subject))

    exit_code = 0
    if not verify_name(author_name):
        print('Disallowed author name: {}'.format(author_name))
        exit_code = 1

    if not verify_email(author_email):
        print('Disallowed author email address: {}'.format(author_email))
        exit_code = 1

    if exit_code:
        print_git_user_instructions()

    if not verify_body(body):
        exit_code = 1

    if not verify_merge_request():
        exit_code = 1

    return exit_code
+
+
if __name__ == '__main__':
    try:
        sys.exit(main())
    except subprocess.CalledProcessError as ex:
        # A git invocation failed; report it and propagate its exit status.
        print('\n%s' % ex)
        sys.exit(ex.returncode)
    except KeyboardInterrupt:
        # 130 = 128 + SIGINT(2), the conventional exit code for Ctrl-C.
        sys.exit(130)
diff --git a/tools/validate-diameter-xml.sh b/tools/validate-diameter-xml.sh
new file mode 100755
index 0000000..e1937f7
--- /dev/null
+++ b/tools/validate-diameter-xml.sh
@@ -0,0 +1,91 @@
#!/bin/bash

# A small script to run xmllint on the Diameter XML files (after doing some
# fixups to those files).
#
# Copyright 2016 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
# SPDX-License-Identifier: GPL-2.0-or-later

if ! type -p sed > /dev/null
then
    echo "'sed' is needed to run $0." 1>&2
    # Exit cleanly because we don't want pre-commit to fail just because
    # someone doesn't have the tools...
    exit 0
fi
if ! type -p xmllint > /dev/null
then
    echo "'xmllint' is needed to run $0." 1>&2
    # Exit cleanly because we don't want pre-commit to fail just because
    # someone doesn't have the tools...
    exit 0
fi

src_dir="$(dirname "$0")/.."
diameter_dir="$src_dir/resources/protocols/diameter"

# Ideally this would work regardless of our cwd
if [ ! -r "$diameter_dir/dictionary.xml" ]
then
    echo "Couldn't find $diameter_dir/dictionary.xml" 1>&2
    exit 1
fi
if [ ! -r "$diameter_dir/dictionary.dtd" ]
then
    echo "Couldn't find $diameter_dir/dictionary.dtd" 1>&2
    exit 1
fi

if ! tmpdir=$(mktemp -d); then
    echo "Could not create temporary directory" >&2
    exit 1
fi
# Clean up the temporary copies no matter how we exit.
trap 'rm -rf "$tmpdir"' EXIT

# First edit all the AVP names that start with "3GPP" to indicate "TGPP".
# XML doesn't allow ID's to start with a digit but:
# 1) We don't *really* care if it's valid XML
# 2) (but) we do want to use xmllint to find problems
# 3) (and) users see the AVP names. Showing them "TGPP" instead of "3GPP"
#    is annoying enough to warrant this extra work.

# Declare and populate associative exceptions array
declare -A exceptions=(
    ["3GPP"]="TGPP"
    ["5QI"]="FiveQI"
)

# Loop through the exceptions, building the sed options.
# Fixed: quote the array expansion so keys are never word-split or
# glob-expanded (shellcheck SC2068-class issue in the original).
sedopts=
for e in "${!exceptions[@]}"; do
    sedopts="${sedopts}s/name=\"$e/name=\"${exceptions[$e]}/;"
done

# Delete the last character, i.e., the trailing semicolon
sedopts=${sedopts%?}

cp "$diameter_dir/dictionary.dtd" "$tmpdir" || exit 1
for f in "$diameter_dir"/*.xml
do
    sed "${sedopts}" "$f" > "$tmpdir/${f##*/}" || exit 1
done

xmllint --noout --noent --postvalid "$tmpdir/dictionary.xml" &&
    echo "Diameter dictionary is (mostly) valid XML."

#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 8
# tab-width: 8
# indent-tabs-mode: t
# End:
#
# vi: set shiftwidth=8 tabstop=8 noexpandtab:
# :indentSize=8:tabSize=8:noTabs=false:
#
diff --git a/tools/vg-suppressions b/tools/vg-suppressions
new file mode 100644
index 0000000..d778cc3
--- /dev/null
+++ b/tools/vg-suppressions
@@ -0,0 +1,119 @@
+# This file lists suppressions to hide valgrind errors in libraries we don't
+# control. Be careful adding to it, since overly-broad suppressions may hide
+# real errors in Wireshark!
+#
+# This is primarily targeted towards the set of libraries on the fuzz-bot (which
+# runs a valgrind step) but other entries are welcome as long as they are
+# sufficiently commented.
+{
+ Libgcrypt leak (gcry_control)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:gcry_control
+ fun:epan_init
+ fun:main
+}
+
+{
+ Libgcrypt leak (gcry_check_version)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:epan_get_runtime_version_info
+ fun:get_tshark_runtime_version_info
+ fun:get_runtime_version_info
+ fun:main
+}
+
+{
+ Glib Leak (g_get_charset)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:*alloc
+ ...
+ fun:g_get_charset
+}
+
+{
+ Glib Leak (g_get_filename_charsets)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:*alloc
+ ...
+ fun:g_get_filename_charsets
+}
+
+{
+ Glib Leak (g_strerror)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:*alloc
+ ...
+ fun:g_strerror
+}
+
+{
+ Glib leak (g_get_home_dir)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ ...
+ fun:g_get_home_dir
+}
+
+{
+ Glib leak (get_global_random) - requires glib debug symbols
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ ...
+ fun:get_global_random
+ fun:g_random_*
+}
+
+{
+ Glib leak (g_get_user_config_dir)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ ...
+ fun:g_get_user_config_dir
+}
+
+{
+ Glib leak (g_module)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ ...
+ fun:g_module_*
+ ...
+}
+
+{
+ Glib leak (g_private_get) - requires glib debugging symbols installed
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:g_private_get*
+}
+
+{
+ Glib leak (g_log)
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ fun:g_malloc
+ ...
+ fun:g_log_set_handler_full
+}
+
+{
+ Libc and GLib leak (dl_init)
+ Memcheck:Leak
+ fun:*alloc
+ ...
+ fun:call_init.part.0
+ ...
+ fun:_dl_init
+}
diff --git a/tools/win-setup.ps1 b/tools/win-setup.ps1
new file mode 100644
index 0000000..0e2f750
--- /dev/null
+++ b/tools/win-setup.ps1
@@ -0,0 +1,331 @@
+#
+# win-setup - Prepare a Windows development environment for building Wireshark.
+#
+# Copyright 2015 Gerald Combs <gerald@wireshark.org>
+#
+# Wireshark - Network traffic analyzer
+# By Gerald Combs <gerald@wireshark.org>
+# Copyright 1998 Gerald Combs
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+#requires -version 2
+
+# To do:
+# - Use Expand-Archive instead of `cmake -E tar`? That requires PS >= 5.0
+
+<#
+.SYNOPSIS
+Prepare a Windows development environment for building Wireshark.
+
+.DESCRIPTION
+This script downloads and extracts third-party libraries required to compile
+Wireshark.
+
+.PARAMETER Destination
Specifies the destination directory for the downloaded libraries. The path
must contain the pattern "wireshark-*-libs-4.2".
+
+.PARAMETER Platform
Target platform. Must be one of "x64" or "arm64".
+
+.PARAMETER CMakeExecutable
+Specifies the path to the CMake executable, which is used to extract archives.
+
+.INPUTS
+-Destination Destination directory.
+-Platform Target platform.
+-CMakeExecutable Path to CMake.
+
+.OUTPUTS
+A set of libraries required to compile Wireshark on Windows, along with
+their compressed archives.
+A manifest file (library-manifest.xml)
+
+.EXAMPLE
+C:\PS> .\tools\win-setup.ps1 -Destination C:\wireshark-master-64-libs-4.2 -Platform x64
+#>
+
+Param(
+ [Parameter(Mandatory=$true, Position=0)]
+ [ValidateScript({$_ -like "*[/\]wireshark-*-libs-4.2"})]
+ [String]
+ $Destination,
+
+ [Parameter(Mandatory=$true, Position=1)]
+ [ValidateSet("x64", "arm64")]
+ [String]
+ $Platform,
+
+ [Parameter(Mandatory=$false, Position=3)]
+ [ValidateScript({$_ | Test-Path -Type leaf })]
+ [String]
+ $CMakeExecutable = "CMake"
+)
+
+# Variables
+
+# We create and delete files and directories. Bail out at the first sign of
+# trouble instead of trying to catch exceptions everywhere.
+$ErrorActionPreference = "Stop"
+
+# Archive file / SHA256
+$X64Archives = @{
+ "AirPcap/AirPcap_Devpack_4_1_0_1622.zip" = "09d637f28a79b1d2ecb09f35436271a90c0f69bd0a1ee82b803abaaf63c18a69";
+ "bcg729/bcg729-1.0.4-win64ws.zip" = "9a095fda4c39860d96f0c568830faa6651cd17635f68e27aa6de46c689aa0ee2";
+ "brotli/brotli-1.0.9-1-win64ws.zip" = "3f8d24aec8668201994327ff8d8542fe507d1d468a500a1aec50d0415f695aab";
+ "c-ares/c-ares-1.19.1-1-x64-windows-ws.zip" = "cecd95f125a34b6f1d5dfc9586792077cb70820764ffc10d43b0617c1861ae85";
+ "gnutls/gnutls-3.8.2-1-x64-mingw-dynamic-ws.zip" = "6d8d30724e66fdf5370a78b67dbcbbdd00d2c4209cfb6eb43c0ee5a25fe18f1c";
+ "krb5/krb5-1.20.1-1-x64-windows-ws.zip" = "a1e5c582afce6e2f72f0f5bd66df2c0f3cc984532a1da5314fc89d7b7f29cdbf";
+ "libgcrypt/libgcrypt-1.10.2-2-x64-mingw-dynamic-ws.zip" = "477cfce91d791b34df75a5ad83626f1ac2ee147eff7965e52266a4fc3da0f920";
+ "libilbc/libilbc-2.0.2-4-x64-windows-ws.zip" = "4f35a1ffa03c89bf473f38249282a7867b203988d2b6d3d2f0924764619fd5f5";
+ "libmaxminddb/libmaxminddb-1.4.3-1-win64ws.zip" = "ee89944a19ab6e1c873bdecb9fc6205d317c41e6da6ec1d30bc892fddfd143da";
+ "libpcap/libpcap-1.10.1-1-win64ws.zip" = "59f8e0e90a3ab5671df561266ed2b02870a6f8f3a895b80c9db19fea9a12ffb2";
+ "libsmi/libsmi-2021-01-15-2-x64-windows-ws.zip" = "ee8e349427d2a4ee9c18fc6b5839bd6df41685ecba03506179c21425e04f3413";
+ "libssh/libssh-0.10.5-1-x64-mingw-dynamic-ws.zip" = "9c1410d1033a540d118e17938905144956291b4c6ca7a9b7af6959b2632a1aaa";
+ "lua/lua-5.2.4-unicode-win64-vc14.zip" = "e8968d2c7871ce1ea82cbd29ac1b3a2c59d3dec25e483c5e12de85df66f5d928";
+ "lz4/lz4-1.9.3-1-win64ws.zip" = "7129515893ffdc439f4ffe9673c4bc43f9042e910bb2607e68dde6b99a1ab058";
+ "minizip/minizip-1.3-1-x64-windows-ws.zip" = "eb0bb5fffda5328e192d0d7951ff0254e64dcd736d46909fde7db792c1c53bcc";
+ "nghttp2/nghttp2-1.57.0-1-x64-windows-ws.zip" = "94afb12d63d0830dc25e5605c30a6a91fe1f7284c1e6ddfff177d961d5b52bbd";
+ "nghttp3/nghttp3-1.0.0-1-x64-windows-ws.zip" = "219a0024b79627c00fa1c134085678edbfac72b7b5eaf45db84f36e2553e1638";
+ "opus/opus-1.3.1-3-win64ws.zip" = "1f7a55a6d2d7215dffa4a43bca8ca05024bd4ba1ac3d0d0c405fd38b09cc2205";
+ "sbc/sbc-2.0-1-x64-windows-ws.zip" = "d1a58f977dcffa168b11b280bd10228191582d263b7c901e50cde7c1c43d9c04";
+ "snappy/snappy-1.1.9-1-win64ws.zip" = "fa907724be019bcc55d27ebe88257ba8898b5c38b719099b8164ac78600d81cc";
+ "spandsp/spandsp-0.0.6-5-x64-windows-ws.zip" = "cbb18310876ec6f081662253a2d37f5174ac60c58b0b7cd6759852fbcfaa7d7f";
+ "speexdsp/speexdsp-1.21.1-1-win64ws.zip" = "d36db62e64ffaee38d9f607bef07d3778d8957ad29757f3eba169eb135f1a4e5";
+ "vcpkg-export/vcpkg-export-20231017-1-x64-windows-ws.zip" = "fc5ea8110ce5e905e3342197481a805b6c2c87e273b0370bcc6a5964316c20ee";
+ "WinSparkle/WinSparkle-0.8.0-4-gb320893.zip" = "3ae42326bcd34594bc21b1e7948863a839ee76e87d9f4cf6b59b9d9f9a083881";
+ "zstd/zstd-1.5.2-1-win64ws.zip" = "d920afe636951cfcf144824d9c075d1f2c13387f4739152fe185fd9c09fc58f2";
+}
+
+$Arm64Archives = @{
+ "bcg729/bcg729-1.1.1-1-win64armws.zip" = "f4d76b9acf0d0e12e87a020e9805d136a0e8775e061eeec23910a10828153625";
+ "brotli/brotli-1.0.9-1-win64armws.zip" = "5ba1b62ebc514d55c3eae85a00ff107e587b6e7cb1275e2d33fcddcd49f8e2af";
+ "c-ares/c-ares-1.19.1-1-arm64-windows-ws.zip" = "ec13f3ca07c1916872d08d3abaec3eaeac266dc2befdbc15d5a1317f2a1dbe3c";
+ "gnutls/gnutls-3.8.2-1-arm64-mingw-dynamic-ws.zip" = "7d47762a46f9d8985deccfbf600b57f7a8076f12bfc722dcd57923ea2812a956";
+ "krb5/krb5-1.20.1-1-arm64-windows-ws.zip" = "6afe3185ea7621224544683a89d7c724d32bef6f1b552738dbc713ceb2151437";
+ "libgcrypt/libgcrypt-1.10.2-2-arm64-mingw-dynamic-ws.zip" = "cd42fa2739a204e129d655e1b0dda83ceb27399812b8b2eccddae4a9ecd8d0ce";
+ "libilbc/libilbc-2.0.2-4-arm64-windows-ws.zip" = "00a506cc1aac8a2e31856e463a555d899b5a6ccf376485a124104858ccf0be6d";
+ "libmaxminddb/libmaxminddb-1.4.3-1-win64armws.zip" = "9996327f301cb4a4de797bc024ad0471acd95c1850a2afc849c57fcc93360610";
+ "libpcap/libpcap-1.10.1-1-win64armws.zip" = "c0c5d42d96cc407303d71ba5afd06615c660228fa2260d7ecbc8453140529137";
+ "libsmi/libsmi-2021-01-15-2-arm64-windows-ws.zip" = "3f5b7507a19436bd6494e2cbc89856a5980950f931f7cf0d637a8e764914d015";
+ "libssh/libssh-0.10.5-1-arm64-mingw-dynamic-ws.zip" = "b99c9573d9a30ba2898ce6ac131b23b1699009761d5dbe351a1a958cca0f85ca";
+ "lua/lua-5.2.4-unicode-arm64-windows-vc17.zip" = "5848e23352e35b69f4cdabaca3754c2c5fb11e5461bb92b71e059e558e4b2d12";
+ "lz4/lz4-1.9.4-1-win64armws.zip" = "59a3ed3f9161be7614a89afd2ca21c43f26dd916afd4aa7bfdc4b148fb10d485";
+ "minizip/minizip-1.3-1-arm64-windows-ws.zip" = "e5b35d064ff10f1ab1ee9193a0965fd1eb3d1e16eab5a905ab3fea9b14fb5afe";
+ "nghttp2/nghttp2-1.57.0-1-arm64-windows-ws.zip" = "3f264dc0ccb48850e07ec136dede5b0ad0e39e31ff2d2e6ab215348ce2d9e570";
+ "nghttp3/nghttp3-1.0.0-1-arm64-windows-ws.zip" = "cf53090b514d3193d75b81562235ae1e7a8a9d462e37f515f9a9a29c6b469236";
+ "opus/opus-1.4-1-win64armws.zip" = "51d10381360d5691b2022dde5b284266d9b0ce9a3c9bd7e86f9a4ff1a4f7d904";
+ "sbc/sbc-2.0-1-arm64-windows-ws.zip" = "83cfe4a8b6fa5bae253ecacc1c02e6e4c61b4ad9ad0e5e63f0f30422fb6eac96";
+ "snappy/snappy-1.1.9-1-win64armws.zip" = "f3f6ec841024d18df06934ff70f44068a4e8f1008eca1f363257645647f74d4a";
+ "spandsp/spandsp-0.0.6-5-arm64-windows-ws.zip" = "fdf01e3c33e739ff9399b7d42cd8230c97cb27ce51865a0f06285a8f68206b6c";
+ "speexdsp/speexdsp-1.2.1-1-win64armws.zip" = "1759a9193065f27e50dd79dbb1786d24031ac43ccc48c40dca46d8a48552e3bb";
+ "vcpkg-export/vcpkg-export-20231017-1-arm64-windows-ws.zip" = "2752e2e059ea13e8b4e1ef5f8892b81b745da6838e513bd6e4e548d290d9f472";
+ "WinSparkle/WinSparkle-0.8.0-4-gb320893.zip" = "3ae42326bcd34594bc21b1e7948863a839ee76e87d9f4cf6b59b9d9f9a083881";
+ "zstd/zstd-1.5.5-1-win64armws.zip" = "0e448875380cc5d5f5539d994062201bfa564e4a27466bc3fdfec84d9008e51d";
+}
+
+# Subdirectory to extract an archive to
+$ArchivesSubDirectory = @{
+ "AirPcap/AirPcap_Devpack_4_1_0_1622.zip" = "AirPcap_Devpack_4_1_0_1622";
+}
+
+# Plain file downloads
+
+$X64Files = @{
+ # Nothing here
+}
+
+$Arm64Files = @{
+ # Nothing here
+}
+
+$Archives = $X64Archives;
+$Files = $X64Files;
+
+if ($Platform -eq "arm64") {
+ $Archives = $Arm64Archives;
+ $Files = $Arm64Files;
+}
+
+$CurrentManifest = $Archives + $Files
+
+$CleanupItems = @(
+ "bcg729-1.0.4-win??ws"
+ "brotli-1.0.*-win??ws"
+ "c-ares-1.9.1-1-win??ws"
+ "c-ares-1.1*-win??ws"
+ "gnutls-3.?.*-*-win??ws"
+ "krb5-*-win??ws"
+ "libgcrypt-*-win??ws"
+ "libilbc-2.0.2-3-win??ws"
+ "libmaxminddb-1.4.3-1-win??ws"
+ "libpcap-1.9.1-1-win??ws"
+ "libsmi-0.4.8"
+ "libsmi-svn-40773-win??ws"
+ "libssh-0.*-win??ws"
+ "libxml2-*-win??ws"
+ "lua5.1.4"
+ "lua5.2.?"
+ "lua5.2.?-win??"
+ "lua-5.?.?-unicode-win??-vc??"
+ "lz4-*-win??ws"
+ "MaxMindDB-1.3.2-win??ws"
+ "minizip-*-win??ws"
+ "nghttp2-*-win??ws"
+ "opus-1.3.1-?-win??ws"
+ "pcre2-*-win??ws"
+ "sbc-1.3-win??ws"
+ "snappy-1.1.*-win??ws"
+ "spandsp-0.0.6-win??ws"
+ "speexdsp-*-win??ws"
+ "user-guide"
+ "vcpkg-export-*-win??ws"
+ "zstd-*-win??ws"
+ "AirPcap_Devpack_4_1_0_1622"
+ "WinSparkle-0.3-44-g2c8d9d3-win??ws"
+ "WinSparkle-0.5.?"
+ "current-tag.txt"
+ "library-manifest.xml"
+)
+
+# The dev-libs site repository is at
+# https://gitlab.com/wireshark/wireshark-development-libraries
+[Uri] $DownloadPrefix = "https://dev-libs.wireshark.org/windows/packages"
+$proxy = $null
+
+# Functions
+
# Verifies the contents of a file against a SHA256 hash.
# Returns success (0) if the file exists and verifies.
# Returns error (1) if the file does not exist.
# Returns error (2) if the integrity check fails (an error is also printed).
function VerifyIntegrity($filename, $hash) {
    # Use absolute path because PS and .NET may have different working directories.
    $filepath = Convert-Path -Path $filename -ErrorAction SilentlyContinue
    if (-not ($filepath)) {
        return 1
    }
    # may throw due to permission error, I/O error, etc.
    try { $stream = [IO.File]::OpenRead($filepath) } catch { throw }

    try {
        $sha256 = New-Object Security.Cryptography.SHA256Managed
        $binaryHash = $sha256.ComputeHash([IO.Stream]$stream)
        # BitConverter yields "AA-BB-…"; strip the dashes and lowercase both
        # sides so the comparison is case-insensitive.
        $hexHash = ([System.BitConverter]::ToString($binaryHash) -Replace "-").ToLower()
        $hash = $hash.ToLower()
        if ($hexHash -ne $hash) {
            Write-Warning "$($filename): computed file hash $hexHash did NOT match $hash"
            return 2
        }
        return 0
    } finally {
        # Always release the file handle, even when hashing throws.
        $stream.Close()
    }
}
+
# Downloads a file and checks its integrity. If a corrupt file already exists,
# it is removed and re-downloaded. Succeeds only if the SHA256 hash matches.
function DownloadFile($fileName, $fileHash, [Uri] $fileUrl = $null) {
    # Default to the project download site unless an explicit URL was given.
    if ([string]::IsNullOrEmpty($fileUrl)) {
        $fileUrl = "$DownloadPrefix/$fileName"
    }
    $destinationFile = "$Destination\" + [string](Split-Path -Leaf $fileName)
    if (Test-Path $destinationFile -PathType 'Leaf') {
        if ((VerifyIntegrity $destinationFile $fileHash) -ne 0) {
            Write-Output "$fileName is corrupt, removing and retrying download."
            Remove-Item $destinationFile
        } else {
            Write-Output "$fileName already there; not retrieving."
            return
        }
    }

    # Initialize the system proxy once and cache it at script scope for
    # subsequent downloads.
    if (-not ($Script:proxy)) {
        $Script:proxy = [System.Net.WebRequest]::GetSystemWebProxy()
        $Script:proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials
    }

    Write-Output "Downloading $fileUrl into $Destination"
    $webClient = New-Object System.Net.WebClient
    $webClient.proxy = $Script:proxy
    $webClient.DownloadFile($fileUrl, "$destinationFile")
    Write-Output "Verifying $destinationFile"
    if ((VerifyIntegrity $destinationFile $fileHash) -ne 0) {
        Write-Output "Download is corrupted, aborting!"
        exit 1
    }
}
+
# Downloads an archive (verifying its hash via DownloadFile) and extracts it
# into $Destination, or into $Destination\$subDir when a subdirectory is given.
function DownloadArchive($fileName, $fileHash, $subDir) {
    DownloadFile $fileName $fileHash
    $archiveFile = "$Destination\" + [string](Split-Path -Leaf $fileName)
    $archiveDir = "$Destination\$subDir"
    if ($subDir -and -not (Test-Path $archiveDir -PathType 'Container')) {
        New-Item -ItemType Directory -Path $archiveDir > $null
    }

    # Use CMake's built-in tar so we don't depend on PS 5.0's Expand-Archive.
    $activity = "Extracting into $($archiveDir)"
    Write-Progress -Activity "$activity" -Status "Extracting $archiveFile using CMake ..."
    Push-Location "$archiveDir"
    & "$CMakeExecutable" -E tar xf "$archiveFile" 2>&1 | Set-Variable -Name CMakeOut
    $cmStatus = $LASTEXITCODE
    Pop-Location
    Write-Progress -Activity "$activity" -Status "Done" -Completed
    # Only surface CMake's output when extraction failed.
    if ($cmStatus -gt 0) {
        Write-Output $CMakeOut
        exit 1
    }
}
+
# On with the show

# Make sure $Destination exists and do our work there.
if ( -not (Test-Path $Destination -PathType 'Container') ) {
    New-Item -ItemType 'Container' "$Destination" > $null
}

# CMake's file TO_NATIVE_PATH passive-aggressively omits the drive letter.
Set-Location "$Destination"
$Destination = $(Get-Item -Path ".\")
Write-Output "Working in $Destination"

# Check our last known state
$destinationManifest = @{ "INVALID" = "INVALID" }
$manifestFile = "library-manifest.xml"
# NOTE(review): $Force is referenced here but never declared as a script
# parameter, so it is always $null and the saved manifest is always honored
# — confirm whether a -Force switch parameter was intended.
if ((Test-Path $manifestFile -PathType 'Leaf') -and -not ($Force)) {
    $destinationManifest = Import-Clixml $manifestFile
}
+
# Renders a manifest hashtable as a sorted list of "key : value" strings so
# two manifests can be compared line-by-line with Compare-Object.
function ManifestList($manifestHash) {
    # Fixed: use the full cmdlet name instead of the "Sort" alias — aliases
    # are discouraged in scripts and "Sort" is not defined on non-Windows
    # PowerShell, where it would resolve to /usr/bin/sort or fail.
    $manifestHash.keys | Sort-Object | ForEach-Object { "$_ : $($manifestHash[$_])" }
}
+
# If the on-disk manifest differs from the expected one, wipe the stale
# library directories and re-download; otherwise there is nothing to do.
if (Compare-Object -ReferenceObject (ManifestList($destinationManifest)) -DifferenceObject (ManifestList($CurrentManifest))) {
    Write-Output "Current library manifest not found. Refreshing."
    $activity = "Removing directories"
    foreach ($oldItem in $CleanupItems) {
        if (Test-Path $oldItem) {
            Write-Progress -Activity "$activity" -Status "Removing $oldItem"
            Remove-Item -force -recurse $oldItem
        }
    }
    Write-Progress -Activity "$activity" -Status "Done" -Completed
} else {
    Write-Output "Current library manifest found. Skipping download."
    exit 0
}

# Download files
foreach ($item in $Files.GetEnumerator() | Sort-Object -property key) {
    DownloadFile $item.Name $item.Value
}

# Download and extract archives
foreach ($item in $Archives.GetEnumerator() | Sort-Object -property key) {
    $subDir = $ArchivesSubDirectory[$item.Name]
    DownloadArchive $item.Name $item.Value $subDir
}

# Save our last known state so the next run can skip all of the above.
$CurrentManifest | Export-Clixml -Path $manifestFile -Encoding utf8
diff --git a/tools/wireshark_be.py b/tools/wireshark_be.py
new file mode 100755
index 0000000..02fcf5a
--- /dev/null
+++ b/tools/wireshark_be.py
@@ -0,0 +1,260 @@
+# -*- python -*-
+#
+# File : wireshark_be.py
+#
+# Author : Frank Singleton (frank.singleton@ericsson.com)
+#
+# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
+#
+# This file is a backend to "omniidl", used to generate "Wireshark"
+# dissectors from IDL descriptions. The output language generated
+# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
+#
+# Please see packet-giop.h in Wireshark distro for API description.
+# Wireshark is available at https://www.wireshark.org/
+#
+# Omniidl is part of the OmniOrb distribution, and is available at
+# http://omniorb.sourceforge.net
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+# Description:
+#
+# Omniidl Back-end which parses an IDL data structure provided by the frontend
+# and generates packet-idl-xxx.[ch] for compiling as a dissector in Wireshark.
+#
+#
+# Strategy.
+#
+# Crawl all the way down all branches until I hit "Operation", "Enum", "Attribute",
+# "Struct" and "Union" nodes. Then store these nodes in lists.
+#
+# Pass these lists (via an object ref) to the src code
+# generator (wireshark_gen) class and let it do the hard work !
+#
+#
+# Don't forget structs can contain embedded structs etc .. so don't forget
+# to peek inside and check :-)
+
+
+"""Wireshark IDL compiler back-end."""
+
+from __future__ import print_function
+
+import string
+import sys
+from os import path
+
+from omniidl import idlast, idltype, output
+
+from wireshark_gen import wireshark_gen_C
+
+
+class WiresharkVisitor:
+ """This class finds the "Operation" nodes ,Enum Nodes, "Attribute" nodes, Struct Nodes
+ and Union Nodes. Then it hands them off to an instance of the source code generator
+ class "wireshark_gen" """
+
+ def __init__(self, st, debug=False):
+ self.DEBUG = debug
+ self.st = st
+ self.oplist = [] # list of operation nodes
+ self.enlist = [] # list of enum nodes
+ self.atlist = [] # list of attribute nodes
+ self.stlist = [] # list of struct nodes
+ self.unlist = [] # list of union nodes
+
+ def visitAST(self, node):
+ if self.DEBUG:
+ print("XXX visitAST() node = ", node)
+
+ for n in node.declarations():
+ if isinstance(n, idlast.Module):
+ self.visitModule(n)
+ if isinstance(n, idlast.Interface):
+ self.visitInterface(n)
+ if isinstance(n, idlast.Operation):
+ self.visitOperation(n)
+ if isinstance(n, idlast.Attribute):
+ self.visitAttribute(n)
+ if isinstance(n, idlast.Enum):
+ self.visitEnum(n)
+ if isinstance(n, idlast.Struct):
+ self.visitStruct(n)
+ if isinstance(n, idlast.Union):
+ self.visitUnion(n)
+
+ # Check for Typedef structs and unions
+
+ if isinstance(n, idlast.Typedef):
+ self.visitTypedef(n) # who are you ?
+
+ def visitModule(self, node):
+ if self.DEBUG:
+ print("XXX visitModule() node = ", node)
+
+ for n in node.definitions():
+ if isinstance(n, idlast.Module):
+ self.visitModule(n)
+ if isinstance(n, idlast.Interface):
+ self.visitInterface(n)
+ if isinstance(n, idlast.Operation):
+ self.visitOperation(n)
+ if isinstance(n, idlast.Attribute):
+ self.visitAttribute(n)
+ if isinstance(n, idlast.Enum):
+ self.visitEnum(n)
+ if isinstance(n, idlast.Struct):
+ self.visitStruct(n)
+ if isinstance(n, idlast.Union):
+ self.visitUnion(n)
+
+ # Check for Typedef structs and unions
+
+ if isinstance(n, idlast.Typedef):
+ self.visitTypedef(n) # who are you ?
+
+ def visitInterface(self, node):
+ if self.DEBUG:
+ print("XXX visitInterface() node = ", node)
+
+ for c in node.callables():
+ if isinstance(c, idlast.Operation):
+ self.visitOperation(c)
+ if isinstance(c, idlast.Attribute):
+ self.visitAttribute(c)
+
+ for d in node.contents():
+ if isinstance(d, idlast.Enum):
+ self.visitEnum(d)
+
+ if isinstance(d, idlast.Struct):
+ self.visitStruct(d)
+
+ if isinstance(d, idlast.Union):
+ self.visitUnion(d)
+
+ # Check for Typedef structs and unions
+
+ if isinstance(d, idlast.Typedef):
+ self.visitTypedef(d) # who are you ?
+
+ def visitOperation(self, opnode):
+ """populates the operations node list "oplist" """
+ if opnode not in self.oplist:
+ self.oplist.append(opnode) # store operation node
+
+ def visitAttribute(self, atnode):
+ """populates the attribute node list "atlist" """
+ if atnode not in self.atlist:
+ self.atlist.append(atnode) # store attribute node
+
+ def visitEnum(self, enode):
+ """populates the Enum node list "enlist" """
+ if enode not in self.enlist:
+ self.enlist.append(enode) # store enum node if unique
+
+ def visitTypedef(self, td):
+ """Search to see if its a typedef'd struct, union, or enum
+
+ eg: typdef enum colors {red, green, blue } mycolors;
+ """
+
+ d = td.aliasType() # get Type, possibly Declared
+ if isinstance(d, idltype.Declared):
+ self.visitDeclared(d)
+
+ def visitDeclared(self, d):
+ """Search to see if its a struct, union, or enum"""
+ if isinstance(d, idltype.Declared):
+ sue = d.decl() # grab the struct or union or enum
+
+ if isinstance(sue, idlast.Struct):
+ self.visitStruct(sue)
+ if isinstance(sue, idlast.Union):
+ self.visitUnion(sue)
+ if isinstance(sue, idlast.Enum):
+ self.visitEnum(sue)
+
+ def visitStruct(self, stnode):
+ # populates the struct node list "stlist"
+ # and checks its members also
+ if stnode not in self.stlist:
+ self.stlist.append(stnode) # store struct node if unique and avoid recursive loops
+ # if we come across recursive structs
+
+ for m in stnode.members(): # find embedded struct definitions within this
+ mt = m.memberType()
+ if isinstance(mt, idltype.Declared):
+ self.visitDeclared(mt) # if declared, then check it out
+
+ def visitUnion(self, unnode):
+ # populates the struct node list "unlist"
+ # and checks its members also
+ if unnode not in self.unlist:
+ self.unlist.append(unnode) # store union node if unique
+
+ if unnode.constrType(): # enum defined within switch type
+ if isinstance(unnode.switchType(), idltype.Declared):
+ self.visitDeclared(unnode.switchType())
+
+ for c in unnode.cases():
+ ct = c.caseType()
+ if isinstance(ct, idltype.Declared):
+ self.visitDeclared(ct) # if declared, then check it out
+
+
+def run(tree, args):
+
+ DEBUG = "debug" in args
+ AGGRESSIVE = "aggressive" in args
+
+ st = output.Stream(sys.stdout, 4) # set indent for stream
+ ev = WiresharkVisitor(st, DEBUG) # create visitor object
+
+ ev.visitAST(tree) # go find some operations
+
+ # Grab name of main IDL file being compiled.
+ #
+ # Assumption: Name is of the form abcdefg.xyz (eg: CosNaming.idl)
+
+ fname = path.basename(tree.file()) # grab basename only, don't care about path
+ nl = fname.split(".")[0] # split name of main IDL file using "." as separator
+ # and grab first field (eg: CosNaming)
+
+ if DEBUG:
+ for i in ev.oplist:
+ print("XXX - Operation node ", i, " repoId() = ", i.repoId())
+ for i in ev.atlist:
+ print("XXX - Attribute node ", i, " identifiers() = ", i.identifiers())
+ for i in ev.enlist:
+ print("XXX - Enum node ", i, " repoId() = ", i.repoId())
+ for i in ev.stlist:
+ print("XXX - Struct node ", i, " repoId() = ", i.repoId())
+ for i in ev.unlist:
+ print("XXX - Union node ", i, " repoId() = ", i.repoId())
+
+ # create a C generator object
+ # and generate some C code
+
+ eg = wireshark_gen_C(ev.st,
+ nl.upper(),
+ nl.lower(),
+ nl.capitalize() + " Dissector Using GIOP API",
+ debug=DEBUG,
+ aggressive=AGGRESSIVE)
+
+ eg.genCode(ev.oplist, ev.atlist, ev.enlist, ev.stlist, ev.unlist) # pass them onto the C generator
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/wireshark_gen.py b/tools/wireshark_gen.py
new file mode 100755
index 0000000..cf16f1f
--- /dev/null
+++ b/tools/wireshark_gen.py
@@ -0,0 +1,2789 @@
+# -*- python -*-
+#
+# wireshark_gen.py (part of idl2wrs)
+#
+# Author : Frank Singleton (frank.singleton@ericsson.com)
+#
+# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
+#
+# This file is a backend to "omniidl", used to generate "Wireshark"
+# dissectors from CORBA IDL descriptions. The output language generated
+# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
+#
+# Please see packet-giop.h in Wireshark distro for API description.
+# Wireshark is available at https://www.wireshark.org/
+#
+# Omniidl is part of the OmniOrb distribution, and is available at
+# http://omniorb.sourceforge.net
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+# Description:
+#
+# Omniidl Back-end which parses an IDL list of "Operation" nodes
+# passed from wireshark_be2.py and generates "C" code for compiling
+# as a dissector for Wireshark.
+#
+#
+# Strategy (sneaky but ...)
+#
+# problem: I don't know what variables to declare until AFTER the helper functions
+# have been built, so ...
+#
+# There are 2 passes through genHelpers, the first one is there just to
+# make sure the fn_hash data struct is populated properly.
+# The second pass is the real thing, generating code and declaring
+# variables (from the 1st pass) properly.
+
+
+"""Wireshark IDL compiler back-end."""
+
+from __future__ import print_function
+
+import collections
+import tempfile
+import string
+import random
+
+from omniidl import idlast, idltype, idlutil, output
+
+
+# Output class, generates "C" src code for the sub-dissector
+#
+# in:
+#
+#
+# self - me
+# st - output stream
+# node - a reference to an Operations object.
+# name - scoped name (Module::Module::Interface:: .. ::Operation)
+
+
+
+# TODO -- FS
+#
+# 1. generate hf[] data for searchable fields (but what is searchable?) [done, could be improved]
+# 2. add item instead of add_text() [done]
+# 3. sequence handling [done]
+# 4. User Exceptions [done]
+# 5. Fix arrays, and structs containing arrays [done]
+# 6. Handle pragmas.
+# 7. Exception can be common to many operations, so handle them outside the
+# operation helper functions [done]
+# 8. Automatic variable declaration [done, improve, still get some collisions.add variable delegator function ]
+# For example, multidimensional arrays.
+# 9. wchar and wstring handling [giop API needs improving]
+# 10. Support Fixed [done]
+# 11. Support attributes (get/set) [started, needs language mapping option, perhaps wireshark GUI option
+# to set the attribute function prefix or suffix ? ] For now the prefix is "_get" and "_set"
+# eg: attribute string apple => _get_apple and _set_apple
+#
+# 12. Implement IDL "union" code [done]
+# 13. Implement support for plugins [done]
+# 14. Don't generate code for empty operations (cf: exceptions without members)
+# 15. Generate code to display Enums numerically and symbolically [done]
+# 16. Place structs/unions in subtrees [done]
+# 17. Recursive struct and union handling [done]
+# 18. Improve variable naming for display (eg: structs, unions etc) [done]
+#
+# Also test, Test, TEST
+
+
+# Strategy:
+# For every operation and attribute do
+# For return val and all parameters do
+# find basic IDL type for each parameter
+# output get_CDR_xxx
+# output exception handling code
+# output attribute handling code
+
+
+class wireshark_gen_C:
+
+ # Some string constants for our templates
+ c_u_octet8 = "guint64 u_octet8;"
+ c_s_octet8 = "gint64 s_octet8;"
+ c_u_octet4 = "guint32 u_octet4;"
+ c_s_octet4 = "gint32 s_octet4;"
+ c_u_octet2 = "guint16 u_octet2;"
+ c_s_octet2 = "gint16 s_octet2;"
+ c_u_octet1 = "guint8 u_octet1;"
+ c_s_octet1 = "gint8 s_octet1;"
+
+ c_float = "gfloat my_float;"
+ c_double = "gdouble my_double;"
+
+ c_seq = "const gchar *seq = NULL;" # pointer to buffer of gchars
+ c_i = "guint32 i_" # loop index
+ c_i_lim = "guint32 u_octet4_loop_" # loop limit
+ c_u_disc = "guint32 disc_u_" # unsigned int union discriminant variable name (enum)
+ c_s_disc = "gint32 disc_s_" # signed int union discriminant variable name (other cases, except Enum)
+
+ def __init__(self, st, protocol_name, dissector_name, description, debug=False, aggressive=False):
+ self.DEBUG = debug
+ self.AGGRESSIVE = aggressive
+
+ self.st = output.Stream(tempfile.TemporaryFile(mode="w"), 4) # for first pass only
+
+ self.st_save = st # where 2nd pass should go
+ self.protoname = protocol_name # Protocol Name (eg: ECHO)
+ self.dissname = dissector_name # Dissector name (eg: echo)
+ self.description = description # Detailed Protocol description (eg: Echo IDL Example)
+ self.exlist = [] # list of exceptions used in operations.
+ #self.curr_sname # scoped name of current opnode or exnode I am visiting, used for generating "C" var declares
+ self.fn_hash = {} # top level hash to contain key = function/exception and val = list of variable declarations
+ # ie a hash of lists
+ self.fn_hash_built = 0 # flag to indicate the 1st pass is complete, and the fn_hash is correctly
+ # populated with operations/vars and exceptions/vars
+
+ def genCode(self, oplist, atlist, enlist, stlist, unlist): # operation, attribute, enums, struct and union lists
+ """Main entry point, controls sequence of generated code."""
+
+ # sneaky .. call it now, to populate the fn_hash
+ # so when I come to that exception later, I have the variables to
+ # declare already.
+
+ # need to reverse the lists, so that the functions of the current IDL
+ # is properly processed, otherwise the first name wise declaration of
+ # an include is taken for the function generation. Same counts for
+ # structs and unions.
+ oplist = oplist[::-1]
+ stlist = stlist[::-1]
+ enlist = enlist[::-1]
+ unlist = unlist[::-1]
+
+
+ self.genHelpers(oplist, stlist, unlist)
+ self.genExceptionHelpers(oplist)
+ self.genAttributeHelpers(atlist)
+
+ self.fn_hash_built = 1 # DONE, so now I know , see genOperation()
+
+ self.st = self.st_save
+ self.genHeader() # initial dissector comments
+ self.genWrsCopyright()
+ self.genGPL()
+ self.genIncludes()
+ self.genPrototype()
+ self.genProtocol()
+ self.genDeclares(oplist, atlist, enlist, stlist, unlist)
+ if len(atlist) > 0:
+ self.genAtList(atlist) # string constant declares for Attributes
+ if len(enlist) > 0:
+ self.genEnList(enlist) # string constant declares for Enums
+ if len(unlist) > 0:
+ self.genUnList(unlist)
+
+ self.genExceptionHelpers(oplist) # helper function to decode user exceptions that have members
+ self.genExceptionDelegator(oplist) # finds the helper function to decode a user exception
+ if len(atlist) > 0:
+ self.genAttributeHelpers(atlist) # helper function to decode "attributes"
+
+ self.genHelpers(oplist, stlist, unlist) # operation, struct and union decode helper functions
+
+ self.genMainEntryStart(oplist)
+ self.genOpDelegator(oplist)
+ self.genAtDelegator(atlist)
+ self.genMainEntryEnd()
+
+ self.gen_proto_register(oplist, atlist, stlist, unlist)
+ self.gen_proto_reg_handoff(oplist)
+ # All the dissectors are now built-in
+ #self.gen_plugin_register()
+ if self.DEBUG:
+ self.dumpvars() # debug
+ self.genModelines()
+
+ def genHeader(self):
+ """Generate Standard Wireshark Header Comments"""
+ self.st.out(self.template_Header, dissector_name=self.dissname)
+ if self.DEBUG:
+ print("//XXX genHeader")
+
+ def genWrsCopyright(self):
+ if self.DEBUG:
+ print("//XXX genWrsCopyright")
+ self.st.out(self.template_wireshark_copyright)
+
+ def genModelines(self):
+ if self.DEBUG:
+ print("//XXX genModelines")
+
+ self.st.out(self.template_Modelines)
+
+ def genGPL(self):
+ if self.DEBUG:
+ print("//XXX genGPL")
+
+ self.st.out(self.template_GPL)
+
+ def genIncludes(self):
+ if self.DEBUG:
+ print("//XXX genIncludes")
+
+ self.st.out(self.template_Includes)
+
+ def genOpDeclares(self, op):
+ """" Generate hf variables for operation filters
+
+ in: opnode ( an operation node)
+ """
+
+ if self.DEBUG:
+ print("//XXX genOpDeclares")
+ print("//XXX return type = ", op.returnType().kind())
+
+ sname = self.namespace(op, "_")
+ rt = op.returnType()
+
+ if rt.kind() != idltype.tk_void:
+ if rt.kind() == idltype.tk_alias: # a typdef return val possibly ?
+ #self.get_CDR_alias(rt, rt.name())
+ if rt.unalias().kind() == idltype.tk_sequence:
+ self.st.out(self.template_hf, name=sname + "_return_loop")
+ if self.isSeqNativeType(rt.unalias().seqType()) or self.AGGRESSIVE:
+ self.st.out(self.template_hf, name=sname + "_return")
+ elif (rt.unalias().kind() != idltype.tk_struct and
+ rt.unalias().kind() != idltype.tk_objref and
+ rt.unalias().kind() != idltype.tk_any):
+ self.st.out(self.template_hf, name=sname + "_return")
+
+ elif (rt.kind() != idltype.tk_struct and
+ rt.kind() != idltype.tk_objref and
+ rt.kind() != idltype.tk_union and
+ rt.kind() != idltype.tk_any):
+ self.st.out(self.template_hf, name=sname + "_return")
+
+ for p in op.parameters():
+ if p.paramType().unalias().kind() == idltype.tk_sequence:
+ self.st.out(self.template_hf, name=sname + "_" + p.identifier() + "_loop")
+ if (self.isSeqNativeType(p.paramType().unalias().seqType())) or self.AGGRESSIVE:
+ self.st.out(self.template_hf, name=sname + "_" + p.identifier())
+ elif (p.paramType().unalias().kind() != idltype.tk_any and
+ p.paramType().unalias().kind() != idltype.tk_struct and
+ p.paramType().unalias().kind() != idltype.tk_objref and
+ p.paramType().unalias().kind() != idltype.tk_union):
+ if p.paramType().unalias().kind() == idltype.tk_wchar:
+ self.st.out(self.template_hf, name=sname + "_" + p.identifier() + "_len")
+ self.st.out(self.template_hf, name=sname + "_" + p.identifier())
+
+ def genAtDeclares(self, at):
+ """Generate hf variables for attributes
+
+ in: at ( an attribute)
+ """
+
+ if self.DEBUG:
+ print("//XXX genAtDeclares")
+
+ for decl in at.declarators():
+ sname = self.namespace(decl, "_")
+
+ self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier())
+ if self.AGGRESSIVE:
+ self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier()+"_loop")
+ if not at.readonly():
+ self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier())
+ if self.AGGRESSIVE:
+ self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier()+"_loop")
+
+ def genStDeclares(self, st):
+ """Generate hf variables for structs
+
+ in: st ( a struct)
+ """
+
+ if self.DEBUG:
+ print("//XXX genStDeclares")
+
+ sname = self.namespace(st, "_")
+
+ for m in st.members():
+ if (self.isSeqNativeType(m.memberType())
+ or m.memberType().unalias().kind() == idltype.tk_sequence
+ or m.memberType().unalias().kind() == idltype.tk_alias):
+ for decl in m.declarators():
+ if m.memberType().unalias().kind() == idltype.tk_sequence:
+ self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
+ if (self.isSeqNativeType(m.memberType().unalias().seqType())) or self.AGGRESSIVE:
+ self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
+ else:
+ if m.memberType().unalias().kind() == idltype.tk_wchar:
+ self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_len")
+ self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
+
+ def genExDeclares(self, ex):
+ """Generate hf variables for user exception filters
+
+ in: exnode ( an exception node)
+ """
+
+ if self.DEBUG:
+ print("//XXX genExDeclares")
+
+ sname = self.namespace(ex, "_")
+
+ for m in ex.members():
+ for decl in m.declarators():
+ if m.memberType().unalias().kind() == idltype.tk_sequence:
+ if self.isSeqNativeType(m.memberType().unalias().seqType()):
+ self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
+ self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
+ elif m.memberType().unalias().kind() != idltype.tk_struct:
+ self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
+
+ def genUnionDeclares(self, un):
+ """Generate hf variables for union filters
+
+ in: un ( an union)
+ """
+
+ if self.DEBUG:
+ print("//XXX genUnionDeclares")
+
+ sname = self.namespace(un, "_")
+ self.st.out(self.template_hf, name=sname + "_" + un.identifier())
+
+ for uc in un.cases(): # for all UnionCase objects in this union
+ # TODO: Is this loop necessary? cl is not used
+ for cl in uc.labels(): # for all Caselabel objects in this UnionCase
+ if uc.caseType().unalias().kind() == idltype.tk_sequence:
+ self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier() + "_loop")
+ if self.isSeqNativeType(uc.caseType().unalias().seqType()):
+ self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
+ elif self.isSeqNativeType(uc.caseType()):
+ if uc.caseType().unalias().kind() == idltype.tk_wchar:
+ self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier() + "_len")
+ self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
+
+ def genExpertInfoDeclares(self):
+ """Generate ei variables for expert info filters"""
+ if self.DEBUG:
+ print("//XXX genExpertInfoDeclares")
+
+ self.st.out(self.template_proto_register_ei_filters, dissector_name=self.dissname)
+
+ def genDeclares(self, oplist, atlist, enlist, stlist, unlist):
+ """generate function prototypes if required
+
+ Currently this is used for struct and union helper function declarations.
+ """
+
+ if self.DEBUG:
+ print("//XXX genDeclares")
+
+ # prototype for operation filters
+ self.st.out(self.template_hf_operations)
+
+ # operation specific filters
+ if len(oplist) > 0:
+ self.st.out(self.template_proto_register_op_filter_comment)
+ for op in oplist:
+ self.genOpDeclares(op)
+
+ # attribute filters
+ if len(atlist) > 0:
+ self.st.out(self.template_proto_register_at_filter_comment)
+ for at in atlist:
+ self.genAtDeclares(at)
+
+ # struct filters
+ if len(stlist) > 0:
+ self.st.out(self.template_proto_register_st_filter_comment)
+ for st in stlist:
+ self.genStDeclares(st)
+
+ # exception List filters
+ exlist = self.get_exceptionList(oplist) # grab list of exception nodes
+ if len(exlist) > 0:
+ self.st.out(self.template_proto_register_ex_filter_comment)
+ for ex in exlist:
+ if ex.members(): # only if has members
+ self.genExDeclares(ex)
+
+ # union filters
+ if len(unlist) > 0:
+ self.st.out(self.template_proto_register_un_filter_comment)
+ for un in unlist:
+ self.genUnionDeclares(un)
+
+ # expert info filters
+ self.genExpertInfoDeclares()
+
+ # prototype for start_dissecting()
+
+ self.st.out(self.template_prototype_start_dissecting)
+
+ # struct prototypes
+
+ if len(stlist):
+ self.st.out(self.template_prototype_struct_start)
+ for st in stlist:
+ #print st.repoId()
+ sname = self.namespace(st, "_")
+ self.st.out(self.template_prototype_struct_body, stname=st.repoId(), name=sname)
+
+ self.st.out(self.template_prototype_struct_end)
+
+ # union prototypes
+ if len(unlist):
+ self.st.out(self.template_prototype_union_start)
+ for un in unlist:
+ sname = self.namespace(un, "_")
+ self.st.out(self.template_prototype_union_body, unname=un.repoId(), name=sname)
+ self.st.out(self.template_prototype_union_end)
+
+ def genPrototype(self):
+ self.st.out(self.template_prototype, dissector_name=self.dissname)
+
+ def genProtocol(self):
+ self.st.out(self.template_protocol, dissector_name=self.dissname)
+ self.st.out(self.template_init_boundary)
+
+
+ def genMainEntryStart(self, oplist):
+ self.st.out(self.template_main_dissector_start, dissname=self.dissname, disprot=self.protoname)
+ self.st.inc_indent()
+ self.st.out(self.template_main_dissector_switch_msgtype_start)
+ self.st.out(self.template_main_dissector_switch_msgtype_start_request_reply)
+ self.st.inc_indent()
+
+ def genMainEntryEnd(self):
+
+ self.st.out(self.template_main_dissector_switch_msgtype_end_request_reply)
+ self.st.dec_indent()
+ self.st.out(self.template_main_dissector_switch_msgtype_all_other_msgtype)
+ self.st.dec_indent()
+ self.st.out(self.template_main_dissector_end)
+
+
+ # NOTE: Mapping of attributes to operation(function) names is tricky.
+ #
+ # The actual accessor function names are language-mapping specific. The attribute name
+ # is subject to OMG IDL's name scoping rules; the accessor function names are
+ # guaranteed not to collide with any legal operation names specifiable in OMG IDL.
+ #
+ # eg:
+ #
+ # static const char get_Penguin_Echo_get_width_at[] = "get_width" ;
+ # static const char set_Penguin_Echo_set_width_at[] = "set_width" ;
+ #
+ # or:
+ #
+ # static const char get_Penguin_Echo_get_width_at[] = "_get_width" ;
+ # static const char set_Penguin_Echo_set_width_at[] = "_set_width" ;
+ #
+ # TODO: Implement some language dependent templates to handle naming conventions
+ # language <=> attribute. for C, C++. Java etc
+ #
+ # OR, just add a runtime GUI option to select language binding for attributes -- FS
+
+ def genAtList(self, atlist):
+ """in: atlist
+
+ out: C code for IDL attribute decalarations.
+
+ ie: def genAtlist(self,atlist,language)
+ """
+
+ self.st.out(self.template_comment_attributes_start)
+
+ for n in atlist:
+ for i in n.declarators(): #
+ sname = self.namespace(i, "_")
+ atname = i.identifier()
+ self.st.out(self.template_attributes_declare_Java_get, sname=sname, atname=atname)
+ if not n.readonly():
+ self.st.out(self.template_attributes_declare_Java_set, sname=sname, atname=atname)
+
+ self.st.out(self.template_comment_attributes_end)
+
+ def genEnList(self, enlist):
+ """in: enlist
+
+ out: C code for IDL Enum decalarations using "static const value_string" template
+ """
+
+ self.st.out(self.template_comment_enums_start)
+
+ for enum in enlist:
+ sname = self.namespace(enum, "_")
+
+ self.st.out(self.template_comment_enum_comment, ename=enum.repoId())
+ self.st.out(self.template_value_string_start, valstringname=sname)
+ for enumerator in enum.enumerators():
+ self.st.out(self.template_value_string_entry,
+ intval=str(self.valFromEnum(enum, enumerator)),
+ description=enumerator.identifier())
+
+ #atname = n.identifier()
+ self.st.out(self.template_value_string_end, valstringname=sname)
+
+ self.st.out(self.template_comment_enums_end)
+
+ def genUnList(self, unlist):
+ """in: unlist
+
+ out: C code for IDL Union declarations using "static const value_string template
+ """
+
+
+ for un in unlist:
+ if un.switchType().kind() == idltype.tk_enum:
+ continue # skip enums since they already have value-strings
+ sname = self.namespace(un, "_")
+ self.st.out(self.template_value_string_start, valstringname=sname)
+ for uc in un.cases():
+ for cl in uc.labels():
+ val = cl.value()
+ self.st.out(self.template_value_string_entry,
+ intval=str(val),
+ description=uc.declarator().identifier())
+ self.st.out(self.template_value_string_end, valstringname=sname)
+
+
+
+
+ def genExceptionDelegator(self, oplist):
+ """in: oplist
+
+ out: C code for User exception delegator
+ """
+
+ self.st.out(self.template_main_exception_delegator_start)
+ self.st.inc_indent()
+
+ exlist = self.get_exceptionList(oplist) # grab list of ALL UNIQUE exception nodes
+
+ for ex in exlist:
+ if self.DEBUG:
+ print("//XXX Exception ", ex.repoId())
+ print("//XXX Exception Identifier", ex.identifier())
+ print("//XXX Exception Scoped Name", ex.scopedName())
+
+ if ex.members(): # only if has members
+ sname = self.namespace(ex, "_")
+ self.st.out(self.template_ex_delegate_code, sname=sname, exname=ex.repoId())
+
+ self.st.dec_indent()
+ self.st.out(self.template_main_exception_delegator_end)
+
+ def genAttributeHelpers(self, atlist):
+ """Generate private helper functions to decode Attributes.
+
+ in: atlist
+
+ For readonly attribute - generate get_xxx()
+ If NOT readonly attribute - also generate set_xxx()
+ """
+
+ if self.DEBUG:
+ print("//XXX genAttributeHelpers: atlist = ", atlist)
+
+ self.st.out(self.template_attribute_helpers_start)
+
+ for attrib in atlist:
+ for decl in attrib.declarators():
+ self.genAtHelper(attrib, decl, "get") # get accessor
+ if not attrib.readonly():
+ self.genAtHelper(attrib, decl, "set") # set accessor
+
+ self.st.out(self.template_attribute_helpers_end)
+
+ def genAtHelper(self, attrib, decl, order):
+ """Generate private helper functions to decode an attribute
+
+ in: at - attribute node
+ in: decl - declarator belonging to this attribute
+ in: order - to generate a "get" or "set" helper
+ """
+
+ if self.DEBUG:
+ print("//XXX genAtHelper")
+
+ sname = order + "_" + self.namespace(decl, "_") # must use set or get prefix to avoid collision
+ self.curr_sname = sname # update current opnode/exnode scoped name
+
+ if not self.fn_hash_built:
+ self.fn_hash[sname] = [] # init empty list as val for this sname key
+ # but only if the fn_hash is not already built
+
+ self.st.out(self.template_attribute_helper_function_start, sname=sname, atname=decl.repoId())
+ self.st.inc_indent()
+ attr_type = attrib.attrType()
+ if self.DEBUG:
+ print("//XXX attrib = ", attrib)
+ print("//XXX attrib.attrType.unalias.kind = ", attr_type.unalias().kind())
+
+ if self.isItemVarType(attr_type):
+ self.st.out(self.template_proto_item)
+
+ if len(self.fn_hash[sname]) > 0:
+ self.st.out(self.template_helper_function_vars_start)
+ self.dumpCvars(sname)
+ self.st.out(self.template_helper_function_vars_end_item)
+
+ self.getCDR(attr_type, sname + "_" + decl.identifier())
+
+ self.st.dec_indent()
+ self.st.out(self.template_attribute_helper_function_end)
+
+ def genExceptionHelpers(self, oplist):
+ """Generate private helper functions to decode Exceptions used
+ within operations
+
+ in: oplist
+ """
+
+ exlist = self.get_exceptionList(oplist) # grab list of exception nodes
+ if self.DEBUG:
+ print("//XXX genExceptionHelpers: exlist = ", exlist)
+
+ self.st.out(self.template_exception_helpers_start)
+ for ex in exlist:
+ if ex.members(): # only if has members
+ #print("//XXX Exception = " + ex.identifier())
+ self.genExHelper(ex)
+
+ self.st.out(self.template_exception_helpers_end)
+
+ def genExHelper(self, ex):
+ """Generate private helper functions to decode User Exceptions
+
+ in: exnode ( an exception node)
+ """
+
+ if self.DEBUG:
+ print("//XXX genExHelper")
+
+ # check to see if we need an item
+ need_item = False
+ for m in ex.members():
+ if self.isItemVarType(m.memberType()):
+ need_item = True
+ break
+
+ sname = self.namespace(ex, "_")
+ self.curr_sname = sname # update current opnode/exnode scoped name
+ if not self.fn_hash_built:
+ self.fn_hash[sname] = [] # init empty list as val for this sname key
+ # but only if the fn_hash is not already built
+
+ self.st.out(self.template_exception_helper_function_start, sname=sname, exname=ex.repoId())
+ self.st.inc_indent()
+ if need_item:
+ self.st.out(self.template_proto_item)
+
+ if len(self.fn_hash[sname]) > 0:
+ self.st.out(self.template_helper_function_vars_start)
+ self.dumpCvars(sname)
+ if need_item:
+ self.st.out(self.template_helper_function_vars_end_item)
+ else:
+ self.st.out(self.template_helper_function_vars_end)
+
+ for m in ex.members():
+ if self.DEBUG:
+ print("//XXX genExhelper, member = ", m, "member type = ", m.memberType())
+
+ for decl in m.declarators():
+ if self.DEBUG:
+ print("//XXX genExhelper, d = ", decl)
+
+ if decl.sizes(): # an array
+ arr_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
+ indices = self.get_indices_from_sizes(decl.sizes())
+ string_indices = '%i ' % indices # convert int to string
+ self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
+ self.st.out(self.template_get_CDR_array_start, nonce=arr_nonce, aname=decl.identifier(), aval=string_indices)
+ self.st.inc_indent()
+ self.addvar(self.c_i + decl.identifier() + ";")
+
+ self.st.inc_indent()
+ self.getCDR(m.memberType(), sname + "_" + decl.identifier())
+
+ self.st.dec_indent()
+ self.st.dec_indent()
+ self.st.out(self.template_get_CDR_array_end, nonce=arr_nonce)
+
+ else:
+ self.getCDR(m.memberType(), sname + "_" + decl.identifier())
+
+ self.st.dec_indent()
+ self.st.out(self.template_exception_helper_function_end)
+
+ def genHelpers(self, oplist, stlist, unlist):
+ """Generate private helper functions
+
+ Generate private helper functions for each IDL operation.
+ Generate private helper functions for each IDL struct.
+ Generate private helper functions for each IDL union.
+
+
+ in: oplist, stlist, unlist
+ """
+
+ for op in oplist:
+ self.genOperation(op)
+ for st in stlist:
+ self.genStructHelper(st)
+ for un in unlist:
+ self.genUnionHelper(un)
+
    def genOperation(self, opnode):
        """Generate private helper functions for a specific IDL operation.

        Emits one C decode function for the operation, containing a switch
        on the GIOP message type (Request / Reply), and for replies a
        nested switch on the reply status (no exception / user exception /
        default).  The emission order of the st.out() calls below defines
        the structure of the generated C function, so it must not change.

        in: opnode
        """

        if self.DEBUG:
            print("//XXX genOperation called")
            print("//opnode =", opnode)
            print("//repoid =", opnode.repoId())

        sname = self.namespace(opnode, "_")
        if not self.fn_hash_built:
            self.fn_hash[sname] = []  # init empty list as val for this sname key
                                      # but only if the fn_hash is not already built

        self.curr_sname = sname  # update current opnode's scoped name
        opname = opnode.identifier()  # NOTE(review): opname is never used below

        self.st.out(self.template_helper_function_comment, repoid=opnode.repoId())

        self.st.out(self.template_helper_function_start, sname=sname)
        self.st.inc_indent()

        # Declare the "C" locals collected during the first (hash-building) pass.
        if len(self.fn_hash[sname]) > 0:
            self.st.out(self.template_helper_function_vars_start)
            self.dumpCvars(sname)
            self.st.out(self.template_helper_function_vars_end_item)

        # switch (header->message_type) { ...
        self.st.out(self.template_helper_switch_msgtype_start)

        # case Request: decode the "in" parameters
        self.st.out(self.template_helper_switch_msgtype_request_start)
        self.st.inc_indent()
        self.genOperationRequest(opnode)
        self.st.out(self.template_helper_switch_msgtype_request_end)
        self.st.dec_indent()

        # case Reply: switch again on the reply status
        self.st.out(self.template_helper_switch_msgtype_reply_start)
        self.st.inc_indent()

        self.st.out(self.template_helper_switch_rep_status_start)

        # NO_EXCEPTION: decode return value and "out"/"inout" parameters
        self.st.out(self.template_helper_switch_msgtype_reply_no_exception_start)
        self.st.inc_indent()
        self.genOperationReply(opnode)
        self.st.out(self.template_helper_switch_msgtype_reply_no_exception_end)
        self.st.dec_indent()

        # USER_EXCEPTION: decode any raised user exceptions
        self.st.out(self.template_helper_switch_msgtype_reply_user_exception_start)
        self.st.inc_indent()
        self.genOpExceptions(opnode)
        self.st.out(self.template_helper_switch_msgtype_reply_user_exception_end)
        self.st.dec_indent()

        # default reply status
        self.st.out(self.template_helper_switch_msgtype_reply_default_start, dissector_name=self.dissname)
        self.st.out(self.template_helper_switch_msgtype_reply_default_end)

        self.st.out(self.template_helper_switch_rep_status_end)

        self.st.dec_indent()

        # default message type
        self.st.out(self.template_helper_switch_msgtype_default_start, dissector_name=self.dissname)
        self.st.out(self.template_helper_switch_msgtype_default_end)

        self.st.out(self.template_helper_switch_msgtype_end)
        self.st.dec_indent()

        self.st.out(self.template_helper_function_end, sname=sname)
+
+ def genOperationRequest(self, opnode):
+ """Decode function parameters for a GIOP request message"""
+ for p in opnode.parameters():
+ if p.is_in():
+ if self.DEBUG:
+ print("//XXX parameter = ", p)
+ print("//XXX parameter type = ", p.paramType())
+ print("//XXX parameter type kind = ", p.paramType().kind())
+
+ self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
+
    def genOperationReply(self, opnode):
        """Decode function parameters for a GIOP reply message

        Emits decode code for the operation's return value first, then for
        every "out"/"inout" parameter, in declaration order.
        """
        rt = opnode.returnType()  # get return type
        if self.DEBUG:
            print("//XXX genOperationReply")
            print("//XXX opnode = ", opnode)
            print("//XXX return type = ", rt)
            print("//XXX return type.unalias = ", rt.unalias())
            print("//XXX return type.kind() = ", rt.kind())

        sname = self.namespace(opnode, "_")

        if rt.kind() == idltype.tk_alias:  # a typdef return val possibly ?
            #self.getCDR(rt.decl().alias().aliasType(),"dummy") # return value maybe a typedef
            self.get_CDR_alias(rt, sname + "_return")
            #self.get_CDR_alias(rt, rt.name())

        else:
            self.getCDR(rt, sname + "_return")  # return value is NOT an alias

        for p in opnode.parameters():
            if p.is_out():  # out or inout
                # out params are named after the operation, not the reply
                self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())

        #self.st.dec_indent()
+
+ # TODO: this method seems unnecessary
+ def genOpExceptions(self, opnode):
+ for ex in opnode.raises():
+ if ex.members():
+ #print ex.members()
+ for m in ex.members():
+ t = 0
+ #print m.memberType(), m.memberType().kind()
+
+ def genOpDelegator(self, oplist):
+ """Delegator for Operations"""
+ if len(oplist) == 0:
+ self.st.out(self.template_no_ops_to_delegate)
+ for op in oplist:
+ iname = "/".join(op.scopedName()[:-1])
+ opname = op.identifier()
+ sname = self.namespace(op, "_")
+ self.st.out(self.template_op_delegate_code, interface=iname, sname=sname, opname=opname)
+
+ def genAtDelegator(self, atlist):
+ """Delegator for Attributes"""
+ for a in atlist:
+ for i in a.declarators():
+ sname = self.namespace(i, "_")
+ self.st.out(self.template_at_delegate_code_get, sname=sname)
+ if not a.readonly():
+ self.st.out(self.template_at_delegate_code_set, sname=sname)
+
+ def addvar(self, var):
+ """Add a variable declaration to the hash of list"""
+ if var not in self.fn_hash[self.curr_sname]:
+ self.fn_hash[self.curr_sname].append(var)
+
+ def dumpvars(self):
+ """Print the variable declaration from the hash of list"""
+ for fn in self.fn_hash.keys():
+ print("FN = " + fn)
+ for v in self.fn_hash[fn]:
+ print("-> " + v)
+
+ def dumpCvars(self, sname):
+ """Print the "C" variable declaration from the hash of list
+ for a given scoped operation name (eg: tux_penguin_eat)"""
+ for v in self.fn_hash[sname]:
+ self.st.out(v)
+
    def valFromEnum(self, enumNode, enumeratorNode):
        """Given an enum node, and a enumerator node, return the enumerator's numerical value.

        eg: enum Color {red,green,blue} should return
        val = 1 for green

        NOTE(review): if enumeratorNode is not an idlast.Enumerator this
        implicitly returns None — callers appear to only pass Enumerators.
        """

        if self.DEBUG:
            print("//XXX valFromEnum, enumNode = ", enumNode, " from ", enumNode.repoId())
            print("//XXX valFromEnum, enumeratorNode = ", enumeratorNode, " from ", enumeratorNode.repoId())

        if isinstance(enumeratorNode, idlast.Enumerator):
            # position in the enumerator list is the CDR wire value
            value = enumNode.enumerators().index(enumeratorNode)
            return value
+
+
+# tk_null = 0
+# tk_void = 1
+# tk_short = 2
+# tk_long = 3
+# tk_ushort = 4
+# tk_ulong = 5
+# tk_float = 6
+# tk_double = 7
+# tk_boolean = 8
+# tk_char = 9
+# tk_octet = 10
+# tk_any = 11
+# tk_TypeCode = 12
+# tk_Principal = 13
+# tk_objref = 14
+# tk_struct = 15
+# tk_union = 16
+# tk_enum = 17
+# tk_string = 18
+# tk_sequence = 19
+# tk_array = 20
+# tk_alias = 21
+# tk_except = 22
+# tk_longlong = 23
+# tk_ulonglong = 24
+# tk_longdouble = 25
+# tk_wchar = 26
+# tk_wstring = 27
+# tk_fixed = 28
+# tk_value = 29
+# tk_value_box = 30
+# tk_native = 31
+# tk_abstract_interface = 32
+
+ def isSeqNativeType(self, type):
+ """Return true for "native" datatypes that will generate a direct proto_tree_add_xxx
+ call for a sequence. Used to determine if a separate hf variable is needed for
+ the loop over the sequence"""
+
+ pt = type.unalias().kind() # param CDR type
+
+ if self.DEBUG:
+ print("//XXX isSeqNativeType: kind = ", pt)
+
+ if pt == idltype.tk_ulong:
+ return 1
+ elif pt == idltype.tk_longlong:
+ return 1
+ elif pt == idltype.tk_ulonglong:
+ return 1
+ elif pt == idltype.tk_short:
+ return 1
+ elif pt == idltype.tk_long:
+ return 1
+ elif pt == idltype.tk_ushort:
+ return 1
+ elif pt == idltype.tk_float:
+ return 1
+ elif pt == idltype.tk_double:
+ return 1
+ elif pt == idltype.tk_boolean:
+ return 1
+ elif pt == idltype.tk_octet:
+ return 1
+ elif pt == idltype.tk_enum:
+ return 1
+ elif pt == idltype.tk_string:
+ return 1
+ elif pt == idltype.tk_wstring:
+ return 1
+ elif pt == idltype.tk_wchar:
+ return 1
+ elif pt == idltype.tk_char:
+ return 1
+ else:
+ return 0
+
+ def isItemVarType(self, type):
+
+ pt = type.unalias().kind() # param CDR type
+
+ if self.DEBUG:
+ print("//XXX isItemVarType: kind = ", pt)
+ inner_pt = None
+ if pt in [idltype.tk_struct, idltype.tk_fixed, idltype.tk_any]:
+ return 1
+ elif pt == idltype.tk_alias:
+ inner_pt = type.decl().alias().aliasType().unalias().kind()
+ elif pt == idltype.tk_sequence:
+ inner_pt = type.unalias().seqType().unalias().kind()
+ elif pt == idltype.tk_array:
+ inner_pt == type.decl().alias().aliasType().unalias().kind()
+ if inner_pt is not None and inner_pt in \
+ [idltype.tk_struct, idltype.tk_fixed, idltype.tk_any]:
+ return 1
+ elif inner_pt in [idltype.tk_alias, idltype.tk_sequence,\
+ idltype.tk_array]:
+ return self.isItemVarType(inner_pt)
+ return 0
+
    def getCDR(self, type, name="fred"):
        """This is the main "iterator" function. It takes a node, and tries to output
        a get_CDR_XXX accessor method(s). It can call itself multiple times
        if it finds nested structures etc.

        type -- an idltype node; dispatch is on its unaliased kind.
        name -- scoped name used to build the hf variable name in the C output.
        """

        pt = type.unalias().kind()  # param CDR type
        pn = name  # param name

        if self.DEBUG:
            print("//XXX getCDR: kind = ", pt)
            print("//XXX getCDR: name = ", pn)

        if pt == idltype.tk_ulong:
            self.get_CDR_ulong(pn)
        elif pt == idltype.tk_longlong:
            self.get_CDR_longlong(pn)
        elif pt == idltype.tk_ulonglong:
            self.get_CDR_ulonglong(pn)
        elif pt == idltype.tk_void:
            self.get_CDR_void(pn)
        elif pt == idltype.tk_short:
            self.get_CDR_short(pn)
        elif pt == idltype.tk_long:
            self.get_CDR_long(pn)
        elif pt == idltype.tk_ushort:
            self.get_CDR_ushort(pn)
        elif pt == idltype.tk_float:
            self.get_CDR_float(pn)
        elif pt == idltype.tk_double:
            self.get_CDR_double(pn)
        elif pt == idltype.tk_fixed:
            # fixed needs digits/scale, so it takes the type node too
            self.get_CDR_fixed(type.unalias(), pn)
        elif pt == idltype.tk_boolean:
            self.get_CDR_boolean(pn)
        elif pt == idltype.tk_char:
            self.get_CDR_char(pn)
        elif pt == idltype.tk_octet:
            self.get_CDR_octet(pn)
        elif pt == idltype.tk_any:
            self.get_CDR_any(pn)
        elif pt == idltype.tk_string:
            self.get_CDR_string(pn)
        elif pt == idltype.tk_wstring:
            self.get_CDR_wstring(pn)
        elif pt == idltype.tk_wchar:
            self.get_CDR_wchar(pn)
        elif pt == idltype.tk_enum:
            #print type.decl()
            self.get_CDR_enum(pn, type)
            #self.get_CDR_enum(pn)

        elif pt == idltype.tk_struct:
            self.get_CDR_struct(type, pn)
        elif pt == idltype.tk_TypeCode:  # will I ever get here ?
            self.get_CDR_TypeCode(pn)
        elif pt == idltype.tk_sequence:
            # sequence<octet> gets a dedicated (bulk) decode path
            if type.unalias().seqType().kind() == idltype.tk_octet:
                self.get_CDR_sequence_octet(type, pn)
            else:
                self.get_CDR_sequence(type, pn)
        elif pt == idltype.tk_objref:
            self.get_CDR_objref(type, pn)
        elif pt == idltype.tk_array:
            pass  # Supported elsewhere
        elif pt == idltype.tk_union:
            self.get_CDR_union(type, pn)
        elif pt == idltype.tk_alias:
            if self.DEBUG:
                print("//XXXXX Alias type XXXXX ", type)
            self.get_CDR_alias(type, pn)
        else:
            self.genWARNING("Unknown typecode = " + '%i ' % pt)  # put comment in source code
+
    def get_CDR_ulong(self, pn):
        """Emit decode code for a CDR unsigned long (hf item *pn*)."""
        self.st.out(self.template_get_CDR_ulong, hfname=pn)
+
    def get_CDR_short(self, pn):
        """Emit decode code for a CDR short (hf item *pn*)."""
        self.st.out(self.template_get_CDR_short, hfname=pn)
+
    def get_CDR_void(self, pn):
        """Emit the placeholder for a void return value (hf item *pn*)."""
        self.st.out(self.template_get_CDR_void, hfname=pn)
+
    def get_CDR_long(self, pn):
        """Emit decode code for a CDR long (hf item *pn*)."""
        self.st.out(self.template_get_CDR_long, hfname=pn)
+
    def get_CDR_ushort(self, pn):
        """Emit decode code for a CDR unsigned short (hf item *pn*)."""
        self.st.out(self.template_get_CDR_ushort, hfname=pn)
+
    def get_CDR_float(self, pn):
        """Emit decode code for a CDR float (hf item *pn*)."""
        self.st.out(self.template_get_CDR_float, hfname=pn)
+
    def get_CDR_double(self, pn):
        """Emit decode code for a CDR double (hf item *pn*)."""
        self.st.out(self.template_get_CDR_double, hfname=pn)
+
    def get_CDR_longlong(self, pn):
        """Emit decode code for a CDR long long (hf item *pn*)."""
        self.st.out(self.template_get_CDR_longlong, hfname=pn)
+
    def get_CDR_ulonglong(self, pn):
        """Emit decode code for a CDR unsigned long long (hf item *pn*)."""
        self.st.out(self.template_get_CDR_ulonglong, hfname=pn)
+
    def get_CDR_boolean(self, pn):
        """Emit decode code for a CDR boolean (hf item *pn*)."""
        self.st.out(self.template_get_CDR_boolean, hfname=pn)
+
    def get_CDR_fixed(self, type, pn):
        """Emit decode code for a CDR fixed-point value (hf item *pn*).

        digits/scale come from the idltype node; the highlight length in
        octets is derived from the digit count via dig_to_len().
        """
        if self.DEBUG:
            print("//XXXX calling get_CDR_fixed, type = ", type)
            print("//XXXX calling get_CDR_fixed, type.digits() = ", type.digits())
            print("//XXXX calling get_CDR_fixed, type.scale() = ", type.scale())

        string_digits = '%i ' % type.digits()  # convert int to string
        string_scale = '%i ' % type.scale()  # convert int to string
        string_length = '%i ' % self.dig_to_len(type.digits())  # how many octets to hilight for a number of digits

        self.st.out(self.template_get_CDR_fixed, hfname=pn, digits=string_digits, scale=string_scale, length=string_length)
        self.addvar(self.c_seq)  # the decoded digits land in the shared seq buffer
+
    def get_CDR_char(self, pn):
        """Emit decode code for a CDR char (hf item *pn*)."""
        self.st.out(self.template_get_CDR_char, hfname=pn)
+
    def get_CDR_octet(self, pn):
        """Emit decode code for a CDR octet (hf item *pn*)."""
        self.st.out(self.template_get_CDR_octet, hfname=pn)
+
    def get_CDR_any(self, pn):
        """Emit decode code for a CDR any (variable *pn*; note varname, not hfname)."""
        self.st.out(self.template_get_CDR_any, varname=pn)
+
    def get_CDR_enum(self, pn, type):
        """Emit decode code for a CDR enum (hf item *pn*), using the
        value_string array named after the enum's scoped name."""
        #self.st.out(self.template_get_CDR_enum, hfname=pn)
        sname = self.namespace(type.unalias(), "_")
        self.st.out(self.template_get_CDR_enum_symbolic, valstringarray=sname, hfname=pn)
        self.addvar(self.c_u_octet4)  # enum discriminant is read as a u32
+
    def get_CDR_string(self, pn):
        """Emit decode code for a CDR string (hf item *pn*)."""
        self.st.out(self.template_get_CDR_string, hfname=pn)
+
    def get_CDR_wstring(self, pn):
        """Emit decode code for a CDR wide string (hf item *pn*)."""
        self.st.out(self.template_get_CDR_wstring, hfname=pn)
        # the wstring template needs a u32 length and the seq buffer
        self.addvar(self.c_u_octet4)
        self.addvar(self.c_seq)
+
    def get_CDR_wchar(self, pn):
        """Emit decode code for a CDR wide char (hf item *pn*)."""
        self.st.out(self.template_get_CDR_wchar, hfname=pn)
        # the wchar template needs a signed octet and the seq buffer
        self.addvar(self.c_s_octet1)
        self.addvar(self.c_seq)
+
    def get_CDR_TypeCode(self, pn):
        """Emit decode code for a CDR TypeCode (variable *pn*)."""
        self.st.out(self.template_get_CDR_TypeCode, varname=pn)
        self.addvar(self.c_u_octet4)
+
    def get_CDR_objref(self, type, pn):
        """Emit decode code for an object reference.

        NOTE(review): *type* and *pn* are unused — the template uses the
        generic IOR fields from the giop dissector.
        """
        self.st.out(self.template_get_CDR_object)
+
    def get_CDR_union(self, type, pn):
        """Emit decode code for a CDR union: open the subtree and delegate
        the member decoding to the per-union helper function (which also
        copes with recursive unions)."""
        if self.DEBUG:
            print("//XXX Union type =", type, " pn = ", pn)
            print("//XXX Union type.decl()", type.decl())
            print("//XXX Union Scoped Name", type.scopedName())

        # If I am a typedef union {..}; node then find the union node

        if isinstance(type.decl(), idlast.Declarator):
            ntype = type.decl().alias().aliasType().decl()
        else:
            ntype = type.decl()  # I am a union node

        if self.DEBUG:
            print("//XXX Union ntype =", ntype)

        sname = self.namespace(ntype, "_")
        self.st.out(self.template_union_start, name=sname)

        # Output a call to the union helper function so I can handle recursive union also.

        self.st.out(self.template_decode_union, name=sname)

        self.st.out(self.template_union_end, name=sname)
+
    def getCDR_hf(self, type, desc, filter, hf_name="fred", value_str=None):
        """This takes a node, and tries to output the appropriate item for the
        hf array.

        Mirrors the dispatch in getCDR(): each decode template emitted there
        has a matching hf entry emitted here.  Compound kinds (struct, union)
        emit nothing because their members carry their own hf entries.
        """
        pt = type.unalias().kind()  # param CDR type
        pn = hf_name  # param name

        if self.DEBUG:
            print("//XXX getCDR_hf: kind = ", pt)
            print("//XXX getCDR_hf: name = ", pn)

        if pt == idltype.tk_ulong:
            self.get_CDR_ulong_hf(pn, desc, filter, self.dissname, value_str)
        elif pt == idltype.tk_longlong:
            self.get_CDR_longlong_hf(pn, desc, filter, self.dissname, value_str)
        elif pt == idltype.tk_ulonglong:
            self.get_CDR_ulonglong_hf(pn, desc, filter, self.dissname, value_str)
        elif pt == idltype.tk_void:
            pass  # no hf_ variables needed
        elif pt == idltype.tk_short:
            self.get_CDR_short_hf(pn, desc, filter, self.dissname, value_str)
        elif pt == idltype.tk_long:
            self.get_CDR_long_hf(pn, desc, filter, self.dissname, value_str)
        elif pt == idltype.tk_ushort:
            self.get_CDR_ushort_hf(pn, desc, filter, self.dissname, value_str)
        elif pt == idltype.tk_float:
            self.get_CDR_float_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_double:
            self.get_CDR_double_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_fixed:
            self.get_CDR_fixed_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_boolean:
            self.get_CDR_boolean_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_char:
            self.get_CDR_char_hf(pn, desc, filter, self.dissname, value_str)
        elif pt == idltype.tk_octet:
            self.get_CDR_octet_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_any:
            pass  # no hf_ variables needed
        elif pt == idltype.tk_string:
            self.get_CDR_string_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_wstring:
            self.get_CDR_wstring_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_wchar:
            self.get_CDR_wchar_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_enum:
            self.get_CDR_enum_hf(pn, type, desc, filter, self.dissname)
        elif pt == idltype.tk_struct:
            pass  # no hf_ variables needed (should be already contained in struct members)
        elif pt == idltype.tk_TypeCode:  # will I ever get here ?
            self.get_CDR_TypeCode_hf(pn, desc, filter, self.dissname)
        elif pt == idltype.tk_sequence:
            if type.unalias().seqType().kind() == idltype.tk_octet:
                self.get_CDR_sequence_octet_hf(type, pn, desc, filter, self.dissname)
            else:
                self.get_CDR_sequence_hf(type, pn, desc, filter, self.dissname)
        elif pt == idltype.tk_objref:
            pass  # no object specific hf_ variables used, use generic ones from giop dissector
        elif pt == idltype.tk_array:
            pass  # Supported elsewhere
        elif pt == idltype.tk_union:
            pass  # no hf_ variables needed (should be already contained in union members)
        elif pt == idltype.tk_alias:
            if self.DEBUG:
                print("//XXXXX Alias type hf //XXXXX ", type)
            self.get_CDR_alias_hf(type, desc, filter, pn)
        else:
            self.genWARNING("Unknown typecode = " + '%i ' % pt)  # put comment in source code
+
    def get_CDR_ulong_hf(self, pn, desc, filter, diss, value_str=None):
        """Emit the hf array entry for an unsigned long; symbolic (value_string)
        variant when *value_str* is given."""
        if value_str:
            self.st.out(self.template_get_CDR_ulong_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        else:
            self.st.out(self.template_get_CDR_ulong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_short_hf(self, pn, desc, filter, diss, value_str=None):
        """Emit the hf array entry for a short; symbolic variant when *value_str* is given."""
        if value_str:
            self.st.out(self.template_get_CDR_short_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        else:
            self.st.out(self.template_get_CDR_short_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_long_hf(self, pn, desc, filter, diss, value_str=None):
        """Emit the hf array entry for a long; symbolic variant when *value_str* is given."""
        if value_str:
            self.st.out(self.template_get_CDR_long_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        else:
            self.st.out(self.template_get_CDR_long_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_ushort_hf(self, pn, desc, filter, diss, value_str=None):
        """Emit the hf array entry for an unsigned short; symbolic variant when *value_str* is given."""
        if value_str:
            self.st.out(self.template_get_CDR_ushort_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        else:
            self.st.out(self.template_get_CDR_ushort_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_float_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a float."""
        self.st.out(self.template_get_CDR_float_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_double_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a double."""
        self.st.out(self.template_get_CDR_double_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_fixed_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a fixed-point value."""
        self.st.out(self.template_get_CDR_fixed_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_longlong_hf(self, pn, desc, filter, diss, value_str=None):
        """Emit the hf array entry for a long long; symbolic variant when *value_str* is given."""
        if value_str:
            self.st.out(self.template_get_CDR_longlong_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        else:
            self.st.out(self.template_get_CDR_longlong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_ulonglong_hf(self, pn, desc, filter, diss, value_str=None):
        """Emit the hf array entry for an unsigned long long; symbolic variant when *value_str* is given."""
        if value_str:
            self.st.out(self.template_get_CDR_ulonglong_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        else:
            self.st.out(self.template_get_CDR_ulonglong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_boolean_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a boolean."""
        self.st.out(self.template_get_CDR_boolean_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_char_hf(self, pn, desc, filter, diss, value_str=None):
        """Emit the hf array entry for a char; symbolic variant when *value_str* is given."""
        if value_str:
            self.st.out(self.template_get_CDR_char_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        else:
            self.st.out(self.template_get_CDR_char_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_octet_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for an octet."""
        self.st.out(self.template_get_CDR_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_enum_hf(self, pn, type, desc, filter, diss):
        """Emit the hf array entry for an enum, wired to the value_string
        array named after the enum's scoped name."""
        sname = self.namespace(type.unalias(), "_")
        self.st.out(self.template_get_CDR_enum_symbolic_hf, valstringarray=sname, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_string_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a string."""
        self.st.out(self.template_get_CDR_string_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_wstring_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a wide string."""
        self.st.out(self.template_get_CDR_wstring_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
#        self.addvar(self.c_u_octet4)
#        self.addvar(self.c_seq)
+
    def get_CDR_wchar_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a wide char."""
        self.st.out(self.template_get_CDR_wchar_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
#        self.addvar(self.c_s_octet1)
#        self.addvar(self.c_seq)
+
    def get_CDR_TypeCode_hf(self, pn, desc, filter, diss):
        """Emit the hf array entry for a TypeCode."""
        self.st.out(self.template_get_CDR_TypeCode_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_sequence_octet_hf(self, type, pn, desc, filter, diss):
        """Emit the hf array entry for a sequence<octet>."""
        self.st.out(self.template_get_CDR_sequence_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
+
    def get_CDR_sequence_hf(self,type,pn,desc,filter,diss):
        """Emit the hf array entry for a sequence; if the element type is
        "native" (see isSeqNativeType) it needs its own hf entry too."""
        self.st.out(self.template_get_CDR_sequence_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
        if self.isSeqNativeType(type.unalias().seqType()):
            self.getCDR_hf(type.unalias().seqType(), desc, filter, pn)
+
    def get_CDR_alias_hf(self, type, desc, filter, pn):
        """Emit hf array entries for a typedef (alias): recurse with the
        aliased type for arrays, or with the fully unaliased type for a
        simple typedef."""
        if self.DEBUG:
            print("//XXX get_CDR_alias_hf, type = ", type, " pn = ", pn)
            print("//XXX get_CDR_alias_hf, type.decl() = ", type.decl())
            print("//XXX get_CDR_alias_hf, type.decl().alias() = ", type.decl().alias())

        decl = type.decl()  # get declarator object

        if decl.sizes():  # a typedef array
            #indices = self.get_indices_from_sizes(decl.sizes())
            #string_indices = '%i ' % indices # convert int to string
            #self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)

            #self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
            #self.addvar(self.c_i + pn + ";")
            #self.st.inc_indent()
            self.getCDR_hf(type.decl().alias().aliasType(), desc, filter, pn)

            #self.st.dec_indent()
            #self.st.out(self.template_get_CDR_array_end)

        else:  # a simple typdef
            if self.DEBUG:
                print("//XXX get_CDR_alias_hf, type = ", type, " pn = ", pn)
                print("//XXX get_CDR_alias_hf, type.decl() = ", type.decl())

            #self.getCDR_hf(type.unalias(), desc, filter, decl.identifier() )
            self.getCDR_hf(type.unalias(), desc, filter, pn)
+
    def genUnionHelper(self, un):
        """Code to generate Union Helper functions

        Emits one C decode function for the union: decode the discriminant,
        save it into a local, then emit one compare-and-decode section per
        case label (plus a default section when present).

        in: un - a union node
        """

        if self.DEBUG:
            print("//XXX genUnionHelper called")
            print("//XXX Union type =", un)
            print("//XXX Union type.switchType()", un.switchType())
            print("//XXX Union Scoped Name", un.scopedName())
            print("//XXX Union switchType.unalias", un.switchType().unalias())
            print("//XXX Union switchType.unalias.kind", un.switchType().unalias().kind())

        # check to see if we need an item
        un_need_item = False
        if un.switchType().unalias().kind() == idltype.tk_enum:
            for uc in un.cases():  # for all UnionCase objects in this union
                if self.DEBUG:
                    print("//XXX checking", uc)
                if self.isItemVarType(uc.caseType()):
                    if uc.caseType().unalias().kind() == idltype.tk_sequence:
                        if uc.caseType().unalias().seqType().kind() == idltype.tk_struct:
                            un_need_item = True
                    else:
                        un_need_item = True
        if self.AGGRESSIVE:
            un_need_item = True

        if self.DEBUG:
            print("//XXX need_item =", un_need_item)

        sname = self.namespace(un, "_")
        self.curr_sname = sname  # update current opnode/exnode/stnode/unnode scoped name
        if not self.fn_hash_built:
            self.fn_hash[sname] = []  # init empty list as val for this sname key
                                      # but only if the fn_hash is not already built

        if un_need_item:
            self.st.out(self.template_union_helper_function_start_with_item, sname=sname, unname=un.repoId())
        else:
            self.st.out(self.template_union_helper_function_start, sname=sname, unname=un.repoId())
        self.st.inc_indent()

        # declare "C" locals collected during the first (hash-building) pass
        if len(self.fn_hash[sname]) > 0:
            self.st.out(self.template_helper_function_vars_start)
            self.dumpCvars(sname)
            self.st.out(self.template_helper_function_vars_end_item)

        st = un.switchType().unalias()  # may be typedef switch type, so find real type

        self.st.out(self.template_comment_union_code_start, uname=un.repoId())

        # decode the discriminant itself
        self.getCDR(st, sname + "_" + un.identifier())

        # Depending on what kind of discriminant I come accross (enum,integer,char,
        # short, boolean), make sure I cast the return value of the get_XXX accessor
        # to an appropriate value. Omniidl idlast.CaseLabel.value() accessor will
        # return an integer, or an Enumerator object that is then converted to its
        # integer equivalent.
        #
        #
        # NOTE - May be able to skip some of this stuff, but leave it in for now -- FS
        #

        if st.kind() == idltype.tk_enum:
            # NOTE(review): std is only bound in this branch; the Enumerator
            # handling further down relies on the discriminant being an enum.
            std = st.decl()
            self.st.out(self.template_comment_union_code_discriminant, uname=std.repoId())

            # count the number of cases to ensure variable is needed
            num = 0
            num_defaults = 0
            for uc in un.cases():  # for all UnionCase objects in this union
                num += len(uc.labels())
                for cl in uc.labels():
                    if cl.default():
                        num_defaults += 1

            if num != 1 or num_defaults != 1:
                self.st.out(self.template_union_code_save_discriminant_enum, discname=un.identifier())
                self.addvar(self.c_s_disc + un.identifier() + ";")

        elif st.kind() == idltype.tk_long:
            self.st.out(self.template_union_code_save_discriminant_long, discname=un.identifier())
            self.addvar(self.c_s_disc + un.identifier() + ";")

        elif st.kind() == idltype.tk_ulong:
            self.st.out(self.template_union_code_save_discriminant_ulong, discname=un.identifier())
            self.addvar(self.c_s_disc + un.identifier() + ";")

        elif st.kind() == idltype.tk_short:
            self.st.out(self.template_union_code_save_discriminant_short, discname=un.identifier())
            self.addvar(self.c_s_disc + un.identifier() + ";")

        elif st.kind() == idltype.tk_ushort:
            self.st.out(self.template_union_code_save_discriminant_ushort, discname=un.identifier())
            self.addvar(self.c_s_disc + un.identifier() + ";")

        elif st.kind() == idltype.tk_boolean:
            self.st.out(self.template_union_code_save_discriminant_boolean, discname=un.identifier())
            self.addvar(self.c_s_disc + un.identifier() + ";")

        elif st.kind() == idltype.tk_char:
            self.st.out(self.template_union_code_save_discriminant_char, discname=un.identifier())
            self.addvar(self.c_s_disc + un.identifier() + ";")

        else:
            print("//XXX Unknown st.kind() = ", st.kind())

        # Loop over all cases in this union

        for uc in un.cases():  # for all UnionCase objects in this union
            for cl in uc.labels():  # for all Caselabel objects in this UnionCase

                # get integer value, even if discriminant is
                # an Enumerator node

                if isinstance(cl.value(), idlast.Enumerator):
                    if self.DEBUG:
                        print("//XXX clv.identifier()", cl.value().identifier())
                        print("//XXX clv.repoId()", cl.value().repoId())
                        print("//XXX clv.scopedName()", cl.value().scopedName())

                    # find index of enumerator in enum declaration
                    # eg: RED is index 0 in enum Colors { RED, BLUE, GREEN }

                    clv = self.valFromEnum(std, cl.value())

                else:
                    clv = cl.value()

                #print "//XXX clv = ",clv

                # if char, don't convert to int, but put inside single quotes so that it is understood by C.
                # eg: if (disc == 'b')..
                #
                # TODO : handle \xxx chars generically from a function or table lookup rather than
                #        a whole bunch of "if" statements. -- FS

                if st.kind() == idltype.tk_char:
                    if clv == '\n':
                        string_clv = "'\\n'"
                    elif clv == '\t':
                        string_clv = "'\\t'"
                    else:
                        string_clv = "'" + clv + "'"
                else:
                    string_clv = '%i ' % clv

                # If default case, then skp comparison with discriminator

                if not cl.default():
                    self.st.out(self.template_comment_union_code_label_compare_start,
                                discname=un.identifier(), labelval=string_clv)
                    self.st.inc_indent()
                else:
                    self.st.out(self.template_comment_union_code_label_default_start)

                self.getCDR(uc.caseType(), sname + "_" + uc.declarator().identifier())

                if not cl.default():
                    self.st.dec_indent()
                    self.st.out(self.template_comment_union_code_label_compare_end)
                else:
                    self.st.out(self.template_comment_union_code_label_default_end)

        self.st.dec_indent()
        self.st.out(self.template_union_helper_function_end)
+
    def get_CDR_alias(self, type, pn):
        """Currently, get_CDR_alias is geared to finding typedef

        For a typedef array this emits the array loop (tagged with a random
        nonce so nested loop labels stay unique) and recurses with the
        aliased element type; for a simple typedef it just recurses with
        the fully unaliased type.
        """
        if self.DEBUG:
            print("//XXX get_CDR_alias, type = ", type, " pn = ", pn)
            print("//XXX get_CDR_alias, type.decl() = ", type.decl())
            print("//XXX get_CDR_alias, type.decl().alias() = ", type.decl().alias())

        decl = type.decl()  # get declarator object

        if decl.sizes():  # a typedef array
            indices = self.get_indices_from_sizes(decl.sizes())
            string_indices = '%i ' % indices  # convert int to string
            self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)

            # nonce keeps the generated C loop labels unique per array
            arr_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
            self.st.out(self.template_get_CDR_array_start, nonce=arr_nonce, aname=pn, aval=string_indices)
            self.st.inc_indent()
            self.addvar(self.c_i + pn + ";")
            self.st.inc_indent()
            self.getCDR(type.decl().alias().aliasType(), pn)

            self.st.dec_indent()
            self.st.dec_indent()
            self.st.out(self.template_get_CDR_array_end, nonce=arr_nonce)

        else:  # a simple typdef
            if self.DEBUG:
                print("//XXX type", type.__dict__)
                print("//XXX type.unalias()", type.unalias().__dict__)
                print("//XXX type.unalias().kind()", type.unalias().kind())
                print("//XXX type.decl()", type.decl().__dict__)
            self.getCDR(type.unalias(), pn)
+
+ def get_CDR_struct(self, type, pn):
+ """Handle structs, including recursive"""
+
+ # If I am a typedef struct {..}; node then find the struct node
+
+ if isinstance(type.decl(), idlast.Declarator):
+ ntype = type.decl().alias().aliasType().decl()
+ else:
+ ntype = type.decl() # I am a struct node
+
+ sname = self.namespace(ntype, "_")
+ self.st.out(self.template_structure_start, name=sname)
+
+ # Output a call to the struct helper function so I can handle recursive structs also.
+
+ self.st.out(self.template_decode_struct, name=sname)
+
+ self.st.out(self.template_structure_end, name=sname)
+
    def genStructHelper(self, st):
        """Generate private helper functions to decode a struct

        Emits one C decode function for the struct, decoding each member in
        declaration order; array members get an explicit loop tagged with a
        random nonce so nested loop labels stay unique.

        in: stnode ( a struct node)
        """

        if self.DEBUG:
            print("//XXX genStructHelper")

        sname = self.namespace(st, "_")
        self.curr_sname = sname  # update current opnode/exnode/stnode scoped name
        if not self.fn_hash_built:
            self.fn_hash[sname] = []  # init empty list as val for this sname key
                                      # but only if the fn_hash is not already built

        self.st.out(self.template_struct_helper_function_start, sname=sname, stname=st.repoId())
        self.st.inc_indent()

        # declare "C" locals collected during the first (hash-building) pass
        if len(self.fn_hash[sname]) > 0:
            self.st.out(self.template_helper_function_vars_start)
            self.dumpCvars(sname)
            self.st.out(self.template_helper_function_vars_end_item)

        for m in st.members():
            for decl in m.declarators():
                if decl.sizes():  # an array
                    arr_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
                    indices = self.get_indices_from_sizes(decl.sizes())
                    string_indices = '%i ' % indices  # convert int to string
                    self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
                    self.st.out(self.template_get_CDR_array_start, nonce=arr_nonce, aname=decl.identifier(), aval=string_indices)
                    self.st.inc_indent()
                    self.addvar(self.c_i + decl.identifier() + ";")

                    self.st.inc_indent()
                    self.getCDR(m.memberType(), sname + "_" + decl.identifier())
                    self.st.dec_indent()
                    self.st.dec_indent()
                    self.st.out(self.template_get_CDR_array_end, nonce=arr_nonce)

                else:
                    self.getCDR(m.memberType(), sname + "_" + decl.identifier())

        self.st.dec_indent()
        self.st.out(self.template_struct_helper_function_end)
+
+    def get_CDR_sequence(self,type,pn):
+        """Generate code to access a sequence of a type
+
+        Reads the sequence length into u_octet4_loop_<pn>, opens a per-element
+        for-loop inside its own "sequence" subtree, and recurses into getCDR()
+        for the element type.
+        """
+        if self.DEBUG:
+            print("//XXX get_CDR_sequence")
+        self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
+        # Random suffix keeps the generated tree_bak_<nonce> C variable unique
+        # (see template_get_CDR_sequence_loop_start).
+        seq_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
+        self.st.out(self.template_get_CDR_sequence_loop_start, nonce=seq_nonce, seqname=pn)
+        self.addvar(self.c_i_lim + pn + ";")  # loop limit variable for <pn>
+        self.addvar(self.c_i + pn + ";")      # loop counter variable for <pn>
+
+        self.st.inc_indent()
+        self.st.inc_indent()
+        self.getCDR(type.unalias().seqType(), pn)  # and start all over with the type
+        self.st.dec_indent()
+        self.st.dec_indent()
+
+        self.st.out(self.template_get_CDR_sequence_loop_end, nonce=seq_nonce)
+
+    def get_CDR_sequence_octet(self, type, pn):
+        """Generate code to access a sequence of octet
+
+        Unlike get_CDR_sequence(), the payload is rendered as a single
+        FT_BYTES blob plus a printable copy (see
+        template_get_CDR_sequence_octet) instead of a per-element loop.
+        """
+        if self.DEBUG:
+            print("//XXX get_CDR_sequence_octet")
+
+        self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
+        self.st.out(self.template_get_CDR_sequence_octet, seqname=pn)
+        self.addvar(self.c_i_lim + pn + ";")
+        self.addvar("const guint8 * binary_seq_" + pn + ";")
+        self.addvar("gchar * text_seq_" + pn + ";")  # printable copy for display
+
+    @staticmethod
+    def namespace(node, sep):
+        """in - op node
+
+        out - scoped operation name, using sep character instead of "::"
+
+        eg: Penguin::Echo::echoWString => Penguin_Echo_echoWString if sep = "_"
+        """
+
+        # ccolonName() joins the scoped-name components with "::"; swap in
+        # the caller's separator afterwards.
+        sname = idlutil.ccolonName(node.scopedName()).replace('::', sep)
+        #print("//XXX namespace: sname = " + sname)
+        return sname
+
+    def gen_plugin_register(self):
+        """generate code for plugin initialisation
+
+        Emits the (#if 0'd) plugin_register/plugin_reg_handoff boilerplate
+        from template_plugin_register.
+        """
+        self.st.out(self.template_plugin_register, description=self.description,
+                    protocol_name=self.protoname, dissector_name=self.dissname)
+
+    # TODO - make this a command line option
+    #
+    # -e explicit
+    # -h heuristic
+
+    def gen_proto_reg_handoff(self, oplist):
+        """generate register_giop_user_module code, and register only
+        unique interfaces that contain operations. Also output
+        a heuristic register in case we want to use that."""
+
+        self.st.out(self.template_proto_reg_handoff_start, dissector_name=self.dissname)
+        self.st.inc_indent()
+
+        # One explicit registration per unique interface that has operations.
+        for iname in self.get_intlist(oplist):
+            self.st.out(self.template_proto_reg_handoff_body, dissector_name=self.dissname,
+                        protocol_name=self.protoname, interface=iname)
+
+        # Always also register as a heuristic GIOP sub-dissector.
+        self.st.out(self.template_proto_reg_handoff_heuristic, dissector_name=self.dissname,
+                    protocol_name=self.protoname)
+        self.st.dec_indent()
+
+        self.st.out(self.template_proto_reg_handoff_end)
+
+    def genOp_hf(self, op):
+        """generate hf_ array element for operation, attribute, enums, struct and union lists"""
+        sname = self.namespace(op, "_")
+        # Strip the leading scope component and keep the next one
+        # (eg "Echo" from "Penguin_Echo_echoWString"); it becomes the
+        # display-filter prefix used below.
+        opname = sname[sname.find("_")+1:]
+        opname = opname[:opname.find("_")]
+        rt = op.returnType()
+
+        # Non-void operations get an hf entry for the return value.
+        if rt.kind() != idltype.tk_void:
+            if rt.kind() == idltype.tk_alias:  # a typedef return val possibly ?
+                self.getCDR_hf(rt, rt.name(),
+                               opname + "." + op.identifier() + ".return", sname + "_return")
+            else:
+                self.getCDR_hf(rt, "Return value",
+                               opname + "." + op.identifier() + ".return", sname + "_return")
+
+        # One hf entry per operation parameter.
+        for p in op.parameters():
+            self.getCDR_hf(p.paramType(),
+                           p.identifier(),
+                           opname + "." + op.identifier() + "." + p.identifier(),
+                           sname + "_" + p.identifier())
+
+    def genAt_hf(self, at):
+        """Generate hf_ entries for an attribute: one for the _get_ accessor
+        and, unless the attribute is readonly, one for the _set_ accessor."""
+        for decl in at.declarators():
+            sname = self.namespace(decl, "_")
+            atname = sname[sname.find("_")+1:]
+            atname = atname[:atname.find("_")]
+
+            self.getCDR_hf(at.attrType(), decl.identifier(),
+                           atname + "." + decl.identifier() + ".get", "get" + "_" + sname + "_" + decl.identifier())
+            if not at.readonly():
+                self.getCDR_hf(at.attrType(), decl.identifier(),
+                               atname + "." + decl.identifier() + ".set", "set" + "_" + sname + "_" + decl.identifier())
+
+    def genSt_hf(self, st):
+        """Generate hf_ entries for every member declarator of a struct node."""
+        sname = self.namespace(st, "_")
+        # NOTE(review): stname is computed here but never used below; the
+        # filter name is built from st.identifier() instead.
+        stname = sname[sname.find("_")+1:]
+        stname = stname[:stname.find("_")]
+        for m in st.members():
+            for decl in m.declarators():
+                self.getCDR_hf(m.memberType(), st.identifier() + "_" + decl.identifier(),
+                               st.identifier() + "." + decl.identifier(), sname + "_" + decl.identifier())
+
+    def genEx_hf(self, ex):
+        """Generate hf_ entries for every member declarator of an exception node."""
+        sname = self.namespace(ex, "_")
+        # Second scope component becomes the display-filter prefix.
+        exname = sname[sname.find("_")+1:]
+        exname = exname[:exname.find("_")]
+        for m in ex.members():
+            for decl in m.declarators():
+                self.getCDR_hf(m.memberType(), ex.identifier() + "_" + decl.identifier(),
+                               exname + "." + ex.identifier() + "_" + decl.identifier(), sname + "_" + decl.identifier())
+
+    def genUnion_hf(self, un):
+        """Generate hf_ entries for a union: the discriminant first, then one
+        entry per case declarator."""
+        sname = self.namespace(un, "_")
+        unname = sname[:sname.rfind("_")]  # drop trailing component, then dot-separate
+        unname = unname.replace("_", ".")
+        if self.DEBUG:
+            print("//XXX genUnion_hf")
+            print("// sname =", sname)
+            print("// uname =", unname)
+
+        # Discriminant entry. NOTE(review): the extra trailing sname argument
+        # presumably selects the value_string for symbolic display of the
+        # switch type -- confirm against getCDR_hf's signature.
+        self.getCDR_hf(un.switchType().unalias(), un.identifier(),
+                       unname + "." + un.identifier(), sname + "_" + un.identifier(), sname)
+
+        for uc in un.cases():  # for all UnionCase objects in this union
+            # TODO: is this loop necessary?
+            for cl in uc.labels():  # for all Caselabel objects in this UnionCase
+                self.getCDR_hf(uc.caseType(), un.identifier() + "_" + uc.declarator().identifier(),
+                               unname + "." + un.identifier() + "." + uc.declarator().identifier(),
+                               sname + "_" + uc.declarator().identifier())
+
+    def gen_proto_register(self, oplist, atlist, stlist, unlist):
+        """generate proto_register_<protoname> code,
+
+        in - oplist[], atlist[], stlist[], unlist[]
+
+        Emits the hf[] array (operation, attribute, struct, exception and
+        union filters, in that order) followed by the ei[]/ett[] arrays and
+        the registration calls (template_proto_register_end).
+        """
+
+        self.st.out(self.template_proto_register_start, dissector_name=self.dissname)
+
+        # operation specific filters
+        self.st.out(self.template_proto_register_op_filter_comment)
+        for op in oplist:
+            self.genOp_hf(op)
+
+        # attribute filters
+        self.st.out(self.template_proto_register_at_filter_comment)
+        for at in atlist:
+            self.genAt_hf(at)
+
+        # struct filters
+        self.st.out(self.template_proto_register_st_filter_comment)
+        for st in stlist:
+            if st.members():  # only if has members
+                self.genSt_hf(st)
+
+        # exception List filters
+        exlist = self.get_exceptionList(oplist)  # grab list of exception nodes
+        self.st.out(self.template_proto_register_ex_filter_comment)
+        for ex in exlist:
+            if ex.members():  # only if has members
+                self.genEx_hf(ex)
+
+        # Union filters
+        self.st.out(self.template_proto_register_un_filter_comment)
+        for un in unlist:
+            self.genUnion_hf(un)
+
+        self.st.out(self.template_proto_register_end, description=self.description,
+                    protocol_name=self.protoname, dissector_name=self.dissname)
+
+    @staticmethod
+    def get_intlist(oplist):
+        """in - oplist[]
+
+        out - a list of unique interface names. This will be used in
+        register_giop_user_module(dissect_giop_auto, "TEST IDL", "Penguin/Echo" ); so the operation
+        name must be removed from the scope. And we also only want unique interfaces.
+        """
+
+        int_hash = {}  # holds a hash of unique interfaces
+        for op in oplist:
+            sc = op.scopedName()  # eg: penguin,tux,bite
+            sc1 = sc[:-1]  # drop the operation name, keep the interface scope
+            sn = idlutil.slashName(sc1)  # penguin/tux
+            if sn not in int_hash:
+                int_hash[sn] = 0  # dummy val, but at least key is unique
+        ret = list(int_hash.keys())
+        ret.sort()  # deterministic ordering of generated registrations
+        return ret
+
+    def get_exceptionList(self, oplist):
+        """in - oplist[]
+
+        out - a list of exception nodes (unique). This will be used in
+        to generate dissect_exception_XXX functions.
+        """
+
+        # OrderedDict preserves first-seen order, so the generated decode
+        # functions come out in a stable order.
+        ex_hash = collections.OrderedDict()  # holds a hash of unique exceptions.
+        for op in oplist:
+            for ex in op.raises():
+                if ex not in ex_hash:
+                    ex_hash[ex] = 0  # dummy val, but at least key is unique
+                    if self.DEBUG:
+                        print("//XXX Exception = " + ex.identifier())
+        ret = list(ex_hash.keys())
+        return ret
+
+    @staticmethod
+    def get_indices_from_sizes(sizelist):
+        """Simple function to take a list of array sizes and find the total number of elements
+
+
+        eg: temp[4][3] = 12 elements
+        """
+
+        # Product of all dimensions.
+        val = 1
+        for i in sizelist:
+            val = val * i
+
+        return val
+
+    @staticmethod
+    def dig_to_len(dignum):
+        """Determine how many octets contain requested number
+        of digits for an "fixed" IDL type "on the wire" """
+        # NOTE(review): "/" is true division under Python 3, so this returns
+        # a float; an octet count is presumably meant to be an integer
+        # ((dignum // 2) + 1). Confirm against callers before changing.
+        return (dignum/2) + 1
+
+    def genTODO(self, message):
+        """Emit a TODO marker comment into the generated C source."""
+        self.st.out(self.template_debug_TODO, message=message)
+
+    def genWARNING(self, message):
+        """Emit a WARNING marker comment into the generated C source."""
+        self.st.out(self.template_debug_WARNING, message=message)
+
+
+ # Templates for C code
+
+    template_helper_function_comment = """\
+/*
+ * @repoid@
+ */"""
+    template_helper_function_vars_start = """\
+/* Operation specific Variable declarations Begin */"""
+
+    # NOTE(review): the two "vars_end" templates below are byte-identical;
+    # callers select one or the other (eg genStructHelper uses the _item
+    # variant) but the emitted text is the same.
+    template_helper_function_vars_end = """\
+/* Operation specific Variable declarations End */
+"""
+    template_helper_function_vars_end_item = """\
+/* Operation specific Variable declarations End */
+"""
+
+ template_helper_function_start = """\
+static void
+decode_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
+{"""
+
+ template_helper_function_end = """\
+}
+"""
+
+ template_proto_reg_handoff_start = """\
+/* register me as handler for these interfaces */
+void proto_reg_handoff_giop_@dissector_name@(void)
+{"""
+
+ template_proto_reg_handoff_body = """\
+/* Register for Explicit Dissection */
+register_giop_user_module(dissect_@dissector_name@, \"@protocol_name@\", \"@interface@\", proto_@dissector_name@ ); /* explicit dissector */
+"""
+
+ template_proto_reg_handoff_heuristic = """\
+/* Register for Heuristic Dissection */
+register_giop_user(dissect_@dissector_name@, \"@protocol_name@\" ,proto_@dissector_name@); /* heuristic dissector */
+"""
+
+ template_proto_reg_handoff_end = """\
+}
+"""
+
+ template_prototype = """
+void proto_register_giop_@dissector_name@(void);
+void proto_reg_handoff_giop_@dissector_name@(void);"""
+
+    # Initialize the protocol
+
+    # NOTE(review): the commented-out block below is a retained older
+    # variant of template_protocol (without the ett_giop_* subtree
+    # pointers); it is dead text and could be dropped.
+# template_protocol = """
+#/* Initialise the protocol and subtree pointers */
+#static int proto_@dissector_name@ = -1;
+#static gint ett_@dissector_name@ = -1;
+#"""
+    template_protocol = """
+/* Initialise the protocol and subtree pointers */
+static int proto_@dissector_name@ = -1;
+static gint ett_@dissector_name@ = -1;
+static int ett_giop_struct = -1;
+static int ett_giop_sequence = -1;
+static int ett_giop_array = -1;
+static int ett_giop_union = -1;
+"""
+
+ template_init_boundary = """
+/* Initialise the initial Alignment */
+static guint32 boundary = GIOP_HEADER_SIZE; /* initial value */"""
+
+ # plugin_register and plugin_reg_handoff templates
+
+ template_plugin_register = """
+#if 0
+
+WS_DLL_PUBLIC_DEF void
+plugin_register(void)
+{
+ if (proto_@dissector_name@ == -1) {
+ proto_register_giop_@dissector_name@();
+ }
+}
+
+WS_DLL_PUBLIC_DEF void
+plugin_reg_handoff(void){
+ proto_register_handoff_giop_@dissector_name@();
+}
+#endif
+"""
+
+ template_proto_register_start = """
+/* Register the protocol with Wireshark */
+void proto_register_giop_@dissector_name@(void)
+{
+ /* setup list of header fields */
+ static hf_register_info hf[] = {
+ /* field that indicates the currently ongoing request/reply exchange */
+ {&hf_operationrequest, {"Request_Operation","giop-@dissector_name@.Request_Operation",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_proto_register_end = """
+ };
+
+ static ei_register_info ei[] = {
+ { &ei_@dissector_name@_unknown_giop_msg, { "giop-@dissector_name@.unknown_giop_msg", PI_PROTOCOL, PI_WARN, "Unknown GIOP message", EXPFILL }},
+ { &ei_@dissector_name@_unknown_exception, { "giop-@dissector_name@.unknown_exception", PI_PROTOCOL, PI_WARN, "Unknown exception", EXPFILL }},
+ { &ei_@dissector_name@_unknown_reply_status, { "giop-@dissector_name@.unknown_reply_status", PI_PROTOCOL, PI_WARN, "Unknown reply status", EXPFILL }},
+ };
+
+ /* setup protocol subtree array */
+
+ static gint *ett[] = {
+ &ett_@dissector_name@,
+ &ett_giop_struct,
+ &ett_giop_sequence,
+ &ett_giop_array,
+ &ett_giop_union,
+ };
+
+ expert_module_t* expert_@dissector_name@;
+
+
+ /* Register the protocol name and description */
+ proto_@dissector_name@ = proto_register_protocol(\"@description@\" , \"GIOP/@protocol_name@\", \"giop-@dissector_name@\" );
+ proto_register_field_array(proto_@dissector_name@, hf, array_length(hf));
+ proto_register_subtree_array(ett, array_length(ett));
+
+ expert_@dissector_name@ = expert_register_protocol(proto_@dissector_name@);
+ expert_register_field_array(expert_@dissector_name@, ei, array_length(ei));
+}
+"""
+
+ template_proto_register_op_filter_comment = """\
+ /* Operation filters */"""
+
+ template_proto_register_at_filter_comment = """\
+ /* Attribute filters */"""
+
+ template_proto_register_st_filter_comment = """\
+ /* Struct filters */"""
+
+ template_proto_register_ex_filter_comment = """\
+ /* User exception filters */"""
+
+ template_proto_register_un_filter_comment = """\
+ /* Union filters */"""
+
+ template_proto_register_ei_filters = """\
+ /* Expert info filters */
+static expert_field ei_@dissector_name@_unknown_giop_msg = EI_INIT;
+static expert_field ei_@dissector_name@_unknown_exception = EI_INIT;
+static expert_field ei_@dissector_name@_unknown_reply_status = EI_INIT;
+"""
+
+ # template for delegation code
+
+ template_op_delegate_code = """\
+if (strcmp(operation, "@opname@") == 0
+ && (!idlname || strcmp(idlname, \"@interface@\") == 0)) {
+ item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
+ tree = start_dissecting(tvb, pinfo, ptree, offset);
+ decode_@sname@(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
+ return TRUE;
+}
+"""
+ template_no_ops_to_delegate = """\
+// NOTE: this should only appear if your IDL has absolutely no operations
+if (!idlname) {
+ return FALSE;
+}
+"""
+ # Templates for the helper functions
+
+ template_helper_switch_msgtype_start = """\
+switch(header->message_type) {"""
+
+ template_helper_switch_msgtype_default_start = """\
+default:
+ /* Unknown GIOP Message */
+ expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
+
+ template_helper_switch_msgtype_default_end = """\
+ break;"""
+
+ template_helper_switch_msgtype_end = """\
+} /* switch(header->message_type) */"""
+
+ template_helper_switch_msgtype_request_start = """\
+case Request:"""
+
+ template_helper_switch_msgtype_request_end = """\
+break;"""
+
+ template_helper_switch_msgtype_reply_start = """\
+case Reply:"""
+
+ template_helper_switch_msgtype_reply_no_exception_start = """\
+case NO_EXCEPTION:"""
+
+ template_helper_switch_msgtype_reply_no_exception_end = """\
+break;"""
+
+ template_helper_switch_msgtype_reply_user_exception_start = """\
+case USER_EXCEPTION:"""
+
+ template_helper_switch_msgtype_reply_user_exception_end = """\
+break;"""
+
+ template_helper_switch_msgtype_reply_default_start = """\
+default:
+ /* Unknown Exception */
+ expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_exception, "Unknown exception %d", header->rep_status);"""
+
+ template_helper_switch_msgtype_reply_default_end = """\
+ break;"""
+
+ template_helper_switch_msgtype_reply_end = """\
+break;"""
+
+ template_helper_switch_rep_status_start = """\
+switch(header->rep_status) {"""
+
+ template_helper_switch_rep_status_default_start = """\
+default:
+ /* Unknown Reply Status */
+ expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_reply_status, "Unknown reply status %d", header->rep_status);"""
+
+ template_helper_switch_rep_status_default_end = """\
+ break;"""
+
+ template_helper_switch_rep_status_end = """\
+} /* switch(header->rep_status) */
+
+break;"""
+
+ # Templates for get_CDR_xxx accessors
+
+ template_get_CDR_ulong = """\
+proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_short = """\
+proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_short(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_void = """\
+/* Function returns void */
+"""
+ template_get_CDR_long = """\
+proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_long(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_ushort = """\
+proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_float = """\
+proto_tree_add_float(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_float(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_double = """\
+proto_tree_add_double(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_double(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_longlong = """\
+proto_tree_add_int64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_long_long(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_ulonglong = """\
+proto_tree_add_uint64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_ulong_long(tvb,offset,stream_is_big_endian, boundary));
+"""
+ template_get_CDR_boolean = """\
+proto_tree_add_boolean(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_boolean(tvb,offset));
+"""
+ template_get_CDR_char = """\
+proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_char(tvb,offset));
+"""
+ template_get_CDR_octet = """\
+proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_octet(tvb,offset));
+"""
+ template_get_CDR_any = """\
+get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
+"""
+ template_get_CDR_fixed = """\
+get_CDR_fixed(tvb, pinfo, item, &seq, offset, @digits@, @scale@);
+proto_tree_add_string_format_value(tree, hf_@hfname@, tvb, *offset-@length@, @length@, seq, "< @digits@, @scale@> = %s", seq);
+"""
+ template_get_CDR_enum_symbolic = """\
+u_octet4 = get_CDR_enum(tvb,offset,stream_is_big_endian, boundary);
+proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, u_octet4);
+"""
+ template_get_CDR_string = """\
+giop_add_CDR_string(tree, tvb, offset, stream_is_big_endian, boundary, hf_@hfname@);
+"""
+ template_get_CDR_wstring = """\
+u_octet4 = get_CDR_wstring(tvb, &seq, offset, stream_is_big_endian, boundary, header);
+proto_tree_add_string(tree, hf_@hfname@, tvb, *offset-u_octet4, u_octet4, (u_octet4 > 0) ? seq : \"\");
+"""
+ template_get_CDR_wchar = """\
+s_octet1 = get_CDR_wchar(tvb, &seq, offset, header);
+if (tree) {
+ if (s_octet1 > 0)
+ proto_tree_add_uint(tree, hf_@hfname@_len, tvb, *offset-1-s_octet1, 1, s_octet1);
+
+ if (s_octet1 < 0)
+ s_octet1 = -s_octet1;
+
+ if (s_octet1 > 0)
+ proto_tree_add_string(tree, hf_@hfname@, tvb, *offset-s_octet1, s_octet1, seq);
+}
+"""
+ template_get_CDR_TypeCode = """\
+u_octet4 = get_CDR_typeCode(tvb, pinfo, tree, offset, stream_is_big_endian, boundary, header);
+"""
+
+ template_get_CDR_object = """\
+get_CDR_object(tvb, pinfo, tree, offset, stream_is_big_endian, boundary);
+"""
+
+ template_get_CDR_sequence_length = """\
+u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
+proto_tree_add_uint(tree, hf_@seqname@_loop, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
+"""
+ template_get_CDR_sequence_length_item = """\
+u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
+item = proto_tree_add_uint(tree, hf_@seqname@_loop, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
+"""
+ template_get_CDR_sequence_loop_start = """\
+{
+ proto_tree *tree_bak_@nonce@ = tree;
+ tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_sequence, NULL, "sequence @seqname@");
+ for (i_@seqname@=0; i_@seqname@ < u_octet4_loop_@seqname@; i_@seqname@++) {
+"""
+ template_get_CDR_sequence_loop_end = """\
+ }
+ tree = tree_bak_@nonce@;
+}
+"""
+
+ template_get_CDR_sequence_octet = """\
+if (u_octet4_loop_@seqname@ > 0 && tree) {
+ get_CDR_octet_seq(tvb, &binary_seq_@seqname@, offset,
+ u_octet4_loop_@seqname@);
+ text_seq_@seqname@ = make_printable_string(binary_seq_@seqname@,
+ u_octet4_loop_@seqname@);
+ proto_tree_add_bytes_format_value(tree, hf_@seqname@, tvb, *offset - u_octet4_loop_@seqname@,
+ u_octet4_loop_@seqname@, binary_seq_@seqname@, \"%s\", text_seq_@seqname@);
+}
+"""
+ template_get_CDR_array_start = """\
+{
+ proto_tree *tree_bak_@nonce@ = tree;
+ tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_array, NULL, "array @aname@");
+ for (i_@aname@=0; i_@aname@ < @aval@; i_@aname@++) {
+"""
+ template_get_CDR_array_end = """\
+ }
+ tree = tree_bak_@nonce@;
+}
+"""
+ template_get_CDR_array_comment = """\
+/* Array: @aname@[ @asize@] */
+"""
+ template_structure_start = """\
+{ /* Begin struct \"@name@\" */
+proto_tree *struct_tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_struct, NULL, "struct @name@");
+"""
+ template_structure_end = """\
+} /* End struct \"@name@\" */"""
+
+ template_union_start = """\
+{ /* Begin union \"@name@\" */
+proto_tree *union_tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_union, NULL, "union @name@");
+"""
+ template_union_end = """\
+} /* End union \"@name@\" */"""
+
+ # Templates for get_CDR_xxx_hf accessors
+
+ template_get_CDR_ulong_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_ulong_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_short_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_short_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_long_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_long_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_ushort_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_ushort_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_float_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_FLOAT,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_double_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_DOUBLE,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_fixed_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_longlong_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_longlong_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_ulonglong_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_ulonglong_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_boolean_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BOOLEAN,8,NULL,0x01,NULL,HFILL}},"""
+
+ template_get_CDR_char_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_char_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_octet_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_enum_symbolic_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
+
+ template_get_CDR_string_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_wstring_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_wchar_hf = """\
+ {&hf_@hfname@_len, {"@descname@ Length","giop-@dissector_name@.@filtername@.len",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_TypeCode_hf = """\
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_sequence_hf = """\
+ {&hf_@hfname@_loop, {"Seq length of @descname@","giop-@dissector_name@.@filtername@.size",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
+
+ template_get_CDR_sequence_octet_hf = """\
+ {&hf_@hfname@_loop, {"Seq length of @descname@","giop-@dissector_name@.@filtername@.size",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},
+ {&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BYTES,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
+
+ template_Header = """\
+/* packet-@dissector_name@.c
+ *
+ * Routines for IDL dissection
+ *
+ * Autogenerated from idl2wrs
+ * Copyright 2001 Frank Singleton <frank.singleton@@ericsson.com>
+ */
+
+"""
+
+ template_wireshark_copyright = """\
+/*
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ */
+"""
+
+ template_GPL = """\
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+"""
+
+ template_Modelines = """\
+/*
+ * Editor modelines - https://www.wireshark.org/tools/modelines.html
+ *
+ * Local Variables:
+ * c-basic-offset: 4
+ * tab-width: 8
+ * indent-tabs-mode: nil
+ * End:
+ *
+ * ex: set shiftwidth=4 tabstop=8 expandtab:
+ * :indentSize=4:tabSize=8:noTabs=true:
+ */"""
+
+ template_Includes = """\
+
+#include "config.h"
+
+#include <string.h>
+#include <epan/packet.h>
+#include <epan/proto.h>
+#include <epan/dissectors/packet-giop.h>
+#include <epan/expert.h>
+
+#include "ws_diag_control.h"
+#include "ws_compiler_tests.h"
+
+#ifdef _MSC_VER
+/* disable warning: "unreference local variable" */
+#pragma warning(disable:4101)
+#endif
+
+/* XXX this should be autogenerated, or the warnings fixed in the generator */
+DIAG_OFF(unused-function)
+DIAG_OFF(unused-variable)
+#if WS_IS_AT_LEAST_GNUC_VERSION(6,0)
+DIAG_OFF(unused-const-variable)
+#endif"""
+
+ template_main_dissector_start = """\
+/*
+ * Called once we accept the packet as being for us; it sets the
+ * Protocol and Info columns and creates the top-level protocol
+ * tree item.
+ */
+static proto_tree *
+start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset)
+{
+
+ proto_item *ti = NULL;
+ proto_tree *tree = NULL; /* init later, inside if(tree) */
+
+ col_set_str(pinfo->cinfo, COL_PROTOCOL, \"@disprot@\");
+
+ /*
+ * Do not clear COL_INFO, as nothing is being written there by
+ * this dissector yet. So leave it as is from the GIOP dissector.
+ * TODO: add something useful to COL_INFO
+ * col_clear(pinfo->cinfo, COL_INFO);
+ */
+
+ if (ptree) {
+ ti = proto_tree_add_item(ptree, proto_@dissname@, tvb, *offset, tvb_reported_length_remaining(tvb, *offset), ENC_NA);
+ tree = proto_item_add_subtree(ti, ett_@dissname@);
+ }
+ return tree;
+}
+
+static proto_item*
+process_RequestOperation(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, MessageHeader *header, const gchar *operation)
+{
+ proto_item *pi;
+ if(header->message_type == Reply) {
+ /* fill-up info column */
+ col_append_fstr(pinfo->cinfo, COL_INFO, " op = %s",operation);
+ }
+ /* fill-up the field */
+ pi=proto_tree_add_string(ptree, hf_operationrequest, tvb, 0, 0, operation);
+ proto_item_set_generated(pi);
+ return pi;
+}
+
+static gboolean
+dissect_@dissname@(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset, MessageHeader *header, const gchar *operation, gchar *idlname)
+{
+ proto_item *item _U_;
+ proto_tree *tree _U_;
+ gboolean stream_is_big_endian = is_big_endian(header); /* get endianess */
+
+ /* If we have a USER Exception, then decode it and return */
+ if ((header->message_type == Reply) && (header->rep_status == USER_EXCEPTION)) {
+ return decode_user_exception(tvb, pinfo, ptree, offset, header, operation, stream_is_big_endian);
+ }
+"""
+
+ template_main_dissector_switch_msgtype_start = """\
+switch(header->message_type) {
+"""
+ template_main_dissector_switch_msgtype_start_request_reply = """\
+case Request:
+case Reply:
+"""
+ template_main_dissector_switch_msgtype_end_request_reply = """\
+break;
+"""
+ template_main_dissector_switch_msgtype_all_other_msgtype = """\
+case CancelRequest:
+case LocateRequest:
+case LocateReply:
+case CloseConnection:
+case MessageError:
+case Fragment:
+ return FALSE; /* not handled yet */
+
+default:
+ return FALSE; /* not handled yet */
+
+} /* switch */
+"""
+ template_main_dissector_end = """\
+
+ return FALSE;
+
+} /* End of main dissector */
+"""
+
+
+#-------------------------------------------------------------#
+# Exception handling templates #
+#-------------------------------------------------------------#
+
+ template_exception_helpers_start = """\
+/* Begin Exception Helper Functions */
+
+"""
+ template_exception_helpers_end = """\
+
+/* End Exception Helper Functions */
+"""
+
+ template_main_exception_delegator_start = """\
+/*
+ * Main delegator for exception handling
+ *
+ */
+static gboolean
+decode_user_exception(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *ptree _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
+{
+ proto_tree *tree _U_;
+
+ if (!header->exception_id)
+ return FALSE;
+"""
+
+ template_ex_delegate_code = """\
+if (strcmp(header->exception_id, "@exname@") == 0) {
+ tree = start_dissecting(tvb, pinfo, ptree, offset);
+ decode_ex_@sname@(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian); /* @exname@ */
+ return TRUE;
+}
+"""
+
+ template_main_exception_delegator_end = """
+ return FALSE; /* user exception not found */
+}
+"""
+
+ template_exception_helper_function_start = """\
+/* Exception = @exname@ */
+static void
+decode_ex_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
+{
+"""
+
+ template_exception_helper_function_end = """\
+}
+"""
+
+ template_struct_helper_function_start = """\
+/* Struct = @stname@ */
+static void
+decode_@sname@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
+{
+"""
+
+ template_struct_helper_function_end = """\
+}
+"""
+
+ template_union_helper_function_start = """\
+/* Union = @unname@ */
+static void
+decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
+{
+"""
+
+ template_union_helper_function_start_with_item = """\
+/* Union = @unname@ */
+static void
+decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
+{
+ proto_item* item = NULL;
+"""
+
+ template_union_helper_function_end = """\
+}
+"""
+
+#-------------------------------------------------------------#
+# Value string templates #
+#-------------------------------------------------------------#
+
+ template_value_string_start = """\
+static const value_string @valstringname@[] = {
+"""
+ template_value_string_entry = """\
+ { @intval@, \"@description@\" },"""
+
+ template_value_string_end = """\
+ { 0, NULL },
+};
+"""
+
+#-------------------------------------------------------------#
+# Enum handling templates #
+#-------------------------------------------------------------#
+
+ template_comment_enums_start = """\
+/*
+ * IDL Enums Start
+ */
+"""
+ template_comment_enums_end = """\
+/*
+ * IDL Enums End
+ */
+"""
+ template_comment_enum_comment = """\
+/*
+ * Enum = @ename@
+ */"""
+
+#-------------------------------------------------------------#
+# Attribute handling templates #
+#-------------------------------------------------------------#
+
+ template_comment_attributes_start = """\
+/*
+ * IDL Attributes Start
+ */
+"""
+
+ # get/set accessor method names are language mapping dependent.
+
+ template_attributes_declare_Java_get = """static const char get_@sname@_at[] = \"_get_@atname@\" ;"""
+ template_attributes_declare_Java_set = """static const char set_@sname@_at[] = \"_set_@atname@\" ;"""
+
+ template_comment_attributes_end = """
+/*
+ * IDL Attributes End
+ */
+"""
+
+
+ # template for Attribute delegation code
+ #
+ # Note: _get_xxx() should only be called for Reply with NO_EXCEPTION
+ # Note: _set_xxx() should only be called for Request
+
+ template_at_delegate_code_get = """\
+if (strcmp(operation, get_@sname@_at) == 0 && (header->message_type == Reply) && (header->rep_status == NO_EXCEPTION) ) {
+ tree = start_dissecting(tvb, pinfo, ptree, offset);
+ decode_get_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
+ return TRUE;
+}
+"""
+ template_at_delegate_code_set = """\
+if (strcmp(operation, set_@sname@_at) == 0 && (header->message_type == Request) ) {
+ tree = start_dissecting(tvb, pinfo, ptree, offset);
+ decode_set_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
+ return TRUE;
+}
+"""
+ template_attribute_helpers_start = """\
+/* Begin Attribute Helper Functions */
+"""
+ template_attribute_helpers_end = """\
+
+/* End Attribute Helper Functions */
+"""
+
+ template_attribute_helper_function_start = """\
+
+/* Attribute = @atname@ */
+static void
+decode_@sname@_at(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
+{
+"""
+
+ template_attribute_helper_function_end = """\
+}
+"""
+
+#-------------------------------------------------------------#
+# Debugging templates #
+#-------------------------------------------------------------#
+
+ # Template for outputting TODO "C" comments
+ # so users know I need to improve something.
+
+ template_debug_TODO = """\
+
+/* TODO - @message@ */
+"""
+ # Template for outputting WARNING "C" comments
+ # so users know if I have found a problem.
+
+ template_debug_WARNING = """\
+/* WARNING - @message@ */
+"""
+
+#-------------------------------------------------------------#
+# IDL Union templates #
+#-------------------------------------------------------------#
+
+ template_comment_union_code_start = """\
+/*
+ * IDL Union Start - @uname@
+ */
+"""
+ template_comment_union_code_end = """
+/*
+ * IDL union End - @uname@
+ */
+"""
+ template_comment_union_code_discriminant = """\
+/*
+ * IDL Union - Discriminant - @uname@
+ */
+"""
+
+ # Cast Union types to something appropriate
+ # Enum value cast to guint32, all others cast to gint32
+ # as omniidl accessor returns integer or Enum.
+
+ template_union_code_save_discriminant_enum = """\
+disc_s_@discname@ = (gint32) u_octet4; /* save Enum Value discriminant and cast to gint32 */
+"""
+ template_union_code_save_discriminant_long = """\
+*offset -= 4; // rewind
+disc_s_@discname@ = (gint32) get_CDR_long(tvb,offset,stream_is_big_endian, boundary); /* save gint32 discriminant and cast to gint32 */
+"""
+
+ template_union_code_save_discriminant_ulong = """\
+*offset -= 4; // rewind
+disc_s_@discname@ = (gint32) get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary); /* save guint32 discriminant and cast to gint32 */
+"""
+ template_union_code_save_discriminant_short = """\
+*offset -= 2; // rewind
+disc_s_@discname@ = (gint32) get_CDR_short(tvb,offset,stream_is_big_endian, boundary); /* save gint16 discriminant and cast to gint32 */
+"""
+
+ template_union_code_save_discriminant_ushort = """\
+*offset -= 2; // rewind
+disc_s_@discname@ = (gint32) get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary); /* save gint16 discriminant and cast to gint32 */
+"""
+ template_union_code_save_discriminant_char = """\
+*offset -= 1; // rewind
+disc_s_@discname@ = (gint32) get_CDR_char(tvb,offset); /* save guint1 discriminant and cast to gint32 */
+"""
+ template_union_code_save_discriminant_boolean = """\
+*offset -= 1; // rewind
+disc_s_@discname@ = (gint32) get_CDR_boolean(tvb, offset); /* save guint1 discriminant and cast to gint32 */
+"""
+ template_comment_union_code_label_compare_start = """\
+if (disc_s_@discname@ == @labelval@) {
+"""
+ template_comment_union_code_label_compare_end = """\
+ return; /* End Compare for this discriminant type */
+}
+"""
+
+ template_comment_union_code_label_default_start = """
+/* Default Union Case Start */
+"""
+ template_comment_union_code_label_default_end = """\
+/* Default Union Case End */
+"""
+
+ # Templates for function prototypes.
+ # This is used in genDeclares() for declaring function prototypes
+ # for structs and union helper functions.
+
+ template_hf_operations = """
+static int hf_operationrequest = -1;/* Request_Operation field */
+"""
+
+ template_hf = """\
+static int hf_@name@ = -1;"""
+
+ template_prototype_start_dissecting = """
+static proto_tree *start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset);
+
+"""
+ template_prototype_struct_start = """\
+/* Struct prototype declaration Start */
+"""
+ template_prototype_struct_end = """\
+/* Struct prototype declaration End */
+"""
+ template_prototype_struct_body = """\
+/* Struct = @stname@ */
+static void decode_@name@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
+"""
+ template_decode_struct = """\
+decode_@name@_st(tvb, pinfo, struct_tree, item, offset, header, operation, stream_is_big_endian);"""
+
+ template_prototype_union_start = """\
+/* Union prototype declaration Start */"""
+
+ template_prototype_union_end = """\
+/* Union prototype declaration End */"""
+
+ template_prototype_union_body = """
+/* Union = @unname@ */
+static void decode_@name@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
+"""
+ template_decode_union = """\
+decode_@name@_un(tvb, pinfo, union_tree, offset, header, operation, stream_is_big_endian);
+"""
+ template_proto_item = """\
+proto_item *item = (proto_item*) wmem_alloc0(wmem_packet_scope(), sizeof(proto_item));
+"""
+
+#
+# Editor modelines - https://www.wireshark.org/tools/modelines.html
+#
+# Local variables:
+# c-basic-offset: 4
+# indent-tabs-mode: nil
+# End:
+#
+# vi: set shiftwidth=4 expandtab:
+# :indentSize=4:noTabs=true:
+#
diff --git a/tools/wireshark_words.txt b/tools/wireshark_words.txt
new file mode 100644
index 0000000..7a0857a
--- /dev/null
+++ b/tools/wireshark_words.txt
@@ -0,0 +1,1857 @@
+0x%02x
+0x%08x
+10base
+10gig
+16apsk
+1xrtt
+20west
+3gpp2
+3pcap
+5views
+80211n
+80mhz
+abbrev
+accelerometer
+acceptor
+accessor
+accessors
+accuracies
+acked
+acknack
+acknowledgement
+acp133
+activations
+actuator
+acyclic
+addba
+additionals
+additionsr
+addon
+adjacency
+adlink
+administrable
+adpclk
+adspec
+advatek
+adwin
+aes128
+aes256
+aethra
+aggregations
+aggregator
+agnss
+aifsn
+aironet
+airpcap
+airtel
+alcap
+alcatel
+alljoyn
+alloc
+allocators
+alteon
+ampdu
+amperage
+ampere
+amperes
+anacap
+analyzers
+analyzes
+annexc
+annunc
+anonsvn
+anonymization
+aperiodic
+appdata
+appid
+appkey
+applicability
+appset
+arbitrated
+arduino
+arfcn
+arista
+asciidoc
+ashrae
+asn1
+asn1cnf
+asn2deb
+asn2wrs
+assignee
+assignor
+assoc
+assymetric
+async
+asynchronously
+asyncmap
+atheros
+atomically
+atsss
+attendee
+attrib
+attrs
+audigy
+authcitrix
+authen
+authenticates
+authenticator
+authenticators
+authgss
+authn
+authntransitioning
+authorizer
+authtoken
+authtype
+authz
+autoconfiguration
+autodiscovery
+autoneg
+autosar
+available
+avaya
+avrcp
+bacapp
+backedup
+backend
+backhaul
+backoff
+bacnet
+batched
+baudrate
+bayer
+bband
+bblog
+bcast
+beamformed
+beamformee
+beamformer
+beamforming
+bgpspec
+bibliographic
+bibliography
+bidirectional
+bidirectionally
+bigint
+binlog
+bitfield
+bitmask
+bitrate
+bitstring
+blackhole
+bnode
+boolflag
+bootfile
+bootloader
+bootopt
+bootp
+broadcom
+bsmap
+bssap
+bssid
+bssids
+bssmap
+btatt
+btbredr
+btcommon
+bthci
+btmesh
+btsdp
+btsnoop
+bugzilla
+buildbot
+builtin
+bulleted
+butype
+byte
+byteorder
+cablelabs
+cadenced
+callback
+callid
+callsign
+calorific
+canceled
+canceling
+cancelled
+canceller
+canfd
+canfdmessage
+cannot
+canonicalised
+canonicalize
+canonicalized
+capab
+capacitive
+capinfos
+caplen
+capsa
+captioning
+capwap
+cardbus
+carrierfreq
+carrierid
+casio
+categorizes
+cblock
+ccache
+cccid
+ccitt
+ccpch
+cctrch
+cdma2000
+cdmacallmode
+cdmachanneldata
+celeron
+cellid
+cellidentity
+centillion
+centiseconds
+centrino
+cfilters
+cframe
+chan1
+chan2
+changelog
+channelisation
+channelized
+charset
+charsets
+chauvet
+checkbox
+checkout
+checksum
+chksum
+chmod
+choco
+chocolatey
+choplen
+chromaticities
+chromaticity
+chunked
+cicam
+cinfo
+ciphered
+ciphering
+ciphersuite
+ciphertext
+ciplus
+cipso
+citrix
+cksum
+classifiers
+classmark
+classmark3
+claypaky
+clearallportcounters
+clientkey
+clientout
+clopts
+clsfr
+clustermap
+cmake
+cmdcontrol
+cmstatus
+codabar
+codebook
+codecs
+codepoint
+codeset
+codingrate
+codute
+collectd
+collimation
+colocated
+coloring
+colorise
+colorization
+colorize
+colorized
+colorizing
+colormap
+combi
+combiner
+combiners
+communication
+compat
+compilable
+compilers
+compr
+computable
+concatenate
+concatenated
+concatenates
+concurrent
+conferenced
+configitem
+configurable
+conformant
+congctrl
+connectionless
+connid
+connp
+const
+contactless
+contextp
+contiguity
+contiguously
+coord
+Coord3D
+copycss
+copyfile
+corba
+corrigendum
+couchbase
+coverity
+cpdlc
+cpich
+cppcheck
+cpubus
+cpuregisters
+cqich
+credential
+credentials
+credssp
+criticalextensions
+criticalextensionsfuture
+crnti
+crypto
+cryptographic
+csapi
+ctime
+ctxinfo
+ctype
+cumulated
+cumulatively
+customizable
+customization
+customizing
+cyphering
+daintree
+datagram
+datagrams
+dataitem
+datalen
+datarate
+datastate
+datetime
+dccreq
+dcerpc
+dct3trace
+deact
+deactivated
+deactivating
+deactivation
+deassert
+deasserted
+deassertion
+deauth
+deauthenticate
+deauthenticated
+deauthentication
+deauthentications
+debian
+debug
+decapsulation
+decca
+decentralization
+dechunk
+decompressing
+decompressor
+decrement
+decremented
+decrementing
+decrypt
+decrypted
+decrypting
+decryption
+dedup
+deduplicate
+deenabled
+deenablement
+defragment
+defragmentation
+defragmented
+defragmenting
+dehumidification
+deinterleaved
+delimited
+delimiters
+delimiting
+demodulator
+demultiplexed
+demultiplexer
+demultiplexers
+denso
+deobfuscated
+deobfuscation
+depassivated
+deprecated
+deprotection
+dequeue
+dequeued
+dereference
+dereferenced
+dereferencing
+dereg
+deregister
+deregistered
+deregistering
+deregistration
+derivate
+des40
+descr
+descriptors
+desegment
+desegmentation
+desegmenting
+deselect
+destip
+destport
+deutschland
+devcap
+deviceid
+devmode
+dfdpck
+dfilter
+dfilters
+dfsauth
+dftest
+dgotyp
+dgram
+dhaka
+dhcpv
+dialed
+dialup
+diffie
+Digicel
+digitizer
+digium
+diplexer
+directionality
+disambiguate
+disambiguation
+discriminant
+dissection
+dissector
+dissectors
+distinguisher
+diversifier
+divisor
+djiuav
+dlmap
+dlsch
+dmepi
+dnskey
+docsis
+dodag
+dot11Qos
+dot1q
+double
+downlink
+doxygen
+dpauxmon
+dpnss
+drbid
+drdynvc
+droppable
+dsmcc
+dstport
+dtwin
+dumpcap
+duple
+dword
+dwords
+eapol
+earcfn
+earfcn
+ebcdic
+ecdhe
+ecdsa
+ecpri
+editcap
+eeprom
+egprs
+egroup
+eigrp
+einval
+elektronik
+elided
+elink
+ellipsoid
+encap
+encaps
+encapsulations
+encapsulator
+encinfo
+enciphered
+encodings
+encrypt
+encrypting
+encryptionkey
+endace
+endian
+endianness
+endif
+endpoint
+engineid
+enodeb
+enqueue
+enrollee
+entityid
+entryid
+enttec
+enumerates
+enumerations
+enumerator
+envchange
+epasv
+epdcch
+eperm
+epsem
+equinf
+equiv
+ericsson
+erldp
+errinf
+errno
+errorcode
+errored
+errorportinfo
+erspan
+España
+esperanto
+etheraddr
+ethercat
+ethers
+ethertype
+etlfile
+ettarr
+etwdump
+etype
+eutra
+eutran
+eventlog
+executables
+exflags
+exocet
+extattr
+extcap
+extensibility
+extensible
+extern
+exthdr
+extlen
+extrainformation
+eyesdn
+facch
+failover
+fastcom
+fastip
+fastmsg
+fattr
+featureful
+fhandle
+fiber
+fileset
+firewall
+fixme
+flag1
+flag2
+flavored
+flexray
+flowid
+flowmod
+flowset
+flowspec
+fmconfig
+followup
+foobar
+format0
+fortigate
+fortinet
+fpiur
+fraghdr
+framenum
+framenumber
+frametype
+frcrpt
+freebsd
+frontend
+fsctl
+ftenum
+ftype
+ftypes
+fujitsu
+functionalities
+funkt
+fuzzed
+fuzzer
+fvalue
+g711a
+g711u
+gamepad
+ganss
+gboolean
+gchar
+gcrypt
+gendc
+gentoo
+geoip
+geonw
+geran
+getattr
+getentrybyname
+getgroupinfo
+getnext
+getter
+gidaddr
+gigabit
+gigamon
+gigpod
+github
+gitlab
+gluster
+gmail
+gmprs
+gnodeb
+gnutls
+goaway
+golomb
+google
+gpointer
+gprscdr
+gprsmeasurementparams3g
+gregex
+greyed
+groupa
+groupadd
+groupb
+groupcast
+groupmod
+gssapi
+guint
+gzipped
+handoff
+hangup
+harqid
+hartip
+hashed
+hashes
+hazelcast
+hcidump
+headend
+heuristic
+hfarr
+hfill,
+HI2Operations
+hnbap
+homedir
+homeplug
+hopcount
+hostname
+howto
+hpfeeds
+hresult
+hsdpa
+hsdsch
+hspdsch
+hssite
+hsupa
+htonl
+htons
+http2
+https
+huawei
+huffman
+hytec
+icmpv
+ident
+identifier
+idiographic
+idl2deb
+idl2wrs
+iec60870
+ieee1609dot
+ieee17221
+ieee80211
+iface
+ifconfig
+ifdef
+ifname
+ikev2
+illuminance
+imeisv
+immersive
+implementations
+implementer
+implementers
+implementor
+inactivated
+inband
+incits
+incremented
+incrementing
+indenting
+indirection
+infile
+infiniband
+infix
+infolist
+informationitem
+informationlist
+infos
+inited
+initialise
+initialising
+initialization
+initializations
+initialize
+initialized
+initializer
+initializers
+initializes
+initializing
+inline
+inode
+inodes
+inspiron
+instantiate
+instdir
+instrumented
+interferer
+interleaving
+interop
+interruptible
+interworking
+intval
+inuse
+invalidation
+invalidly
+ioctl
+ioerr
+ioflag
+iograph
+iotecha
+ipaccess
+ipaddr
+ipaddress
+ipcomp
+ipconfig
+iperf
+ipfix
+ipphone
+ipprim
+ipsec
+ipseckey
+iptables
+iptrace
+ipv4addr
+ipv6addr
+ipxnet
+ipxnets
+irqmask
+isakmp
+isatap
+iscsi
+iseries
+isobus
+isochronous
+italia
+iterating
+iterator
+itunes
+iwarp
+ixveriwave
+jacobson
+jetds
+jsonraw
+k12xx
+kademlia
+kasme
+kasumi
+kbytes
+kchip
+keepalive
+kerberos
+keydes
+keygen
+keyid
+keylen
+keylog
+keymap
+keypress
+keyring
+keyset
+keytab
+knxip
+l2cap
+l2vpn
+l3vpn
+laggy
+lanalyzer
+latencies
+lbmpdm
+lcgid
+lcids
+lcsap
+leasequery
+libgcrypt
+libpcap
+libsmi
+licmgr
+linearity
+linkaddr
+linkcss
+linker
+linkinfo
+linksys
+linux
+list1
+literals
+lithionics
+lnpdqp
+logcat
+loghans
+loglocal
+logoff
+logout
+logray
+lookups
+loopback
+lossy
+lscap
+lucent
+luminaire
+luminance
+lycamobile
+macaddr
+macaddress
+macosx
+macsec
+mailto
+malloc
+manarg
+mantissa
+manuf
+mappable
+mariadb
+marvell
+mathieson
+matrixes
+maxlen
+maybefcs
+mbits
+mbsfn
+mbytes
+mcast
+mcmemberrecord
+mcptt
+mcsset
+measurability
+measurements
+medion
+megabit
+megaco
+mellanox
+memcache
+memcpy
+menubar
+mergecap
+merkle
+meshcop
+messageid
+metadata
+meteorological
+metermod
+México
+mgmtmsg
+microapp
+microbit
+midamble
+millimeters
+milliwatt
+mingw
+miniport
+minislot
+minislots
+minus1
+mirrorlink
+misconfiguration
+misconfigured
+mitel
+mitsubishi
+mkdir
+mmdbresolve
+modbus
+mode01
+mode7
+modepage
+modespecificinfo
+modulo
+motorola
+mozilla
+mpeg4
+mplstp
+mpsse
+mptcp
+mrcpv
+msbuild
+mscldap
+msgid
+msglen
+msgreq
+msgsend
+msgtype
+msisdn
+MSM7627A
+mtftp
+mtime
+mtrace
+mudurl
+mulaw
+multiband
+multicarrier
+multicast
+multicasted
+multicore
+multiframe
+multiframes
+multihop
+multilateration
+multileg
+multipacket
+multipart
+multipath
+multiplexed
+multiplexer
+multiplexers
+multiplexing
+multiplicative
+multiplicator
+multirat
+multirate
+multislot
+multistate
+mumbai
+mycapture
+mycaptures
+mydns
+myhost
+mysql
+nacks
+namelen
+namespace
+naptr
+narrowband
+nbrar
+ndpsm
+negotiability
+nessie
+netboot
+netfilter
+netflow
+nethop
+netkey
+netkeyindex
+netlink
+netlogon
+netmask
+netmon
+netricity
+netscaler
+nettl
+netxray
+newpw
+nexthop
+nextseq
+nfs2err
+nfs4err
+nghttp
+ngran
+ngsniffer
+niagra
+nitnxlate
+nnsvc
+noascii
+noauth
+nodeid
+nofcs
+nokia
+nominals
+nonblock
+noncriticalextension
+noncriticalextensions
+nopad
+noqueue
+nordig
+nortel
+notarized
+notational
+notif
+notifier
+notset
+notused
+novell
+nowait
+nowrap
+npcap
+nprach
+nreport
+nrppa
+nrtcws
+nsapi
+nssai
+nssvc
+nstime
+nstrace
+ntlmssp
+ntohl
+ntohs
+ntwkconn
+nullptr
+nvmeof
+nvram
+oampdu
+obfuscated
+objectid
+objkey
+obsoleted
+octal
+octet
+octets
+octetstring
+oextcap
+ofdma
+offloadability
+ofpat
+ofpbac
+ofpbrc
+ofpet
+ofpgmfc
+ofpmp
+ofppf
+ofprr
+ofptfpt
+ofpxmt
+om2000
+omniidl
+onboarding
+onduration
+onoff
+ontime
+opcode
+opcodes
+opcua
+openssh
+openssl
+openstreetmap
+openvpn
+opflags
+oplock
+opnum
+optimisation
+optimizations
+optimizer
+optiplex
+optreq
+ordinal
+oscillatory
+oscore
+osdmap
+osmocom
+osmux
+ospf6
+outhdr
+pacch
+packetcable
+packetization
+packetized
+pagings
+parallelization
+param
+parameterization
+parameterized
+params
+paramset
+parens
+parlay
+parms
+parser
+parses
+passcode
+passivated
+passkey
+passthrough
+passwd
+pbcch
+pcapng
+pccch
+pcch
+pcell
+pcmax
+pcmaxc
+pcrepattern
+pdcch
+pdsch
+pdustatus
+peeraddr
+peerkey
+periodicities
+peristency
+persistency
+pfname
+pgpool
+pharos
+phaser
+phasor
+phich
+phonebook
+physcellid
+picmg
+pinfo
+pixmap
+plaintext
+plano
+plixer
+plugin
+pluginize
+plugins
+pluginsdir
+pmconfig
+pname
+polestar
+popup
+portcounters
+portid
+portinfo
+portmod
+portno
+portnumber
+portset
+portstatus
+posix
+postfix
+powercontrol
+pppdump
+pppoe
+prach
+preauth
+preconfiguration
+preconfigured
+predef
+preempting
+preemption
+prefname
+prefs
+preloaded
+prepay
+prepend
+preshared
+printf
+prioritization
+prioritized
+privkey
+procid
+profidrive
+profinet
+promisc
+promiscsniff
+promiscuously
+propertykey
+protected
+protoabbrev
+protobuf
+protocolie
+protos
+proxied
+proxying
+proxykey
+pscell
+pseudowire
+psname
+ptime
+ptvcursor
+ptype
+pubdir
+pubkey
+pucch
+pusch
+pwach
+pwrprof
+pxeclient
+pytest
+qam16
+qam64
+qmgmt
+qnet6
+qosinfo
+qsearch
+quadlet
+quadrature
+quadro
+quantifiers
+queryhit
+queryset
+quiescing
+quintuplet
+quintuplets
+r3info
+radcom
+radeon
+radiotap
+radix
+ralink
+ranap
+randomization
+randomize
+randomizer
+randpkt
+raster
+rdpudp
+rdtci
+reachability
+readme
+realloc
+realtek
+realtime
+reassembles
+reassigning
+reassignments
+reassigns
+reassociation
+reattach
+reattached
+reauth
+reauthenticate
+reauthentication
+reauthorize
+rebalance
+rebase
+rebinding
+rebooted
+reboots
+recalculate
+recalculating
+recalculation
+recalibrate
+recognizer
+recompiled
+recompiling
+recomputed
+reconf
+reconfig
+reconfigurable
+reconfigure
+reconfigured
+reconfigures
+reconfirm
+reconfrqst
+recursively
+redelivered
+redelivery
+redir
+redirector
+redirects
+redistributable
+redistributables
+reencyption
+reentry
+reestablishing
+reestablishment
+refactored
+referer
+referrer
+regex
+regexp
+regionid
+reimplemented
+reinitialization
+reinitialize
+reinitialized
+reinitializing
+reinjected
+reinjection
+reinvoke
+rekey
+rekeying
+relocatable
+remapping
+renumbering
+reoptimization
+reoptimized
+reordercap
+reorigination
+representable
+reprogrammable
+reprogramming
+requester
+requestor
+requeue
+reregister
+reroute
+rerouted
+rerouting
+resampled
+resampler
+rescan
+resegment
+resend
+resequencing
+reservable
+reserved
+reserved0
+reserved1
+reserved2
+reserved3
+reserved4
+reserved5
+resize
+resized
+resolvable
+resolver
+resolvers
+resub
+resubmission
+resynchronization
+resynchronize
+retrans
+retransmission
+retransmissions
+retransmit
+retransmits
+retransmitted
+retransmitter
+retries
+retry
+retrying
+retval
+retyping
+revalidate
+revalidation
+revertive
+revocations
+rfcomm
+rfmon
+rgoose
+ripemd
+rlcmac
+rmcap
+rngrsp
+rnsap
+roamer
+routable
+rowfmt
+rpcap
+rpmbuild
+rsocket
+rsrvd
+rtitcp
+rtpdump
+rtpevent
+rtpmidi
+rtpmux
+ruleset
+rxchannel
+rxlen
+rxlev
+rxreq
+s7comm
+sabme
+sacch
+sanicap
+sanitize
+sapgui
+satisfiable
+scalability
+scaleout
+scaler
+scannable
+scdma
+scell
+scoped
+scrollbar
+sdcch
+sdjournal
+sdusize
+sectorization
+sectorized
+segmenting
+segno
+semiautomatic
+sendto
+separability
+separators
+seqno
+seqnr
+seqnum
+sequenceno
+sercos
+serialize
+serialized
+servlet
+sessionid
+sessionkey
+setattr
+setcap
+setuid
+severities
+sfiocr
+sflow
+sftpserver
+sftserver
+sgdsn
+sgsap
+sha256
+sha384
+sha512
+sharkd
+shomiti
+siapp
+sidelink
+signaal
+signaling
+signon
+simulcast
+sistemas
+skippable
+skype
+slaac
+slimp
+slsch
+smpte
+smrse
+sname
+snaplen
+snow3g
+snprintf
+softkey
+solera
+someip
+someipsd
+sonet
+spare
+spare1
+spare2
+spare3
+spare4
+spare5
+spare6
+spare7
+spare8
+spare9
+spcell
+specifiers
+spectrograph
+speex
+spline
+spnego
+spoofing
+spooled
+srbid
+srcport
+srtcp
+srvcc
+sshdump
+sshkey
+ssupervisor
+stanag
+stateful
+statfs
+statusbar
+stderr
+stdin
+stdout
+strbuf
+strdup
+streamid
+stringz
+stringzpad
+struct
+structs
+subaddress
+subband
+subcarrier
+subcarriers
+subchannel
+subcode
+subdevice
+subdissector
+subdissectors
+subdoc
+subelem
+subelement
+subelements
+subframes
+subfunc
+subhd
+subheader
+subheaders
+subids
+subidx
+subindex
+subkey
+subm
+submode
+subnet
+subnets
+subobj
+subobject
+subopt
+suboption
+suboptions
+subparam
+subpdu
+subpm
+subprocesstest
+subquery
+subrects
+subselect
+subselection
+subslot
+subtlv
+subtree
+subtrees
+superset
+sverige
+svhdx
+switchinfo
+symantec
+synchronizer
+synchronizing
+synchronously
+syncman
+syniverse
+synphasor
+syntaxes
+sysdig
+sysex
+sysframe
+syslog
+sysmac
+systemd
+tablemod
+tabular
+tclas
+tcpdump
+tcpflags
+tcpip
+tcptrace
+tcpudp
+tdd128
+tdd384
+tdd768
+technica
+Tektronix
+Telecomunicaciones
+telefonica
+Telefónica
+Teléfonos
+telekom
+telenor
+teletex
+telfonica
+telia
+teredo
+tesla
+text2pcap
+textbox
+thermister
+thermistor
+thunderx
+timeout
+timeslot
+timestamp
+timestamps
+timezone
+tipcv
+toggled
+toggling
+toolbar
+toolongfragment
+toolset
+tooltip
+toprowflag
+topup
+toshiba
+totemsrp
+touchlink
+touchpad
+traceroute
+traff
+transceive
+transcoder
+transiently
+transifex
+transitioning
+transitivity
+transum
+transversal
+traveler
+traversal
+trcdbg
+trunc
+truncatable
+truncate
+truncates
+truncating
+tshark
+tspec
+tstamp
+tunid
+tunneled
+tunneling
+tuple
+tuples
+tvbparse
+tvbuff
+twamp
+twopc
+txchannel
+type1
+type2
+type3
+typedef
+typeof
+uarfcn
+uavcan
+uboot
+ubuntu
+ucast
+udpcp
+udpdump
+udphdr
+udrei
+uievent
+uint16
+uint32
+uint8
+ulmap
+ulsch
+unack
+unacked
+unadmitted
+unadvise
+unaligned
+unallocated
+unallowed
+unassign
+unassoc
+unauthenticated
+unbind
+unbuffered
+uncalculated
+uncalibrated
+uncategorized
+unchannelized
+unciphered
+uncoloured
+uncomment
+uncompensated
+uncompress
+uncompressed
+uncompressing
+uncompression
+unconfigurable
+unconfigured
+unconfirm
+uncontended
+uncorrectable
+undecidable
+undecipherable
+undecodable
+undecoded
+undecryptable
+undecrypted
+undelete
+undeliverable
+underflow
+underrun
+undisposed
+undissected
+unduplicated
+unencrypted
+unescaped
+unescaping
+unexported
+unformatted
+unfragmented
+unframed
+ungrab
+unhandled
+unhidden
+unicast
+unicode
+unicom
+unignore
+unimplemented
+uninformative
+uninitialized
+uninstall
+uninstallation
+uninstalled
+uninstaller
+uninterruptable
+universitaet
+unjoin
+unjoined
+unjoining
+unknown1
+unlink
+unlinked
+unmanaged
+unmap
+unmappable
+unmark
+unmarshal
+unmerged
+unmodulated
+unmute
+unmuted
+unnumb
+unoptimized
+unordered
+unparsable
+unparseable
+unparsed
+unprocessable
+unpublish
+unpunctuated
+unquoted
+unreach
+unreassembled
+unreceived
+unrecoverable
+unrecovered
+unregister
+unregistration
+unreportable
+unresolvable
+unresponded
+unroutable
+unsecure
+unsegmented
+unsequenced
+unspec
+unsubscribe
+unsubscribed
+unsynchronized
+untagged
+unterminated
+untruncated
+untrusted
+untunneled
+untyped
+unvisited
+unvoiced
+updatability
+updatable
+upiri
+uplink
+upload
+uploaded
+uploading
+uploads
+urlencoded
+urnti
+usability
+usbmon
+usbms
+usbpcap
+userauth
+userdata
+userinfo
+userlist
+userplane
+utf8mb
+utilization
+utils
+utran
+uuencoded
+v1250
+v1310
+v1410
+v1530
+v1610
+validator
+varint
+vcpkg
+vcredist
+vcxproj
+vector3d
+venusmngr
+verbosity
+verifier
+verizon
+version2
+version3
+version4
+version5
+version6
+version7
+versioned
+versioning
+vhdset
+viavi
+virtex
+virtio
+virtualization
+vlans
+vnode
+vocoder
+vodafone
+voipmetrics
+volerr
+vxlan
+wakeup
+wapforum
+wbxml
+webcam
+webkit
+websocket
+whoami
+wideband
+wifidump
+wikipedia
+wikis
+wimax
+wimaxasncp
+winflexbison
+winget
+winpcap
+winspool
+wiphy
+wireshark
+wiretap
+wisun
+withfcs
+withoutfcs
+wksta
+workarounds
+wowlan
+wpcap
+wrepl
+writable
+wsbuild
+wscale
+wscbor
+wslua
+wsluarm
+wsutil
+X32bit
+x509if
+x509sat
+xattr
+xauth
+xchannel
+xcode
+xetra
+xferext
+xmlns
+xsltproc
+xtreme
+z3950
+zbncp
+zeroes
+zigbee
+zugtyp
+zürich
diff --git a/tools/ws-coding-style.cfg b/tools/ws-coding-style.cfg
new file mode 100644
index 0000000..aabd05e
--- /dev/null
+++ b/tools/ws-coding-style.cfg
@@ -0,0 +1,370 @@
+# Ref: https://gitlab.com/wireshark/wireshark/-/issues/5924
+#
+# FF: uncrustify config file for Wireshark (based on cheese-indent.cfg and
+# linux.cfg... taken somewhere from the Net)
+# http://uncrustify.sourceforge.net/
+# typical usage:
+#
+# uncrustify -c ../../tools/ws-coding-style.cfg --replace packet-dccp.c
+#
+
+# The number of columns to indent per level.
+# Usually 2, 3, 4, or 8.
+indent_columns = 4 # number, FF: 8 on linux
+
+# How to use tabs when indenting code
+# 0=spaces only
+# 1=indent with tabs, align with spaces
+# 2=indent and align with tabs
+indent_with_tabs = 0 # number, FF: spaces only, questionable... as
+ # everything about this topic :-)
+
+# Spaces to indent '{' from level
+indent_brace = 0 # number
+
+# Spaces to indent 'case' from 'switch'
+# Usually 0 or indent_columns.
+indent_switch_case = 0 #indent_columns # number
+
+# Add or remove space around arithmetic operator '+', '-', '/', '*', etc
+sp_arith = force # ignore/add/remove/force
+
+# Add or remove space around assignment operator '=', '+=', etc
+sp_assign = force # ignore/add/remove/force
+
+# Add or remove space around assignment '=' in enum
+sp_enum_assign = force # ignore/add/remove/force
+
+# Add or remove space around boolean operators '&&' and '||'
+sp_bool = force # ignore/add/remove/force
+
+# Add or remove space around compare operator '<', '>', '==', etc
+sp_compare = force # ignore/add/remove/force
+
+# Add or remove space inside '(' and ')'
+sp_inside_paren = remove # ignore/add/remove/force
+
+# Add or remove space between nested parens
+sp_paren_paren = remove # ignore/add/remove/force
+
+# Add or remove space before pointer star '*'
+sp_before_ptr_star = force # ignore/add/remove/force
+
+# Add or remove space between pointer stars '*'
+sp_between_ptr_star = remove # ignore/add/remove/force
+
+# Add or remove space after pointer star '*', if followed by a word.
+sp_after_ptr_star = remove # ignore/add/remove/force
+
+# Add or remove space before reference sign '&'
+sp_before_byref = force # ignore/add/remove/force
+
+# Add or remove space after reference sign '&', if followed by a word.
+sp_after_byref = remove # ignore/add/remove/force
+
+# Add or remove space between type and word
+sp_after_type = force # ignore/add/remove/force
+
+# Add or remove space before '(' of 'if', 'for', 'switch', and 'while'
+sp_before_sparen = force # ignore/add/remove/force
+
+# Add or remove space inside if-condition '(' and ')'
+sp_inside_sparen = remove # ignore/add/remove/force
+
+# Add or remove space after ')' of 'if', 'for', 'switch', and 'while'
+sp_after_sparen = force # ignore/add/remove/force
+
+# Add or remove space between ')' and '{' of 'if', 'for', 'switch', and 'while'
+sp_sparen_brace = force # ignore/add/remove/force
+
+# Add or remove space before empty statement ';' on 'if', 'for' and 'while'
+sp_special_semi = remove # ignore/add/remove/force
+
+# Add or remove space before ';'
+sp_before_semi = remove # ignore/add/remove/force
+
+# Add or remove space before ';' in non-empty 'for' statements
+sp_before_semi_for = remove # ignore/add/remove/force
+
+# Add or remove space inside '[' and ']'
+sp_inside_square = remove # ignore/add/remove/force
+
+# Add or remove space before '[' (except '[]')
+sp_before_square = remove # ignore/add/remove/force
+
+# Add or remove space before '[]'
+sp_before_squares = remove # ignore/add/remove/force
+
+# Add or remove space after ','
+sp_after_comma = force # ignore/add/remove/force
+
+# Add or remove space before ','
+sp_before_comma = remove # ignore/add/remove/force
+
+# Add or remove space after C/D cast, ie 'cast(int)a' vs 'cast(int) a' or '(int)a' vs '(int) a'
+sp_after_cast = force # ignore/add/remove/force
+
+# Add or remove spaces inside cast parens
+sp_inside_paren_cast = remove # ignore/add/remove/force
+
+# Add or remove space between 'sizeof' and '('
+sp_sizeof_paren = force # ignore/add/remove/force
+
+# Add or remove space inside '{' and '}'
+sp_inside_braces = remove # ignore/add/remove/force
+
+# Add or remove space inside '{}'
+sp_inside_braces_empty = remove # ignore/add/remove/force
+
+# Add or remove space inside enum '{' and '}'
+sp_inside_braces_enum = remove # ignore/add/remove/force
+
+# Add or remove space inside struct/union '{' and '}'
+sp_inside_braces_struct = remove # ignore/add/remove/force
+
+# Add or remove space between function name and '(' on function declaration
+sp_func_proto_paren = remove # ignore/add/remove/force, FF was force
+
+# Add or remove space between function name and '(' on function definition
+sp_func_def_paren = remove # ignore/add/remove/force, FF was force
+
+# Add or remove space inside empty function '()'
+sp_inside_fparens = remove # ignore/add/remove/force
+
+# Add or remove space inside function '(' and ')'
+sp_inside_fparen = remove # ignore/add/remove/force
+
+# Add or remove space between function name and '(' on function calls
+sp_func_call_paren = remove # ignore/add/remove/force, FF: was 'force'
+sp_func_call_user_paren = remove # ignore/add/remove/force
+set func_call_user _ N_
+
+# Add or remove space between 'return' and '('
+sp_return_paren = force # ignore/add/remove/force
+
+# Add or remove space between 'defined' and '(' in '#if defined (FOO)'
+sp_defined_paren = force # ignore/add/remove/force
+
+# Add or remove space between macro and value
+sp_macro = force # ignore/add/remove/force
+
+# Add or remove space between macro function ')' and value
+sp_macro_func = force # ignore/add/remove/force
+
+# Add or remove space around the ':' in 'b ? t : f'
+sp_cond_colon = force # ignore/add/remove/force
+
+# Add or remove space around the '?' in 'b ? t : f'
+sp_cond_question = force # ignore/add/remove/force
+
+# Add or remove space before a semicolon of an empty part of a for statement.
+sp_before_semi_for_empty = force # ignore/add/remove/force
+
+# Space between close brace and else
+sp_brace_else = force # string (add/force/ignore/remove)
+
+# Space between close parenthesis and open brace
+sp_paren_brace = force # string (add/force/ignore/remove)
+
+# Space between else and open brace
+sp_else_brace = force # string (add/force/ignore/remove)
+
+# How to align the star in variable definitions.
+# 0=Part of the type
+# 1=Part of the variable
+# 2=Dangling
+align_var_def_star_style = 2 # number (FF: see align_typedef_star_style)
+
+# How to align the '&' in variable definitions.
+# 0=Part of the type
+# 1=Part of the variable
+# 2=Dangling
+align_var_def_amp_style = 2 # number
+
+# Align variable definitions in prototypes and functions
+align_func_params = true # false/true
+
+# Whether to align the colon in struct bit fields
+align_var_def_colon = true # false/true
+
+# Whether to align inline struct/enum/union variable definitions
+align_var_def_inline = true # false/true
+
+# The span for aligning function prototypes (0=don't align)
+align_func_proto_span = 1 # number
+
+# The span for aligning on '#define' bodies (0=don't align)
+align_pp_define_span = 0 # number
+
+# Controls the positioning of the '*' in typedefs. Just try it.
+# 0: Align on typedef type, ignore '*'
+# 1: The '*' is part of type name: typedef int *pint;
+# 2: The '*' is part of the type, but dangling: typedef int *pint;
+align_typedef_star_style = 2 # number
+
+# Controls the positioning of the '&' in typedefs. Just try it.
+# 0: Align on typedef type, ignore '&'
+# 1: The '&' is part of type name: typedef int &pint;
+# 2: The '&' is part of the type, but dangling: typedef int &pint;
+align_typedef_amp_style = 2 # number
+
+# Whether to align macros wrapped with a backslash and a newline.
+# This will not work right if the macro contains a multi-line comment.
+align_nl_cont = true # false/true
+
+# The span for aligning struct/union (0=don't align)
+align_var_struct_span = 1 # number
+
+# The threshold for aligning struct/union member definitions (0=no limit)
+align_var_struct_thresh = 1 # number
+
+# The gap for aligning struct/union member definitions
+align_var_struct_gap = 1 # number
+
+# The span for aligning struct initializer values (0=don't align)
+align_struct_init_span = 1 # number
+
+# The gap for aligning variable definitions
+align_var_def_gap = 1 # number
+
+# The span for aligning on '=' in assignments (0=don't align)
+align_assign_span = 0 # number
+
+# The span for aligning on '=' in enums (0=don't align)
+align_enum_equ_span = 0 # number
+
+# The span for aligning variable definitions (0=don't align)
+align_var_def_span = 0 # number
+
+# Add or remove newline at the end of the file
+nl_end_of_file = force # ignore/add/remove/force
+
+# The number of newlines at the end of the file (only used if nl_end_of_file is 'add' or 'force')
+nl_end_of_file_min = 1 # number
+
+# Add or remove newline between '=' and '{'
+nl_assign_brace = remove # ignore/add/remove/force
+
+# Add or remove newline between 'enum' and '{'
+nl_enum_brace = force # ignore/add/remove/force
+
+# Add or remove newline between 'struct' and '{'
+nl_struct_brace = force # ignore/add/remove/force
+
+# Add or remove newline between 'union' and '{'
+nl_union_brace = force # ignore/add/remove/force
+
+# Add or remove newline between 'if' and '{'
+nl_if_brace = remove # ignore/add/remove/force, FF: was 'force'
+
+# Add or remove newline between '}' and 'else'
+nl_brace_else = remove # ignore/add/remove/force, FF: was 'force'
+
+# Add or remove newline between a function call's ')' and '{', as in:
+# list_for_each(item, &list) { }
+nl_fcall_brace = force # ignore/add/remove/force
+
+# Add or remove newline between 'else if' and '{'
+# If set to ignore, nl_if_brace is used instead
+nl_elseif_brace = remove # ignore/add/remove/force, FF: was 'force'
+
+# Add or remove newline between 'else' and '{'
+nl_else_brace = remove # ignore/add/remove/force, FF: was 'force'
+
+# Add or remove newline between 'else' and 'if'
+nl_else_if = remove # ignore/add/remove/force
+
+# Add or remove newline between 'for' and '{'
+nl_for_brace = remove # ignore/add/remove/force
+
+# Add or remove newline between 'while' and '{'
+nl_while_brace = remove # ignore/add/remove/force, FF: was 'force'
+
+# Add or remove newline between 'do' and '{'
+nl_do_brace = force # ignore/add/remove/force
+
+# Add or remove newline between '}' and 'while' of 'do' statement
+nl_brace_while = force # ignore/add/remove/force
+
+# Add or remove newline between 'switch' and '{'
+nl_switch_brace = remove # ignore/add/remove/force
+
+# Add or remove newline between return type and function name in definition
+nl_func_type_name = force # ignore/add/remove/force
+
+# Add or remove newline between return type and function name in a prototype
+nl_func_proto_type_name = remove # ignore/add/remove/force
+
+# Add or remove newline between a function name and the opening '('
+nl_func_paren = remove # ignore/add/remove/force
+
+# Add or remove newline after '(' in a function declaration
+nl_func_decl_start = remove # ignore/add/remove/force
+
+# Add or remove newline after each ',' in a function declaration
+nl_func_decl_args = ignore # ignore/add/remove/force
+
+# Add or remove newline before the ')' in a function declaration
+nl_func_decl_end = remove # ignore/add/remove/force
+
+# Add or remove newline between function signature and '{'
+nl_fdef_brace = force # ignore/add/remove/force
+
+# The number of newlines after '}' of a multi-line function body
+nl_after_func_body = 2 # number
+
+# The number of newlines after '}' of a single line function body
+nl_after_func_body_one_liner = 2 # number
+
+# The number of newlines after a block of variable definitions
+nl_func_var_def_blk = 1 # number
+
+# The minimum number of newlines before a multi-line comment.
+# Doesn't apply if after a brace open or another multi-line comment.
+nl_before_block_comment = 2 # number
+
+# The minimum number of newlines before a single-line C comment.
+# Doesn't apply if after a brace open or other single-line C comments.
+nl_before_c_comment = 2 # number
+
+# The minimum number of newlines before a CPP comment.
+# Doesn't apply if after a brace open or other CPP comments.
+nl_before_cpp_comment = 2 # number
+
+# Don't touch one-line braced assignments - 'foo_t f = { 1, 2 };'
+nl_assign_leave_one_liners = true # false/true
+
+# Whether to not put blanks after '#ifxx', '#elxx', or before '#endif'
+nl_squeeze_ifdef = true # false/true
+
+# Whether to remove blank lines after '{'
+eat_blanks_after_open_brace = true # false/true
+
+# Whether to remove blank lines before '}'
+eat_blanks_before_close_brace = true # false/true
+
+# Whether to put a star on subsequent comment lines
+cmt_star_cont = true # false/true
+
+# Whether to group c-comments that look like they are in a block
+cmt_c_group = true # false/true
+
+# Whether to group cpp-comments that look like they are in a block
+cmt_cpp_group = false # false/true
+
+# Whether to change cpp-comments into c-comments
+cmt_cpp_to_c = false # false/true
+
+# If pp_indent_at_level=false, specifies the number of columns to indent per
+# level. Default=1.
+pp_indent_count = indent_columns # number
+
+# Add or remove indent of preprocessor directives
+pp_indent = remove # ignore/add/remove/force FF: was 'force'
+
+# Try to limit code width to N number of columns
+code_width = 100 # number
+
+# Whether to fully split long function protos/calls at commas
+ls_func_split_full = false # false/true, FF: was 'true'
+
diff --git a/tools/yacc.py b/tools/yacc.py
new file mode 100644
index 0000000..1352e96
--- /dev/null
+++ b/tools/yacc.py
@@ -0,0 +1,3448 @@
+# -----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Copyright (C) 2001-2015,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# -----------------------------------------------------------------------------
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings. The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist). However, most of the variables used during table
+# construction are defined in terms of global variables. Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing. LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what might be
+# considered good Python "coding style." Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+import re
+import types
+import sys
+import os.path
+import inspect
+import base64
+import warnings
+
+__version__ = '3.8'
+__tabversion__ = '3.8'
+
+#-----------------------------------------------------------------------------
+# === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug = True # Debugging mode. If set, yacc generates a
+ # a 'parser.out' file in the current directory
+
+debug_file = 'parser.out' # Default name of the debugging file
+tab_module = 'parsetab' # Default name of the table module
+default_lr = 'LALR' # Default LR table generation method
+
+error_count = 3 # Number of symbols that must be shifted to leave recovery mode
+
+yaccdevel = False # Set to True if developing yacc. This turns off optimized
+ # implementations of certain functions.
+
+resultlimit = 40 # Size limit of results when running in debug mode.
+
+pickle_protocol = 0 # Protocol to use when writing pickle files
+
+# String type-checking compatibility
+if sys.version_info[0] < 3:
+ string_types = basestring
+else:
+ string_types = str
+
+MAXINT = sys.maxsize
+
+# This object is a stand-in for a logging object created by the
+# logging module. PLY will use this by default to create things
+# such as the parser.out file. If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+ def __init__(self, f):
+ self.f = f
+
+ def debug(self, msg, *args, **kwargs):
+ self.f.write((msg % args) + '\n')
+
+ info = debug
+
+ def warning(self, msg, *args, **kwargs):
+ self.f.write('WARNING: ' + (msg % args) + '\n')
+
+ def error(self, msg, *args, **kwargs):
+ self.f.write('ERROR: ' + (msg % args) + '\n')
+
+ critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self, name):
+ return self
+
+ def __call__(self, *args, **kwargs):
+ return self
+
+# Exception raised for yacc-related errors
+class YaccError(Exception):
+ pass
+
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r):
+ repr_str = repr(r)
+ if '\n' in repr_str:
+ repr_str = repr(repr_str)
+ if len(repr_str) > resultlimit:
+ repr_str = repr_str[:resultlimit] + ' ...'
+ result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
+ return result
+
+# Format stack entries when the parser is running in debug mode
+def format_stack_entry(r):
+ repr_str = repr(r)
+ if '\n' in repr_str:
+ repr_str = repr(repr_str)
+ if len(repr_str) < 16:
+ return repr_str
+ else:
+ return '<%s @ 0x%x>' % (type(r).__name__, id(r))
+
+# Panic mode error recovery support. This feature is being reworked--much of the
+# code here is to offer a deprecation/backwards compatible transition
+
+_errok = None
+_token = None
+_restart = None
+_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
+Instead, invoke the methods on the associated parser instance:
+
+ def p_error(p):
+ ...
+ # Use parser.errok(), parser.token(), parser.restart()
+ ...
+
+ parser = yacc.yacc()
+'''
+
+def errok():
+ warnings.warn(_warnmsg)
+ return _errok()
+
+def restart():
+ warnings.warn(_warnmsg)
+ return _restart()
+
+def token():
+ warnings.warn(_warnmsg)
+ return _token()
+
+# Utility function to call the p_error() function with some deprecation hacks
+def call_errorfunc(errorfunc, token, parser):
+ global _errok, _token, _restart
+ _errok = parser.errok
+ _token = parser.token
+ _restart = parser.restart
+ r = errorfunc(token)
+ try:
+ del _errok, _token, _restart
+ except NameError:
+ pass
+ return r
+
+#-----------------------------------------------------------------------------
+# === LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself. These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+# .type = Grammar symbol type
+# .value = Symbol value
+# .lineno = Starting line number
+# .endlineno = Ending line number (optional, set automatically)
+# .lexpos = Starting lex position
+# .endlexpos = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+ def __str__(self):
+ return self.type
+
+ def __repr__(self):
+ return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule. Index lookup and assignment actually assign the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined). The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
+
+class YaccProduction:
+ def __init__(self, s, stack=None):
+ self.slice = s
+ self.stack = stack
+ self.lexer = None
+ self.parser = None
+
+ def __getitem__(self, n):
+ if isinstance(n, slice):
+ return [s.value for s in self.slice[n]]
+ elif n >= 0:
+ return self.slice[n].value
+ else:
+ return self.stack[n].value
+
+ def __setitem__(self, n, v):
+ self.slice[n].value = v
+
+ def __getslice__(self, i, j):
+ return [s.value for s in self.slice[i:j]]
+
+ def __len__(self):
+ return len(self.slice)
+
+ def lineno(self, n):
+ return getattr(self.slice[n], 'lineno', 0)
+
+ def set_lineno(self, n, lineno):
+ self.slice[n].lineno = lineno
+
+ def linespan(self, n):
+ startline = getattr(self.slice[n], 'lineno', 0)
+ endline = getattr(self.slice[n], 'endlineno', startline)
+ return startline, endline
+
+ def lexpos(self, n):
+ return getattr(self.slice[n], 'lexpos', 0)
+
+ def lexspan(self, n):
+ startpos = getattr(self.slice[n], 'lexpos', 0)
+ endpos = getattr(self.slice[n], 'endlexpos', startpos)
+ return startpos, endpos
+
+ def error(self):
+ raise SyntaxError
+
+# -----------------------------------------------------------------------------
+# == LRParser ==
+#
+# The LR Parsing engine.
+# -----------------------------------------------------------------------------
+
+class LRParser:
+ def __init__(self, lrtab, errorf):
+ self.productions = lrtab.lr_productions
+ self.action = lrtab.lr_action
+ self.goto = lrtab.lr_goto
+ self.errorfunc = errorf
+ self.set_defaulted_states()
+ self.errorok = True
+
+ def errok(self):
+ self.errorok = True
+
+ def restart(self):
+ del self.statestack[:]
+ del self.symstack[:]
+ sym = YaccSymbol()
+ sym.type = '$end'
+ self.symstack.append(sym)
+ self.statestack.append(0)
+
+ # Defaulted state support.
+ # This method identifies parser states where there is only one possible reduction action.
+    # For such states, the parser can choose to make a rule reduction without consuming
+ # the next look-ahead token. This delayed invocation of the tokenizer can be useful in
+ # certain kinds of advanced parsing situations where the lexer and parser interact with
+ # each other or change states (i.e., manipulation of scope, lexer states, etc.).
+ #
+ # See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
+ def set_defaulted_states(self):
+ self.defaulted_states = {}
+ for state, actions in self.action.items():
+ rules = list(actions.values())
+ if len(rules) == 1 and rules[0] < 0:
+ self.defaulted_states[state] = rules[0]
+
+ def disable_defaulted_states(self):
+ self.defaulted_states = {}
+
+ def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ if debug or yaccdevel:
+ if isinstance(debug, int):
+ debug = PlyLogger(sys.stderr)
+ return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
+ elif tracking:
+ return self.parseopt(input, lexer, debug, tracking, tokenfunc)
+ else:
+ return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
+
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parsedebug().
+ #
+ # This is the debugging enabled version of parse(). All changes made to the
+ # parsing engine should be made here. Optimized versions of this function
+ # are automatically created by the ply/ygen.py script. This script cuts out
+ # sections enclosed in markers such as this:
+ #
+ # #--! DEBUG
+ # statements
+ # #--! DEBUG
+ #
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ #--! parsedebug-start
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ defaulted_states = self.defaulted_states # Local reference to defaulted states
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ #--! DEBUG
+ debug.info('PLY: PARSE DEBUG START')
+ #--! DEBUG
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ from . import lex
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser() token method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while True:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ #--! DEBUG
+ debug.debug('')
+ debug.debug('State : %s', state)
+ #--! DEBUG
+
+ if state not in defaulted_states:
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+ else:
+ t = defaulted_states[state]
+ #--! DEBUG
+ debug.debug('Defaulted state %s: Reduce using %d', state, -t)
+ #--! DEBUG
+
+ #--! DEBUG
+ debug.debug('Stack : %s',
+ ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ #--! DEBUG
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ #--! DEBUG
+ debug.debug('Action : Shift and goto state %s', t)
+ #--! DEBUG
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount:
+ errorcount -= 1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ #--! DEBUG
+ if plen:
+ debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
+ '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
+ goto[statestack[-1-plen]][pname])
+ else:
+ debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
+ goto[statestack[-1]][pname])
+
+ #--! DEBUG
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ #--! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
+ sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
+ #--! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ del statestack[-plen:]
+ p.callable(pslice)
+ #--! DEBUG
+ debug.info('Result : %s', format_result(pslice[0]))
+ #--! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ #--! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ #--! TRACKING
+
+ targ = [sym]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ p.callable(pslice)
+ #--! DEBUG
+ debug.info('Result : %s', format_result(pslice[0]))
+ #--! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n, 'value', None)
+ #--! DEBUG
+ debug.info('Done : Returning %s', format_result(result))
+ debug.info('PLY: PARSE DEBUG END')
+ #--! DEBUG
+ return result
+
+ if t is None:
+
+ #--! DEBUG
+ debug.error('Error : %s',
+ ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ #--! DEBUG
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call
+ # the user defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = False
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken, 'lexer'):
+ errtoken.lexer = lexer
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken, 'lineno'):
+ lineno = lookahead.lineno
+ else:
+ lineno = 0
+ if lineno:
+ sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
+ else:
+ sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
+ else:
+ sys.stderr.write('yacc: Parse error in input. EOF\n')
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ #--! TRACKING
+ if tracking:
+ sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
+ sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
+ #--! TRACKING
+ lookahead = None
+ continue
+
+ # Create the error symbol for the first time and make it the new lookahead symbol
+ t = YaccSymbol()
+ t.type = 'error'
+
+ if hasattr(lookahead, 'lineno'):
+ t.lineno = t.endlineno = lookahead.lineno
+ if hasattr(lookahead, 'lexpos'):
+ t.lexpos = t.endlexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ #--! TRACKING
+ if tracking:
+ lookahead.lineno = sym.lineno
+ lookahead.lexpos = sym.lexpos
+ #--! TRACKING
+ statestack.pop()
+ state = statestack[-1]
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError('yacc: internal parser error!!!\n')
+
+ #--! parsedebug-end
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt().
+ #
+ # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
+ # This code is automatically generated by the ply/ygen.py script. Make
+ # changes to the parsedebug() method instead.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
    def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Optimized LALR parsing engine.

        Auto-generated from parsedebug() by ply/ygen.py -- do not edit this
        body directly.  The *debug* argument is accepted only for interface
        compatibility and is ignored here.

        input     -- optional source string handed to the lexer
        lexer     -- lexer object; defaults to the module-level lex lexer
        tracking  -- when true, line/position info is propagated to symbols
        tokenfunc -- optional replacement for lexer.token

        Returns the semantic value of the start symbol, or None if the parse
        fails without recovery.
        """
        #--! parseopt-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto                      # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions               # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice  = YaccProduction(None)           # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery


        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks

        statestack = []                          # Stack of parsing states
        self.statestack = statestack
        symstack = []                            # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack                  # Put in the production
        errtoken = None                          # Err token

        # The start state is assumed to be (0,$end)

        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer


            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]


            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t


                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None


                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING

                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result

            if t is None:


                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here.   Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]

                continue

        # Call an error function here
        raise RuntimeError('yacc: internal parser error!!!\n')

        #--! parseopt-end
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt_notrack().
+ #
+ # Optimized version of parseopt() with line number tracking removed.
+ # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
+ # by the ply/ygen.py script. Make changes to the parsedebug() method instead.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
    def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Optimized LALR parsing engine with position tracking stripped out.

        Auto-generated from parsedebug() by ply/ygen.py -- do not edit this
        body directly.  Both *debug* and *tracking* are accepted only for
        interface compatibility and are ignored here.

        input     -- optional source string handed to the lexer
        lexer     -- lexer object; defaults to the module-level lex lexer
        tokenfunc -- optional replacement for lexer.token

        Returns the semantic value of the start symbol, or None if the parse
        fails without recovery.
        """
        #--! parseopt-notrack-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto                      # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions               # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice  = YaccProduction(None)           # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery


        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks

        statestack = []                          # Stack of parsing states
        self.statestack = statestack
        symstack = []                            # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack                  # Put in the production
        errtoken = None                          # Err token

        # The start state is assumed to be (0,$end)

        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer


            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]


            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t


                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None


                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym


                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:


                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result

            if t is None:


                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here.   Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    statestack.pop()
                    state = statestack[-1]

                continue

        # Call an error function here
        raise RuntimeError('yacc: internal parser error!!!\n')

        #--! parseopt-notrack-end
+
+# -----------------------------------------------------------------------------
+# === Grammar Representation ===
+#
+# The following functions, classes, and variables are used to represent and
+# manipulate the rules that make up a grammar.
+# -----------------------------------------------------------------------------
+
# Regex matching valid rule/symbol identifiers.
# NOTE(review): this pattern also accepts hyphens and all-digit names,
# which ordinary Python identifiers would not -- confirm that is intentional.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# A grammar rule refers to a specification such as this:
+#
+# expr : expr PLUS term
+#
+# Here are the basic attributes defined on all productions
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','PLUS','term']
+# prec - Production precedence level
+# number - Production number.
+# func - Function that executes on reduce
+# file - File where production function is defined
+# lineno - Line number where production function is defined
+#
+# The following attributes are defined or optional.
+#
+# len - Length of the production (number of symbols on right hand side)
+# usyms - Set of unique symbols found in the production
+# -----------------------------------------------------------------------------
+
class Production(object):
    """Raw information about a single grammar rule such as 'expr : expr PLUS term'.

    Attributes:
      name     -- left-hand-side name, e.g. 'expr'
      prod     -- tuple of right-hand-side symbols, e.g. ('expr', 'PLUS', 'term')
      prec     -- (associativity, level) precedence pair
      number   -- production number
      func     -- name of the action function run on reduce
      callable -- the resolved action function (set by bind())
      file     -- file where the action function is defined
      line     -- line number of the action function
      len      -- number of right-hand-side symbols
      usyms    -- unique symbols of the right-hand side, in first-seen order
      lr_items -- LR items for this production (filled during table build)
    """
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Derived data used during table construction.
        self.len = len(self.prod)

        # Unique right-hand-side symbols, preserving first-seen order.
        unique = []
        for symbol in self.prod:
            if symbol not in unique:
                unique.append(symbol)
        self.usyms = unique

        # LR items for this production (populated later).
        self.lr_items = []
        self.lr_next = None

        # Cached human-readable form, e.g. "expr -> expr PLUS term".
        if self.prod:
            self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
        else:
            self.str = '%s -> <empty>' % self.name

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'Production(%s)' % self

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truthiness hook; every production is truthy.
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LR item with the dot at position *n*, or None past the end."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # Precompute the list of productions immediately following the dot.
        try:
            item.lr_after = Prodnames[item.prod[n+1]]
        except (IndexError, KeyError):
            item.lr_after = []
        # Symbol immediately before the dot (index -1 wraps; kept as original).
        try:
            item.lr_before = item.prod[n-1]
        except IndexError:
            item.lr_before = None
        return item

    def bind(self, pdict):
        """Resolve the stored action-function name to a callable from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
+
+# This class serves as a minimal standin for Production objects when
+# reading table data from files. It only contains information
+# actually used by the LR parsing engine, plus some additional
+# debugging information.
class MiniProduction(object):
    """Minimal stand-in for Production when tables are loaded from files.

    Holds only what the LR parsing engine actually needs at parse time
    (name, length, action function), plus a little debugging information.
    Parameter names *str* and *len* mirror the original interface.
    """

    def __init__(self, str, name, len, func, file, line):
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.str = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'MiniProduction(%s)' % self.str

    def bind(self, pdict):
        """Resolve the stored action-function name to a callable from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
+
+
+# -----------------------------------------------------------------------------
+# class LRItem
+#
+# This class represents a specific stage of parsing a production rule. For
+# example:
+#
+# expr : expr . PLUS term
+#
+# In the above, the "." represents the current location of the parse. Here
+# basic attributes:
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
+# number - Production number.
+#
+# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
+# then lr_next refers to 'expr -> expr PLUS . term'
+# lr_index - LR item index (location of the ".") in the prod list.
+# lookaheads - LALR lookahead symbols for this item
+# len - Length of the production (number of symbols on right hand side)
+# lr_after - List of all productions that immediately follow
+# lr_before - Grammar symbol immediately before
+# -----------------------------------------------------------------------------
+
class LRItem(object):
    """A production with a dot ('.') marking the current parse position.

    For 'expr : expr . PLUS term' the dot sits after the first 'expr'.
    prod holds the dotted right-hand side; lr_index is the dot's position;
    lookaheads maps states to LALR lookahead symbols (filled in later).
    """

    def __init__(self, p, n):
        self.name = p.name
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        # Build the dotted right-hand side by inserting '.' at position n.
        dotted = list(p.prod)
        dotted.insert(n, '.')
        self.prod = tuple(dotted)
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if self.prod:
            return '%s -> %s' % (self.name, ' '.join(self.prod))
        return '%s -> <empty>' % self.name

    def __repr__(self):
        return 'LRItem(%s)' % self
+
+# -----------------------------------------------------------------------------
+# rightmost_terminal()
+#
+# Return the rightmost terminal from a list of symbols. Used in add_production()
+# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the right-most member of *symbols* found in *terminals*, or None.

    Used by add_production() to derive a rule's default precedence.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
+
+# -----------------------------------------------------------------------------
+# === GRAMMAR CLASS ===
+#
+# The following class represents the contents of the specified grammar along
+# with various computed properties such as first sets, follow sets, LR items, etc.
+# This data is used for critical parts of the table generation process later.
+# -----------------------------------------------------------------------------
+
class GrammarError(YaccError):
    """Raised when the input grammar specification is malformed."""
    pass
+
+class Grammar(object):
    def __init__(self, terminals):
        """Initialize an empty grammar over the given iterable of terminal names.

        'error' is always registered as an implicit terminal for error recovery.
        """
        self.Productions = [None]  # A list of all of the productions.  The first
                                   # entry is always reserved for the purpose of
                                   # building an augmented grammar

        self.Prodnames = {}        # A dictionary mapping the names of nonterminals to a list of all
                                   # productions of that nonterminal.

        self.Prodmap = {}          # A dictionary that is only used to detect duplicate
                                   # productions.

        self.Terminals = {}        # A dictionary mapping the names of terminal symbols to a
                                   # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        self.Terminals['error'] = []

        self.Nonterminals = {}     # A dictionary mapping names of nonterminals to a list
                                   # of rule numbers where they are used.

        self.First = {}            # A dictionary of precomputed FIRST(x) symbols

        self.Follow = {}           # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence = {}       # Precedence rules for each terminal. Contains tuples of the
                                   # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.

        self.Start = None          # Starting symbol for the grammar
+
    def __len__(self):
        """Number of productions, including the reserved augmented slot 0."""
        return len(self.Productions)
+
    def __getitem__(self, index):
        """Return production *index* (index 0 is the augmented S' rule)."""
        return self.Productions[index]
+
+ # -----------------------------------------------------------------------------
+ # set_precedence()
+ #
+ # Sets the precedence for a given terminal. assoc is the associativity such as
+ # 'left','right', or 'nonassoc'. level is a numeric level.
+ #
+ # -----------------------------------------------------------------------------
+
+ def set_precedence(self, term, assoc, level):
+ assert self.Productions == [None], 'Must call set_precedence() before add_production()'
+ if term in self.Precedence:
+ raise GrammarError('Precedence already specified for terminal %r' % term)
+ if assoc not in ['left', 'right', 'nonassoc']:
+ raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
+ self.Precedence[term] = (assoc, level)
+
+ # -----------------------------------------------------------------------------
+ # add_production()
+ #
+ # Given an action function, this function assembles a production rule and
+ # computes its precedence level.
+ #
+ # The production rule is supplied as a list of symbols. For example,
+ # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
+ # symbols ['expr','PLUS','term'].
+ #
+ # Precedence is determined by the precedence of the right-most non-terminal
+ # or the precedence of a terminal specified by %prec.
+ #
+ # A variety of error checks are performed to make sure production symbols
+ # are valid and that %prec is used correctly.
+ # -----------------------------------------------------------------------------
+
    def add_production(self, prodname, syms, func=None, file='', line=0):
        """Add one grammar rule: prodname -> syms.

        *syms* is the right-hand side as a list of symbol names.  Quoted
        single-character literals are accepted and registered as terminals.
        A trailing ['%prec', TERM] pair overrides the rule's precedence and
        is removed from *syms* in place; otherwise precedence comes from the
        rightmost terminal.  *func* is the name of the action function;
        *file*/*line* locate the rule for error messages.

        Raises GrammarError for illegal names, bad literals, misplaced
        %prec, unknown precedence, or duplicate rules.
        """

        if prodname in self.Terminals:
            raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
        if prodname == 'error':
            raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))

        # Look for literal tokens
        for n, s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    # NOTE(review): eval() of a quoted literal from the grammar
                    # text; inputs are grammar specifications, not end-user data.
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
                                           (file, line, s, prodname))
                    if c not in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
            if syms[-2] != '%prec':
                raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
                                   (file, line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname)
            if not prodprec:
                raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
            else:
                self.UsedPrecedence.add(precname)
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms, self.Terminals)
            prodprec = self.Precedence.get(precname, ('right', 0))

        # See if the rule is already in the rulemap
        # ('map' intentionally shadows the builtin here; kept as-is)
        map = '%s -> %s' % (prodname, syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
                               'Previous definition at %s:%d' % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber = len(self.Productions)
        if prodname not in self.Nonterminals:
            self.Nonterminals[prodname] = []

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if t not in self.Nonterminals:
                    self.Nonterminals[t] = []
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber, prodname, syms, prodprec, func, file, line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [p]
+
+ # -----------------------------------------------------------------------------
+ # set_start()
+ #
+ # Sets the starting symbol and creates the augmented grammar. Production
+ # rule 0 is S' -> start where start is the start symbol.
+ # -----------------------------------------------------------------------------
+
    def set_start(self, start=None):
        """Set the start symbol and install the augmented rule S' -> start.

        If *start* is omitted, the left-hand side of the first added rule is
        used.  Raises GrammarError if the symbol is not a known nonterminal.
        """
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError('start symbol %s undefined' % start)
        self.Productions[0] = Production(0, "S'", [start])
        self.Nonterminals[start].append(0)
        self.Start = start
+
+ # -----------------------------------------------------------------------------
+ # find_unreachable()
+ #
+ # Find all of the nonterminal symbols that can't be reached from the starting
+ # symbol. Returns a list of nonterminals that can't be reached.
+ # -----------------------------------------------------------------------------
+
+ def find_unreachable(self):
+
+ # Mark all symbols that are reachable from a symbol s
+ def mark_reachable_from(s):
+ if s in reachable:
+ return
+ reachable.add(s)
+ for p in self.Prodnames.get(s, []):
+ for r in p.prod:
+ mark_reachable_from(r)
+
+ reachable = set()
+ mark_reachable_from(self.Productions[0].prod[0])
+ return [s for s in self.Nonterminals if s not in reachable]
+
+ # -----------------------------------------------------------------------------
+ # infinite_cycles()
+ #
+ # This function looks at the various parsing rules and tries to detect
+ # infinite recursion cycles (grammar rules where there is no possible way
+ # to derive a string of only terminals).
+ # -----------------------------------------------------------------------------
+
    def infinite_cycles(self):
        """Return symbols that can never derive a string of only terminals.

        Computes a fixed point: terminals trivially terminate; a nonterminal
        terminates once any of its productions consists solely of terminating
        symbols.  Symbols still non-terminating at the fixed point belong to
        infinite recursion cycles (used-but-undefined symbols are excluded,
        since they are reported separately).
        """
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = True

        terminates['$end'] = True

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = False

        # Then propagate termination until no change:
        while True:
            some_change = False
            for (n, pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = False
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = True

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = True
                            some_change = True
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s, term) in terminates.items():
            if not term:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite
+
+ # -----------------------------------------------------------------------------
+ # undefined_symbols()
+ #
+ # Find all symbols that were used the grammar, but not defined as tokens or
+ # grammar rules. Returns a list of tuples (sym, prod) where sym in the symbol
+ # and prod is the production where the symbol was used.
+ # -----------------------------------------------------------------------------
+ def undefined_symbols(self):
+ result = []
+ for p in self.Productions:
+ if not p:
+ continue
+
+ for s in p.prod:
+ if s not in self.Prodnames and s not in self.Terminals and s != 'error':
+ result.append((s, p))
+ return result
+
def unused_terminals(self):
    """Return the list of terminals that were defined but never appear
    in any production ('error' is always considered used)."""
    return [tok for tok, uses in self.Terminals.items()
            if tok != 'error' and not uses]
+
def unused_rules(self):
    """Return one Production for each nonterminal that was defined but
    never referenced anywhere (i.e. possibly unreachable)."""
    return [self.Prodnames[name][0]
            for name, refs in self.Nonterminals.items()
            if not refs]
+
def unused_precedence(self):
    """Return (term, precedence) tuples for precedence declarations the
    grammar never used; precedence is 'left', 'right', or 'nonassoc'."""
    unused = []
    for name, spec in self.Precedence.items():
        if name in self.Terminals or name in self.UsedPrecedence:
            continue
        unused.append((name, spec[0]))
    return unused
+
+ # -------------------------------------------------------------------------
+ # _first()
+ #
+ # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+ #
+ # During execution of compute_first1, the result may be incomplete.
+ # Afterward (e.g., when called from compute_follow()), it will be complete.
+ # -------------------------------------------------------------------------
+ def _first(self, beta):
+
+ # We are computing First(x1,x2,x3,...,xn)
+ result = []
+ for x in beta:
+ x_produces_empty = False
+
+ # Add all the non-<empty> symbols of First[x] to the result.
+ for f in self.First[x]:
+ if f == '<empty>':
+ x_produces_empty = True
+ else:
+ if f not in result:
+ result.append(f)
+
+ if x_produces_empty:
+ # We have to consider the next x in beta,
+ # i.e. stay in the loop.
+ pass
+ else:
+ # We don't have to consider any further symbols in beta.
+ break
+ else:
+ # There was no 'break' from the loop,
+ # so x_produces_empty was true for all x in beta,
+ # so beta produces empty as well.
+ result.append('<empty>')
+
+ return result
+
def compute_first(self):
    """Compute FIRST1(X) for every grammar symbol; memoized in self.First."""
    if self.First:
        return self.First

    # A terminal's FIRST set is just itself.
    for t in self.Terminals:
        self.First[t] = [t]
    self.First['$end'] = ['$end']

    # Nonterminals start empty and grow to a fixed point.
    for n in self.Nonterminals:
        self.First[n] = []

    changed = True
    while changed:
        changed = False
        for n in self.Nonterminals:
            for p in self.Prodnames[n]:
                for f in self._first(p.prod):
                    if f not in self.First[n]:
                        self.First[n].append(f)
                        changed = True
    return self.First
+
def compute_follow(self, start=None):
    """Compute FOLLOW sets for every nonterminal (Dragon book, 2nd ed.,
    p. 189); memoized in self.Follow.

    start names the grammar start symbol; defaults to the LHS of the
    first real production.
    """
    if self.Follow:
        return self.Follow

    if not self.First:
        self.compute_first()

    for nt in self.Nonterminals:
        self.Follow[nt] = []

    # '$end' follows the start symbol.
    if not start:
        start = self.Productions[1].name
    self.Follow[start] = ['$end']

    changed = True
    while changed:
        changed = False
        for p in self.Productions[1:]:
            for i, B in enumerate(p.prod):
                if B not in self.Nonterminals:
                    continue
                # Terminals in FIRST(rest-of-production) follow B.
                rest_first = self._first(p.prod[i+1:])
                nullable_rest = False
                for f in rest_first:
                    if f != '<empty>' and f not in self.Follow[B]:
                        self.Follow[B].append(f)
                        changed = True
                    if f == '<empty>':
                        nullable_rest = True
                # If the rest can vanish (or B is last),
                # FOLLOW(lhs) is a subset of FOLLOW(B).
                if nullable_rest or i == len(p.prod) - 1:
                    for f in self.Follow[p.name]:
                        if f not in self.Follow[B]:
                            self.Follow[B].append(f)
                            changed = True
    return self.Follow
+
+
def build_lritems(self):
    """Attach to every production its complete set of LR items.

    Each production p gets p.lr_items (one LRItem per dot position) and
    a linked list threaded through .lr_next.  For example, E -> E PLUS E
    yields [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E,
    E -> E PLUS E .].
    """
    for p in self.Productions:
        prev = p
        i = 0
        items = []
        while True:
            if i > len(p):
                item = None
            else:
                item = LRItem(p, i)
                # Precompute the productions reachable right after the dot.
                try:
                    item.lr_after = self.Prodnames[item.prod[i+1]]
                except (IndexError, KeyError):
                    item.lr_after = []
                # ...and the symbol immediately before the dot
                # (note: i == 0 intentionally yields prod[-1], as upstream).
                try:
                    item.lr_before = item.prod[i-1]
                except IndexError:
                    item.lr_before = None

            prev.lr_next = item
            if not item:
                break
            items.append(item)
            prev = item
            i += 1
        p.lr_items = items
+
+# -----------------------------------------------------------------------------
+# == Class LRTable ==
+#
+# This class represents a basic table of LR parsing information.
+# Methods for generating the tables are not defined here. They are defined
+# in the derived class LRGeneratedTable.
+# -----------------------------------------------------------------------------
+
class VersionError(YaccError):
    """Raised when cached parser tables were written by an incompatible
    version of this module."""
+
class LRTable(object):
    """A basic table of LR parsing information.

    Methods for *generating* the tables are not defined here; they live
    in the derived class LRGeneratedTable.  This class only loads
    previously generated tables.
    """

    def __init__(self):
        # All fields are populated by read_table()/read_pickle() or a subclass.
        self.lr_action = None       # action table: state -> {terminal: action}
        self.lr_goto = None         # goto table:  state -> {nonterminal: state}
        self.lr_productions = None  # list of MiniProduction objects
        self.lr_method = None       # 'SLR' or 'LALR'

    def read_table(self, module):
        """Load tables from a generated parser-table module (object or
        dotted name).  Returns the stored grammar signature.

        Raises VersionError if the table file version does not match.
        """
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            exec('import %s' % module)
            parsetab = sys.modules[module]

        if parsetab._tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        """Load tables from a pickle file written by pickle_table().
        Returns the stored grammar signature.

        Raises ImportError if the file does not exist and VersionError
        if its version does not match.
        """
        try:
            import cPickle as pickle
        except ImportError:
            import pickle

        if not os.path.exists(filename):
            raise ImportError

        # 'with' guarantees the handle is closed even if unpickling or the
        # version check raises (the original leaked the file on error).
        with open(filename, 'rb') as in_f:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError('yacc table file version is out of date')
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        return signature

    def bind_callables(self, pdict):
        """Bind all production function names to callable objects in pdict."""
        for p in self.lr_productions:
            p.bind(pdict)
+
+
+# -----------------------------------------------------------------------------
+# === LR Generator ===
+#
+# The following classes and functions are used to generate LR parsing tables on
+# a grammar.
+# -----------------------------------------------------------------------------
+
def digraph(X, R, FP):
    """Compute the set-valued function F(x) = FP(x) U U{ F(y) | x R y }.

    Used to compute Read() sets as well as FOLLOW sets in LALR(1)
    generation.  X is the input set, R a relation, FP a set-valued
    function.
    """
    N = dict.fromkeys(X, 0)
    stack = []
    F = {}
    for x in X:
        if N[x] == 0:
            traverse(x, N, stack, F, X, R, FP)
    return F


def traverse(x, N, stack, F, X, R, FP):
    """One depth-first visit of digraph()'s SCC-merging traversal."""
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)                   # F(x) <- F'(x)

    for y in R(x):                 # propagate from every y related to x
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        N[x] = min(N[x], N[y])
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)
    if N[x] == d:
        # x heads a strongly connected component: every member of the
        # component receives x's final set.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
+
class LALRError(YaccError):
    """Raised for unresolvable problems during LR table construction."""
+
class LRGeneratedTable(LRTable):
    """Implements the LR table generation algorithm for a Grammar.

    There are no public methods except for write_table()/pickle_table().
    """

    def __init__(self, grammar, method='LALR', log=None):
        if method not in ['SLR', 'LALR']:
            raise LALRError('Unsupported method %s' % method)

        self.grammar = grammar
        self.lr_method = method

        # Fall back to a do-nothing logger.
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {}                         # Action table
        self.lr_goto = {}                           # Goto table
        self.lr_productions = grammar.Productions   # Copy of grammar Production array
        self.lr_goto_cache = {}                     # Cache of computed gotos
        self.lr0_cidhash = {}                       # Cache of closures

        self._add_count = 0                         # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []                         # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
+
def lr0_closure(self, I):
    """Compute the LR(0) closure operation on I, a set of LR(0) items."""
    self._add_count += 1

    # Seed J with everything in I, then keep pulling in the items that
    # follow each dotted nonterminal until nothing new appears.
    J = I[:]
    added = True
    while added:
        added = False
        for item in J:
            for x in item.lr_after:
                # Skip productions already absorbed during this closure.
                if getattr(x, 'lr0_added', 0) == self._add_count:
                    continue
                # Add B --> .G to J
                J.append(x.lr_next)
                x.lr0_added = self._add_count
                added = True
    return J
+
def lr0_goto(self, I, x):
    """Compute the LR(0) goto function goto(I, x).

    Written so the same goto set is never returned as two different
    Python objects; this lets later code compare sets by id() instead
    of element-wise.
    """
    cached = self.lr_goto_cache.get((id(I), x))
    if cached:
        return cached

    # Walk a per-symbol trie keyed by item identity, so identical goto
    # sets always resolve to the same cached object.
    s = self.lr_goto_cache.setdefault(x, {})

    gs = []
    for item in I:
        nxt = item.lr_next
        if nxt and nxt.lr_before == x:
            s1 = s.get(id(nxt))
            if not s1:
                s1 = {}
                s[id(nxt)] = s1
            gs.append(nxt)
            s = s1
    g = s.get('$end')
    if not g:
        if gs:
            g = self.lr0_closure(gs)
            s['$end'] = g
        else:
            s['$end'] = gs
    self.lr_goto_cache[(id(I), x)] = g
    return g
+
def lr0_items(self):
    """Compute the canonical collection C of LR(0) item sets."""
    C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
    for idx, I in enumerate(C):
        self.lr0_cidhash[id(I)] = idx

    # Breadth-first expansion: C grows while we scan it.
    i = 0
    while i < len(C):
        I = C[i]
        i += 1

        # Every symbol after a dot in I could label a goto transition.
        asyms = {}
        for item in I:
            for s in item.usyms:
                asyms[s] = None

        for x in asyms:
            g = self.lr0_goto(I, x)
            if not g or id(g) in self.lr0_cidhash:
                continue
            self.lr0_cidhash[id(g)] = len(C)
            C.append(g)

    return C
+
+ # -----------------------------------------------------------------------------
+ # ==== LALR(1) Parsing ====
+ #
+ # LALR(1) parsing is almost exactly the same as SLR except that instead of
+ # relying upon Follow() sets when performing reductions, a more selective
+ # lookahead set that incorporates the state of the LR(0) machine is utilized.
+ # Thus, we mainly just have to focus on calculating the lookahead sets.
+ #
+    # The method used here is due to DeRemer and Pennello (1982).
+    #
+    #     DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
+ #
+ # Further details can also be found in:
+ #
+ # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+ # McGraw-Hill Book Company, (1985).
+ #
+ # -----------------------------------------------------------------------------
+
def compute_nullable_nonterminals(self):
    """Return the set of nonterminals that might produce an empty
    production (directly or transitively)."""
    nullable = set()
    while True:
        before = len(nullable)
        for p in self.grammar.Productions[1:]:
            if p.len == 0:
                nullable.add(p.name)
                continue
            if all(t in nullable for t in p.prod):
                nullable.add(p.name)
        # Stop once a full pass adds nothing new.
        if len(nullable) == before:
            return nullable
+
def find_nonterminal_transitions(self, C):
    """Find all nonterminal transitions in the LR(0) item sets C.

    A transition is a (state, N) tuple where the dot appears
    immediately before nonterminal N in state number state.
    """
    trans = []
    for stateno, state in enumerate(C):
        for item in state:
            if item.lr_index < item.len - 1:
                t = (stateno, item.prod[item.lr_index + 1])
                if t[1] in self.grammar.Nonterminals and t not in trans:
                    trans.append(t)
    return trans
+
def dr_relation(self, C, trans, nullable):
    """Compute DR(p, A) for the nonterminal transition trans=(state, N):
    the terminals that can be read directly after taking it.

    Returns a list of terminals.
    """
    state, N = trans
    terms = []

    g = self.lr0_goto(C[state], N)
    for item in g:
        if item.lr_index < item.len - 1:
            a = item.prod[item.lr_index + 1]
            if a in self.grammar.Terminals and a not in terms:
                terms.append(a)

    # The start state's transition on the real start symbol also reads '$end'.
    if state == 0 and N == self.grammar.Productions[0].prod[0]:
        terms.append('$end')

    return terms
+
def reads_relation(self, C, trans, empty):
    """Compute the READS() relation (p, A) READS (t, C): transitions
    reachable from trans across symbols that can derive empty."""
    state, N = trans
    rel = []

    g = self.lr0_goto(C[state], N)
    j = self.lr0_cidhash.get(id(g), -1)
    for item in g:
        if item.lr_index < item.len - 1:
            a = item.prod[item.lr_index + 1]
            if a in empty:
                rel.append((j, a))

    return rel
+
def compute_lookback_includes(self, C, trans, nullable):
    """Determine the LOOKBACK and INCLUDES relations.

    LOOKBACK: run each production forward through the LR(0) machine
    (from "N : . A B C" to "N : A B C .") and relate the final state
    back to the starting transition.

    INCLUDES: (p, A) INCLUDES (p', B) when B -> L A T with T nullable
    and p' leads to p on the string L.

    Returns (lookdict, includedict).
    """
    lookdict = {}       # lookback relations, keyed by (state, N)
    includedict = {}    # include relations, keyed by (state, t)

    # Fast membership test for nonterminal transitions.
    dtrans = dict.fromkeys(trans, 1)

    for state, N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N:
                continue

            # Follow the production through the state machine until the
            # dot reaches the right-hand end.
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index += 1
                t = p.prod[lr_index]

                # A nonterminal transition here is an INCLUDES relation
                # only if the remainder of the production derives empty.
                if (j, t) in dtrans:
                    li = lr_index + 1
                    while li < p.len:
                        if p.prod[li] in self.grammar.Terminals:
                            break       # a terminal can never vanish
                        if p.prod[li] not in nullable:
                            break
                        li += 1
                    else:
                        # Everything after t is nullable.
                        includes.append((j, t))

                g = self.lr0_goto(C[j], t)              # next item set
                j = self.lr0_cidhash.get(id(g), -1)     # next state number

            # j is now the final state; locate the completed item that
            # matches p ("A B C ." versus ". A B C").
            for r in C[j]:
                if r.name != p.name or r.len != p.len:
                    continue
                i = 0
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i + 1]:
                        break
                    i += 1
                else:
                    lookb.append((j, r))

        for inc in includes:
            includedict.setdefault(inc, []).append((state, N))
        lookdict[(state, N)] = lookb

    return lookdict, includedict
+
def compute_read_sets(self, C, ntrans, nullable):
    """Compute the Read() sets for the nonterminal transitions ntrans
    over the LR(0) item sets C, using the digraph algorithm."""
    return digraph(ntrans,
                   R=lambda x: self.reads_relation(C, x, nullable),
                   FP=lambda x: self.dr_relation(C, x, nullable))
+
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute LALR follow sets:

        Follow(p,A) = Read(p,A) U U{Follow(p',B) | (p,A) INCLUDES (p',B)}

    from previously computed read sets and include sets.
    """
    return digraph(ntrans,
                   R=lambda x: inclsets.get(x, []),
                   FP=lambda x: readsets[x])
+
def add_lookaheads(self, lookbacks, followset):
    """Attach lookahead symbols to the productions referenced by the
    lookback relations, using the computed follow sets."""
    for trans, lb in lookbacks.items():
        follow = followset.get(trans, [])
        for state, p in lb:
            la = p.lookaheads.setdefault(state, [])
            for a in follow:
                if a not in la:
                    la.append(a)
+
def add_lalr_lookaheads(self, C):
    """Run the full DeRemer/Pennello pipeline to add LALR lookahead
    information for the LR(0) item sets C."""
    # Nullable nonterminals -> nonterminal transitions -> Read sets
    # -> lookback/includes -> LALR FOLLOW sets -> attach lookaheads.
    nullable = self.compute_nullable_nonterminals()
    trans = self.find_nonterminal_transitions(C)
    readsets = self.compute_read_sets(C, trans, nullable)
    lookd, included = self.compute_lookback_includes(C, trans, nullable)
    followsets = self.compute_follow_sets(trans, readsets, included)
    self.add_lookaheads(lookd, followsets)
+
def lr_parse_table(self):
    """Construct the SLR or LALR action/goto tables, state by state.

    Fills in self.lr_action and self.lr_goto; shift/reduce and
    reduce/reduce conflicts are recorded in self.sr_conflicts and
    self.rr_conflicts.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto         # Goto array
    action = self.lr_action     # Action array
    log = self.log              # Logger for output

    actionp = {}                # Action production array (temporary)

    log.info('Parsing method: %s', self.lr_method)

    # Step 1: Construct C = {I0, I1, ... IN}, the collection of LR(0)
    # items; this determines the number of states.
    C = self.lr0_items()

    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)

    # Build the parser table, state by state
    st = 0
    for I in C:
        actlist = []            # List of actions
        st_action = {}
        st_actionp = {}
        st_goto = {}
        log.info('')
        log.info('state %d', st)
        log.info('')
        for p in I:
            log.info('    (%d) %s', p.number, p)
        log.info('')

        for p in I:
            if p.len == p.lr_index + 1:
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action['$end'] = 0
                    st_actionp['$end'] = p
                else:
                    # End of a production. Reduce!
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
                        r = st_action.get(a)
                        if r is not None:
                            # Shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Shift/reduce: shifting wins by default
                                # unless precedence says otherwise.
                                sprec, slevel = Productions[st_actionp[a].number].prec
                                rprec, rlevel = Precedence.get(a, ('right', 0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Keep the shift.
                                    if not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                            elif r < 0:
                                # Reduce/reduce: favor the rule defined
                                # first in the grammar file.
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp, rejectp = pp, oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp, rejectp = oldp, pp
                                self.rr_conflicts.append((st, chosenp, rejectp))
                                log.info('  ! reduce/reduce conflict for %s resolved using rule %d (%s)',
                                         a, st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1]         # Symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I, a)
                    j = self.lr0_cidhash.get(id(g), -1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a, p, 'shift and go to state %d' % j))
                        r = st_action.get(a)
                        if r is not None:
                            # Shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError('Shift/shift conflict in state %d' % st)
                            elif r < 0:
                                # Precedence check:
                                # - reduce if the reduce rule's precedence is higher,
                                # - reduce on a tie with left associativity,
                                # - otherwise shift.
                                rprec, rlevel = Productions[st_actionp[a].number].prec
                                sprec, slevel = Precedence.get(a, ('right', 0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # Highest precedence to shift.
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Keep the reduce.
                                    if not slevel and not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))

                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            st_action[a] = j
                            st_actionp[a] = p

        # Print the actions associated with each terminal
        _actprint = {}
        for a, p, m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info('    %-15s %s', a, m)
                    _actprint[(a, m)] = 1
        log.info('')
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a, p, m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a, m) in _actprint:
                        log.debug('  ! %-15s [ %s ]', a, m)
                        not_used = 1
                        _actprint[(a, m)] = 1
        if not_used:
            log.debug('')

        # Construct the goto table for this state
        nkeys = {}
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I, n)
            j = self.lr0_cidhash.get(id(g), -1)
            if j >= 0:
                st_goto[n] = j
                log.info('    %-30s shift and go to state %d', n, j)

        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
+
def write_table(self, tabmodule, outputdir='', signature=''):
    """Write the LR parsing tables to the module <tabmodule>.py.

    tabmodule -- dotted name of the table module to generate
    outputdir -- directory in which to place the file
    signature -- grammar signature embedded for cache validation

    Raises IOError if tabmodule is an already-imported module object or
    if the file cannot be written.
    """
    if isinstance(tabmodule, types.ModuleType):
        raise IOError("Won't overwrite existing tabmodule")

    basemodulename = tabmodule.split('.')[-1]
    filename = os.path.join(outputdir, basemodulename) + '.py'

    # A 'with' block replaces the original try/except IOError that merely
    # re-raised: the file handle is now closed even if a write fails.
    with open(filename, 'w') as f:
        f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r

_lr_method = %r

_lr_signature = %r
    ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))

        # Change smaller to 0 to go back to original tables
        smaller = 1

        # Factor out names to try and make smaller
        if smaller:
            items = {}

            for s, nd in self.lr_action.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write('\n_lr_action_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)

                f.write(']),')
            f.write('}\n')

            f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
del _lr_action_items
''')
        else:
            f.write('\n_lr_action = { ')
            for k, v in self.lr_action.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')

        if smaller:
            # Factor out names to try and make smaller
            items = {}

            for s, nd in self.lr_goto.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write('\n_lr_goto_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)

                f.write(']),')
            f.write('}\n')

            f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
   for _x, _y in zip(_v[0], _v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = {}
       _lr_goto[_x][_k] = _y
del _lr_goto_items
''')
        else:
            f.write('\n_lr_goto = { ')
            for k, v in self.lr_goto.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')

        # Write production table
        f.write('_lr_productions = [\n')
        for p in self.lr_productions:
            if p.func:
                f.write('  (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
                                                      p.func, os.path.basename(p.file), p.line))
            else:
                f.write('  (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
        f.write(']\n')
+
+
def pickle_table(self, filename, signature=''):
    """Pickle the LR parsing tables (plus version, method and grammar
    signature) to the file named by filename."""
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(filename, 'wb') as outf:
        # Load order in read_pickle() depends on this dump order.
        for obj in (__tabversion__, self.lr_method, signature,
                    self.lr_action, self.lr_goto):
            pickle.dump(obj, outf, pickle_protocol)

        rows = []
        for p in self.lr_productions:
            if p.func:
                rows.append((p.str, p.name, p.len, p.func,
                             os.path.basename(p.file), p.line))
            else:
                rows.append((str(p), p.name, p.len, None, None, None))
        pickle.dump(rows, outf, pickle_protocol)
+
+# -----------------------------------------------------------------------------
+# === INTROSPECTION ===
+#
+# The following functions and classes are used to implement the PLY
+# introspection features followed by the yacc() function itself.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
+
def get_caller_module_dict(levels):
    """Return the symbol table of the frame *levels* up the call stack.

    The result is a copy of that frame's globals overlaid with its locals
    (when the two differ, i.e. the frame is not at module level).  Used to
    recover the environment of a yacc() call when no module was supplied.
    """
    frame = sys._getframe(levels)
    symbols = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
+
+# -----------------------------------------------------------------------------
+# parse_grammar()
+#
+# This takes a raw grammar rule string and parses it into production data
+# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar docstring into production data.

    Returns a list of (file, lineno, prodname, syms) tuples, one per
    grammar line.  *line* is the line number of the docstring's opening,
    so the first grammar line is numbered line + 1.  Raises SyntaxError
    for a misplaced '|', a missing ':'/'::=' separator, or any other
    malformed rule line.
    """
    grammar = []
    lastp = None          # name of the most recent rule, for '|' continuations
    dline = line
    for raw in doc.splitlines():
        dline += 1
        tokens = raw.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # Continuation of the previous rule
                if lastp is None:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
                prodname = lastp
                syms = tokens[1:]
            else:
                prodname = tokens[0]
                lastp = prodname
                syms = tokens[2:]
                assign = tokens[1]
                if assign not in (':', '::='):
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))

            grammar.append((file, dline, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # e.g. a one-token line: tokens[1] above raises IndexError
            raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, raw.strip()))

    return grammar
+
+# -----------------------------------------------------------------------------
+# ParserReflect()
+#
+# This class represents information extracted for building a parser including
+# start symbol, error function, tokens, precedence list, action functions,
+# etc.
+# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Information extracted from a prospective parser specification.

    Pulls the start symbol, error function, token list, precedence table
    and p_* rule functions out of the supplied symbol dictionary *pdict*
    (get_all()), then checks them for common mistakes (validate_all()),
    logging problems to *log*.  Used by yacc() before building a grammar.
    """
    def __init__(self, pdict, log=None):
        self.pdict = pdict
        self.start = None
        self.error_func = None
        self.tokens = None
        self.modules = set()
        self.grammar = []
        self.error = False

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        """Extract every piece of parser information from pdict."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        """Run all validation steps; return True if any error was logged."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_modules()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return an MD5 hex digest of the grammar specification.

        Must be called after get_all() (relies on self.prec / self.pfuncs
        being set there).  yacc() compares this against the signature
        stored in a previously generated table file to decide whether the
        tables can be reused.
        """
        # Python 2 fallback: hashlib did not always exist.
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update(''.join([''.join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(' '.join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    sig.update(f[3].encode('latin-1'))
        except (TypeError, ValueError):
            # Best effort: un-encodable values are skipped here;
            # validate_all() reports malformed specs separately.
            pass

        digest = base64.b16encode(sig.digest())
        # Return str (not bytes) on Python 3.
        if sys.version_info[0] >= 3:
            digest = digest.decode('latin-1')
        return digest

    # -----------------------------------------------------------------------------
    # validate_modules()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file.  Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------

    def validate_modules(self):
        """Warn about duplicated p_rulename() definitions in source modules."""
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for module in self.modules:
            lines, linen = inspect.getsourcelines(module)

            counthash = {}
            for linen, line in enumerate(lines):
                linen += 1
                m = fre.match(line)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        filename = inspect.getsourcefile(module)
                        self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
                                         filename, linen, name, prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start, string_types):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        """Check that p_error (if defined) is a callable taking one argument."""
        if self.error_func:
            if isinstance(self.error_func, types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = True
                return

            eline = self.error_func.__code__.co_firstlineno
            efile = self.error_func.__code__.co_filename
            module = inspect.getmodule(self.error_func)
            self.modules.add(module)

            # Bound methods carry an implicit self, hence the adjustment.
            argcount = self.error_func.__code__.co_argcount - ismethod
            if argcount != 1:
                self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
                self.error = True

    # Get the tokens map
    def get_tokens(self):
        """Read the 'tokens' list from pdict, logging errors if absent/invalid."""
        tokens = self.pdict.get('tokens')
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = True
            return

        terminals = set()
        for n in self.tokens:
            if n in terminals:
                self.log.warning('Token %r multiply defined', n)
            terminals.add(n)

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get('precedence')

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Flatten the 'precedence' spec into (term, assoc, level) tuples.

        Levels are numbered from 1 in declaration order; any malformed
        entry aborts with self.error set.
        """
        preclist = []
        if self.prec:
            if not isinstance(self.prec, (list, tuple)):
                self.log.error('precedence must be a list or tuple')
                self.error = True
                return
            for level, p in enumerate(self.prec):
                if not isinstance(p, (list, tuple)):
                    self.log.error('Bad precedence table')
                    self.error = True
                    return

                if len(p) < 2:
                    self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
                    self.error = True
                    return
                assoc = p[0]
                if not isinstance(assoc, string_types):
                    self.log.error('precedence associativity must be a string')
                    self.error = True
                    return
                for term in p[1:]:
                    if not isinstance(term, string_types):
                        self.log.error('precedence items must be strings')
                        self.error = True
                        return
                    preclist.append((term, assoc, level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        """Collect all p_* rule functions as (line, module, name, doc) tuples."""
        p_functions = []
        for name, item in self.pdict.items():
            if not name.startswith('p_') or name == 'p_error':
                continue
            if isinstance(item, (types.FunctionType, types.MethodType)):
                line = item.__code__.co_firstlineno
                module = inspect.getmodule(item)
                p_functions.append((line, module, name, item.__doc__))

        # Sort all of the actions by line number; make sure to stringify
        # modules to make them sortable, since `line` may not uniquely sort all
        # p functions
        p_functions.sort(key=lambda p_function: (
            p_function[0],
            str(p_function[1]),
            p_function[2],
            p_function[3]))
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check every p_* function's arity/docstring and parse its grammar.

        Populates self.grammar with (funcname, (file, line, prodname, syms))
        entries, and also warns about p_ names that are not functions or
        functions that look like undeclared grammar rules.
        """
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error('no rules of the form p_rulename are defined')
            self.error = True
            return

        for line, module, name, doc in self.pfuncs:
            file = inspect.getsourcefile(module)
            func = self.pdict[name]
            # Methods take (self, p); plain functions take (p).
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func.__code__.co_argcount > reqargs:
                self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
                self.error = True
            elif func.__code__.co_argcount < reqargs:
                self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
                self.error = True
            elif not func.__doc__:
                self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
                                 file, line, func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc, file, line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError as e:
                    self.log.error(str(e))
                    self.error = True

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.modules.add(module)

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.

        for n, v in self.pdict.items():
            if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
                continue
            if n.startswith('t_'):
                continue
            if n.startswith('p_') and n != 'p_error':
                self.log.warning('%r not defined as a function', n)
            if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
                (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
                if v.__doc__:
                    try:
                        doc = v.__doc__.split(' ')
                        if doc[1] == ':':
                            self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
                                             v.__code__.co_filename, v.__code__.co_firstlineno, n)
                    except IndexError:
                        pass

        self.grammar = grammar
+
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build a parser
+# -----------------------------------------------------------------------------
+
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
         outputdir=None, debuglog=None, errorlog=None, picklefile=None):
    """Build and return an LR parser from grammar rules.

    The grammar specification (tokens, precedence, p_* functions, start,
    p_error) is taken from *module* if given, otherwise from the caller's
    namespace.  Previously generated tables (*tabmodule* or *picklefile*)
    are reused when their stored signature matches the current grammar's
    signature, or unconditionally when *optimize* is true; otherwise the
    tables are regenerated with *method* ('LALR' or 'SLR') and optionally
    written back out.  The built parser's parse() is also exported through
    the module-level ``parse`` global.  Raises YaccError when the
    specification is invalid or the grammar cannot be built.
    """
    if tabmodule is None:
        tabmodule = tab_module

    # Reference to the parsing method of the last built parser
    global parse

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in pdict:
            pdict['__file__'] = sys.modules[pdict['__module__']].__file__
    else:
        # Two frames up the stack: the caller of yacc()
        pdict = get_caller_module_dict(2)

    if outputdir is None:
        # If no output directory is set, the location of the output files
        # is determined according to the following rules:
        #     - If tabmodule specifies a package, files go into that package directory
        #     - Otherwise, files go in the same directory as the specifying module
        if isinstance(tabmodule, types.ModuleType):
            srcfile = tabmodule.__file__
        else:
            if '.' not in tabmodule:
                srcfile = pdict['__file__']
            else:
                parts = tabmodule.split('.')
                pkgname = '.'.join(parts[:-1])
                # Import the package dynamically so its __file__ can be consulted
                exec('import %s' % pkgname)
                srcfile = getattr(sys.modules[pkgname], '__file__', '')
        outputdir = os.path.dirname(srcfile)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = pdict.get('__package__')
    if pkg and isinstance(tabmodule, str):
        if '.' not in tabmodule:
            tabmodule = pkg + '.' + tabmodule



    # Set start symbol if it's specified directly using an argument
    if start is not None:
        pdict['start'] = start

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError('Unable to build parser')

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # optimize mode skips the signature check and trusts the tables as-is
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception as e:
                errorlog.warning('There was a problem loading the table file: %r', e)
    except VersionError as e:
        errorlog.warning(str(e))
    except ImportError:
        # No cached table module/pickle exists yet; fall through and generate
        pass

    if debuglog is None:
        if debug:
            try:
                debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
            except IOError as e:
                errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
                debuglog = NullLogger()
        else:
            debuglog = NullLogger()

    debuglog.info('Created by PLY version %s (https://www.dabeaz.com/ply/)', __version__)

    errors = False

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError('Unable to build parser')

    if not pinfo.error_func:
        errorlog.warning('no p_error() function is defined')

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError as e:
            errorlog.warning('%s', e)

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError as e:
            errorlog.error('%s', e)
            errors = True

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError as e:
        errorlog.error(str(e))
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
        errors = True

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info('')
        debuglog.info('Unused terminals:')
        debuglog.info('')
        for term in unused_terminals:
            errorlog.warning('Token %r defined, but not used', term)
            debuglog.info(' %s', term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info('')
        debuglog.info('Grammar')
        debuglog.info('')
        for n, p in enumerate(grammar.Productions):
            debuglog.info('Rule %-5d %s', n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning('There is 1 unused token')
    if len(unused_terminals) > 1:
        errorlog.warning('There are %d unused tokens', len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning('There is 1 unused rule')
    if len(unused_rules) > 1:
        errorlog.warning('There are %d unused rules', len(unused_rules))

    if debug:
        debuglog.info('')
        debuglog.info('Terminals, with rules where they appear')
        debuglog.info('')
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info('')
        debuglog.info('Nonterminals, with rules where they appear')
        debuglog.info('')
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info('')

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning('Symbol %r is unreachable', u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error('Infinite recursion detected for symbol %r', inf)
            errors = True

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug('Generating %s tables', method)

    lr = LRGeneratedTable(grammar, method, debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning('1 shift/reduce conflict')
        elif num_sr > 1:
            errorlog.warning('%d shift/reduce conflicts', num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning('1 reduce/reduce conflict')
        elif num_rr > 1:
            errorlog.warning('%d reduce/reduce conflicts', num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning('')
        debuglog.warning('Conflicts:')
        debuglog.warning('')

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)

        # Each (state, rule, rejected) combination is reported once only
        already_reported = set()
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue
            debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            debuglog.warning('rejected rule (%s) in state %d', rejected, state)
            errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            errorlog.warning('rejected rule (%s) in state %d', rejected, state)
            already_reported.add((state, id(rule), id(rejected)))

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning('Rule (%s) is never reduced', rejected)
                errorlog.warning('Rule (%s) is never reduced', rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        try:
            lr.write_table(tabmodule, outputdir, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))

    # Write a pickled version of the tables
    if picklefile:
        try:
            lr.pickle_table(picklefile, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (picklefile, e))

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)

    parse = parser.parse
    return parser